xref: /openbmc/linux/mm/memory.c (revision e7bae9bb)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/mm/memory.c
4  *
5  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
6  */
7 
8 /*
9  * demand-loading started 01.12.91 - seems it is high on the list of
10  * things wanted, and it should be easy to implement. - Linus
11  */
12 
13 /*
14  * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
15  * pages started 02.12.91, seems to work. - Linus.
16  *
17  * Tested sharing by executing about 30 /bin/sh: under the old kernel it
18  * would have taken more than the 6M I have free, but it worked well as
19  * far as I could see.
20  *
21  * Also corrected some "invalidate()"s - I wasn't doing enough of them.
22  */
23 
24 /*
25  * Real VM (paging to/from disk) started 18.12.91. Much more work and
26  * thought has to go into this. Oh, well..
27  * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
28  *		Found it. Everything seems to work now.
29  * 20.12.91  -  Ok, making the swap-device changeable like the root.
30  */
31 
32 /*
33  * 05.04.94  -  Multi-page memory management added for v1.1.
34  *              Idea by Alex Bligh (alex@cconcepts.co.uk)
35  *
36  * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
37  *		(Gerhard.Wichert@pdb.siemens.de)
38  *
39  * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
40  */
41 
42 #include <linux/kernel_stat.h>
43 #include <linux/mm.h>
44 #include <linux/sched/mm.h>
45 #include <linux/sched/coredump.h>
46 #include <linux/sched/numa_balancing.h>
47 #include <linux/sched/task.h>
48 #include <linux/hugetlb.h>
49 #include <linux/mman.h>
50 #include <linux/swap.h>
51 #include <linux/highmem.h>
52 #include <linux/pagemap.h>
53 #include <linux/memremap.h>
54 #include <linux/ksm.h>
55 #include <linux/rmap.h>
56 #include <linux/export.h>
57 #include <linux/delayacct.h>
58 #include <linux/init.h>
59 #include <linux/pfn_t.h>
60 #include <linux/writeback.h>
61 #include <linux/memcontrol.h>
62 #include <linux/mmu_notifier.h>
63 #include <linux/swapops.h>
64 #include <linux/elf.h>
65 #include <linux/gfp.h>
66 #include <linux/migrate.h>
67 #include <linux/string.h>
68 #include <linux/dma-debug.h>
69 #include <linux/debugfs.h>
70 #include <linux/userfaultfd_k.h>
71 #include <linux/dax.h>
72 #include <linux/oom.h>
73 #include <linux/numa.h>
74 #include <linux/perf_event.h>
75 #include <linux/ptrace.h>
76 #include <linux/vmalloc.h>
77 
78 #include <trace/events/kmem.h>
79 
80 #include <asm/io.h>
81 #include <asm/mmu_context.h>
82 #include <asm/pgalloc.h>
83 #include <linux/uaccess.h>
84 #include <asm/tlb.h>
85 #include <asm/tlbflush.h>
86 
87 #include "pgalloc-track.h"
88 #include "internal.h"
89 
90 #if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
91 #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
92 #endif
93 
94 #ifndef CONFIG_NEED_MULTIPLE_NODES
95 /* use the per-pgdat data instead for discontigmem - mbligh */
96 unsigned long max_mapnr;
97 EXPORT_SYMBOL(max_mapnr);
98 
99 struct page *mem_map;
100 EXPORT_SYMBOL(mem_map);
101 #endif
102 
103 /*
104  * A number of key systems in x86 including ioremap() rely on the assumption
105  * that high_memory defines the upper bound on direct map memory, the end
106  * of ZONE_NORMAL.  Under CONFIG_DISCONTIGMEM this means that max_low_pfn and
107  * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
108  * and ZONE_HIGHMEM.
109  */
110 void *high_memory;
111 EXPORT_SYMBOL(high_memory);
112 
113 /*
114  * Randomize the address space (stacks, mmaps, brk, etc.).
115  *
116  * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
117  *   as ancient (libc5 based) binaries can segfault. )
118  */
119 int randomize_va_space __read_mostly =
120 #ifdef CONFIG_COMPAT_BRK
121 					1;
122 #else
123 					2;
124 #endif
125 
126 #ifndef arch_faults_on_old_pte
127 static inline bool arch_faults_on_old_pte(void)
128 {
129 	/*
130 	 * Architectures that don't have a hardware access flag need to
131 	 * implement their own helper. By default, "true" means a page
132 	 * fault will be taken on an old pte.
133 	 */
134 	return true;
135 }
136 #endif
137 
138 static int __init disable_randmaps(char *s)
139 {
140 	randomize_va_space = 0;
141 	return 1;
142 }
143 __setup("norandmaps", disable_randmaps);
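
/*
 * For reference (not part of the original file): the same knob is exposed at
 * run time as the /proc/sys/kernel/randomize_va_space sysctl, where 0
 * disables randomization, 1 randomizes stacks, mmap base and VDSO, and 2
 * additionally randomizes the brk base (the default unless
 * CONFIG_COMPAT_BRK=y, as set up above).  The "norandmaps" boot parameter
 * handled here simply forces the value to 0.
 */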
144 
145 unsigned long zero_pfn __read_mostly;
146 EXPORT_SYMBOL(zero_pfn);
147 
148 unsigned long highest_memmap_pfn __read_mostly;
149 
150 /*
151  * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
152  */
153 static int __init init_zero_pfn(void)
154 {
155 	zero_pfn = page_to_pfn(ZERO_PAGE(0));
156 	return 0;
157 }
158 core_initcall(init_zero_pfn);
159 
160 void mm_trace_rss_stat(struct mm_struct *mm, int member, long count)
161 {
162 	trace_rss_stat(mm, member, count);
163 }
164 
165 #if defined(SPLIT_RSS_COUNTING)
166 
167 void sync_mm_rss(struct mm_struct *mm)
168 {
169 	int i;
170 
171 	for (i = 0; i < NR_MM_COUNTERS; i++) {
172 		if (current->rss_stat.count[i]) {
173 			add_mm_counter(mm, i, current->rss_stat.count[i]);
174 			current->rss_stat.count[i] = 0;
175 		}
176 	}
177 	current->rss_stat.events = 0;
178 }
179 
180 static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
181 {
182 	struct task_struct *task = current;
183 
184 	if (likely(task->mm == mm))
185 		task->rss_stat.count[member] += val;
186 	else
187 		add_mm_counter(mm, member, val);
188 }
189 #define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)
190 #define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)
191 
192 /* sync counter once per 64 page faults */
193 #define TASK_RSS_EVENTS_THRESH	(64)
194 static void check_sync_rss_stat(struct task_struct *task)
195 {
196 	if (unlikely(task != current))
197 		return;
198 	if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH))
199 		sync_mm_rss(task->mm);
200 }
201 #else /* SPLIT_RSS_COUNTING */
202 
203 #define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
204 #define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)
205 
206 static void check_sync_rss_stat(struct task_struct *task)
207 {
208 }
209 
210 #endif /* SPLIT_RSS_COUNTING */
211 
212 /*
213  * Note: this doesn't free the actual pages themselves. That
214  * has been handled earlier when unmapping all the memory regions.
215  */
216 static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
217 			   unsigned long addr)
218 {
219 	pgtable_t token = pmd_pgtable(*pmd);
220 	pmd_clear(pmd);
221 	pte_free_tlb(tlb, token, addr);
222 	mm_dec_nr_ptes(tlb->mm);
223 }
224 
225 static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
226 				unsigned long addr, unsigned long end,
227 				unsigned long floor, unsigned long ceiling)
228 {
229 	pmd_t *pmd;
230 	unsigned long next;
231 	unsigned long start;
232 
233 	start = addr;
234 	pmd = pmd_offset(pud, addr);
235 	do {
236 		next = pmd_addr_end(addr, end);
237 		if (pmd_none_or_clear_bad(pmd))
238 			continue;
239 		free_pte_range(tlb, pmd, addr);
240 	} while (pmd++, addr = next, addr != end);
241 
242 	start &= PUD_MASK;
243 	if (start < floor)
244 		return;
245 	if (ceiling) {
246 		ceiling &= PUD_MASK;
247 		if (!ceiling)
248 			return;
249 	}
250 	if (end - 1 > ceiling - 1)
251 		return;
252 
253 	pmd = pmd_offset(pud, start);
254 	pud_clear(pud);
255 	pmd_free_tlb(tlb, pmd, start);
256 	mm_dec_nr_pmds(tlb->mm);
257 }
258 
259 static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
260 				unsigned long addr, unsigned long end,
261 				unsigned long floor, unsigned long ceiling)
262 {
263 	pud_t *pud;
264 	unsigned long next;
265 	unsigned long start;
266 
267 	start = addr;
268 	pud = pud_offset(p4d, addr);
269 	do {
270 		next = pud_addr_end(addr, end);
271 		if (pud_none_or_clear_bad(pud))
272 			continue;
273 		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
274 	} while (pud++, addr = next, addr != end);
275 
276 	start &= P4D_MASK;
277 	if (start < floor)
278 		return;
279 	if (ceiling) {
280 		ceiling &= P4D_MASK;
281 		if (!ceiling)
282 			return;
283 	}
284 	if (end - 1 > ceiling - 1)
285 		return;
286 
287 	pud = pud_offset(p4d, start);
288 	p4d_clear(p4d);
289 	pud_free_tlb(tlb, pud, start);
290 	mm_dec_nr_puds(tlb->mm);
291 }
292 
293 static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
294 				unsigned long addr, unsigned long end,
295 				unsigned long floor, unsigned long ceiling)
296 {
297 	p4d_t *p4d;
298 	unsigned long next;
299 	unsigned long start;
300 
301 	start = addr;
302 	p4d = p4d_offset(pgd, addr);
303 	do {
304 		next = p4d_addr_end(addr, end);
305 		if (p4d_none_or_clear_bad(p4d))
306 			continue;
307 		free_pud_range(tlb, p4d, addr, next, floor, ceiling);
308 	} while (p4d++, addr = next, addr != end);
309 
310 	start &= PGDIR_MASK;
311 	if (start < floor)
312 		return;
313 	if (ceiling) {
314 		ceiling &= PGDIR_MASK;
315 		if (!ceiling)
316 			return;
317 	}
318 	if (end - 1 > ceiling - 1)
319 		return;
320 
321 	p4d = p4d_offset(pgd, start);
322 	pgd_clear(pgd);
323 	p4d_free_tlb(tlb, p4d, start);
324 }
325 
326 /*
327  * This function frees user-level page tables of a process.
328  */
329 void free_pgd_range(struct mmu_gather *tlb,
330 			unsigned long addr, unsigned long end,
331 			unsigned long floor, unsigned long ceiling)
332 {
333 	pgd_t *pgd;
334 	unsigned long next;
335 
336 	/*
337 	 * The next few lines have given us lots of grief...
338 	 *
339 	 * Why are we testing PMD* at this top level?  Because often
340 	 * there will be no work to do at all, and we'd prefer not to
341 	 * go all the way down to the bottom just to discover that.
342 	 *
343 	 * Why all these "- 1"s?  Because 0 represents both the bottom
344 	 * of the address space and the top of it (using -1 for the
345 	 * top wouldn't help much: the masks would do the wrong thing).
346 	 * The rule is that addr 0 and floor 0 refer to the bottom of
347 	 * the address space, but end 0 and ceiling 0 refer to the top.
348 	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
349 	 * that end 0 case should be mythical).
350 	 *
351 	 * Wherever addr is brought up or ceiling brought down, we must
352 	 * be careful to reject "the opposite 0" before it confuses the
353 	 * subsequent tests.  But what about where end is brought down
354 	 * by PMD_SIZE below? No, end can't go down to 0 there.
355 	 *
356 	 * Whereas we round start (addr) and ceiling down, by different
357 	 * masks at different levels, in order to test whether a table
358 	 * now has no other vmas using it and so can be freed, we don't
359 	 * bother to round floor or end up - the tests don't need that.
360 	 */
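
	/*
	 * Worked example (not part of the original file), assuming a 2MB
	 * PMD_SIZE (e.g. x86-64 with 4K pages) and hypothetical values
	 * floor = 0x1250000, addr = 0x1300000, end = 0x1700000,
	 * ceiling = 0x1750000:
	 *
	 *   addr &= PMD_MASK      -> 0x1200000, which is below floor, so
	 *   addr += PMD_SIZE      -> 0x1400000 (keep the pte page still
	 *                            used by the previous vma up to floor)
	 *   ceiling &= PMD_MASK   -> 0x1600000 (non-zero, so keep going)
	 *   end - 1 > ceiling - 1 -> true, so end -= PMD_SIZE -> 0x1500000
	 *                            (keep the pte page the next vma above
	 *                            ceiling may still need)
	 *   addr <= end - 1       -> proceed; only the page-table page
	 *                            covering [0x1400000, 0x1600000) is freed.
	 */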
361 
362 	addr &= PMD_MASK;
363 	if (addr < floor) {
364 		addr += PMD_SIZE;
365 		if (!addr)
366 			return;
367 	}
368 	if (ceiling) {
369 		ceiling &= PMD_MASK;
370 		if (!ceiling)
371 			return;
372 	}
373 	if (end - 1 > ceiling - 1)
374 		end -= PMD_SIZE;
375 	if (addr > end - 1)
376 		return;
377 	/*
378 	 * Page table pages are added to the mmu_gather with PAGE_SIZE
379 	 * (see pte_free_tlb()), so flush the TLB now if a different page size was in use.
380 	 */
381 	tlb_change_page_size(tlb, PAGE_SIZE);
382 	pgd = pgd_offset(tlb->mm, addr);
383 	do {
384 		next = pgd_addr_end(addr, end);
385 		if (pgd_none_or_clear_bad(pgd))
386 			continue;
387 		free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
388 	} while (pgd++, addr = next, addr != end);
389 }
390 
391 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
392 		unsigned long floor, unsigned long ceiling)
393 {
394 	while (vma) {
395 		struct vm_area_struct *next = vma->vm_next;
396 		unsigned long addr = vma->vm_start;
397 
398 		/*
399 		 * Hide vma from rmap and truncate_pagecache before freeing
400 		 * pgtables
401 		 */
402 		unlink_anon_vmas(vma);
403 		unlink_file_vma(vma);
404 
405 		if (is_vm_hugetlb_page(vma)) {
406 			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
407 				floor, next ? next->vm_start : ceiling);
408 		} else {
409 			/*
410 			 * Optimization: gather nearby vmas into one call down
411 			 */
412 			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
413 			       && !is_vm_hugetlb_page(next)) {
414 				vma = next;
415 				next = vma->vm_next;
416 				unlink_anon_vmas(vma);
417 				unlink_file_vma(vma);
418 			}
419 			free_pgd_range(tlb, addr, vma->vm_end,
420 				floor, next ? next->vm_start : ceiling);
421 		}
422 		vma = next;
423 	}
424 }
425 
426 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
427 {
428 	spinlock_t *ptl;
429 	pgtable_t new = pte_alloc_one(mm);
430 	if (!new)
431 		return -ENOMEM;
432 
433 	/*
434 	 * Ensure all pte setup (eg. pte page lock and page clearing) is
435 	 * visible before the pte is made visible to other CPUs by being
436 	 * put into page tables.
437 	 *
438 	 * The other side of the story is the pointer chasing in the page
439 	 * table walking code (when walking the page table without locking;
440 	 * ie. most of the time). Fortunately, these data accesses consist
441 	 * of a chain of data-dependent loads, meaning most CPUs (alpha
442 	 * being the notable exception) will already guarantee loads are
443 	 * seen in-order. See the alpha page table accessors for the
444 	 * smp_rmb() barriers in page table walking code.
445 	 */
446 	smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
447 
448 	ptl = pmd_lock(mm, pmd);
449 	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
450 		mm_inc_nr_ptes(mm);
451 		pmd_populate(mm, pmd, new);
452 		new = NULL;
453 	}
454 	spin_unlock(ptl);
455 	if (new)
456 		pte_free(mm, new);
457 	return 0;
458 }
459 
460 int __pte_alloc_kernel(pmd_t *pmd)
461 {
462 	pte_t *new = pte_alloc_one_kernel(&init_mm);
463 	if (!new)
464 		return -ENOMEM;
465 
466 	smp_wmb(); /* See comment in __pte_alloc */
467 
468 	spin_lock(&init_mm.page_table_lock);
469 	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
470 		pmd_populate_kernel(&init_mm, pmd, new);
471 		new = NULL;
472 	}
473 	spin_unlock(&init_mm.page_table_lock);
474 	if (new)
475 		pte_free_kernel(&init_mm, new);
476 	return 0;
477 }
478 
479 static inline void init_rss_vec(int *rss)
480 {
481 	memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
482 }
483 
484 static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
485 {
486 	int i;
487 
488 	if (current->mm == mm)
489 		sync_mm_rss(mm);
490 	for (i = 0; i < NR_MM_COUNTERS; i++)
491 		if (rss[i])
492 			add_mm_counter(mm, i, rss[i]);
493 }
494 
495 /*
496  * This function is called to print an error when a bad pte
497  * is found. For example, we might have a PFN-mapped pte in
498  * a region that doesn't allow it.
499  *
500  * The calling function must still handle the error.
501  */
502 static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
503 			  pte_t pte, struct page *page)
504 {
505 	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
506 	p4d_t *p4d = p4d_offset(pgd, addr);
507 	pud_t *pud = pud_offset(p4d, addr);
508 	pmd_t *pmd = pmd_offset(pud, addr);
509 	struct address_space *mapping;
510 	pgoff_t index;
511 	static unsigned long resume;
512 	static unsigned long nr_shown;
513 	static unsigned long nr_unshown;
514 
515 	/*
516 	 * Allow a burst of 60 reports, then keep quiet for that minute;
517 	 * or allow a steady drip of one report per second.
518 	 */
519 	if (nr_shown == 60) {
520 		if (time_before(jiffies, resume)) {
521 			nr_unshown++;
522 			return;
523 		}
524 		if (nr_unshown) {
525 			pr_alert("BUG: Bad page map: %lu messages suppressed\n",
526 				 nr_unshown);
527 			nr_unshown = 0;
528 		}
529 		nr_shown = 0;
530 	}
531 	if (nr_shown++ == 0)
532 		resume = jiffies + 60 * HZ;
533 
534 	mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
535 	index = linear_page_index(vma, addr);
536 
537 	pr_alert("BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
538 		 current->comm,
539 		 (long long)pte_val(pte), (long long)pmd_val(*pmd));
540 	if (page)
541 		dump_page(page, "bad pte");
542 	pr_alert("addr:%px vm_flags:%08lx anon_vma:%px mapping:%px index:%lx\n",
543 		 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
544 	pr_alert("file:%pD fault:%ps mmap:%ps readpage:%ps\n",
545 		 vma->vm_file,
546 		 vma->vm_ops ? vma->vm_ops->fault : NULL,
547 		 vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
548 		 mapping ? mapping->a_ops->readpage : NULL);
549 	dump_stack();
550 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
551 }
552 
553 /*
554  * vm_normal_page -- This function gets the "struct page" associated with a pte.
555  *
556  * "Special" mappings do not wish to be associated with a "struct page" (either
557  * it doesn't exist, or it exists but they don't want to touch it). In this
558  * case, NULL is returned here. "Normal" mappings do have a struct page.
559  *
560  * There are 2 broad cases. Firstly, an architecture may define a pte_special()
561  * pte bit, in which case this function is trivial. Secondly, an architecture
562  * may not have a spare pte bit, which requires a more complicated scheme,
563  * described below.
564  *
565  * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
566  * special mapping (even if there are underlying and valid "struct pages").
567  * COWed pages of a VM_PFNMAP are always normal.
568  *
569  * The way we recognize COWed pages within VM_PFNMAP mappings is through the
570  * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
571  * set, and the vm_pgoff will point to the first PFN mapped: thus every special
572  * mapping will always honor the rule
573  *
574  *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
575  *
576  * And for normal mappings this is false.
577  *
578  * This restricts such mappings to be a linear translation from virtual address
579  * to pfn. To get around this restriction, we allow arbitrary mappings so long
580  * as the vma is not a COW mapping; in that case, we know that all ptes are
581  * special (because none can have been COWed).
582  *
583  *
584  * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
585  *
586  * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
587  * page" backing, however the difference is that _all_ pages with a struct
588  * page (that is, those where pfn_valid is true) are refcounted and considered
589  * normal pages by the VM. The disadvantage is that pages are refcounted
590  * (which can be slower and simply not an option for some PFNMAP users). The
591  * advantage is that we don't have to follow the strict linearity rule of
592  * PFNMAP mappings in order to support COWable mappings.
593  *
594  */
595 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
596 			    pte_t pte)
597 {
598 	unsigned long pfn = pte_pfn(pte);
599 
600 	if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
601 		if (likely(!pte_special(pte)))
602 			goto check_pfn;
603 		if (vma->vm_ops && vma->vm_ops->find_special_page)
604 			return vma->vm_ops->find_special_page(vma, addr);
605 		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
606 			return NULL;
607 		if (is_zero_pfn(pfn))
608 			return NULL;
609 		if (pte_devmap(pte))
610 			return NULL;
611 
612 		print_bad_pte(vma, addr, pte, NULL);
613 		return NULL;
614 	}
615 
616 	/* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */
617 
618 	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
619 		if (vma->vm_flags & VM_MIXEDMAP) {
620 			if (!pfn_valid(pfn))
621 				return NULL;
622 			goto out;
623 		} else {
624 			unsigned long off;
625 			off = (addr - vma->vm_start) >> PAGE_SHIFT;
626 			if (pfn == vma->vm_pgoff + off)
627 				return NULL;
628 			if (!is_cow_mapping(vma->vm_flags))
629 				return NULL;
630 		}
631 	}
632 
633 	if (is_zero_pfn(pfn))
634 		return NULL;
635 
636 check_pfn:
637 	if (unlikely(pfn > highest_memmap_pfn)) {
638 		print_bad_pte(vma, addr, pte, NULL);
639 		return NULL;
640 	}
641 
642 	/*
643 	 * NOTE! We still have PageReserved() pages in the page tables.
644 	 * eg. VDSO mappings can cause them to exist.
645 	 */
646 out:
647 	return pfn_to_page(pfn);
648 }
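
/*
 * Illustration (not part of the original file; my_mmap and my_phys_base are
 * hypothetical): the VM_PFNMAP case described above is typically created by
 * a driver like this.  For a COW-able private mapping, remap_pfn_range()
 * records the first PFN in vma->vm_pgoff, so every pte it installs obeys the
 * linearity rule and vm_normal_page() returns NULL for it; only a page later
 * replaced by COW stops obeying the rule and is treated as a normal page.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_pfn_range(vma, vma->vm_start,
 *				       my_phys_base >> PAGE_SHIFT,
 *				       vma->vm_end - vma->vm_start,
 *				       vma->vm_page_prot);
 *	}
 */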
649 
650 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
651 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
652 				pmd_t pmd)
653 {
654 	unsigned long pfn = pmd_pfn(pmd);
655 
656 	/*
657 	 * There is no pmd_special() but there may be special pmds, e.g.
658 	 * in a direct-access (dax) mapping, so let's just replicate the
659 	 * !CONFIG_ARCH_HAS_PTE_SPECIAL case from vm_normal_page() here.
660 	 */
661 	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
662 		if (vma->vm_flags & VM_MIXEDMAP) {
663 			if (!pfn_valid(pfn))
664 				return NULL;
665 			goto out;
666 		} else {
667 			unsigned long off;
668 			off = (addr - vma->vm_start) >> PAGE_SHIFT;
669 			if (pfn == vma->vm_pgoff + off)
670 				return NULL;
671 			if (!is_cow_mapping(vma->vm_flags))
672 				return NULL;
673 		}
674 	}
675 
676 	if (pmd_devmap(pmd))
677 		return NULL;
678 	if (is_huge_zero_pmd(pmd))
679 		return NULL;
680 	if (unlikely(pfn > highest_memmap_pfn))
681 		return NULL;
682 
683 	/*
684 	 * NOTE! We still have PageReserved() pages in the page tables.
685 	 * eg. VDSO mappings can cause them to exist.
686 	 */
687 out:
688 	return pfn_to_page(pfn);
689 }
690 #endif
691 
692 /*
693  * Copy one vm_area from one task to the other. Assumes that the page
694  * tables already present in the new task are cleared over the whole
695  * range covered by this vma.
696  */
697 
698 static unsigned long
699 copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
700 		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
701 		unsigned long addr, int *rss)
702 {
703 	unsigned long vm_flags = vma->vm_flags;
704 	pte_t pte = *src_pte;
705 	struct page *page;
706 	swp_entry_t entry = pte_to_swp_entry(pte);
707 
708 	if (likely(!non_swap_entry(entry))) {
709 		if (swap_duplicate(entry) < 0)
710 			return entry.val;
711 
712 		/* make sure dst_mm is on swapoff's mmlist. */
713 		if (unlikely(list_empty(&dst_mm->mmlist))) {
714 			spin_lock(&mmlist_lock);
715 			if (list_empty(&dst_mm->mmlist))
716 				list_add(&dst_mm->mmlist,
717 						&src_mm->mmlist);
718 			spin_unlock(&mmlist_lock);
719 		}
720 		rss[MM_SWAPENTS]++;
721 	} else if (is_migration_entry(entry)) {
722 		page = migration_entry_to_page(entry);
723 
724 		rss[mm_counter(page)]++;
725 
726 		if (is_write_migration_entry(entry) &&
727 				is_cow_mapping(vm_flags)) {
728 			/*
729 			 * COW mappings require pages in both
730 			 * parent and child to be marked read-only.
731 			 */
732 			make_migration_entry_read(&entry);
733 			pte = swp_entry_to_pte(entry);
734 			if (pte_swp_soft_dirty(*src_pte))
735 				pte = pte_swp_mksoft_dirty(pte);
736 			if (pte_swp_uffd_wp(*src_pte))
737 				pte = pte_swp_mkuffd_wp(pte);
738 			set_pte_at(src_mm, addr, src_pte, pte);
739 		}
740 	} else if (is_device_private_entry(entry)) {
741 		page = device_private_entry_to_page(entry);
742 
743 		/*
744 		 * Update rss count even for unaddressable pages, as
745 		 * they should be treated just like normal pages in this
746 		 * respect.
747 		 *
748 		 * We will likely want to have some new rss counters
749 		 * for unaddressable pages, at some point. But for now
750 		 * keep things as they are.
751 		 */
752 		get_page(page);
753 		rss[mm_counter(page)]++;
754 		page_dup_rmap(page, false);
755 
756 		/*
757 		 * We do not preserve soft-dirty information, because so
758 		 * far, checkpoint/restore is the only feature that
759 		 * requires that. And checkpoint/restore does not work
760 		 * when a device driver is involved (you cannot easily
761 		 * save and restore device driver state).
762 		 */
763 		if (is_write_device_private_entry(entry) &&
764 		    is_cow_mapping(vm_flags)) {
765 			make_device_private_entry_read(&entry);
766 			pte = swp_entry_to_pte(entry);
767 			if (pte_swp_uffd_wp(*src_pte))
768 				pte = pte_swp_mkuffd_wp(pte);
769 			set_pte_at(src_mm, addr, src_pte, pte);
770 		}
771 	}
772 	set_pte_at(dst_mm, addr, dst_pte, pte);
773 	return 0;
774 }
775 
776 /*
777  * Copy a present and normal page if necessary.
778  *
779  * NOTE! The usual case is that this doesn't need to do
780  * anything, and can just return a positive value. That
781  * will let the caller know that it can just increase
782  * the page refcount and re-use the pte the traditional
783  * way.
784  *
785  * But _if_ we need to copy it because it needs to be
786  * pinned in the parent (and the child should get its own
787  * copy rather than just a reference to the same page),
788  * we'll do that here and return zero to let the caller
789  * know we're done.
790  *
791  * And if we need a pre-allocated page but don't yet have
792  * one, return a negative error to let the preallocation
793  * code know so that it can do so outside the page table
794  * lock.
795  */
796 static inline int
797 copy_present_page(struct mm_struct *dst_mm, struct mm_struct *src_mm,
798 		pte_t *dst_pte, pte_t *src_pte,
799 		struct vm_area_struct *vma, struct vm_area_struct *new,
800 		unsigned long addr, int *rss, struct page **prealloc,
801 		pte_t pte, struct page *page)
802 {
803 	struct page *new_page;
804 
805 	if (!is_cow_mapping(vma->vm_flags))
806 		return 1;
807 
808 	/*
809 	 * The trick starts.
810 	 *
811 	 * What we want to do is to check whether this page may
812 	 * have been pinned by the parent process.  If so,
813 	 * instead of wrprotect the pte on both sides, we copy
814 	 * the page immediately so that we'll always guarantee
815 	 * the pinned page won't be randomly replaced in the
816 	 * future.
817 	 *
818 	 * To achieve this, we do the following:
819 	 *
820 	 * 1. Write-protect the pte if it's writable.  This is
821 	 *    to protect concurrent write fast-gup with
822 	 *    FOLL_PIN, so that we'll fail the fast-gup with
823 	 *    the write bit removed.
824 	 *
825 	 * 2. Check page_maybe_dma_pinned() to see whether this
826 	 *    page may have been pinned.
827 	 *
828 	 * The order of these steps is important to serialize
829 	 * against the fast-gup code (gup_pte_range()) on the
830 	 * pte check and try_grab_compound_head(), so that
831 	 * we'll make sure either we'll capture that fast-gup
832 	 * we'll either capture that fast-gup and copy the
833 	 * pinned page here, or the fast-gup itself will
834 	 * fail.
835 	 * NOTE! Even if we don't end up copying the page,
836 	 * we won't undo this wrprotect(), because the normal
837 	 * reference copy will need it anyway.
838 	 */
839 	if (pte_write(pte))
840 		ptep_set_wrprotect(src_mm, addr, src_pte);
841 
842 	/*
843 	 * These are the "normally we can just copy by reference"
844 	 * checks.
845 	 */
846 	if (likely(!atomic_read(&src_mm->has_pinned)))
847 		return 1;
848 	if (likely(!page_maybe_dma_pinned(page)))
849 		return 1;
850 
851 	/*
852 	 * Uhhuh. It looks like the page might be a pinned page,
853 	 * and we actually need to copy it. Now we can set the
854 	 * source pte back to being writable.
855 	 */
856 	if (pte_write(pte))
857 		set_pte_at(src_mm, addr, src_pte, pte);
858 
859 	new_page = *prealloc;
860 	if (!new_page)
861 		return -EAGAIN;
862 
863 	/*
864 	 * We have a prealloc page, all good!  Take it
865 	 * over and copy the page & arm it.
866 	 */
867 	*prealloc = NULL;
868 	copy_user_highpage(new_page, page, addr, vma);
869 	__SetPageUptodate(new_page);
870 	page_add_new_anon_rmap(new_page, new, addr, false);
871 	lru_cache_add_inactive_or_unevictable(new_page, new);
872 	rss[mm_counter(new_page)]++;
873 
874 	/* All done, just insert the new page copy in the child */
875 	pte = mk_pte(new_page, new->vm_page_prot);
876 	pte = maybe_mkwrite(pte_mkdirty(pte), new);
877 	set_pte_at(dst_mm, addr, dst_pte, pte);
878 	return 0;
879 }
880 
881 /*
882  * Copy one pte.  Returns 0 if succeeded, or -EAGAIN if one preallocated page
883  * is required to copy this pte.
884  */
885 static inline int
886 copy_present_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
887 		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
888 		struct vm_area_struct *new,
889 		unsigned long addr, int *rss, struct page **prealloc)
890 {
891 	unsigned long vm_flags = vma->vm_flags;
892 	pte_t pte = *src_pte;
893 	struct page *page;
894 
895 	page = vm_normal_page(vma, addr, pte);
896 	if (page) {
897 		int retval;
898 
899 		retval = copy_present_page(dst_mm, src_mm,
900 			dst_pte, src_pte,
901 			vma, new,
902 			addr, rss, prealloc,
903 			pte, page);
904 		if (retval <= 0)
905 			return retval;
906 
907 		get_page(page);
908 		page_dup_rmap(page, false);
909 		rss[mm_counter(page)]++;
910 	}
911 
912 	/*
913 	 * If it's a COW mapping, write protect it both
914 	 * in the parent and the child
915 	 */
916 	if (is_cow_mapping(vm_flags) && pte_write(pte)) {
917 		ptep_set_wrprotect(src_mm, addr, src_pte);
918 		pte = pte_wrprotect(pte);
919 	}
920 
921 	/*
922 	 * If it's a shared mapping, mark it clean in
923 	 * the child
924 	 */
925 	if (vm_flags & VM_SHARED)
926 		pte = pte_mkclean(pte);
927 	pte = pte_mkold(pte);
928 
929 	/*
930 	 * Make sure the _PAGE_UFFD_WP bit is cleared if the new VMA
931 	 * does not have the VM_UFFD_WP, which means that the uffd
932 	 * fork event is not enabled.
933 	 */
934 	if (!(vm_flags & VM_UFFD_WP))
935 		pte = pte_clear_uffd_wp(pte);
936 
937 	set_pte_at(dst_mm, addr, dst_pte, pte);
938 	return 0;
939 }
940 
941 static inline struct page *
942 page_copy_prealloc(struct mm_struct *src_mm, struct vm_area_struct *vma,
943 		   unsigned long addr)
944 {
945 	struct page *new_page;
946 
947 	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, addr);
948 	if (!new_page)
949 		return NULL;
950 
951 	if (mem_cgroup_charge(new_page, src_mm, GFP_KERNEL)) {
952 		put_page(new_page);
953 		return NULL;
954 	}
955 	cgroup_throttle_swaprate(new_page, GFP_KERNEL);
956 
957 	return new_page;
958 }
959 
960 static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
961 		   pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
962 		   struct vm_area_struct *new,
963 		   unsigned long addr, unsigned long end)
964 {
965 	pte_t *orig_src_pte, *orig_dst_pte;
966 	pte_t *src_pte, *dst_pte;
967 	spinlock_t *src_ptl, *dst_ptl;
968 	int progress, ret = 0;
969 	int rss[NR_MM_COUNTERS];
970 	swp_entry_t entry = (swp_entry_t){0};
971 	struct page *prealloc = NULL;
972 
973 again:
974 	progress = 0;
975 	init_rss_vec(rss);
976 
977 	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
978 	if (!dst_pte) {
979 		ret = -ENOMEM;
980 		goto out;
981 	}
982 	src_pte = pte_offset_map(src_pmd, addr);
983 	src_ptl = pte_lockptr(src_mm, src_pmd);
984 	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
985 	orig_src_pte = src_pte;
986 	orig_dst_pte = dst_pte;
987 	arch_enter_lazy_mmu_mode();
988 
989 	do {
990 		/*
991 		 * We are holding two locks at this point - either of them
992 		 * could generate latencies in another task on another CPU.
993 		 */
994 		if (progress >= 32) {
995 			progress = 0;
996 			if (need_resched() ||
997 			    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
998 				break;
999 		}
1000 		if (pte_none(*src_pte)) {
1001 			progress++;
1002 			continue;
1003 		}
1004 		if (unlikely(!pte_present(*src_pte))) {
1005 			entry.val = copy_nonpresent_pte(dst_mm, src_mm,
1006 							dst_pte, src_pte,
1007 							vma, addr, rss);
1008 			if (entry.val)
1009 				break;
1010 			progress += 8;
1011 			continue;
1012 		}
1013 		/* copy_present_pte() will clear `*prealloc' if consumed */
1014 		ret = copy_present_pte(dst_mm, src_mm, dst_pte, src_pte,
1015 				       vma, new, addr, rss, &prealloc);
1016 		/*
1017 		 * If we need a pre-allocated page for this pte, drop the
1018 		 * locks, allocate, and try again.
1019 		 */
1020 		if (unlikely(ret == -EAGAIN))
1021 			break;
1022 		if (unlikely(prealloc)) {
1023 			/*
1024 			 * The pre-allocated page cannot be reused for the next
1025 			 * pte, so that mempolicy is strictly followed (alloc_page_vma()
1026 			 * allocates a page according to the address).  This can
1027 			 * only happen if a pinned pte changed.
1028 			 */
1029 			put_page(prealloc);
1030 			prealloc = NULL;
1031 		}
1032 		progress += 8;
1033 	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
1034 
1035 	arch_leave_lazy_mmu_mode();
1036 	spin_unlock(src_ptl);
1037 	pte_unmap(orig_src_pte);
1038 	add_mm_rss_vec(dst_mm, rss);
1039 	pte_unmap_unlock(orig_dst_pte, dst_ptl);
1040 	cond_resched();
1041 
1042 	if (entry.val) {
1043 		if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) {
1044 			ret = -ENOMEM;
1045 			goto out;
1046 		}
1047 		entry.val = 0;
1048 	} else if (ret) {
1049 		WARN_ON_ONCE(ret != -EAGAIN);
1050 		prealloc = page_copy_prealloc(src_mm, vma, addr);
1051 		if (!prealloc)
1052 			return -ENOMEM;
1053 		/* We've captured and resolved the error. Reset, try again. */
1054 		ret = 0;
1055 	}
1056 	if (addr != end)
1057 		goto again;
1058 out:
1059 	if (unlikely(prealloc))
1060 		put_page(prealloc);
1061 	return ret;
1062 }
1063 
1064 static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1065 		pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
1066 		struct vm_area_struct *new,
1067 		unsigned long addr, unsigned long end)
1068 {
1069 	pmd_t *src_pmd, *dst_pmd;
1070 	unsigned long next;
1071 
1072 	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
1073 	if (!dst_pmd)
1074 		return -ENOMEM;
1075 	src_pmd = pmd_offset(src_pud, addr);
1076 	do {
1077 		next = pmd_addr_end(addr, end);
1078 		if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)
1079 			|| pmd_devmap(*src_pmd)) {
1080 			int err;
1081 			VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, vma);
1082 			err = copy_huge_pmd(dst_mm, src_mm,
1083 					    dst_pmd, src_pmd, addr, vma);
1084 			if (err == -ENOMEM)
1085 				return -ENOMEM;
1086 			if (!err)
1087 				continue;
1088 			/* fall through */
1089 		}
1090 		if (pmd_none_or_clear_bad(src_pmd))
1091 			continue;
1092 		if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
1093 				   vma, new, addr, next))
1094 			return -ENOMEM;
1095 	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
1096 	return 0;
1097 }
1098 
1099 static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1100 		p4d_t *dst_p4d, p4d_t *src_p4d, struct vm_area_struct *vma,
1101 		struct vm_area_struct *new,
1102 		unsigned long addr, unsigned long end)
1103 {
1104 	pud_t *src_pud, *dst_pud;
1105 	unsigned long next;
1106 
1107 	dst_pud = pud_alloc(dst_mm, dst_p4d, addr);
1108 	if (!dst_pud)
1109 		return -ENOMEM;
1110 	src_pud = pud_offset(src_p4d, addr);
1111 	do {
1112 		next = pud_addr_end(addr, end);
1113 		if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
1114 			int err;
1115 
1116 			VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, vma);
1117 			err = copy_huge_pud(dst_mm, src_mm,
1118 					    dst_pud, src_pud, addr, vma);
1119 			if (err == -ENOMEM)
1120 				return -ENOMEM;
1121 			if (!err)
1122 				continue;
1123 			/* fall through */
1124 		}
1125 		if (pud_none_or_clear_bad(src_pud))
1126 			continue;
1127 		if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
1128 				   vma, new, addr, next))
1129 			return -ENOMEM;
1130 	} while (dst_pud++, src_pud++, addr = next, addr != end);
1131 	return 0;
1132 }
1133 
1134 static inline int copy_p4d_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1135 		pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
1136 		struct vm_area_struct *new,
1137 		unsigned long addr, unsigned long end)
1138 {
1139 	p4d_t *src_p4d, *dst_p4d;
1140 	unsigned long next;
1141 
1142 	dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr);
1143 	if (!dst_p4d)
1144 		return -ENOMEM;
1145 	src_p4d = p4d_offset(src_pgd, addr);
1146 	do {
1147 		next = p4d_addr_end(addr, end);
1148 		if (p4d_none_or_clear_bad(src_p4d))
1149 			continue;
1150 		if (copy_pud_range(dst_mm, src_mm, dst_p4d, src_p4d,
1151 				   vma, new, addr, next))
1152 			return -ENOMEM;
1153 	} while (dst_p4d++, src_p4d++, addr = next, addr != end);
1154 	return 0;
1155 }
1156 
1157 int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1158 		    struct vm_area_struct *vma, struct vm_area_struct *new)
1159 {
1160 	pgd_t *src_pgd, *dst_pgd;
1161 	unsigned long next;
1162 	unsigned long addr = vma->vm_start;
1163 	unsigned long end = vma->vm_end;
1164 	struct mmu_notifier_range range;
1165 	bool is_cow;
1166 	int ret;
1167 
1168 	/*
1169 	 * Don't copy ptes where a page fault will fill them correctly.
1170 	 * Fork becomes much lighter when there are big shared or private
1171 	 * readonly mappings. The tradeoff is that copy_page_range is more
1172 	 * efficient than faulting.
1173 	 */
1174 	if (!(vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP)) &&
1175 			!vma->anon_vma)
1176 		return 0;
1177 
1178 	if (is_vm_hugetlb_page(vma))
1179 		return copy_hugetlb_page_range(dst_mm, src_mm, vma);
1180 
1181 	if (unlikely(vma->vm_flags & VM_PFNMAP)) {
1182 		/*
1183 		 * We do not free on error cases below as remove_vma
1184 		 * gets called on error from a higher-level routine.
1185 		 */
1186 		ret = track_pfn_copy(vma);
1187 		if (ret)
1188 			return ret;
1189 	}
1190 
1191 	/*
1192 	 * We need to invalidate the secondary MMU mappings only when
1193 	 * there could be a permission downgrade on the ptes of the
1194 	 * parent mm. And a permission downgrade will only happen if
1195 	 * is_cow_mapping() returns true.
1196 	 */
1197 	is_cow = is_cow_mapping(vma->vm_flags);
1198 
1199 	if (is_cow) {
1200 		mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
1201 					0, vma, src_mm, addr, end);
1202 		mmu_notifier_invalidate_range_start(&range);
1203 	}
1204 
1205 	ret = 0;
1206 	dst_pgd = pgd_offset(dst_mm, addr);
1207 	src_pgd = pgd_offset(src_mm, addr);
1208 	do {
1209 		next = pgd_addr_end(addr, end);
1210 		if (pgd_none_or_clear_bad(src_pgd))
1211 			continue;
1212 		if (unlikely(copy_p4d_range(dst_mm, src_mm, dst_pgd, src_pgd,
1213 					    vma, new, addr, next))) {
1214 			ret = -ENOMEM;
1215 			break;
1216 		}
1217 	} while (dst_pgd++, src_pgd++, addr = next, addr != end);
1218 
1219 	if (is_cow)
1220 		mmu_notifier_invalidate_range_end(&range);
1221 	return ret;
1222 }
1223 
1224 static unsigned long zap_pte_range(struct mmu_gather *tlb,
1225 				struct vm_area_struct *vma, pmd_t *pmd,
1226 				unsigned long addr, unsigned long end,
1227 				struct zap_details *details)
1228 {
1229 	struct mm_struct *mm = tlb->mm;
1230 	int force_flush = 0;
1231 	int rss[NR_MM_COUNTERS];
1232 	spinlock_t *ptl;
1233 	pte_t *start_pte;
1234 	pte_t *pte;
1235 	swp_entry_t entry;
1236 
1237 	tlb_change_page_size(tlb, PAGE_SIZE);
1238 again:
1239 	init_rss_vec(rss);
1240 	start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
1241 	pte = start_pte;
1242 	flush_tlb_batched_pending(mm);
1243 	arch_enter_lazy_mmu_mode();
1244 	do {
1245 		pte_t ptent = *pte;
1246 		if (pte_none(ptent))
1247 			continue;
1248 
1249 		if (need_resched())
1250 			break;
1251 
1252 		if (pte_present(ptent)) {
1253 			struct page *page;
1254 
1255 			page = vm_normal_page(vma, addr, ptent);
1256 			if (unlikely(details) && page) {
1257 				/*
1258 				 * unmap_shared_mapping_pages() wants to
1259 				 * invalidate cache without truncating:
1260 				 * unmap shared but keep private pages.
1261 				 */
1262 				if (details->check_mapping &&
1263 				    details->check_mapping != page_rmapping(page))
1264 					continue;
1265 			}
1266 			ptent = ptep_get_and_clear_full(mm, addr, pte,
1267 							tlb->fullmm);
1268 			tlb_remove_tlb_entry(tlb, pte, addr);
1269 			if (unlikely(!page))
1270 				continue;
1271 
1272 			if (!PageAnon(page)) {
1273 				if (pte_dirty(ptent)) {
1274 					force_flush = 1;
1275 					set_page_dirty(page);
1276 				}
1277 				if (pte_young(ptent) &&
1278 				    likely(!(vma->vm_flags & VM_SEQ_READ)))
1279 					mark_page_accessed(page);
1280 			}
1281 			rss[mm_counter(page)]--;
1282 			page_remove_rmap(page, false);
1283 			if (unlikely(page_mapcount(page) < 0))
1284 				print_bad_pte(vma, addr, ptent, page);
1285 			if (unlikely(__tlb_remove_page(tlb, page))) {
1286 				force_flush = 1;
1287 				addr += PAGE_SIZE;
1288 				break;
1289 			}
1290 			continue;
1291 		}
1292 
1293 		entry = pte_to_swp_entry(ptent);
1294 		if (is_device_private_entry(entry)) {
1295 			struct page *page = device_private_entry_to_page(entry);
1296 
1297 			if (unlikely(details && details->check_mapping)) {
1298 				/*
1299 				 * unmap_shared_mapping_pages() wants to
1300 				 * invalidate cache without truncating:
1301 				 * unmap shared but keep private pages.
1302 				 */
1303 				if (details->check_mapping !=
1304 				    page_rmapping(page))
1305 					continue;
1306 			}
1307 
1308 			pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
1309 			rss[mm_counter(page)]--;
1310 			page_remove_rmap(page, false);
1311 			put_page(page);
1312 			continue;
1313 		}
1314 
1315 		/* If details->check_mapping, we leave swap entries. */
1316 		if (unlikely(details))
1317 			continue;
1318 
1319 		if (!non_swap_entry(entry))
1320 			rss[MM_SWAPENTS]--;
1321 		else if (is_migration_entry(entry)) {
1322 			struct page *page;
1323 
1324 			page = migration_entry_to_page(entry);
1325 			rss[mm_counter(page)]--;
1326 		}
1327 		if (unlikely(!free_swap_and_cache(entry)))
1328 			print_bad_pte(vma, addr, ptent, NULL);
1329 		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
1330 	} while (pte++, addr += PAGE_SIZE, addr != end);
1331 
1332 	add_mm_rss_vec(mm, rss);
1333 	arch_leave_lazy_mmu_mode();
1334 
1335 	/* Do the actual TLB flush before dropping ptl */
1336 	if (force_flush)
1337 		tlb_flush_mmu_tlbonly(tlb);
1338 	pte_unmap_unlock(start_pte, ptl);
1339 
1340 	/*
1341 	 * If we forced a TLB flush (either due to running out of
1342 	 * batch buffers or because we needed to flush dirty TLB
1343 	 * entries before releasing the ptl), free the batched
1344 	 * memory too. Restart if we didn't do everything.
1345 	 */
1346 	if (force_flush) {
1347 		force_flush = 0;
1348 		tlb_flush_mmu(tlb);
1349 	}
1350 
1351 	if (addr != end) {
1352 		cond_resched();
1353 		goto again;
1354 	}
1355 
1356 	return addr;
1357 }
1358 
1359 static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
1360 				struct vm_area_struct *vma, pud_t *pud,
1361 				unsigned long addr, unsigned long end,
1362 				struct zap_details *details)
1363 {
1364 	pmd_t *pmd;
1365 	unsigned long next;
1366 
1367 	pmd = pmd_offset(pud, addr);
1368 	do {
1369 		next = pmd_addr_end(addr, end);
1370 		if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
1371 			if (next - addr != HPAGE_PMD_SIZE)
1372 				__split_huge_pmd(vma, pmd, addr, false, NULL);
1373 			else if (zap_huge_pmd(tlb, vma, pmd, addr))
1374 				goto next;
1375 			/* fall through */
1376 		}
1377 		/*
1378 		 * Here there can be other concurrent MADV_DONTNEED or
1379 		 * trans huge page faults running, and if the pmd is
1380 		 * none or trans huge it can change under us. This is
1381 		 * because MADV_DONTNEED holds the mmap_lock in read
1382 		 * mode.
1383 		 */
1384 		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
1385 			goto next;
1386 		next = zap_pte_range(tlb, vma, pmd, addr, next, details);
1387 next:
1388 		cond_resched();
1389 	} while (pmd++, addr = next, addr != end);
1390 
1391 	return addr;
1392 }
1393 
1394 static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
1395 				struct vm_area_struct *vma, p4d_t *p4d,
1396 				unsigned long addr, unsigned long end,
1397 				struct zap_details *details)
1398 {
1399 	pud_t *pud;
1400 	unsigned long next;
1401 
1402 	pud = pud_offset(p4d, addr);
1403 	do {
1404 		next = pud_addr_end(addr, end);
1405 		if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
1406 			if (next - addr != HPAGE_PUD_SIZE) {
1407 				mmap_assert_locked(tlb->mm);
1408 				split_huge_pud(vma, pud, addr);
1409 			} else if (zap_huge_pud(tlb, vma, pud, addr))
1410 				goto next;
1411 			/* fall through */
1412 		}
1413 		if (pud_none_or_clear_bad(pud))
1414 			continue;
1415 		next = zap_pmd_range(tlb, vma, pud, addr, next, details);
1416 next:
1417 		cond_resched();
1418 	} while (pud++, addr = next, addr != end);
1419 
1420 	return addr;
1421 }
1422 
1423 static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
1424 				struct vm_area_struct *vma, pgd_t *pgd,
1425 				unsigned long addr, unsigned long end,
1426 				struct zap_details *details)
1427 {
1428 	p4d_t *p4d;
1429 	unsigned long next;
1430 
1431 	p4d = p4d_offset(pgd, addr);
1432 	do {
1433 		next = p4d_addr_end(addr, end);
1434 		if (p4d_none_or_clear_bad(p4d))
1435 			continue;
1436 		next = zap_pud_range(tlb, vma, p4d, addr, next, details);
1437 	} while (p4d++, addr = next, addr != end);
1438 
1439 	return addr;
1440 }
1441 
1442 void unmap_page_range(struct mmu_gather *tlb,
1443 			     struct vm_area_struct *vma,
1444 			     unsigned long addr, unsigned long end,
1445 			     struct zap_details *details)
1446 {
1447 	pgd_t *pgd;
1448 	unsigned long next;
1449 
1450 	BUG_ON(addr >= end);
1451 	tlb_start_vma(tlb, vma);
1452 	pgd = pgd_offset(vma->vm_mm, addr);
1453 	do {
1454 		next = pgd_addr_end(addr, end);
1455 		if (pgd_none_or_clear_bad(pgd))
1456 			continue;
1457 		next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
1458 	} while (pgd++, addr = next, addr != end);
1459 	tlb_end_vma(tlb, vma);
1460 }
1461 
1462 
1463 static void unmap_single_vma(struct mmu_gather *tlb,
1464 		struct vm_area_struct *vma, unsigned long start_addr,
1465 		unsigned long end_addr,
1466 		struct zap_details *details)
1467 {
1468 	unsigned long start = max(vma->vm_start, start_addr);
1469 	unsigned long end;
1470 
1471 	if (start >= vma->vm_end)
1472 		return;
1473 	end = min(vma->vm_end, end_addr);
1474 	if (end <= vma->vm_start)
1475 		return;
1476 
1477 	if (vma->vm_file)
1478 		uprobe_munmap(vma, start, end);
1479 
1480 	if (unlikely(vma->vm_flags & VM_PFNMAP))
1481 		untrack_pfn(vma, 0, 0);
1482 
1483 	if (start != end) {
1484 		if (unlikely(is_vm_hugetlb_page(vma))) {
1485 			/*
1486 			 * It is undesirable to test vma->vm_file as it
1487 			 * should be non-null for a valid hugetlb area.
1488 			 * However, vm_file will be NULL in the error
1489 			 * cleanup path of mmap_region. When
1490 			 * hugetlbfs ->mmap method fails,
1491 			 * mmap_region() nullifies vma->vm_file
1492 			 * before calling this function to clean up.
1493 			 * Since no pte has actually been set up, it is
1494 			 * safe to do nothing in this case.
1495 			 */
1496 			if (vma->vm_file) {
1497 				i_mmap_lock_write(vma->vm_file->f_mapping);
1498 				__unmap_hugepage_range_final(tlb, vma, start, end, NULL);
1499 				i_mmap_unlock_write(vma->vm_file->f_mapping);
1500 			}
1501 		} else
1502 			unmap_page_range(tlb, vma, start, end, details);
1503 	}
1504 }
1505 
1506 /**
1507  * unmap_vmas - unmap a range of memory covered by a list of vma's
1508  * @tlb: address of the caller's struct mmu_gather
1509  * @vma: the starting vma
1510  * @start_addr: virtual address at which to start unmapping
1511  * @end_addr: virtual address at which to end unmapping
1512  *
1513  * Unmap all pages in the vma list.
1514  *
1515  * Only addresses between @start_addr and @end_addr will be unmapped.
1516  *
1517  * The VMA list must be sorted in ascending virtual address order.
1518  *
1519  * unmap_vmas() assumes that the caller will flush the whole unmapped address
1520  * range after unmap_vmas() returns.  So the only responsibility here is to
1521  * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
1522  * drops the lock and schedules.
1523  */
1524 void unmap_vmas(struct mmu_gather *tlb,
1525 		struct vm_area_struct *vma, unsigned long start_addr,
1526 		unsigned long end_addr)
1527 {
1528 	struct mmu_notifier_range range;
1529 
1530 	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
1531 				start_addr, end_addr);
1532 	mmu_notifier_invalidate_range_start(&range);
1533 	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
1534 		unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
1535 	mmu_notifier_invalidate_range_end(&range);
1536 }
1537 
1538 /**
1539  * zap_page_range - remove user pages in a given range
1540  * @vma: vm_area_struct holding the applicable pages
1541  * @start: starting address of pages to zap
1542  * @size: number of bytes to zap
1543  *
1544  * Caller must protect the VMA list
1545  */
1546 void zap_page_range(struct vm_area_struct *vma, unsigned long start,
1547 		unsigned long size)
1548 {
1549 	struct mmu_notifier_range range;
1550 	struct mmu_gather tlb;
1551 
1552 	lru_add_drain();
1553 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
1554 				start, start + size);
1555 	tlb_gather_mmu(&tlb, vma->vm_mm, start, range.end);
1556 	update_hiwater_rss(vma->vm_mm);
1557 	mmu_notifier_invalidate_range_start(&range);
1558 	for ( ; vma && vma->vm_start < range.end; vma = vma->vm_next)
1559 		unmap_single_vma(&tlb, vma, start, range.end, NULL);
1560 	mmu_notifier_invalidate_range_end(&range);
1561 	tlb_finish_mmu(&tlb, start, range.end);
1562 }
1563 
1564 /**
1565  * zap_page_range_single - remove user pages in a given range
1566  * @vma: vm_area_struct holding the applicable pages
1567  * @address: starting address of pages to zap
1568  * @size: number of bytes to zap
1569  * @details: details of shared cache invalidation
1570  *
1571  * The range must fit into one VMA.
1572  */
1573 static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
1574 		unsigned long size, struct zap_details *details)
1575 {
1576 	struct mmu_notifier_range range;
1577 	struct mmu_gather tlb;
1578 
1579 	lru_add_drain();
1580 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
1581 				address, address + size);
1582 	tlb_gather_mmu(&tlb, vma->vm_mm, address, range.end);
1583 	update_hiwater_rss(vma->vm_mm);
1584 	mmu_notifier_invalidate_range_start(&range);
1585 	unmap_single_vma(&tlb, vma, address, range.end, details);
1586 	mmu_notifier_invalidate_range_end(&range);
1587 	tlb_finish_mmu(&tlb, address, range.end);
1588 }
1589 
1590 /**
1591  * zap_vma_ptes - remove ptes mapping the vma
1592  * @vma: vm_area_struct holding ptes to be zapped
1593  * @address: starting address of pages to zap
1594  * @size: number of bytes to zap
1595  *
1596  * This function only unmaps ptes assigned to VM_PFNMAP vmas.
1597  *
1598  * The entire address range must be fully contained within the vma.
1599  *
1600  */
1601 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
1602 		unsigned long size)
1603 {
1604 	if (address < vma->vm_start || address + size > vma->vm_end ||
1605 	    		!(vma->vm_flags & VM_PFNMAP))
1606 		return;
1607 
1608 	zap_page_range_single(vma, address, size, NULL);
1609 }
1610 EXPORT_SYMBOL_GPL(zap_vma_ptes);
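
/*
 * Illustration (not part of the original file; my_revoke_mapping is
 * hypothetical): a driver that handed out device memory with
 * remap_pfn_range() can revoke the user mapping again before the backing
 * resource disappears, so that later accesses fault instead of touching
 * stale device memory.
 *
 *	static void my_revoke_mapping(struct vm_area_struct *vma)
 *	{
 *		zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
 *	}
 */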
1611 
1612 static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr)
1613 {
1614 	pgd_t *pgd;
1615 	p4d_t *p4d;
1616 	pud_t *pud;
1617 	pmd_t *pmd;
1618 
1619 	pgd = pgd_offset(mm, addr);
1620 	p4d = p4d_alloc(mm, pgd, addr);
1621 	if (!p4d)
1622 		return NULL;
1623 	pud = pud_alloc(mm, p4d, addr);
1624 	if (!pud)
1625 		return NULL;
1626 	pmd = pmd_alloc(mm, pud, addr);
1627 	if (!pmd)
1628 		return NULL;
1629 
1630 	VM_BUG_ON(pmd_trans_huge(*pmd));
1631 	return pmd;
1632 }
1633 
1634 pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
1635 			spinlock_t **ptl)
1636 {
1637 	pmd_t *pmd = walk_to_pmd(mm, addr);
1638 
1639 	if (!pmd)
1640 		return NULL;
1641 	return pte_alloc_map_lock(mm, pmd, addr, ptl);
1642 }
1643 
1644 static int validate_page_before_insert(struct page *page)
1645 {
1646 	if (PageAnon(page) || PageSlab(page) || page_has_type(page))
1647 		return -EINVAL;
1648 	flush_dcache_page(page);
1649 	return 0;
1650 }
1651 
1652 static int insert_page_into_pte_locked(struct mm_struct *mm, pte_t *pte,
1653 			unsigned long addr, struct page *page, pgprot_t prot)
1654 {
1655 	if (!pte_none(*pte))
1656 		return -EBUSY;
1657 	/* Ok, finally just insert the thing.. */
1658 	get_page(page);
1659 	inc_mm_counter_fast(mm, mm_counter_file(page));
1660 	page_add_file_rmap(page, false);
1661 	set_pte_at(mm, addr, pte, mk_pte(page, prot));
1662 	return 0;
1663 }
1664 
1665 /*
1666  * This is the old fallback for page remapping.
1667  *
1668  * For historical reasons, it only allows reserved pages. Only
1669  * old drivers should use this, and they needed to mark their
1670  * pages reserved for the old functions anyway.
1671  */
1672 static int insert_page(struct vm_area_struct *vma, unsigned long addr,
1673 			struct page *page, pgprot_t prot)
1674 {
1675 	struct mm_struct *mm = vma->vm_mm;
1676 	int retval;
1677 	pte_t *pte;
1678 	spinlock_t *ptl;
1679 
1680 	retval = validate_page_before_insert(page);
1681 	if (retval)
1682 		goto out;
1683 	retval = -ENOMEM;
1684 	pte = get_locked_pte(mm, addr, &ptl);
1685 	if (!pte)
1686 		goto out;
1687 	retval = insert_page_into_pte_locked(mm, pte, addr, page, prot);
1688 	pte_unmap_unlock(pte, ptl);
1689 out:
1690 	return retval;
1691 }
1692 
1693 #ifdef pte_index
1694 static int insert_page_in_batch_locked(struct mm_struct *mm, pte_t *pte,
1695 			unsigned long addr, struct page *page, pgprot_t prot)
1696 {
1697 	int err;
1698 
1699 	if (!page_count(page))
1700 		return -EINVAL;
1701 	err = validate_page_before_insert(page);
1702 	if (err)
1703 		return err;
1704 	return insert_page_into_pte_locked(mm, pte, addr, page, prot);
1705 }
1706 
1707 /* insert_pages() amortizes the cost of spinlock operations
1708  * when inserting pages in a loop. Arch *must* define pte_index.
1709  */
1710 static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
1711 			struct page **pages, unsigned long *num, pgprot_t prot)
1712 {
1713 	pmd_t *pmd = NULL;
1714 	pte_t *start_pte, *pte;
1715 	spinlock_t *pte_lock;
1716 	struct mm_struct *const mm = vma->vm_mm;
1717 	unsigned long curr_page_idx = 0;
1718 	unsigned long remaining_pages_total = *num;
1719 	unsigned long pages_to_write_in_pmd;
1720 	int ret;
1721 more:
1722 	ret = -EFAULT;
1723 	pmd = walk_to_pmd(mm, addr);
1724 	if (!pmd)
1725 		goto out;
1726 
1727 	pages_to_write_in_pmd = min_t(unsigned long,
1728 		remaining_pages_total, PTRS_PER_PTE - pte_index(addr));
1729 
1730 	/* Allocate the PTE if necessary; takes PMD lock once only. */
1731 	ret = -ENOMEM;
1732 	if (pte_alloc(mm, pmd))
1733 		goto out;
1734 
1735 	while (pages_to_write_in_pmd) {
1736 		int pte_idx = 0;
1737 		const int batch_size = min_t(int, pages_to_write_in_pmd, 8);
1738 
1739 		start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock);
1740 		for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) {
1741 			int err = insert_page_in_batch_locked(mm, pte,
1742 				addr, pages[curr_page_idx], prot);
1743 			if (unlikely(err)) {
1744 				pte_unmap_unlock(start_pte, pte_lock);
1745 				ret = err;
1746 				remaining_pages_total -= pte_idx;
1747 				goto out;
1748 			}
1749 			addr += PAGE_SIZE;
1750 			++curr_page_idx;
1751 		}
1752 		pte_unmap_unlock(start_pte, pte_lock);
1753 		pages_to_write_in_pmd -= batch_size;
1754 		remaining_pages_total -= batch_size;
1755 	}
1756 	if (remaining_pages_total)
1757 		goto more;
1758 	ret = 0;
1759 out:
1760 	*num = remaining_pages_total;
1761 	return ret;
1762 }
1763 #endif  /* ifdef pte_index */
1764 
1765 /**
1766  * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock.
1767  * @vma: user vma to map to
1768  * @addr: target start user address of these pages
1769  * @pages: source kernel pages
1770  * @num: in: number of pages to map. out: number of pages that were *not*
1771  * mapped. (0 means all pages were successfully mapped).
1772  *
1773  * Preferred over vm_insert_page() when inserting multiple pages.
1774  *
1775  * In case of error, we may have mapped a subset of the provided
1776  * pages. It is the caller's responsibility to account for this case.
1777  *
1778  * The same restrictions apply as in vm_insert_page().
1779  */
1780 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
1781 			struct page **pages, unsigned long *num)
1782 {
1783 #ifdef pte_index
1784 	const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1;
1785 
1786 	if (addr < vma->vm_start || end_addr >= vma->vm_end)
1787 		return -EFAULT;
1788 	if (!(vma->vm_flags & VM_MIXEDMAP)) {
1789 		BUG_ON(mmap_read_trylock(vma->vm_mm));
1790 		BUG_ON(vma->vm_flags & VM_PFNMAP);
1791 		vma->vm_flags |= VM_MIXEDMAP;
1792 	}
1793 	/* Defer page refcount checking till we're about to map that page. */
1794 	return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
1795 #else
1796 	unsigned long idx = 0, pgcount = *num;
1797 	int err = -EINVAL;
1798 
1799 	for (; idx < pgcount; ++idx) {
1800 		err = vm_insert_page(vma, addr + (PAGE_SIZE * idx), pages[idx]);
1801 		if (err)
1802 			break;
1803 	}
1804 	*num = pgcount - idx;
1805 	return err;
1806 #endif  /* ifdef pte_index */
1807 }
1808 EXPORT_SYMBOL(vm_insert_pages);
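
/*
 * Illustrative sketch, not part of this file's interface: how a driver's
 * mmap path might batch-map an already-allocated page array with
 * vm_insert_pages().  The "pages" array and "npages" count are hypothetical
 * driver state.
 */
static int __maybe_unused example_mmap_insert_pages(struct vm_area_struct *vma,
				     struct page **pages, unsigned long npages)
{
	unsigned long not_mapped = npages;
	int err;

	/* On return, not_mapped holds how many pages were left unmapped. */
	err = vm_insert_pages(vma, vma->vm_start, pages, &not_mapped);
	if (err)
		pr_debug("mapped %lu of %lu pages before error %d\n",
			 npages - not_mapped, npages, err);
	return err;
}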
1809 
1810 /**
1811  * vm_insert_page - insert single page into user vma
1812  * @vma: user vma to map to
1813  * @addr: target user address of this page
1814  * @page: source kernel page
1815  *
1816  * This allows drivers to insert individual pages they've allocated
1817  * into a user vma.
1818  *
1819  * The page has to be a nice clean _individual_ kernel allocation.
1820  * If you allocate a compound page, you need to have marked it as
1821  * such (__GFP_COMP), or manually just split the page up yourself
1822  * (see split_page()).
1823  *
1824  * NOTE! Traditionally this was done with "remap_pfn_range()" which
1825  * took an arbitrary page protection parameter. This doesn't allow
1826  * that. Your vma protection will have to be set up correctly, which
1827  * means that if you want a shared writable mapping, you'd better
1828  * ask for a shared writable mapping!
1829  *
1830  * The page does not need to be reserved.
1831  *
1832  * Usually this function is called from f_op->mmap() handler
1833  * under mm->mmap_lock write-lock, so it can change vma->vm_flags.
1834  * Caller must set VM_MIXEDMAP on vma if it wants to call this
1835  * function from other places, for example from page-fault handler.
1836  *
1837  * Return: %0 on success, negative error code otherwise.
1838  */
1839 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
1840 			struct page *page)
1841 {
1842 	if (addr < vma->vm_start || addr >= vma->vm_end)
1843 		return -EFAULT;
1844 	if (!page_count(page))
1845 		return -EINVAL;
1846 	if (!(vma->vm_flags & VM_MIXEDMAP)) {
1847 		BUG_ON(mmap_read_trylock(vma->vm_mm));
1848 		BUG_ON(vma->vm_flags & VM_PFNMAP);
1849 		vma->vm_flags |= VM_MIXEDMAP;
1850 	}
1851 	return insert_page(vma, addr, page, vma->vm_page_prot);
1852 }
1853 EXPORT_SYMBOL(vm_insert_page);
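
/*
 * Illustrative sketch: mapping a single driver-owned page at the start of
 * the vma from an f_op->mmap handler, which is the case vm_insert_page()
 * is designed for.  "shared_page" is a hypothetical page the driver
 * allocated with alloc_page(GFP_KERNEL) and keeps for its own lifetime.
 */
static int __maybe_unused example_mmap_shared_page(struct vm_area_struct *vma,
				    struct page *shared_page)
{
	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;
	return vm_insert_page(vma, vma->vm_start, shared_page);
}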
1854 
1855 /*
1856  * __vm_map_pages - maps range of kernel pages into user vma
1857  * @vma: user vma to map to
1858  * @pages: pointer to array of source kernel pages
1859  * @num: number of pages in page array
1860  * @offset: user's requested vm_pgoff
1861  *
1862  * This allows drivers to map range of kernel pages into a user vma.
1863  * This allows drivers to map a range of kernel pages into a user vma.
1864  * Return: 0 on success and error code otherwise.
1865  */
1866 static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
1867 				unsigned long num, unsigned long offset)
1868 {
1869 	unsigned long count = vma_pages(vma);
1870 	unsigned long uaddr = vma->vm_start;
1871 	int ret, i;
1872 
1873 	/* Fail if the user requested offset is beyond the end of the object */
1874 	if (offset >= num)
1875 		return -ENXIO;
1876 
1877 	/* Fail if the user requested size exceeds available object size */
1878 	if (count > num - offset)
1879 		return -ENXIO;
1880 
1881 	for (i = 0; i < count; i++) {
1882 		ret = vm_insert_page(vma, uaddr, pages[offset + i]);
1883 		if (ret < 0)
1884 			return ret;
1885 		uaddr += PAGE_SIZE;
1886 	}
1887 
1888 	return 0;
1889 }
1890 
1891 /**
1892  * vm_map_pages - map a range of kernel pages, starting at a non-zero offset
1893  * @vma: user vma to map to
1894  * @pages: pointer to array of source kernel pages
1895  * @num: number of pages in page array
1896  *
1897  * Maps an object consisting of @num pages, catering for the user's
1898  * requested vm_pgoff.
1899  *
1900  * If we fail to insert any page into the vma, the function will return
1901  * immediately leaving any previously inserted pages present.  Callers
1902  * from the mmap handler may immediately return the error as their caller
1903  * will destroy the vma, removing any successfully inserted pages. Other
1904  * callers should make their own arrangements for calling unmap_region().
1905  *
1906  * Context: Process context. Called by mmap handlers.
1907  * Return: 0 on success and error code otherwise.
1908  */
1909 int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
1910 				unsigned long num)
1911 {
1912 	return __vm_map_pages(vma, pages, num, vma->vm_pgoff);
1913 }
1914 EXPORT_SYMBOL(vm_map_pages);
1915 
1916 /**
1917  * vm_map_pages_zero - map a range of kernel pages, starting at offset zero
1918  * @vma: user vma to map to
1919  * @pages: pointer to array of source kernel pages
1920  * @num: number of pages in page array
1921  *
1922  * Similar to vm_map_pages(), except that it explicitly sets the offset
1923  * to 0. This function is intended for the drivers that did not consider
1924  * vm_pgoff.
1925  *
1926  * Context: Process context. Called by mmap handlers.
1927  * Return: 0 on success and error code otherwise.
1928  */
1929 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
1930 				unsigned long num)
1931 {
1932 	return __vm_map_pages(vma, pages, num, 0);
1933 }
1934 EXPORT_SYMBOL(vm_map_pages_zero);
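
/*
 * Illustrative sketch: a buffer-exporting driver's mmap handler built on
 * vm_map_pages().  The handler only supplies its page array; the offset
 * and length checks against vma->vm_pgoff and vma_pages() happen inside
 * __vm_map_pages() above.  "buf_pages" and "buf_npages" are hypothetical
 * driver state.
 */
static int __maybe_unused example_mmap_buffer(struct vm_area_struct *vma,
			       struct page **buf_pages,
			       unsigned long buf_npages)
{
	return vm_map_pages(vma, buf_pages, buf_npages);
}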
1935 
1936 static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
1937 			pfn_t pfn, pgprot_t prot, bool mkwrite)
1938 {
1939 	struct mm_struct *mm = vma->vm_mm;
1940 	pte_t *pte, entry;
1941 	spinlock_t *ptl;
1942 
1943 	pte = get_locked_pte(mm, addr, &ptl);
1944 	if (!pte)
1945 		return VM_FAULT_OOM;
1946 	if (!pte_none(*pte)) {
1947 		if (mkwrite) {
1948 			/*
1949 			 * For read faults on private mappings the PFN passed
1950 			 * in may not match the PFN we have mapped if the
1951 			 * mapped PFN is a writeable COW page.  In the mkwrite
1952 			 * case we are creating a writable PTE for a shared
1953 			 * mapping and we expect the PFNs to match. If they
1954 			 * don't match, we are likely racing with block
1955 			 * allocation and mapping invalidation so just skip the
1956 			 * update.
1957 			 */
1958 			if (pte_pfn(*pte) != pfn_t_to_pfn(pfn)) {
1959 				WARN_ON_ONCE(!is_zero_pfn(pte_pfn(*pte)));
1960 				goto out_unlock;
1961 			}
1962 			entry = pte_mkyoung(*pte);
1963 			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1964 			if (ptep_set_access_flags(vma, addr, pte, entry, 1))
1965 				update_mmu_cache(vma, addr, pte);
1966 		}
1967 		goto out_unlock;
1968 	}
1969 
1970 	/* Ok, finally just insert the thing.. */
1971 	if (pfn_t_devmap(pfn))
1972 		entry = pte_mkdevmap(pfn_t_pte(pfn, prot));
1973 	else
1974 		entry = pte_mkspecial(pfn_t_pte(pfn, prot));
1975 
1976 	if (mkwrite) {
1977 		entry = pte_mkyoung(entry);
1978 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1979 	}
1980 
1981 	set_pte_at(mm, addr, pte, entry);
1982 	update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
1983 
1984 out_unlock:
1985 	pte_unmap_unlock(pte, ptl);
1986 	return VM_FAULT_NOPAGE;
1987 }
1988 
1989 /**
1990  * vmf_insert_pfn_prot - insert single pfn into user vma with specified pgprot
1991  * @vma: user vma to map to
1992  * @addr: target user address of this page
1993  * @pfn: source kernel pfn
1994  * @pgprot: pgprot flags for the inserted page
1995  *
1996  * This is exactly like vmf_insert_pfn(), except that it allows drivers
1997  * to override pgprot on a per-page basis.
1998  *
1999  * This only makes sense for IO mappings, and it makes no sense for
2000  * COW mappings.  In general, using multiple vmas is preferable;
2001  * vmf_insert_pfn_prot should only be used if using multiple VMAs is
2002  * impractical.
2003  *
2004  * See vmf_insert_mixed_prot() for a discussion of the implication of using
2005  * a value of @pgprot different from that of @vma->vm_page_prot.
2006  *
2007  * Context: Process context.  May allocate using %GFP_KERNEL.
2008  * Return: vm_fault_t value.
2009  */
2010 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
2011 			unsigned long pfn, pgprot_t pgprot)
2012 {
2013 	/*
2014 	 * Technically, architectures with pte_special can avoid all these
2015 	 * restrictions (same for remap_pfn_range).  However, we would like
2016 	 * consistency in testing and feature parity among all, so we should
2017 	 * try to keep these invariants in place for everybody.
2018 	 */
2019 	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
2020 	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
2021 						(VM_PFNMAP|VM_MIXEDMAP));
2022 	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
2023 	BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
2024 
2025 	if (addr < vma->vm_start || addr >= vma->vm_end)
2026 		return VM_FAULT_SIGBUS;
2027 
2028 	if (!pfn_modify_allowed(pfn, pgprot))
2029 		return VM_FAULT_SIGBUS;
2030 
2031 	track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
2032 
2033 	return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
2034 			false);
2035 }
2036 EXPORT_SYMBOL(vmf_insert_pfn_prot);
2037 
2038 /**
2039  * vmf_insert_pfn - insert single pfn into user vma
2040  * @vma: user vma to map to
2041  * @addr: target user address of this page
2042  * @pfn: source kernel pfn
2043  *
2044  * Similar to vm_insert_page, this allows drivers to insert individual pages
2045  * they've allocated into a user vma. Same comments apply.
2046  *
2047  * This function should only be called from a vm_ops->fault handler, and
2048  * in that case the handler should return the result of this function.
2049  *
2050  * vma cannot be a COW mapping.
2051  *
2052  * As this is called only for pages that do not currently exist, we
2053  * do not need to flush old virtual caches or the TLB.
2054  *
2055  * Context: Process context.  May allocate using %GFP_KERNEL.
2056  * Return: vm_fault_t value.
2057  */
2058 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2059 			unsigned long pfn)
2060 {
2061 	return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
2062 }
2063 EXPORT_SYMBOL(vmf_insert_pfn);
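
/*
 * Illustrative sketch: a vm_ops->fault handler for a VM_PFNMAP vma that
 * resolves the faulting offset to a device PFN and inserts it with
 * vmf_insert_pfn(), returning its result as recommended above.  Storing
 * the base PFN in vm_private_data is an assumption made for this example.
 */
static vm_fault_t __maybe_unused example_pfn_fault(struct vm_fault *vmf)
{
	unsigned long base_pfn = (unsigned long)vmf->vma->vm_private_data;

	return vmf_insert_pfn(vmf->vma, vmf->address, base_pfn + vmf->pgoff);
}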
2064 
2065 static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn)
2066 {
2067 	/* these checks mirror the abort conditions in vm_normal_page */
2068 	if (vma->vm_flags & VM_MIXEDMAP)
2069 		return true;
2070 	if (pfn_t_devmap(pfn))
2071 		return true;
2072 	if (pfn_t_special(pfn))
2073 		return true;
2074 	if (is_zero_pfn(pfn_t_to_pfn(pfn)))
2075 		return true;
2076 	return false;
2077 }
2078 
2079 static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
2080 		unsigned long addr, pfn_t pfn, pgprot_t pgprot,
2081 		bool mkwrite)
2082 {
2083 	int err;
2084 
2085 	BUG_ON(!vm_mixed_ok(vma, pfn));
2086 
2087 	if (addr < vma->vm_start || addr >= vma->vm_end)
2088 		return VM_FAULT_SIGBUS;
2089 
2090 	track_pfn_insert(vma, &pgprot, pfn);
2091 
2092 	if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot))
2093 		return VM_FAULT_SIGBUS;
2094 
2095 	/*
2096 	 * If we don't have pte special, then we have to use the pfn_valid()
2097 	 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
2098 	 * refcount the page if pfn_valid is true (hence insert_page rather
2099 	 * than insert_pfn).  If a zero_pfn were inserted into a VM_MIXEDMAP
2100 	 * without pte special, it would then be refcounted as a normal page.
2101 	 */
2102 	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) &&
2103 	    !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
2104 		struct page *page;
2105 
2106 		/*
2107 		 * At this point we are committed to insert_page()
2108 		 * regardless of whether the caller specified flags that
2109 		 * result in pfn_t_has_page() == false.
2110 		 */
2111 		page = pfn_to_page(pfn_t_to_pfn(pfn));
2112 		err = insert_page(vma, addr, page, pgprot);
2113 	} else {
2114 		return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
2115 	}
2116 
2117 	if (err == -ENOMEM)
2118 		return VM_FAULT_OOM;
2119 	if (err < 0 && err != -EBUSY)
2120 		return VM_FAULT_SIGBUS;
2121 
2122 	return VM_FAULT_NOPAGE;
2123 }
2124 
2125 /**
2126  * vmf_insert_mixed_prot - insert single pfn into user vma with specified pgprot
2127  * @vma: user vma to map to
2128  * @addr: target user address of this page
2129  * @pfn: source kernel pfn
2130  * @pgprot: pgprot flags for the inserted page
2131  *
2132  * This is exactly like vmf_insert_mixed(), except that it allows drivers
2133  * to override pgprot on a per-page basis.
2134  *
2135  * Typically this function should be used by drivers to set caching- and
2136  * encryption bits different from those of @vma->vm_page_prot, because
2137  * the caching- or encryption mode may not be known at mmap() time.
2138  * This is ok as long as @vma->vm_page_prot is not used by the core vm
2139  * to set caching and encryption bits for those vmas (except for COW pages).
2140  * This is ensured by core vm only modifying these page table entries using
2141  * functions that don't touch caching- or encryption bits, using pte_modify()
2142  * if needed. (See for example mprotect()).
2143  * Also when new page-table entries are created, this is only done using the
2144  * fault() callback, and never using the value of vma->vm_page_prot,
2145  * except for page-table entries that point to anonymous pages as the result
2146  * of COW.
2147  *
2148  * Context: Process context.  May allocate using %GFP_KERNEL.
2149  * Return: vm_fault_t value.
2150  */
2151 vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr,
2152 				 pfn_t pfn, pgprot_t pgprot)
2153 {
2154 	return __vm_insert_mixed(vma, addr, pfn, pgprot, false);
2155 }
2156 EXPORT_SYMBOL(vmf_insert_mixed_prot);
2157 
2158 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2159 		pfn_t pfn)
2160 {
2161 	return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, false);
2162 }
2163 EXPORT_SYMBOL(vmf_insert_mixed);
2164 
2165 /*
2166  *  If the insertion of the PTE failed because someone else already added a
2167  *  different entry in the meantime, we treat that as success, as we assume
2168  *  the same entry was actually inserted.
2169  */
2170 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
2171 		unsigned long addr, pfn_t pfn)
2172 {
2173 	return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, true);
2174 }
2175 EXPORT_SYMBOL(vmf_insert_mixed_mkwrite);
2176 
2177 /*
2178  * Maps a range of physical memory into the requested pages. The old
2179  * mappings are removed. Any references to nonexistent pages result
2180  * in null mappings (currently treated as "copy-on-access").
2181  */
2182 static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
2183 			unsigned long addr, unsigned long end,
2184 			unsigned long pfn, pgprot_t prot)
2185 {
2186 	pte_t *pte;
2187 	spinlock_t *ptl;
2188 	int err = 0;
2189 
2190 	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
2191 	if (!pte)
2192 		return -ENOMEM;
2193 	arch_enter_lazy_mmu_mode();
2194 	do {
2195 		BUG_ON(!pte_none(*pte));
2196 		if (!pfn_modify_allowed(pfn, prot)) {
2197 			err = -EACCES;
2198 			break;
2199 		}
2200 		set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
2201 		pfn++;
2202 	} while (pte++, addr += PAGE_SIZE, addr != end);
2203 	arch_leave_lazy_mmu_mode();
2204 	pte_unmap_unlock(pte - 1, ptl);
2205 	return err;
2206 }
2207 
2208 static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
2209 			unsigned long addr, unsigned long end,
2210 			unsigned long pfn, pgprot_t prot)
2211 {
2212 	pmd_t *pmd;
2213 	unsigned long next;
2214 	int err;
2215 
2216 	pfn -= addr >> PAGE_SHIFT;
2217 	pmd = pmd_alloc(mm, pud, addr);
2218 	if (!pmd)
2219 		return -ENOMEM;
2220 	VM_BUG_ON(pmd_trans_huge(*pmd));
2221 	do {
2222 		next = pmd_addr_end(addr, end);
2223 		err = remap_pte_range(mm, pmd, addr, next,
2224 				pfn + (addr >> PAGE_SHIFT), prot);
2225 		if (err)
2226 			return err;
2227 	} while (pmd++, addr = next, addr != end);
2228 	return 0;
2229 }
2230 
2231 static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
2232 			unsigned long addr, unsigned long end,
2233 			unsigned long pfn, pgprot_t prot)
2234 {
2235 	pud_t *pud;
2236 	unsigned long next;
2237 	int err;
2238 
2239 	pfn -= addr >> PAGE_SHIFT;
2240 	pud = pud_alloc(mm, p4d, addr);
2241 	if (!pud)
2242 		return -ENOMEM;
2243 	do {
2244 		next = pud_addr_end(addr, end);
2245 		err = remap_pmd_range(mm, pud, addr, next,
2246 				pfn + (addr >> PAGE_SHIFT), prot);
2247 		if (err)
2248 			return err;
2249 	} while (pud++, addr = next, addr != end);
2250 	return 0;
2251 }
2252 
2253 static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2254 			unsigned long addr, unsigned long end,
2255 			unsigned long pfn, pgprot_t prot)
2256 {
2257 	p4d_t *p4d;
2258 	unsigned long next;
2259 	int err;
2260 
2261 	pfn -= addr >> PAGE_SHIFT;
2262 	p4d = p4d_alloc(mm, pgd, addr);
2263 	if (!p4d)
2264 		return -ENOMEM;
2265 	do {
2266 		next = p4d_addr_end(addr, end);
2267 		err = remap_pud_range(mm, p4d, addr, next,
2268 				pfn + (addr >> PAGE_SHIFT), prot);
2269 		if (err)
2270 			return err;
2271 	} while (p4d++, addr = next, addr != end);
2272 	return 0;
2273 }
2274 
2275 /**
2276  * remap_pfn_range - remap kernel memory to userspace
2277  * @vma: user vma to map to
2278  * @addr: target page aligned user address to start at
2279  * @pfn: page frame number of kernel physical memory address
2280  * @size: size of mapping area
2281  * @prot: page protection flags for this mapping
2282  *
2283  * Note: this is only safe if the mm semaphore is held when called.
2284  *
2285  * Return: %0 on success, negative error code otherwise.
2286  */
2287 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
2288 		    unsigned long pfn, unsigned long size, pgprot_t prot)
2289 {
2290 	pgd_t *pgd;
2291 	unsigned long next;
2292 	unsigned long end = addr + PAGE_ALIGN(size);
2293 	struct mm_struct *mm = vma->vm_mm;
2294 	unsigned long remap_pfn = pfn;
2295 	int err;
2296 
2297 	if (WARN_ON_ONCE(!PAGE_ALIGNED(addr)))
2298 		return -EINVAL;
2299 
2300 	/*
2301 	 * Physically remapped pages are special. Tell the
2302 	 * rest of the world about it:
2303 	 *   VM_IO tells people not to look at these pages
2304 	 *	(accesses can have side effects).
2305 	 *   VM_PFNMAP tells the core MM that the base pages are just
2306 	 *	raw PFN mappings, and do not have a "struct page" associated
2307 	 *	with them.
2308 	 *   VM_DONTEXPAND
2309 	 *      Disable vma merging and expanding with mremap().
2310 	 *   VM_DONTDUMP
2311 	 *      Omit vma from core dump, even when VM_IO turned off.
2312 	 *
2313 	 * There's a horrible special case to handle copy-on-write
2314 	 * behaviour that some programs depend on. We mark the "original"
2315 	 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
2316 	 * See vm_normal_page() for details.
2317 	 */
2318 	if (is_cow_mapping(vma->vm_flags)) {
2319 		if (addr != vma->vm_start || end != vma->vm_end)
2320 			return -EINVAL;
2321 		vma->vm_pgoff = pfn;
2322 	}
2323 
2324 	err = track_pfn_remap(vma, &prot, remap_pfn, addr, PAGE_ALIGN(size));
2325 	if (err)
2326 		return -EINVAL;
2327 
2328 	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
2329 
2330 	BUG_ON(addr >= end);
2331 	pfn -= addr >> PAGE_SHIFT;
2332 	pgd = pgd_offset(mm, addr);
2333 	flush_cache_range(vma, addr, end);
2334 	do {
2335 		next = pgd_addr_end(addr, end);
2336 		err = remap_p4d_range(mm, pgd, addr, next,
2337 				pfn + (addr >> PAGE_SHIFT), prot);
2338 		if (err)
2339 			break;
2340 	} while (pgd++, addr = next, addr != end);
2341 
2342 	if (err)
2343 		untrack_pfn(vma, remap_pfn, PAGE_ALIGN(size));
2344 
2345 	return err;
2346 }
2347 EXPORT_SYMBOL(remap_pfn_range);
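
/*
 * Illustrative sketch: the classic f_op->mmap pattern for exposing a
 * physically contiguous region with remap_pfn_range().  "phys" is a
 * hypothetical physical address owned by the driver; the whole vma is
 * mapped in one call while mmap_lock is held by the mmap path.
 */
static int __maybe_unused example_mmap_phys(struct vm_area_struct *vma,
			     phys_addr_t phys)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long pfn = (phys >> PAGE_SHIFT) + vma->vm_pgoff;

	return remap_pfn_range(vma, vma->vm_start, pfn, size,
			       vma->vm_page_prot);
}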
2348 
2349 /**
2350  * vm_iomap_memory - remap memory to userspace
2351  * @vma: user vma to map to
2352  * @start: start of the physical memory to be mapped
2353  * @len: size of area
2354  *
2355  * This is a simplified io_remap_pfn_range() for common driver use. The
2356  * driver just needs to give us the physical memory range to be mapped,
2357  * and we'll figure out the rest from the vma information.
2358  *
2359  * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get
2360  * write-combining or similar behaviour.
2361  *
2362  * Return: %0 on success, negative error code otherwise.
2363  */
2364 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
2365 {
2366 	unsigned long vm_len, pfn, pages;
2367 
2368 	/* Check that the physical memory area passed in looks valid */
2369 	if (start + len < start)
2370 		return -EINVAL;
2371 	/*
2372 	 * You *really* shouldn't map things that aren't page-aligned,
2373 	 * but we've historically allowed it because IO memory might
2374 	 * just have smaller alignment.
2375 	 */
2376 	len += start & ~PAGE_MASK;
2377 	pfn = start >> PAGE_SHIFT;
2378 	pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
2379 	if (pfn + pages < pfn)
2380 		return -EINVAL;
2381 
2382 	/* We start the mapping 'vm_pgoff' pages into the area */
2383 	if (vma->vm_pgoff > pages)
2384 		return -EINVAL;
2385 	pfn += vma->vm_pgoff;
2386 	pages -= vma->vm_pgoff;
2387 
2388 	/* Can we fit all of the mapping? */
2389 	vm_len = vma->vm_end - vma->vm_start;
2390 	if (vm_len >> PAGE_SHIFT > pages)
2391 		return -EINVAL;
2392 
2393 	/* Ok, let it rip */
2394 	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
2395 }
2396 EXPORT_SYMBOL(vm_iomap_memory);
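
/*
 * Illustrative sketch: the same kind of mapping expressed with
 * vm_iomap_memory(), which derives the pfn, size and vm_pgoff handling
 * from the vma itself.  "bar_start" and "bar_len" are hypothetical values
 * describing a device aperture.
 */
static int __maybe_unused example_mmap_iomem(struct vm_area_struct *vma,
			      phys_addr_t bar_start, unsigned long bar_len)
{
	return vm_iomap_memory(vma, bar_start, bar_len);
}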
2397 
2398 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
2399 				     unsigned long addr, unsigned long end,
2400 				     pte_fn_t fn, void *data, bool create,
2401 				     pgtbl_mod_mask *mask)
2402 {
2403 	pte_t *pte;
2404 	int err = 0;
2405 	spinlock_t *ptl;
2406 
2407 	if (create) {
2408 		pte = (mm == &init_mm) ?
2409 			pte_alloc_kernel_track(pmd, addr, mask) :
2410 			pte_alloc_map_lock(mm, pmd, addr, &ptl);
2411 		if (!pte)
2412 			return -ENOMEM;
2413 	} else {
2414 		pte = (mm == &init_mm) ?
2415 			pte_offset_kernel(pmd, addr) :
2416 			pte_offset_map_lock(mm, pmd, addr, &ptl);
2417 	}
2418 
2419 	BUG_ON(pmd_huge(*pmd));
2420 
2421 	arch_enter_lazy_mmu_mode();
2422 
2423 	do {
2424 		if (create || !pte_none(*pte)) {
2425 			err = fn(pte++, addr, data);
2426 			if (err)
2427 				break;
2428 		}
2429 	} while (addr += PAGE_SIZE, addr != end);
2430 	*mask |= PGTBL_PTE_MODIFIED;
2431 
2432 	arch_leave_lazy_mmu_mode();
2433 
2434 	if (mm != &init_mm)
2435 		pte_unmap_unlock(pte-1, ptl);
2436 	return err;
2437 }
2438 
2439 static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
2440 				     unsigned long addr, unsigned long end,
2441 				     pte_fn_t fn, void *data, bool create,
2442 				     pgtbl_mod_mask *mask)
2443 {
2444 	pmd_t *pmd;
2445 	unsigned long next;
2446 	int err = 0;
2447 
2448 	BUG_ON(pud_huge(*pud));
2449 
2450 	if (create) {
2451 		pmd = pmd_alloc_track(mm, pud, addr, mask);
2452 		if (!pmd)
2453 			return -ENOMEM;
2454 	} else {
2455 		pmd = pmd_offset(pud, addr);
2456 	}
2457 	do {
2458 		next = pmd_addr_end(addr, end);
2459 		if (create || !pmd_none_or_clear_bad(pmd)) {
2460 			err = apply_to_pte_range(mm, pmd, addr, next, fn, data,
2461 						 create, mask);
2462 			if (err)
2463 				break;
2464 		}
2465 	} while (pmd++, addr = next, addr != end);
2466 	return err;
2467 }
2468 
2469 static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
2470 				     unsigned long addr, unsigned long end,
2471 				     pte_fn_t fn, void *data, bool create,
2472 				     pgtbl_mod_mask *mask)
2473 {
2474 	pud_t *pud;
2475 	unsigned long next;
2476 	int err = 0;
2477 
2478 	if (create) {
2479 		pud = pud_alloc_track(mm, p4d, addr, mask);
2480 		if (!pud)
2481 			return -ENOMEM;
2482 	} else {
2483 		pud = pud_offset(p4d, addr);
2484 	}
2485 	do {
2486 		next = pud_addr_end(addr, end);
2487 		if (create || !pud_none_or_clear_bad(pud)) {
2488 			err = apply_to_pmd_range(mm, pud, addr, next, fn, data,
2489 						 create, mask);
2490 			if (err)
2491 				break;
2492 		}
2493 	} while (pud++, addr = next, addr != end);
2494 	return err;
2495 }
2496 
2497 static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2498 				     unsigned long addr, unsigned long end,
2499 				     pte_fn_t fn, void *data, bool create,
2500 				     pgtbl_mod_mask *mask)
2501 {
2502 	p4d_t *p4d;
2503 	unsigned long next;
2504 	int err = 0;
2505 
2506 	if (create) {
2507 		p4d = p4d_alloc_track(mm, pgd, addr, mask);
2508 		if (!p4d)
2509 			return -ENOMEM;
2510 	} else {
2511 		p4d = p4d_offset(pgd, addr);
2512 	}
2513 	do {
2514 		next = p4d_addr_end(addr, end);
2515 		if (create || !p4d_none_or_clear_bad(p4d)) {
2516 			err = apply_to_pud_range(mm, p4d, addr, next, fn, data,
2517 						 create, mask);
2518 			if (err)
2519 				break;
2520 		}
2521 	} while (p4d++, addr = next, addr != end);
2522 	return err;
2523 }
2524 
2525 static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
2526 				 unsigned long size, pte_fn_t fn,
2527 				 void *data, bool create)
2528 {
2529 	pgd_t *pgd;
2530 	unsigned long start = addr, next;
2531 	unsigned long end = addr + size;
2532 	pgtbl_mod_mask mask = 0;
2533 	int err = 0;
2534 
2535 	if (WARN_ON(addr >= end))
2536 		return -EINVAL;
2537 
2538 	pgd = pgd_offset(mm, addr);
2539 	do {
2540 		next = pgd_addr_end(addr, end);
2541 		if (!create && pgd_none_or_clear_bad(pgd))
2542 			continue;
2543 		err = apply_to_p4d_range(mm, pgd, addr, next, fn, data, create, &mask);
2544 		if (err)
2545 			break;
2546 	} while (pgd++, addr = next, addr != end);
2547 
2548 	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
2549 		arch_sync_kernel_mappings(start, start + size);
2550 
2551 	return err;
2552 }
2553 
2554 /*
2555  * Scan a region of virtual memory, filling in page tables as necessary
2556  * and calling a provided function on each leaf page table.
2557  */
2558 int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
2559 			unsigned long size, pte_fn_t fn, void *data)
2560 {
2561 	return __apply_to_page_range(mm, addr, size, fn, data, true);
2562 }
2563 EXPORT_SYMBOL_GPL(apply_to_page_range);
2564 
2565 /*
2566  * Scan a region of virtual memory, calling a provided function on
2567  * each leaf page table where it exists.
2568  *
2569  * Unlike apply_to_page_range, this does _not_ fill in page tables
2570  * where they are absent.
2571  */
2572 int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr,
2573 				 unsigned long size, pte_fn_t fn, void *data)
2574 {
2575 	return __apply_to_page_range(mm, addr, size, fn, data, false);
2576 }
2577 EXPORT_SYMBOL_GPL(apply_to_existing_page_range);
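
/*
 * Illustrative sketch: a pte_fn_t callback paired with
 * apply_to_existing_page_range() to count populated PTEs in a kernel
 * virtual range.  The callback only runs on PTEs that already exist;
 * the counter passed via "data" is a hypothetical caller-owned variable.
 */
static int example_count_pte(pte_t *pte, unsigned long addr, void *data)
{
	unsigned long *count = data;

	if (!pte_none(*pte))
		(*count)++;
	return 0;
}

static int __maybe_unused example_count_mapped(unsigned long addr,
				unsigned long size, unsigned long *count)
{
	return apply_to_existing_page_range(&init_mm, addr, size,
					    example_count_pte, count);
}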
2578 
2579 /*
2580  * handle_pte_fault chooses page fault handler according to an entry which was
2581  * read non-atomically.  Before making any commitment, on those architectures
2582  * or configurations (e.g. i386 with PAE) which might give a mix of unmatched
2583  * parts, do_swap_page must check under lock before unmapping the pte and
2584  * proceeding (but do_wp_page is only called after already making such a check;
2585  * and do_anonymous_page can safely check later on).
2586  */
2587 static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
2588 				pte_t *page_table, pte_t orig_pte)
2589 {
2590 	int same = 1;
2591 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPTION)
2592 	if (sizeof(pte_t) > sizeof(unsigned long)) {
2593 		spinlock_t *ptl = pte_lockptr(mm, pmd);
2594 		spin_lock(ptl);
2595 		same = pte_same(*page_table, orig_pte);
2596 		spin_unlock(ptl);
2597 	}
2598 #endif
2599 	pte_unmap(page_table);
2600 	return same;
2601 }
2602 
2603 static inline bool cow_user_page(struct page *dst, struct page *src,
2604 				 struct vm_fault *vmf)
2605 {
2606 	bool ret;
2607 	void *kaddr;
2608 	void __user *uaddr;
2609 	bool locked = false;
2610 	struct vm_area_struct *vma = vmf->vma;
2611 	struct mm_struct *mm = vma->vm_mm;
2612 	unsigned long addr = vmf->address;
2613 
2614 	if (likely(src)) {
2615 		copy_user_highpage(dst, src, addr, vma);
2616 		return true;
2617 	}
2618 
2619 	/*
2620 	 * If the source page was a PFN mapping, we don't have
2621 	 * a "struct page" for it. We do a best-effort copy by
2622 	 * just copying from the original user address. If that
2623 	 * fails, we just zero-fill it. Live with it.
2624 	 */
2625 	kaddr = kmap_atomic(dst);
2626 	uaddr = (void __user *)(addr & PAGE_MASK);
2627 
2628 	/*
2629 	 * On architectures with software "accessed" bits, we would
2630 	 * take a double page fault, so mark it accessed here.
2631 	 */
2632 	if (arch_faults_on_old_pte() && !pte_young(vmf->orig_pte)) {
2633 		pte_t entry;
2634 
2635 		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
2636 		locked = true;
2637 		if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
2638 			/*
2639 			 * Another thread has already handled the fault;
2640 			 * just update the local TLB.
2641 			 */
2642 			update_mmu_tlb(vma, addr, vmf->pte);
2643 			ret = false;
2644 			goto pte_unlock;
2645 		}
2646 
2647 		entry = pte_mkyoung(vmf->orig_pte);
2648 		if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
2649 			update_mmu_cache(vma, addr, vmf->pte);
2650 	}
2651 
2652 	/*
2653 	 * This really shouldn't fail, because the page is there
2654 	 * in the page tables. But it might just be unreadable,
2655 	 * in which case we just give up and fill the result with
2656 	 * zeroes.
2657 	 */
2658 	if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
2659 		if (locked)
2660 			goto warn;
2661 
2662 		/* Re-validate under PTL if the page is still mapped */
2663 		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
2664 		locked = true;
2665 		if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
2666 			/* The PTE changed under us, update local tlb */
2667 			update_mmu_tlb(vma, addr, vmf->pte);
2668 			ret = false;
2669 			goto pte_unlock;
2670 		}
2671 
2672 		/*
2673 		 * The same page may have been mapped back since the last copy attempt.
2674 		 * Try to copy again under PTL.
2675 		 */
2676 		if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
2677 			/*
2678 			 * Warn in case there is some obscure
2679 			 * use-case.
2680 			 */
2681 warn:
2682 			WARN_ON_ONCE(1);
2683 			clear_page(kaddr);
2684 		}
2685 	}
2686 
2687 	ret = true;
2688 
2689 pte_unlock:
2690 	if (locked)
2691 		pte_unmap_unlock(vmf->pte, vmf->ptl);
2692 	kunmap_atomic(kaddr);
2693 	flush_dcache_page(dst);
2694 
2695 	return ret;
2696 }
2697 
2698 static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
2699 {
2700 	struct file *vm_file = vma->vm_file;
2701 
2702 	if (vm_file)
2703 		return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO;
2704 
2705 	/*
2706 	 * Special mappings (e.g. VDSO) do not have any file so fake
2707 	 * a default GFP_KERNEL for them.
2708 	 */
2709 	return GFP_KERNEL;
2710 }
2711 
2712 /*
2713  * Notify the address space that the page is about to become writable so that
2714  * it can prohibit this or wait for the page to get into an appropriate state.
2715  *
2716  * We do this without the lock held, so that it can sleep if it needs to.
2717  */
2718 static vm_fault_t do_page_mkwrite(struct vm_fault *vmf)
2719 {
2720 	vm_fault_t ret;
2721 	struct page *page = vmf->page;
2722 	unsigned int old_flags = vmf->flags;
2723 
2724 	vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
2725 
2726 	if (vmf->vma->vm_file &&
2727 	    IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host))
2728 		return VM_FAULT_SIGBUS;
2729 
2730 	ret = vmf->vma->vm_ops->page_mkwrite(vmf);
2731 	/* Restore original flags so that caller is not surprised */
2732 	vmf->flags = old_flags;
2733 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
2734 		return ret;
2735 	if (unlikely(!(ret & VM_FAULT_LOCKED))) {
2736 		lock_page(page);
2737 		if (!page->mapping) {
2738 			unlock_page(page);
2739 			return 0; /* retry */
2740 		}
2741 		ret |= VM_FAULT_LOCKED;
2742 	} else
2743 		VM_BUG_ON_PAGE(!PageLocked(page), page);
2744 	return ret;
2745 }
2746 
2747 /*
2748  * Handle dirtying of a page in shared file mapping on a write fault.
2749  *
2750  * The function expects the page to be locked and unlocks it.
2751  */
2752 static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
2753 {
2754 	struct vm_area_struct *vma = vmf->vma;
2755 	struct address_space *mapping;
2756 	struct page *page = vmf->page;
2757 	bool dirtied;
2758 	bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
2759 
2760 	dirtied = set_page_dirty(page);
2761 	VM_BUG_ON_PAGE(PageAnon(page), page);
2762 	/*
2763 	 * Take a local copy of the address_space - page.mapping may be zeroed
2764 	 * by truncate after unlock_page().   The address_space itself remains
2765 	 * pinned by vma->vm_file's reference.  We rely on unlock_page()'s
2766 	 * release semantics to prevent the compiler from undoing this copying.
2767 	 */
2768 	mapping = page_rmapping(page);
2769 	unlock_page(page);
2770 
2771 	if (!page_mkwrite)
2772 		file_update_time(vma->vm_file);
2773 
2774 	/*
2775 	 * Throttle page dirtying rate down to writeback speed.
2776 	 *
2777 	 * mapping may be NULL here because some device drivers do not
2778 	 * set page.mapping but still dirty their pages
2779 	 *
2780 	 * Drop the mmap_lock before waiting on IO, if we can. The file
2781 	 * is pinning the mapping, as per above.
2782 	 */
2783 	if ((dirtied || page_mkwrite) && mapping) {
2784 		struct file *fpin;
2785 
2786 		fpin = maybe_unlock_mmap_for_io(vmf, NULL);
2787 		balance_dirty_pages_ratelimited(mapping);
2788 		if (fpin) {
2789 			fput(fpin);
2790 			return VM_FAULT_RETRY;
2791 		}
2792 	}
2793 
2794 	return 0;
2795 }
2796 
2797 /*
2798  * Handle write page faults for pages that can be reused in the current vma
2799  *
2800  * This can happen either because the mapping has the VM_SHARED flag set,
2801  * or because we hold the last remaining reference to the page. In either
2802  * case, all we need to do here is to mark the page as writable and update
2803  * any related book-keeping.
2804  */
2805 static inline void wp_page_reuse(struct vm_fault *vmf)
2806 	__releases(vmf->ptl)
2807 {
2808 	struct vm_area_struct *vma = vmf->vma;
2809 	struct page *page = vmf->page;
2810 	pte_t entry;
2811 	/*
2812 	 * Clear the page's cpupid information as the existing
2813 	 * information potentially belongs to a now completely
2814 	 * unrelated process.
2815 	 */
2816 	if (page)
2817 		page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1);
2818 
2819 	flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
2820 	entry = pte_mkyoung(vmf->orig_pte);
2821 	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2822 	if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
2823 		update_mmu_cache(vma, vmf->address, vmf->pte);
2824 	pte_unmap_unlock(vmf->pte, vmf->ptl);
2825 	count_vm_event(PGREUSE);
2826 }
2827 
2828 /*
2829  * Handle the case of a page which we actually need to copy to a new page.
2830  *
2831  * Called with mmap_lock locked and the old page referenced, but
2832  * without the ptl held.
2833  *
2834  * High level logic flow:
2835  *
2836  * - Allocate a page, copy the content of the old page to the new one.
2837  * - Handle book keeping and accounting - cgroups, mmu-notifiers, etc.
2838  * - Take the PTL. If the pte changed, bail out and release the allocated page
2839  * - If the pte is still the way we remember it, update the page table and all
2840  *   relevant references. This includes dropping the reference the page-table
2841  *   held to the old page, as well as updating the rmap.
2842  * - In any case, unlock the PTL and drop the reference we took to the old page.
2843  */
2844 static vm_fault_t wp_page_copy(struct vm_fault *vmf)
2845 {
2846 	struct vm_area_struct *vma = vmf->vma;
2847 	struct mm_struct *mm = vma->vm_mm;
2848 	struct page *old_page = vmf->page;
2849 	struct page *new_page = NULL;
2850 	pte_t entry;
2851 	int page_copied = 0;
2852 	struct mmu_notifier_range range;
2853 
2854 	if (unlikely(anon_vma_prepare(vma)))
2855 		goto oom;
2856 
2857 	if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
2858 		new_page = alloc_zeroed_user_highpage_movable(vma,
2859 							      vmf->address);
2860 		if (!new_page)
2861 			goto oom;
2862 	} else {
2863 		new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
2864 				vmf->address);
2865 		if (!new_page)
2866 			goto oom;
2867 
2868 		if (!cow_user_page(new_page, old_page, vmf)) {
2869 			/*
2870 			 * COW failed; if the fault was resolved by another
2871 			 * thread, that's fine. If not, userspace will re-fault
2872 			 * on the same address and we will handle the fault
2873 			 * on the second attempt.
2874 			 */
2875 			put_page(new_page);
2876 			if (old_page)
2877 				put_page(old_page);
2878 			return 0;
2879 		}
2880 	}
2881 
2882 	if (mem_cgroup_charge(new_page, mm, GFP_KERNEL))
2883 		goto oom_free_new;
2884 	cgroup_throttle_swaprate(new_page, GFP_KERNEL);
2885 
2886 	__SetPageUptodate(new_page);
2887 
2888 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
2889 				vmf->address & PAGE_MASK,
2890 				(vmf->address & PAGE_MASK) + PAGE_SIZE);
2891 	mmu_notifier_invalidate_range_start(&range);
2892 
2893 	/*
2894 	 * Re-check the pte - we dropped the lock
2895 	 */
2896 	vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
2897 	if (likely(pte_same(*vmf->pte, vmf->orig_pte))) {
2898 		if (old_page) {
2899 			if (!PageAnon(old_page)) {
2900 				dec_mm_counter_fast(mm,
2901 						mm_counter_file(old_page));
2902 				inc_mm_counter_fast(mm, MM_ANONPAGES);
2903 			}
2904 		} else {
2905 			inc_mm_counter_fast(mm, MM_ANONPAGES);
2906 		}
2907 		flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
2908 		entry = mk_pte(new_page, vma->vm_page_prot);
2909 		entry = pte_sw_mkyoung(entry);
2910 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2911 		/*
2912 		 * Clear the pte entry and flush it first, before updating the
2913 		 * pte with the new entry. This will avoid a race condition
2914 		 * seen in the presence of one thread doing SMC and another
2915 		 * thread doing COW.
2916 		 */
2917 		ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
2918 		page_add_new_anon_rmap(new_page, vma, vmf->address, false);
2919 		lru_cache_add_inactive_or_unevictable(new_page, vma);
2920 		/*
2921 		 * We call the notify macro here because, when using secondary
2922 		 * mmu page tables (such as kvm shadow page tables), we want the
2923 		 * new page to be mapped directly into the secondary page table.
2924 		 */
2925 		set_pte_at_notify(mm, vmf->address, vmf->pte, entry);
2926 		update_mmu_cache(vma, vmf->address, vmf->pte);
2927 		if (old_page) {
2928 			/*
2929 			 * Only after switching the pte to the new page may
2930 			 * we remove the mapcount here. Otherwise another
2931 			 * process may come and find the rmap count decremented
2932 			 * before the pte is switched to the new page, and
2933 			 * "reuse" the old page writing into it while our pte
2934 			 * here still points into it and can be read by other
2935 			 * threads.
2936 			 *
2937 			 * The critical issue is to order this
2938 			 * page_remove_rmap with the ptep_clear_flush above.
2939 			 * Those stores are ordered by (if nothing else,)
2940 			 * the barrier present in the atomic_add_negative
2941 			 * in page_remove_rmap.
2942 			 *
2943 			 * Then the TLB flush in ptep_clear_flush ensures that
2944 			 * no process can access the old page before the
2945 			 * decremented mapcount is visible. And the old page
2946 			 * cannot be reused until after the decremented
2947 			 * mapcount is visible. So transitively, TLBs to
2948 			 * old page will be flushed before it can be reused.
2949 			 */
2950 			page_remove_rmap(old_page, false);
2951 		}
2952 
2953 		/* Free the old page.. */
2954 		new_page = old_page;
2955 		page_copied = 1;
2956 	} else {
2957 		update_mmu_tlb(vma, vmf->address, vmf->pte);
2958 	}
2959 
2960 	if (new_page)
2961 		put_page(new_page);
2962 
2963 	pte_unmap_unlock(vmf->pte, vmf->ptl);
2964 	/*
2965 	 * No need to double call mmu_notifier->invalidate_range() callback as
2966 	 * the above ptep_clear_flush_notify() did already call it.
2967 	 */
2968 	mmu_notifier_invalidate_range_only_end(&range);
2969 	if (old_page) {
2970 		/*
2971 		 * Don't let another task, with possibly unlocked vma,
2972 		 * keep the mlocked page.
2973 		 */
2974 		if (page_copied && (vma->vm_flags & VM_LOCKED)) {
2975 			lock_page(old_page);	/* LRU manipulation */
2976 			if (PageMlocked(old_page))
2977 				munlock_vma_page(old_page);
2978 			unlock_page(old_page);
2979 		}
2980 		put_page(old_page);
2981 	}
2982 	return page_copied ? VM_FAULT_WRITE : 0;
2983 oom_free_new:
2984 	put_page(new_page);
2985 oom:
2986 	if (old_page)
2987 		put_page(old_page);
2988 	return VM_FAULT_OOM;
2989 }
2990 
2991 /**
2992  * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE
2993  *			  writeable once the page is prepared
2994  *
2995  * @vmf: structure describing the fault
2996  *
2997  * This function handles all that is needed to finish a write page fault in a
2998  * shared mapping due to PTE being read-only once the mapped page is prepared.
2999  * It handles locking of PTE and modifying it.
3000  *
3001  * The function expects the page to be locked or other protection against
3002  * concurrent faults / writeback (such as DAX radix tree locks).
3003  *
3004  * Return: %VM_FAULT_WRITE on success, %0 when PTE got changed before
3005  * we acquired PTE lock.
3006  */
3007 vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf)
3008 {
3009 	WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
3010 	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
3011 				       &vmf->ptl);
3012 	/*
3013 	 * We might have raced with another page fault while we released the
3014 	 * pte_offset_map_lock.
3015 	 */
3016 	if (!pte_same(*vmf->pte, vmf->orig_pte)) {
3017 		update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
3018 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3019 		return VM_FAULT_NOPAGE;
3020 	}
3021 	wp_page_reuse(vmf);
3022 	return 0;
3023 }
3024 
3025 /*
3026  * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
3027  * mapping
3028  */
3029 static vm_fault_t wp_pfn_shared(struct vm_fault *vmf)
3030 {
3031 	struct vm_area_struct *vma = vmf->vma;
3032 
3033 	if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
3034 		vm_fault_t ret;
3035 
3036 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3037 		vmf->flags |= FAULT_FLAG_MKWRITE;
3038 		ret = vma->vm_ops->pfn_mkwrite(vmf);
3039 		if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
3040 			return ret;
3041 		return finish_mkwrite_fault(vmf);
3042 	}
3043 	wp_page_reuse(vmf);
3044 	return VM_FAULT_WRITE;
3045 }
3046 
3047 static vm_fault_t wp_page_shared(struct vm_fault *vmf)
3048 	__releases(vmf->ptl)
3049 {
3050 	struct vm_area_struct *vma = vmf->vma;
3051 	vm_fault_t ret = VM_FAULT_WRITE;
3052 
3053 	get_page(vmf->page);
3054 
3055 	if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
3056 		vm_fault_t tmp;
3057 
3058 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3059 		tmp = do_page_mkwrite(vmf);
3060 		if (unlikely(!tmp || (tmp &
3061 				      (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
3062 			put_page(vmf->page);
3063 			return tmp;
3064 		}
3065 		tmp = finish_mkwrite_fault(vmf);
3066 		if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
3067 			unlock_page(vmf->page);
3068 			put_page(vmf->page);
3069 			return tmp;
3070 		}
3071 	} else {
3072 		wp_page_reuse(vmf);
3073 		lock_page(vmf->page);
3074 	}
3075 	ret |= fault_dirty_shared_page(vmf);
3076 	put_page(vmf->page);
3077 
3078 	return ret;
3079 }
3080 
3081 /*
3082  * This routine handles present pages, when users try to write
3083  * to a shared page. It is done by copying the page to a new address
3084  * and decrementing the shared-page counter for the old page.
3085  *
3086  * Note that this routine assumes that the protection checks have been
3087  * done by the caller (the low-level page fault routine in most cases).
3088  * Thus we can safely just mark it writable once we've done any necessary
3089  * COW.
3090  *
3091  * We also mark the page dirty at this point even though the page will
3092  * change only once the write actually happens. This avoids a few races,
3093  * and potentially makes it more efficient.
3094  *
3095  * We enter with non-exclusive mmap_lock (to exclude vma changes,
3096  * but allow concurrent faults), with pte both mapped and locked.
3097  * We return with mmap_lock still held, but pte unmapped and unlocked.
3098  */
3099 static vm_fault_t do_wp_page(struct vm_fault *vmf)
3100 	__releases(vmf->ptl)
3101 {
3102 	struct vm_area_struct *vma = vmf->vma;
3103 
3104 	if (userfaultfd_pte_wp(vma, *vmf->pte)) {
3105 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3106 		return handle_userfault(vmf, VM_UFFD_WP);
3107 	}
3108 
3109 	vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
3110 	if (!vmf->page) {
3111 		/*
3112 		 * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
3113 		 * VM_PFNMAP VMA.
3114 		 *
3115 		 * We should not cow pages in a shared writeable mapping.
3116 		 * Just mark the pages writable and/or call ops->pfn_mkwrite.
3117 		 */
3118 		if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
3119 				     (VM_WRITE|VM_SHARED))
3120 			return wp_pfn_shared(vmf);
3121 
3122 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3123 		return wp_page_copy(vmf);
3124 	}
3125 
3126 	/*
3127 	 * Take out anonymous pages first; anonymous shared vmas are
3128 	 * not dirty accountable.
3129 	 */
3130 	if (PageAnon(vmf->page)) {
3131 		struct page *page = vmf->page;
3132 
3133 		/* PageKsm() doesn't necessarily raise the page refcount */
3134 		if (PageKsm(page) || page_count(page) != 1)
3135 			goto copy;
3136 		if (!trylock_page(page))
3137 			goto copy;
3138 		if (PageKsm(page) || page_mapcount(page) != 1 || page_count(page) != 1) {
3139 			unlock_page(page);
3140 			goto copy;
3141 		}
3142 		/*
3143 		 * Ok, we've got the only map reference, and the only
3144 		 * page count reference, and the page is locked,
3145 		 * it's dark out, and we're wearing sunglasses. Hit it.
3146 		 */
3147 		unlock_page(page);
3148 		wp_page_reuse(vmf);
3149 		return VM_FAULT_WRITE;
3150 	} else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
3151 					(VM_WRITE|VM_SHARED))) {
3152 		return wp_page_shared(vmf);
3153 	}
3154 copy:
3155 	/*
3156 	 * Ok, we need to copy. Oh, well..
3157 	 */
3158 	get_page(vmf->page);
3159 
3160 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3161 	return wp_page_copy(vmf);
3162 }
3163 
3164 static void unmap_mapping_range_vma(struct vm_area_struct *vma,
3165 		unsigned long start_addr, unsigned long end_addr,
3166 		struct zap_details *details)
3167 {
3168 	zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
3169 }
3170 
3171 static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
3172 					    struct zap_details *details)
3173 {
3174 	struct vm_area_struct *vma;
3175 	pgoff_t vba, vea, zba, zea;
3176 
3177 	vma_interval_tree_foreach(vma, root,
3178 			details->first_index, details->last_index) {
3179 
3180 		vba = vma->vm_pgoff;
3181 		vea = vba + vma_pages(vma) - 1;
3182 		zba = details->first_index;
3183 		if (zba < vba)
3184 			zba = vba;
3185 		zea = details->last_index;
3186 		if (zea > vea)
3187 			zea = vea;
3188 
3189 		unmap_mapping_range_vma(vma,
3190 			((zba - vba) << PAGE_SHIFT) + vma->vm_start,
3191 			((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
3192 				details);
3193 	}
3194 }
3195 
3196 /**
3197  * unmap_mapping_pages() - Unmap pages from processes.
3198  * @mapping: The address space containing pages to be unmapped.
3199  * @start: Index of first page to be unmapped.
3200  * @nr: Number of pages to be unmapped.  0 to unmap to end of file.
3201  * @even_cows: Whether to unmap even private COWed pages.
3202  *
3203  * Unmap the pages in this address space from any userspace process which
3204  * has them mmaped.  Generally, you want to remove COWed pages as well when
3205  * a file is being truncated, but not when invalidating pages from the page
3206  * cache.
3207  */
3208 void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
3209 		pgoff_t nr, bool even_cows)
3210 {
3211 	struct zap_details details = { };
3212 
3213 	details.check_mapping = even_cows ? NULL : mapping;
3214 	details.first_index = start;
3215 	details.last_index = start + nr - 1;
3216 	if (details.last_index < details.first_index)
3217 		details.last_index = ULONG_MAX;
3218 
3219 	i_mmap_lock_write(mapping);
3220 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
3221 		unmap_mapping_range_tree(&mapping->i_mmap, &details);
3222 	i_mmap_unlock_write(mapping);
3223 }
3224 
3225 /**
3226  * unmap_mapping_range - unmap the portion of all mmaps in the specified
3227  * address_space corresponding to the specified byte range in the underlying
3228  * file.
3229  *
3230  * @mapping: the address space containing mmaps to be unmapped.
3231  * @holebegin: byte in first page to unmap, relative to the start of
3232  * the underlying file.  This will be rounded down to a PAGE_SIZE
3233  * boundary.  Note that this is different from truncate_pagecache(), which
3234  * must keep the partial page.  In contrast, we must get rid of
3235  * partial pages.
3236  * @holelen: size of prospective hole in bytes.  This will be rounded
3237  * up to a PAGE_SIZE boundary.  A holelen of zero truncates to the
3238  * end of the file.
3239  * @even_cows: 1 when truncating a file, unmap even private COWed pages;
3240  * 0 when invalidating pagecache, so that private data is not thrown away.
3241  */
3242 void unmap_mapping_range(struct address_space *mapping,
3243 		loff_t const holebegin, loff_t const holelen, int even_cows)
3244 {
3245 	pgoff_t hba = holebegin >> PAGE_SHIFT;
3246 	pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3247 
3248 	/* Check for overflow. */
3249 	if (sizeof(holelen) > sizeof(hlen)) {
3250 		long long holeend =
3251 			(holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3252 		if (holeend & ~(long long)ULONG_MAX)
3253 			hlen = ULONG_MAX - hba + 1;
3254 	}
3255 
3256 	unmap_mapping_pages(mapping, hba, hlen, even_cows);
3257 }
3258 EXPORT_SYMBOL(unmap_mapping_range);
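
/*
 * Illustrative sketch, modelled on truncate_pagecache(): how a filesystem
 * truncate path uses unmap_mapping_range() so that no process keeps a
 * stale mapping beyond the new size.  "inode" and "newsize" are
 * hypothetical arguments from the caller.
 */
static void __maybe_unused example_truncate_mappings(struct inode *inode,
				      loff_t newsize)
{
	loff_t holebegin = round_up(newsize, PAGE_SIZE);

	/* even_cows == 1: private COW copies must also go away on truncate */
	unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
}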
3259 
3260 /*
3261  * We enter with non-exclusive mmap_lock (to exclude vma changes,
3262  * but allow concurrent faults), and pte mapped but not yet locked.
3263  * We return with pte unmapped and unlocked.
3264  *
3265  * We return with the mmap_lock locked or unlocked in the same cases
3266  * as does filemap_fault().
3267  */
3268 vm_fault_t do_swap_page(struct vm_fault *vmf)
3269 {
3270 	struct vm_area_struct *vma = vmf->vma;
3271 	struct page *page = NULL, *swapcache;
3272 	swp_entry_t entry;
3273 	pte_t pte;
3274 	int locked;
3275 	int exclusive = 0;
3276 	vm_fault_t ret = 0;
3277 	void *shadow = NULL;
3278 
3279 	if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte))
3280 		goto out;
3281 
3282 	entry = pte_to_swp_entry(vmf->orig_pte);
3283 	if (unlikely(non_swap_entry(entry))) {
3284 		if (is_migration_entry(entry)) {
3285 			migration_entry_wait(vma->vm_mm, vmf->pmd,
3286 					     vmf->address);
3287 		} else if (is_device_private_entry(entry)) {
3288 			vmf->page = device_private_entry_to_page(entry);
3289 			ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
3290 		} else if (is_hwpoison_entry(entry)) {
3291 			ret = VM_FAULT_HWPOISON;
3292 		} else {
3293 			print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
3294 			ret = VM_FAULT_SIGBUS;
3295 		}
3296 		goto out;
3297 	}
3298 
3299 
3300 	delayacct_set_flag(DELAYACCT_PF_SWAPIN);
3301 	page = lookup_swap_cache(entry, vma, vmf->address);
3302 	swapcache = page;
3303 
3304 	if (!page) {
3305 		struct swap_info_struct *si = swp_swap_info(entry);
3306 
3307 		if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
3308 		    __swap_count(entry) == 1) {
3309 			/* skip swapcache */
3310 			page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
3311 							vmf->address);
3312 			if (page) {
3313 				int err;
3314 
3315 				__SetPageLocked(page);
3316 				__SetPageSwapBacked(page);
3317 				set_page_private(page, entry.val);
3318 
3319 				/* Tell memcg to use swap ownership records */
3320 				SetPageSwapCache(page);
3321 				err = mem_cgroup_charge(page, vma->vm_mm,
3322 							GFP_KERNEL);
3323 				ClearPageSwapCache(page);
3324 				if (err) {
3325 					ret = VM_FAULT_OOM;
3326 					goto out_page;
3327 				}
3328 
3329 				shadow = get_shadow_from_swap_cache(entry);
3330 				if (shadow)
3331 					workingset_refault(page, shadow);
3332 
3333 				lru_cache_add(page);
3334 				swap_readpage(page, true);
3335 			}
3336 		} else {
3337 			page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
3338 						vmf);
3339 			swapcache = page;
3340 		}
3341 
3342 		if (!page) {
3343 			/*
3344 			 * Back out if somebody else faulted in this pte
3345 			 * while we released the pte lock.
3346 			 */
3347 			vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
3348 					vmf->address, &vmf->ptl);
3349 			if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
3350 				ret = VM_FAULT_OOM;
3351 			delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
3352 			goto unlock;
3353 		}
3354 
3355 		/* Had to read the page from swap area: Major fault */
3356 		ret = VM_FAULT_MAJOR;
3357 		count_vm_event(PGMAJFAULT);
3358 		count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
3359 	} else if (PageHWPoison(page)) {
3360 		/*
3361 		 * hwpoisoned dirty swapcache pages are kept for killing
3362 		 * owner processes (which may be unknown at hwpoison time)
3363 		 */
3364 		ret = VM_FAULT_HWPOISON;
3365 		delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
3366 		goto out_release;
3367 	}
3368 
3369 	locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags);
3370 
3371 	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
3372 	if (!locked) {
3373 		ret |= VM_FAULT_RETRY;
3374 		goto out_release;
3375 	}
3376 
3377 	/*
3378 	 * Make sure try_to_free_swap or reuse_swap_page or swapoff did not
3379 	 * release the swapcache from under us.  The page pin, and pte_same
3380 	 * test below, are not enough to exclude that.  Even if it is still
3381 	 * swapcache, we need to check that the page's swap has not changed.
3382 	 */
3383 	if (unlikely((!PageSwapCache(page) ||
3384 			page_private(page) != entry.val)) && swapcache)
3385 		goto out_page;
3386 
3387 	page = ksm_might_need_to_copy(page, vma, vmf->address);
3388 	if (unlikely(!page)) {
3389 		ret = VM_FAULT_OOM;
3390 		page = swapcache;
3391 		goto out_page;
3392 	}
3393 
3394 	cgroup_throttle_swaprate(page, GFP_KERNEL);
3395 
3396 	/*
3397 	 * Back out if somebody else already faulted in this pte.
3398 	 */
3399 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3400 			&vmf->ptl);
3401 	if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte)))
3402 		goto out_nomap;
3403 
3404 	if (unlikely(!PageUptodate(page))) {
3405 		ret = VM_FAULT_SIGBUS;
3406 		goto out_nomap;
3407 	}
3408 
3409 	/*
3410 	 * The page isn't present yet, go ahead with the fault.
3411 	 *
3412 	 * Be careful about the sequence of operations here.
3413 	 * To get its accounting right, reuse_swap_page() must be called
3414 	 * while the page is counted on swap but not yet in mapcount i.e.
3415 	 * before page_add_anon_rmap() and swap_free(); try_to_free_swap()
3416 	 * must be called after the swap_free(), or it will never succeed.
3417 	 */
3418 
3419 	inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
3420 	dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS);
3421 	pte = mk_pte(page, vma->vm_page_prot);
3422 	if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
3423 		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
3424 		vmf->flags &= ~FAULT_FLAG_WRITE;
3425 		ret |= VM_FAULT_WRITE;
3426 		exclusive = RMAP_EXCLUSIVE;
3427 	}
3428 	flush_icache_page(vma, page);
3429 	if (pte_swp_soft_dirty(vmf->orig_pte))
3430 		pte = pte_mksoft_dirty(pte);
3431 	if (pte_swp_uffd_wp(vmf->orig_pte)) {
3432 		pte = pte_mkuffd_wp(pte);
3433 		pte = pte_wrprotect(pte);
3434 	}
3435 	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
3436 	arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
3437 	vmf->orig_pte = pte;
3438 
3439 	/* ksm created a completely new copy */
3440 	if (unlikely(page != swapcache && swapcache)) {
3441 		page_add_new_anon_rmap(page, vma, vmf->address, false);
3442 		lru_cache_add_inactive_or_unevictable(page, vma);
3443 	} else {
3444 		do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
3445 	}
3446 
3447 	swap_free(entry);
3448 	if (mem_cgroup_swap_full(page) ||
3449 	    (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
3450 		try_to_free_swap(page);
3451 	unlock_page(page);
3452 	if (page != swapcache && swapcache) {
3453 		/*
3454 		 * Hold the lock to avoid the swap entry to be reused
3455 		 * Hold the lock to prevent the swap entry from being reused
3456 		 * (to avoid false positives from pte_same). For
3457 		 * further safety release the lock after the swap_free
3458 		 * so that the swap count won't change under a
3459 		 * parallel locked swapcache.
3460 		 */
3461 		unlock_page(swapcache);
3462 		put_page(swapcache);
3463 	}
3464 
3465 	if (vmf->flags & FAULT_FLAG_WRITE) {
3466 		ret |= do_wp_page(vmf);
3467 		if (ret & VM_FAULT_ERROR)
3468 			ret &= VM_FAULT_ERROR;
3469 		goto out;
3470 	}
3471 
3472 	/* No need to invalidate - it was non-present before */
3473 	update_mmu_cache(vma, vmf->address, vmf->pte);
3474 unlock:
3475 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3476 out:
3477 	return ret;
3478 out_nomap:
3479 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3480 out_page:
3481 	unlock_page(page);
3482 out_release:
3483 	put_page(page);
3484 	if (page != swapcache && swapcache) {
3485 		unlock_page(swapcache);
3486 		put_page(swapcache);
3487 	}
3488 	return ret;
3489 }
3490 
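/*
 * Illustrative flow (a sketch for exposition, not part of the original
 * source) for the swap-in path handled above: the page is found or read
 * into the swap cache, locked, re-checked against the pte under the PT
 * lock, mapped, and the swap slot may then be freed:
 *
 *	swap cache lookup / readahead -> lock_page_or_retry()
 *	  -> pte_same() re-check under vmf->ptl
 *	  -> set_pte_at() + rmap update, then swap_free()
 *	  -> do_wp_page() for a write fault when the page could not be
 *	     reused exclusively
 */
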
3491 /*
3492  * We enter with non-exclusive mmap_lock (to exclude vma changes,
3493  * but allow concurrent faults), and pte mapped but not yet locked.
3494  * We return with mmap_lock still held, but pte unmapped and unlocked.
3495  */
3496 static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
3497 {
3498 	struct vm_area_struct *vma = vmf->vma;
3499 	struct page *page;
3500 	vm_fault_t ret = 0;
3501 	pte_t entry;
3502 
3503 	/* File mapping without ->vm_ops ? */
3504 	if (vma->vm_flags & VM_SHARED)
3505 		return VM_FAULT_SIGBUS;
3506 
3507 	/*
3508 	 * Use pte_alloc() instead of pte_alloc_map().  We can't run
3509 	 * pte_offset_map() on pmds where a huge pmd might be created
3510 	 * from a different thread.
3511 	 *
3512 	 * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when
3513 	 * parallel threads are excluded by other means.
3514 	 *
3515 	 * Here we only have mmap_read_lock(mm).
3516 	 */
3517 	if (pte_alloc(vma->vm_mm, vmf->pmd))
3518 		return VM_FAULT_OOM;
3519 
3520 	/* See the comment in pte_alloc_one_map() */
3521 	if (unlikely(pmd_trans_unstable(vmf->pmd)))
3522 		return 0;
3523 
3524 	/* Use the zero-page for reads */
3525 	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
3526 			!mm_forbids_zeropage(vma->vm_mm)) {
3527 		entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
3528 						vma->vm_page_prot));
3529 		vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
3530 				vmf->address, &vmf->ptl);
3531 		if (!pte_none(*vmf->pte)) {
3532 			update_mmu_tlb(vma, vmf->address, vmf->pte);
3533 			goto unlock;
3534 		}
3535 		ret = check_stable_address_space(vma->vm_mm);
3536 		if (ret)
3537 			goto unlock;
3538 		/* Deliver the page fault to userland, check inside PT lock */
3539 		if (userfaultfd_missing(vma)) {
3540 			pte_unmap_unlock(vmf->pte, vmf->ptl);
3541 			return handle_userfault(vmf, VM_UFFD_MISSING);
3542 		}
3543 		goto setpte;
3544 	}
3545 
3546 	/* Allocate our own private page. */
3547 	if (unlikely(anon_vma_prepare(vma)))
3548 		goto oom;
3549 	page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
3550 	if (!page)
3551 		goto oom;
3552 
3553 	if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
3554 		goto oom_free_page;
3555 	cgroup_throttle_swaprate(page, GFP_KERNEL);
3556 
3557 	/*
3558 	 * The memory barrier inside __SetPageUptodate makes sure that
3559 	 * preceding stores to the page contents become visible before
3560 	 * the set_pte_at() write.
3561 	 */
3562 	__SetPageUptodate(page);
3563 
3564 	entry = mk_pte(page, vma->vm_page_prot);
3565 	entry = pte_sw_mkyoung(entry);
3566 	if (vma->vm_flags & VM_WRITE)
3567 		entry = pte_mkwrite(pte_mkdirty(entry));
3568 
3569 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3570 			&vmf->ptl);
3571 	if (!pte_none(*vmf->pte)) {
3572 		update_mmu_cache(vma, vmf->address, vmf->pte);
3573 		goto release;
3574 	}
3575 
3576 	ret = check_stable_address_space(vma->vm_mm);
3577 	if (ret)
3578 		goto release;
3579 
3580 	/* Deliver the page fault to userland, check inside PT lock */
3581 	if (userfaultfd_missing(vma)) {
3582 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3583 		put_page(page);
3584 		return handle_userfault(vmf, VM_UFFD_MISSING);
3585 	}
3586 
3587 	inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
3588 	page_add_new_anon_rmap(page, vma, vmf->address, false);
3589 	lru_cache_add_inactive_or_unevictable(page, vma);
3590 setpte:
3591 	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
3592 
3593 	/* No need to invalidate - it was non-present before */
3594 	update_mmu_cache(vma, vmf->address, vmf->pte);
3595 unlock:
3596 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3597 	return ret;
3598 release:
3599 	put_page(page);
3600 	goto unlock;
3601 oom_free_page:
3602 	put_page(page);
3603 oom:
3604 	return VM_FAULT_OOM;
3605 }
3606 
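/*
 * Illustrative sketch (hypothetical userspace example, for exposition
 * only): how the two paths of do_anonymous_page() above look from
 * userspace.  A read of untouched MAP_ANONYMOUS memory is serviced by the
 * shared zero page; only the first write allocates a private page.
 *
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	char c = p[0];	// read fault: zero page mapped, no allocation
 *	p[0] = 1;	// write fault: do_anonymous_page() allocates a page
 */
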
3607 /*
3608  * The mmap_lock must have been held on entry, and may have been
3609  * released depending on flags and vma->vm_ops->fault() return value.
3610  * See filemap_fault() and __lock_page_or_retry().
3611  */
3612 static vm_fault_t __do_fault(struct vm_fault *vmf)
3613 {
3614 	struct vm_area_struct *vma = vmf->vma;
3615 	vm_fault_t ret;
3616 
3617 	/*
3618 	 * Preallocate the pte before we take the page lock, because this might
3619 	 * lead to deadlocks in memcg reclaim, which waits for pages under writeback:
3620 	 *				lock_page(A)
3621 	 *				SetPageWriteback(A)
3622 	 *				unlock_page(A)
3623 	 * lock_page(B)
3624 	 *				lock_page(B)
3625 	 * pte_alloc_one
3626 	 *   shrink_page_list
3627 	 *     wait_on_page_writeback(A)
3628 	 *				SetPageWriteback(B)
3629 	 *				unlock_page(B)
3630 	 *				# flush A, B to clear the writeback
3631 	 */
3632 	if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
3633 		vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
3634 		if (!vmf->prealloc_pte)
3635 			return VM_FAULT_OOM;
3636 		smp_wmb(); /* See comment in __pte_alloc() */
3637 	}
3638 
3639 	ret = vma->vm_ops->fault(vmf);
3640 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
3641 			    VM_FAULT_DONE_COW)))
3642 		return ret;
3643 
3644 	if (unlikely(PageHWPoison(vmf->page))) {
3645 		if (ret & VM_FAULT_LOCKED)
3646 			unlock_page(vmf->page);
3647 		put_page(vmf->page);
3648 		vmf->page = NULL;
3649 		return VM_FAULT_HWPOISON;
3650 	}
3651 
3652 	if (unlikely(!(ret & VM_FAULT_LOCKED)))
3653 		lock_page(vmf->page);
3654 	else
3655 		VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
3656 
3657 	return ret;
3658 }
3659 
3660 /*
3661  * The ordering of these checks is important for pmds with _PAGE_DEVMAP set.
3662  * If we check pmd_trans_unstable() first we will trip the pmd_bad() check
3663  * inside pmd_none_or_trans_huge_or_clear_bad(). This will end up correctly
3664  * returning 1 but not before it spams dmesg with the pmd_clear_bad() output.
3665  */
3666 static int pmd_devmap_trans_unstable(pmd_t *pmd)
3667 {
3668 	return pmd_devmap(*pmd) || pmd_trans_unstable(pmd);
3669 }
3670 
3671 static vm_fault_t pte_alloc_one_map(struct vm_fault *vmf)
3672 {
3673 	struct vm_area_struct *vma = vmf->vma;
3674 
3675 	if (!pmd_none(*vmf->pmd))
3676 		goto map_pte;
3677 	if (vmf->prealloc_pte) {
3678 		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
3679 		if (unlikely(!pmd_none(*vmf->pmd))) {
3680 			spin_unlock(vmf->ptl);
3681 			goto map_pte;
3682 		}
3683 
3684 		mm_inc_nr_ptes(vma->vm_mm);
3685 		pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
3686 		spin_unlock(vmf->ptl);
3687 		vmf->prealloc_pte = NULL;
3688 	} else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) {
3689 		return VM_FAULT_OOM;
3690 	}
3691 map_pte:
3692 	/*
3693 	 * If a huge pmd materialized under us just retry later.  Use
3694 	 * pmd_trans_unstable() via pmd_devmap_trans_unstable() instead of
3695 	 * pmd_trans_huge() to ensure the pmd didn't become pmd_trans_huge
3696 	 * under us and then back to pmd_none, as a result of MADV_DONTNEED
3697 	 * running immediately after a huge pmd fault in a different thread of
3698 	 * this mm, in turn leading to a misleading pmd_trans_huge() retval.
3699 	 * All we have to ensure is that it is a regular pmd that we can walk
3700 	 * with pte_offset_map() and we can do that through an atomic read in
3701 	 * C, which is what pmd_trans_unstable() provides.
3702 	 */
3703 	if (pmd_devmap_trans_unstable(vmf->pmd))
3704 		return VM_FAULT_NOPAGE;
3705 
3706 	/*
3707 	 * At this point we know that our vmf->pmd points to a page of ptes
3708 	 * and it cannot become pmd_none(), pmd_devmap() or pmd_trans_huge()
3709 	 * for the duration of the fault.  If a racing MADV_DONTNEED runs and
3710 	 * we zap the ptes pointed to by our vmf->pmd, the vmf->ptl will still
3711 	 * be valid and we will re-check to make sure the vmf->pte isn't
3712 	 * pte_none() under vmf->ptl protection when we return to
3713 	 * alloc_set_pte().
3714 	 */
3715 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3716 			&vmf->ptl);
3717 	return 0;
3718 }
3719 
3720 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3721 static void deposit_prealloc_pte(struct vm_fault *vmf)
3722 {
3723 	struct vm_area_struct *vma = vmf->vma;
3724 
3725 	pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
3726 	/*
3727 	 * We are going to consume the prealloc table,
3728 	 * count that as nr_ptes.
3729 	 */
3730 	mm_inc_nr_ptes(vma->vm_mm);
3731 	vmf->prealloc_pte = NULL;
3732 }
3733 
3734 static vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
3735 {
3736 	struct vm_area_struct *vma = vmf->vma;
3737 	bool write = vmf->flags & FAULT_FLAG_WRITE;
3738 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
3739 	pmd_t entry;
3740 	int i;
3741 	vm_fault_t ret;
3742 
3743 	if (!transhuge_vma_suitable(vma, haddr))
3744 		return VM_FAULT_FALLBACK;
3745 
3746 	ret = VM_FAULT_FALLBACK;
3747 	page = compound_head(page);
3748 
3749 	/*
3750 	 * Archs like ppc64 need additional space to store information
3751 	 * related to the pte entry. Use the preallocated table for that.
3752 	 */
3753 	if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
3754 		vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
3755 		if (!vmf->prealloc_pte)
3756 			return VM_FAULT_OOM;
3757 		smp_wmb(); /* See comment in __pte_alloc() */
3758 	}
3759 
3760 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
3761 	if (unlikely(!pmd_none(*vmf->pmd)))
3762 		goto out;
3763 
3764 	for (i = 0; i < HPAGE_PMD_NR; i++)
3765 		flush_icache_page(vma, page + i);
3766 
3767 	entry = mk_huge_pmd(page, vma->vm_page_prot);
3768 	if (write)
3769 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
3770 
3771 	add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR);
3772 	page_add_file_rmap(page, true);
3773 	/*
3774 	 * deposit and withdraw with pmd lock held
3775 	 */
3776 	if (arch_needs_pgtable_deposit())
3777 		deposit_prealloc_pte(vmf);
3778 
3779 	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
3780 
3781 	update_mmu_cache_pmd(vma, haddr, vmf->pmd);
3782 
3783 	/* fault is handled */
3784 	ret = 0;
3785 	count_vm_event(THP_FILE_MAPPED);
3786 out:
3787 	spin_unlock(vmf->ptl);
3788 	return ret;
3789 }
3790 #else
3791 static vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
3792 {
3793 	BUILD_BUG();
3794 	return 0;
3795 }
3796 #endif
3797 
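/*
 * Illustrative sketch (for exposition only, roughly what
 * transhuge_vma_suitable() checks): do_set_pmd() above can only install a
 * huge mapping when the PMD-sized window around the fault lies entirely
 * within the VMA; otherwise it falls back to PTE mapping.
 *
 *	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
 *	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
 *		return VM_FAULT_FALLBACK;	// map with small pages instead
 */
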
3798 /**
3799  * alloc_set_pte - set up a new PTE entry for the given page and add the
3800  * reverse page mapping. If needed, the function allocates a page table or uses the pre-allocated one.
3801  *
3802  * @vmf: fault environment
3803  * @page: page to map
3804  *
3805  * Caller must take care of unlocking vmf->ptl, if vmf->pte is non-NULL on
3806  * return.
3807  *
3808  * Target users are the page fault handler itself and implementations of
3809  * vm_ops->map_pages.
3810  *
3811  * Return: %0 on success, %VM_FAULT_ code in case of error.
3812  */
3813 vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct page *page)
3814 {
3815 	struct vm_area_struct *vma = vmf->vma;
3816 	bool write = vmf->flags & FAULT_FLAG_WRITE;
3817 	pte_t entry;
3818 	vm_fault_t ret;
3819 
3820 	if (pmd_none(*vmf->pmd) && PageTransCompound(page)) {
3821 		ret = do_set_pmd(vmf, page);
3822 		if (ret != VM_FAULT_FALLBACK)
3823 			return ret;
3824 	}
3825 
3826 	if (!vmf->pte) {
3827 		ret = pte_alloc_one_map(vmf);
3828 		if (ret)
3829 			return ret;
3830 	}
3831 
3832 	/* Re-check under ptl */
3833 	if (unlikely(!pte_none(*vmf->pte))) {
3834 		update_mmu_tlb(vma, vmf->address, vmf->pte);
3835 		return VM_FAULT_NOPAGE;
3836 	}
3837 
3838 	flush_icache_page(vma, page);
3839 	entry = mk_pte(page, vma->vm_page_prot);
3840 	entry = pte_sw_mkyoung(entry);
3841 	if (write)
3842 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3843 	/* copy-on-write page */
3844 	if (write && !(vma->vm_flags & VM_SHARED)) {
3845 		inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
3846 		page_add_new_anon_rmap(page, vma, vmf->address, false);
3847 		lru_cache_add_inactive_or_unevictable(page, vma);
3848 	} else {
3849 		inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
3850 		page_add_file_rmap(page, false);
3851 	}
3852 	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
3853 
3854 	/* no need to invalidate: a not-present page won't be cached */
3855 	update_mmu_cache(vma, vmf->address, vmf->pte);
3856 
3857 	return 0;
3858 }
3859 
3860 
3861 /**
3862  * finish_fault - finish page fault once we have prepared the page to fault
3863  *
3864  * @vmf: structure describing the fault
3865  *
3866  * This function handles all that is needed to finish a page fault once the
3867  * page to fault in is prepared. It handles locking of PTEs, inserts PTE for
3868  * given page, adds reverse page mapping, handles memcg charges and LRU
3869  * addition.
3870  *
3871  * The function expects the page to be locked and on success it consumes a
3872  * reference of a page being mapped (for the PTE which maps it).
3873  *
3874  * Return: %0 on success, %VM_FAULT_ code in case of error.
3875  */
3876 vm_fault_t finish_fault(struct vm_fault *vmf)
3877 {
3878 	struct page *page;
3879 	vm_fault_t ret = 0;
3880 
3881 	/* Did we COW the page? */
3882 	if ((vmf->flags & FAULT_FLAG_WRITE) &&
3883 	    !(vmf->vma->vm_flags & VM_SHARED))
3884 		page = vmf->cow_page;
3885 	else
3886 		page = vmf->page;
3887 
3888 	/*
3889 	 * check even for read faults because we might have lost our CoWed
3890 	 * page
3891 	 */
3892 	if (!(vmf->vma->vm_flags & VM_SHARED))
3893 		ret = check_stable_address_space(vmf->vma->vm_mm);
3894 	if (!ret)
3895 		ret = alloc_set_pte(vmf, page);
3896 	if (vmf->pte)
3897 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3898 	return ret;
3899 }
3900 
3901 static unsigned long fault_around_bytes __read_mostly =
3902 	rounddown_pow_of_two(65536);
3903 
3904 #ifdef CONFIG_DEBUG_FS
3905 static int fault_around_bytes_get(void *data, u64 *val)
3906 {
3907 	*val = fault_around_bytes;
3908 	return 0;
3909 }
3910 
3911 /*
3912  * fault_around_bytes must be rounded down to the nearest page order as it's
3913  * what do_fault_around() expects to see.
3914  */
3915 static int fault_around_bytes_set(void *data, u64 val)
3916 {
3917 	if (val / PAGE_SIZE > PTRS_PER_PTE)
3918 		return -EINVAL;
3919 	if (val > PAGE_SIZE)
3920 		fault_around_bytes = rounddown_pow_of_two(val);
3921 	else
3922 		fault_around_bytes = PAGE_SIZE; /* rounddown_pow_of_two(0) is undefined */
3923 	return 0;
3924 }
3925 DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_fops,
3926 		fault_around_bytes_get, fault_around_bytes_set, "%llu\n");
3927 
3928 static int __init fault_around_debugfs(void)
3929 {
3930 	debugfs_create_file_unsafe("fault_around_bytes", 0644, NULL, NULL,
3931 				   &fault_around_bytes_fops);
3932 	return 0;
3933 }
3934 late_initcall(fault_around_debugfs);
3935 #endif
3936 
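/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096 and
 * PTRS_PER_PTE == 512) for the debugfs setter above:
 *
 *	fault_around_bytes_set(NULL, 65536);	// stored as 65536 (already a power of two)
 *	fault_around_bytes_set(NULL, 6000);	// rounded down to 4096
 *	fault_around_bytes_set(NULL, 100);	// clamped up to PAGE_SIZE (4096)
 *	fault_around_bytes_set(NULL, 4 << 20);	// 4M / 4K > 512 -> -EINVAL
 */
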
3937 /*
3938  * do_fault_around() tries to map a few pages around the fault address. The hope
3939  * is that the pages will be needed soon and this will lower the number of
3940  * faults to handle.
3941  *
3942  * It uses vm_ops->map_pages() to map the pages, which skips the page if it's
3943  * not ready to be mapped: not up-to-date, locked, etc.
3944  *
3945  * This function is called with the page table lock taken. In the split ptlock
3946  * case the page table lock protects only those entries which belong to
3947  * the page table corresponding to the fault address.
3948  *
3949  * This function doesn't cross the VMA boundaries, in order to call map_pages()
3950  * only once.
3951  *
3952  * fault_around_bytes defines how many bytes we'll try to map.
3953  * do_fault_around() expects it to be set to a power of two less than or equal
3954  * to PTRS_PER_PTE.
3955  *
3956  * The virtual address of the area that we map is naturally aligned to
3957  * fault_around_bytes rounded down to the machine page size
3958  * (and therefore to page order).  This way it's easier to guarantee
3959  * that we don't cross page table boundaries.
3960  */
3961 static vm_fault_t do_fault_around(struct vm_fault *vmf)
3962 {
3963 	unsigned long address = vmf->address, nr_pages, mask;
3964 	pgoff_t start_pgoff = vmf->pgoff;
3965 	pgoff_t end_pgoff;
3966 	int off;
3967 	vm_fault_t ret = 0;
3968 
3969 	nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT;
3970 	mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;
3971 
3972 	vmf->address = max(address & mask, vmf->vma->vm_start);
3973 	off = ((address - vmf->address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
3974 	start_pgoff -= off;
3975 
3976 	/*
3977 	 *  end_pgoff is either the end of the page table, the end of
3978 	 *  the vma or nr_pages from start_pgoff, depending what is nearest.
3979 	 *  the vma or nr_pages from start_pgoff, depending on what is nearest.
3980 	end_pgoff = start_pgoff -
3981 		((vmf->address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +
3982 		PTRS_PER_PTE - 1;
3983 	end_pgoff = min3(end_pgoff, vma_pages(vmf->vma) + vmf->vma->vm_pgoff - 1,
3984 			start_pgoff + nr_pages - 1);
3985 
3986 	if (pmd_none(*vmf->pmd)) {
3987 		vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
3988 		if (!vmf->prealloc_pte)
3989 			goto out;
3990 		smp_wmb(); /* See comment in __pte_alloc() */
3991 	}
3992 
3993 	vmf->vma->vm_ops->map_pages(vmf, start_pgoff, end_pgoff);
3994 
3995 	/* Huge page is mapped? Page fault is solved */
3996 	if (pmd_trans_huge(*vmf->pmd)) {
3997 		ret = VM_FAULT_NOPAGE;
3998 		goto out;
3999 	}
4000 
4001 	/* ->map_pages() hasn't done anything useful. Cold page cache? */
4002 	if (!vmf->pte)
4003 		goto out;
4004 
4005 	/* check if the page fault is solved */
4006 	vmf->pte -= (vmf->address >> PAGE_SHIFT) - (address >> PAGE_SHIFT);
4007 	if (!pte_none(*vmf->pte))
4008 		ret = VM_FAULT_NOPAGE;
4009 	pte_unmap_unlock(vmf->pte, vmf->ptl);
4010 out:
4011 	vmf->address = address;
4012 	vmf->pte = NULL;
4013 	return ret;
4014 }
4015 
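/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096,
 * PTRS_PER_PTE == 512 and fault_around_bytes == 65536, i.e. nr_pages == 16)
 * for the window computed in do_fault_around() above:
 *
 *	address      = 0x7f0000012000	// third page of the 64K-aligned window
 *	mask         = ~0xffffUL	-> vmf->address = 0x7f0000010000
 *	off          = 2		-> start_pgoff -= 2
 *	end_pgoff    = min3(end of the PTE table, end of the VMA,
 *			    start_pgoff + 15)
 *
 * so at most 16 surrounding pages are handed to ->map_pages() in one call.
 */
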
4016 static vm_fault_t do_read_fault(struct vm_fault *vmf)
4017 {
4018 	struct vm_area_struct *vma = vmf->vma;
4019 	vm_fault_t ret = 0;
4020 
4021 	/*
4022 	 * Let's call ->map_pages() first and use ->fault() as fallback
4023 	 * if page by the offset is not ready to be mapped (cold cache or
4024 	 * if the page at that offset is not ready to be mapped (cold cache or
4025 	 */
4026 	if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
4027 		ret = do_fault_around(vmf);
4028 		if (ret)
4029 			return ret;
4030 	}
4031 
4032 	ret = __do_fault(vmf);
4033 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4034 		return ret;
4035 
4036 	ret |= finish_fault(vmf);
4037 	unlock_page(vmf->page);
4038 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4039 		put_page(vmf->page);
4040 	return ret;
4041 }
4042 
4043 static vm_fault_t do_cow_fault(struct vm_fault *vmf)
4044 {
4045 	struct vm_area_struct *vma = vmf->vma;
4046 	vm_fault_t ret;
4047 
4048 	if (unlikely(anon_vma_prepare(vma)))
4049 		return VM_FAULT_OOM;
4050 
4051 	vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
4052 	if (!vmf->cow_page)
4053 		return VM_FAULT_OOM;
4054 
4055 	if (mem_cgroup_charge(vmf->cow_page, vma->vm_mm, GFP_KERNEL)) {
4056 		put_page(vmf->cow_page);
4057 		return VM_FAULT_OOM;
4058 	}
4059 	cgroup_throttle_swaprate(vmf->cow_page, GFP_KERNEL);
4060 
4061 	ret = __do_fault(vmf);
4062 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4063 		goto uncharge_out;
4064 	if (ret & VM_FAULT_DONE_COW)
4065 		return ret;
4066 
4067 	copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
4068 	__SetPageUptodate(vmf->cow_page);
4069 
4070 	ret |= finish_fault(vmf);
4071 	unlock_page(vmf->page);
4072 	put_page(vmf->page);
4073 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4074 		goto uncharge_out;
4075 	return ret;
4076 uncharge_out:
4077 	put_page(vmf->cow_page);
4078 	return ret;
4079 }
4080 
4081 static vm_fault_t do_shared_fault(struct vm_fault *vmf)
4082 {
4083 	struct vm_area_struct *vma = vmf->vma;
4084 	vm_fault_t ret, tmp;
4085 
4086 	ret = __do_fault(vmf);
4087 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4088 		return ret;
4089 
4090 	/*
4091 	 * Check if the backing address space wants to know that the page is
4092 	 * about to become writable
4093 	 */
4094 	if (vma->vm_ops->page_mkwrite) {
4095 		unlock_page(vmf->page);
4096 		tmp = do_page_mkwrite(vmf);
4097 		if (unlikely(!tmp ||
4098 				(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
4099 			put_page(vmf->page);
4100 			return tmp;
4101 		}
4102 	}
4103 
4104 	ret |= finish_fault(vmf);
4105 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
4106 					VM_FAULT_RETRY))) {
4107 		unlock_page(vmf->page);
4108 		put_page(vmf->page);
4109 		return ret;
4110 	}
4111 
4112 	ret |= fault_dirty_shared_page(vmf);
4113 	return ret;
4114 }
4115 
4116 /*
4117  * We enter with non-exclusive mmap_lock (to exclude vma changes,
4118  * but allow concurrent faults).
4119  * The mmap_lock may have been released depending on flags and our
4120  * return value.  See filemap_fault() and __lock_page_or_retry().
4121  * If mmap_lock is released, vma may become invalid (for example
4122  * by other thread calling munmap()).
4123  */
4124 static vm_fault_t do_fault(struct vm_fault *vmf)
4125 {
4126 	struct vm_area_struct *vma = vmf->vma;
4127 	struct mm_struct *vm_mm = vma->vm_mm;
4128 	vm_fault_t ret;
4129 
4130 	/*
4131 	 * The VMA was not fully populated on mmap(), or it is missing VM_DONTEXPAND
4132 	 */
4133 	if (!vma->vm_ops->fault) {
4134 		/*
4135 		 * If we find a migration pmd entry or a none pmd entry, which
4136 		 * should never happen, return SIGBUS
4137 		 */
4138 		if (unlikely(!pmd_present(*vmf->pmd)))
4139 			ret = VM_FAULT_SIGBUS;
4140 		else {
4141 			vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm,
4142 						       vmf->pmd,
4143 						       vmf->address,
4144 						       &vmf->ptl);
4145 			/*
4146 			 * Make sure this is not a temporary clearing of pte
4147 			 * by holding ptl and checking again. A R/M/W update
4148 			 * of the pte involves taking the ptl, clearing the pte
4149 			 * so that we don't have concurrent modification by
4150 			 * hardware, followed by an update.
4151 			 */
4152 			if (unlikely(pte_none(*vmf->pte)))
4153 				ret = VM_FAULT_SIGBUS;
4154 			else
4155 				ret = VM_FAULT_NOPAGE;
4156 
4157 			pte_unmap_unlock(vmf->pte, vmf->ptl);
4158 		}
4159 	} else if (!(vmf->flags & FAULT_FLAG_WRITE))
4160 		ret = do_read_fault(vmf);
4161 	else if (!(vma->vm_flags & VM_SHARED))
4162 		ret = do_cow_fault(vmf);
4163 	else
4164 		ret = do_shared_fault(vmf);
4165 
4166 	/* preallocated pagetable is unused: free it */
4167 	if (vmf->prealloc_pte) {
4168 		pte_free(vm_mm, vmf->prealloc_pte);
4169 		vmf->prealloc_pte = NULL;
4170 	}
4171 	return ret;
4172 }
4173 
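/*
 * Dispatch summary (illustrative recap of the code above, not new logic)
 * for do_fault():
 *
 *	no vma->vm_ops->fault           -> SIGBUS (or NOPAGE if a pte is already there)
 *	read fault                      -> do_read_fault()
 *	write fault, !VM_SHARED         -> do_cow_fault()
 *	write fault, VM_SHARED          -> do_shared_fault()
 */
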
4174 static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
4175 				unsigned long addr, int page_nid,
4176 				int *flags)
4177 {
4178 	get_page(page);
4179 
4180 	count_vm_numa_event(NUMA_HINT_FAULTS);
4181 	if (page_nid == numa_node_id()) {
4182 		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
4183 		*flags |= TNF_FAULT_LOCAL;
4184 	}
4185 
4186 	return mpol_misplaced(page, vma, addr);
4187 }
4188 
4189 static vm_fault_t do_numa_page(struct vm_fault *vmf)
4190 {
4191 	struct vm_area_struct *vma = vmf->vma;
4192 	struct page *page = NULL;
4193 	int page_nid = NUMA_NO_NODE;
4194 	int last_cpupid;
4195 	int target_nid;
4196 	bool migrated = false;
4197 	pte_t pte, old_pte;
4198 	bool was_writable = pte_savedwrite(vmf->orig_pte);
4199 	int flags = 0;
4200 
4201 	/*
4202 	 * The "pte" at this point cannot be used safely without
4203 	 * validation through pte_unmap_same(). It's of NUMA type but
4204 	 * the pfn may be garbage if the read is not atomic.
4205 	 */
4206 	vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd);
4207 	spin_lock(vmf->ptl);
4208 	if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) {
4209 		pte_unmap_unlock(vmf->pte, vmf->ptl);
4210 		goto out;
4211 	}
4212 
4213 	/*
4214 	 * Make it present again. Depending on how the arch implements
4215 	 * non-accessible ptes, some can allow access by kernel mode.
4216 	 */
4217 	old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
4218 	pte = pte_modify(old_pte, vma->vm_page_prot);
4219 	pte = pte_mkyoung(pte);
4220 	if (was_writable)
4221 		pte = pte_mkwrite(pte);
4222 	ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
4223 	update_mmu_cache(vma, vmf->address, vmf->pte);
4224 
4225 	page = vm_normal_page(vma, vmf->address, pte);
4226 	if (!page) {
4227 		pte_unmap_unlock(vmf->pte, vmf->ptl);
4228 		return 0;
4229 	}
4230 
4231 	/* TODO: handle PTE-mapped THP */
4232 	if (PageCompound(page)) {
4233 		pte_unmap_unlock(vmf->pte, vmf->ptl);
4234 		return 0;
4235 	}
4236 
4237 	/*
4238 	 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
4239 	 * much anyway since they can be in shared cache state. This misses
4240 	 * the case where a mapping is writable but the process never writes
4241 	 * to it: pte_write gets cleared during protection updates, and
4242 	 * pte_dirty has unpredictable behaviour between PTE scan updates,
4243 	 * background writeback, dirty balancing and application behaviour.
4244 	 */
4245 	if (!pte_write(pte))
4246 		flags |= TNF_NO_GROUP;
4247 
4248 	/*
4249 	 * Flag if the page is shared between multiple address spaces. This
4250 	 * is later used when determining whether to group tasks together
4251 	 */
4252 	if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED))
4253 		flags |= TNF_SHARED;
4254 
4255 	last_cpupid = page_cpupid_last(page);
4256 	page_nid = page_to_nid(page);
4257 	target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid,
4258 			&flags);
4259 	pte_unmap_unlock(vmf->pte, vmf->ptl);
4260 	if (target_nid == NUMA_NO_NODE) {
4261 		put_page(page);
4262 		goto out;
4263 	}
4264 
4265 	/* Migrate to the requested node */
4266 	migrated = migrate_misplaced_page(page, vma, target_nid);
4267 	if (migrated) {
4268 		page_nid = target_nid;
4269 		flags |= TNF_MIGRATED;
4270 	} else
4271 		flags |= TNF_MIGRATE_FAIL;
4272 
4273 out:
4274 	if (page_nid != NUMA_NO_NODE)
4275 		task_numa_fault(last_cpupid, page_nid, 1, flags);
4276 	return 0;
4277 }
4278 
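/*
 * Illustrative flow (for exposition only) of the NUMA hinting fault handled
 * by do_numa_page() above: the pte was made PROT_NONE by the NUMA balancing
 * scanner, the fault restores it, and the page may be migrated:
 *
 *	task touches page -> fault on a prot_none pte
 *	  -> restore protections (ptep_modify_prot_start/commit)
 *	  -> numa_migrate_prep()/mpol_misplaced() pick a target node
 *	  -> migrate_misplaced_page() moves the page if a target was chosen
 *	  -> task_numa_fault() records locality for the scheduler
 */
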
4279 static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)
4280 {
4281 	if (vma_is_anonymous(vmf->vma))
4282 		return do_huge_pmd_anonymous_page(vmf);
4283 	if (vmf->vma->vm_ops->huge_fault)
4284 		return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
4285 	return VM_FAULT_FALLBACK;
4286 }
4287 
4288 /* `inline' is required to avoid gcc 4.1.2 build error */
4289 static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd)
4290 {
4291 	if (vma_is_anonymous(vmf->vma)) {
4292 		if (userfaultfd_huge_pmd_wp(vmf->vma, orig_pmd))
4293 			return handle_userfault(vmf, VM_UFFD_WP);
4294 		return do_huge_pmd_wp_page(vmf, orig_pmd);
4295 	}
4296 	if (vmf->vma->vm_ops->huge_fault) {
4297 		vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
4298 
4299 		if (!(ret & VM_FAULT_FALLBACK))
4300 			return ret;
4301 	}
4302 
4303 	/* COW or write-notify handled on pte level: split pmd. */
4304 	__split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL);
4305 
4306 	return VM_FAULT_FALLBACK;
4307 }
4308 
4309 static vm_fault_t create_huge_pud(struct vm_fault *vmf)
4310 {
4311 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) &&			\
4312 	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
4313 	/* No support for anonymous transparent PUD pages yet */
4314 	if (vma_is_anonymous(vmf->vma))
4315 		goto split;
4316 	if (vmf->vma->vm_ops->huge_fault) {
4317 		vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
4318 
4319 		if (!(ret & VM_FAULT_FALLBACK))
4320 			return ret;
4321 	}
4322 split:
4323 	/* COW or write-notify not handled on PUD level: split pud. */
4324 	__split_huge_pud(vmf->vma, vmf->pud, vmf->address);
4325 #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
4326 	return VM_FAULT_FALLBACK;
4327 }
4328 
4329 static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
4330 {
4331 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4332 	/* No support for anonymous transparent PUD pages yet */
4333 	if (vma_is_anonymous(vmf->vma))
4334 		return VM_FAULT_FALLBACK;
4335 	if (vmf->vma->vm_ops->huge_fault)
4336 		return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
4337 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
4338 	return VM_FAULT_FALLBACK;
4339 }
4340 
4341 /*
4342  * These routines also need to handle stuff like marking pages dirty
4343  * and/or accessed for architectures that don't do it in hardware (most
4344  * RISC architectures).  The early dirtying is also good on the i386.
4345  *
4346  * There is also a hook called "update_mmu_cache()" that architectures
4347  * with external mmu caches can use to update those (ie the Sparc or
4348  * PowerPC hashed page tables that act as extended TLBs).
4349  *
4350  * We enter with non-exclusive mmap_lock (to exclude vma changes, but allow
4351  * concurrent faults).
4352  *
4353  * The mmap_lock may have been released depending on flags and our return value.
4354  * See filemap_fault() and __lock_page_or_retry().
4355  */
4356 static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
4357 {
4358 	pte_t entry;
4359 
4360 	if (unlikely(pmd_none(*vmf->pmd))) {
4361 		/*
4362 		 * Leave __pte_alloc() until later: because vm_ops->fault may
4363 		 * want to allocate a huge page, and if we expose the page table
4364 		 * for an instant, it will be difficult to retract from
4365 		 * concurrent faults and from rmap lookups.
4366 		 */
4367 		vmf->pte = NULL;
4368 	} else {
4369 		/* See comment in pte_alloc_one_map() */
4370 		if (pmd_devmap_trans_unstable(vmf->pmd))
4371 			return 0;
4372 		/*
4373 		 * A regular pmd is established and it can't morph into a huge
4374 		 * pmd from under us anymore at this point because we hold the
4375 		 * mmap_lock read mode and khugepaged takes it in write mode.
4376 		 * So now it's safe to run pte_offset_map().
4377 		 */
4378 		vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
4379 		vmf->orig_pte = *vmf->pte;
4380 
4381 		/*
4382 		 * some architectures can have larger ptes than wordsize,
4383 		 * e.g. ppc44x-defconfig has CONFIG_PTE_64BIT=y and
4384 		 * CONFIG_32BIT=y, so READ_ONCE cannot guarantee atomic
4385 		 * accesses.  The code below just needs a consistent view
4386 		 * for the ifs and we later double check anyway with the
4387 		 * ptl lock held. So here a barrier will do.
4388 		 */
4389 		barrier();
4390 		if (pte_none(vmf->orig_pte)) {
4391 			pte_unmap(vmf->pte);
4392 			vmf->pte = NULL;
4393 		}
4394 	}
4395 
4396 	if (!vmf->pte) {
4397 		if (vma_is_anonymous(vmf->vma))
4398 			return do_anonymous_page(vmf);
4399 		else
4400 			return do_fault(vmf);
4401 	}
4402 
4403 	if (!pte_present(vmf->orig_pte))
4404 		return do_swap_page(vmf);
4405 
4406 	if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
4407 		return do_numa_page(vmf);
4408 
4409 	vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
4410 	spin_lock(vmf->ptl);
4411 	entry = vmf->orig_pte;
4412 	if (unlikely(!pte_same(*vmf->pte, entry))) {
4413 		update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
4414 		goto unlock;
4415 	}
4416 	if (vmf->flags & FAULT_FLAG_WRITE) {
4417 		if (!pte_write(entry))
4418 			return do_wp_page(vmf);
4419 		entry = pte_mkdirty(entry);
4420 	}
4421 	entry = pte_mkyoung(entry);
4422 	if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
4423 				vmf->flags & FAULT_FLAG_WRITE)) {
4424 		update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
4425 	} else {
4426 		/* Skip spurious TLB flush for retried page fault */
4427 		if (vmf->flags & FAULT_FLAG_TRIED)
4428 			goto unlock;
4429 		/*
4430 		 * This is needed only for protection faults but the arch code
4431 		 * is not yet telling us if this is a protection fault or not.
4432 		 * This still avoids useless tlb flushes for .text page faults
4433 		 * with threads.
4434 		 */
4435 		if (vmf->flags & FAULT_FLAG_WRITE)
4436 			flush_tlb_fix_spurious_fault(vmf->vma, vmf->address);
4437 	}
4438 unlock:
4439 	pte_unmap_unlock(vmf->pte, vmf->ptl);
4440 	return 0;
4441 }
4442 
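/*
 * Dispatch summary (illustrative recap of the code above, not new logic)
 * for handle_pte_fault():
 *
 *	no pte / pte_none   -> do_anonymous_page() or do_fault()
 *	!pte_present        -> do_swap_page()
 *	pte_protnone        -> do_numa_page()
 *	write to R/O pte    -> do_wp_page()
 *	otherwise           -> mark the pte young/dirty, flush a spurious
 *	                       TLB entry if the hardware requires it
 */
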
4443 /*
4444  * By the time we get here, we already hold the mm semaphore
4445  *
4446  * The mmap_lock may have been released depending on flags and our
4447  * return value.  See filemap_fault() and __lock_page_or_retry().
4448  */
4449 static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
4450 		unsigned long address, unsigned int flags)
4451 {
4452 	struct vm_fault vmf = {
4453 		.vma = vma,
4454 		.address = address & PAGE_MASK,
4455 		.flags = flags,
4456 		.pgoff = linear_page_index(vma, address),
4457 		.gfp_mask = __get_fault_gfp_mask(vma),
4458 	};
4459 	unsigned int dirty = flags & FAULT_FLAG_WRITE;
4460 	struct mm_struct *mm = vma->vm_mm;
4461 	pgd_t *pgd;
4462 	p4d_t *p4d;
4463 	vm_fault_t ret;
4464 
4465 	pgd = pgd_offset(mm, address);
4466 	p4d = p4d_alloc(mm, pgd, address);
4467 	if (!p4d)
4468 		return VM_FAULT_OOM;
4469 
4470 	vmf.pud = pud_alloc(mm, p4d, address);
4471 	if (!vmf.pud)
4472 		return VM_FAULT_OOM;
4473 retry_pud:
4474 	if (pud_none(*vmf.pud) && __transparent_hugepage_enabled(vma)) {
4475 		ret = create_huge_pud(&vmf);
4476 		if (!(ret & VM_FAULT_FALLBACK))
4477 			return ret;
4478 	} else {
4479 		pud_t orig_pud = *vmf.pud;
4480 
4481 		barrier();
4482 		if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) {
4483 
4484 			/* NUMA case for anonymous PUDs would go here */
4485 
4486 			if (dirty && !pud_write(orig_pud)) {
4487 				ret = wp_huge_pud(&vmf, orig_pud);
4488 				if (!(ret & VM_FAULT_FALLBACK))
4489 					return ret;
4490 			} else {
4491 				huge_pud_set_accessed(&vmf, orig_pud);
4492 				return 0;
4493 			}
4494 		}
4495 	}
4496 
4497 	vmf.pmd = pmd_alloc(mm, vmf.pud, address);
4498 	if (!vmf.pmd)
4499 		return VM_FAULT_OOM;
4500 
4501 	/* Huge pud page fault raced with pmd_alloc? */
4502 	if (pud_trans_unstable(vmf.pud))
4503 		goto retry_pud;
4504 
4505 	if (pmd_none(*vmf.pmd) && __transparent_hugepage_enabled(vma)) {
4506 		ret = create_huge_pmd(&vmf);
4507 		if (!(ret & VM_FAULT_FALLBACK))
4508 			return ret;
4509 	} else {
4510 		pmd_t orig_pmd = *vmf.pmd;
4511 
4512 		barrier();
4513 		if (unlikely(is_swap_pmd(orig_pmd))) {
4514 			VM_BUG_ON(thp_migration_supported() &&
4515 					  !is_pmd_migration_entry(orig_pmd));
4516 			if (is_pmd_migration_entry(orig_pmd))
4517 				pmd_migration_entry_wait(mm, vmf.pmd);
4518 			return 0;
4519 		}
4520 		if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) {
4521 			if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
4522 				return do_huge_pmd_numa_page(&vmf, orig_pmd);
4523 
4524 			if (dirty && !pmd_write(orig_pmd)) {
4525 				ret = wp_huge_pmd(&vmf, orig_pmd);
4526 				if (!(ret & VM_FAULT_FALLBACK))
4527 					return ret;
4528 			} else {
4529 				huge_pmd_set_accessed(&vmf, orig_pmd);
4530 				return 0;
4531 			}
4532 		}
4533 	}
4534 
4535 	return handle_pte_fault(&vmf);
4536 }
4537 
4538 /**
4539  * mm_account_fault - Do page fault accounting
4540  *
4541  * @regs: the pt_regs struct pointer.  When set to NULL, will skip accounting
4542  *        of perf event counters, but we'll still do the per-task accounting for
4543  *        the task that triggered this page fault.
4544  * @address: the faulted address.
4545  * @flags: the fault flags.
4546  * @ret: the fault retcode.
4547  *
4548  * This will take care of most of the page fault accounting.  Meanwhile, it
4549  * will also include the PERF_COUNT_SW_PAGE_FAULTS_[MAJ|MIN] perf counter
4550  * updates.  However, note that the handling of PERF_COUNT_SW_PAGE_FAULTS should
4551  * still be done in the per-arch page fault handlers, at the entry of the page fault.
4552  */
4553 static inline void mm_account_fault(struct pt_regs *regs,
4554 				    unsigned long address, unsigned int flags,
4555 				    vm_fault_t ret)
4556 {
4557 	bool major;
4558 
4559 	/*
4560 	 * We don't do accounting for some specific faults:
4561 	 *
4562 	 * - Unsuccessful faults (e.g. when the address wasn't valid).  That
4563 	 *   includes arch_vma_access_permitted() failing before reaching here.
4564 	 *   So this is not a "this many hardware page faults" counter.  We
4565 	 *   should use the hw profiling for that.
4566 	 *
4567 	 * - Incomplete faults (VM_FAULT_RETRY).  They will only be counted
4568 	 *   once they're completed.
4569 	 */
4570 	if (ret & (VM_FAULT_ERROR | VM_FAULT_RETRY))
4571 		return;
4572 
4573 	/*
4574 	 * We define the fault as a major fault when the final successful fault
4575 	 * is VM_FAULT_MAJOR, or if it retried (which implies that we couldn't
4576 	 * handle it immediately previously).
4577  * handle it immediately on a previous attempt).
4578 	major = (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED);
4579 
4580 	if (major)
4581 		current->maj_flt++;
4582 	else
4583 		current->min_flt++;
4584 
4585 	/*
4586 	 * If the fault is done for GUP, regs will be NULL.  We only update
4587 	 * the per-thread fault counters for the task that triggered the
4588 	 * fault, and we skip the perf event updates.
4589 	 */
4590 	if (!regs)
4591 		return;
4592 
4593 	if (major)
4594 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
4595 	else
4596 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
4597 }
4598 
4599 /*
4600  * By the time we get here, we already hold the mm semaphore
4601  *
4602  * The mmap_lock may have been released depending on flags and our
4603  * return value.  See filemap_fault() and __lock_page_or_retry().
4604  */
4605 vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
4606 			   unsigned int flags, struct pt_regs *regs)
4607 {
4608 	vm_fault_t ret;
4609 
4610 	__set_current_state(TASK_RUNNING);
4611 
4612 	count_vm_event(PGFAULT);
4613 	count_memcg_event_mm(vma->vm_mm, PGFAULT);
4614 
4615 	/* do counter updates before entering the really critical section. */
4616 	check_sync_rss_stat(current);
4617 
4618 	if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
4619 					    flags & FAULT_FLAG_INSTRUCTION,
4620 					    flags & FAULT_FLAG_REMOTE))
4621 		return VM_FAULT_SIGSEGV;
4622 
4623 	/*
4624 	 * Enable the memcg OOM handling for faults triggered in user
4625 	 * space.  Kernel faults are handled more gracefully.
4626 	 */
4627 	if (flags & FAULT_FLAG_USER)
4628 		mem_cgroup_enter_user_fault();
4629 
4630 	if (unlikely(is_vm_hugetlb_page(vma)))
4631 		ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
4632 	else
4633 		ret = __handle_mm_fault(vma, address, flags);
4634 
4635 	if (flags & FAULT_FLAG_USER) {
4636 		mem_cgroup_exit_user_fault();
4637 		/*
4638 		 * The task may have entered a memcg OOM situation but
4639 		 * if the allocation error was handled gracefully (no
4640 		 * VM_FAULT_OOM), there is no need to kill anything.
4641 		 * Just clean up the OOM state peacefully.
4642 		 */
4643 		if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
4644 			mem_cgroup_oom_synchronize(false);
4645 	}
4646 
4647 	mm_account_fault(regs, address, flags, ret);
4648 
4649 	return ret;
4650 }
4651 EXPORT_SYMBOL_GPL(handle_mm_fault);
4652 
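/*
 * Illustrative caller sketch (a hypothetical arch fault handler, for
 * exposition only): the typical pattern is to resolve the VMA under
 * mmap_lock and retry when the fault path dropped the lock, roughly:
 *
 *	mmap_read_lock(mm);
 *	vma = find_vma(mm, address);
 *	...
 *	fault = handle_mm_fault(vma, address, flags, regs);
 *	if (fault & VM_FAULT_RETRY) {
 *		flags |= FAULT_FLAG_TRIED;
 *		goto retry;		// mmap_lock was released by the fault path
 *	}
 *	mmap_read_unlock(mm);
 */
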
4653 #ifndef __PAGETABLE_P4D_FOLDED
4654 /*
4655  * Allocate p4d page table.
4656  * We've already handled the fast-path in-line.
4657  */
4658 int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
4659 {
4660 	p4d_t *new = p4d_alloc_one(mm, address);
4661 	if (!new)
4662 		return -ENOMEM;
4663 
4664 	smp_wmb(); /* See comment in __pte_alloc */
4665 
4666 	spin_lock(&mm->page_table_lock);
4667 	if (pgd_present(*pgd))		/* Another has populated it */
4668 		p4d_free(mm, new);
4669 	else
4670 		pgd_populate(mm, pgd, new);
4671 	spin_unlock(&mm->page_table_lock);
4672 	return 0;
4673 }
4674 #endif /* __PAGETABLE_P4D_FOLDED */
4675 
4676 #ifndef __PAGETABLE_PUD_FOLDED
4677 /*
4678  * Allocate page upper directory.
4679  * We've already handled the fast-path in-line.
4680  */
4681 int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
4682 {
4683 	pud_t *new = pud_alloc_one(mm, address);
4684 	if (!new)
4685 		return -ENOMEM;
4686 
4687 	smp_wmb(); /* See comment in __pte_alloc */
4688 
4689 	spin_lock(&mm->page_table_lock);
4690 	if (!p4d_present(*p4d)) {
4691 		mm_inc_nr_puds(mm);
4692 		p4d_populate(mm, p4d, new);
4693 	} else	/* Another has populated it */
4694 		pud_free(mm, new);
4695 	spin_unlock(&mm->page_table_lock);
4696 	return 0;
4697 }
4698 #endif /* __PAGETABLE_PUD_FOLDED */
4699 
4700 #ifndef __PAGETABLE_PMD_FOLDED
4701 /*
4702  * Allocate page middle directory.
4703  * We've already handled the fast-path in-line.
4704  */
4705 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
4706 {
4707 	spinlock_t *ptl;
4708 	pmd_t *new = pmd_alloc_one(mm, address);
4709 	if (!new)
4710 		return -ENOMEM;
4711 
4712 	smp_wmb(); /* See comment in __pte_alloc */
4713 
4714 	ptl = pud_lock(mm, pud);
4715 	if (!pud_present(*pud)) {
4716 		mm_inc_nr_pmds(mm);
4717 		pud_populate(mm, pud, new);
4718 	} else	/* Another has populated it */
4719 		pmd_free(mm, new);
4720 	spin_unlock(ptl);
4721 	return 0;
4722 }
4723 #endif /* __PAGETABLE_PMD_FOLDED */
4724 
4725 static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
4726 			    struct mmu_notifier_range *range,
4727 			    pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
4728 {
4729 	pgd_t *pgd;
4730 	p4d_t *p4d;
4731 	pud_t *pud;
4732 	pmd_t *pmd;
4733 	pte_t *ptep;
4734 
4735 	pgd = pgd_offset(mm, address);
4736 	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
4737 		goto out;
4738 
4739 	p4d = p4d_offset(pgd, address);
4740 	if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
4741 		goto out;
4742 
4743 	pud = pud_offset(p4d, address);
4744 	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
4745 		goto out;
4746 
4747 	pmd = pmd_offset(pud, address);
4748 	VM_BUG_ON(pmd_trans_huge(*pmd));
4749 
4750 	if (pmd_huge(*pmd)) {
4751 		if (!pmdpp)
4752 			goto out;
4753 
4754 		if (range) {
4755 			mmu_notifier_range_init(range, MMU_NOTIFY_CLEAR, 0,
4756 						NULL, mm, address & PMD_MASK,
4757 						(address & PMD_MASK) + PMD_SIZE);
4758 			mmu_notifier_invalidate_range_start(range);
4759 		}
4760 		*ptlp = pmd_lock(mm, pmd);
4761 		if (pmd_huge(*pmd)) {
4762 			*pmdpp = pmd;
4763 			return 0;
4764 		}
4765 		spin_unlock(*ptlp);
4766 		if (range)
4767 			mmu_notifier_invalidate_range_end(range);
4768 	}
4769 
4770 	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
4771 		goto out;
4772 
4773 	if (range) {
4774 		mmu_notifier_range_init(range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
4775 					address & PAGE_MASK,
4776 					(address & PAGE_MASK) + PAGE_SIZE);
4777 		mmu_notifier_invalidate_range_start(range);
4778 	}
4779 	ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
4780 	if (!pte_present(*ptep))
4781 		goto unlock;
4782 	*ptepp = ptep;
4783 	return 0;
4784 unlock:
4785 	pte_unmap_unlock(ptep, *ptlp);
4786 	if (range)
4787 		mmu_notifier_invalidate_range_end(range);
4788 out:
4789 	return -EINVAL;
4790 }
4791 
4792 static inline int follow_pte(struct mm_struct *mm, unsigned long address,
4793 			     pte_t **ptepp, spinlock_t **ptlp)
4794 {
4795 	int res;
4796 
4797 	/* (void) is needed to make gcc happy */
4798 	(void) __cond_lock(*ptlp,
4799 			   !(res = __follow_pte_pmd(mm, address, NULL,
4800 						    ptepp, NULL, ptlp)));
4801 	return res;
4802 }
4803 
4804 int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
4805 		   struct mmu_notifier_range *range,
4806 		   pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
4807 {
4808 	int res;
4809 
4810 	/* (void) is needed to make gcc happy */
4811 	(void) __cond_lock(*ptlp,
4812 			   !(res = __follow_pte_pmd(mm, address, range,
4813 						    ptepp, pmdpp, ptlp)));
4814 	return res;
4815 }
4816 EXPORT_SYMBOL(follow_pte_pmd);
4817 
4818 /**
4819  * follow_pfn - look up PFN at a user virtual address
4820  * @vma: memory mapping
4821  * @address: user virtual address
4822  * @pfn: location to store found PFN
4823  *
4824  * Only IO mappings and raw PFN mappings are allowed.
4825  *
4826  * Return: zero and the pfn at @pfn on success, -ve otherwise.
4827  */
4828 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
4829 	unsigned long *pfn)
4830 {
4831 	int ret = -EINVAL;
4832 	spinlock_t *ptl;
4833 	pte_t *ptep;
4834 
4835 	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
4836 		return ret;
4837 
4838 	ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
4839 	if (ret)
4840 		return ret;
4841 	*pfn = pte_pfn(*ptep);
4842 	pte_unmap_unlock(ptep, ptl);
4843 	return 0;
4844 }
4845 EXPORT_SYMBOL(follow_pfn);
4846 
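/*
 * Usage sketch (illustrative, names hypothetical): a caller holding the
 * mmap_lock can translate a user virtual address inside a VM_IO/VM_PFNMAP
 * mapping:
 *
 *	unsigned long pfn;
 *
 *	if (!follow_pfn(vma, addr, &pfn))
 *		pr_debug("addr %#lx maps pfn %#lx\n", addr, pfn);
 */
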
4847 #ifdef CONFIG_HAVE_IOREMAP_PROT
4848 int follow_phys(struct vm_area_struct *vma,
4849 		unsigned long address, unsigned int flags,
4850 		unsigned long *prot, resource_size_t *phys)
4851 {
4852 	int ret = -EINVAL;
4853 	pte_t *ptep, pte;
4854 	spinlock_t *ptl;
4855 
4856 	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
4857 		goto out;
4858 
4859 	if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
4860 		goto out;
4861 	pte = *ptep;
4862 
4863 	if ((flags & FOLL_WRITE) && !pte_write(pte))
4864 		goto unlock;
4865 
4866 	*prot = pgprot_val(pte_pgprot(pte));
4867 	*phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
4868 
4869 	ret = 0;
4870 unlock:
4871 	pte_unmap_unlock(ptep, ptl);
4872 out:
4873 	return ret;
4874 }
4875 
4876 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
4877 			void *buf, int len, int write)
4878 {
4879 	resource_size_t phys_addr;
4880 	unsigned long prot = 0;
4881 	void __iomem *maddr;
4882 	int offset = addr & (PAGE_SIZE-1);
4883 
4884 	if (follow_phys(vma, addr, write, &prot, &phys_addr))
4885 		return -EINVAL;
4886 
4887 	maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
4888 	if (!maddr)
4889 		return -ENOMEM;
4890 
4891 	if (write)
4892 		memcpy_toio(maddr + offset, buf, len);
4893 	else
4894 		memcpy_fromio(buf, maddr + offset, len);
4895 	iounmap(maddr);
4896 
4897 	return len;
4898 }
4899 EXPORT_SYMBOL_GPL(generic_access_phys);
4900 #endif
4901 
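/*
 * Usage sketch (illustrative; foo_vm_ops is hypothetical): a driver that
 * maps MMIO with remap_pfn_range() can let ptrace/gdb peek at the mapping
 * by wiring up the ->access handler:
 *
 *	static const struct vm_operations_struct foo_vm_ops = {
 *		.access = generic_access_phys,
 *	};
 */
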
4902 /*
4903  * Access another process' address space as given in mm.  If non-NULL, use the
4904  * given task for page fault accounting.
4905  */
4906 int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
4907 		unsigned long addr, void *buf, int len, unsigned int gup_flags)
4908 {
4909 	struct vm_area_struct *vma;
4910 	void *old_buf = buf;
4911 	int write = gup_flags & FOLL_WRITE;
4912 
4913 	if (mmap_read_lock_killable(mm))
4914 		return 0;
4915 
4916 	/* ignore errors, just check how much was successfully transferred */
4917 	while (len) {
4918 		int bytes, ret, offset;
4919 		void *maddr;
4920 		struct page *page = NULL;
4921 
4922 		ret = get_user_pages_remote(mm, addr, 1,
4923 				gup_flags, &page, &vma, NULL);
4924 		if (ret <= 0) {
4925 #ifndef CONFIG_HAVE_IOREMAP_PROT
4926 			break;
4927 #else
4928 			/*
4929 			 * Check if this is a VM_IO | VM_PFNMAP VMA, which
4930 			 * we can access using slightly different code.
4931 			 */
4932 			vma = find_vma(mm, addr);
4933 			if (!vma || vma->vm_start > addr)
4934 				break;
4935 			if (vma->vm_ops && vma->vm_ops->access)
4936 				ret = vma->vm_ops->access(vma, addr, buf,
4937 							  len, write);
4938 			if (ret <= 0)
4939 				break;
4940 			bytes = ret;
4941 #endif
4942 		} else {
4943 			bytes = len;
4944 			offset = addr & (PAGE_SIZE-1);
4945 			if (bytes > PAGE_SIZE-offset)
4946 				bytes = PAGE_SIZE-offset;
4947 
4948 			maddr = kmap(page);
4949 			if (write) {
4950 				copy_to_user_page(vma, page, addr,
4951 						  maddr + offset, buf, bytes);
4952 				set_page_dirty_lock(page);
4953 			} else {
4954 				copy_from_user_page(vma, page, addr,
4955 						    buf, maddr + offset, bytes);
4956 			}
4957 			kunmap(page);
4958 			put_page(page);
4959 		}
4960 		len -= bytes;
4961 		buf += bytes;
4962 		addr += bytes;
4963 	}
4964 	mmap_read_unlock(mm);
4965 
4966 	return buf - old_buf;
4967 }
4968 
4969 /**
4970  * access_remote_vm - access another process' address space
4971  * @mm:		the mm_struct of the target address space
4972  * @addr:	start address to access
4973  * @buf:	source or destination buffer
4974  * @len:	number of bytes to transfer
4975  * @gup_flags:	flags modifying lookup behaviour
4976  *
4977  * The caller must hold a reference on @mm.
4978  *
4979  * Return: number of bytes copied from source to destination.
4980  */
4981 int access_remote_vm(struct mm_struct *mm, unsigned long addr,
4982 		void *buf, int len, unsigned int gup_flags)
4983 {
4984 	return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags);
4985 }
4986 
4987 /*
4988  * Access another process' address space.
4989  * Source/target buffer must be in kernel space.
4990  * Do not walk the page table directly; use get_user_pages().
4991  */
4992 int access_process_vm(struct task_struct *tsk, unsigned long addr,
4993 		void *buf, int len, unsigned int gup_flags)
4994 {
4995 	struct mm_struct *mm;
4996 	int ret;
4997 
4998 	mm = get_task_mm(tsk);
4999 	if (!mm)
5000 		return 0;
5001 
5002 	ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
5003 
5004 	mmput(mm);
5005 
5006 	return ret;
5007 }
5008 EXPORT_SYMBOL_GPL(access_process_vm);
5009 
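/*
 * Usage sketch (illustrative; "child" is a hypothetical task_struct
 * pointer): copy a few bytes out of another task, much like the generic
 * ptrace(PTRACE_PEEKDATA) path does:
 *
 *	char buf[8];
 *	int copied = access_process_vm(child, addr, buf, sizeof(buf),
 *				       FOLL_FORCE);
 */
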
5010 /*
5011  * Print the name of a VMA.
5012  */
5013 void print_vma_addr(char *prefix, unsigned long ip)
5014 {
5015 	struct mm_struct *mm = current->mm;
5016 	struct vm_area_struct *vma;
5017 
5018 	/*
5019 	 * we might be running from an atomic context so we cannot sleep
5020 	 */
5021 	if (!mmap_read_trylock(mm))
5022 		return;
5023 
5024 	vma = find_vma(mm, ip);
5025 	if (vma && vma->vm_file) {
5026 		struct file *f = vma->vm_file;
5027 		char *buf = (char *)__get_free_page(GFP_NOWAIT);
5028 		if (buf) {
5029 			char *p;
5030 
5031 			p = file_path(f, buf, PAGE_SIZE);
5032 			if (IS_ERR(p))
5033 				p = "?";
5034 			printk("%s%s[%lx+%lx]", prefix, kbasename(p),
5035 					vma->vm_start,
5036 					vma->vm_end - vma->vm_start);
5037 			free_page((unsigned long)buf);
5038 		}
5039 	}
5040 	mmap_read_unlock(mm);
5041 }
5042 
5043 #if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
5044 void __might_fault(const char *file, int line)
5045 {
5046 	/*
5047 	 * Some code (nfs/sunrpc) uses socket ops on kernel memory while
5048 	 * holding the mmap_lock, this is safe because kernel memory doesn't
5049 	 * get paged out, therefore we'll never actually fault, and the
5050 	 * below annotations will generate false positives.
5051 	 */
5052 	if (uaccess_kernel())
5053 		return;
5054 	if (pagefault_disabled())
5055 		return;
5056 	__might_sleep(file, line, 0);
5057 #if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
5058 	if (current->mm)
5059 		might_lock_read(&current->mm->mmap_lock);
5060 #endif
5061 }
5062 EXPORT_SYMBOL(__might_fault);
5063 #endif
5064 
5065 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
5066 /*
5067  * Process all subpages of the specified huge page with the specified
5068  * operation.  The target subpage will be processed last to keep its
5069  * cache lines hot.
5070  */
5071 static inline void process_huge_page(
5072 	unsigned long addr_hint, unsigned int pages_per_huge_page,
5073 	void (*process_subpage)(unsigned long addr, int idx, void *arg),
5074 	void *arg)
5075 {
5076 	int i, n, base, l;
5077 	unsigned long addr = addr_hint &
5078 		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
5079 
5080 	/* Process target subpage last to keep its cache lines hot */
5081 	might_sleep();
5082 	n = (addr_hint - addr) / PAGE_SIZE;
5083 	if (2 * n <= pages_per_huge_page) {
5084 		/* If target subpage in first half of huge page */
5085 		base = 0;
5086 		l = n;
5087 		/* Process subpages at the end of huge page */
5088 		for (i = pages_per_huge_page - 1; i >= 2 * n; i--) {
5089 			cond_resched();
5090 			process_subpage(addr + i * PAGE_SIZE, i, arg);
5091 		}
5092 	} else {
5093 		/* If target subpage in second half of huge page */
5094 		base = pages_per_huge_page - 2 * (pages_per_huge_page - n);
5095 		l = pages_per_huge_page - n;
5096 		/* Process subpages at the beginning of the huge page */
5097 		for (i = 0; i < base; i++) {
5098 			cond_resched();
5099 			process_subpage(addr + i * PAGE_SIZE, i, arg);
5100 		}
5101 	}
5102 	/*
5103 	 * Process remaining subpages in left-right-left-right pattern
5104 	 * towards the target subpage
5105 	 */
5106 	for (i = 0; i < l; i++) {
5107 		int left_idx = base + i;
5108 		int right_idx = base + 2 * l - 1 - i;
5109 
5110 		cond_resched();
5111 		process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
5112 		cond_resched();
5113 		process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
5114 	}
5115 }
5116 
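/*
 * Worked example (illustrative) for process_huge_page() above with
 * pages_per_huge_page == 8 and the target subpage at index n == 2:
 *
 *	tail loop:     7, 6, 5, 4	// subpages far from the target first
 *	zig-zag loop:  0, 3, 1, 2	// ends on the target, keeping it cache-hot
 */
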
5117 static void clear_gigantic_page(struct page *page,
5118 				unsigned long addr,
5119 				unsigned int pages_per_huge_page)
5120 {
5121 	int i;
5122 	struct page *p = page;
5123 
5124 	might_sleep();
5125 	for (i = 0; i < pages_per_huge_page;
5126 	     i++, p = mem_map_next(p, page, i)) {
5127 		cond_resched();
5128 		clear_user_highpage(p, addr + i * PAGE_SIZE);
5129 	}
5130 }
5131 
5132 static void clear_subpage(unsigned long addr, int idx, void *arg)
5133 {
5134 	struct page *page = arg;
5135 
5136 	clear_user_highpage(page + idx, addr);
5137 }
5138 
5139 void clear_huge_page(struct page *page,
5140 		     unsigned long addr_hint, unsigned int pages_per_huge_page)
5141 {
5142 	unsigned long addr = addr_hint &
5143 		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
5144 
5145 	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
5146 		clear_gigantic_page(page, addr, pages_per_huge_page);
5147 		return;
5148 	}
5149 
5150 	process_huge_page(addr_hint, pages_per_huge_page, clear_subpage, page);
5151 }
5152 
5153 static void copy_user_gigantic_page(struct page *dst, struct page *src,
5154 				    unsigned long addr,
5155 				    struct vm_area_struct *vma,
5156 				    unsigned int pages_per_huge_page)
5157 {
5158 	int i;
5159 	struct page *dst_base = dst;
5160 	struct page *src_base = src;
5161 
5162 	for (i = 0; i < pages_per_huge_page; ) {
5163 		cond_resched();
5164 		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
5165 
5166 		i++;
5167 		dst = mem_map_next(dst, dst_base, i);
5168 		src = mem_map_next(src, src_base, i);
5169 	}
5170 }
5171 
5172 struct copy_subpage_arg {
5173 	struct page *dst;
5174 	struct page *src;
5175 	struct vm_area_struct *vma;
5176 };
5177 
5178 static void copy_subpage(unsigned long addr, int idx, void *arg)
5179 {
5180 	struct copy_subpage_arg *copy_arg = arg;
5181 
5182 	copy_user_highpage(copy_arg->dst + idx, copy_arg->src + idx,
5183 			   addr, copy_arg->vma);
5184 }
5185 
5186 void copy_user_huge_page(struct page *dst, struct page *src,
5187 			 unsigned long addr_hint, struct vm_area_struct *vma,
5188 			 unsigned int pages_per_huge_page)
5189 {
5190 	unsigned long addr = addr_hint &
5191 		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
5192 	struct copy_subpage_arg arg = {
5193 		.dst = dst,
5194 		.src = src,
5195 		.vma = vma,
5196 	};
5197 
5198 	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
5199 		copy_user_gigantic_page(dst, src, addr, vma,
5200 					pages_per_huge_page);
5201 		return;
5202 	}
5203 
5204 	process_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg);
5205 }
5206 
5207 long copy_huge_page_from_user(struct page *dst_page,
5208 				const void __user *usr_src,
5209 				unsigned int pages_per_huge_page,
5210 				bool allow_pagefault)
5211 {
5212 	void *src = (void *)usr_src;
5213 	void *page_kaddr;
5214 	unsigned long i, rc = 0;
5215 	unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;
5216 
5217 	for (i = 0; i < pages_per_huge_page; i++) {
5218 		if (allow_pagefault)
5219 			page_kaddr = kmap(dst_page + i);
5220 		else
5221 			page_kaddr = kmap_atomic(dst_page + i);
5222 		rc = copy_from_user(page_kaddr,
5223 				(const void __user *)(src + i * PAGE_SIZE),
5224 				PAGE_SIZE);
5225 		if (allow_pagefault)
5226 			kunmap(dst_page + i);
5227 		else
5228 			kunmap_atomic(page_kaddr);
5229 
5230 		ret_val -= (PAGE_SIZE - rc);
5231 		if (rc)
5232 			break;
5233 
5234 		cond_resched();
5235 	}
5236 	return ret_val;
5237 }
5238 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
5239 
5240 #if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
5241 
5242 static struct kmem_cache *page_ptl_cachep;
5243 
5244 void __init ptlock_cache_init(void)
5245 {
5246 	page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
5247 			SLAB_PANIC, NULL);
5248 }
5249 
5250 bool ptlock_alloc(struct page *page)
5251 {
5252 	spinlock_t *ptl;
5253 
5254 	ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
5255 	if (!ptl)
5256 		return false;
5257 	page->ptl = ptl;
5258 	return true;
5259 }
5260 
5261 void ptlock_free(struct page *page)
5262 {
5263 	kmem_cache_free(page_ptl_cachep, page->ptl);
5264 }
5265 #endif
5266