xref: /openbmc/linux/mm/gup.c (revision d2ba09c1)
1 #include <linux/kernel.h>
2 #include <linux/errno.h>
3 #include <linux/err.h>
4 #include <linux/spinlock.h>
5 
6 #include <linux/mm.h>
7 #include <linux/memremap.h>
8 #include <linux/pagemap.h>
9 #include <linux/rmap.h>
10 #include <linux/swap.h>
11 #include <linux/swapops.h>
12 
13 #include <linux/sched/signal.h>
14 #include <linux/rwsem.h>
15 #include <linux/hugetlb.h>
16 
17 #include <asm/mmu_context.h>
18 #include <asm/pgtable.h>
19 #include <asm/tlbflush.h>
20 
21 #include "internal.h"
22 
23 static struct page *no_page_table(struct vm_area_struct *vma,
24 		unsigned int flags)
25 {
26 	/*
27 	 * When core dumping an enormous anonymous area that nobody
28 	 * has touched so far, we don't want to allocate unnecessary pages or
29 	 * page tables.  Return error instead of NULL to skip handle_mm_fault,
30 	 * then get_dump_page() will return NULL to leave a hole in the dump.
31 	 * But we can only make this optimization where a hole would surely
32 	 * be zero-filled if handle_mm_fault() actually did handle it.
33 	 */
34 	if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault))
35 		return ERR_PTR(-EFAULT);
36 	return NULL;
37 }
38 
39 static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
40 		pte_t *pte, unsigned int flags)
41 {
42 	/* No page to take a reference on */
43 	if (flags & FOLL_GET)
44 		return -EFAULT;
45 
46 	if (flags & FOLL_TOUCH) {
47 		pte_t entry = *pte;
48 
49 		if (flags & FOLL_WRITE)
50 			entry = pte_mkdirty(entry);
51 		entry = pte_mkyoung(entry);
52 
53 		if (!pte_same(*pte, entry)) {
54 			set_pte_at(vma->vm_mm, address, pte, entry);
55 			update_mmu_cache(vma, address, pte);
56 		}
57 	}
58 
59 	/* Proper page table entry exists, but no corresponding struct page */
60 	return -EEXIST;
61 }
62 
63 /*
64  * FOLL_FORCE can write to even unwritable pte's, but only
65  * after we've gone through a COW cycle and they are dirty.
66  */
67 static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
68 {
69 	return pte_write(pte) ||
70 		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
71 }
72 
73 static struct page *follow_page_pte(struct vm_area_struct *vma,
74 		unsigned long address, pmd_t *pmd, unsigned int flags)
75 {
76 	struct mm_struct *mm = vma->vm_mm;
77 	struct dev_pagemap *pgmap = NULL;
78 	struct page *page;
79 	spinlock_t *ptl;
80 	pte_t *ptep, pte;
81 
82 retry:
83 	if (unlikely(pmd_bad(*pmd)))
84 		return no_page_table(vma, flags);
85 
86 	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
87 	pte = *ptep;
88 	if (!pte_present(pte)) {
89 		swp_entry_t entry;
90 		/*
91 		 * KSM's break_ksm() relies upon recognizing a ksm page
92 		 * even while it is being migrated, so for that case we
93 		 * need migration_entry_wait().
94 		 */
95 		if (likely(!(flags & FOLL_MIGRATION)))
96 			goto no_page;
97 		if (pte_none(pte))
98 			goto no_page;
99 		entry = pte_to_swp_entry(pte);
100 		if (!is_migration_entry(entry))
101 			goto no_page;
102 		pte_unmap_unlock(ptep, ptl);
103 		migration_entry_wait(mm, pmd, address);
104 		goto retry;
105 	}
106 	if ((flags & FOLL_NUMA) && pte_protnone(pte))
107 		goto no_page;
108 	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
109 		pte_unmap_unlock(ptep, ptl);
110 		return NULL;
111 	}
112 
113 	page = vm_normal_page(vma, address, pte);
114 	if (!page && pte_devmap(pte) && (flags & FOLL_GET)) {
115 		/*
116 		 * Only return device mapping pages in the FOLL_GET case since
117 		 * they are only valid while holding the pgmap reference.
118 		 */
119 		pgmap = get_dev_pagemap(pte_pfn(pte), NULL);
120 		if (pgmap)
121 			page = pte_page(pte);
122 		else
123 			goto no_page;
124 	} else if (unlikely(!page)) {
125 		if (flags & FOLL_DUMP) {
126 			/* Avoid special (like zero) pages in core dumps */
127 			page = ERR_PTR(-EFAULT);
128 			goto out;
129 		}
130 
131 		if (is_zero_pfn(pte_pfn(pte))) {
132 			page = pte_page(pte);
133 		} else {
134 			int ret;
135 
136 			ret = follow_pfn_pte(vma, address, ptep, flags);
137 			page = ERR_PTR(ret);
138 			goto out;
139 		}
140 	}
141 
142 	if (flags & FOLL_SPLIT && PageTransCompound(page)) {
143 		int ret;
144 		get_page(page);
145 		pte_unmap_unlock(ptep, ptl);
146 		lock_page(page);
147 		ret = split_huge_page(page);
148 		unlock_page(page);
149 		put_page(page);
150 		if (ret)
151 			return ERR_PTR(ret);
152 		goto retry;
153 	}
154 
155 	if (flags & FOLL_GET) {
156 		get_page(page);
157 
158 		/* drop the pgmap reference now that we hold the page */
159 		if (pgmap) {
160 			put_dev_pagemap(pgmap);
161 			pgmap = NULL;
162 		}
163 	}
164 	if (flags & FOLL_TOUCH) {
165 		if ((flags & FOLL_WRITE) &&
166 		    !pte_dirty(pte) && !PageDirty(page))
167 			set_page_dirty(page);
168 		/*
169 		 * pte_mkyoung() would be more correct here, but atomic care
170 		 * is needed to avoid losing the dirty bit: it is easier to use
171 		 * mark_page_accessed().
172 		 */
173 		mark_page_accessed(page);
174 	}
175 	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
176 		/* Do not mlock pte-mapped THP */
177 		if (PageTransCompound(page))
178 			goto out;
179 
180 		/*
181 		 * The preliminary mapping check is mainly to avoid the
182 		 * pointless overhead of lock_page on the ZERO_PAGE
183 		 * which might bounce very badly if there is contention.
184 		 *
185 		 * If the page is already locked, we don't need to
186 		 * handle it now - vmscan will handle it later if and
187 		 * when it attempts to reclaim the page.
188 		 */
189 		if (page->mapping && trylock_page(page)) {
190 			lru_add_drain();  /* push cached pages to LRU */
191 			/*
192 			 * Because we lock page here, and migration is
193 			 * blocked by the pte's page reference, and we
194 			 * know the page is still mapped, we don't even
195 			 * need to check for file-cache page truncation.
196 			 */
197 			mlock_vma_page(page);
198 			unlock_page(page);
199 		}
200 	}
201 out:
202 	pte_unmap_unlock(ptep, ptl);
203 	return page;
204 no_page:
205 	pte_unmap_unlock(ptep, ptl);
206 	if (!pte_none(pte))
207 		return NULL;
208 	return no_page_table(vma, flags);
209 }
210 
211 static struct page *follow_pmd_mask(struct vm_area_struct *vma,
212 				    unsigned long address, pud_t *pudp,
213 				    unsigned int flags, unsigned int *page_mask)
214 {
215 	pmd_t *pmd;
216 	spinlock_t *ptl;
217 	struct page *page;
218 	struct mm_struct *mm = vma->vm_mm;
219 
220 	pmd = pmd_offset(pudp, address);
221 	if (pmd_none(*pmd))
222 		return no_page_table(vma, flags);
223 	if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
224 		page = follow_huge_pmd(mm, address, pmd, flags);
225 		if (page)
226 			return page;
227 		return no_page_table(vma, flags);
228 	}
229 	if (is_hugepd(__hugepd(pmd_val(*pmd)))) {
230 		page = follow_huge_pd(vma, address,
231 				      __hugepd(pmd_val(*pmd)), flags,
232 				      PMD_SHIFT);
233 		if (page)
234 			return page;
235 		return no_page_table(vma, flags);
236 	}
237 retry:
238 	if (!pmd_present(*pmd)) {
239 		if (likely(!(flags & FOLL_MIGRATION)))
240 			return no_page_table(vma, flags);
241 		VM_BUG_ON(thp_migration_supported() &&
242 				  !is_pmd_migration_entry(*pmd));
243 		if (is_pmd_migration_entry(*pmd))
244 			pmd_migration_entry_wait(mm, pmd);
245 		goto retry;
246 	}
247 	if (pmd_devmap(*pmd)) {
248 		ptl = pmd_lock(mm, pmd);
249 		page = follow_devmap_pmd(vma, address, pmd, flags);
250 		spin_unlock(ptl);
251 		if (page)
252 			return page;
253 	}
254 	if (likely(!pmd_trans_huge(*pmd)))
255 		return follow_page_pte(vma, address, pmd, flags);
256 
257 	if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
258 		return no_page_table(vma, flags);
259 
260 retry_locked:
261 	ptl = pmd_lock(mm, pmd);
262 	if (unlikely(!pmd_present(*pmd))) {
263 		spin_unlock(ptl);
264 		if (likely(!(flags & FOLL_MIGRATION)))
265 			return no_page_table(vma, flags);
266 		pmd_migration_entry_wait(mm, pmd);
267 		goto retry_locked;
268 	}
269 	if (unlikely(!pmd_trans_huge(*pmd))) {
270 		spin_unlock(ptl);
271 		return follow_page_pte(vma, address, pmd, flags);
272 	}
273 	if (flags & FOLL_SPLIT) {
274 		int ret;
275 		page = pmd_page(*pmd);
276 		if (is_huge_zero_page(page)) {
277 			spin_unlock(ptl);
278 			ret = 0;
279 			split_huge_pmd(vma, pmd, address);
280 			if (pmd_trans_unstable(pmd))
281 				ret = -EBUSY;
282 		} else {
283 			get_page(page);
284 			spin_unlock(ptl);
285 			lock_page(page);
286 			ret = split_huge_page(page);
287 			unlock_page(page);
288 			put_page(page);
289 			if (pmd_none(*pmd))
290 				return no_page_table(vma, flags);
291 		}
292 
293 		return ret ? ERR_PTR(ret) :
294 			follow_page_pte(vma, address, pmd, flags);
295 	}
296 	page = follow_trans_huge_pmd(vma, address, pmd, flags);
297 	spin_unlock(ptl);
298 	*page_mask = HPAGE_PMD_NR - 1;
299 	return page;
300 }
301 
302 
303 static struct page *follow_pud_mask(struct vm_area_struct *vma,
304 				    unsigned long address, p4d_t *p4dp,
305 				    unsigned int flags, unsigned int *page_mask)
306 {
307 	pud_t *pud;
308 	spinlock_t *ptl;
309 	struct page *page;
310 	struct mm_struct *mm = vma->vm_mm;
311 
312 	pud = pud_offset(p4dp, address);
313 	if (pud_none(*pud))
314 		return no_page_table(vma, flags);
315 	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
316 		page = follow_huge_pud(mm, address, pud, flags);
317 		if (page)
318 			return page;
319 		return no_page_table(vma, flags);
320 	}
321 	if (is_hugepd(__hugepd(pud_val(*pud)))) {
322 		page = follow_huge_pd(vma, address,
323 				      __hugepd(pud_val(*pud)), flags,
324 				      PUD_SHIFT);
325 		if (page)
326 			return page;
327 		return no_page_table(vma, flags);
328 	}
329 	if (pud_devmap(*pud)) {
330 		ptl = pud_lock(mm, pud);
331 		page = follow_devmap_pud(vma, address, pud, flags);
332 		spin_unlock(ptl);
333 		if (page)
334 			return page;
335 	}
336 	if (unlikely(pud_bad(*pud)))
337 		return no_page_table(vma, flags);
338 
339 	return follow_pmd_mask(vma, address, pud, flags, page_mask);
340 }
341 
342 
343 static struct page *follow_p4d_mask(struct vm_area_struct *vma,
344 				    unsigned long address, pgd_t *pgdp,
345 				    unsigned int flags, unsigned int *page_mask)
346 {
347 	p4d_t *p4d;
348 	struct page *page;
349 
350 	p4d = p4d_offset(pgdp, address);
351 	if (p4d_none(*p4d))
352 		return no_page_table(vma, flags);
353 	BUILD_BUG_ON(p4d_huge(*p4d));
354 	if (unlikely(p4d_bad(*p4d)))
355 		return no_page_table(vma, flags);
356 
357 	if (is_hugepd(__hugepd(p4d_val(*p4d)))) {
358 		page = follow_huge_pd(vma, address,
359 				      __hugepd(p4d_val(*p4d)), flags,
360 				      P4D_SHIFT);
361 		if (page)
362 			return page;
363 		return no_page_table(vma, flags);
364 	}
365 	return follow_pud_mask(vma, address, p4d, flags, page_mask);
366 }
367 
368 /**
369  * follow_page_mask - look up a page descriptor from a user-virtual address
370  * @vma: vm_area_struct mapping @address
371  * @address: virtual address to look up
372  * @flags: flags modifying lookup behaviour
373  * @page_mask: on output, *page_mask is set according to the size of the page
374  *
375  * @flags can have FOLL_ flags set, defined in <linux/mm.h>
376  *
377  * Returns the mapped (struct page *), %NULL if no mapping exists, or
378  * an error pointer if there is a mapping to something not represented
379  * by a page descriptor (see also vm_normal_page()).
380  */
381 struct page *follow_page_mask(struct vm_area_struct *vma,
382 			      unsigned long address, unsigned int flags,
383 			      unsigned int *page_mask)
384 {
385 	pgd_t *pgd;
386 	struct page *page;
387 	struct mm_struct *mm = vma->vm_mm;
388 
389 	*page_mask = 0;
390 
391 	/* make this handle hugepd */
392 	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
393 	if (!IS_ERR(page)) {
394 		BUG_ON(flags & FOLL_GET);
395 		return page;
396 	}
397 
398 	pgd = pgd_offset(mm, address);
399 
400 	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
401 		return no_page_table(vma, flags);
402 
403 	if (pgd_huge(*pgd)) {
404 		page = follow_huge_pgd(mm, address, pgd, flags);
405 		if (page)
406 			return page;
407 		return no_page_table(vma, flags);
408 	}
409 	if (is_hugepd(__hugepd(pgd_val(*pgd)))) {
410 		page = follow_huge_pd(vma, address,
411 				      __hugepd(pgd_val(*pgd)), flags,
412 				      PGDIR_SHIFT);
413 		if (page)
414 			return page;
415 		return no_page_table(vma, flags);
416 	}
417 
418 	return follow_p4d_mask(vma, address, pgd, flags, page_mask);
419 }
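
/*
 * Illustrative note (not part of this file): the follow_page() helper in
 * <linux/mm.h> is, roughly, a thin wrapper around follow_page_mask() that
 * discards the page mask; callers hold mmap_sem, e.g.:
 *
 *	unsigned int unused_page_mask;
 *	struct page *page;
 *
 *	page = follow_page_mask(vma, address, foll_flags, &unused_page_mask);
 *	if (IS_ERR_OR_NULL(page))
 *		... fault the page in or fail ...
 */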
420 
421 static int get_gate_page(struct mm_struct *mm, unsigned long address,
422 		unsigned int gup_flags, struct vm_area_struct **vma,
423 		struct page **page)
424 {
425 	pgd_t *pgd;
426 	p4d_t *p4d;
427 	pud_t *pud;
428 	pmd_t *pmd;
429 	pte_t *pte;
430 	int ret = -EFAULT;
431 
432 	/* user gate pages are read-only */
433 	if (gup_flags & FOLL_WRITE)
434 		return -EFAULT;
435 	if (address > TASK_SIZE)
436 		pgd = pgd_offset_k(address);
437 	else
438 		pgd = pgd_offset_gate(mm, address);
439 	BUG_ON(pgd_none(*pgd));
440 	p4d = p4d_offset(pgd, address);
441 	BUG_ON(p4d_none(*p4d));
442 	pud = pud_offset(p4d, address);
443 	BUG_ON(pud_none(*pud));
444 	pmd = pmd_offset(pud, address);
445 	if (!pmd_present(*pmd))
446 		return -EFAULT;
447 	VM_BUG_ON(pmd_trans_huge(*pmd));
448 	pte = pte_offset_map(pmd, address);
449 	if (pte_none(*pte))
450 		goto unmap;
451 	*vma = get_gate_vma(mm);
452 	if (!page)
453 		goto out;
454 	*page = vm_normal_page(*vma, address, *pte);
455 	if (!*page) {
456 		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
457 			goto unmap;
458 		*page = pte_page(*pte);
459 
460 		/*
461 		 * This should never happen (a device public page in the gate
462 		 * area).
463 		 */
464 		if (is_device_public_page(*page))
465 			goto unmap;
466 	}
467 	get_page(*page);
468 out:
469 	ret = 0;
470 unmap:
471 	pte_unmap(pte);
472 	return ret;
473 }
474 
475 /*
476  * mmap_sem must be held on entry.  If @nonblocking != NULL and
477  * *@flags does not include FOLL_NOWAIT, the mmap_sem may be released.
478  * If it is, *@nonblocking will be set to 0 and -EBUSY returned.
479  */
480 static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
481 		unsigned long address, unsigned int *flags, int *nonblocking)
482 {
483 	unsigned int fault_flags = 0;
484 	int ret;
485 
486 	/* mlock all present pages, but do not fault in new pages */
487 	if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
488 		return -ENOENT;
489 	if (*flags & FOLL_WRITE)
490 		fault_flags |= FAULT_FLAG_WRITE;
491 	if (*flags & FOLL_REMOTE)
492 		fault_flags |= FAULT_FLAG_REMOTE;
493 	if (nonblocking)
494 		fault_flags |= FAULT_FLAG_ALLOW_RETRY;
495 	if (*flags & FOLL_NOWAIT)
496 		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
497 	if (*flags & FOLL_TRIED) {
498 		VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY);
499 		fault_flags |= FAULT_FLAG_TRIED;
500 	}
501 
502 	ret = handle_mm_fault(vma, address, fault_flags);
503 	if (ret & VM_FAULT_ERROR) {
504 		int err = vm_fault_to_errno(ret, *flags);
505 
506 		if (err)
507 			return err;
508 		BUG();
509 	}
510 
511 	if (tsk) {
512 		if (ret & VM_FAULT_MAJOR)
513 			tsk->maj_flt++;
514 		else
515 			tsk->min_flt++;
516 	}
517 
518 	if (ret & VM_FAULT_RETRY) {
519 		if (nonblocking && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
520 			*nonblocking = 0;
521 		return -EBUSY;
522 	}
523 
524 	/*
525 	 * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
526 	 * necessary, even if maybe_mkwrite decided not to set pte_write. We
527 	 * can thus safely do subsequent page lookups as if they were reads.
528 	 * But only do so when looping for pte_write is futile: in some cases
529 	 * userspace may also be wanting to write to the gotten user page,
530 	 * which a read fault here might prevent (a readonly page might get
531 	 * reCOWed by userspace write).
532 	 */
533 	if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
534 		*flags |= FOLL_COW;
535 	return 0;
536 }
537 
538 static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
539 {
540 	vm_flags_t vm_flags = vma->vm_flags;
541 	int write = (gup_flags & FOLL_WRITE);
542 	int foreign = (gup_flags & FOLL_REMOTE);
543 
544 	if (vm_flags & (VM_IO | VM_PFNMAP))
545 		return -EFAULT;
546 
547 	if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
548 		return -EFAULT;
549 
550 	if (write) {
551 		if (!(vm_flags & VM_WRITE)) {
552 			if (!(gup_flags & FOLL_FORCE))
553 				return -EFAULT;
554 			/*
555 			 * We used to let the write,force case do COW in a
556 			 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
557 			 * set a breakpoint in a read-only mapping of an
558 			 * executable, without corrupting the file (yet only
559 			 * when that file had been opened for writing!).
560 			 * Anon pages in shared mappings are surprising: now
561 			 * just reject it.
562 			 */
563 			if (!is_cow_mapping(vm_flags))
564 				return -EFAULT;
565 		}
566 	} else if (!(vm_flags & VM_READ)) {
567 		if (!(gup_flags & FOLL_FORCE))
568 			return -EFAULT;
569 		/*
570 		 * Is there actually any vma we can reach here which does not
571 		 * have VM_MAYREAD set?
572 		 */
573 		if (!(vm_flags & VM_MAYREAD))
574 			return -EFAULT;
575 	}
576 	/*
577 	 * gups are always data accesses, not instruction
578 	 * fetches, so execute=false here
579 	 */
580 	if (!arch_vma_access_permitted(vma, write, false, foreign))
581 		return -EFAULT;
582 	return 0;
583 }
584 
585 /**
586  * __get_user_pages() - pin user pages in memory
587  * @tsk:	task_struct of target task
588  * @mm:		mm_struct of target mm
589  * @start:	starting user address
590  * @nr_pages:	number of pages from start to pin
591  * @gup_flags:	flags modifying pin behaviour
592  * @pages:	array that receives pointers to the pages pinned.
593  *		Should be at least nr_pages long. Or NULL, if caller
594  *		only intends to ensure the pages are faulted in.
595  * @vmas:	array of pointers to vmas corresponding to each page.
596  *		Or NULL if the caller does not require them.
597  * @nonblocking: if non-NULL, don't wait for disk IO or mmap_sem contention (see below)
598  *
599  * Returns number of pages pinned. This may be fewer than the number
600  * requested. If nr_pages is 0 or negative, returns 0. If no pages
601  * were pinned, returns -errno. Each page returned must be released
602  * with a put_page() call when it is finished with. vmas will only
603  * remain valid while mmap_sem is held.
604  *
605  * Must be called with mmap_sem held.  It may be released.  See below.
606  *
607  * __get_user_pages walks a process's page tables and takes a reference to
608  * each struct page that each user address corresponds to at a given
609  * instant. That is, it takes the page that would be accessed if a user
610  * thread accesses the given user virtual address at that instant.
611  *
612  * This does not guarantee that the page exists in the user mappings when
613  * __get_user_pages returns, and there may even be a completely different
614  * page there in some cases (e.g. if mmapped pagecache has been invalidated
615  * and subsequently re-faulted). However, it does guarantee that the page
616  * won't be freed completely. And mostly callers simply care that the page
617  * contains data that was valid *at some point in time*. Typically, an IO
618  * or similar operation cannot guarantee anything stronger anyway because
619  * locks can't be held over the syscall boundary.
620  *
621  * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
622  * the page is written to, set_page_dirty (or set_page_dirty_lock, as
623  * appropriate) must be called after the page is finished with, and
624  * before put_page is called.
625  *
626  * If @nonblocking != NULL, __get_user_pages will not wait for disk IO
627  * or mmap_sem contention, and if waiting is needed to pin all pages,
628  * *@nonblocking will be set to 0.  Further, if @gup_flags does not
629  * include FOLL_NOWAIT, the mmap_sem will be released via up_read() in
630  * this case.
631  *
632  * A caller using such a combination of @nonblocking and @gup_flags
633  * must therefore hold the mmap_sem for reading only, and recognize
634  * when it's been released.  Otherwise, it must be held for either
635  * reading or writing and will not be released.
636  *
637  * In most cases, get_user_pages or get_user_pages_fast should be used
638  * instead of __get_user_pages. __get_user_pages should be used only if
639  * you need some special @gup_flags.
640  */
641 static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
642 		unsigned long start, unsigned long nr_pages,
643 		unsigned int gup_flags, struct page **pages,
644 		struct vm_area_struct **vmas, int *nonblocking)
645 {
646 	long i = 0;
647 	unsigned int page_mask;
648 	struct vm_area_struct *vma = NULL;
649 
650 	if (!nr_pages)
651 		return 0;
652 
653 	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
654 
655 	/*
656 	 * If FOLL_FORCE is set then do not force a full fault as the hinting
657 	 * fault information is unrelated to the reference behaviour of a task
658 	 * using the address space
659 	 */
660 	if (!(gup_flags & FOLL_FORCE))
661 		gup_flags |= FOLL_NUMA;
662 
663 	do {
664 		struct page *page;
665 		unsigned int foll_flags = gup_flags;
666 		unsigned int page_increm;
667 
668 		/* first iteration or crossing a vma boundary */
669 		if (!vma || start >= vma->vm_end) {
670 			vma = find_extend_vma(mm, start);
671 			if (!vma && in_gate_area(mm, start)) {
672 				int ret;
673 				ret = get_gate_page(mm, start & PAGE_MASK,
674 						gup_flags, &vma,
675 						pages ? &pages[i] : NULL);
676 				if (ret)
677 					return i ? : ret;
678 				page_mask = 0;
679 				goto next_page;
680 			}
681 
682 			if (!vma || check_vma_flags(vma, gup_flags))
683 				return i ? : -EFAULT;
684 			if (is_vm_hugetlb_page(vma)) {
685 				i = follow_hugetlb_page(mm, vma, pages, vmas,
686 						&start, &nr_pages, i,
687 						gup_flags, nonblocking);
688 				continue;
689 			}
690 		}
691 retry:
692 		/*
693 		 * If we have a pending SIGKILL, don't keep faulting pages and
694 		 * potentially allocating memory.
695 		 */
696 		if (unlikely(fatal_signal_pending(current)))
697 			return i ? i : -ERESTARTSYS;
698 		cond_resched();
699 		page = follow_page_mask(vma, start, foll_flags, &page_mask);
700 		if (!page) {
701 			int ret;
702 			ret = faultin_page(tsk, vma, start, &foll_flags,
703 					nonblocking);
704 			switch (ret) {
705 			case 0:
706 				goto retry;
707 			case -EFAULT:
708 			case -ENOMEM:
709 			case -EHWPOISON:
710 				return i ? i : ret;
711 			case -EBUSY:
712 				return i;
713 			case -ENOENT:
714 				goto next_page;
715 			}
716 			BUG();
717 		} else if (PTR_ERR(page) == -EEXIST) {
718 			/*
719 			 * Proper page table entry exists, but no corresponding
720 			 * struct page.
721 			 */
722 			goto next_page;
723 		} else if (IS_ERR(page)) {
724 			return i ? i : PTR_ERR(page);
725 		}
726 		if (pages) {
727 			pages[i] = page;
728 			flush_anon_page(vma, page, start);
729 			flush_dcache_page(page);
730 			page_mask = 0;
731 		}
732 next_page:
733 		if (vmas) {
734 			vmas[i] = vma;
735 			page_mask = 0;
736 		}
737 		page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
738 		if (page_increm > nr_pages)
739 			page_increm = nr_pages;
740 		i += page_increm;
741 		start += page_increm * PAGE_SIZE;
742 		nr_pages -= page_increm;
743 	} while (nr_pages);
744 	return i;
745 }
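
/*
 * Hedged usage sketch (illustrative only, not code from this kernel): a
 * caller inside mm/ that needs the @nonblocking semantics described above
 * might do roughly:
 *
 *	int nonblocking = 1;
 *
 *	down_read(&mm->mmap_sem);
 *	ret = __get_user_pages(tsk, mm, start, nr_pages, gup_flags,
 *			       pages, NULL, &nonblocking);
 *	if (nonblocking)
 *		up_read(&mm->mmap_sem);
 *	else
 *		... mmap_sem was already released for us ...
 */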
746 
747 static bool vma_permits_fault(struct vm_area_struct *vma,
748 			      unsigned int fault_flags)
749 {
750 	bool write   = !!(fault_flags & FAULT_FLAG_WRITE);
751 	bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
752 	vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;
753 
754 	if (!(vm_flags & vma->vm_flags))
755 		return false;
756 
757 	/*
758 	 * The architecture might have a hardware protection
759 	 * mechanism other than read/write that can deny access.
760 	 *
761 	 * gup always represents data access, not instruction
762 	 * fetches, so execute=false here:
763 	 */
764 	if (!arch_vma_access_permitted(vma, write, false, foreign))
765 		return false;
766 
767 	return true;
768 }
769 
770 /*
771  * fixup_user_fault() - manually resolve a user page fault
772  * @tsk:	the task_struct to use for page fault accounting, or
773  *		NULL if faults are not to be recorded.
774  * @mm:		mm_struct of target mm
775  * @address:	user address
776  * @fault_flags:flags to pass down to handle_mm_fault()
777  * @unlocked:	did we unlock the mmap_sem while retrying, maybe NULL if caller
778  *		does not allow retry
779  *
780  * This is meant to be called in the specific scenario where, for locking reasons,
781  * we try to access user memory in atomic context (within a pagefault_disable()
782  * section): that access returns -EFAULT, and we want to resolve the user fault
783  * before trying again.
784  *
785  * Typically this is meant to be used by the futex code.
786  *
787  * The main difference with get_user_pages() is that this function will
788  * unconditionally call handle_mm_fault() which will in turn perform all the
789  * necessary SW fixup of the dirty and young bits in the PTE, while
790  * get_user_pages() only guarantees to update these in the struct page.
791  *
792  * This is important for some architectures where those bits also gate the
793  * access permission to the page because they are maintained in software.  On
794  * such architectures, gup() will not be enough to make a subsequent access
795  * succeed.
796  *
797  * This function will not return with an unlocked mmap_sem, so it does not have
798  * the same semantics w.r.t. @mm->mmap_sem as filemap_fault() does.
799  */
800 int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
801 		     unsigned long address, unsigned int fault_flags,
802 		     bool *unlocked)
803 {
804 	struct vm_area_struct *vma;
805 	int ret, major = 0;
806 
807 	if (unlocked)
808 		fault_flags |= FAULT_FLAG_ALLOW_RETRY;
809 
810 retry:
811 	vma = find_extend_vma(mm, address);
812 	if (!vma || address < vma->vm_start)
813 		return -EFAULT;
814 
815 	if (!vma_permits_fault(vma, fault_flags))
816 		return -EFAULT;
817 
818 	ret = handle_mm_fault(vma, address, fault_flags);
819 	major |= ret & VM_FAULT_MAJOR;
820 	if (ret & VM_FAULT_ERROR) {
821 		int err = vm_fault_to_errno(ret, 0);
822 
823 		if (err)
824 			return err;
825 		BUG();
826 	}
827 
828 	if (ret & VM_FAULT_RETRY) {
829 		down_read(&mm->mmap_sem);
830 		if (!(fault_flags & FAULT_FLAG_TRIED)) {
831 			*unlocked = true;
832 			fault_flags &= ~FAULT_FLAG_ALLOW_RETRY;
833 			fault_flags |= FAULT_FLAG_TRIED;
834 			goto retry;
835 		}
836 	}
837 
838 	if (tsk) {
839 		if (major)
840 			tsk->maj_flt++;
841 		else
842 			tsk->min_flt++;
843 	}
844 	return 0;
845 }
846 EXPORT_SYMBOL_GPL(fixup_user_fault);
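
/*
 * Hedged usage sketch (illustrative, in the spirit of the futex code): after
 * an access under pagefault_disable() has failed with -EFAULT, the fault can
 * be resolved explicitly and the access retried:
 *
 *	down_read(&mm->mmap_sem);
 *	ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
 *			       FAULT_FLAG_WRITE, NULL);
 *	up_read(&mm->mmap_sem);
 *	if (!ret)
 *		... retry the faulting access ...
 */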
847 
848 static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
849 						struct mm_struct *mm,
850 						unsigned long start,
851 						unsigned long nr_pages,
852 						struct page **pages,
853 						struct vm_area_struct **vmas,
854 						int *locked,
855 						unsigned int flags)
856 {
857 	long ret, pages_done;
858 	bool lock_dropped;
859 
860 	if (locked) {
861 		/* if VM_FAULT_RETRY can be returned, vmas become invalid */
862 		BUG_ON(vmas);
863 		/* check caller initialized locked */
864 		BUG_ON(*locked != 1);
865 	}
866 
867 	if (pages)
868 		flags |= FOLL_GET;
869 
870 	pages_done = 0;
871 	lock_dropped = false;
872 	for (;;) {
873 		ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages,
874 				       vmas, locked);
875 		if (!locked)
876 			/* VM_FAULT_RETRY couldn't trigger, bypass */
877 			return ret;
878 
879 		/* VM_FAULT_RETRY cannot return errors */
880 		if (!*locked) {
881 			BUG_ON(ret < 0);
882 			BUG_ON(ret >= nr_pages);
883 		}
884 
885 		if (!pages)
886 			/* If it's a prefault don't insist harder */
887 			return ret;
888 
889 		if (ret > 0) {
890 			nr_pages -= ret;
891 			pages_done += ret;
892 			if (!nr_pages)
893 				break;
894 		}
895 		if (*locked) {
896 			/*
897 			 * VM_FAULT_RETRY didn't trigger or it was a
898 			 * FOLL_NOWAIT.
899 			 */
900 			if (!pages_done)
901 				pages_done = ret;
902 			break;
903 		}
904 		/* VM_FAULT_RETRY triggered, so seek to the faulting offset */
905 		pages += ret;
906 		start += ret << PAGE_SHIFT;
907 
908 		/*
909 		 * Repeat on the address that fired VM_FAULT_RETRY
910 		 * without FAULT_FLAG_ALLOW_RETRY but with
911 		 * FAULT_FLAG_TRIED.
912 		 */
913 		*locked = 1;
914 		lock_dropped = true;
915 		down_read(&mm->mmap_sem);
916 		ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
917 				       pages, NULL, NULL);
918 		if (ret != 1) {
919 			BUG_ON(ret > 1);
920 			if (!pages_done)
921 				pages_done = ret;
922 			break;
923 		}
924 		nr_pages--;
925 		pages_done++;
926 		if (!nr_pages)
927 			break;
928 		pages++;
929 		start += PAGE_SIZE;
930 	}
931 	if (lock_dropped && *locked) {
932 		/*
933 		 * We must let the caller know we temporarily dropped the lock
934 		 * and so the critical section protected by it was lost.
935 		 */
936 		up_read(&mm->mmap_sem);
937 		*locked = 0;
938 	}
939 	return pages_done;
940 }
941 
942 /*
943  * We can leverage the VM_FAULT_RETRY functionality in the page fault
944  * paths better by using either get_user_pages_locked() or
945  * get_user_pages_unlocked().
946  *
947  * get_user_pages_locked() is suitable to replace the form:
948  *
949  *      down_read(&mm->mmap_sem);
950  *      do_something()
951  *      get_user_pages(tsk, mm, ..., pages, NULL);
952  *      up_read(&mm->mmap_sem);
953  *
954  *  to:
955  *
956  *      int locked = 1;
957  *      down_read(&mm->mmap_sem);
958  *      do_something()
959  *      get_user_pages_locked(tsk, mm, ..., pages, &locked);
960  *      if (locked)
961  *          up_read(&mm->mmap_sem);
962  */
963 long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
964 			   unsigned int gup_flags, struct page **pages,
965 			   int *locked)
966 {
967 	return __get_user_pages_locked(current, current->mm, start, nr_pages,
968 				       pages, NULL, locked,
969 				       gup_flags | FOLL_TOUCH);
970 }
971 EXPORT_SYMBOL(get_user_pages_locked);
972 
973 /*
974  * get_user_pages_unlocked() is suitable to replace the form:
975  *
976  *      down_read(&mm->mmap_sem);
977  *      get_user_pages(tsk, mm, ..., pages, NULL);
978  *      up_read(&mm->mmap_sem);
979  *
980  *  with:
981  *
982  *      get_user_pages_unlocked(tsk, mm, ..., pages);
983  *
984  * It is functionally equivalent to get_user_pages_fast so
985  * get_user_pages_fast should be used instead if specific gup_flags
986  * (e.g. FOLL_FORCE) are not required.
987  */
988 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
989 			     struct page **pages, unsigned int gup_flags)
990 {
991 	struct mm_struct *mm = current->mm;
992 	int locked = 1;
993 	long ret;
994 
995 	down_read(&mm->mmap_sem);
996 	ret = __get_user_pages_locked(current, mm, start, nr_pages, pages, NULL,
997 				      &locked, gup_flags | FOLL_TOUCH);
998 	if (locked)
999 		up_read(&mm->mmap_sem);
1000 	return ret;
1001 }
1002 EXPORT_SYMBOL(get_user_pages_unlocked);
1003 
1004 /*
1005  * get_user_pages_remote() - pin user pages in memory
1006  * @tsk:	the task_struct to use for page fault accounting, or
1007  *		NULL if faults are not to be recorded.
1008  * @mm:		mm_struct of target mm
1009  * @start:	starting user address
1010  * @nr_pages:	number of pages from start to pin
1011  * @gup_flags:	flags modifying lookup behaviour
1012  * @pages:	array that receives pointers to the pages pinned.
1013  *		Should be at least nr_pages long. Or NULL, if caller
1014  *		only intends to ensure the pages are faulted in.
1015  * @vmas:	array of pointers to vmas corresponding to each page.
1016  *		Or NULL if the caller does not require them.
1017  * @locked:	pointer to lock flag indicating whether lock is held and
1018  *		subsequently whether VM_FAULT_RETRY functionality can be
1019  *		utilised. Lock must initially be held.
1020  *
1021  * Returns number of pages pinned. This may be fewer than the number
1022  * requested. If nr_pages is 0 or negative, returns 0. If no pages
1023  * were pinned, returns -errno. Each page returned must be released
1024  * with a put_page() call when it is finished with. vmas will only
1025  * remain valid while mmap_sem is held.
1026  *
1027  * Must be called with mmap_sem held for read or write.
1028  *
1029  * get_user_pages walks a process's page tables and takes a reference to
1030  * each struct page that each user address corresponds to at a given
1031  * instant. That is, it takes the page that would be accessed if a user
1032  * thread accesses the given user virtual address at that instant.
1033  *
1034  * This does not guarantee that the page exists in the user mappings when
1035  * get_user_pages returns, and there may even be a completely different
1036  * page there in some cases (e.g. if mmapped pagecache has been invalidated
1037  * and subsequently re-faulted). However, it does guarantee that the page
1038  * won't be freed completely. And mostly callers simply care that the page
1039  * contains data that was valid *at some point in time*. Typically, an IO
1040  * or similar operation cannot guarantee anything stronger anyway because
1041  * locks can't be held over the syscall boundary.
1042  *
1043  * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
1044  * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
1045  * be called after the page is finished with, and before put_page is called.
1046  *
1047  * get_user_pages is typically used for fewer-copy IO operations, to get a
1048  * handle on the memory by some means other than accesses via the user virtual
1049  * addresses. The pages may be submitted for DMA to devices or accessed via
1050  * their kernel linear mapping (via the kmap APIs). Care should be taken to
1051  * use the correct cache flushing APIs.
1052  *
1053  * See also get_user_pages_fast, for performance critical applications.
1054  *
1055  * get_user_pages should be phased out in favor of
1056  * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
1057  * should use get_user_pages because it cannot pass
1058  * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
1059  */
1060 long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
1061 		unsigned long start, unsigned long nr_pages,
1062 		unsigned int gup_flags, struct page **pages,
1063 		struct vm_area_struct **vmas, int *locked)
1064 {
1065 	return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
1066 				       locked,
1067 				       gup_flags | FOLL_TOUCH | FOLL_REMOTE);
1068 }
1069 EXPORT_SYMBOL(get_user_pages_remote);
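
/*
 * Hedged usage sketch (illustrative only): a ptrace-style caller accessing
 * another process's address space might pin one page at a time, roughly:
 *
 *	down_read(&mm->mmap_sem);
 *	ret = get_user_pages_remote(tsk, mm, addr, 1, gup_flags,
 *				    &page, &vma, NULL);
 *	if (ret == 1) {
 *		... copy to/from the page via kmap() ...
 *		put_page(page);
 *	}
 *	up_read(&mm->mmap_sem);
 */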
1070 
1071 /*
1072  * This is the same as get_user_pages_remote(), just with a
1073  * less-flexible calling convention where we assume that the task
1074  * and mm being operated on are the current task's and don't allow
1075  * passing of a locked parameter.  We also obviously don't pass
1076  * FOLL_REMOTE in here.
1077  */
1078 long get_user_pages(unsigned long start, unsigned long nr_pages,
1079 		unsigned int gup_flags, struct page **pages,
1080 		struct vm_area_struct **vmas)
1081 {
1082 	return __get_user_pages_locked(current, current->mm, start, nr_pages,
1083 				       pages, vmas, NULL,
1084 				       gup_flags | FOLL_TOUCH);
1085 }
1086 EXPORT_SYMBOL(get_user_pages);
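
/*
 * Hedged usage sketch (illustrative only): the common driver pattern for
 * pinning current->mm pages for IO, honouring the FOLL_WRITE/set_page_dirty
 * rule documented above:
 *
 *	down_read(&current->mm->mmap_sem);
 *	ret = get_user_pages(start, nr_pages, FOLL_WRITE, pages, NULL);
 *	up_read(&current->mm->mmap_sem);
 *
 *	for (i = 0; i < ret; i++) {
 *		... let the device DMA into pages[i] ...
 *		set_page_dirty_lock(pages[i]);
 *		put_page(pages[i]);
 *	}
 */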
1087 
1088 #ifdef CONFIG_FS_DAX
1089 /*
1090  * This is the same as get_user_pages() in that it assumes we are
1091  * operating on the current task's mm, but it goes further to validate
1092  * that the vmas associated with the address range are suitable for
1093  * longterm elevated page reference counts. For example, filesystem-dax
1094  * mappings are subject to the lifetime enforced by the filesystem and
1095  * we need guarantees that longterm users like RDMA and V4L2 only
1096  * establish mappings that have a kernel enforced revocation mechanism.
1097  *
1098  * "longterm" == userspace controlled elevated page count lifetime.
1099  * Contrast this to iov_iter_get_pages() usages which are transient.
1100  */
1101 long get_user_pages_longterm(unsigned long start, unsigned long nr_pages,
1102 		unsigned int gup_flags, struct page **pages,
1103 		struct vm_area_struct **vmas_arg)
1104 {
1105 	struct vm_area_struct **vmas = vmas_arg;
1106 	struct vm_area_struct *vma_prev = NULL;
1107 	long rc, i;
1108 
1109 	if (!pages)
1110 		return -EINVAL;
1111 
1112 	if (!vmas) {
1113 		vmas = kcalloc(nr_pages, sizeof(struct vm_area_struct *),
1114 			       GFP_KERNEL);
1115 		if (!vmas)
1116 			return -ENOMEM;
1117 	}
1118 
1119 	rc = get_user_pages(start, nr_pages, gup_flags, pages, vmas);
1120 
1121 	for (i = 0; i < rc; i++) {
1122 		struct vm_area_struct *vma = vmas[i];
1123 
1124 		if (vma == vma_prev)
1125 			continue;
1126 
1127 		vma_prev = vma;
1128 
1129 		if (vma_is_fsdax(vma))
1130 			break;
1131 	}
1132 
1133 	/*
1134 	 * Either get_user_pages() failed, or the vma validation
1135 	 * succeeded, in either case we don't need to put_page() before
1136 	 * returning.
1137 	 */
1138 	if (i >= rc)
1139 		goto out;
1140 
1141 	for (i = 0; i < rc; i++)
1142 		put_page(pages[i]);
1143 	rc = -EOPNOTSUPP;
1144 out:
1145 	if (vmas != vmas_arg)
1146 		kfree(vmas);
1147 	return rc;
1148 }
1149 EXPORT_SYMBOL(get_user_pages_longterm);
1150 #endif /* CONFIG_FS_DAX */
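
/*
 * Hedged usage sketch (illustrative only): a long-lived pin, e.g. for RDMA
 * memory registration, would use this variant so that filesystem-dax vmas
 * are refused with -EOPNOTSUPP rather than pinned indefinitely:
 *
 *	down_read(&current->mm->mmap_sem);
 *	ret = get_user_pages_longterm(start, nr_pages, FOLL_WRITE,
 *				      pages, NULL);
 *	up_read(&current->mm->mmap_sem);
 *	...
 *	for (i = 0; i < ret; i++)
 *		put_page(pages[i]);
 */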
1151 
1152 /**
1153  * populate_vma_page_range() -  populate a range of pages in the vma.
1154  * @vma:   target vma
1155  * @start: start address
1156  * @end:   end address
1157  * @nonblocking: if non-NULL, mmap_sem may be released; see below
1158  *
1159  * This takes care of mlocking the pages too if VM_LOCKED is set.
1160  *
1161  * return 0 on success, negative error code on error.
1162  *
1163  * vma->vm_mm->mmap_sem must be held.
1164  *
1165  * If @nonblocking is NULL, it may be held for read or write and will
1166  * be unperturbed.
1167  *
1168  * If @nonblocking is non-NULL, it must be held for read only and may be
1169  * released.  If it's released, *@nonblocking will be set to 0.
1170  */
1171 long populate_vma_page_range(struct vm_area_struct *vma,
1172 		unsigned long start, unsigned long end, int *nonblocking)
1173 {
1174 	struct mm_struct *mm = vma->vm_mm;
1175 	unsigned long nr_pages = (end - start) / PAGE_SIZE;
1176 	int gup_flags;
1177 
1178 	VM_BUG_ON(start & ~PAGE_MASK);
1179 	VM_BUG_ON(end   & ~PAGE_MASK);
1180 	VM_BUG_ON_VMA(start < vma->vm_start, vma);
1181 	VM_BUG_ON_VMA(end   > vma->vm_end, vma);
1182 	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
1183 
1184 	gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
1185 	if (vma->vm_flags & VM_LOCKONFAULT)
1186 		gup_flags &= ~FOLL_POPULATE;
1187 	/*
1188 	 * We want to touch writable mappings with a write fault in order
1189 	 * to break COW, except for shared mappings because these don't COW
1190 	 * and we would not want to dirty them for nothing.
1191 	 */
1192 	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
1193 		gup_flags |= FOLL_WRITE;
1194 
1195 	/*
1196 	 * We want mlock to succeed for regions that have any permissions
1197 	 * other than PROT_NONE.
1198 	 */
1199 	if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
1200 		gup_flags |= FOLL_FORCE;
1201 
1202 	/*
1203 	 * We made sure addr is within a VMA, so the following will
1204 	 * not result in a stack expansion that recurses back here.
1205 	 */
1206 	return __get_user_pages(current, mm, start, nr_pages, gup_flags,
1207 				NULL, NULL, nonblocking);
1208 }
1209 
1210 /*
1211  * __mm_populate - populate and/or mlock pages within a range of address space.
1212  *
1213  * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
1214  * flags. VMAs must be already marked with the desired vm_flags, and
1215  * mmap_sem must not be held.
1216  */
1217 int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
1218 {
1219 	struct mm_struct *mm = current->mm;
1220 	unsigned long end, nstart, nend;
1221 	struct vm_area_struct *vma = NULL;
1222 	int locked = 0;
1223 	long ret = 0;
1224 
1225 	VM_BUG_ON(start & ~PAGE_MASK);
1226 	VM_BUG_ON(len != PAGE_ALIGN(len));
1227 	end = start + len;
1228 
1229 	for (nstart = start; nstart < end; nstart = nend) {
1230 		/*
1231 		 * We want to fault in pages for [nstart; end) address range.
1232 		 * Find first corresponding VMA.
1233 		 */
1234 		if (!locked) {
1235 			locked = 1;
1236 			down_read(&mm->mmap_sem);
1237 			vma = find_vma(mm, nstart);
1238 		} else if (nstart >= vma->vm_end)
1239 			vma = vma->vm_next;
1240 		if (!vma || vma->vm_start >= end)
1241 			break;
1242 		/*
1243 		 * Set [nstart; nend) to intersection of desired address
1244 		 * range with the first VMA. Also, skip undesirable VMA types.
1245 		 */
1246 		nend = min(end, vma->vm_end);
1247 		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
1248 			continue;
1249 		if (nstart < vma->vm_start)
1250 			nstart = vma->vm_start;
1251 		/*
1252 		 * Now fault in a range of pages. populate_vma_page_range()
1253 		 * double checks the vma flags, so that it won't mlock pages
1254 		 * if the vma was already munlocked.
1255 		 */
1256 		ret = populate_vma_page_range(vma, nstart, nend, &locked);
1257 		if (ret < 0) {
1258 			if (ignore_errors) {
1259 				ret = 0;
1260 				continue;	/* continue at next VMA */
1261 			}
1262 			break;
1263 		}
1264 		nend = nstart + ret * PAGE_SIZE;
1265 		ret = 0;
1266 	}
1267 	if (locked)
1268 		up_read(&mm->mmap_sem);
1269 	return ret;	/* 0 or negative error code */
1270 }
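
/*
 * Hedged usage sketch (illustrative only): mlock() and mmap(MAP_POPULATE)
 * end up here after updating vm_flags and dropping mmap_sem, roughly:
 *
 *	up_write(&mm->mmap_sem);
 *	if (!error)
 *		error = __mm_populate(start, len, 0);
 */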
1271 
1272 /**
1273  * get_dump_page() - pin user page in memory while writing it to core dump
1274  * @addr: user address
1275  *
1276  * Returns struct page pointer of user page pinned for dump,
1277  * to be freed afterwards by put_page().
1278  *
1279  * Returns NULL on any kind of failure - a hole must then be inserted into
1280  * the corefile, to preserve alignment with its headers; and also returns
1281  * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
1282  * allowing a hole to be left in the corefile to save diskspace.
1283  *
1284  * Called without mmap_sem, but after all other threads have been killed.
1285  */
1286 #ifdef CONFIG_ELF_CORE
1287 struct page *get_dump_page(unsigned long addr)
1288 {
1289 	struct vm_area_struct *vma;
1290 	struct page *page;
1291 
1292 	if (__get_user_pages(current, current->mm, addr, 1,
1293 			     FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
1294 			     NULL) < 1)
1295 		return NULL;
1296 	flush_cache_page(vma, addr, page_to_pfn(page));
1297 	return page;
1298 }
1299 #endif /* CONFIG_ELF_CORE */
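
/*
 * Hedged usage sketch (illustrative, in the spirit of the ELF core dumper):
 *
 *	page = get_dump_page(addr);
 *	if (page) {
 *		... write the page contents to the core file ...
 *		put_page(page);
 *	} else {
 *		... emit a zero-filled hole instead ...
 *	}
 */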
1300 
1301 /*
1302  * Generic Fast GUP
1303  *
1304  * get_user_pages_fast attempts to pin user pages by walking the page
1305  * tables directly and avoids taking locks. Thus the walker needs to be
1306  * protected from page table pages being freed from under it, and should
1307  * block any THP splits.
1308  *
1309  * One way to achieve this is to have the walker disable interrupts, and
1310  * rely on IPIs from the TLB flushing code blocking before the page table
1311  * pages are freed. This is unsuitable for architectures that do not need
1312  * to broadcast an IPI when invalidating TLBs.
1313  *
1314  * Another way to achieve this is to batch up the pages that contain page
1315  * tables belonging to more than one mm_user, then free those pages via an
1316  * rcu_sched callback. Disabling interrupts will allow the fast_gup walker to both block
1317  * the rcu_sched callback, and an IPI that we broadcast for splitting THPs
1318  * (which is a relatively rare event). The code below adopts this strategy.
1319  *
1320  * Before activating this code, please be aware that the following assumptions
1321  * are currently made:
1322  *
1323  *  *) Either HAVE_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to
1324  *  free pages containing page tables or TLB flushing requires IPI broadcast.
1325  *
1326  *  *) ptes can be read atomically by the architecture.
1327  *
1328  *  *) access_ok is sufficient to validate userspace address ranges.
1329  *
1330  * The last two assumptions can be relaxed by the addition of helper functions.
1331  *
1332  * This code is based heavily on the PowerPC implementation by Nick Piggin.
1333  */
1334 #ifdef CONFIG_HAVE_GENERIC_GUP
1335 
1336 #ifndef gup_get_pte
1337 /*
1338  * We assume that the PTE can be read atomically. If this is not the case for
1339  * your architecture, please provide the helper.
1340  */
1341 static inline pte_t gup_get_pte(pte_t *ptep)
1342 {
1343 	return READ_ONCE(*ptep);
1344 }
1345 #endif
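
/*
 * Note (illustrative, describing other code): an architecture whose ptes are
 * wider than its native word size, such as 32-bit x86 with PAE, is expected
 * to supply its own gup_get_pte() that reads the two pte halves with
 * appropriate barriers and retries until it observes a consistent value.
 */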
1346 
1347 static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages)
1348 {
1349 	while ((*nr) - nr_start) {
1350 		struct page *page = pages[--(*nr)];
1351 
1352 		ClearPageReferenced(page);
1353 		put_page(page);
1354 	}
1355 }
1356 
1357 #ifdef __HAVE_ARCH_PTE_SPECIAL
1358 static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
1359 			 int write, struct page **pages, int *nr)
1360 {
1361 	struct dev_pagemap *pgmap = NULL;
1362 	int nr_start = *nr, ret = 0;
1363 	pte_t *ptep, *ptem;
1364 
1365 	ptem = ptep = pte_offset_map(&pmd, addr);
1366 	do {
1367 		pte_t pte = gup_get_pte(ptep);
1368 		struct page *head, *page;
1369 
1370 		/*
1371 		 * Similar to the PMD case below, NUMA hinting must take slow
1372 		 * path using the pte_protnone check.
1373 		 */
1374 		if (pte_protnone(pte))
1375 			goto pte_unmap;
1376 
1377 		if (!pte_access_permitted(pte, write))
1378 			goto pte_unmap;
1379 
1380 		if (pte_devmap(pte)) {
1381 			pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
1382 			if (unlikely(!pgmap)) {
1383 				undo_dev_pagemap(nr, nr_start, pages);
1384 				goto pte_unmap;
1385 			}
1386 		} else if (pte_special(pte))
1387 			goto pte_unmap;
1388 
1389 		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
1390 		page = pte_page(pte);
1391 		head = compound_head(page);
1392 
1393 		if (!page_cache_get_speculative(head))
1394 			goto pte_unmap;
1395 
1396 		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
1397 			put_page(head);
1398 			goto pte_unmap;
1399 		}
1400 
1401 		VM_BUG_ON_PAGE(compound_head(page) != head, page);
1402 
1403 		SetPageReferenced(page);
1404 		pages[*nr] = page;
1405 		(*nr)++;
1406 
1407 	} while (ptep++, addr += PAGE_SIZE, addr != end);
1408 
1409 	ret = 1;
1410 
1411 pte_unmap:
1412 	if (pgmap)
1413 		put_dev_pagemap(pgmap);
1414 	pte_unmap(ptem);
1415 	return ret;
1416 }
1417 #else
1418 
1419 /*
1420  * If we can't determine whether or not a pte is special, then fail immediately
1421  * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
1422  * to be special.
1423  *
1424  * For a futex to be placed on a THP tail page, get_futex_key requires a
1425  * __get_user_pages_fast implementation that can pin pages. Thus it's still
1426  * useful to have gup_huge_pmd even if we can't operate on ptes.
1427  */
1428 static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
1429 			 int write, struct page **pages, int *nr)
1430 {
1431 	return 0;
1432 }
1433 #endif /* __HAVE_ARCH_PTE_SPECIAL */
1434 
1435 #if defined(__HAVE_ARCH_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
1436 static int __gup_device_huge(unsigned long pfn, unsigned long addr,
1437 		unsigned long end, struct page **pages, int *nr)
1438 {
1439 	int nr_start = *nr;
1440 	struct dev_pagemap *pgmap = NULL;
1441 
1442 	do {
1443 		struct page *page = pfn_to_page(pfn);
1444 
1445 		pgmap = get_dev_pagemap(pfn, pgmap);
1446 		if (unlikely(!pgmap)) {
1447 			undo_dev_pagemap(nr, nr_start, pages);
1448 			return 0;
1449 		}
1450 		SetPageReferenced(page);
1451 		pages[*nr] = page;
1452 		get_page(page);
1453 		(*nr)++;
1454 		pfn++;
1455 	} while (addr += PAGE_SIZE, addr != end);
1456 
1457 	if (pgmap)
1458 		put_dev_pagemap(pgmap);
1459 	return 1;
1460 }
1461 
1462 static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr,
1463 		unsigned long end, struct page **pages, int *nr)
1464 {
1465 	unsigned long fault_pfn;
1466 
1467 	fault_pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
1468 	return __gup_device_huge(fault_pfn, addr, end, pages, nr);
1469 }
1470 
1471 static int __gup_device_huge_pud(pud_t pud, unsigned long addr,
1472 		unsigned long end, struct page **pages, int *nr)
1473 {
1474 	unsigned long fault_pfn;
1475 
1476 	fault_pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
1477 	return __gup_device_huge(fault_pfn, addr, end, pages, nr);
1478 }
1479 #else
1480 static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr,
1481 		unsigned long end, struct page **pages, int *nr)
1482 {
1483 	BUILD_BUG();
1484 	return 0;
1485 }
1486 
1487 static int __gup_device_huge_pud(pud_t pud, unsigned long addr,
1488 		unsigned long end, struct page **pages, int *nr)
1489 {
1490 	BUILD_BUG();
1491 	return 0;
1492 }
1493 #endif
1494 
1495 static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
1496 		unsigned long end, int write, struct page **pages, int *nr)
1497 {
1498 	struct page *head, *page;
1499 	int refs;
1500 
1501 	if (!pmd_access_permitted(orig, write))
1502 		return 0;
1503 
1504 	if (pmd_devmap(orig))
1505 		return __gup_device_huge_pmd(orig, addr, end, pages, nr);
1506 
1507 	refs = 0;
1508 	page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
1509 	do {
1510 		pages[*nr] = page;
1511 		(*nr)++;
1512 		page++;
1513 		refs++;
1514 	} while (addr += PAGE_SIZE, addr != end);
1515 
1516 	head = compound_head(pmd_page(orig));
1517 	if (!page_cache_add_speculative(head, refs)) {
1518 		*nr -= refs;
1519 		return 0;
1520 	}
1521 
1522 	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
1523 		*nr -= refs;
1524 		while (refs--)
1525 			put_page(head);
1526 		return 0;
1527 	}
1528 
1529 	SetPageReferenced(head);
1530 	return 1;
1531 }
1532 
1533 static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
1534 		unsigned long end, int write, struct page **pages, int *nr)
1535 {
1536 	struct page *head, *page;
1537 	int refs;
1538 
1539 	if (!pud_access_permitted(orig, write))
1540 		return 0;
1541 
1542 	if (pud_devmap(orig))
1543 		return __gup_device_huge_pud(orig, addr, end, pages, nr);
1544 
1545 	refs = 0;
1546 	page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
1547 	do {
1548 		pages[*nr] = page;
1549 		(*nr)++;
1550 		page++;
1551 		refs++;
1552 	} while (addr += PAGE_SIZE, addr != end);
1553 
1554 	head = compound_head(pud_page(orig));
1555 	if (!page_cache_add_speculative(head, refs)) {
1556 		*nr -= refs;
1557 		return 0;
1558 	}
1559 
1560 	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
1561 		*nr -= refs;
1562 		while (refs--)
1563 			put_page(head);
1564 		return 0;
1565 	}
1566 
1567 	SetPageReferenced(head);
1568 	return 1;
1569 }
1570 
1571 static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
1572 			unsigned long end, int write,
1573 			struct page **pages, int *nr)
1574 {
1575 	int refs;
1576 	struct page *head, *page;
1577 
1578 	if (!pgd_access_permitted(orig, write))
1579 		return 0;
1580 
1581 	BUILD_BUG_ON(pgd_devmap(orig));
1582 	refs = 0;
1583 	page = pgd_page(orig) + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
1584 	do {
1585 		pages[*nr] = page;
1586 		(*nr)++;
1587 		page++;
1588 		refs++;
1589 	} while (addr += PAGE_SIZE, addr != end);
1590 
1591 	head = compound_head(pgd_page(orig));
1592 	if (!page_cache_add_speculative(head, refs)) {
1593 		*nr -= refs;
1594 		return 0;
1595 	}
1596 
1597 	if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
1598 		*nr -= refs;
1599 		while (refs--)
1600 			put_page(head);
1601 		return 0;
1602 	}
1603 
1604 	SetPageReferenced(head);
1605 	return 1;
1606 }
1607 
1608 static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
1609 		int write, struct page **pages, int *nr)
1610 {
1611 	unsigned long next;
1612 	pmd_t *pmdp;
1613 
1614 	pmdp = pmd_offset(&pud, addr);
1615 	do {
1616 		pmd_t pmd = READ_ONCE(*pmdp);
1617 
1618 		next = pmd_addr_end(addr, end);
1619 		if (!pmd_present(pmd))
1620 			return 0;
1621 
1622 		if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd))) {
1623 			/*
1624 			 * NUMA hinting faults need to be handled in the GUP
1625 			 * slowpath for accounting purposes and so that they
1626 			 * can be serialised against THP migration.
1627 			 */
1628 			if (pmd_protnone(pmd))
1629 				return 0;
1630 
1631 			if (!gup_huge_pmd(pmd, pmdp, addr, next, write,
1632 				pages, nr))
1633 				return 0;
1634 
1635 		} else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
1636 			/*
1637 			 * architectures can use a different format for a
1638 			 * hugetlbfs pmd than for a THP pmd
1639 			 */
1640 			if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
1641 					 PMD_SHIFT, next, write, pages, nr))
1642 				return 0;
1643 		} else if (!gup_pte_range(pmd, addr, next, write, pages, nr))
1644 			return 0;
1645 	} while (pmdp++, addr = next, addr != end);
1646 
1647 	return 1;
1648 }
1649 
1650 static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end,
1651 			 int write, struct page **pages, int *nr)
1652 {
1653 	unsigned long next;
1654 	pud_t *pudp;
1655 
1656 	pudp = pud_offset(&p4d, addr);
1657 	do {
1658 		pud_t pud = READ_ONCE(*pudp);
1659 
1660 		next = pud_addr_end(addr, end);
1661 		if (pud_none(pud))
1662 			return 0;
1663 		if (unlikely(pud_huge(pud))) {
1664 			if (!gup_huge_pud(pud, pudp, addr, next, write,
1665 					  pages, nr))
1666 				return 0;
1667 		} else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
1668 			if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
1669 					 PUD_SHIFT, next, write, pages, nr))
1670 				return 0;
1671 		} else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
1672 			return 0;
1673 	} while (pudp++, addr = next, addr != end);
1674 
1675 	return 1;
1676 }
1677 
1678 static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end,
1679 			 int write, struct page **pages, int *nr)
1680 {
1681 	unsigned long next;
1682 	p4d_t *p4dp;
1683 
1684 	p4dp = p4d_offset(&pgd, addr);
1685 	do {
1686 		p4d_t p4d = READ_ONCE(*p4dp);
1687 
1688 		next = p4d_addr_end(addr, end);
1689 		if (p4d_none(p4d))
1690 			return 0;
1691 		BUILD_BUG_ON(p4d_huge(p4d));
1692 		if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) {
1693 			if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr,
1694 					 P4D_SHIFT, next, write, pages, nr))
1695 				return 0;
1696 		} else if (!gup_pud_range(p4d, addr, next, write, pages, nr))
1697 			return 0;
1698 	} while (p4dp++, addr = next, addr != end);
1699 
1700 	return 1;
1701 }
1702 
1703 static void gup_pgd_range(unsigned long addr, unsigned long end,
1704 		int write, struct page **pages, int *nr)
1705 {
1706 	unsigned long next;
1707 	pgd_t *pgdp;
1708 
1709 	pgdp = pgd_offset(current->mm, addr);
1710 	do {
1711 		pgd_t pgd = READ_ONCE(*pgdp);
1712 
1713 		next = pgd_addr_end(addr, end);
1714 		if (pgd_none(pgd))
1715 			return;
1716 		if (unlikely(pgd_huge(pgd))) {
1717 			if (!gup_huge_pgd(pgd, pgdp, addr, next, write,
1718 					  pages, nr))
1719 				return;
1720 		} else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
1721 			if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
1722 					 PGDIR_SHIFT, next, write, pages, nr))
1723 				return;
1724 		} else if (!gup_p4d_range(pgd, addr, next, write, pages, nr))
1725 			return;
1726 	} while (pgdp++, addr = next, addr != end);
1727 }
1728 
1729 #ifndef gup_fast_permitted
1730 /*
1731  * Check if it's allowed to use __get_user_pages_fast() for the range, or
1732  * we need to fall back to the slow version:
1733  */
1734 bool gup_fast_permitted(unsigned long start, int nr_pages, int write)
1735 {
1736 	unsigned long len, end;
1737 
1738 	len = (unsigned long) nr_pages << PAGE_SHIFT;
1739 	end = start + len;
1740 	return end >= start;
1741 }
1742 #endif
1743 
1744 /*
1745  * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
1746  * the regular GUP.
1747  * Note a difference with get_user_pages_fast: this always returns the
1748  * number of pages pinned, 0 if no pages were pinned.
1749  */
1750 int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
1751 			  struct page **pages)
1752 {
1753 	unsigned long addr, len, end;
1754 	unsigned long flags;
1755 	int nr = 0;
1756 
1757 	start &= PAGE_MASK;
1758 	addr = start;
1759 	len = (unsigned long) nr_pages << PAGE_SHIFT;
1760 	end = start + len;
1761 
1762 	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
1763 					(void __user *)start, len)))
1764 		return 0;
1765 
1766 	/*
1767 	 * Disable interrupts.  We use the nested form as we can already have
1768 	 * interrupts disabled by get_futex_key.
1769 	 *
1770 	 * With interrupts disabled, we block page table pages from being
1771 	 * freed from under us. See mmu_gather_tlb in asm-generic/tlb.h
1772 	 * for more details.
1773 	 *
1774 	 * We do not adopt an rcu_read_lock(.) here as we also want to
1775 	 * block IPIs that come from THPs splitting.
1776 	 */
1777 
1778 	if (gup_fast_permitted(start, nr_pages, write)) {
1779 		local_irq_save(flags);
1780 		gup_pgd_range(addr, end, write, pages, &nr);
1781 		local_irq_restore(flags);
1782 	}
1783 
1784 	return nr;
1785 }
1786 
1787 /**
1788  * get_user_pages_fast() - pin user pages in memory
1789  * @start:	starting user address
1790  * @nr_pages:	number of pages from start to pin
1791  * @write:	whether pages will be written to
1792  * @pages:	array that receives pointers to the pages pinned.
1793  *		Should be at least nr_pages long.
1794  *
1795  * Attempt to pin user pages in memory without taking mm->mmap_sem.
1796  * If not successful, it will fall back to taking the lock and
1797  * calling get_user_pages().
1798  *
1799  * Returns number of pages pinned. This may be fewer than the number
1800  * requested. If nr_pages is 0 or negative, returns 0. If no pages
1801  * were pinned, returns -errno.
1802  */
1803 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
1804 			struct page **pages)
1805 {
1806 	unsigned long addr, len, end;
1807 	int nr = 0, ret = 0;
1808 
1809 	start &= PAGE_MASK;
1810 	addr = start;
1811 	len = (unsigned long) nr_pages << PAGE_SHIFT;
1812 	end = start + len;
1813 
1814 	if (nr_pages <= 0)
1815 		return 0;
1816 
1817 	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
1818 					(void __user *)start, len)))
1819 		return -EFAULT;
1820 
1821 	if (gup_fast_permitted(start, nr_pages, write)) {
1822 		local_irq_disable();
1823 		gup_pgd_range(addr, end, write, pages, &nr);
1824 		local_irq_enable();
1825 		ret = nr;
1826 	}
1827 
1828 	if (nr < nr_pages) {
1829 		/* Try to get the remaining pages with get_user_pages */
1830 		start += nr << PAGE_SHIFT;
1831 		pages += nr;
1832 
1833 		ret = get_user_pages_unlocked(start, nr_pages - nr, pages,
1834 				write ? FOLL_WRITE : 0);
1835 
1836 		/* Have to be a bit careful with return values */
1837 		if (nr > 0) {
1838 			if (ret < 0)
1839 				ret = nr;
1840 			else
1841 				ret += nr;
1842 		}
1843 	}
1844 
1845 	return ret;
1846 }
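
/*
 * Hedged usage sketch (illustrative only): callers that usually hit the fast
 * path, e.g. direct IO, can simply do
 *
 *	ret = get_user_pages_fast(user_addr, nr_pages, 1, pages);
 *
 * where write=1 requests write access (FOLL_WRITE on the slow-path fallback);
 * each page returned must still be released with put_page() when done.
 */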
1847 
1848 #endif /* CONFIG_HAVE_GENERIC_GUP */
1849