xref: /openbmc/linux/mm/gup.c (revision f39650de)
1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <linux/kernel.h>
3 #include <linux/errno.h>
4 #include <linux/err.h>
5 #include <linux/spinlock.h>
6 
7 #include <linux/mm.h>
8 #include <linux/memremap.h>
9 #include <linux/pagemap.h>
10 #include <linux/rmap.h>
11 #include <linux/swap.h>
12 #include <linux/swapops.h>
13 
14 #include <linux/sched/signal.h>
15 #include <linux/rwsem.h>
16 #include <linux/hugetlb.h>
17 #include <linux/migrate.h>
18 #include <linux/mm_inline.h>
19 #include <linux/sched/mm.h>
20 
21 #include <asm/mmu_context.h>
22 #include <asm/tlbflush.h>
23 
24 #include "internal.h"
25 
26 struct follow_page_context {
27 	struct dev_pagemap *pgmap;
28 	unsigned int page_mask;
29 };
30 
31 static void hpage_pincount_add(struct page *page, int refs)
32 {
33 	VM_BUG_ON_PAGE(!hpage_pincount_available(page), page);
34 	VM_BUG_ON_PAGE(page != compound_head(page), page);
35 
36 	atomic_add(refs, compound_pincount_ptr(page));
37 }
38 
39 static void hpage_pincount_sub(struct page *page, int refs)
40 {
41 	VM_BUG_ON_PAGE(!hpage_pincount_available(page), page);
42 	VM_BUG_ON_PAGE(page != compound_head(page), page);
43 
44 	atomic_sub(refs, compound_pincount_ptr(page));
45 }
46 
47 /* Equivalent to calling put_page() @refs times. */
48 static void put_page_refs(struct page *page, int refs)
49 {
50 #ifdef CONFIG_DEBUG_VM
51 	if (VM_WARN_ON_ONCE_PAGE(page_ref_count(page) < refs, page))
52 		return;
53 #endif
54 
55 	/*
56 	 * Calling put_page() for each ref is unnecessarily slow. Only the last
57 	 * ref needs a put_page().
58 	 */
59 	if (refs > 1)
60 		page_ref_sub(page, refs - 1);
61 	put_page(page);
62 }
63 
64 /*
65  * Return the compound head page with ref appropriately incremented,
66  * or NULL if that failed.
67  */
68 static inline struct page *try_get_compound_head(struct page *page, int refs)
69 {
70 	struct page *head = compound_head(page);
71 
72 	if (WARN_ON_ONCE(page_ref_count(head) < 0))
73 		return NULL;
74 	if (unlikely(!page_cache_add_speculative(head, refs)))
75 		return NULL;
76 
77 	/*
78 	 * At this point we have a stable reference to the head page; but it
79 	 * could be that between the compound_head() lookup and the refcount
80 	 * increment, the compound page was split, in which case we'd end up
81 	 * holding a reference on a page that has nothing to do with the page
82 	 * we were given anymore.
83 	 * So now that the head page is stable, recheck that the pages still
84 	 * belong together.
85 	 */
86 	if (unlikely(compound_head(page) != head)) {
87 		put_page_refs(head, refs);
88 		return NULL;
89 	}
90 
91 	return head;
92 }
93 
94 /*
95  * try_grab_compound_head() - attempt to elevate a page's refcount, by a
96  * flags-dependent amount.
97  *
98  * "grab" names in this file mean, "look at flags to decide whether to use
99  * FOLL_PIN or FOLL_GET behavior, when incrementing the page's refcount."
100  *
101  * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the
102  * same time. (That's true throughout the get_user_pages*() and
103  * pin_user_pages*() APIs.) Cases:
104  *
105  *    FOLL_GET: page's refcount will be incremented by 1.
106  *    FOLL_PIN: page's refcount will be incremented by GUP_PIN_COUNTING_BIAS.
107  *
108  * Return: head page (with refcount appropriately incremented) for success, or
109  * NULL upon failure. If neither FOLL_GET nor FOLL_PIN was set, that's
110  * considered failure, and furthermore, a likely bug in the caller, so a warning
111  * is also emitted.
112  */
113 __maybe_unused struct page *try_grab_compound_head(struct page *page,
114 						   int refs, unsigned int flags)
115 {
116 	if (flags & FOLL_GET)
117 		return try_get_compound_head(page, refs);
118 	else if (flags & FOLL_PIN) {
119 		int orig_refs = refs;
120 
121 		/*
122 		 * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a
123 		 * right zone, so fail and let the caller fall back to the slow
124 		 * path.
125 		 */
126 		if (unlikely((flags & FOLL_LONGTERM) &&
127 			     !is_pinnable_page(page)))
128 			return NULL;
129 
130 		/*
131 		 * CAUTION: Don't use compound_head() on the page before this
132 		 * point, the result won't be stable.
133 		 */
134 		page = try_get_compound_head(page, refs);
135 		if (!page)
136 			return NULL;
137 
138 		/*
139 		 * When pinning a compound page of order > 1 (which is what
140 		 * hpage_pincount_available() checks for), use an exact count to
141 		 * track it, via hpage_pincount_add/_sub().
142 		 *
143 		 * However, be sure to *also* increment the normal page refcount
144 		 * field at least once, so that the page really is pinned.
145 		 */
146 		if (hpage_pincount_available(page))
147 			hpage_pincount_add(page, refs);
148 		else
149 			page_ref_add(page, refs * (GUP_PIN_COUNTING_BIAS - 1));
150 
151 		mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_ACQUIRED,
152 				    orig_refs);
153 
154 		return page;
155 	}
156 
157 	WARN_ON_ONCE(1);
158 	return NULL;
159 }
160 
161 static void put_compound_head(struct page *page, int refs, unsigned int flags)
162 {
163 	if (flags & FOLL_PIN) {
164 		mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_RELEASED,
165 				    refs);
166 
167 		if (hpage_pincount_available(page))
168 			hpage_pincount_sub(page, refs);
169 		else
170 			refs *= GUP_PIN_COUNTING_BIAS;
171 	}
172 
173 	put_page_refs(page, refs);
174 }
175 
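/*
 * For reference, the consumer side of this accounting lives in
 * include/linux/mm.h. A rough sketch of page_maybe_dma_pinned() (illustrative
 * only; see the real helper for the authoritative version):
 *
 *	static inline bool page_maybe_dma_pinned(struct page *page)
 *	{
 *		if (hpage_pincount_available(page))
 *			return compound_pincount(page) > 0;
 *
 *		return ((unsigned int)page_ref_count(compound_head(page))) >=
 *			GUP_PIN_COUNTING_BIAS;
 *	}
 *
 * In other words, the GUP_PIN_COUNTING_BIAS applied by try_grab_compound_head()
 * and removed by put_compound_head() is what lets the rest of mm make a
 * heuristic "is this page DMA-pinned?" decision.
 */
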
176 /**
177  * try_grab_page() - elevate a page's refcount by a flag-dependent amount
178  *
179  * This might not do anything at all, depending on the flags argument.
180  *
181  * "grab" names in this file mean, "look at flags to decide whether to use
182  * FOLL_PIN or FOLL_GET behavior, when incrementing the page's refcount."
183  *
184  * @page:    pointer to page to be grabbed
185  * @flags:   gup flags: these are the FOLL_* flag values.
186  *
187  * Either FOLL_PIN or FOLL_GET (or neither) may be set, but not both at the same
188  * time. Cases:
189  *
190  *    FOLL_GET: page's refcount will be incremented by 1.
191  *    FOLL_PIN: page's refcount will be incremented by GUP_PIN_COUNTING_BIAS.
192  *
193  * Return: true for success, or if no action was required (if neither FOLL_PIN
194  * nor FOLL_GET was set, nothing is done). False for failure: FOLL_GET or
195  * FOLL_PIN was set, but the page could not be grabbed.
196  */
197 bool __must_check try_grab_page(struct page *page, unsigned int flags)
198 {
199 	WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == (FOLL_GET | FOLL_PIN));
200 
201 	if (flags & FOLL_GET)
202 		return try_get_page(page);
203 	else if (flags & FOLL_PIN) {
204 		int refs = 1;
205 
206 		page = compound_head(page);
207 
208 		if (WARN_ON_ONCE(page_ref_count(page) <= 0))
209 			return false;
210 
211 		if (hpage_pincount_available(page))
212 			hpage_pincount_add(page, 1);
213 		else
214 			refs = GUP_PIN_COUNTING_BIAS;
215 
216 		/*
217 		 * Similar to try_grab_compound_head(): even if using the
218 		 * hpage_pincount_add/_sub() routines, be sure to
219 		 * *also* increment the normal page refcount field at least
220 		 * once, so that the page really is pinned.
221 		 */
222 		page_ref_add(page, refs);
223 
224 		mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_ACQUIRED, 1);
225 	}
226 
227 	return true;
228 }
229 
230 /**
231  * unpin_user_page() - release a dma-pinned page
232  * @page:            pointer to page to be released
233  *
234  * Pages that were pinned via pin_user_pages*() must be released via either
235  * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so
236  * that such pages can be separately tracked and uniquely handled. In
237  * particular, interactions with RDMA and filesystems need special handling.
238  */
239 void unpin_user_page(struct page *page)
240 {
241 	put_compound_head(compound_head(page), 1, FOLL_PIN);
242 }
243 EXPORT_SYMBOL(unpin_user_page);
244 
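/*
 * A minimal usage sketch (the helper name and calling context are made up,
 * and it is not used anywhere in this file): pin one page of a user buffer
 * for DMA with pin_user_pages_fast(), then release it with unpin_user_page().
 */
static int __maybe_unused gup_demo_pin_one_page(unsigned long uaddr)
{
	struct page *page;
	int pinned;

	/* FOLL_WRITE: the (imaginary) device will write into the page. */
	pinned = pin_user_pages_fast(uaddr & PAGE_MASK, 1, FOLL_WRITE, &page);
	if (pinned != 1)
		return pinned < 0 ? pinned : -EFAULT;

	/* ... hand the page to the hardware and wait for completion ... */

	unpin_user_page(page);
	return 0;
}
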
245 static inline void compound_range_next(unsigned long i, unsigned long npages,
246 				       struct page **list, struct page **head,
247 				       unsigned int *ntails)
248 {
249 	struct page *next, *page;
250 	unsigned int nr = 1;
251 
252 	if (i >= npages)
253 		return;
254 
255 	next = *list + i;
256 	page = compound_head(next);
257 	if (PageCompound(page) && compound_order(page) >= 1)
258 		nr = min_t(unsigned int,
259 			   page + compound_nr(page) - next, npages - i);
260 
261 	*head = page;
262 	*ntails = nr;
263 }
264 
265 #define for_each_compound_range(__i, __list, __npages, __head, __ntails) \
266 	for (__i = 0, \
267 	     compound_range_next(__i, __npages, __list, &(__head), &(__ntails)); \
268 	     __i < __npages; __i += __ntails, \
269 	     compound_range_next(__i, __npages, __list, &(__head), &(__ntails)))
270 
271 static inline void compound_next(unsigned long i, unsigned long npages,
272 				 struct page **list, struct page **head,
273 				 unsigned int *ntails)
274 {
275 	struct page *page;
276 	unsigned int nr;
277 
278 	if (i >= npages)
279 		return;
280 
281 	page = compound_head(list[i]);
282 	for (nr = i + 1; nr < npages; nr++) {
283 		if (compound_head(list[nr]) != page)
284 			break;
285 	}
286 
287 	*head = page;
288 	*ntails = nr - i;
289 }
290 
291 #define for_each_compound_head(__i, __list, __npages, __head, __ntails) \
292 	for (__i = 0, \
293 	     compound_next(__i, __npages, __list, &(__head), &(__ntails)); \
294 	     __i < __npages; __i += __ntails, \
295 	     compound_next(__i, __npages, __list, &(__head), &(__ntails)))
296 
297 /**
298  * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
299  * @pages:  array of pages to be maybe marked dirty, and definitely released.
300  * @npages: number of pages in the @pages array.
301  * @make_dirty: whether to mark the pages dirty
302  *
303  * "gup-pinned page" refers to a page that has had one of the get_user_pages()
304  * variants called on that page.
305  *
306  * For each page in the @pages array, make that page (or its head page, if a
307  * compound page) dirty, if @make_dirty is true, and if the page was previously
308  * listed as clean. In any case, releases all pages using unpin_user_page(),
309  * possibly via unpin_user_pages(), for the non-dirty case.
310  *
311  * Please see the unpin_user_page() documentation for details.
312  *
313  * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
314  * required, then the caller should a) verify that this is really correct,
315  * because _lock() is usually required, and b) hand code it:
316  * set_page_dirty(), unpin_user_page().
317  *
318  */
319 void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
320 				 bool make_dirty)
321 {
322 	unsigned long index;
323 	struct page *head;
324 	unsigned int ntails;
325 
326 	if (!make_dirty) {
327 		unpin_user_pages(pages, npages);
328 		return;
329 	}
330 
331 	for_each_compound_head(index, pages, npages, head, ntails) {
332 		/*
333 		 * Checking PageDirty at this point may race with
334 		 * clear_page_dirty_for_io(), but that's OK. Two key
335 		 * cases:
336 		 *
337 		 * 1) This code sees the page as already dirty, so it
338 		 * skips the call to set_page_dirty(). That could happen
339 		 * because clear_page_dirty_for_io() called
340 		 * page_mkclean(), followed by set_page_dirty().
341 		 * However, now the page is going to get written back,
342 		 * which meets the original intention of setting it
343 		 * dirty, so all is well: clear_page_dirty_for_io() goes
344 		 * on to call TestClearPageDirty(), and write the page
345 		 * back.
346 		 *
347 		 * 2) This code sees the page as clean, so it calls
348 		 * set_page_dirty(). The page stays dirty, despite being
349 		 * written back, so it gets written back again in the
350 		 * next writeback cycle. This is harmless.
351 		 */
352 		if (!PageDirty(head))
353 			set_page_dirty_lock(head);
354 		put_compound_head(head, ntails, FOLL_PIN);
355 	}
356 }
357 EXPORT_SYMBOL(unpin_user_pages_dirty_lock);
358 
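/*
 * Sketch of the intended calling pattern (hypothetical DMA completion
 * handler, not used in this file): after the device has written into a batch
 * of pinned pages, release them and mark them dirty in a single call.
 */
static void __maybe_unused gup_demo_dma_read_done(struct page **pages,
						  unsigned long npages,
						  bool device_wrote_data)
{
	/* Only dirty the pages if the hardware actually modified them. */
	unpin_user_pages_dirty_lock(pages, npages, device_wrote_data);
}
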
359 /**
360  * unpin_user_page_range_dirty_lock() - release and optionally dirty
361  * gup-pinned page range
362  *
363  * @page:  the starting page of a range that may be marked dirty, and will definitely be released.
364  * @npages: number of consecutive pages to release.
365  * @make_dirty: whether to mark the pages dirty
366  *
367  * "gup-pinned page range" refers to a range of pages that has had one of the
368  * pin_user_pages() variants called on it.
369  *
370  * For the page range defined by [page .. page+npages-1], make that range (or
371  * its head pages, if a compound page) dirty, if @make_dirty is true, and if the
372  * page range was previously listed as clean.
373  *
374  * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
375  * required, then the caller should a) verify that this is really correct,
376  * because _lock() is usually required, and b) hand code it:
377  * set_page_dirty(), unpin_user_page().
378  *
379  */
380 void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
381 				      bool make_dirty)
382 {
383 	unsigned long index;
384 	struct page *head;
385 	unsigned int ntails;
386 
387 	for_each_compound_range(index, &page, npages, head, ntails) {
388 		if (make_dirty && !PageDirty(head))
389 			set_page_dirty_lock(head);
390 		put_compound_head(head, ntails, FOLL_PIN);
391 	}
392 }
393 EXPORT_SYMBOL(unpin_user_page_range_dirty_lock);
394 
395 /**
396  * unpin_user_pages() - release an array of gup-pinned pages.
397  * @pages:  array of pages to be released.
398  * @npages: number of pages in the @pages array.
399  *
400  * For each page in the @pages array, release the page using unpin_user_page().
401  *
402  * Please see the unpin_user_page() documentation for details.
403  */
404 void unpin_user_pages(struct page **pages, unsigned long npages)
405 {
406 	unsigned long index;
407 	struct page *head;
408 	unsigned int ntails;
409 
410 	/*
411 	 * If this WARN_ON() fires, then the system *might* be leaking pages (by
412 	 * leaving them pinned), but probably not. More likely, gup/pup returned
413 	 * a hard -ERRNO error to the caller, who erroneously passed it here.
414 	 */
415 	if (WARN_ON(IS_ERR_VALUE(npages)))
416 		return;
417 
418 	for_each_compound_head(index, pages, npages, head, ntails)
419 		put_compound_head(head, ntails, FOLL_PIN);
420 }
421 EXPORT_SYMBOL(unpin_user_pages);
422 
423 /*
424  * Set MMF_HAS_PINNED if not set yet; once set, it stays for the mm's
425  * lifetime.  Avoid setting the bit unless necessary, or it might cause write
426  * cacheline bouncing on large SMP machines for concurrent pinned gups.
427  */
428 static inline void mm_set_has_pinned_flag(unsigned long *mm_flags)
429 {
430 	if (!test_bit(MMF_HAS_PINNED, mm_flags))
431 		set_bit(MMF_HAS_PINNED, mm_flags);
432 }
433 
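/*
 * The flag set above is consumed by fork()'s copy-on-write logic when it
 * decides whether a page must be copied eagerly for a pinned mapping.
 * Roughly (see page_needs_cow_for_dma() in include/linux/mm.h for the
 * authoritative version):
 *
 *	if (!is_cow_mapping(vma->vm_flags))
 *		return false;
 *	if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags))
 *		return false;
 *	return page_maybe_dma_pinned(page);
 */
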
434 #ifdef CONFIG_MMU
435 static struct page *no_page_table(struct vm_area_struct *vma,
436 		unsigned int flags)
437 {
438 	/*
439 	 * When core dumping an enormous anonymous area that nobody
440 	 * has touched so far, we don't want to allocate unnecessary pages or
441 	 * page tables.  Return error instead of NULL to skip handle_mm_fault,
442 	 * then get_dump_page() will return NULL to leave a hole in the dump.
443 	 * But we can only make this optimization where a hole would surely
444 	 * be zero-filled if handle_mm_fault() actually did handle it.
445 	 */
446 	if ((flags & FOLL_DUMP) &&
447 			(vma_is_anonymous(vma) || !vma->vm_ops->fault))
448 		return ERR_PTR(-EFAULT);
449 	return NULL;
450 }
451 
452 static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
453 		pte_t *pte, unsigned int flags)
454 {
455 	/* No page to get reference */
456 	if (flags & FOLL_GET)
457 		return -EFAULT;
458 
459 	if (flags & FOLL_TOUCH) {
460 		pte_t entry = *pte;
461 
462 		if (flags & FOLL_WRITE)
463 			entry = pte_mkdirty(entry);
464 		entry = pte_mkyoung(entry);
465 
466 		if (!pte_same(*pte, entry)) {
467 			set_pte_at(vma->vm_mm, address, pte, entry);
468 			update_mmu_cache(vma, address, pte);
469 		}
470 	}
471 
472 	/* Proper page table entry exists, but no corresponding struct page */
473 	return -EEXIST;
474 }
475 
476 /*
477  * FOLL_FORCE can write to even unwritable pte's, but only
478  * after we've gone through a COW cycle and they are dirty.
479  */
480 static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
481 {
482 	return pte_write(pte) ||
483 		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
484 }
485 
486 static struct page *follow_page_pte(struct vm_area_struct *vma,
487 		unsigned long address, pmd_t *pmd, unsigned int flags,
488 		struct dev_pagemap **pgmap)
489 {
490 	struct mm_struct *mm = vma->vm_mm;
491 	struct page *page;
492 	spinlock_t *ptl;
493 	pte_t *ptep, pte;
494 	int ret;
495 
496 	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
497 	if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
498 			 (FOLL_PIN | FOLL_GET)))
499 		return ERR_PTR(-EINVAL);
500 retry:
501 	if (unlikely(pmd_bad(*pmd)))
502 		return no_page_table(vma, flags);
503 
504 	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
505 	pte = *ptep;
506 	if (!pte_present(pte)) {
507 		swp_entry_t entry;
508 		/*
509 		 * KSM's break_ksm() relies upon recognizing a ksm page
510 		 * even while it is being migrated, so for that case we
511 		 * need migration_entry_wait().
512 		 */
513 		if (likely(!(flags & FOLL_MIGRATION)))
514 			goto no_page;
515 		if (pte_none(pte))
516 			goto no_page;
517 		entry = pte_to_swp_entry(pte);
518 		if (!is_migration_entry(entry))
519 			goto no_page;
520 		pte_unmap_unlock(ptep, ptl);
521 		migration_entry_wait(mm, pmd, address);
522 		goto retry;
523 	}
524 	if ((flags & FOLL_NUMA) && pte_protnone(pte))
525 		goto no_page;
526 	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
527 		pte_unmap_unlock(ptep, ptl);
528 		return NULL;
529 	}
530 
531 	page = vm_normal_page(vma, address, pte);
532 	if (!page && pte_devmap(pte) && (flags & (FOLL_GET | FOLL_PIN))) {
533 		/*
534 		 * Only return device mapping pages in the FOLL_GET or FOLL_PIN
535 		 * case since they are only valid while holding the pgmap
536 		 * reference.
537 		 */
538 		*pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap);
539 		if (*pgmap)
540 			page = pte_page(pte);
541 		else
542 			goto no_page;
543 	} else if (unlikely(!page)) {
544 		if (flags & FOLL_DUMP) {
545 			/* Avoid special (like zero) pages in core dumps */
546 			page = ERR_PTR(-EFAULT);
547 			goto out;
548 		}
549 
550 		if (is_zero_pfn(pte_pfn(pte))) {
551 			page = pte_page(pte);
552 		} else {
553 			ret = follow_pfn_pte(vma, address, ptep, flags);
554 			page = ERR_PTR(ret);
555 			goto out;
556 		}
557 	}
558 
559 	/* try_grab_page() does nothing unless FOLL_GET or FOLL_PIN is set. */
560 	if (unlikely(!try_grab_page(page, flags))) {
561 		page = ERR_PTR(-ENOMEM);
562 		goto out;
563 	}
564 	/*
565 	 * We need to make the page accessible if and only if we are going
566 	 * to access its content (the FOLL_PIN case).  Please see
567 	 * Documentation/core-api/pin_user_pages.rst for details.
568 	 */
569 	if (flags & FOLL_PIN) {
570 		ret = arch_make_page_accessible(page);
571 		if (ret) {
572 			unpin_user_page(page);
573 			page = ERR_PTR(ret);
574 			goto out;
575 		}
576 	}
577 	if (flags & FOLL_TOUCH) {
578 		if ((flags & FOLL_WRITE) &&
579 		    !pte_dirty(pte) && !PageDirty(page))
580 			set_page_dirty(page);
581 		/*
582 		 * pte_mkyoung() would be more correct here, but atomic care
583 		 * is needed to avoid losing the dirty bit: it is easier to use
584 		 * mark_page_accessed().
585 		 */
586 		mark_page_accessed(page);
587 	}
588 	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
589 		/* Do not mlock pte-mapped THP */
590 		if (PageTransCompound(page))
591 			goto out;
592 
593 		/*
594 		 * The preliminary mapping check is mainly to avoid the
595 		 * pointless overhead of lock_page on the ZERO_PAGE
596 		 * which might bounce very badly if there is contention.
597 		 *
598 		 * If the page is already locked, we don't need to
599 		 * handle it now - vmscan will handle it later if and
600 		 * when it attempts to reclaim the page.
601 		 */
602 		if (page->mapping && trylock_page(page)) {
603 			lru_add_drain();  /* push cached pages to LRU */
604 			/*
605 			 * Because we lock page here, and migration is
606 			 * blocked by the pte's page reference, and we
607 			 * know the page is still mapped, we don't even
608 			 * need to check for file-cache page truncation.
609 			 */
610 			mlock_vma_page(page);
611 			unlock_page(page);
612 		}
613 	}
614 out:
615 	pte_unmap_unlock(ptep, ptl);
616 	return page;
617 no_page:
618 	pte_unmap_unlock(ptep, ptl);
619 	if (!pte_none(pte))
620 		return NULL;
621 	return no_page_table(vma, flags);
622 }
623 
624 static struct page *follow_pmd_mask(struct vm_area_struct *vma,
625 				    unsigned long address, pud_t *pudp,
626 				    unsigned int flags,
627 				    struct follow_page_context *ctx)
628 {
629 	pmd_t *pmd, pmdval;
630 	spinlock_t *ptl;
631 	struct page *page;
632 	struct mm_struct *mm = vma->vm_mm;
633 
634 	pmd = pmd_offset(pudp, address);
635 	/*
636 	 * The READ_ONCE() will stabilize the pmdval in a register or
637 	 * on the stack so that it will stop changing under the code.
638 	 */
639 	pmdval = READ_ONCE(*pmd);
640 	if (pmd_none(pmdval))
641 		return no_page_table(vma, flags);
642 	if (pmd_huge(pmdval) && is_vm_hugetlb_page(vma)) {
643 		page = follow_huge_pmd(mm, address, pmd, flags);
644 		if (page)
645 			return page;
646 		return no_page_table(vma, flags);
647 	}
648 	if (is_hugepd(__hugepd(pmd_val(pmdval)))) {
649 		page = follow_huge_pd(vma, address,
650 				      __hugepd(pmd_val(pmdval)), flags,
651 				      PMD_SHIFT);
652 		if (page)
653 			return page;
654 		return no_page_table(vma, flags);
655 	}
656 retry:
657 	if (!pmd_present(pmdval)) {
658 		if (likely(!(flags & FOLL_MIGRATION)))
659 			return no_page_table(vma, flags);
660 		VM_BUG_ON(thp_migration_supported() &&
661 				  !is_pmd_migration_entry(pmdval));
662 		if (is_pmd_migration_entry(pmdval))
663 			pmd_migration_entry_wait(mm, pmd);
664 		pmdval = READ_ONCE(*pmd);
665 		/*
666 		 * MADV_DONTNEED may convert the pmd to null because
667 		 * mmap_lock is held in read mode
668 		 */
669 		if (pmd_none(pmdval))
670 			return no_page_table(vma, flags);
671 		goto retry;
672 	}
673 	if (pmd_devmap(pmdval)) {
674 		ptl = pmd_lock(mm, pmd);
675 		page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
676 		spin_unlock(ptl);
677 		if (page)
678 			return page;
679 	}
680 	if (likely(!pmd_trans_huge(pmdval)))
681 		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
682 
683 	if ((flags & FOLL_NUMA) && pmd_protnone(pmdval))
684 		return no_page_table(vma, flags);
685 
686 retry_locked:
687 	ptl = pmd_lock(mm, pmd);
688 	if (unlikely(pmd_none(*pmd))) {
689 		spin_unlock(ptl);
690 		return no_page_table(vma, flags);
691 	}
692 	if (unlikely(!pmd_present(*pmd))) {
693 		spin_unlock(ptl);
694 		if (likely(!(flags & FOLL_MIGRATION)))
695 			return no_page_table(vma, flags);
696 		pmd_migration_entry_wait(mm, pmd);
697 		goto retry_locked;
698 	}
699 	if (unlikely(!pmd_trans_huge(*pmd))) {
700 		spin_unlock(ptl);
701 		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
702 	}
703 	if (flags & FOLL_SPLIT_PMD) {
704 		int ret;
705 		page = pmd_page(*pmd);
706 		if (is_huge_zero_page(page)) {
707 			spin_unlock(ptl);
708 			ret = 0;
709 			split_huge_pmd(vma, pmd, address);
710 			if (pmd_trans_unstable(pmd))
711 				ret = -EBUSY;
712 		} else {
713 			spin_unlock(ptl);
714 			split_huge_pmd(vma, pmd, address);
715 			ret = pte_alloc(mm, pmd) ? -ENOMEM : 0;
716 		}
717 
718 		return ret ? ERR_PTR(ret) :
719 			follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
720 	}
721 	page = follow_trans_huge_pmd(vma, address, pmd, flags);
722 	spin_unlock(ptl);
723 	ctx->page_mask = HPAGE_PMD_NR - 1;
724 	return page;
725 }
726 
727 static struct page *follow_pud_mask(struct vm_area_struct *vma,
728 				    unsigned long address, p4d_t *p4dp,
729 				    unsigned int flags,
730 				    struct follow_page_context *ctx)
731 {
732 	pud_t *pud;
733 	spinlock_t *ptl;
734 	struct page *page;
735 	struct mm_struct *mm = vma->vm_mm;
736 
737 	pud = pud_offset(p4dp, address);
738 	if (pud_none(*pud))
739 		return no_page_table(vma, flags);
740 	if (pud_huge(*pud) && is_vm_hugetlb_page(vma)) {
741 		page = follow_huge_pud(mm, address, pud, flags);
742 		if (page)
743 			return page;
744 		return no_page_table(vma, flags);
745 	}
746 	if (is_hugepd(__hugepd(pud_val(*pud)))) {
747 		page = follow_huge_pd(vma, address,
748 				      __hugepd(pud_val(*pud)), flags,
749 				      PUD_SHIFT);
750 		if (page)
751 			return page;
752 		return no_page_table(vma, flags);
753 	}
754 	if (pud_devmap(*pud)) {
755 		ptl = pud_lock(mm, pud);
756 		page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap);
757 		spin_unlock(ptl);
758 		if (page)
759 			return page;
760 	}
761 	if (unlikely(pud_bad(*pud)))
762 		return no_page_table(vma, flags);
763 
764 	return follow_pmd_mask(vma, address, pud, flags, ctx);
765 }
766 
767 static struct page *follow_p4d_mask(struct vm_area_struct *vma,
768 				    unsigned long address, pgd_t *pgdp,
769 				    unsigned int flags,
770 				    struct follow_page_context *ctx)
771 {
772 	p4d_t *p4d;
773 	struct page *page;
774 
775 	p4d = p4d_offset(pgdp, address);
776 	if (p4d_none(*p4d))
777 		return no_page_table(vma, flags);
778 	BUILD_BUG_ON(p4d_huge(*p4d));
779 	if (unlikely(p4d_bad(*p4d)))
780 		return no_page_table(vma, flags);
781 
782 	if (is_hugepd(__hugepd(p4d_val(*p4d)))) {
783 		page = follow_huge_pd(vma, address,
784 				      __hugepd(p4d_val(*p4d)), flags,
785 				      P4D_SHIFT);
786 		if (page)
787 			return page;
788 		return no_page_table(vma, flags);
789 	}
790 	return follow_pud_mask(vma, address, p4d, flags, ctx);
791 }
792 
793 /**
794  * follow_page_mask - look up a page descriptor from a user-virtual address
795  * @vma: vm_area_struct mapping @address
796  * @address: virtual address to look up
797  * @flags: flags modifying lookup behaviour
798  * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a
799  *       pointer to output page_mask
800  *
801  * @flags can have FOLL_ flags set, defined in <linux/mm.h>
802  *
803  * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
804  * the device's dev_pagemap metadata to avoid repeating expensive lookups.
805  *
806  * On output, the @ctx->page_mask is set according to the size of the page.
807  *
808  * Return: the mapped (struct page *), %NULL if no mapping exists, or
809  * an error pointer if there is a mapping to something not represented
810  * by a page descriptor (see also vm_normal_page()).
811  */
812 static struct page *follow_page_mask(struct vm_area_struct *vma,
813 			      unsigned long address, unsigned int flags,
814 			      struct follow_page_context *ctx)
815 {
816 	pgd_t *pgd;
817 	struct page *page;
818 	struct mm_struct *mm = vma->vm_mm;
819 
820 	ctx->page_mask = 0;
821 
822 	/* make this handle hugepd */
823 	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
824 	if (!IS_ERR(page)) {
825 		WARN_ON_ONCE(flags & (FOLL_GET | FOLL_PIN));
826 		return page;
827 	}
828 
829 	pgd = pgd_offset(mm, address);
830 
831 	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
832 		return no_page_table(vma, flags);
833 
834 	if (pgd_huge(*pgd)) {
835 		page = follow_huge_pgd(mm, address, pgd, flags);
836 		if (page)
837 			return page;
838 		return no_page_table(vma, flags);
839 	}
840 	if (is_hugepd(__hugepd(pgd_val(*pgd)))) {
841 		page = follow_huge_pd(vma, address,
842 				      __hugepd(pgd_val(*pgd)), flags,
843 				      PGDIR_SHIFT);
844 		if (page)
845 			return page;
846 		return no_page_table(vma, flags);
847 	}
848 
849 	return follow_p4d_mask(vma, address, pgd, flags, ctx);
850 }
851 
852 struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
853 			 unsigned int foll_flags)
854 {
855 	struct follow_page_context ctx = { NULL };
856 	struct page *page;
857 
858 	page = follow_page_mask(vma, address, foll_flags, &ctx);
859 	if (ctx.pgmap)
860 		put_dev_pagemap(ctx.pgmap);
861 	return page;
862 }
863 
864 static int get_gate_page(struct mm_struct *mm, unsigned long address,
865 		unsigned int gup_flags, struct vm_area_struct **vma,
866 		struct page **page)
867 {
868 	pgd_t *pgd;
869 	p4d_t *p4d;
870 	pud_t *pud;
871 	pmd_t *pmd;
872 	pte_t *pte;
873 	int ret = -EFAULT;
874 
875 	/* user gate pages are read-only */
876 	if (gup_flags & FOLL_WRITE)
877 		return -EFAULT;
878 	if (address > TASK_SIZE)
879 		pgd = pgd_offset_k(address);
880 	else
881 		pgd = pgd_offset_gate(mm, address);
882 	if (pgd_none(*pgd))
883 		return -EFAULT;
884 	p4d = p4d_offset(pgd, address);
885 	if (p4d_none(*p4d))
886 		return -EFAULT;
887 	pud = pud_offset(p4d, address);
888 	if (pud_none(*pud))
889 		return -EFAULT;
890 	pmd = pmd_offset(pud, address);
891 	if (!pmd_present(*pmd))
892 		return -EFAULT;
893 	VM_BUG_ON(pmd_trans_huge(*pmd));
894 	pte = pte_offset_map(pmd, address);
895 	if (pte_none(*pte))
896 		goto unmap;
897 	*vma = get_gate_vma(mm);
898 	if (!page)
899 		goto out;
900 	*page = vm_normal_page(*vma, address, *pte);
901 	if (!*page) {
902 		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
903 			goto unmap;
904 		*page = pte_page(*pte);
905 	}
906 	if (unlikely(!try_grab_page(*page, gup_flags))) {
907 		ret = -ENOMEM;
908 		goto unmap;
909 	}
910 out:
911 	ret = 0;
912 unmap:
913 	pte_unmap(pte);
914 	return ret;
915 }
916 
917 /*
918  * mmap_lock must be held on entry.  If @locked != NULL and *@flags
919  * does not include FOLL_NOWAIT, the mmap_lock may be released.  If it
920  * is, *@locked will be set to 0 and -EBUSY returned.
921  */
922 static int faultin_page(struct vm_area_struct *vma,
923 		unsigned long address, unsigned int *flags, int *locked)
924 {
925 	unsigned int fault_flags = 0;
926 	vm_fault_t ret;
927 
928 	/* mlock all present pages, but do not fault in new pages */
929 	if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
930 		return -ENOENT;
931 	if (*flags & FOLL_WRITE)
932 		fault_flags |= FAULT_FLAG_WRITE;
933 	if (*flags & FOLL_REMOTE)
934 		fault_flags |= FAULT_FLAG_REMOTE;
935 	if (locked)
936 		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
937 	if (*flags & FOLL_NOWAIT)
938 		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
939 	if (*flags & FOLL_TRIED) {
940 		/*
941 		 * Note: FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_TRIED
942 		 * can co-exist
943 		 */
944 		fault_flags |= FAULT_FLAG_TRIED;
945 	}
946 
947 	ret = handle_mm_fault(vma, address, fault_flags, NULL);
948 	if (ret & VM_FAULT_ERROR) {
949 		int err = vm_fault_to_errno(ret, *flags);
950 
951 		if (err)
952 			return err;
953 		BUG();
954 	}
955 
956 	if (ret & VM_FAULT_RETRY) {
957 		if (locked && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
958 			*locked = 0;
959 		return -EBUSY;
960 	}
961 
962 	/*
963 	 * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
964 	 * necessary, even if maybe_mkwrite decided not to set pte_write. We
965 	 * can thus safely do subsequent page lookups as if they were reads.
966 	 * But only do so when looping for pte_write is futile: in some cases
967 	 * userspace may also be wanting to write to the gotten user page,
968 	 * which a read fault here might prevent (a readonly page might get
969 	 * reCOWed by userspace write).
970 	 */
971 	if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
972 		*flags |= FOLL_COW;
973 	return 0;
974 }
975 
976 static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
977 {
978 	vm_flags_t vm_flags = vma->vm_flags;
979 	int write = (gup_flags & FOLL_WRITE);
980 	int foreign = (gup_flags & FOLL_REMOTE);
981 
982 	if (vm_flags & (VM_IO | VM_PFNMAP))
983 		return -EFAULT;
984 
985 	if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
986 		return -EFAULT;
987 
988 	if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma))
989 		return -EOPNOTSUPP;
990 
991 	if (write) {
992 		if (!(vm_flags & VM_WRITE)) {
993 			if (!(gup_flags & FOLL_FORCE))
994 				return -EFAULT;
995 			/*
996 			 * We used to let the write,force case do COW in a
997 			 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
998 			 * set a breakpoint in a read-only mapping of an
999 			 * executable, without corrupting the file (yet only
1000 			 * when that file had been opened for writing!).
1001 			 * Anon pages in shared mappings are surprising: now
1002 			 * just reject it.
1003 			 */
1004 			if (!is_cow_mapping(vm_flags))
1005 				return -EFAULT;
1006 		}
1007 	} else if (!(vm_flags & VM_READ)) {
1008 		if (!(gup_flags & FOLL_FORCE))
1009 			return -EFAULT;
1010 		/*
1011 		 * Is there actually any vma we can reach here which does not
1012 		 * have VM_MAYREAD set?
1013 		 */
1014 		if (!(vm_flags & VM_MAYREAD))
1015 			return -EFAULT;
1016 	}
1017 	/*
1018 	 * gups are always data accesses, not instruction
1019 	 * fetches, so execute=false here
1020 	 */
1021 	if (!arch_vma_access_permitted(vma, write, false, foreign))
1022 		return -EFAULT;
1023 	return 0;
1024 }
1025 
1026 /**
1027  * __get_user_pages() - pin user pages in memory
1028  * @mm:		mm_struct of target mm
1029  * @start:	starting user address
1030  * @nr_pages:	number of pages from start to pin
1031  * @gup_flags:	flags modifying pin behaviour
1032  * @pages:	array that receives pointers to the pages pinned.
1033  *		Should be at least nr_pages long. Or NULL, if caller
1034  *		only intends to ensure the pages are faulted in.
1035  * @vmas:	array of pointers to vmas corresponding to each page.
1036  *		Or NULL if the caller does not require them.
1037  * @locked:     whether the mmap_lock is still held
1038  *
1039  * Returns either number of pages pinned (which may be less than the
1040  * number requested), or an error. Details about the return value:
1041  *
1042  * -- If nr_pages is 0, returns 0.
1043  * -- If nr_pages is >0, but no pages were pinned, returns -errno.
1044  * -- If nr_pages is >0, and some pages were pinned, returns the number of
1045  *    pages pinned. Again, this may be less than nr_pages.
1046  * -- 0 return value is possible when the fault would need to be retried.
1047  *
1048  * The caller is responsible for releasing returned @pages, via put_page().
1049  *
1050  * @vmas are valid only as long as mmap_lock is held.
1051  *
1052  * Must be called with mmap_lock held.  It may be released.  See below.
1053  *
1054  * __get_user_pages walks a process's page tables and takes a reference to
1055  * each struct page that each user address corresponds to at a given
1056  * instant. That is, it takes the page that would be accessed if a user
1057  * thread accesses the given user virtual address at that instant.
1058  *
1059  * This does not guarantee that the page exists in the user mappings when
1060  * __get_user_pages returns, and there may even be a completely different
1061  * page there in some cases (e.g. if mmapped pagecache has been invalidated
1062  * and subsequently re-faulted). However, it does guarantee that the page
1063  * won't be freed completely. And mostly callers simply care that the page
1064  * contains data that was valid *at some point in time*. Typically, an IO
1065  * or similar operation cannot guarantee anything stronger anyway because
1066  * locks can't be held over the syscall boundary.
1067  *
1068  * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
1069  * the page is written to, set_page_dirty (or set_page_dirty_lock, as
1070  * appropriate) must be called after the page is finished with, and
1071  * before put_page is called.
1072  *
1073  * If @locked != NULL, *@locked will be set to 0 when mmap_lock is
1074  * released by an up_read().  That can happen if @gup_flags does not
1075  * have FOLL_NOWAIT.
1076  *
1077  * A caller using such a combination of @locked and @gup_flags
1078  * must therefore hold the mmap_lock for reading only, and recognize
1079  * when it's been released.  Otherwise, it must be held for either
1080  * reading or writing and will not be released.
1081  *
1082  * In most cases, get_user_pages or get_user_pages_fast should be used
1083  * instead of __get_user_pages. __get_user_pages should be used only if
1084  * you need some special @gup_flags.
1085  */
1086 static long __get_user_pages(struct mm_struct *mm,
1087 		unsigned long start, unsigned long nr_pages,
1088 		unsigned int gup_flags, struct page **pages,
1089 		struct vm_area_struct **vmas, int *locked)
1090 {
1091 	long ret = 0, i = 0;
1092 	struct vm_area_struct *vma = NULL;
1093 	struct follow_page_context ctx = { NULL };
1094 
1095 	if (!nr_pages)
1096 		return 0;
1097 
1098 	start = untagged_addr(start);
1099 
1100 	VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));
1101 
1102 	/*
1103 	 * If FOLL_FORCE is set then do not force a full fault as the hinting
1104 	 * fault information is unrelated to the reference behaviour of a task
1105 	 * using the address space
1106 	 */
1107 	if (!(gup_flags & FOLL_FORCE))
1108 		gup_flags |= FOLL_NUMA;
1109 
1110 	do {
1111 		struct page *page;
1112 		unsigned int foll_flags = gup_flags;
1113 		unsigned int page_increm;
1114 
1115 		/* first iteration or crossing a vma boundary */
1116 		if (!vma || start >= vma->vm_end) {
1117 			vma = find_extend_vma(mm, start);
1118 			if (!vma && in_gate_area(mm, start)) {
1119 				ret = get_gate_page(mm, start & PAGE_MASK,
1120 						gup_flags, &vma,
1121 						pages ? &pages[i] : NULL);
1122 				if (ret)
1123 					goto out;
1124 				ctx.page_mask = 0;
1125 				goto next_page;
1126 			}
1127 
1128 			if (!vma) {
1129 				ret = -EFAULT;
1130 				goto out;
1131 			}
1132 			ret = check_vma_flags(vma, gup_flags);
1133 			if (ret)
1134 				goto out;
1135 
1136 			if (is_vm_hugetlb_page(vma)) {
1137 				i = follow_hugetlb_page(mm, vma, pages, vmas,
1138 						&start, &nr_pages, i,
1139 						gup_flags, locked);
1140 				if (locked && *locked == 0) {
1141 					/*
1142 					 * We've got a VM_FAULT_RETRY
1143 					 * and we've lost mmap_lock.
1144 					 * We must stop here.
1145 					 */
1146 					BUG_ON(gup_flags & FOLL_NOWAIT);
1147 					BUG_ON(ret != 0);
1148 					goto out;
1149 				}
1150 				continue;
1151 			}
1152 		}
1153 retry:
1154 		/*
1155 		 * If we have a pending SIGKILL, don't keep faulting pages and
1156 		 * potentially allocating memory.
1157 		 */
1158 		if (fatal_signal_pending(current)) {
1159 			ret = -EINTR;
1160 			goto out;
1161 		}
1162 		cond_resched();
1163 
1164 		page = follow_page_mask(vma, start, foll_flags, &ctx);
1165 		if (!page) {
1166 			ret = faultin_page(vma, start, &foll_flags, locked);
1167 			switch (ret) {
1168 			case 0:
1169 				goto retry;
1170 			case -EBUSY:
1171 				ret = 0;
1172 				fallthrough;
1173 			case -EFAULT:
1174 			case -ENOMEM:
1175 			case -EHWPOISON:
1176 				goto out;
1177 			case -ENOENT:
1178 				goto next_page;
1179 			}
1180 			BUG();
1181 		} else if (PTR_ERR(page) == -EEXIST) {
1182 			/*
1183 			 * Proper page table entry exists, but no corresponding
1184 			 * struct page.
1185 			 */
1186 			goto next_page;
1187 		} else if (IS_ERR(page)) {
1188 			ret = PTR_ERR(page);
1189 			goto out;
1190 		}
1191 		if (pages) {
1192 			pages[i] = page;
1193 			flush_anon_page(vma, page, start);
1194 			flush_dcache_page(page);
1195 			ctx.page_mask = 0;
1196 		}
1197 next_page:
1198 		if (vmas) {
1199 			vmas[i] = vma;
1200 			ctx.page_mask = 0;
1201 		}
1202 		page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
1203 		if (page_increm > nr_pages)
1204 			page_increm = nr_pages;
1205 		i += page_increm;
1206 		start += page_increm * PAGE_SIZE;
1207 		nr_pages -= page_increm;
1208 	} while (nr_pages);
1209 out:
1210 	if (ctx.pgmap)
1211 		put_dev_pagemap(ctx.pgmap);
1212 	return i ? i : ret;
1213 }
1214 
1215 static bool vma_permits_fault(struct vm_area_struct *vma,
1216 			      unsigned int fault_flags)
1217 {
1218 	bool write   = !!(fault_flags & FAULT_FLAG_WRITE);
1219 	bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
1220 	vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;
1221 
1222 	if (!(vm_flags & vma->vm_flags))
1223 		return false;
1224 
1225 	/*
1226 	 * The architecture might have a hardware protection
1227 	 * mechanism other than read/write that can deny access.
1228 	 *
1229 	 * gup always represents data access, not instruction
1230 	 * fetches, so execute=false here:
1231 	 */
1232 	if (!arch_vma_access_permitted(vma, write, false, foreign))
1233 		return false;
1234 
1235 	return true;
1236 }
1237 
1238 /**
1239  * fixup_user_fault() - manually resolve a user page fault
1240  * @mm:		mm_struct of target mm
1241  * @address:	user address
1242  * @fault_flags:flags to pass down to handle_mm_fault()
1243  * @unlocked:	did we unlock the mmap_lock while retrying, maybe NULL if caller
1244  *		does not allow retry. If NULL, the caller must guarantee
1245  *		that fault_flags does not contain FAULT_FLAG_ALLOW_RETRY.
1246  *
1247  * This is meant to be called in the specific scenario where, for locking reasons,
1248  * we try to access user memory in atomic context (within a pagefault_disable()
1249  * section), that access returns -EFAULT, and we want to resolve the user fault before
1250  * trying again.
1251  *
1252  * Typically this is meant to be used by the futex code.
1253  *
1254  * The main difference with get_user_pages() is that this function will
1255  * unconditionally call handle_mm_fault() which will in turn perform all the
1256  * necessary SW fixup of the dirty and young bits in the PTE, while
1257  * get_user_pages() only guarantees to update these in the struct page.
1258  *
1259  * This is important for some architectures where those bits also gate the
1260  * access permission to the page because they are maintained in software.  On
1261  * such architectures, gup() will not be enough to make a subsequent access
1262  * succeed.
1263  *
1264  * This function will not return with an unlocked mmap_lock. So it does not have
1265  * the same semantics wrt the @mm->mmap_lock as filemap_fault() does.
1266  */
1267 int fixup_user_fault(struct mm_struct *mm,
1268 		     unsigned long address, unsigned int fault_flags,
1269 		     bool *unlocked)
1270 {
1271 	struct vm_area_struct *vma;
1272 	vm_fault_t ret, major = 0;
1273 
1274 	address = untagged_addr(address);
1275 
1276 	if (unlocked)
1277 		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
1278 
1279 retry:
1280 	vma = find_extend_vma(mm, address);
1281 	if (!vma || address < vma->vm_start)
1282 		return -EFAULT;
1283 
1284 	if (!vma_permits_fault(vma, fault_flags))
1285 		return -EFAULT;
1286 
1287 	if ((fault_flags & FAULT_FLAG_KILLABLE) &&
1288 	    fatal_signal_pending(current))
1289 		return -EINTR;
1290 
1291 	ret = handle_mm_fault(vma, address, fault_flags, NULL);
1292 	major |= ret & VM_FAULT_MAJOR;
1293 	if (ret & VM_FAULT_ERROR) {
1294 		int err = vm_fault_to_errno(ret, 0);
1295 
1296 		if (err)
1297 			return err;
1298 		BUG();
1299 	}
1300 
1301 	if (ret & VM_FAULT_RETRY) {
1302 		mmap_read_lock(mm);
1303 		*unlocked = true;
1304 		fault_flags |= FAULT_FLAG_TRIED;
1305 		goto retry;
1306 	}
1307 
1308 	return 0;
1309 }
1310 EXPORT_SYMBOL_GPL(fixup_user_fault);
1311 
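/*
 * Sketch of the futex-style calling pattern described above (illustrative;
 * the helper name is made up, and <linux/uaccess.h> is assumed for
 * pagefault_disable() and __get_user()):
 *
 *	static int read_user_word(u32 __user *uaddr, u32 *val)
 *	{
 *		struct mm_struct *mm = current->mm;
 *		bool unlocked = false;
 *		int ret;
 *
 *		do {
 *			pagefault_disable();
 *			ret = __get_user(*val, uaddr);
 *			pagefault_enable();
 *			if (!ret)
 *				return 0;
 *
 *			mmap_read_lock(mm);
 *			ret = fixup_user_fault(mm, (unsigned long)uaddr, 0,
 *					       &unlocked);
 *			mmap_read_unlock(mm);
 *		} while (!ret);
 *
 *		return ret;
 *	}
 */
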
1312 /*
1313  * Please note that this function, unlike __get_user_pages(), will not
1314  * return 0 for nr_pages > 0 without FOLL_NOWAIT.
1315  */
1316 static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
1317 						unsigned long start,
1318 						unsigned long nr_pages,
1319 						struct page **pages,
1320 						struct vm_area_struct **vmas,
1321 						int *locked,
1322 						unsigned int flags)
1323 {
1324 	long ret, pages_done;
1325 	bool lock_dropped;
1326 
1327 	if (locked) {
1328 		/* if VM_FAULT_RETRY can be returned, vmas become invalid */
1329 		BUG_ON(vmas);
1330 		/* check caller initialized locked */
1331 		BUG_ON(*locked != 1);
1332 	}
1333 
1334 	if (flags & FOLL_PIN)
1335 		mm_set_has_pinned_flag(&mm->flags);
1336 
1337 	/*
1338 	 * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior
1339 	 * is to set FOLL_GET if the caller wants pages[] filled in (but has
1340 	 * carelessly failed to specify FOLL_GET), so keep doing that, but only
1341 	 * for FOLL_GET, not for the newer FOLL_PIN.
1342 	 *
1343 	 * FOLL_PIN always expects pages to be non-null, but no need to assert
1344 	 * that here, as any failures will be obvious enough.
1345 	 */
1346 	if (pages && !(flags & FOLL_PIN))
1347 		flags |= FOLL_GET;
1348 
1349 	pages_done = 0;
1350 	lock_dropped = false;
1351 	for (;;) {
1352 		ret = __get_user_pages(mm, start, nr_pages, flags, pages,
1353 				       vmas, locked);
1354 		if (!locked)
1355 			/* VM_FAULT_RETRY couldn't trigger, bypass */
1356 			return ret;
1357 
1358 		/* VM_FAULT_RETRY cannot return errors */
1359 		if (!*locked) {
1360 			BUG_ON(ret < 0);
1361 			BUG_ON(ret >= nr_pages);
1362 		}
1363 
1364 		if (ret > 0) {
1365 			nr_pages -= ret;
1366 			pages_done += ret;
1367 			if (!nr_pages)
1368 				break;
1369 		}
1370 		if (*locked) {
1371 			/*
1372 			 * VM_FAULT_RETRY didn't trigger or it was a
1373 			 * FOLL_NOWAIT.
1374 			 */
1375 			if (!pages_done)
1376 				pages_done = ret;
1377 			break;
1378 		}
1379 		/*
1380 		 * VM_FAULT_RETRY triggered, so seek to the faulting offset.
1381 		 * For the prefault case (!pages) we only update counts.
1382 		 */
1383 		if (likely(pages))
1384 			pages += ret;
1385 		start += ret << PAGE_SHIFT;
1386 		lock_dropped = true;
1387 
1388 retry:
1389 		/*
1390 		 * Repeat on the address that fired VM_FAULT_RETRY
1391 		 * with both FAULT_FLAG_ALLOW_RETRY and
1392 		 * FAULT_FLAG_TRIED.  Note that GUP can be interrupted
1393 		 * by fatal signals, so we need to check it before we
1394 		 * start trying again otherwise it can loop forever.
1395 		 */
1396 
1397 		if (fatal_signal_pending(current)) {
1398 			if (!pages_done)
1399 				pages_done = -EINTR;
1400 			break;
1401 		}
1402 
1403 		ret = mmap_read_lock_killable(mm);
1404 		if (ret) {
1405 			BUG_ON(ret > 0);
1406 			if (!pages_done)
1407 				pages_done = ret;
1408 			break;
1409 		}
1410 
1411 		*locked = 1;
1412 		ret = __get_user_pages(mm, start, 1, flags | FOLL_TRIED,
1413 				       pages, NULL, locked);
1414 		if (!*locked) {
1415 			/* Continue to retry until we succeed */
1416 			BUG_ON(ret != 0);
1417 			goto retry;
1418 		}
1419 		if (ret != 1) {
1420 			BUG_ON(ret > 1);
1421 			if (!pages_done)
1422 				pages_done = ret;
1423 			break;
1424 		}
1425 		nr_pages--;
1426 		pages_done++;
1427 		if (!nr_pages)
1428 			break;
1429 		if (likely(pages))
1430 			pages++;
1431 		start += PAGE_SIZE;
1432 	}
1433 	if (lock_dropped && *locked) {
1434 		/*
1435 		 * We must let the caller know we temporarily dropped the lock
1436 		 * and so the critical section protected by it was lost.
1437 		 */
1438 		mmap_read_unlock(mm);
1439 		*locked = 0;
1440 	}
1441 	return pages_done;
1442 }
1443 
1444 /**
1445  * populate_vma_page_range() -  populate a range of pages in the vma.
1446  * @vma:   target vma
1447  * @start: start address
1448  * @end:   end address
1449  * @locked: whether the mmap_lock is still held
1450  *
1451  * This takes care of mlocking the pages too if VM_LOCKED is set.
1452  *
1453  * Return either number of pages pinned in the vma, or a negative error
1454  * code on error.
1455  *
1456  * vma->vm_mm->mmap_lock must be held.
1457  *
1458  * If @locked is NULL, it may be held for read or write and will
1459  * be unperturbed.
1460  *
1461  * If @locked is non-NULL, it must be held for read only and may be
1462  * released.  If it's released, *@locked will be set to 0.
1463  */
1464 long populate_vma_page_range(struct vm_area_struct *vma,
1465 		unsigned long start, unsigned long end, int *locked)
1466 {
1467 	struct mm_struct *mm = vma->vm_mm;
1468 	unsigned long nr_pages = (end - start) / PAGE_SIZE;
1469 	int gup_flags;
1470 
1471 	VM_BUG_ON(start & ~PAGE_MASK);
1472 	VM_BUG_ON(end   & ~PAGE_MASK);
1473 	VM_BUG_ON_VMA(start < vma->vm_start, vma);
1474 	VM_BUG_ON_VMA(end   > vma->vm_end, vma);
1475 	mmap_assert_locked(mm);
1476 
1477 	gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
1478 	if (vma->vm_flags & VM_LOCKONFAULT)
1479 		gup_flags &= ~FOLL_POPULATE;
1480 	/*
1481 	 * We want to touch writable mappings with a write fault in order
1482 	 * to break COW, except for shared mappings because these don't COW
1483 	 * and we would not want to dirty them for nothing.
1484 	 */
1485 	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
1486 		gup_flags |= FOLL_WRITE;
1487 
1488 	/*
1489 	 * We want mlock to succeed for regions that have any permissions
1490 	 * other than PROT_NONE.
1491 	 */
1492 	if (vma_is_accessible(vma))
1493 		gup_flags |= FOLL_FORCE;
1494 
1495 	/*
1496 	 * We made sure addr is within a VMA, so the following will
1497 	 * not result in a stack expansion that recurses back here.
1498 	 */
1499 	return __get_user_pages(mm, start, nr_pages, gup_flags,
1500 				NULL, NULL, locked);
1501 }
1502 
1503 /*
1504  * faultin_vma_page_range() - populate (prefault) page tables inside the
1505  *			      given VMA range readable/writable
1506  *
1507  * This takes care of mlocking the pages, too, if VM_LOCKED is set.
1508  *
1509  * @vma: target vma
1510  * @start: start address
1511  * @end: end address
1512  * @write: whether to prefault readable or writable
1513  * @locked: whether the mmap_lock is still held
1514  *
1515  * Returns either number of processed pages in the vma, or a negative error
1516  * code on error (see __get_user_pages()).
1517  *
1518  * vma->vm_mm->mmap_lock must be held. The range must be page-aligned and
1519  * covered by the VMA.
1520  *
1521  * If @locked is NULL, it may be held for read or write and will be unperturbed.
1522  *
1523  * If @locked is non-NULL, it must be held for read only and may be released.  If
1524  * it's released, *@locked will be set to 0.
1525  */
1526 long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
1527 			    unsigned long end, bool write, int *locked)
1528 {
1529 	struct mm_struct *mm = vma->vm_mm;
1530 	unsigned long nr_pages = (end - start) / PAGE_SIZE;
1531 	int gup_flags;
1532 
1533 	VM_BUG_ON(!PAGE_ALIGNED(start));
1534 	VM_BUG_ON(!PAGE_ALIGNED(end));
1535 	VM_BUG_ON_VMA(start < vma->vm_start, vma);
1536 	VM_BUG_ON_VMA(end > vma->vm_end, vma);
1537 	mmap_assert_locked(mm);
1538 
1539 	/*
1540 	 * FOLL_TOUCH: Mark page accessed and thereby young; will also mark
1541 	 *	       the page dirty with FOLL_WRITE -- which doesn't make a
1542 	 *	       difference with !FOLL_FORCE, because the page is writable
1543 	 *	       in the page table.
1544 	 * FOLL_HWPOISON: Return -EHWPOISON instead of -EFAULT when we hit
1545 	 *		  a poisoned page.
1546 	 * FOLL_POPULATE: Always populate memory with VM_LOCKONFAULT.
1547 	 * !FOLL_FORCE: Require proper access permissions.
1548 	 */
1549 	gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK | FOLL_HWPOISON;
1550 	if (write)
1551 		gup_flags |= FOLL_WRITE;
1552 
1553 	/*
1554 	 * See check_vma_flags(): Will return -EFAULT on incompatible mappings
1555 	 * or with insufficient permissions.
1556 	 */
1557 	return __get_user_pages(mm, start, nr_pages, gup_flags,
1558 				NULL, NULL, locked);
1559 }
1560 
1561 /*
1562  * __mm_populate - populate and/or mlock pages within a range of address space.
1563  *
1564  * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
1565  * flags. VMAs must be already marked with the desired vm_flags, and
1566  * mmap_lock must not be held.
1567  */
1568 int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
1569 {
1570 	struct mm_struct *mm = current->mm;
1571 	unsigned long end, nstart, nend;
1572 	struct vm_area_struct *vma = NULL;
1573 	int locked = 0;
1574 	long ret = 0;
1575 
1576 	end = start + len;
1577 
1578 	for (nstart = start; nstart < end; nstart = nend) {
1579 		/*
1580 		 * We want to fault in pages for [nstart; end) address range.
1581 		 * Find first corresponding VMA.
1582 		 */
1583 		if (!locked) {
1584 			locked = 1;
1585 			mmap_read_lock(mm);
1586 			vma = find_vma(mm, nstart);
1587 		} else if (nstart >= vma->vm_end)
1588 			vma = vma->vm_next;
1589 		if (!vma || vma->vm_start >= end)
1590 			break;
1591 		/*
1592 		 * Set [nstart; nend) to intersection of desired address
1593 		 * range with the first VMA. Also, skip undesirable VMA types.
1594 		 */
1595 		nend = min(end, vma->vm_end);
1596 		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
1597 			continue;
1598 		if (nstart < vma->vm_start)
1599 			nstart = vma->vm_start;
1600 		/*
1601 		 * Now fault in a range of pages. populate_vma_page_range()
1602 		 * double checks the vma flags, so that it won't mlock pages
1603 		 * if the vma was already munlocked.
1604 		 */
1605 		ret = populate_vma_page_range(vma, nstart, nend, &locked);
1606 		if (ret < 0) {
1607 			if (ignore_errors) {
1608 				ret = 0;
1609 				continue;	/* continue at next VMA */
1610 			}
1611 			break;
1612 		}
1613 		nend = nstart + ret * PAGE_SIZE;
1614 		ret = 0;
1615 	}
1616 	if (locked)
1617 		mmap_read_unlock(mm);
1618 	return ret;	/* 0 or negative error code */
1619 }
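
/*
 * Callers normally reach __mm_populate() through the mm_populate() wrapper
 * in include/linux/mm.h, e.g. from mmap(MAP_POPULATE) and mlock(). Roughly:
 *
 *	static inline void mm_populate(unsigned long addr, unsigned long len)
 *	{
 *		(void) __mm_populate(addr, len, 1);	// errors are ignored
 *	}
 */
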
1620 #else /* CONFIG_MMU */
1621 static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
1622 		unsigned long nr_pages, struct page **pages,
1623 		struct vm_area_struct **vmas, int *locked,
1624 		unsigned int foll_flags)
1625 {
1626 	struct vm_area_struct *vma;
1627 	unsigned long vm_flags;
1628 	long i;
1629 
1630 	/* calculate required read or write permissions.
1631 	 * If FOLL_FORCE is set, we only require the "MAY" flags.
1632 	 */
1633 	vm_flags  = (foll_flags & FOLL_WRITE) ?
1634 			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
1635 	vm_flags &= (foll_flags & FOLL_FORCE) ?
1636 			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
1637 
1638 	for (i = 0; i < nr_pages; i++) {
1639 		vma = find_vma(mm, start);
1640 		if (!vma)
1641 			goto finish_or_fault;
1642 
1643 		/* protect what we can, including chardevs */
1644 		if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
1645 		    !(vm_flags & vma->vm_flags))
1646 			goto finish_or_fault;
1647 
1648 		if (pages) {
1649 			pages[i] = virt_to_page(start);
1650 			if (pages[i])
1651 				get_page(pages[i]);
1652 		}
1653 		if (vmas)
1654 			vmas[i] = vma;
1655 		start = (start + PAGE_SIZE) & PAGE_MASK;
1656 	}
1657 
1658 	return i;
1659 
1660 finish_or_fault:
1661 	return i ? : -EFAULT;
1662 }
1663 #endif /* !CONFIG_MMU */
1664 
1665 /**
1666  * get_dump_page() - pin user page in memory while writing it to core dump
1667  * @addr: user address
1668  *
1669  * Returns struct page pointer of user page pinned for dump,
1670  * to be freed afterwards by put_page().
1671  *
1672  * Returns NULL on any kind of failure - a hole must then be inserted into
1673  * the corefile, to preserve alignment with its headers; and also returns
1674  * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
1675  * allowing a hole to be left in the corefile to save disk space.
1676  *
1677  * Called without mmap_lock (takes and releases the mmap_lock by itself).
1678  */
1679 #ifdef CONFIG_ELF_CORE
1680 struct page *get_dump_page(unsigned long addr)
1681 {
1682 	struct mm_struct *mm = current->mm;
1683 	struct page *page;
1684 	int locked = 1;
1685 	int ret;
1686 
1687 	if (mmap_read_lock_killable(mm))
1688 		return NULL;
1689 	ret = __get_user_pages_locked(mm, addr, 1, &page, NULL, &locked,
1690 				      FOLL_FORCE | FOLL_DUMP | FOLL_GET);
1691 	if (locked)
1692 		mmap_read_unlock(mm);
1693 	return (ret == 1) ? page : NULL;
1694 }
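
/*
 * Sketch of how the ELF coredump path consumes this, modelled on
 * dump_user_range() in fs/coredump.c (illustrative, not verbatim): emit the
 * page if one was returned, otherwise leave a hole of the same size.
 *
 *	struct page *page = get_dump_page(addr);
 *
 *	if (page) {
 *		void *kaddr = kmap(page);
 *
 *		stop = !dump_emit(cprm, kaddr, PAGE_SIZE);
 *		kunmap(page);
 *		put_page(page);
 *	} else {
 *		stop = !dump_skip(cprm, PAGE_SIZE);
 *	}
 */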
1695 #endif /* CONFIG_ELF_CORE */
1696 
1697 #ifdef CONFIG_MIGRATION
1698 /*
1699  * Check whether all pages are pinnable; if so, return the number of pages.  If some
1700  * pages are not pinnable, migrate them, and unpin all pages. Return zero if
1701  * pages were migrated, or if some pages were not successfully isolated.
1702  * Return a negative error code if migration fails.
1703  */
1704 static long check_and_migrate_movable_pages(unsigned long nr_pages,
1705 					    struct page **pages,
1706 					    unsigned int gup_flags)
1707 {
1708 	unsigned long i;
1709 	unsigned long isolation_error_count = 0;
1710 	bool drain_allow = true;
1711 	LIST_HEAD(movable_page_list);
1712 	long ret = 0;
1713 	struct page *prev_head = NULL;
1714 	struct page *head;
1715 	struct migration_target_control mtc = {
1716 		.nid = NUMA_NO_NODE,
1717 		.gfp_mask = GFP_USER | __GFP_NOWARN,
1718 	};
1719 
1720 	for (i = 0; i < nr_pages; i++) {
1721 		head = compound_head(pages[i]);
1722 		if (head == prev_head)
1723 			continue;
1724 		prev_head = head;
1725 		/*
1726 		 * Since we are going to be pinning these entries, try to
1727 		 * migrate any page that is not pinnable out of the way first.
1728 		 */
1729 		if (!is_pinnable_page(head)) {
1730 			if (PageHuge(head)) {
1731 				if (!isolate_huge_page(head, &movable_page_list))
1732 					isolation_error_count++;
1733 			} else {
1734 				if (!PageLRU(head) && drain_allow) {
1735 					lru_add_drain_all();
1736 					drain_allow = false;
1737 				}
1738 
1739 				if (isolate_lru_page(head)) {
1740 					isolation_error_count++;
1741 					continue;
1742 				}
1743 				list_add_tail(&head->lru, &movable_page_list);
1744 				mod_node_page_state(page_pgdat(head),
1745 						    NR_ISOLATED_ANON +
1746 						    page_is_file_lru(head),
1747 						    thp_nr_pages(head));
1748 			}
1749 		}
1750 	}
1751 
1752 	/*
1753 	 * If the list is empty and there were no isolation errors, then all
1754 	 * pages are already in the correct zone.
1755 	 */
1756 	if (list_empty(&movable_page_list) && !isolation_error_count)
1757 		return nr_pages;
1758 
1759 	if (gup_flags & FOLL_PIN) {
1760 		unpin_user_pages(pages, nr_pages);
1761 	} else {
1762 		for (i = 0; i < nr_pages; i++)
1763 			put_page(pages[i]);
1764 	}
1765 	if (!list_empty(&movable_page_list)) {
1766 		ret = migrate_pages(&movable_page_list, alloc_migration_target,
1767 				    NULL, (unsigned long)&mtc, MIGRATE_SYNC,
1768 				    MR_LONGTERM_PIN);
1769 		if (ret && !list_empty(&movable_page_list))
1770 			putback_movable_pages(&movable_page_list);
1771 	}
1772 
1773 	return ret > 0 ? -ENOMEM : ret;
1774 }
1775 #else
1776 static long check_and_migrate_movable_pages(unsigned long nr_pages,
1777 					    struct page **pages,
1778 					    unsigned int gup_flags)
1779 {
1780 	return nr_pages;
1781 }
1782 #endif /* CONFIG_MIGRATION */
1783 
1784 /*
1785  * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which
1786  * allows us to process the FOLL_LONGTERM flag.
1787  */
1788 static long __gup_longterm_locked(struct mm_struct *mm,
1789 				  unsigned long start,
1790 				  unsigned long nr_pages,
1791 				  struct page **pages,
1792 				  struct vm_area_struct **vmas,
1793 				  unsigned int gup_flags)
1794 {
1795 	unsigned int flags;
1796 	long rc;
1797 
1798 	if (!(gup_flags & FOLL_LONGTERM))
1799 		return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
1800 					       NULL, gup_flags);
1801 	flags = memalloc_pin_save();
1802 	do {
1803 		rc = __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
1804 					     NULL, gup_flags);
1805 		if (rc <= 0)
1806 			break;
1807 		rc = check_and_migrate_movable_pages(rc, pages, gup_flags);
1808 	} while (!rc);
1809 	memalloc_pin_restore(flags);
1810 
1811 	return rc;
1812 }
1813 
1814 static bool is_valid_gup_flags(unsigned int gup_flags)
1815 {
1816 	/*
1817 	 * FOLL_PIN must only be set internally by the pin_user_pages*() APIs,
1818 	 * never directly by the caller, so enforce that with an assertion:
1819 	 */
1820 	if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
1821 		return false;
1822 	/*
1823 	 * FOLL_PIN is a prerequisite to FOLL_LONGTERM. Another way of saying
1824 	 * that is, FOLL_LONGTERM is a specific, more restrictive case of
1825 	 * FOLL_PIN.
1826 	 */
1827 	if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
1828 		return false;
1829 
1830 	return true;
1831 }
1832 
1833 #ifdef CONFIG_MMU
1834 static long __get_user_pages_remote(struct mm_struct *mm,
1835 				    unsigned long start, unsigned long nr_pages,
1836 				    unsigned int gup_flags, struct page **pages,
1837 				    struct vm_area_struct **vmas, int *locked)
1838 {
1839 	/*
1840 	 * Parts of FOLL_LONGTERM behavior are incompatible with
1841 	 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
1842 	 * vmas. However, this only comes up if locked is set, and there are
1843 	 * callers that do request FOLL_LONGTERM, but do not set locked. So,
1844 	 * allow what we can.
1845 	 */
1846 	if (gup_flags & FOLL_LONGTERM) {
1847 		if (WARN_ON_ONCE(locked))
1848 			return -EINVAL;
1849 		/*
1850 		 * This will check the vmas (even if our vmas arg is NULL)
1851 		 * and return -ENOTSUPP if DAX isn't allowed in this case:
1852 		 */
1853 		return __gup_longterm_locked(mm, start, nr_pages, pages,
1854 					     vmas, gup_flags | FOLL_TOUCH |
1855 					     FOLL_REMOTE);
1856 	}
1857 
1858 	return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
1859 				       locked,
1860 				       gup_flags | FOLL_TOUCH | FOLL_REMOTE);
1861 }
1862 
1863 /**
1864  * get_user_pages_remote() - pin user pages in memory
1865  * @mm:		mm_struct of target mm
1866  * @start:	starting user address
1867  * @nr_pages:	number of pages from start to pin
1868  * @gup_flags:	flags modifying lookup behaviour
1869  * @pages:	array that receives pointers to the pages pinned.
1870  *		Should be at least nr_pages long. Or NULL, if caller
1871  *		only intends to ensure the pages are faulted in.
1872  * @vmas:	array of pointers to vmas corresponding to each page.
1873  *		Or NULL if the caller does not require them.
1874  * @locked:	pointer to lock flag indicating whether lock is held and
1875  *		subsequently whether VM_FAULT_RETRY functionality can be
1876  *		utilised. Lock must initially be held.
1877  *
1878  * Returns either number of pages pinned (which may be less than the
1879  * number requested), or an error. Details about the return value:
1880  *
1881  * -- If nr_pages is 0, returns 0.
1882  * -- If nr_pages is >0, but no pages were pinned, returns -errno.
1883  * -- If nr_pages is >0, and some pages were pinned, returns the number of
1884  *    pages pinned. Again, this may be less than nr_pages.
1885  *
1886  * The caller is responsible for releasing returned @pages, via put_page().
1887  *
1888  * @vmas are valid only as long as mmap_lock is held.
1889  *
1890  * Must be called with mmap_lock held for read or write.
1891  *
1892  * get_user_pages_remote walks a process's page tables and takes a reference
1893  * to each struct page that each user address corresponds to at a given
1894  * instant. That is, it takes the page that would be accessed if a user
1895  * thread accesses the given user virtual address at that instant.
1896  *
1897  * This does not guarantee that the page exists in the user mappings when
1898  * get_user_pages_remote returns, and there may even be a completely different
1899  * page there in some cases (eg. if mmapped pagecache has been invalidated
1900  * and subsequently re-faulted). However, it does guarantee that the page
1901  * won't be freed completely. Mostly, callers simply care that the page
1902  * contains data that was valid *at some point in time*. Typically, an IO
1903  * or similar operation cannot guarantee anything stronger anyway because
1904  * locks can't be held over the syscall boundary.
1905  *
1906  * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
1907  * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
1908  * be called after the page is finished with, and before put_page is called.
1909  *
1910  * get_user_pages_remote is typically used for fewer-copy IO operations,
1911  * to get a handle on the memory by some means other than accesses
1912  * via the user virtual addresses. The pages may be submitted for
1913  * DMA to devices or accessed via their kernel linear mapping (via the
1914  * kmap APIs). Care should be taken to use the correct cache flushing APIs.
1915  *
1916  * See also get_user_pages_fast, for performance critical applications.
1917  *
1918  * get_user_pages_remote should be phased out in favor of
1919  * get_user_pages_locked|unlocked or get_user_pages_fast. Callers that
1920  * cannot pass @locked cannot take advantage of FAULT_FLAG_ALLOW_RETRY
1921  * in handle_mm_fault.
1922  */
1923 long get_user_pages_remote(struct mm_struct *mm,
1924 		unsigned long start, unsigned long nr_pages,
1925 		unsigned int gup_flags, struct page **pages,
1926 		struct vm_area_struct **vmas, int *locked)
1927 {
1928 	if (!is_valid_gup_flags(gup_flags))
1929 		return -EINVAL;
1930 
1931 	return __get_user_pages_remote(mm, start, nr_pages, gup_flags,
1932 				       pages, vmas, locked);
1933 }
1934 EXPORT_SYMBOL(get_user_pages_remote);
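
/*
 * Editor's note: a usage sketch, not part of the original source. It reads
 * one page from a foreign mm, illustrating the @locked convention described
 * above: if the callee dropped mmap_lock, *locked is cleared and the caller
 * must not unlock again. read_remote_page() is hypothetical.
 *
 *	static long read_remote_page(struct mm_struct *mm, unsigned long addr,
 *				     void *buf)
 *	{
 *		struct page *page;
 *		int locked = 1;
 *		long ret;
 *
 *		if (mmap_read_lock_killable(mm))
 *			return -EINTR;
 *		ret = get_user_pages_remote(mm, addr, 1, FOLL_FORCE, &page,
 *					    NULL, &locked);
 *		if (locked)
 *			mmap_read_unlock(mm);
 *		if (ret != 1)
 *			return ret < 0 ? ret : -EFAULT;
 *
 *		memcpy(buf, kmap(page), PAGE_SIZE);
 *		kunmap(page);
 *		put_page(page);
 *		return PAGE_SIZE;
 *	}
 */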
1935 
1936 #else /* CONFIG_MMU */
1937 long get_user_pages_remote(struct mm_struct *mm,
1938 			   unsigned long start, unsigned long nr_pages,
1939 			   unsigned int gup_flags, struct page **pages,
1940 			   struct vm_area_struct **vmas, int *locked)
1941 {
1942 	return 0;
1943 }
1944 
1945 static long __get_user_pages_remote(struct mm_struct *mm,
1946 				    unsigned long start, unsigned long nr_pages,
1947 				    unsigned int gup_flags, struct page **pages,
1948 				    struct vm_area_struct **vmas, int *locked)
1949 {
1950 	return 0;
1951 }
1952 #endif /* !CONFIG_MMU */
1953 
1954 /**
1955  * get_user_pages() - pin user pages in memory
1956  * @start:      starting user address
1957  * @nr_pages:   number of pages from start to pin
1958  * @gup_flags:  flags modifying lookup behaviour
1959  * @pages:      array that receives pointers to the pages pinned.
1960  *              Should be at least nr_pages long. Or NULL, if caller
1961  *              only intends to ensure the pages are faulted in.
1962  * @vmas:       array of pointers to vmas corresponding to each page.
1963  *              Or NULL if the caller does not require them.
1964  *
1965  * This is the same as get_user_pages_remote(), just with a less-flexible
1966  * calling convention where we assume that the mm being operated on belongs to
1967  * the current task, and doesn't allow passing of a locked parameter.  We also
1968  * obviously don't pass FOLL_REMOTE in here.
1969  */
1970 long get_user_pages(unsigned long start, unsigned long nr_pages,
1971 		unsigned int gup_flags, struct page **pages,
1972 		struct vm_area_struct **vmas)
1973 {
1974 	if (!is_valid_gup_flags(gup_flags))
1975 		return -EINVAL;
1976 
1977 	return __gup_longterm_locked(current->mm, start, nr_pages,
1978 				     pages, vmas, gup_flags | FOLL_TOUCH);
1979 }
1980 EXPORT_SYMBOL(get_user_pages);
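
/*
 * Editor's note: a usage sketch, not part of the original source. It pins a
 * buffer of the current task for a short-lived write, then dirties and
 * releases each page as the documentation above requires
 * (set_page_dirty_lock() before put_page() when FOLL_WRITE was used).
 * touch_user_buffer() is hypothetical.
 *
 *	static long touch_user_buffer(unsigned long start, unsigned long npages)
 *	{
 *		struct page **pages;
 *		long got, i;
 *
 *		pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
 *		if (!pages)
 *			return -ENOMEM;
 *
 *		mmap_read_lock(current->mm);
 *		got = get_user_pages(start, npages, FOLL_WRITE, pages, NULL);
 *		mmap_read_unlock(current->mm);
 *
 *		for (i = 0; i < got; i++) {
 *			set_page_dirty_lock(pages[i]);
 *			put_page(pages[i]);
 *		}
 *		kfree(pages);
 *		return got;
 *	}
 */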
1981 
1982 /**
1983  * get_user_pages_locked() - variant of get_user_pages()
1984  *
1985  * @start:      starting user address
1986  * @nr_pages:   number of pages from start to pin
1987  * @gup_flags:  flags modifying lookup behaviour
1988  * @pages:      array that receives pointers to the pages pinned.
1989  *              Should be at least nr_pages long. Or NULL, if caller
1990  *              only intends to ensure the pages are faulted in.
1991  * @locked:     pointer to lock flag indicating whether lock is held and
1992  *              subsequently whether VM_FAULT_RETRY functionality can be
1993  *              utilised. Lock must initially be held.
1994  *
1995  * It is suitable to replace the form:
1996  *
1997  *      mmap_read_lock(mm);
1998  *      do_something()
1999  *      get_user_pages(..., pages, NULL);
2000  *      mmap_read_unlock(mm);
2001  *
2002  *  to:
2003  *
2004  *      int locked = 1;
2005  *      mmap_read_lock(mm);
2006  *      do_something()
2007  *      get_user_pages_locked(..., pages, &locked);
2008  *      if (locked)
2009  *          mmap_read_unlock(mm);
2010  *
2011  * We can leverage the VM_FAULT_RETRY functionality in the page fault
2012  * paths better by using either get_user_pages_locked() or
2013  * get_user_pages_unlocked().
2014  *
2015  */
2016 long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
2017 			   unsigned int gup_flags, struct page **pages,
2018 			   int *locked)
2019 {
2020 	/*
2021 	 * FIXME: Current FOLL_LONGTERM behavior is incompatible with
2022 	 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
2023 	 * vmas.  As there are no users of this flag in this call we simply
2024 	 * disallow this option for now.
2025 	 */
2026 	if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
2027 		return -EINVAL;
2028 	/*
2029 	 * FOLL_PIN must only be set internally by the pin_user_pages*() APIs,
2030 	 * never directly by the caller, so enforce that:
2031 	 */
2032 	if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
2033 		return -EINVAL;
2034 
2035 	return __get_user_pages_locked(current->mm, start, nr_pages,
2036 				       pages, NULL, locked,
2037 				       gup_flags | FOLL_TOUCH);
2038 }
2039 EXPORT_SYMBOL(get_user_pages_locked);
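
/*
 * Editor's note: a fuller version of the replacement pattern shown in the
 * comment above, not part of the original source. fault_in_and_pin() is
 * hypothetical; the point is that @locked starts at 1 with mmap_lock held
 * for read, and is cleared if the lock was dropped on VM_FAULT_RETRY, in
 * which case the caller must not unlock again.
 *
 *	static long fault_in_and_pin(unsigned long start, unsigned long npages,
 *				     struct page **pages)
 *	{
 *		int locked = 1;
 *		long ret;
 *
 *		mmap_read_lock(current->mm);
 *		ret = get_user_pages_locked(start, npages, FOLL_WRITE, pages,
 *					    &locked);
 *		if (locked)
 *			mmap_read_unlock(current->mm);
 *		return ret;
 *	}
 */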
2040 
2041 /*
2042  * get_user_pages_unlocked() is suitable to replace the form:
2043  *
2044  *      mmap_read_lock(mm);
2045  *      get_user_pages(..., pages, NULL);
2046  *      mmap_read_unlock(mm);
2047  *
2048  *  with:
2049  *
2050  *      get_user_pages_unlocked(..., pages);
2051  *
2052  * It is functionally equivalent to get_user_pages_fast(), so
2053  * get_user_pages_fast should be used instead if specific gup_flags
2054  * (e.g. FOLL_FORCE) are not required.
2055  */
2056 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
2057 			     struct page **pages, unsigned int gup_flags)
2058 {
2059 	struct mm_struct *mm = current->mm;
2060 	int locked = 1;
2061 	long ret;
2062 
2063 	/*
2064 	 * FIXME: Current FOLL_LONGTERM behavior is incompatible with
2065 	 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
2066 	 * vmas.  As there are no users of this flag in this call we simply
2067 	 * disallow this option for now.
2068 	 */
2069 	if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
2070 		return -EINVAL;
2071 
2072 	mmap_read_lock(mm);
2073 	ret = __get_user_pages_locked(mm, start, nr_pages, pages, NULL,
2074 				      &locked, gup_flags | FOLL_TOUCH);
2075 	if (locked)
2076 		mmap_read_unlock(mm);
2077 	return ret;
2078 }
2079 EXPORT_SYMBOL(get_user_pages_unlocked);
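
/*
 * Editor's note: a minimal sketch, not part of the original source. Unlike
 * the variants above, the caller does no mmap_lock handling at all.
 * pin_with_force() is a hypothetical wrapper showing a gup_flags combination
 * (FOLL_FORCE) for which this call is preferred over get_user_pages_fast().
 *
 *	static long pin_with_force(unsigned long start, unsigned long npages,
 *				   struct page **pages)
 *	{
 *		return get_user_pages_unlocked(start, npages, pages,
 *					       FOLL_WRITE | FOLL_FORCE);
 *	}
 */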
2080 
2081 /*
2082  * Fast GUP
2083  *
2084  * get_user_pages_fast attempts to pin user pages by walking the page
2085  * tables directly and avoids taking locks. Thus the walker needs to be
2086  * protected from page table pages being freed from under it, and should
2087  * block any THP splits.
2088  *
2089  * One way to achieve this is to have the walker disable interrupts, and
2090  * rely on IPIs from the TLB flushing code blocking before the page table
2091  * pages are freed. This is unsuitable for architectures that do not need
2092  * to broadcast an IPI when invalidating TLBs.
2093  *
2094  * Another way to achieve this is to batch up the page-table-containing pages
2095  * belonging to more than one mm_user, then rcu_sched a callback to free those
2096  * pages. Disabling interrupts will allow the fast_gup walker to both block
2097  * the rcu_sched callback, and an IPI that we broadcast for splitting THPs
2098  * (which is a relatively rare event). The code below adopts this strategy.
2099  *
2100  * Before activating this code, please be aware that the following assumptions
2101  * are currently made:
2102  *
2103  *  *) Either MMU_GATHER_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to
2104  *  free pages containing page tables, or TLB flushing requires IPI broadcast.
2105  *
2106  *  *) ptes can be read atomically by the architecture.
2107  *
2108  *  *) access_ok is sufficient to validate userspace address ranges.
2109  *
2110  * The last two assumptions can be relaxed by the addition of helper functions.
2111  *
2112  * This code is based heavily on the PowerPC implementation by Nick Piggin.
2113  */
2114 #ifdef CONFIG_HAVE_FAST_GUP
2115 
2116 static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start,
2117 					    unsigned int flags,
2118 					    struct page **pages)
2119 {
2120 	while ((*nr) - nr_start) {
2121 		struct page *page = pages[--(*nr)];
2122 
2123 		ClearPageReferenced(page);
2124 		if (flags & FOLL_PIN)
2125 			unpin_user_page(page);
2126 		else
2127 			put_page(page);
2128 	}
2129 }
2130 
2131 #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
2132 static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
2133 			 unsigned int flags, struct page **pages, int *nr)
2134 {
2135 	struct dev_pagemap *pgmap = NULL;
2136 	int nr_start = *nr, ret = 0;
2137 	pte_t *ptep, *ptem;
2138 
2139 	ptem = ptep = pte_offset_map(&pmd, addr);
2140 	do {
2141 		pte_t pte = ptep_get_lockless(ptep);
2142 		struct page *head, *page;
2143 
2144 		/*
2145 		 * Similar to the PMD case below, NUMA hinting must take slow
2146 		 * path using the pte_protnone check.
2147 		 */
2148 		if (pte_protnone(pte))
2149 			goto pte_unmap;
2150 
2151 		if (!pte_access_permitted(pte, flags & FOLL_WRITE))
2152 			goto pte_unmap;
2153 
2154 		if (pte_devmap(pte)) {
2155 			if (unlikely(flags & FOLL_LONGTERM))
2156 				goto pte_unmap;
2157 
2158 			pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
2159 			if (unlikely(!pgmap)) {
2160 				undo_dev_pagemap(nr, nr_start, flags, pages);
2161 				goto pte_unmap;
2162 			}
2163 		} else if (pte_special(pte))
2164 			goto pte_unmap;
2165 
2166 		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
2167 		page = pte_page(pte);
2168 
2169 		head = try_grab_compound_head(page, 1, flags);
2170 		if (!head)
2171 			goto pte_unmap;
2172 
2173 		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
2174 			put_compound_head(head, 1, flags);
2175 			goto pte_unmap;
2176 		}
2177 
2178 		VM_BUG_ON_PAGE(compound_head(page) != head, page);
2179 
2180 		/*
2181 		 * We need to make the page accessible if and only if we are
2182 		 * going to access its content (the FOLL_PIN case).  Please
2183 		 * see Documentation/core-api/pin_user_pages.rst for
2184 		 * details.
2185 		 */
2186 		if (flags & FOLL_PIN) {
2187 			ret = arch_make_page_accessible(page);
2188 			if (ret) {
2189 				unpin_user_page(page);
2190 				goto pte_unmap;
2191 			}
2192 		}
2193 		SetPageReferenced(page);
2194 		pages[*nr] = page;
2195 		(*nr)++;
2196 
2197 	} while (ptep++, addr += PAGE_SIZE, addr != end);
2198 
2199 	ret = 1;
2200 
2201 pte_unmap:
2202 	if (pgmap)
2203 		put_dev_pagemap(pgmap);
2204 	pte_unmap(ptem);
2205 	return ret;
2206 }
2207 #else
2208 
2209 /*
2210  * If we can't determine whether or not a pte is special, then fail immediately
2211  * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
2212  * to be special.
2213  *
2214  * For a futex to be placed on a THP tail page, get_futex_key requires a
2215  * get_user_pages_fast_only implementation that can pin pages. Thus it's still
2216  * useful to have gup_huge_pmd even if we can't operate on ptes.
2217  */
2218 static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
2219 			 unsigned int flags, struct page **pages, int *nr)
2220 {
2221 	return 0;
2222 }
2223 #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
2224 
2225 #if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
2226 static int __gup_device_huge(unsigned long pfn, unsigned long addr,
2227 			     unsigned long end, unsigned int flags,
2228 			     struct page **pages, int *nr)
2229 {
2230 	int nr_start = *nr;
2231 	struct dev_pagemap *pgmap = NULL;
2232 
2233 	do {
2234 		struct page *page = pfn_to_page(pfn);
2235 
2236 		pgmap = get_dev_pagemap(pfn, pgmap);
2237 		if (unlikely(!pgmap)) {
2238 			undo_dev_pagemap(nr, nr_start, flags, pages);
2239 			return 0;
2240 		}
2241 		SetPageReferenced(page);
2242 		pages[*nr] = page;
2243 		if (unlikely(!try_grab_page(page, flags))) {
2244 			undo_dev_pagemap(nr, nr_start, flags, pages);
2245 			return 0;
2246 		}
2247 		(*nr)++;
2248 		pfn++;
2249 	} while (addr += PAGE_SIZE, addr != end);
2250 
2251 	if (pgmap)
2252 		put_dev_pagemap(pgmap);
2253 	return 1;
2254 }
2255 
2256 static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
2257 				 unsigned long end, unsigned int flags,
2258 				 struct page **pages, int *nr)
2259 {
2260 	unsigned long fault_pfn;
2261 	int nr_start = *nr;
2262 
2263 	fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
2264 	if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr))
2265 		return 0;
2266 
2267 	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
2268 		undo_dev_pagemap(nr, nr_start, flags, pages);
2269 		return 0;
2270 	}
2271 	return 1;
2272 }
2273 
2274 static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
2275 				 unsigned long end, unsigned int flags,
2276 				 struct page **pages, int *nr)
2277 {
2278 	unsigned long fault_pfn;
2279 	int nr_start = *nr;
2280 
2281 	fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
2282 	if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr))
2283 		return 0;
2284 
2285 	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
2286 		undo_dev_pagemap(nr, nr_start, flags, pages);
2287 		return 0;
2288 	}
2289 	return 1;
2290 }
2291 #else
2292 static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
2293 				 unsigned long end, unsigned int flags,
2294 				 struct page **pages, int *nr)
2295 {
2296 	BUILD_BUG();
2297 	return 0;
2298 }
2299 
2300 static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
2301 				 unsigned long end, unsigned int flags,
2302 				 struct page **pages, int *nr)
2303 {
2304 	BUILD_BUG();
2305 	return 0;
2306 }
2307 #endif
2308 
2309 static int record_subpages(struct page *page, unsigned long addr,
2310 			   unsigned long end, struct page **pages)
2311 {
2312 	int nr;
2313 
2314 	for (nr = 0; addr != end; addr += PAGE_SIZE)
2315 		pages[nr++] = page++;
2316 
2317 	return nr;
2318 }
2319 
2320 #ifdef CONFIG_ARCH_HAS_HUGEPD
2321 static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
2322 				      unsigned long sz)
2323 {
2324 	unsigned long __boundary = (addr + sz) & ~(sz-1);
2325 	return (__boundary - 1 < end - 1) ? __boundary : end;
2326 }
2327 
2328 static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
2329 		       unsigned long end, unsigned int flags,
2330 		       struct page **pages, int *nr)
2331 {
2332 	unsigned long pte_end;
2333 	struct page *head, *page;
2334 	pte_t pte;
2335 	int refs;
2336 
2337 	pte_end = (addr + sz) & ~(sz-1);
2338 	if (pte_end < end)
2339 		end = pte_end;
2340 
2341 	pte = huge_ptep_get(ptep);
2342 
2343 	if (!pte_access_permitted(pte, flags & FOLL_WRITE))
2344 		return 0;
2345 
2346 	/* hugepages are never "special" */
2347 	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
2348 
2349 	head = pte_page(pte);
2350 	page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
2351 	refs = record_subpages(page, addr, end, pages + *nr);
2352 
2353 	head = try_grab_compound_head(head, refs, flags);
2354 	if (!head)
2355 		return 0;
2356 
2357 	if (unlikely(pte_val(pte) != pte_val(*ptep))) {
2358 		put_compound_head(head, refs, flags);
2359 		return 0;
2360 	}
2361 
2362 	*nr += refs;
2363 	SetPageReferenced(head);
2364 	return 1;
2365 }
2366 
2367 static int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
2368 		unsigned int pdshift, unsigned long end, unsigned int flags,
2369 		struct page **pages, int *nr)
2370 {
2371 	pte_t *ptep;
2372 	unsigned long sz = 1UL << hugepd_shift(hugepd);
2373 	unsigned long next;
2374 
2375 	ptep = hugepte_offset(hugepd, addr, pdshift);
2376 	do {
2377 		next = hugepte_addr_end(addr, end, sz);
2378 		if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr))
2379 			return 0;
2380 	} while (ptep++, addr = next, addr != end);
2381 
2382 	return 1;
2383 }
2384 #else
2385 static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
2386 		unsigned int pdshift, unsigned long end, unsigned int flags,
2387 		struct page **pages, int *nr)
2388 {
2389 	return 0;
2390 }
2391 #endif /* CONFIG_ARCH_HAS_HUGEPD */
2392 
2393 static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
2394 			unsigned long end, unsigned int flags,
2395 			struct page **pages, int *nr)
2396 {
2397 	struct page *head, *page;
2398 	int refs;
2399 
2400 	if (!pmd_access_permitted(orig, flags & FOLL_WRITE))
2401 		return 0;
2402 
2403 	if (pmd_devmap(orig)) {
2404 		if (unlikely(flags & FOLL_LONGTERM))
2405 			return 0;
2406 		return __gup_device_huge_pmd(orig, pmdp, addr, end, flags,
2407 					     pages, nr);
2408 	}
2409 
2410 	page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
2411 	refs = record_subpages(page, addr, end, pages + *nr);
2412 
2413 	head = try_grab_compound_head(pmd_page(orig), refs, flags);
2414 	if (!head)
2415 		return 0;
2416 
2417 	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
2418 		put_compound_head(head, refs, flags);
2419 		return 0;
2420 	}
2421 
2422 	*nr += refs;
2423 	SetPageReferenced(head);
2424 	return 1;
2425 }
2426 
2427 static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
2428 			unsigned long end, unsigned int flags,
2429 			struct page **pages, int *nr)
2430 {
2431 	struct page *head, *page;
2432 	int refs;
2433 
2434 	if (!pud_access_permitted(orig, flags & FOLL_WRITE))
2435 		return 0;
2436 
2437 	if (pud_devmap(orig)) {
2438 		if (unlikely(flags & FOLL_LONGTERM))
2439 			return 0;
2440 		return __gup_device_huge_pud(orig, pudp, addr, end, flags,
2441 					     pages, nr);
2442 	}
2443 
2444 	page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
2445 	refs = record_subpages(page, addr, end, pages + *nr);
2446 
2447 	head = try_grab_compound_head(pud_page(orig), refs, flags);
2448 	if (!head)
2449 		return 0;
2450 
2451 	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
2452 		put_compound_head(head, refs, flags);
2453 		return 0;
2454 	}
2455 
2456 	*nr += refs;
2457 	SetPageReferenced(head);
2458 	return 1;
2459 }
2460 
2461 static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
2462 			unsigned long end, unsigned int flags,
2463 			struct page **pages, int *nr)
2464 {
2465 	int refs;
2466 	struct page *head, *page;
2467 
2468 	if (!pgd_access_permitted(orig, flags & FOLL_WRITE))
2469 		return 0;
2470 
2471 	BUILD_BUG_ON(pgd_devmap(orig));
2472 
2473 	page = pgd_page(orig) + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
2474 	refs = record_subpages(page, addr, end, pages + *nr);
2475 
2476 	head = try_grab_compound_head(pgd_page(orig), refs, flags);
2477 	if (!head)
2478 		return 0;
2479 
2480 	if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
2481 		put_compound_head(head, refs, flags);
2482 		return 0;
2483 	}
2484 
2485 	*nr += refs;
2486 	SetPageReferenced(head);
2487 	return 1;
2488 }
2489 
2490 static int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned long end,
2491 		unsigned int flags, struct page **pages, int *nr)
2492 {
2493 	unsigned long next;
2494 	pmd_t *pmdp;
2495 
2496 	pmdp = pmd_offset_lockless(pudp, pud, addr);
2497 	do {
2498 		pmd_t pmd = READ_ONCE(*pmdp);
2499 
2500 		next = pmd_addr_end(addr, end);
2501 		if (!pmd_present(pmd))
2502 			return 0;
2503 
2504 		if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) ||
2505 			     pmd_devmap(pmd))) {
2506 			/*
2507 			 * NUMA hinting faults need to be handled in the GUP
2508 			 * slowpath for accounting purposes and so that they
2509 			 * can be serialised against THP migration.
2510 			 */
2511 			if (pmd_protnone(pmd))
2512 				return 0;
2513 
2514 			if (!gup_huge_pmd(pmd, pmdp, addr, next, flags,
2515 				pages, nr))
2516 				return 0;
2517 
2518 		} else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
2519 			/*
2520 			 * architectures may use a different pmd format for
2521 			 * hugetlbfs than the THP pmd format
2522 			 */
2523 			if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
2524 					 PMD_SHIFT, next, flags, pages, nr))
2525 				return 0;
2526 		} else if (!gup_pte_range(pmd, addr, next, flags, pages, nr))
2527 			return 0;
2528 	} while (pmdp++, addr = next, addr != end);
2529 
2530 	return 1;
2531 }
2532 
2533 static int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned long end,
2534 			 unsigned int flags, struct page **pages, int *nr)
2535 {
2536 	unsigned long next;
2537 	pud_t *pudp;
2538 
2539 	pudp = pud_offset_lockless(p4dp, p4d, addr);
2540 	do {
2541 		pud_t pud = READ_ONCE(*pudp);
2542 
2543 		next = pud_addr_end(addr, end);
2544 		if (unlikely(!pud_present(pud)))
2545 			return 0;
2546 		if (unlikely(pud_huge(pud))) {
2547 			if (!gup_huge_pud(pud, pudp, addr, next, flags,
2548 					  pages, nr))
2549 				return 0;
2550 		} else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
2551 			if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
2552 					 PUD_SHIFT, next, flags, pages, nr))
2553 				return 0;
2554 		} else if (!gup_pmd_range(pudp, pud, addr, next, flags, pages, nr))
2555 			return 0;
2556 	} while (pudp++, addr = next, addr != end);
2557 
2558 	return 1;
2559 }
2560 
2561 static int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, unsigned long end,
2562 			 unsigned int flags, struct page **pages, int *nr)
2563 {
2564 	unsigned long next;
2565 	p4d_t *p4dp;
2566 
2567 	p4dp = p4d_offset_lockless(pgdp, pgd, addr);
2568 	do {
2569 		p4d_t p4d = READ_ONCE(*p4dp);
2570 
2571 		next = p4d_addr_end(addr, end);
2572 		if (p4d_none(p4d))
2573 			return 0;
2574 		BUILD_BUG_ON(p4d_huge(p4d));
2575 		if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) {
2576 			if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr,
2577 					 P4D_SHIFT, next, flags, pages, nr))
2578 				return 0;
2579 		} else if (!gup_pud_range(p4dp, p4d, addr, next, flags, pages, nr))
2580 			return 0;
2581 	} while (p4dp++, addr = next, addr != end);
2582 
2583 	return 1;
2584 }
2585 
2586 static void gup_pgd_range(unsigned long addr, unsigned long end,
2587 		unsigned int flags, struct page **pages, int *nr)
2588 {
2589 	unsigned long next;
2590 	pgd_t *pgdp;
2591 
2592 	pgdp = pgd_offset(current->mm, addr);
2593 	do {
2594 		pgd_t pgd = READ_ONCE(*pgdp);
2595 
2596 		next = pgd_addr_end(addr, end);
2597 		if (pgd_none(pgd))
2598 			return;
2599 		if (unlikely(pgd_huge(pgd))) {
2600 			if (!gup_huge_pgd(pgd, pgdp, addr, next, flags,
2601 					  pages, nr))
2602 				return;
2603 		} else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
2604 			if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
2605 					 PGDIR_SHIFT, next, flags, pages, nr))
2606 				return;
2607 		} else if (!gup_p4d_range(pgdp, pgd, addr, next, flags, pages, nr))
2608 			return;
2609 	} while (pgdp++, addr = next, addr != end);
2610 }
2611 #else
2612 static inline void gup_pgd_range(unsigned long addr, unsigned long end,
2613 		unsigned int flags, struct page **pages, int *nr)
2614 {
2615 }
2616 #endif /* CONFIG_HAVE_FAST_GUP */
2617 
2618 #ifndef gup_fast_permitted
2619 /*
2620  * Check if it's allowed to use get_user_pages_fast_only() for the range, or
2621  * we need to fall back to the slow version:
2622  */
2623 static bool gup_fast_permitted(unsigned long start, unsigned long end)
2624 {
2625 	return true;
2626 }
2627 #endif
2628 
2629 static int __gup_longterm_unlocked(unsigned long start, int nr_pages,
2630 				   unsigned int gup_flags, struct page **pages)
2631 {
2632 	int ret;
2633 
2634 	/*
2635 	 * FIXME: FOLL_LONGTERM does not work with
2636 	 * get_user_pages_unlocked() (see comments in that function)
2637 	 */
2638 	if (gup_flags & FOLL_LONGTERM) {
2639 		mmap_read_lock(current->mm);
2640 		ret = __gup_longterm_locked(current->mm,
2641 					    start, nr_pages,
2642 					    pages, NULL, gup_flags);
2643 		mmap_read_unlock(current->mm);
2644 	} else {
2645 		ret = get_user_pages_unlocked(start, nr_pages,
2646 					      pages, gup_flags);
2647 	}
2648 
2649 	return ret;
2650 }
2651 
2652 static unsigned long lockless_pages_from_mm(unsigned long start,
2653 					    unsigned long end,
2654 					    unsigned int gup_flags,
2655 					    struct page **pages)
2656 {
2657 	unsigned long flags;
2658 	int nr_pinned = 0;
2659 	unsigned seq;
2660 
2661 	if (!IS_ENABLED(CONFIG_HAVE_FAST_GUP) ||
2662 	    !gup_fast_permitted(start, end))
2663 		return 0;
2664 
2665 	if (gup_flags & FOLL_PIN) {
2666 		seq = raw_read_seqcount(&current->mm->write_protect_seq);
2667 		if (seq & 1)
2668 			return 0;
2669 	}
2670 
2671 	/*
2672 	 * Disable interrupts. The nested form is used, in order to allow full,
2673 	 * general purpose use of this routine.
2674 	 *
2675 	 * With interrupts disabled, we block page table pages from being freed
2676 	 * from under us. See struct mmu_table_batch comments in
2677 	 * include/asm-generic/tlb.h for more details.
2678 	 *
2679 	 * We do not adopt an rcu_read_lock() here as we also want to block IPIs
2680 	 * that come from THPs splitting.
2681 	 */
2682 	local_irq_save(flags);
2683 	gup_pgd_range(start, end, gup_flags, pages, &nr_pinned);
2684 	local_irq_restore(flags);
2685 
2686 	/*
2687 	 * When pinning pages for DMA there could be a concurrent write protect
2688 	 * from fork() via copy_page_range(), in this case always fail fast GUP.
2689 	 */
2690 	if (gup_flags & FOLL_PIN) {
2691 		if (read_seqcount_retry(&current->mm->write_protect_seq, seq)) {
2692 			unpin_user_pages(pages, nr_pinned);
2693 			return 0;
2694 		}
2695 	}
2696 	return nr_pinned;
2697 }
2698 
2699 static int internal_get_user_pages_fast(unsigned long start,
2700 					unsigned long nr_pages,
2701 					unsigned int gup_flags,
2702 					struct page **pages)
2703 {
2704 	unsigned long len, end;
2705 	unsigned long nr_pinned;
2706 	int ret;
2707 
2708 	if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM |
2709 				       FOLL_FORCE | FOLL_PIN | FOLL_GET |
2710 				       FOLL_FAST_ONLY)))
2711 		return -EINVAL;
2712 
2713 	if (gup_flags & FOLL_PIN)
2714 		mm_set_has_pinned_flag(&current->mm->flags);
2715 
2716 	if (!(gup_flags & FOLL_FAST_ONLY))
2717 		might_lock_read(&current->mm->mmap_lock);
2718 
2719 	start = untagged_addr(start) & PAGE_MASK;
2720 	len = nr_pages << PAGE_SHIFT;
2721 	if (check_add_overflow(start, len, &end))
2722 		return 0;
2723 	if (unlikely(!access_ok((void __user *)start, len)))
2724 		return -EFAULT;
2725 
2726 	nr_pinned = lockless_pages_from_mm(start, end, gup_flags, pages);
2727 	if (nr_pinned == nr_pages || gup_flags & FOLL_FAST_ONLY)
2728 		return nr_pinned;
2729 
2730 	/* Slow path: try to get the remaining pages with get_user_pages */
2731 	start += nr_pinned << PAGE_SHIFT;
2732 	pages += nr_pinned;
2733 	ret = __gup_longterm_unlocked(start, nr_pages - nr_pinned, gup_flags,
2734 				      pages);
2735 	if (ret < 0) {
2736 		/*
2737 		 * The caller has to unpin the pages we already pinned so
2738 		 * returning -errno is not an option
2739 		 */
2740 		if (nr_pinned)
2741 			return nr_pinned;
2742 		return ret;
2743 	}
2744 	return ret + nr_pinned;
2745 }
2746 
2747 /**
2748  * get_user_pages_fast_only() - pin user pages in memory
2749  * @start:      starting user address
2750  * @nr_pages:   number of pages from start to pin
2751  * @gup_flags:  flags modifying pin behaviour
2752  * @pages:      array that receives pointers to the pages pinned.
2753  *              Should be at least nr_pages long.
2754  *
2755  * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
2756  * the regular GUP.
2757  * Note a difference with get_user_pages_fast: this always returns the
2758  * number of pages pinned, or 0 if no pages were pinned.
2759  *
2760  * If the architecture does not support this function, simply return with no
2761  * pages pinned.
2762  *
2763  * Careful, careful! COW breaking can go either way, so a non-write
2764  * access can get ambiguous page results. If you call this function without
2765  * 'write' set, you'd better be sure that you're ok with that ambiguity.
2766  */
2767 int get_user_pages_fast_only(unsigned long start, int nr_pages,
2768 			     unsigned int gup_flags, struct page **pages)
2769 {
2770 	int nr_pinned;
2771 	/*
2772 	 * Internally (within mm/gup.c), gup fast variants must set FOLL_GET,
2773 	 * because gup fast is always a "pin with a +1 page refcount" request.
2774 	 *
2775 	 * FOLL_FAST_ONLY is required in order to match the API description of
2776 	 * this routine: no fall back to regular ("slow") GUP.
2777 	 */
2778 	gup_flags |= FOLL_GET | FOLL_FAST_ONLY;
2779 
2780 	nr_pinned = internal_get_user_pages_fast(start, nr_pages, gup_flags,
2781 						 pages);
2782 
2783 	/*
2784 	 * As specified in the API description above, this routine is not
2785 	 * allowed to return negative values. However, the common core
2786 	 * routine internal_get_user_pages_fast() *can* return -errno.
2787 	 * Therefore, correct for that here:
2788 	 */
2789 	if (nr_pinned < 0)
2790 		nr_pinned = 0;
2791 
2792 	return nr_pinned;
2793 }
2794 EXPORT_SYMBOL_GPL(get_user_pages_fast_only);
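
/*
 * Editor's note: a minimal sketch, not part of the original source. Because
 * this variant never takes mmap_lock and never falls back to the slow path,
 * it can be attempted from contexts that must not sleep; the caller handles
 * the "0 pages pinned" case itself. try_pin_one_page_atomic() is
 * hypothetical.
 *
 *	static bool try_pin_one_page_atomic(unsigned long addr,
 *					    struct page **page)
 *	{
 *		return get_user_pages_fast_only(addr, 1, FOLL_WRITE, page) == 1;
 *	}
 */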
2795 
2796 /**
2797  * get_user_pages_fast() - pin user pages in memory
2798  * @start:      starting user address
2799  * @nr_pages:   number of pages from start to pin
2800  * @gup_flags:  flags modifying pin behaviour
2801  * @pages:      array that receives pointers to the pages pinned.
2802  *              Should be at least nr_pages long.
2803  *
2804  * Attempt to pin user pages in memory without taking mm->mmap_lock.
2805  * If not successful, it will fall back to taking the lock and
2806  * calling get_user_pages().
2807  *
2808  * Returns number of pages pinned. This may be fewer than the number requested.
2809  * If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns
2810  * -errno.
2811  */
2812 int get_user_pages_fast(unsigned long start, int nr_pages,
2813 			unsigned int gup_flags, struct page **pages)
2814 {
2815 	if (!is_valid_gup_flags(gup_flags))
2816 		return -EINVAL;
2817 
2818 	/*
2819 	 * The caller may or may not have explicitly set FOLL_GET; either way is
2820 	 * OK. However, internally (within mm/gup.c), gup fast variants must set
2821 	 * FOLL_GET, because gup fast is always a "pin with a +1 page refcount"
2822 	 * request.
2823 	 */
2824 	gup_flags |= FOLL_GET;
2825 	return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
2826 }
2827 EXPORT_SYMBOL_GPL(get_user_pages_fast);
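
/*
 * Editor's note: a usage sketch, not part of the original source. A typical
 * caller treats a short return as partial success: either use what was
 * pinned or, as here, drop the references and report failure.
 * pin_buffer_fast() is hypothetical.
 *
 *	static int pin_buffer_fast(unsigned long start, int npages,
 *				   struct page **pages)
 *	{
 *		int got = get_user_pages_fast(start, npages, FOLL_WRITE, pages);
 *
 *		if (got < 0)
 *			return got;
 *		if (got < npages) {
 *			while (got)
 *				put_page(pages[--got]);
 *			return -EFAULT;
 *		}
 *		return 0;
 *	}
 */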
2828 
2829 /**
2830  * pin_user_pages_fast() - pin user pages in memory without taking locks
2831  *
2832  * @start:      starting user address
2833  * @nr_pages:   number of pages from start to pin
2834  * @gup_flags:  flags modifying pin behaviour
2835  * @pages:      array that receives pointers to the pages pinned.
2836  *              Should be at least nr_pages long.
2837  *
2838  * Nearly the same as get_user_pages_fast(), except that FOLL_PIN is set. See
2839  * get_user_pages_fast() for documentation on the function arguments, because
2840  * the arguments here are identical.
2841  *
2842  * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
2843  * see Documentation/core-api/pin_user_pages.rst for further details.
2844  */
2845 int pin_user_pages_fast(unsigned long start, int nr_pages,
2846 			unsigned int gup_flags, struct page **pages)
2847 {
2848 	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
2849 	if (WARN_ON_ONCE(gup_flags & FOLL_GET))
2850 		return -EINVAL;
2851 
2852 	gup_flags |= FOLL_PIN;
2853 	return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
2854 }
2855 EXPORT_SYMBOL_GPL(pin_user_pages_fast);
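
/*
 * Editor's note: a usage sketch, not part of the original source. The key
 * difference from the FOLL_GET variants is the release path: pinned pages
 * must go back via the unpin_user_page*() family, never put_page().
 * do_device_dma_into() stands for a hypothetical driver step, and
 * dma_to_user_buffer() is likewise illustrative only.
 *
 *	static int dma_to_user_buffer(unsigned long start, int npages,
 *				      struct page **pages)
 *	{
 *		int got = pin_user_pages_fast(start, npages, FOLL_WRITE, pages);
 *
 *		if (got < 0)
 *			return got;
 *
 *		do_device_dma_into(pages, got);
 *
 *		unpin_user_pages_dirty_lock(pages, got, true);
 *		return got == npages ? 0 : -EFAULT;
 *	}
 */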
2856 
2857 /*
2858  * This is the FOLL_PIN equivalent of get_user_pages_fast_only(). Behavior
2859  * is the same, except that this one sets FOLL_PIN instead of FOLL_GET.
2860  *
2861  * The API rules are the same, too: no negative values may be returned.
2862  */
2863 int pin_user_pages_fast_only(unsigned long start, int nr_pages,
2864 			     unsigned int gup_flags, struct page **pages)
2865 {
2866 	int nr_pinned;
2867 
2868 	/*
2869 	 * FOLL_GET and FOLL_PIN are mutually exclusive. Note that the API
2870 	 * rules require returning 0, rather than -errno:
2871 	 */
2872 	if (WARN_ON_ONCE(gup_flags & FOLL_GET))
2873 		return 0;
2874 	/*
2875 	 * FOLL_FAST_ONLY is required in order to match the API description of
2876 	 * this routine: no fall back to regular ("slow") GUP.
2877 	 */
2878 	gup_flags |= (FOLL_PIN | FOLL_FAST_ONLY);
2879 	nr_pinned = internal_get_user_pages_fast(start, nr_pages, gup_flags,
2880 						 pages);
2881 	/*
2882 	 * This routine is not allowed to return negative values. However,
2883 	 * internal_get_user_pages_fast() *can* return -errno. Therefore,
2884 	 * correct for that here:
2885 	 */
2886 	if (nr_pinned < 0)
2887 		nr_pinned = 0;
2888 
2889 	return nr_pinned;
2890 }
2891 EXPORT_SYMBOL_GPL(pin_user_pages_fast_only);
2892 
2893 /**
2894  * pin_user_pages_remote() - pin pages of a remote process
2895  *
2896  * @mm:		mm_struct of target mm
2897  * @start:	starting user address
2898  * @nr_pages:	number of pages from start to pin
2899  * @gup_flags:	flags modifying lookup behaviour
2900  * @pages:	array that receives pointers to the pages pinned.
2901  *		Should be at least nr_pages long. Or NULL, if caller
2902  *		only intends to ensure the pages are faulted in.
2903  * @vmas:	array of pointers to vmas corresponding to each page.
2904  *		Or NULL if the caller does not require them.
2905  * @locked:	pointer to lock flag indicating whether lock is held and
2906  *		subsequently whether VM_FAULT_RETRY functionality can be
2907  *		utilised. Lock must initially be held.
2908  *
2909  * Nearly the same as get_user_pages_remote(), except that FOLL_PIN is set. See
2910  * get_user_pages_remote() for documentation on the function arguments, because
2911  * the arguments here are identical.
2912  *
2913  * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
2914  * see Documentation/core-api/pin_user_pages.rst for details.
2915  */
2916 long pin_user_pages_remote(struct mm_struct *mm,
2917 			   unsigned long start, unsigned long nr_pages,
2918 			   unsigned int gup_flags, struct page **pages,
2919 			   struct vm_area_struct **vmas, int *locked)
2920 {
2921 	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
2922 	if (WARN_ON_ONCE(gup_flags & FOLL_GET))
2923 		return -EINVAL;
2924 
2925 	gup_flags |= FOLL_PIN;
2926 	return __get_user_pages_remote(mm, start, nr_pages, gup_flags,
2927 				       pages, vmas, locked);
2928 }
2929 EXPORT_SYMBOL(pin_user_pages_remote);
2930 
2931 /**
2932  * pin_user_pages() - pin user pages in memory for use by other devices
2933  *
2934  * @start:	starting user address
2935  * @nr_pages:	number of pages from start to pin
2936  * @gup_flags:	flags modifying lookup behaviour
2937  * @pages:	array that receives pointers to the pages pinned.
2938  *		Should be at least nr_pages long. Or NULL, if caller
2939  *		only intends to ensure the pages are faulted in.
2940  * @vmas:	array of pointers to vmas corresponding to each page.
2941  *		Or NULL if the caller does not require them.
2942  *
2943  * Nearly the same as get_user_pages(), except that FOLL_TOUCH is not set, and
2944  * FOLL_PIN is set.
2945  *
2946  * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
2947  * see Documentation/core-api/pin_user_pages.rst for details.
2948  */
2949 long pin_user_pages(unsigned long start, unsigned long nr_pages,
2950 		    unsigned int gup_flags, struct page **pages,
2951 		    struct vm_area_struct **vmas)
2952 {
2953 	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
2954 	if (WARN_ON_ONCE(gup_flags & FOLL_GET))
2955 		return -EINVAL;
2956 
2957 	gup_flags |= FOLL_PIN;
2958 	return __gup_longterm_locked(current->mm, start, nr_pages,
2959 				     pages, vmas, gup_flags);
2960 }
2961 EXPORT_SYMBOL(pin_user_pages);
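
/*
 * Editor's note: a usage sketch, not part of the original source. A
 * long-lived pin (e.g. for device DMA that outlives the syscall) combines
 * FOLL_LONGTERM with FOLL_PIN, which routes through __gup_longterm_locked()
 * above so that unpinnable pages are migrated first. longterm_pin() is
 * hypothetical; the pages are later released with unpin_user_pages().
 *
 *	static long longterm_pin(unsigned long start, unsigned long npages,
 *				 struct page **pages)
 *	{
 *		long got;
 *
 *		mmap_read_lock(current->mm);
 *		got = pin_user_pages(start, npages,
 *				     FOLL_WRITE | FOLL_LONGTERM, pages, NULL);
 *		mmap_read_unlock(current->mm);
 *		return got;
 *	}
 */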
2962 
2963 /*
2964  * pin_user_pages_unlocked() is the FOLL_PIN variant of
2965  * get_user_pages_unlocked(). Behavior is the same, except that this one sets
2966  * FOLL_PIN and rejects FOLL_GET.
2967  */
2968 long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
2969 			     struct page **pages, unsigned int gup_flags)
2970 {
2971 	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
2972 	if (WARN_ON_ONCE(gup_flags & FOLL_GET))
2973 		return -EINVAL;
2974 
2975 	gup_flags |= FOLL_PIN;
2976 	return get_user_pages_unlocked(start, nr_pages, pages, gup_flags);
2977 }
2978 EXPORT_SYMBOL(pin_user_pages_unlocked);
2979 
2980 /*
2981  * pin_user_pages_locked() is the FOLL_PIN variant of get_user_pages_locked().
2982  * Behavior is the same, except that this one sets FOLL_PIN and rejects
2983  * FOLL_GET.
2984  */
2985 long pin_user_pages_locked(unsigned long start, unsigned long nr_pages,
2986 			   unsigned int gup_flags, struct page **pages,
2987 			   int *locked)
2988 {
2989 	/*
2990 	 * FIXME: Current FOLL_LONGTERM behavior is incompatible with
2991 	 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
2992 	 * vmas.  As there are no users of this flag in this call we simply
2993 	 * disallow this option for now.
2994 	 */
2995 	if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
2996 		return -EINVAL;
2997 
2998 	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
2999 	if (WARN_ON_ONCE(gup_flags & FOLL_GET))
3000 		return -EINVAL;
3001 
3002 	gup_flags |= FOLL_PIN;
3003 	return __get_user_pages_locked(current->mm, start, nr_pages,
3004 				       pages, NULL, locked,
3005 				       gup_flags | FOLL_TOUCH);
3006 }
3007 EXPORT_SYMBOL(pin_user_pages_locked);
3008