xref: /openbmc/linux/mm/gup.c (revision 20549801)
1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <linux/kernel.h>
3 #include <linux/errno.h>
4 #include <linux/err.h>
5 #include <linux/spinlock.h>
6 
7 #include <linux/mm.h>
8 #include <linux/memremap.h>
9 #include <linux/pagemap.h>
10 #include <linux/rmap.h>
11 #include <linux/swap.h>
12 #include <linux/swapops.h>
13 #include <linux/secretmem.h>
14 
15 #include <linux/sched/signal.h>
16 #include <linux/rwsem.h>
17 #include <linux/hugetlb.h>
18 #include <linux/migrate.h>
19 #include <linux/mm_inline.h>
20 #include <linux/sched/mm.h>
21 
22 #include <asm/mmu_context.h>
23 #include <asm/tlbflush.h>
24 
25 #include "internal.h"
26 
27 struct follow_page_context {
28 	struct dev_pagemap *pgmap;
29 	unsigned int page_mask;
30 };
31 
32 static inline void sanity_check_pinned_pages(struct page **pages,
33 					     unsigned long npages)
34 {
35 	if (!IS_ENABLED(CONFIG_DEBUG_VM))
36 		return;
37 
38 	/*
39 	 * We only pin anonymous pages if they are exclusive. Once pinned, we
40 	 * can no longer turn them possibly shared and PageAnonExclusive() will
41 	 * stick around until the page is freed.
42 	 *
43 	 * We'd like to verify that our pinned anonymous pages are still mapped
44 	 * exclusively. The issue with anon THP is that we don't know how
45 	 * they are/were mapped when pinning them. However, for anon
46 	 * THP we can assume that either the given page (PTE-mapped THP) or
47 	 * the head page (PMD-mapped THP) should be PageAnonExclusive(). If
48 	 * neither is the case, there is certainly something wrong.
49 	 */
50 	for (; npages; npages--, pages++) {
51 		struct page *page = *pages;
52 		struct folio *folio = page_folio(page);
53 
54 		if (!folio_test_anon(folio))
55 			continue;
56 		if (!folio_test_large(folio) || folio_test_hugetlb(folio))
57 			VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page), page);
58 		else
59 			/* Either a PTE-mapped or a PMD-mapped THP. */
60 			VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page) &&
61 				       !PageAnonExclusive(page), page);
62 	}
63 }
64 
65 /*
66  * Return the folio with ref appropriately incremented,
67  * or NULL if that failed.
68  */
69 static inline struct folio *try_get_folio(struct page *page, int refs)
70 {
71 	struct folio *folio;
72 
73 retry:
74 	folio = page_folio(page);
75 	if (WARN_ON_ONCE(folio_ref_count(folio) < 0))
76 		return NULL;
77 	if (unlikely(!folio_ref_try_add_rcu(folio, refs)))
78 		return NULL;
79 
80 	/*
81 	 * At this point we have a stable reference to the folio; but it
82 	 * could be that between calling page_folio() and the refcount
83 	 * increment, the folio was split, in which case we'd end up
84 	 * holding a reference on a folio that has nothing to do with the page
85 	 * we were given anymore.
86 	 * So now that the folio is stable, recheck that the page still
87 	 * belongs to this folio.
88 	 */
89 	if (unlikely(page_folio(page) != folio)) {
90 		folio_put_refs(folio, refs);
91 		goto retry;
92 	}
93 
94 	return folio;
95 }
96 
97 /**
98  * try_grab_folio() - Attempt to get or pin a folio.
99  * @page:  pointer to page to be grabbed
100  * @refs:  the value to (effectively) add to the folio's refcount
101  * @flags: gup flags: these are the FOLL_* flag values.
102  *
103  * "grab" names in this file mean, "look at flags to decide whether to use
104  * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount."
105  *
106  * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the
107  * same time. (That's true throughout the get_user_pages*() and
108  * pin_user_pages*() APIs.) Cases:
109  *
110  *    FOLL_GET: folio's refcount will be incremented by @refs.
111  *
112  *    FOLL_PIN on large folios: folio's refcount will be incremented by
113  *    @refs, and its compound_pincount will be incremented by @refs.
114  *
115  *    FOLL_PIN on single-page folios: folio's refcount will be incremented by
116  *    @refs * GUP_PIN_COUNTING_BIAS.
117  *
118  * Return: The folio containing @page (with refcount appropriately
119  * incremented) for success, or NULL upon failure. If neither FOLL_GET
120  * nor FOLL_PIN was set, that's considered failure, and furthermore,
121  * a likely bug in the caller, so a warning is also emitted.
122  */
123 struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags)
124 {
125 	if (flags & FOLL_GET)
126 		return try_get_folio(page, refs);
127 	else if (flags & FOLL_PIN) {
128 		struct folio *folio;
129 
130 		/*
131 		 * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if the page
132 		 * is not pinnable (e.g. it sits in ZONE_MOVABLE or CMA), so
133 		 * fail and let the caller fall back to the slow path.
134 		 */
135 		if (unlikely((flags & FOLL_LONGTERM) &&
136 			     !is_pinnable_page(page)))
137 			return NULL;
138 
139 		/*
140 		 * CAUTION: Don't use compound_head() on the page before this
141 		 * point, the result won't be stable.
142 		 */
143 		folio = try_get_folio(page, refs);
144 		if (!folio)
145 			return NULL;
146 
147 		/*
148 		 * When pinning a large folio, use an exact count to track it.
149 		 *
150 		 * However, be sure to *also* increment the normal folio
151 		 * refcount field at least once, so that the folio really
152 		 * is pinned.  That's why the refcount from the earlier
153 		 * try_get_folio() is left intact.
154 		 */
155 		if (folio_test_large(folio))
156 			atomic_add(refs, folio_pincount_ptr(folio));
157 		else
158 			folio_ref_add(folio,
159 					refs * (GUP_PIN_COUNTING_BIAS - 1));
160 		node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);
161 
162 		return folio;
163 	}
164 
165 	WARN_ON_ONCE(1);
166 	return NULL;
167 }
168 
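/*
 * Illustrative sketch, not part of the original file: how a caller can
 * observe the FOLL_PIN accounting described above.  Small folios carry a pin
 * as refcount += GUP_PIN_COUNTING_BIAS, large folios track it exactly in
 * their pincount; folio_maybe_dma_pinned() (from <linux/mm.h>) hides that
 * difference.  The helper name below is hypothetical.
 */
static inline bool gup_example_folio_looks_pinned(struct folio *folio)
{
	/*
	 * Note: for small folios this can report false positives when the
	 * refcount is very high for unrelated reasons.
	 */
	return folio_maybe_dma_pinned(folio);
}
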
169 static void gup_put_folio(struct folio *folio, int refs, unsigned int flags)
170 {
171 	if (flags & FOLL_PIN) {
172 		node_stat_mod_folio(folio, NR_FOLL_PIN_RELEASED, refs);
173 		if (folio_test_large(folio))
174 			atomic_sub(refs, folio_pincount_ptr(folio));
175 		else
176 			refs *= GUP_PIN_COUNTING_BIAS;
177 	}
178 
179 	folio_put_refs(folio, refs);
180 }
181 
182 /**
183  * try_grab_page() - elevate a page's refcount by a flag-dependent amount
184  * @page:    pointer to page to be grabbed
185  * @flags:   gup flags: these are the FOLL_* flag values.
186  *
187  * This might not do anything at all, depending on the flags argument.
188  *
189  * "grab" names in this file mean, "look at flags to decide whether to use
190  * FOLL_PIN or FOLL_GET behavior, when incrementing the page's refcount.
191  *
192  * Either FOLL_PIN or FOLL_GET (or neither) may be set, but not both at the same
193  * time. Cases: please see the try_grab_folio() documentation, with
194  * "refs=1".
195  *
196  * Return: true for success, or if no action was required (if neither FOLL_PIN
197  * nor FOLL_GET was set, nothing is done). False for failure: FOLL_GET or
198  * FOLL_PIN was set, but the page could not be grabbed.
199  */
200 bool __must_check try_grab_page(struct page *page, unsigned int flags)
201 {
202 	struct folio *folio = page_folio(page);
203 
204 	WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == (FOLL_GET | FOLL_PIN));
205 	if (WARN_ON_ONCE(folio_ref_count(folio) <= 0))
206 		return false;
207 
208 	if (flags & FOLL_GET)
209 		folio_ref_inc(folio);
210 	else if (flags & FOLL_PIN) {
211 		/*
212 		 * Similar to try_grab_folio(): be sure to *also*
213 		 * increment the normal page refcount field at least once,
214 		 * so that the page really is pinned.
215 		 */
216 		if (folio_test_large(folio)) {
217 			folio_ref_add(folio, 1);
218 			atomic_add(1, folio_pincount_ptr(folio));
219 		} else {
220 			folio_ref_add(folio, GUP_PIN_COUNTING_BIAS);
221 		}
222 
223 		node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, 1);
224 	}
225 
226 	return true;
227 }
228 
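/*
 * Illustrative sketch, not part of the original file: undoing a successful
 * try_grab_page().  A reference taken with FOLL_GET is dropped with
 * put_page(); a pin taken with FOLL_PIN is dropped with unpin_user_page()
 * (declared in <linux/mm.h>).  The helper name below is hypothetical.
 */
static inline void gup_example_ungrab_page(struct page *page,
					   unsigned int flags)
{
	if (flags & FOLL_PIN)
		unpin_user_page(page);
	else if (flags & FOLL_GET)
		put_page(page);
}
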
229 /**
230  * unpin_user_page() - release a dma-pinned page
231  * @page:            pointer to page to be released
232  *
233  * Pages that were pinned via pin_user_pages*() must be released via either
234  * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so
235  * that such pages can be separately tracked and uniquely handled. In
236  * particular, interactions with RDMA and filesystems need special handling.
237  */
238 void unpin_user_page(struct page *page)
239 {
240 	sanity_check_pinned_pages(&page, 1);
241 	gup_put_folio(page_folio(page), 1, FOLL_PIN);
242 }
243 EXPORT_SYMBOL(unpin_user_page);
244 
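/*
 * Illustrative sketch, not part of the original file: the pin/unpin pairing
 * that the comment above asks for, seen from a driver's point of view.  It
 * assumes the caller passes a userspace address and a pages[] array with
 * room for at least @nr entries; pin_user_pages_fast() and FOLL_WRITE are
 * real APIs, the function name is hypothetical.
 */
static int gup_example_pin_use_unpin(unsigned long uaddr, int nr,
				     struct page **pages)
{
	int pinned, i;

	pinned = pin_user_pages_fast(uaddr, nr, FOLL_WRITE, pages);
	if (pinned <= 0)
		return pinned ? pinned : -EFAULT;

	/* ... pages[0..pinned-1] may now be used for DMA or direct access ... */

	for (i = 0; i < pinned; i++)
		unpin_user_page(pages[i]);
	return pinned;
}
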
245 static inline struct folio *gup_folio_range_next(struct page *start,
246 		unsigned long npages, unsigned long i, unsigned int *ntails)
247 {
248 	struct page *next = nth_page(start, i);
249 	struct folio *folio = page_folio(next);
250 	unsigned int nr = 1;
251 
252 	if (folio_test_large(folio))
253 		nr = min_t(unsigned int, npages - i,
254 			   folio_nr_pages(folio) - folio_page_idx(folio, next));
255 
256 	*ntails = nr;
257 	return folio;
258 }
259 
260 static inline struct folio *gup_folio_next(struct page **list,
261 		unsigned long npages, unsigned long i, unsigned int *ntails)
262 {
263 	struct folio *folio = page_folio(list[i]);
264 	unsigned int nr;
265 
266 	for (nr = i + 1; nr < npages; nr++) {
267 		if (page_folio(list[nr]) != folio)
268 			break;
269 	}
270 
271 	*ntails = nr - i;
272 	return folio;
273 }
274 
275 /**
276  * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
277  * @pages:  array of pages to be maybe marked dirty, and definitely released.
278  * @npages: number of pages in the @pages array.
279  * @make_dirty: whether to mark the pages dirty
280  *
281  * "gup-pinned page" refers to a page that has had one of the get_user_pages()
282  * variants called on that page.
283  *
284  * For each page in the @pages array, mark that page (or its head page, if a
285  * compound page) dirty if @make_dirty is true and the page was previously
286  * listed as clean. In any case, release all pages using unpin_user_page()
287  * (via unpin_user_pages() in the non-dirty case).
288  *
289  * Please see the unpin_user_page() documentation for details.
290  *
291  * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
292  * required, then the caller should a) verify that this is really correct,
293  * because _lock() is usually required, and b) hand code it:
294  * set_page_dirty(), unpin_user_page().
295  *
296  */
297 void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
298 				 bool make_dirty)
299 {
300 	unsigned long i;
301 	struct folio *folio;
302 	unsigned int nr;
303 
304 	if (!make_dirty) {
305 		unpin_user_pages(pages, npages);
306 		return;
307 	}
308 
309 	sanity_check_pinned_pages(pages, npages);
310 	for (i = 0; i < npages; i += nr) {
311 		folio = gup_folio_next(pages, npages, i, &nr);
312 		/*
313 		 * Checking PageDirty at this point may race with
314 		 * clear_page_dirty_for_io(), but that's OK. Two key
315 		 * cases:
316 		 *
317 		 * 1) This code sees the page as already dirty, so it
318 		 * skips the call to set_page_dirty(). That could happen
319 		 * because clear_page_dirty_for_io() called
320 		 * page_mkclean(), followed by set_page_dirty().
321 		 * However, now the page is going to get written back,
322 		 * which meets the original intention of setting it
323 		 * dirty, so all is well: clear_page_dirty_for_io() goes
324 		 * on to call TestClearPageDirty(), and write the page
325 		 * back.
326 		 *
327 		 * 2) This code sees the page as clean, so it calls
328 		 * set_page_dirty(). The page stays dirty, despite being
329 		 * written back, so it gets written back again in the
330 		 * next writeback cycle. This is harmless.
331 		 */
332 		if (!folio_test_dirty(folio)) {
333 			folio_lock(folio);
334 			folio_mark_dirty(folio);
335 			folio_unlock(folio);
336 		}
337 		gup_put_folio(folio, nr, FOLL_PIN);
338 	}
339 }
340 EXPORT_SYMBOL(unpin_user_pages_dirty_lock);
341 
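/*
 * Illustrative sketch, not part of the original file: after a device has
 * written into pinned pages, the per-page unpin loop from the earlier example
 * can be replaced by a single call that also handles dirtying.  The function
 * name is hypothetical; unpin_user_pages_dirty_lock() is the API defined
 * above.
 */
static void gup_example_unpin_after_dma_write(struct page **pages,
					      unsigned long npages)
{
	/* Mark the pages dirty (if not already) and drop the pins. */
	unpin_user_pages_dirty_lock(pages, npages, true);
}
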
342 /**
343  * unpin_user_page_range_dirty_lock() - release and optionally dirty
344  * gup-pinned page range
345  *
346  * @page:  the starting page of a range maybe marked dirty, and definitely released.
347  * @npages: number of consecutive pages to release.
348  * @make_dirty: whether to mark the pages dirty
349  *
350  * "gup-pinned page range" refers to a range of pages that has had one of the
351  * pin_user_pages() variants called on that page.
352  *
353  * For the page range defined by [page .. page+npages-1], make that range (or
354  * its head pages, if a compound page) dirty, if @make_dirty is true, and if the
355  * page range was previously listed as clean.
356  *
357  * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
358  * required, then the caller should a) verify that this is really correct,
359  * because _lock() is usually required, and b) hand code it:
360  * set_page_dirty(), unpin_user_page().
361  *
362  */
363 void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
364 				      bool make_dirty)
365 {
366 	unsigned long i;
367 	struct folio *folio;
368 	unsigned int nr;
369 
370 	for (i = 0; i < npages; i += nr) {
371 		folio = gup_folio_range_next(page, npages, i, &nr);
372 		if (make_dirty && !folio_test_dirty(folio)) {
373 			folio_lock(folio);
374 			folio_mark_dirty(folio);
375 			folio_unlock(folio);
376 		}
377 		gup_put_folio(folio, nr, FOLL_PIN);
378 	}
379 }
380 EXPORT_SYMBOL(unpin_user_page_range_dirty_lock);
381 
382 static void unpin_user_pages_lockless(struct page **pages, unsigned long npages)
383 {
384 	unsigned long i;
385 	struct folio *folio;
386 	unsigned int nr;
387 
388 	/*
389 	 * Don't perform any sanity checks because we might have raced with
390 	 * fork() and some anonymous pages might now actually be shared --
391 	 * which is why we're unpinning after all.
392 	 */
393 	for (i = 0; i < npages; i += nr) {
394 		folio = gup_folio_next(pages, npages, i, &nr);
395 		gup_put_folio(folio, nr, FOLL_PIN);
396 	}
397 }
398 
399 /**
400  * unpin_user_pages() - release an array of gup-pinned pages.
401  * @pages:  array of pages to be marked dirty and released.
402  * @npages: number of pages in the @pages array.
403  *
404  * For each page in the @pages array, release the page using unpin_user_page().
405  *
406  * Please see the unpin_user_page() documentation for details.
407  */
408 void unpin_user_pages(struct page **pages, unsigned long npages)
409 {
410 	unsigned long i;
411 	struct folio *folio;
412 	unsigned int nr;
413 
414 	/*
415 	 * If this WARN_ON() fires, then the system *might* be leaking pages (by
416 	 * leaving them pinned), but probably not. More likely, gup/pup returned
417 	 * a hard -ERRNO error to the caller, who erroneously passed it here.
418 	 */
419 	if (WARN_ON(IS_ERR_VALUE(npages)))
420 		return;
421 
422 	sanity_check_pinned_pages(pages, npages);
423 	for (i = 0; i < npages; i += nr) {
424 		folio = gup_folio_next(pages, npages, i, &nr);
425 		gup_put_folio(folio, nr, FOLL_PIN);
426 	}
427 }
428 EXPORT_SYMBOL(unpin_user_pages);
429 
430 /*
431  * Set the MMF_HAS_PINNED flag if it is not set yet; once set, it stays set for
432  * the mm's lifetime.  Avoid setting the bit unless necessary, or it might cause
433  * cacheline write-bouncing on large SMP machines for concurrent pinned gups.
434  */
435 static inline void mm_set_has_pinned_flag(unsigned long *mm_flags)
436 {
437 	if (!test_bit(MMF_HAS_PINNED, mm_flags))
438 		set_bit(MMF_HAS_PINNED, mm_flags);
439 }
440 
441 #ifdef CONFIG_MMU
442 static struct page *no_page_table(struct vm_area_struct *vma,
443 		unsigned int flags)
444 {
445 	/*
446 	 * When core dumping an enormous anonymous area that nobody
447 	 * has touched so far, we don't want to allocate unnecessary pages or
448 	 * page tables.  Return error instead of NULL to skip handle_mm_fault,
449 	 * then get_dump_page() will return NULL to leave a hole in the dump.
450 	 * But we can only make this optimization where a hole would surely
451 	 * be zero-filled if handle_mm_fault() actually did handle it.
452 	 */
453 	if ((flags & FOLL_DUMP) &&
454 			(vma_is_anonymous(vma) || !vma->vm_ops->fault))
455 		return ERR_PTR(-EFAULT);
456 	return NULL;
457 }
458 
459 static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
460 		pte_t *pte, unsigned int flags)
461 {
462 	if (flags & FOLL_TOUCH) {
463 		pte_t entry = *pte;
464 
465 		if (flags & FOLL_WRITE)
466 			entry = pte_mkdirty(entry);
467 		entry = pte_mkyoung(entry);
468 
469 		if (!pte_same(*pte, entry)) {
470 			set_pte_at(vma->vm_mm, address, pte, entry);
471 			update_mmu_cache(vma, address, pte);
472 		}
473 	}
474 
475 	/* Proper page table entry exists, but no corresponding struct page */
476 	return -EEXIST;
477 }
478 
479 /*
480  * FOLL_FORCE can write to even unwritable pte's, but only
481  * after we've gone through a COW cycle and they are dirty.
482  */
483 static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
484 {
485 	return pte_write(pte) ||
486 		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
487 }
488 
489 static struct page *follow_page_pte(struct vm_area_struct *vma,
490 		unsigned long address, pmd_t *pmd, unsigned int flags,
491 		struct dev_pagemap **pgmap)
492 {
493 	struct mm_struct *mm = vma->vm_mm;
494 	struct page *page;
495 	spinlock_t *ptl;
496 	pte_t *ptep, pte;
497 	int ret;
498 
499 	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
500 	if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
501 			 (FOLL_PIN | FOLL_GET)))
502 		return ERR_PTR(-EINVAL);
503 retry:
504 	if (unlikely(pmd_bad(*pmd)))
505 		return no_page_table(vma, flags);
506 
507 	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
508 	pte = *ptep;
509 	if (!pte_present(pte)) {
510 		swp_entry_t entry;
511 		/*
512 		 * KSM's break_ksm() relies upon recognizing a ksm page
513 		 * even while it is being migrated, so for that case we
514 		 * need migration_entry_wait().
515 		 */
516 		if (likely(!(flags & FOLL_MIGRATION)))
517 			goto no_page;
518 		if (pte_none(pte))
519 			goto no_page;
520 		entry = pte_to_swp_entry(pte);
521 		if (!is_migration_entry(entry))
522 			goto no_page;
523 		pte_unmap_unlock(ptep, ptl);
524 		migration_entry_wait(mm, pmd, address);
525 		goto retry;
526 	}
527 	if ((flags & FOLL_NUMA) && pte_protnone(pte))
528 		goto no_page;
529 	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
530 		pte_unmap_unlock(ptep, ptl);
531 		return NULL;
532 	}
533 
534 	page = vm_normal_page(vma, address, pte);
535 	if (!page && pte_devmap(pte) && (flags & (FOLL_GET | FOLL_PIN))) {
536 		/*
537 		 * Only return device mapping pages in the FOLL_GET or FOLL_PIN
538 		 * case since they are only valid while holding the pgmap
539 		 * reference.
540 		 */
541 		*pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap);
542 		if (*pgmap)
543 			page = pte_page(pte);
544 		else
545 			goto no_page;
546 	} else if (unlikely(!page)) {
547 		if (flags & FOLL_DUMP) {
548 			/* Avoid special (like zero) pages in core dumps */
549 			page = ERR_PTR(-EFAULT);
550 			goto out;
551 		}
552 
553 		if (is_zero_pfn(pte_pfn(pte))) {
554 			page = pte_page(pte);
555 		} else {
556 			ret = follow_pfn_pte(vma, address, ptep, flags);
557 			page = ERR_PTR(ret);
558 			goto out;
559 		}
560 	}
561 
562 	if (!pte_write(pte) && gup_must_unshare(flags, page)) {
563 		page = ERR_PTR(-EMLINK);
564 		goto out;
565 	}
566 
567 	VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
568 		       !PageAnonExclusive(page), page);
569 
570 	/* try_grab_page() does nothing unless FOLL_GET or FOLL_PIN is set. */
571 	if (unlikely(!try_grab_page(page, flags))) {
572 		page = ERR_PTR(-ENOMEM);
573 		goto out;
574 	}
575 	/*
576 	 * We need to make the page accessible if and only if we are going
577 	 * to access its content (the FOLL_PIN case).  Please see
578 	 * Documentation/core-api/pin_user_pages.rst for details.
579 	 */
580 	if (flags & FOLL_PIN) {
581 		ret = arch_make_page_accessible(page);
582 		if (ret) {
583 			unpin_user_page(page);
584 			page = ERR_PTR(ret);
585 			goto out;
586 		}
587 	}
588 	if (flags & FOLL_TOUCH) {
589 		if ((flags & FOLL_WRITE) &&
590 		    !pte_dirty(pte) && !PageDirty(page))
591 			set_page_dirty(page);
592 		/*
593 		 * pte_mkyoung() would be more correct here, but atomic care
594 		 * is needed to avoid losing the dirty bit: it is easier to use
595 		 * mark_page_accessed().
596 		 */
597 		mark_page_accessed(page);
598 	}
599 out:
600 	pte_unmap_unlock(ptep, ptl);
601 	return page;
602 no_page:
603 	pte_unmap_unlock(ptep, ptl);
604 	if (!pte_none(pte))
605 		return NULL;
606 	return no_page_table(vma, flags);
607 }
608 
609 static struct page *follow_pmd_mask(struct vm_area_struct *vma,
610 				    unsigned long address, pud_t *pudp,
611 				    unsigned int flags,
612 				    struct follow_page_context *ctx)
613 {
614 	pmd_t *pmd, pmdval;
615 	spinlock_t *ptl;
616 	struct page *page;
617 	struct mm_struct *mm = vma->vm_mm;
618 
619 	pmd = pmd_offset(pudp, address);
620 	/*
621 	 * The READ_ONCE() will stabilize the pmdval in a register or
622 	 * on the stack so that it will stop changing under the code.
623 	 */
624 	pmdval = READ_ONCE(*pmd);
625 	if (pmd_none(pmdval))
626 		return no_page_table(vma, flags);
627 	if (pmd_huge(pmdval) && is_vm_hugetlb_page(vma)) {
628 		page = follow_huge_pmd(mm, address, pmd, flags);
629 		if (page)
630 			return page;
631 		return no_page_table(vma, flags);
632 	}
633 	if (is_hugepd(__hugepd(pmd_val(pmdval)))) {
634 		page = follow_huge_pd(vma, address,
635 				      __hugepd(pmd_val(pmdval)), flags,
636 				      PMD_SHIFT);
637 		if (page)
638 			return page;
639 		return no_page_table(vma, flags);
640 	}
641 retry:
642 	if (!pmd_present(pmdval)) {
643 		/*
644 		 * Should never reach here, if thp migration is not supported;
645 		 * Otherwise, it must be a thp migration entry.
646 		 */
647 		VM_BUG_ON(!thp_migration_supported() ||
648 				  !is_pmd_migration_entry(pmdval));
649 
650 		if (likely(!(flags & FOLL_MIGRATION)))
651 			return no_page_table(vma, flags);
652 
653 		pmd_migration_entry_wait(mm, pmd);
654 		pmdval = READ_ONCE(*pmd);
655 		/*
656 		 * MADV_DONTNEED may convert the pmd to null because
657 		 * mmap_lock is held in read mode
658 		 */
659 		if (pmd_none(pmdval))
660 			return no_page_table(vma, flags);
661 		goto retry;
662 	}
663 	if (pmd_devmap(pmdval)) {
664 		ptl = pmd_lock(mm, pmd);
665 		page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
666 		spin_unlock(ptl);
667 		if (page)
668 			return page;
669 	}
670 	if (likely(!pmd_trans_huge(pmdval)))
671 		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
672 
673 	if ((flags & FOLL_NUMA) && pmd_protnone(pmdval))
674 		return no_page_table(vma, flags);
675 
676 retry_locked:
677 	ptl = pmd_lock(mm, pmd);
678 	if (unlikely(pmd_none(*pmd))) {
679 		spin_unlock(ptl);
680 		return no_page_table(vma, flags);
681 	}
682 	if (unlikely(!pmd_present(*pmd))) {
683 		spin_unlock(ptl);
684 		if (likely(!(flags & FOLL_MIGRATION)))
685 			return no_page_table(vma, flags);
686 		pmd_migration_entry_wait(mm, pmd);
687 		goto retry_locked;
688 	}
689 	if (unlikely(!pmd_trans_huge(*pmd))) {
690 		spin_unlock(ptl);
691 		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
692 	}
693 	if (flags & FOLL_SPLIT_PMD) {
694 		int ret;
695 		page = pmd_page(*pmd);
696 		if (is_huge_zero_page(page)) {
697 			spin_unlock(ptl);
698 			ret = 0;
699 			split_huge_pmd(vma, pmd, address);
700 			if (pmd_trans_unstable(pmd))
701 				ret = -EBUSY;
702 		} else {
703 			spin_unlock(ptl);
704 			split_huge_pmd(vma, pmd, address);
705 			ret = pte_alloc(mm, pmd) ? -ENOMEM : 0;
706 		}
707 
708 		return ret ? ERR_PTR(ret) :
709 			follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
710 	}
711 	page = follow_trans_huge_pmd(vma, address, pmd, flags);
712 	spin_unlock(ptl);
713 	ctx->page_mask = HPAGE_PMD_NR - 1;
714 	return page;
715 }
716 
717 static struct page *follow_pud_mask(struct vm_area_struct *vma,
718 				    unsigned long address, p4d_t *p4dp,
719 				    unsigned int flags,
720 				    struct follow_page_context *ctx)
721 {
722 	pud_t *pud;
723 	spinlock_t *ptl;
724 	struct page *page;
725 	struct mm_struct *mm = vma->vm_mm;
726 
727 	pud = pud_offset(p4dp, address);
728 	if (pud_none(*pud))
729 		return no_page_table(vma, flags);
730 	if (pud_huge(*pud) && is_vm_hugetlb_page(vma)) {
731 		page = follow_huge_pud(mm, address, pud, flags);
732 		if (page)
733 			return page;
734 		return no_page_table(vma, flags);
735 	}
736 	if (is_hugepd(__hugepd(pud_val(*pud)))) {
737 		page = follow_huge_pd(vma, address,
738 				      __hugepd(pud_val(*pud)), flags,
739 				      PUD_SHIFT);
740 		if (page)
741 			return page;
742 		return no_page_table(vma, flags);
743 	}
744 	if (pud_devmap(*pud)) {
745 		ptl = pud_lock(mm, pud);
746 		page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap);
747 		spin_unlock(ptl);
748 		if (page)
749 			return page;
750 	}
751 	if (unlikely(pud_bad(*pud)))
752 		return no_page_table(vma, flags);
753 
754 	return follow_pmd_mask(vma, address, pud, flags, ctx);
755 }
756 
757 static struct page *follow_p4d_mask(struct vm_area_struct *vma,
758 				    unsigned long address, pgd_t *pgdp,
759 				    unsigned int flags,
760 				    struct follow_page_context *ctx)
761 {
762 	p4d_t *p4d;
763 	struct page *page;
764 
765 	p4d = p4d_offset(pgdp, address);
766 	if (p4d_none(*p4d))
767 		return no_page_table(vma, flags);
768 	BUILD_BUG_ON(p4d_huge(*p4d));
769 	if (unlikely(p4d_bad(*p4d)))
770 		return no_page_table(vma, flags);
771 
772 	if (is_hugepd(__hugepd(p4d_val(*p4d)))) {
773 		page = follow_huge_pd(vma, address,
774 				      __hugepd(p4d_val(*p4d)), flags,
775 				      P4D_SHIFT);
776 		if (page)
777 			return page;
778 		return no_page_table(vma, flags);
779 	}
780 	return follow_pud_mask(vma, address, p4d, flags, ctx);
781 }
782 
783 /**
784  * follow_page_mask - look up a page descriptor from a user-virtual address
785  * @vma: vm_area_struct mapping @address
786  * @address: virtual address to look up
787  * @flags: flags modifying lookup behaviour
788  * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a
789  *       pointer to output page_mask
790  *
791  * @flags can have FOLL_ flags set, defined in <linux/mm.h>
792  *
793  * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
794  * the device's dev_pagemap metadata to avoid repeating expensive lookups.
795  *
796  * When getting an anonymous page and the caller has to trigger unsharing
797  * of a shared anonymous page first, -EMLINK is returned. The caller should
798  * trigger a fault with FAULT_FLAG_UNSHARE set. Note that unsharing is only
799  * relevant with FOLL_PIN and !FOLL_WRITE.
800  *
801  * On output, the @ctx->page_mask is set according to the size of the page.
802  *
803  * Return: the mapped (struct page *), %NULL if no mapping exists, or
804  * an error pointer if there is a mapping to something not represented
805  * by a page descriptor (see also vm_normal_page()).
806  */
807 static struct page *follow_page_mask(struct vm_area_struct *vma,
808 			      unsigned long address, unsigned int flags,
809 			      struct follow_page_context *ctx)
810 {
811 	pgd_t *pgd;
812 	struct page *page;
813 	struct mm_struct *mm = vma->vm_mm;
814 
815 	ctx->page_mask = 0;
816 
817 	/* make this handle hugepd */
818 	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
819 	if (!IS_ERR(page)) {
820 		WARN_ON_ONCE(flags & (FOLL_GET | FOLL_PIN));
821 		return page;
822 	}
823 
824 	pgd = pgd_offset(mm, address);
825 
826 	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
827 		return no_page_table(vma, flags);
828 
829 	if (pgd_huge(*pgd)) {
830 		page = follow_huge_pgd(mm, address, pgd, flags);
831 		if (page)
832 			return page;
833 		return no_page_table(vma, flags);
834 	}
835 	if (is_hugepd(__hugepd(pgd_val(*pgd)))) {
836 		page = follow_huge_pd(vma, address,
837 				      __hugepd(pgd_val(*pgd)), flags,
838 				      PGDIR_SHIFT);
839 		if (page)
840 			return page;
841 		return no_page_table(vma, flags);
842 	}
843 
844 	return follow_p4d_mask(vma, address, pgd, flags, ctx);
845 }
846 
847 struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
848 			 unsigned int foll_flags)
849 {
850 	struct follow_page_context ctx = { NULL };
851 	struct page *page;
852 
853 	if (vma_is_secretmem(vma))
854 		return NULL;
855 
856 	if (foll_flags & FOLL_PIN)
857 		return NULL;
858 
859 	page = follow_page_mask(vma, address, foll_flags, &ctx);
860 	if (ctx.pgmap)
861 		put_dev_pagemap(ctx.pgmap);
862 	return page;
863 }
864 
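/*
 * Illustrative sketch, not part of the original file: a minimal follow_page()
 * lookup of the kind done by in-kernel walkers such as KSM.  The caller must
 * hold mmap_lock, and the reference taken via FOLL_GET must be dropped with
 * put_page().  The helper name is hypothetical.
 */
static bool gup_example_page_is_mapped(struct vm_area_struct *vma,
				       unsigned long addr)
{
	struct page *page;

	mmap_assert_locked(vma->vm_mm);
	page = follow_page(vma, addr, FOLL_GET);
	if (IS_ERR_OR_NULL(page))
		return false;
	put_page(page);
	return true;
}
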
865 static int get_gate_page(struct mm_struct *mm, unsigned long address,
866 		unsigned int gup_flags, struct vm_area_struct **vma,
867 		struct page **page)
868 {
869 	pgd_t *pgd;
870 	p4d_t *p4d;
871 	pud_t *pud;
872 	pmd_t *pmd;
873 	pte_t *pte;
874 	int ret = -EFAULT;
875 
876 	/* user gate pages are read-only */
877 	if (gup_flags & FOLL_WRITE)
878 		return -EFAULT;
879 	if (address > TASK_SIZE)
880 		pgd = pgd_offset_k(address);
881 	else
882 		pgd = pgd_offset_gate(mm, address);
883 	if (pgd_none(*pgd))
884 		return -EFAULT;
885 	p4d = p4d_offset(pgd, address);
886 	if (p4d_none(*p4d))
887 		return -EFAULT;
888 	pud = pud_offset(p4d, address);
889 	if (pud_none(*pud))
890 		return -EFAULT;
891 	pmd = pmd_offset(pud, address);
892 	if (!pmd_present(*pmd))
893 		return -EFAULT;
894 	VM_BUG_ON(pmd_trans_huge(*pmd));
895 	pte = pte_offset_map(pmd, address);
896 	if (pte_none(*pte))
897 		goto unmap;
898 	*vma = get_gate_vma(mm);
899 	if (!page)
900 		goto out;
901 	*page = vm_normal_page(*vma, address, *pte);
902 	if (!*page) {
903 		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
904 			goto unmap;
905 		*page = pte_page(*pte);
906 	}
907 	if (unlikely(!try_grab_page(*page, gup_flags))) {
908 		ret = -ENOMEM;
909 		goto unmap;
910 	}
911 out:
912 	ret = 0;
913 unmap:
914 	pte_unmap(pte);
915 	return ret;
916 }
917 
918 /*
919  * mmap_lock must be held on entry.  If @locked != NULL and *@flags
920  * does not include FOLL_NOWAIT, the mmap_lock may be released.  If it
921  * is, *@locked will be set to 0 and -EBUSY returned.
922  */
923 static int faultin_page(struct vm_area_struct *vma,
924 		unsigned long address, unsigned int *flags, bool unshare,
925 		int *locked)
926 {
927 	unsigned int fault_flags = 0;
928 	vm_fault_t ret;
929 
930 	if (*flags & FOLL_NOFAULT)
931 		return -EFAULT;
932 	if (*flags & FOLL_WRITE)
933 		fault_flags |= FAULT_FLAG_WRITE;
934 	if (*flags & FOLL_REMOTE)
935 		fault_flags |= FAULT_FLAG_REMOTE;
936 	if (locked)
937 		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
938 	if (*flags & FOLL_NOWAIT)
939 		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
940 	if (*flags & FOLL_TRIED) {
941 		/*
942 		 * Note: FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_TRIED
943 		 * can co-exist
944 		 */
945 		fault_flags |= FAULT_FLAG_TRIED;
946 	}
947 	if (unshare) {
948 		fault_flags |= FAULT_FLAG_UNSHARE;
949 		/* FAULT_FLAG_WRITE and FAULT_FLAG_UNSHARE are incompatible */
950 		VM_BUG_ON(fault_flags & FAULT_FLAG_WRITE);
951 	}
952 
953 	ret = handle_mm_fault(vma, address, fault_flags, NULL);
954 
955 	if (ret & VM_FAULT_COMPLETED) {
956 		/*
957 		 * With FAULT_FLAG_RETRY_NOWAIT we'll never release the
958 		 * mmap lock in the page fault handler. Sanity check this.
959 		 */
960 		WARN_ON_ONCE(fault_flags & FAULT_FLAG_RETRY_NOWAIT);
961 		if (locked)
962 			*locked = 0;
963 		/*
964 		 * We should do the same as VM_FAULT_RETRY, but let's not
965 		 * return -EBUSY since that's not reflecting the reality of
966 		 * what has happened - we've just fully completed a page
967 		 * fault, with the mmap lock released.  Use -EAGAIN to show
968 		 * that we want to take the mmap lock _again_.
969 		 */
970 		return -EAGAIN;
971 	}
972 
973 	if (ret & VM_FAULT_ERROR) {
974 		int err = vm_fault_to_errno(ret, *flags);
975 
976 		if (err)
977 			return err;
978 		BUG();
979 	}
980 
981 	if (ret & VM_FAULT_RETRY) {
982 		if (locked && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
983 			*locked = 0;
984 		return -EBUSY;
985 	}
986 
987 	/*
988 	 * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
989 	 * necessary, even if maybe_mkwrite decided not to set pte_write. We
990 	 * can thus safely do subsequent page lookups as if they were reads.
991 	 * But only do so when looping for pte_write is futile: in some cases
992 	 * userspace may also be wanting to write to the gotten user page,
993 	 * which a read fault here might prevent (a readonly page might get
994 	 * reCOWed by userspace write).
995 	 */
996 	if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
997 		*flags |= FOLL_COW;
998 	return 0;
999 }
1000 
1001 static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
1002 {
1003 	vm_flags_t vm_flags = vma->vm_flags;
1004 	int write = (gup_flags & FOLL_WRITE);
1005 	int foreign = (gup_flags & FOLL_REMOTE);
1006 
1007 	if (vm_flags & (VM_IO | VM_PFNMAP))
1008 		return -EFAULT;
1009 
1010 	if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
1011 		return -EFAULT;
1012 
1013 	if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma))
1014 		return -EOPNOTSUPP;
1015 
1016 	if (vma_is_secretmem(vma))
1017 		return -EFAULT;
1018 
1019 	if (write) {
1020 		if (!(vm_flags & VM_WRITE)) {
1021 			if (!(gup_flags & FOLL_FORCE))
1022 				return -EFAULT;
1023 			/*
1024 			 * We used to let the write,force case do COW in a
1025 			 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
1026 			 * set a breakpoint in a read-only mapping of an
1027 			 * executable, without corrupting the file (yet only
1028 			 * when that file had been opened for writing!).
1029 			 * Anon pages in shared mappings are surprising: now
1030 			 * just reject it.
1031 			 */
1032 			if (!is_cow_mapping(vm_flags))
1033 				return -EFAULT;
1034 		}
1035 	} else if (!(vm_flags & VM_READ)) {
1036 		if (!(gup_flags & FOLL_FORCE))
1037 			return -EFAULT;
1038 		/*
1039 		 * Is there actually any vma we can reach here which does not
1040 		 * have VM_MAYREAD set?
1041 		 */
1042 		if (!(vm_flags & VM_MAYREAD))
1043 			return -EFAULT;
1044 	}
1045 	/*
1046 	 * gups are always data accesses, not instruction
1047 	 * fetches, so execute=false here
1048 	 */
1049 	if (!arch_vma_access_permitted(vma, write, false, foreign))
1050 		return -EFAULT;
1051 	return 0;
1052 }
1053 
1054 /**
1055  * __get_user_pages() - pin user pages in memory
1056  * @mm:		mm_struct of target mm
1057  * @start:	starting user address
1058  * @nr_pages:	number of pages from start to pin
1059  * @gup_flags:	flags modifying pin behaviour
1060  * @pages:	array that receives pointers to the pages pinned.
1061  *		Should be at least nr_pages long. Or NULL, if caller
1062  *		only intends to ensure the pages are faulted in.
1063  * @vmas:	array of pointers to vmas corresponding to each page.
1064  *		Or NULL if the caller does not require them.
1065  * @locked:     whether we're still with the mmap_lock held
1066  *
1067  * Returns either number of pages pinned (which may be less than the
1068  * number requested), or an error. Details about the return value:
1069  *
1070  * -- If nr_pages is 0, returns 0.
1071  * -- If nr_pages is >0, but no pages were pinned, returns -errno.
1072  * -- If nr_pages is >0, and some pages were pinned, returns the number of
1073  *    pages pinned. Again, this may be less than nr_pages.
1074  * -- 0 return value is possible when the fault would need to be retried.
1075  *
1076  * The caller is responsible for releasing returned @pages, via put_page().
1077  *
1078  * @vmas are valid only as long as mmap_lock is held.
1079  *
1080  * Must be called with mmap_lock held.  It may be released.  See below.
1081  *
1082  * __get_user_pages walks a process's page tables and takes a reference to
1083  * each struct page that each user address corresponds to at a given
1084  * instant. That is, it takes the page that would be accessed if a user
1085  * thread accesses the given user virtual address at that instant.
1086  *
1087  * This does not guarantee that the page exists in the user mappings when
1088  * __get_user_pages returns, and there may even be a completely different
1089  * page there in some cases (e.g. if mmapped pagecache has been invalidated
1090  * and subsequently re-faulted). However, it does guarantee that the page
1091  * won't be freed completely. And mostly callers simply care that the page
1092  * contains data that was valid *at some point in time*. Typically, an IO
1093  * or similar operation cannot guarantee anything stronger anyway because
1094  * locks can't be held over the syscall boundary.
1095  *
1096  * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
1097  * the page is written to, set_page_dirty (or set_page_dirty_lock, as
1098  * appropriate) must be called after the page is finished with, and
1099  * before put_page is called.
1100  *
1101  * If @locked != NULL, *@locked will be set to 0 when mmap_lock is
1102  * released by an up_read().  That can happen if @gup_flags does not
1103  * have FOLL_NOWAIT.
1104  *
1105  * A caller using such a combination of @locked and @gup_flags
1106  * must therefore hold the mmap_lock for reading only, and recognize
1107  * when it's been released.  Otherwise, it must be held for either
1108  * reading or writing and will not be released.
1109  *
1110  * In most cases, get_user_pages or get_user_pages_fast should be used
1111  * instead of __get_user_pages. __get_user_pages should be used only if
1112  * you need some special @gup_flags.
1113  */
1114 static long __get_user_pages(struct mm_struct *mm,
1115 		unsigned long start, unsigned long nr_pages,
1116 		unsigned int gup_flags, struct page **pages,
1117 		struct vm_area_struct **vmas, int *locked)
1118 {
1119 	long ret = 0, i = 0;
1120 	struct vm_area_struct *vma = NULL;
1121 	struct follow_page_context ctx = { NULL };
1122 
1123 	if (!nr_pages)
1124 		return 0;
1125 
1126 	start = untagged_addr(start);
1127 
1128 	VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));
1129 
1130 	/*
1131 	 * If FOLL_FORCE is set then do not force a full fault as the hinting
1132 	 * fault information is unrelated to the reference behaviour of a task
1133 	 * using the address space
1134 	 */
1135 	if (!(gup_flags & FOLL_FORCE))
1136 		gup_flags |= FOLL_NUMA;
1137 
1138 	do {
1139 		struct page *page;
1140 		unsigned int foll_flags = gup_flags;
1141 		unsigned int page_increm;
1142 
1143 		/* first iteration, or we crossed a vma boundary */
1144 		if (!vma || start >= vma->vm_end) {
1145 			vma = find_extend_vma(mm, start);
1146 			if (!vma && in_gate_area(mm, start)) {
1147 				ret = get_gate_page(mm, start & PAGE_MASK,
1148 						gup_flags, &vma,
1149 						pages ? &pages[i] : NULL);
1150 				if (ret)
1151 					goto out;
1152 				ctx.page_mask = 0;
1153 				goto next_page;
1154 			}
1155 
1156 			if (!vma) {
1157 				ret = -EFAULT;
1158 				goto out;
1159 			}
1160 			ret = check_vma_flags(vma, gup_flags);
1161 			if (ret)
1162 				goto out;
1163 
1164 			if (is_vm_hugetlb_page(vma)) {
1165 				i = follow_hugetlb_page(mm, vma, pages, vmas,
1166 						&start, &nr_pages, i,
1167 						gup_flags, locked);
1168 				if (locked && *locked == 0) {
1169 					/*
1170 					 * We've got a VM_FAULT_RETRY
1171 					 * and we've lost mmap_lock.
1172 					 * We must stop here.
1173 					 */
1174 					BUG_ON(gup_flags & FOLL_NOWAIT);
1175 					goto out;
1176 				}
1177 				continue;
1178 			}
1179 		}
1180 retry:
1181 		/*
1182 		 * If we have a pending SIGKILL, don't keep faulting pages and
1183 		 * potentially allocating memory.
1184 		 */
1185 		if (fatal_signal_pending(current)) {
1186 			ret = -EINTR;
1187 			goto out;
1188 		}
1189 		cond_resched();
1190 
1191 		page = follow_page_mask(vma, start, foll_flags, &ctx);
1192 		if (!page || PTR_ERR(page) == -EMLINK) {
1193 			ret = faultin_page(vma, start, &foll_flags,
1194 					   PTR_ERR(page) == -EMLINK, locked);
1195 			switch (ret) {
1196 			case 0:
1197 				goto retry;
1198 			case -EBUSY:
1199 			case -EAGAIN:
1200 				ret = 0;
1201 				fallthrough;
1202 			case -EFAULT:
1203 			case -ENOMEM:
1204 			case -EHWPOISON:
1205 				goto out;
1206 			}
1207 			BUG();
1208 		} else if (PTR_ERR(page) == -EEXIST) {
1209 			/*
1210 			 * Proper page table entry exists, but no corresponding
1211 			 * struct page. If the caller expects **pages to be
1212 			 * filled in, bail out now, because that can't be done
1213 			 * for this page.
1214 			 */
1215 			if (pages) {
1216 				ret = PTR_ERR(page);
1217 				goto out;
1218 			}
1219 
1220 			goto next_page;
1221 		} else if (IS_ERR(page)) {
1222 			ret = PTR_ERR(page);
1223 			goto out;
1224 		}
1225 		if (pages) {
1226 			pages[i] = page;
1227 			flush_anon_page(vma, page, start);
1228 			flush_dcache_page(page);
1229 			ctx.page_mask = 0;
1230 		}
1231 next_page:
1232 		if (vmas) {
1233 			vmas[i] = vma;
1234 			ctx.page_mask = 0;
1235 		}
1236 		page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
1237 		if (page_increm > nr_pages)
1238 			page_increm = nr_pages;
1239 		i += page_increm;
1240 		start += page_increm * PAGE_SIZE;
1241 		nr_pages -= page_increm;
1242 	} while (nr_pages);
1243 out:
1244 	if (ctx.pgmap)
1245 		put_dev_pagemap(ctx.pgmap);
1246 	return i ? i : ret;
1247 }
1248 
1249 static bool vma_permits_fault(struct vm_area_struct *vma,
1250 			      unsigned int fault_flags)
1251 {
1252 	bool write   = !!(fault_flags & FAULT_FLAG_WRITE);
1253 	bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
1254 	vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;
1255 
1256 	if (!(vm_flags & vma->vm_flags))
1257 		return false;
1258 
1259 	/*
1260 	 * The architecture might have a hardware protection
1261 	 * mechanism other than read/write that can deny access.
1262 	 *
1263 	 * gup always represents data access, not instruction
1264 	 * fetches, so execute=false here:
1265 	 */
1266 	if (!arch_vma_access_permitted(vma, write, false, foreign))
1267 		return false;
1268 
1269 	return true;
1270 }
1271 
1272 /**
1273  * fixup_user_fault() - manually resolve a user page fault
1274  * @mm:		mm_struct of target mm
1275  * @address:	user address
1276  * @fault_flags: flags to pass down to handle_mm_fault()
1277  * @unlocked:	did we unlock the mmap_lock while retrying, maybe NULL if caller
1278  *		does not allow retry. If NULL, the caller must guarantee
1279  *		that fault_flags does not contain FAULT_FLAG_ALLOW_RETRY.
1280  *
1281  * This is meant to be called in the specific scenario where, for locking
1282  * reasons, we try to access user memory in atomic context (within a
1283  * pagefault_disable() section), that access returns -EFAULT, and we want to
1284  * resolve the user fault before trying again.
1285  *
1286  * Typically this is meant to be used by the futex code.
1287  *
1288  * The main difference with get_user_pages() is that this function will
1289  * unconditionally call handle_mm_fault() which will in turn perform all the
1290  * necessary SW fixup of the dirty and young bits in the PTE, while
1291  * get_user_pages() only guarantees to update these in the struct page.
1292  *
1293  * This is important for some architectures where those bits also gate the
1294  * access permission to the page because they are maintained in software.  On
1295  * such architectures, gup() will not be enough to make a subsequent access
1296  * succeed.
1297  *
1298  * This function will not return with an unlocked mmap_lock. So it does not
1299  * have the same semantics wrt the @mm->mmap_lock as filemap_fault() does.
1300  */
1301 int fixup_user_fault(struct mm_struct *mm,
1302 		     unsigned long address, unsigned int fault_flags,
1303 		     bool *unlocked)
1304 {
1305 	struct vm_area_struct *vma;
1306 	vm_fault_t ret;
1307 
1308 	address = untagged_addr(address);
1309 
1310 	if (unlocked)
1311 		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
1312 
1313 retry:
1314 	vma = find_extend_vma(mm, address);
1315 	if (!vma || address < vma->vm_start)
1316 		return -EFAULT;
1317 
1318 	if (!vma_permits_fault(vma, fault_flags))
1319 		return -EFAULT;
1320 
1321 	if ((fault_flags & FAULT_FLAG_KILLABLE) &&
1322 	    fatal_signal_pending(current))
1323 		return -EINTR;
1324 
1325 	ret = handle_mm_fault(vma, address, fault_flags, NULL);
1326 
1327 	if (ret & VM_FAULT_COMPLETED) {
1328 		/*
1329 		 * NOTE: it's a pity that we need to retake the lock here
1330 		 * to pair with the unlock() in the callers. Ideally we
1331 		 * could tell the callers so they do not need to unlock.
1332 		 */
1333 		mmap_read_lock(mm);
1334 		*unlocked = true;
1335 		return 0;
1336 	}
1337 
1338 	if (ret & VM_FAULT_ERROR) {
1339 		int err = vm_fault_to_errno(ret, 0);
1340 
1341 		if (err)
1342 			return err;
1343 		BUG();
1344 	}
1345 
1346 	if (ret & VM_FAULT_RETRY) {
1347 		mmap_read_lock(mm);
1348 		*unlocked = true;
1349 		fault_flags |= FAULT_FLAG_TRIED;
1350 		goto retry;
1351 	}
1352 
1353 	return 0;
1354 }
1355 EXPORT_SYMBOL_GPL(fixup_user_fault);
1356 
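/*
 * Illustrative sketch, not part of the original file: the futex-style pattern
 * described in the kerneldoc above.  The read is attempted with page faults
 * disabled; on -EFAULT the fault is resolved with fixup_user_fault() under
 * mmap_lock and the read is retried.  Assumes <linux/uaccess.h> for
 * get_user() and pagefault_disable(); the function name is hypothetical.
 */
static int gup_example_atomic_read_u32(u32 __user *uaddr, u32 *val)
{
	struct mm_struct *mm = current->mm;
	bool unlocked = false;
	int ret;

	for (;;) {
		pagefault_disable();
		ret = get_user(*val, uaddr);
		pagefault_enable();
		if (!ret)
			return 0;

		mmap_read_lock(mm);
		ret = fixup_user_fault(mm, (unsigned long)uaddr, 0, &unlocked);
		mmap_read_unlock(mm);
		if (ret)
			return ret;
	}
}
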
1357 /*
1358  * Please note that this function, unlike __get_user_pages(), will not
1359  * return 0 for nr_pages > 0 without FOLL_NOWAIT.
1360  */
1361 static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
1362 						unsigned long start,
1363 						unsigned long nr_pages,
1364 						struct page **pages,
1365 						struct vm_area_struct **vmas,
1366 						int *locked,
1367 						unsigned int flags)
1368 {
1369 	long ret, pages_done;
1370 	bool lock_dropped;
1371 
1372 	if (locked) {
1373 		/* if VM_FAULT_RETRY can be returned, vmas become invalid */
1374 		BUG_ON(vmas);
1375 		/* check caller initialized locked */
1376 		BUG_ON(*locked != 1);
1377 	}
1378 
1379 	if (flags & FOLL_PIN)
1380 		mm_set_has_pinned_flag(&mm->flags);
1381 
1382 	/*
1383 	 * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior
1384 	 * is to set FOLL_GET if the caller wants pages[] filled in (but has
1385 	 * carelessly failed to specify FOLL_GET), so keep doing that, but only
1386 	 * for FOLL_GET, not for the newer FOLL_PIN.
1387 	 *
1388 	 * FOLL_PIN always expects pages to be non-null, but no need to assert
1389 	 * that here, as any failures will be obvious enough.
1390 	 */
1391 	if (pages && !(flags & FOLL_PIN))
1392 		flags |= FOLL_GET;
1393 
1394 	pages_done = 0;
1395 	lock_dropped = false;
1396 	for (;;) {
1397 		ret = __get_user_pages(mm, start, nr_pages, flags, pages,
1398 				       vmas, locked);
1399 		if (!locked)
1400 			/* VM_FAULT_RETRY couldn't trigger, bypass */
1401 			return ret;
1402 
1403 		/* VM_FAULT_RETRY or VM_FAULT_COMPLETED cannot return errors */
1404 		if (!*locked) {
1405 			BUG_ON(ret < 0);
1406 			BUG_ON(ret >= nr_pages);
1407 		}
1408 
1409 		if (ret > 0) {
1410 			nr_pages -= ret;
1411 			pages_done += ret;
1412 			if (!nr_pages)
1413 				break;
1414 		}
1415 		if (*locked) {
1416 			/*
1417 			 * VM_FAULT_RETRY didn't trigger or it was a
1418 			 * FOLL_NOWAIT.
1419 			 */
1420 			if (!pages_done)
1421 				pages_done = ret;
1422 			break;
1423 		}
1424 		/*
1425 		 * VM_FAULT_RETRY triggered, so seek to the faulting offset.
1426 		 * For the prefault case (!pages) we only update counts.
1427 		 */
1428 		if (likely(pages))
1429 			pages += ret;
1430 		start += ret << PAGE_SHIFT;
1431 		lock_dropped = true;
1432 
1433 retry:
1434 		/*
1435 		 * Repeat on the address that fired VM_FAULT_RETRY
1436 		 * with both FAULT_FLAG_ALLOW_RETRY and
1437 		 * FAULT_FLAG_TRIED.  Note that GUP can be interrupted
1438 		 * by fatal signals, so we need to check it before we
1439 		 * start trying again otherwise it can loop forever.
1440 		 */
1441 
1442 		if (fatal_signal_pending(current)) {
1443 			if (!pages_done)
1444 				pages_done = -EINTR;
1445 			break;
1446 		}
1447 
1448 		ret = mmap_read_lock_killable(mm);
1449 		if (ret) {
1450 			BUG_ON(ret > 0);
1451 			if (!pages_done)
1452 				pages_done = ret;
1453 			break;
1454 		}
1455 
1456 		*locked = 1;
1457 		ret = __get_user_pages(mm, start, 1, flags | FOLL_TRIED,
1458 				       pages, NULL, locked);
1459 		if (!*locked) {
1460 			/* Continue to retry until we succeed */
1461 			BUG_ON(ret != 0);
1462 			goto retry;
1463 		}
1464 		if (ret != 1) {
1465 			BUG_ON(ret > 1);
1466 			if (!pages_done)
1467 				pages_done = ret;
1468 			break;
1469 		}
1470 		nr_pages--;
1471 		pages_done++;
1472 		if (!nr_pages)
1473 			break;
1474 		if (likely(pages))
1475 			pages++;
1476 		start += PAGE_SIZE;
1477 	}
1478 	if (lock_dropped && *locked) {
1479 		/*
1480 		 * We must let the caller know we temporarily dropped the lock
1481 		 * and so the critical section protected by it was lost.
1482 		 */
1483 		mmap_read_unlock(mm);
1484 		*locked = 0;
1485 	}
1486 	return pages_done;
1487 }
1488 
1489 /**
1490  * populate_vma_page_range() -  populate a range of pages in the vma.
1491  * @vma:   target vma
1492  * @start: start address
1493  * @end:   end address
1494  * @locked: whether the mmap_lock is still held
1495  *
1496  * This takes care of mlocking the pages too if VM_LOCKED is set.
1497  *
1498  * Return either number of pages pinned in the vma, or a negative error
1499  * code on error.
1500  *
1501  * vma->vm_mm->mmap_lock must be held.
1502  *
1503  * If @locked is NULL, it may be held for read or write and will
1504  * be unperturbed.
1505  *
1506  * If @locked is non-NULL, it must be held for read only and may be
1507  * released.  If it's released, *@locked will be set to 0.
1508  */
1509 long populate_vma_page_range(struct vm_area_struct *vma,
1510 		unsigned long start, unsigned long end, int *locked)
1511 {
1512 	struct mm_struct *mm = vma->vm_mm;
1513 	unsigned long nr_pages = (end - start) / PAGE_SIZE;
1514 	int gup_flags;
1515 	long ret;
1516 
1517 	VM_BUG_ON(!PAGE_ALIGNED(start));
1518 	VM_BUG_ON(!PAGE_ALIGNED(end));
1519 	VM_BUG_ON_VMA(start < vma->vm_start, vma);
1520 	VM_BUG_ON_VMA(end   > vma->vm_end, vma);
1521 	mmap_assert_locked(mm);
1522 
1523 	/*
1524 	 * Rightly or wrongly, the VM_LOCKONFAULT case has never used
1525 	 * faultin_page() to break COW, so it has no work to do here.
1526 	 */
1527 	if (vma->vm_flags & VM_LOCKONFAULT)
1528 		return nr_pages;
1529 
1530 	gup_flags = FOLL_TOUCH;
1531 	/*
1532 	 * We want to touch writable mappings with a write fault in order
1533 	 * to break COW, except for shared mappings because these don't COW
1534 	 * and we would not want to dirty them for nothing.
1535 	 */
1536 	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
1537 		gup_flags |= FOLL_WRITE;
1538 
1539 	/*
1540 	 * We want mlock to succeed for regions that have any permissions
1541 	 * other than PROT_NONE.
1542 	 */
1543 	if (vma_is_accessible(vma))
1544 		gup_flags |= FOLL_FORCE;
1545 
1546 	/*
1547 	 * We made sure addr is within a VMA, so the following will
1548 	 * not result in a stack expansion that recurses back here.
1549 	 */
1550 	ret = __get_user_pages(mm, start, nr_pages, gup_flags,
1551 				NULL, NULL, locked);
1552 	lru_add_drain();
1553 	return ret;
1554 }
1555 
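/*
 * Illustrative sketch, not part of the original file: populating one whole
 * VMA the way an mlock()-style caller might.  With a NULL @locked argument,
 * mmap_lock is never dropped here, so it may be held for read or write.  The
 * function name is hypothetical.
 */
static long gup_example_populate_whole_vma(struct vm_area_struct *vma)
{
	mmap_assert_locked(vma->vm_mm);
	return populate_vma_page_range(vma, vma->vm_start, vma->vm_end, NULL);
}
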
1556 /*
1557  * faultin_vma_page_range() - populate (prefault) page tables inside the
1558  *			      given VMA range readable/writable
1559  *
1560  * This takes care of mlocking the pages, too, if VM_LOCKED is set.
1561  *
1562  * @vma: target vma
1563  * @start: start address
1564  * @end: end address
1565  * @write: whether to prefault readable or writable
1566  * @locked: whether the mmap_lock is still held
1567  *
1568  * Returns either number of processed pages in the vma, or a negative error
1569  * code on error (see __get_user_pages()).
1570  *
1571  * vma->vm_mm->mmap_lock must be held. The range must be page-aligned and
1572  * covered by the VMA.
1573  *
1574  * If @locked is NULL, it may be held for read or write and will be unperturbed.
1575  *
1576  * If @locked is non-NULL, it must be held for read only and may be released.  If
1577  * it's released, *@locked will be set to 0.
1578  */
1579 long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
1580 			    unsigned long end, bool write, int *locked)
1581 {
1582 	struct mm_struct *mm = vma->vm_mm;
1583 	unsigned long nr_pages = (end - start) / PAGE_SIZE;
1584 	int gup_flags;
1585 	long ret;
1586 
1587 	VM_BUG_ON(!PAGE_ALIGNED(start));
1588 	VM_BUG_ON(!PAGE_ALIGNED(end));
1589 	VM_BUG_ON_VMA(start < vma->vm_start, vma);
1590 	VM_BUG_ON_VMA(end > vma->vm_end, vma);
1591 	mmap_assert_locked(mm);
1592 
1593 	/*
1594 	 * FOLL_TOUCH: Mark page accessed and thereby young; will also mark
1595 	 *	       the page dirty with FOLL_WRITE -- which doesn't make a
1596 	 *	       difference with !FOLL_FORCE, because the page is writable
1597 	 *	       in the page table.
1598 	 * FOLL_HWPOISON: Return -EHWPOISON instead of -EFAULT when we hit
1599 	 *		  a poisoned page.
1600 	 * !FOLL_FORCE: Require proper access permissions.
1601 	 */
1602 	gup_flags = FOLL_TOUCH | FOLL_HWPOISON;
1603 	if (write)
1604 		gup_flags |= FOLL_WRITE;
1605 
1606 	/*
1607 	 * We want to report -EINVAL instead of -EFAULT for any permission
1608 	 * problems or incompatible mappings.
1609 	 */
1610 	if (check_vma_flags(vma, gup_flags))
1611 		return -EINVAL;
1612 
1613 	ret = __get_user_pages(mm, start, nr_pages, gup_flags,
1614 				NULL, NULL, locked);
1615 	lru_add_drain();
1616 	return ret;
1617 }
1618 
1619 /*
1620  * __mm_populate - populate and/or mlock pages within a range of address space.
1621  *
1622  * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
1623  * flags. VMAs must be already marked with the desired vm_flags, and
1624  * mmap_lock must not be held.
1625  */
1626 int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
1627 {
1628 	struct mm_struct *mm = current->mm;
1629 	unsigned long end, nstart, nend;
1630 	struct vm_area_struct *vma = NULL;
1631 	int locked = 0;
1632 	long ret = 0;
1633 
1634 	end = start + len;
1635 
1636 	for (nstart = start; nstart < end; nstart = nend) {
1637 		/*
1638 		 * We want to fault in pages for [nstart; end) address range.
1639 		 * Find first corresponding VMA.
1640 		 */
1641 		if (!locked) {
1642 			locked = 1;
1643 			mmap_read_lock(mm);
1644 			vma = find_vma(mm, nstart);
1645 		} else if (nstart >= vma->vm_end)
1646 			vma = vma->vm_next;
1647 		if (!vma || vma->vm_start >= end)
1648 			break;
1649 		/*
1650 		 * Set [nstart; nend) to intersection of desired address
1651 		 * range with the first VMA. Also, skip undesirable VMA types.
1652 		 */
1653 		nend = min(end, vma->vm_end);
1654 		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
1655 			continue;
1656 		if (nstart < vma->vm_start)
1657 			nstart = vma->vm_start;
1658 		/*
1659 		 * Now fault in a range of pages. populate_vma_page_range()
1660 		 * double checks the vma flags, so that it won't mlock pages
1661 		 * if the vma was already munlocked.
1662 		 */
1663 		ret = populate_vma_page_range(vma, nstart, nend, &locked);
1664 		if (ret < 0) {
1665 			if (ignore_errors) {
1666 				ret = 0;
1667 				continue;	/* continue at next VMA */
1668 			}
1669 			break;
1670 		}
1671 		nend = nstart + ret * PAGE_SIZE;
1672 		ret = 0;
1673 	}
1674 	if (locked)
1675 		mmap_read_unlock(mm);
1676 	return ret;	/* 0 or negative error code */
1677 }
1678 #else /* CONFIG_MMU */
1679 static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
1680 		unsigned long nr_pages, struct page **pages,
1681 		struct vm_area_struct **vmas, int *locked,
1682 		unsigned int foll_flags)
1683 {
1684 	struct vm_area_struct *vma;
1685 	unsigned long vm_flags;
1686 	long i;
1687 
1688 	/* calculate required read or write permissions.
1689 	 * If FOLL_FORCE is set, we only require the "MAY" flags.
1690 	 */
1691 	vm_flags  = (foll_flags & FOLL_WRITE) ?
1692 			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
1693 	vm_flags &= (foll_flags & FOLL_FORCE) ?
1694 			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
1695 
1696 	for (i = 0; i < nr_pages; i++) {
1697 		vma = find_vma(mm, start);
1698 		if (!vma)
1699 			goto finish_or_fault;
1700 
1701 		/* protect what we can, including chardevs */
1702 		if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
1703 		    !(vm_flags & vma->vm_flags))
1704 			goto finish_or_fault;
1705 
1706 		if (pages) {
1707 			pages[i] = virt_to_page(start);
1708 			if (pages[i])
1709 				get_page(pages[i]);
1710 		}
1711 		if (vmas)
1712 			vmas[i] = vma;
1713 		start = (start + PAGE_SIZE) & PAGE_MASK;
1714 	}
1715 
1716 	return i;
1717 
1718 finish_or_fault:
1719 	return i ? : -EFAULT;
1720 }
1721 #endif /* !CONFIG_MMU */
1722 
1723 /**
1724  * fault_in_writeable - fault in userspace address range for writing
1725  * @uaddr: start of address range
1726  * @size: size of address range
1727  *
1728  * Returns the number of bytes not faulted in (like copy_to_user() and
1729  * copy_from_user()).
1730  */
1731 size_t fault_in_writeable(char __user *uaddr, size_t size)
1732 {
1733 	char __user *start = uaddr, *end;
1734 
1735 	if (unlikely(size == 0))
1736 		return 0;
1737 	if (!user_write_access_begin(uaddr, size))
1738 		return size;
1739 	if (!PAGE_ALIGNED(uaddr)) {
1740 		unsafe_put_user(0, uaddr, out);
1741 		uaddr = (char __user *)PAGE_ALIGN((unsigned long)uaddr);
1742 	}
1743 	end = (char __user *)PAGE_ALIGN((unsigned long)start + size);
1744 	if (unlikely(end < start))
1745 		end = NULL;
1746 	while (uaddr != end) {
1747 		unsafe_put_user(0, uaddr, out);
1748 		uaddr += PAGE_SIZE;
1749 	}
1750 
1751 out:
1752 	user_write_access_end();
1753 	if (size > uaddr - start)
1754 		return size - (uaddr - start);
1755 	return 0;
1756 }
1757 EXPORT_SYMBOL(fault_in_writeable);
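
/*
 * Example (illustrative sketch; "ubuf", "len" and do_copy_out() are
 * hypothetical): a caller that copies to a user buffer while holding no
 * locks can prefault and retry a copy that failed with -EFAULT:
 *
 *	do {
 *		err = do_copy_out(ubuf, len);
 *		if (err != -EFAULT)
 *			break;
 *	} while (fault_in_writeable(ubuf, len) != len);
 *
 * The loop gives up once fault_in_writeable() reports that none of the
 * range could be faulted in (return value == len).
 */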
1758 
1759 /**
1760  * fault_in_subpage_writeable - fault in an address range for writing
1761  * @uaddr: start of address range
1762  * @size: size of address range
1763  *
1764  * Fault in a user address range for writing while checking for permissions at
1765  * sub-page granularity (e.g. arm64 MTE). This function should be used when
1766  * the caller cannot guarantee forward progress of a copy_to_user() loop.
1767  *
1768  * Returns the number of bytes not faulted in (like copy_to_user() and
1769  * copy_from_user()).
1770  */
1771 size_t fault_in_subpage_writeable(char __user *uaddr, size_t size)
1772 {
1773 	size_t faulted_in;
1774 
1775 	/*
1776 	 * Attempt faulting in at page granularity first for page table
1777 	 * permission checking. The arch-specific probe_subpage_writeable()
1778 	 * functions may not check for this.
1779 	 */
1780 	faulted_in = size - fault_in_writeable(uaddr, size);
1781 	if (faulted_in)
1782 		faulted_in -= probe_subpage_writeable(uaddr, faulted_in);
1783 
1784 	return size - faulted_in;
1785 }
1786 EXPORT_SYMBOL(fault_in_subpage_writeable);
1787 
1788 /*
1788 /**
1789  * fault_in_safe_writeable - fault in an address range for writing
1790  * @uaddr: start of address range
1791  * @size: length of address range
1792  *
1793  * Faults in an address range for writing.  This is primarily useful when we
1794  * already know that some or all of the pages in the address range aren't in
1795  * memory.
1796  *
1797  * Unlike fault_in_writeable(), this function is non-destructive.
1798  *
1799  * Note that we don't pin or otherwise hold the pages referenced that we fault
1800  * in.  There's no guarantee that they'll stay in memory for any duration of
1801  * time.
1802  *
1803  * Returns the number of bytes not faulted in, like copy_to_user() and
1804  * copy_from_user().
1805  */
1806 size_t fault_in_safe_writeable(const char __user *uaddr, size_t size)
1807 {
1808 	unsigned long start = (unsigned long)uaddr, end;
1809 	struct mm_struct *mm = current->mm;
1810 	bool unlocked = false;
1811 
1812 	if (unlikely(size == 0))
1813 		return 0;
1814 	end = PAGE_ALIGN(start + size);
1815 	if (end < start)
1816 		end = 0;
1817 
1818 	mmap_read_lock(mm);
1819 	do {
1820 		if (fixup_user_fault(mm, start, FAULT_FLAG_WRITE, &unlocked))
1821 			break;
1822 		start = (start + PAGE_SIZE) & PAGE_MASK;
1823 	} while (start != end);
1824 	mmap_read_unlock(mm);
1825 
1826 	if (size > start - (unsigned long)uaddr)
1827 		return size - (start - (unsigned long)uaddr);
1828 	return 0;
1829 }
1830 EXPORT_SYMBOL(fault_in_safe_writeable);
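
/*
 * Example (illustrative sketch; do_locked_read(), take_fs_locks() and
 * drop_fs_locks() are hypothetical): filesystems such as gfs2 use this
 * non-destructive variant when the buffer may already hold valid data that
 * must not be overwritten by the prefault.  A retry loop might look
 * roughly like:
 *
 *	do {
 *		take_fs_locks();
 *		err = do_locked_read(ubuf, len);
 *		drop_fs_locks();
 *		if (err != -EFAULT)
 *			break;
 *	} while (fault_in_safe_writeable(ubuf, len) != len);
 */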
1831 
1832 /**
1833  * fault_in_readable - fault in userspace address range for reading
1834  * @uaddr: start of user address range
1835  * @size: size of user address range
1836  *
1837  * Returns the number of bytes not faulted in (like copy_to_user() and
1838  * copy_from_user()).
1839  */
1840 size_t fault_in_readable(const char __user *uaddr, size_t size)
1841 {
1842 	const char __user *start = uaddr, *end;
1843 	volatile char c;
1844 
1845 	if (unlikely(size == 0))
1846 		return 0;
1847 	if (!user_read_access_begin(uaddr, size))
1848 		return size;
1849 	if (!PAGE_ALIGNED(uaddr)) {
1850 		unsafe_get_user(c, uaddr, out);
1851 		uaddr = (const char __user *)PAGE_ALIGN((unsigned long)uaddr);
1852 	}
1853 	end = (const char __user *)PAGE_ALIGN((unsigned long)start + size);
1854 	if (unlikely(end < start))
1855 		end = NULL;
1856 	while (uaddr != end) {
1857 		unsafe_get_user(c, uaddr, out);
1858 		uaddr += PAGE_SIZE;
1859 	}
1860 
1861 out:
1862 	user_read_access_end();
1863 	(void)c;
1864 	if (size > uaddr - start)
1865 		return size - (uaddr - start);
1866 	return 0;
1867 }
1868 EXPORT_SYMBOL(fault_in_readable);
1869 
1870 /**
1871  * get_dump_page() - pin user page in memory while writing it to core dump
1872  * @addr: user address
1873  *
1874  * Returns struct page pointer of user page pinned for dump,
1875  * to be freed afterwards by put_page().
1876  *
1877  * Returns NULL on any kind of failure - a hole must then be inserted into
1878  * the corefile, to preserve alignment with its headers; and also returns
1879  * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
1880  * allowing a hole to be left in the corefile to save disk space.
1881  *
1882  * Called without mmap_lock (takes and releases the mmap_lock by itself).
1883  */
1884 #ifdef CONFIG_ELF_CORE
1885 struct page *get_dump_page(unsigned long addr)
1886 {
1887 	struct mm_struct *mm = current->mm;
1888 	struct page *page;
1889 	int locked = 1;
1890 	int ret;
1891 
1892 	if (mmap_read_lock_killable(mm))
1893 		return NULL;
1894 	ret = __get_user_pages_locked(mm, addr, 1, &page, NULL, &locked,
1895 				      FOLL_FORCE | FOLL_DUMP | FOLL_GET);
1896 	if (locked)
1897 		mmap_read_unlock(mm);
1898 	return (ret == 1) ? page : NULL;
1899 }
1900 #endif /* CONFIG_ELF_CORE */
1901 
1902 #ifdef CONFIG_MIGRATION
1903 /*
1904  * Check whether all pages are pinnable; if so, return the number of pages.  If
1905  * some pages are not pinnable, migrate them and unpin all pages. Return zero
1906  * if pages were migrated, or if some pages could not be isolated.
1907  * Return a negative error if migration fails.
1908  */
1909 static long check_and_migrate_movable_pages(unsigned long nr_pages,
1910 					    struct page **pages,
1911 					    unsigned int gup_flags)
1912 {
1913 	unsigned long isolation_error_count = 0, i;
1914 	struct folio *prev_folio = NULL;
1915 	LIST_HEAD(movable_page_list);
1916 	bool drain_allow = true;
1917 	int ret = 0;
1918 
1919 	for (i = 0; i < nr_pages; i++) {
1920 		struct folio *folio = page_folio(pages[i]);
1921 
1922 		if (folio == prev_folio)
1923 			continue;
1924 		prev_folio = folio;
1925 
1926 		if (folio_is_pinnable(folio))
1927 			continue;
1928 
1929 		/*
1930 		 * Try to move out any movable page before pinning the range.
1931 		 */
1932 		if (folio_test_hugetlb(folio)) {
1933 			if (isolate_hugetlb(&folio->page,
1934 						&movable_page_list))
1935 				isolation_error_count++;
1936 			continue;
1937 		}
1938 
1939 		if (!folio_test_lru(folio) && drain_allow) {
1940 			lru_add_drain_all();
1941 			drain_allow = false;
1942 		}
1943 
1944 		if (folio_isolate_lru(folio)) {
1945 			isolation_error_count++;
1946 			continue;
1947 		}
1948 		list_add_tail(&folio->lru, &movable_page_list);
1949 		node_stat_mod_folio(folio,
1950 				    NR_ISOLATED_ANON + folio_is_file_lru(folio),
1951 				    folio_nr_pages(folio));
1952 	}
1953 
1954 	if (!list_empty(&movable_page_list) || isolation_error_count)
1955 		goto unpin_pages;
1956 
1957 	/*
1958 	 * If the list is empty and there were no isolation errors, then all
1959 	 * pages are in the correct zone and can remain pinned.
1960 	 */
1961 	return nr_pages;
1962 
1963 unpin_pages:
1964 	if (gup_flags & FOLL_PIN) {
1965 		unpin_user_pages(pages, nr_pages);
1966 	} else {
1967 		for (i = 0; i < nr_pages; i++)
1968 			put_page(pages[i]);
1969 	}
1970 
1971 	if (!list_empty(&movable_page_list)) {
1972 		struct migration_target_control mtc = {
1973 			.nid = NUMA_NO_NODE,
1974 			.gfp_mask = GFP_USER | __GFP_NOWARN,
1975 		};
1976 
1977 		ret = migrate_pages(&movable_page_list, alloc_migration_target,
1978 				    NULL, (unsigned long)&mtc, MIGRATE_SYNC,
1979 				    MR_LONGTERM_PIN, NULL);
1980 		if (ret > 0) /* number of pages not migrated */
1981 			ret = -ENOMEM;
1982 	}
1983 
1984 	if (ret && !list_empty(&movable_page_list))
1985 		putback_movable_pages(&movable_page_list);
1986 	return ret;
1987 }
1988 #else
1989 static long check_and_migrate_movable_pages(unsigned long nr_pages,
1990 					    struct page **pages,
1991 					    unsigned int gup_flags)
1992 {
1993 	return nr_pages;
1994 }
1995 #endif /* CONFIG_MIGRATION */
1996 
1997 /*
1998  * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which
1999  * allows us to process the FOLL_LONGTERM flag.
2000  */
2001 static long __gup_longterm_locked(struct mm_struct *mm,
2002 				  unsigned long start,
2003 				  unsigned long nr_pages,
2004 				  struct page **pages,
2005 				  struct vm_area_struct **vmas,
2006 				  unsigned int gup_flags)
2007 {
2008 	unsigned int flags;
2009 	long rc;
2010 
2011 	if (!(gup_flags & FOLL_LONGTERM))
2012 		return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
2013 					       NULL, gup_flags);
2014 	flags = memalloc_pin_save();
2015 	do {
2016 		rc = __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
2017 					     NULL, gup_flags);
2018 		if (rc <= 0)
2019 			break;
2020 		rc = check_and_migrate_movable_pages(rc, pages, gup_flags);
2021 	} while (!rc);
2022 	memalloc_pin_restore(flags);
2023 
2024 	return rc;
2025 }
2026 
2027 static bool is_valid_gup_flags(unsigned int gup_flags)
2028 {
2029 	/*
2030 	 * FOLL_PIN must only be set internally by the pin_user_pages*() APIs,
2031 	 * never directly by the caller, so enforce that with an assertion:
2032 	 */
2033 	if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
2034 		return false;
2035 	/*
2036 	 * FOLL_PIN is a prerequisite to FOLL_LONGTERM. Another way of saying
2037 	 * that is, FOLL_LONGTERM is a more specific, more restrictive case of
2038 	 * FOLL_PIN.
2039 	 */
2040 	if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
2041 		return false;
2042 
2043 	return true;
2044 }
2045 
2046 #ifdef CONFIG_MMU
2047 static long __get_user_pages_remote(struct mm_struct *mm,
2048 				    unsigned long start, unsigned long nr_pages,
2049 				    unsigned int gup_flags, struct page **pages,
2050 				    struct vm_area_struct **vmas, int *locked)
2051 {
2052 	/*
2053 	 * Parts of FOLL_LONGTERM behavior are incompatible with
2054 	 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
2055 	 * vmas. However, this only comes up if locked is set, and there are
2056 	 * callers that do request FOLL_LONGTERM, but do not set locked. So,
2057 	 * allow what we can.
2058 	 */
2059 	if (gup_flags & FOLL_LONGTERM) {
2060 		if (WARN_ON_ONCE(locked))
2061 			return -EINVAL;
2062 		/*
2063 		 * This will check the vmas (even if our vmas arg is NULL)
2064 		 * and return -ENOTSUPP if DAX isn't allowed in this case:
2065 		 */
2066 		return __gup_longterm_locked(mm, start, nr_pages, pages,
2067 					     vmas, gup_flags | FOLL_TOUCH |
2068 					     FOLL_REMOTE);
2069 	}
2070 
2071 	return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
2072 				       locked,
2073 				       gup_flags | FOLL_TOUCH | FOLL_REMOTE);
2074 }
2075 
2076 /**
2077  * get_user_pages_remote() - pin user pages in memory
2078  * @mm:		mm_struct of target mm
2079  * @start:	starting user address
2080  * @nr_pages:	number of pages from start to pin
2081  * @gup_flags:	flags modifying lookup behaviour
2082  * @pages:	array that receives pointers to the pages pinned.
2083  *		Should be at least nr_pages long. Or NULL, if caller
2084  *		only intends to ensure the pages are faulted in.
2085  * @vmas:	array of pointers to vmas corresponding to each page.
2086  *		Or NULL if the caller does not require them.
2087  * @locked:	pointer to lock flag indicating whether lock is held and
2088  *		subsequently whether VM_FAULT_RETRY functionality can be
2089  *		utilised. Lock must initially be held.
2090  *
2091  * Returns either number of pages pinned (which may be less than the
2092  * number requested), or an error. Details about the return value:
2093  *
2094  * -- If nr_pages is 0, returns 0.
2095  * -- If nr_pages is >0, but no pages were pinned, returns -errno.
2096  * -- If nr_pages is >0, and some pages were pinned, returns the number of
2097  *    pages pinned. Again, this may be less than nr_pages.
2098  *
2099  * The caller is responsible for releasing returned @pages, via put_page().
2100  *
2101  * @vmas are valid only as long as mmap_lock is held.
2102  *
2103  * Must be called with mmap_lock held for read or write.
2104  *
2105  * get_user_pages_remote walks a process's page tables and takes a reference
2106  * to each struct page that each user address corresponds to at a given
2107  * instant. That is, it takes the page that would be accessed if a user
2108  * thread accesses the given user virtual address at that instant.
2109  *
2110  * This does not guarantee that the page exists in the user mappings when
2111  * get_user_pages_remote returns, and there may even be a completely different
2112  * page there in some cases (eg. if mmapped pagecache has been invalidated
2113  * and subsequently re faulted). However it does guarantee that the page
2114  * won't be freed completely. And mostly callers simply care that the page
2115  * contains data that was valid *at some point in time*. Typically, an IO
2116  * or similar operation cannot guarantee anything stronger anyway because
2117  * locks can't be held over the syscall boundary.
2118  *
2119  * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
2120  * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
2121  * be called after the page is finished with, and before put_page is called.
2122  *
2123  * get_user_pages_remote is typically used for fewer-copy IO operations,
2124  * to get a handle on the memory by some means other than accesses
2125  * via the user virtual addresses. The pages may be submitted for
2126  * DMA to devices or accessed via their kernel linear mapping (via the
2127  * kmap APIs). Care should be taken to use the correct cache flushing APIs.
2128  *
2129  * See also get_user_pages_fast, for performance critical applications.
2130  *
2131  * get_user_pages_remote should be phased out in favor of
2132  * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
2133  * should use get_user_pages_remote because it cannot pass
2134  * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
2135  */
2136 long get_user_pages_remote(struct mm_struct *mm,
2137 		unsigned long start, unsigned long nr_pages,
2138 		unsigned int gup_flags, struct page **pages,
2139 		struct vm_area_struct **vmas, int *locked)
2140 {
2141 	if (!is_valid_gup_flags(gup_flags))
2142 		return -EINVAL;
2143 
2144 	return __get_user_pages_remote(mm, start, nr_pages, gup_flags,
2145 				       pages, vmas, locked);
2146 }
2147 EXPORT_SYMBOL(get_user_pages_remote);
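
/*
 * Example (illustrative sketch; "mm", "addr" and "page" are hypothetical
 * locals): a ptrace-style access to another process's memory pins one page
 * while honouring the locked protocol described above:
 *
 *	int locked = 1;
 *	long npinned;
 *
 *	mmap_read_lock(mm);
 *	npinned = get_user_pages_remote(mm, addr, 1, FOLL_WRITE, &page,
 *					NULL, &locked);
 *	if (locked)
 *		mmap_read_unlock(mm);
 *	if (npinned == 1) {
 *		// ... access the page; set_page_dirty_lock() if written ...
 *		put_page(page);
 *	}
 */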
2148 
2149 #else /* CONFIG_MMU */
2150 long get_user_pages_remote(struct mm_struct *mm,
2151 			   unsigned long start, unsigned long nr_pages,
2152 			   unsigned int gup_flags, struct page **pages,
2153 			   struct vm_area_struct **vmas, int *locked)
2154 {
2155 	return 0;
2156 }
2157 
2158 static long __get_user_pages_remote(struct mm_struct *mm,
2159 				    unsigned long start, unsigned long nr_pages,
2160 				    unsigned int gup_flags, struct page **pages,
2161 				    struct vm_area_struct **vmas, int *locked)
2162 {
2163 	return 0;
2164 }
2165 #endif /* !CONFIG_MMU */
2166 
2167 /**
2168  * get_user_pages() - pin user pages in memory
2169  * @start:      starting user address
2170  * @nr_pages:   number of pages from start to pin
2171  * @gup_flags:  flags modifying lookup behaviour
2172  * @pages:      array that receives pointers to the pages pinned.
2173  *              Should be at least nr_pages long. Or NULL, if caller
2174  *              only intends to ensure the pages are faulted in.
2175  * @vmas:       array of pointers to vmas corresponding to each page.
2176  *              Or NULL if the caller does not require them.
2177  *
2178  * This is the same as get_user_pages_remote(), just with a less-flexible
2179  * calling convention where we assume that the mm being operated on belongs to
2180  * the current task, and doesn't allow passing of a locked parameter.  We also
2181  * obviously don't pass FOLL_REMOTE in here.
2182  */
2183 long get_user_pages(unsigned long start, unsigned long nr_pages,
2184 		unsigned int gup_flags, struct page **pages,
2185 		struct vm_area_struct **vmas)
2186 {
2187 	if (!is_valid_gup_flags(gup_flags))
2188 		return -EINVAL;
2189 
2190 	return __gup_longterm_locked(current->mm, start, nr_pages,
2191 				     pages, vmas, gup_flags | FOLL_TOUCH);
2192 }
2193 EXPORT_SYMBOL(get_user_pages);
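
/*
 * Example (illustrative sketch; "uaddr", "npages", "pages", "nr" and "i" are
 * hypothetical): unlike the *_fast variants, this call shares
 * get_user_pages_remote()'s rule that the caller already holds mmap_lock:
 *
 *	mmap_read_lock(current->mm);
 *	nr = get_user_pages(uaddr, npages, FOLL_WRITE, pages, NULL);
 *	mmap_read_unlock(current->mm);
 *	if (nr > 0) {
 *		// ... use pages[0..nr-1] ...
 *		for (i = 0; i < nr; i++)
 *			put_page(pages[i]);
 *	}
 */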
2194 
2195 /*
2196  * get_user_pages_unlocked() is suitable to replace the form:
2197  *
2198  *      mmap_read_lock(mm);
2199  *      get_user_pages(mm, ..., pages, NULL);
2200  *      mmap_read_unlock(mm);
2201  *
2202  *  with:
2203  *
2204  *      get_user_pages_unlocked(mm, ..., pages);
2205  *
2206  * It is functionally equivalent to get_user_pages_fast so
2207  * get_user_pages_fast should be used instead if specific gup_flags
2208  * (e.g. FOLL_FORCE) are not required.
2209  */
2210 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
2211 			     struct page **pages, unsigned int gup_flags)
2212 {
2213 	struct mm_struct *mm = current->mm;
2214 	int locked = 1;
2215 	long ret;
2216 
2217 	/*
2218 	 * FIXME: Current FOLL_LONGTERM behavior is incompatible with
2219 	 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
2220 	 * vmas.  As there are no users of this flag in this call we simply
2221 	 * disallow this option for now.
2222 	 */
2223 	if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
2224 		return -EINVAL;
2225 
2226 	mmap_read_lock(mm);
2227 	ret = __get_user_pages_locked(mm, start, nr_pages, pages, NULL,
2228 				      &locked, gup_flags | FOLL_TOUCH);
2229 	if (locked)
2230 		mmap_read_unlock(mm);
2231 	return ret;
2232 }
2233 EXPORT_SYMBOL(get_user_pages_unlocked);
2234 
2235 /*
2236  * Fast GUP
2237  *
2238  * get_user_pages_fast attempts to pin user pages by walking the page
2239  * tables directly and avoids taking locks. Thus the walker needs to be
2240  * protected from page table pages being freed from under it, and should
2241  * block any THP splits.
2242  *
2243  * One way to achieve this is to have the walker disable interrupts, and
2244  * rely on IPIs from the TLB flushing code blocking before the page table
2245  * pages are freed. This is unsuitable for architectures that do not need
2246  * to broadcast an IPI when invalidating TLBs.
2247  *
2248  * Another way to achieve this is to batch up the pages containing page tables
2249  * belonging to more than one mm_user, then rcu_sched a callback to free those
2250  * pages. Disabling interrupts will allow the fast_gup walker to both block
2251  * the rcu_sched callback, and an IPI that we broadcast for splitting THPs
2252  * (which is a relatively rare event). The code below adopts this strategy.
2253  *
2254  * Before activating this code, please be aware that the following assumptions
2255  * are currently made:
2256  *
2257  *  *) Either MMU_GATHER_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to
2258  *  free pages containing page tables, or TLB flushing requires an IPI broadcast.
2259  *
2260  *  *) ptes can be read atomically by the architecture.
2261  *
2262  *  *) access_ok is sufficient to validate userspace address ranges.
2263  *
2264  * The last two assumptions can be relaxed by the addition of helper functions.
2265  *
2266  * This code is based heavily on the PowerPC implementation by Nick Piggin.
2267  */
2268 #ifdef CONFIG_HAVE_FAST_GUP
2269 
2270 static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start,
2271 					    unsigned int flags,
2272 					    struct page **pages)
2273 {
2274 	while ((*nr) - nr_start) {
2275 		struct page *page = pages[--(*nr)];
2276 
2277 		ClearPageReferenced(page);
2278 		if (flags & FOLL_PIN)
2279 			unpin_user_page(page);
2280 		else
2281 			put_page(page);
2282 	}
2283 }
2284 
2285 #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
2286 static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
2287 			 unsigned int flags, struct page **pages, int *nr)
2288 {
2289 	struct dev_pagemap *pgmap = NULL;
2290 	int nr_start = *nr, ret = 0;
2291 	pte_t *ptep, *ptem;
2292 
2293 	ptem = ptep = pte_offset_map(&pmd, addr);
2294 	do {
2295 		pte_t pte = ptep_get_lockless(ptep);
2296 		struct page *page;
2297 		struct folio *folio;
2298 
2299 		/*
2300 		 * Similar to the PMD case below, NUMA hinting must take slow
2301 		 * path using the pte_protnone check.
2302 		 */
2303 		if (pte_protnone(pte))
2304 			goto pte_unmap;
2305 
2306 		if (!pte_access_permitted(pte, flags & FOLL_WRITE))
2307 			goto pte_unmap;
2308 
2309 		if (pte_devmap(pte)) {
2310 			if (unlikely(flags & FOLL_LONGTERM))
2311 				goto pte_unmap;
2312 
2313 			pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
2314 			if (unlikely(!pgmap)) {
2315 				undo_dev_pagemap(nr, nr_start, flags, pages);
2316 				goto pte_unmap;
2317 			}
2318 		} else if (pte_special(pte))
2319 			goto pte_unmap;
2320 
2321 		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
2322 		page = pte_page(pte);
2323 
2324 		folio = try_grab_folio(page, 1, flags);
2325 		if (!folio)
2326 			goto pte_unmap;
2327 
2328 		if (unlikely(page_is_secretmem(page))) {
2329 			gup_put_folio(folio, 1, flags);
2330 			goto pte_unmap;
2331 		}
2332 
2333 		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
2334 			gup_put_folio(folio, 1, flags);
2335 			goto pte_unmap;
2336 		}
2337 
2338 		if (!pte_write(pte) && gup_must_unshare(flags, page)) {
2339 			gup_put_folio(folio, 1, flags);
2340 			goto pte_unmap;
2341 		}
2342 
2343 		/*
2344 		 * We need to make the page accessible if and only if we are
2345 		 * going to access its content (the FOLL_PIN case).  Please
2346 		 * see Documentation/core-api/pin_user_pages.rst for
2347 		 * details.
2348 		 */
2349 		if (flags & FOLL_PIN) {
2350 			ret = arch_make_page_accessible(page);
2351 			if (ret) {
2352 				gup_put_folio(folio, 1, flags);
2353 				goto pte_unmap;
2354 			}
2355 		}
2356 		folio_set_referenced(folio);
2357 		pages[*nr] = page;
2358 		(*nr)++;
2359 	} while (ptep++, addr += PAGE_SIZE, addr != end);
2360 
2361 	ret = 1;
2362 
2363 pte_unmap:
2364 	if (pgmap)
2365 		put_dev_pagemap(pgmap);
2366 	pte_unmap(ptem);
2367 	return ret;
2368 }
2369 #else
2370 
2371 /*
2372  * If we can't determine whether or not a pte is special, then fail immediately
2373  * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
2374  * to be special.
2375  *
2376  * For a futex to be placed on a THP tail page, get_futex_key requires a
2377  * get_user_pages_fast_only implementation that can pin pages. Thus it's still
2378  * useful to have gup_huge_pmd even if we can't operate on ptes.
2379  */
2380 static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
2381 			 unsigned int flags, struct page **pages, int *nr)
2382 {
2383 	return 0;
2384 }
2385 #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
2386 
2387 #if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
2388 static int __gup_device_huge(unsigned long pfn, unsigned long addr,
2389 			     unsigned long end, unsigned int flags,
2390 			     struct page **pages, int *nr)
2391 {
2392 	int nr_start = *nr;
2393 	struct dev_pagemap *pgmap = NULL;
2394 
2395 	do {
2396 		struct page *page = pfn_to_page(pfn);
2397 
2398 		pgmap = get_dev_pagemap(pfn, pgmap);
2399 		if (unlikely(!pgmap)) {
2400 			undo_dev_pagemap(nr, nr_start, flags, pages);
2401 			break;
2402 		}
2403 		SetPageReferenced(page);
2404 		pages[*nr] = page;
2405 		if (unlikely(!try_grab_page(page, flags))) {
2406 			undo_dev_pagemap(nr, nr_start, flags, pages);
2407 			break;
2408 		}
2409 		(*nr)++;
2410 		pfn++;
2411 	} while (addr += PAGE_SIZE, addr != end);
2412 
2413 	put_dev_pagemap(pgmap);
2414 	return addr == end;
2415 }
2416 
2417 static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
2418 				 unsigned long end, unsigned int flags,
2419 				 struct page **pages, int *nr)
2420 {
2421 	unsigned long fault_pfn;
2422 	int nr_start = *nr;
2423 
2424 	fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
2425 	if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr))
2426 		return 0;
2427 
2428 	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
2429 		undo_dev_pagemap(nr, nr_start, flags, pages);
2430 		return 0;
2431 	}
2432 	return 1;
2433 }
2434 
2435 static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
2436 				 unsigned long end, unsigned int flags,
2437 				 struct page **pages, int *nr)
2438 {
2439 	unsigned long fault_pfn;
2440 	int nr_start = *nr;
2441 
2442 	fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
2443 	if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr))
2444 		return 0;
2445 
2446 	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
2447 		undo_dev_pagemap(nr, nr_start, flags, pages);
2448 		return 0;
2449 	}
2450 	return 1;
2451 }
2452 #else
2453 static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
2454 				 unsigned long end, unsigned int flags,
2455 				 struct page **pages, int *nr)
2456 {
2457 	BUILD_BUG();
2458 	return 0;
2459 }
2460 
2461 static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
2462 				 unsigned long end, unsigned int flags,
2463 				 struct page **pages, int *nr)
2464 {
2465 	BUILD_BUG();
2466 	return 0;
2467 }
2468 #endif
2469 
2470 static int record_subpages(struct page *page, unsigned long addr,
2471 			   unsigned long end, struct page **pages)
2472 {
2473 	int nr;
2474 
2475 	for (nr = 0; addr != end; nr++, addr += PAGE_SIZE)
2476 		pages[nr] = nth_page(page, nr);
2477 
2478 	return nr;
2479 }
2480 
2481 #ifdef CONFIG_ARCH_HAS_HUGEPD
2482 static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
2483 				      unsigned long sz)
2484 {
2485 	unsigned long __boundary = (addr + sz) & ~(sz-1);
2486 	return (__boundary - 1 < end - 1) ? __boundary : end;
2487 }
2488 
2489 static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
2490 		       unsigned long end, unsigned int flags,
2491 		       struct page **pages, int *nr)
2492 {
2493 	unsigned long pte_end;
2494 	struct page *page;
2495 	struct folio *folio;
2496 	pte_t pte;
2497 	int refs;
2498 
2499 	pte_end = (addr + sz) & ~(sz-1);
2500 	if (pte_end < end)
2501 		end = pte_end;
2502 
2503 	pte = huge_ptep_get(ptep);
2504 
2505 	if (!pte_access_permitted(pte, flags & FOLL_WRITE))
2506 		return 0;
2507 
2508 	/* hugepages are never "special" */
2509 	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
2510 
2511 	page = nth_page(pte_page(pte), (addr & (sz - 1)) >> PAGE_SHIFT);
2512 	refs = record_subpages(page, addr, end, pages + *nr);
2513 
2514 	folio = try_grab_folio(page, refs, flags);
2515 	if (!folio)
2516 		return 0;
2517 
2518 	if (unlikely(pte_val(pte) != pte_val(*ptep))) {
2519 		gup_put_folio(folio, refs, flags);
2520 		return 0;
2521 	}
2522 
2523 	if (!pte_write(pte) && gup_must_unshare(flags, &folio->page)) {
2524 		gup_put_folio(folio, refs, flags);
2525 		return 0;
2526 	}
2527 
2528 	*nr += refs;
2529 	folio_set_referenced(folio);
2530 	return 1;
2531 }
2532 
2533 static int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
2534 		unsigned int pdshift, unsigned long end, unsigned int flags,
2535 		struct page **pages, int *nr)
2536 {
2537 	pte_t *ptep;
2538 	unsigned long sz = 1UL << hugepd_shift(hugepd);
2539 	unsigned long next;
2540 
2541 	ptep = hugepte_offset(hugepd, addr, pdshift);
2542 	do {
2543 		next = hugepte_addr_end(addr, end, sz);
2544 		if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr))
2545 			return 0;
2546 	} while (ptep++, addr = next, addr != end);
2547 
2548 	return 1;
2549 }
2550 #else
2551 static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
2552 		unsigned int pdshift, unsigned long end, unsigned int flags,
2553 		struct page **pages, int *nr)
2554 {
2555 	return 0;
2556 }
2557 #endif /* CONFIG_ARCH_HAS_HUGEPD */
2558 
2559 static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
2560 			unsigned long end, unsigned int flags,
2561 			struct page **pages, int *nr)
2562 {
2563 	struct page *page;
2564 	struct folio *folio;
2565 	int refs;
2566 
2567 	if (!pmd_access_permitted(orig, flags & FOLL_WRITE))
2568 		return 0;
2569 
2570 	if (pmd_devmap(orig)) {
2571 		if (unlikely(flags & FOLL_LONGTERM))
2572 			return 0;
2573 		return __gup_device_huge_pmd(orig, pmdp, addr, end, flags,
2574 					     pages, nr);
2575 	}
2576 
2577 	page = nth_page(pmd_page(orig), (addr & ~PMD_MASK) >> PAGE_SHIFT);
2578 	refs = record_subpages(page, addr, end, pages + *nr);
2579 
2580 	folio = try_grab_folio(page, refs, flags);
2581 	if (!folio)
2582 		return 0;
2583 
2584 	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
2585 		gup_put_folio(folio, refs, flags);
2586 		return 0;
2587 	}
2588 
2589 	if (!pmd_write(orig) && gup_must_unshare(flags, &folio->page)) {
2590 		gup_put_folio(folio, refs, flags);
2591 		return 0;
2592 	}
2593 
2594 	*nr += refs;
2595 	folio_set_referenced(folio);
2596 	return 1;
2597 }
2598 
2599 static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
2600 			unsigned long end, unsigned int flags,
2601 			struct page **pages, int *nr)
2602 {
2603 	struct page *page;
2604 	struct folio *folio;
2605 	int refs;
2606 
2607 	if (!pud_access_permitted(orig, flags & FOLL_WRITE))
2608 		return 0;
2609 
2610 	if (pud_devmap(orig)) {
2611 		if (unlikely(flags & FOLL_LONGTERM))
2612 			return 0;
2613 		return __gup_device_huge_pud(orig, pudp, addr, end, flags,
2614 					     pages, nr);
2615 	}
2616 
2617 	page = nth_page(pud_page(orig), (addr & ~PUD_MASK) >> PAGE_SHIFT);
2618 	refs = record_subpages(page, addr, end, pages + *nr);
2619 
2620 	folio = try_grab_folio(page, refs, flags);
2621 	if (!folio)
2622 		return 0;
2623 
2624 	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
2625 		gup_put_folio(folio, refs, flags);
2626 		return 0;
2627 	}
2628 
2629 	if (!pud_write(orig) && gup_must_unshare(flags, &folio->page)) {
2630 		gup_put_folio(folio, refs, flags);
2631 		return 0;
2632 	}
2633 
2634 	*nr += refs;
2635 	folio_set_referenced(folio);
2636 	return 1;
2637 }
2638 
2639 static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
2640 			unsigned long end, unsigned int flags,
2641 			struct page **pages, int *nr)
2642 {
2643 	int refs;
2644 	struct page *page;
2645 	struct folio *folio;
2646 
2647 	if (!pgd_access_permitted(orig, flags & FOLL_WRITE))
2648 		return 0;
2649 
2650 	BUILD_BUG_ON(pgd_devmap(orig));
2651 
2652 	page = nth_page(pgd_page(orig), (addr & ~PGDIR_MASK) >> PAGE_SHIFT);
2653 	refs = record_subpages(page, addr, end, pages + *nr);
2654 
2655 	folio = try_grab_folio(page, refs, flags);
2656 	if (!folio)
2657 		return 0;
2658 
2659 	if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
2660 		gup_put_folio(folio, refs, flags);
2661 		return 0;
2662 	}
2663 
2664 	*nr += refs;
2665 	folio_set_referenced(folio);
2666 	return 1;
2667 }
2668 
2669 static int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned long end,
2670 		unsigned int flags, struct page **pages, int *nr)
2671 {
2672 	unsigned long next;
2673 	pmd_t *pmdp;
2674 
2675 	pmdp = pmd_offset_lockless(pudp, pud, addr);
2676 	do {
2677 		pmd_t pmd = READ_ONCE(*pmdp);
2678 
2679 		next = pmd_addr_end(addr, end);
2680 		if (!pmd_present(pmd))
2681 			return 0;
2682 
2683 		if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) ||
2684 			     pmd_devmap(pmd))) {
2685 			/*
2686 			 * NUMA hinting faults need to be handled in the GUP
2687 			 * slowpath for accounting purposes and so that they
2688 			 * can be serialised against THP migration.
2689 			 */
2690 			if (pmd_protnone(pmd))
2691 				return 0;
2692 
2693 			if (!gup_huge_pmd(pmd, pmdp, addr, next, flags,
2694 				pages, nr))
2695 				return 0;
2696 
2697 		} else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
2698 			/*
2699 			 * architectures have different formats for the hugetlbfs
2700 			 * pmd and the THP pmd
2701 			 */
2702 			if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
2703 					 PMD_SHIFT, next, flags, pages, nr))
2704 				return 0;
2705 		} else if (!gup_pte_range(pmd, addr, next, flags, pages, nr))
2706 			return 0;
2707 	} while (pmdp++, addr = next, addr != end);
2708 
2709 	return 1;
2710 }
2711 
2712 static int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned long end,
2713 			 unsigned int flags, struct page **pages, int *nr)
2714 {
2715 	unsigned long next;
2716 	pud_t *pudp;
2717 
2718 	pudp = pud_offset_lockless(p4dp, p4d, addr);
2719 	do {
2720 		pud_t pud = READ_ONCE(*pudp);
2721 
2722 		next = pud_addr_end(addr, end);
2723 		if (unlikely(!pud_present(pud)))
2724 			return 0;
2725 		if (unlikely(pud_huge(pud))) {
2726 			if (!gup_huge_pud(pud, pudp, addr, next, flags,
2727 					  pages, nr))
2728 				return 0;
2729 		} else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
2730 			if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
2731 					 PUD_SHIFT, next, flags, pages, nr))
2732 				return 0;
2733 		} else if (!gup_pmd_range(pudp, pud, addr, next, flags, pages, nr))
2734 			return 0;
2735 	} while (pudp++, addr = next, addr != end);
2736 
2737 	return 1;
2738 }
2739 
2740 static int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, unsigned long end,
2741 			 unsigned int flags, struct page **pages, int *nr)
2742 {
2743 	unsigned long next;
2744 	p4d_t *p4dp;
2745 
2746 	p4dp = p4d_offset_lockless(pgdp, pgd, addr);
2747 	do {
2748 		p4d_t p4d = READ_ONCE(*p4dp);
2749 
2750 		next = p4d_addr_end(addr, end);
2751 		if (p4d_none(p4d))
2752 			return 0;
2753 		BUILD_BUG_ON(p4d_huge(p4d));
2754 		if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) {
2755 			if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr,
2756 					 P4D_SHIFT, next, flags, pages, nr))
2757 				return 0;
2758 		} else if (!gup_pud_range(p4dp, p4d, addr, next, flags, pages, nr))
2759 			return 0;
2760 	} while (p4dp++, addr = next, addr != end);
2761 
2762 	return 1;
2763 }
2764 
2765 static void gup_pgd_range(unsigned long addr, unsigned long end,
2766 		unsigned int flags, struct page **pages, int *nr)
2767 {
2768 	unsigned long next;
2769 	pgd_t *pgdp;
2770 
2771 	pgdp = pgd_offset(current->mm, addr);
2772 	do {
2773 		pgd_t pgd = READ_ONCE(*pgdp);
2774 
2775 		next = pgd_addr_end(addr, end);
2776 		if (pgd_none(pgd))
2777 			return;
2778 		if (unlikely(pgd_huge(pgd))) {
2779 			if (!gup_huge_pgd(pgd, pgdp, addr, next, flags,
2780 					  pages, nr))
2781 				return;
2782 		} else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
2783 			if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
2784 					 PGDIR_SHIFT, next, flags, pages, nr))
2785 				return;
2786 		} else if (!gup_p4d_range(pgdp, pgd, addr, next, flags, pages, nr))
2787 			return;
2788 	} while (pgdp++, addr = next, addr != end);
2789 }
2790 #else
2791 static inline void gup_pgd_range(unsigned long addr, unsigned long end,
2792 		unsigned int flags, struct page **pages, int *nr)
2793 {
2794 }
2795 #endif /* CONFIG_HAVE_FAST_GUP */
2796 
2797 #ifndef gup_fast_permitted
2798 /*
2799  * Check if it's allowed to use get_user_pages_fast_only() for the range, or
2800  * we need to fall back to the slow version:
2801  */
2802 static bool gup_fast_permitted(unsigned long start, unsigned long end)
2803 {
2804 	return true;
2805 }
2806 #endif
2807 
2808 static int __gup_longterm_unlocked(unsigned long start, int nr_pages,
2809 				   unsigned int gup_flags, struct page **pages)
2810 {
2811 	int ret;
2812 
2813 	/*
2814 	 * FIXME: FOLL_LONGTERM does not work with
2815 	 * get_user_pages_unlocked() (see comments in that function)
2816 	 */
2817 	if (gup_flags & FOLL_LONGTERM) {
2818 		mmap_read_lock(current->mm);
2819 		ret = __gup_longterm_locked(current->mm,
2820 					    start, nr_pages,
2821 					    pages, NULL, gup_flags);
2822 		mmap_read_unlock(current->mm);
2823 	} else {
2824 		ret = get_user_pages_unlocked(start, nr_pages,
2825 					      pages, gup_flags);
2826 	}
2827 
2828 	return ret;
2829 }
2830 
2831 static unsigned long lockless_pages_from_mm(unsigned long start,
2832 					    unsigned long end,
2833 					    unsigned int gup_flags,
2834 					    struct page **pages)
2835 {
2836 	unsigned long flags;
2837 	int nr_pinned = 0;
2838 	unsigned seq;
2839 
2840 	if (!IS_ENABLED(CONFIG_HAVE_FAST_GUP) ||
2841 	    !gup_fast_permitted(start, end))
2842 		return 0;
2843 
2844 	if (gup_flags & FOLL_PIN) {
2845 		seq = raw_read_seqcount(&current->mm->write_protect_seq);
2846 		if (seq & 1)
2847 			return 0;
2848 	}
2849 
2850 	/*
2851 	 * Disable interrupts. The nested form is used, in order to allow full,
2852 	 * general purpose use of this routine.
2853 	 *
2854 	 * With interrupts disabled, we block page table pages from being freed
2855 	 * from under us. See struct mmu_table_batch comments in
2856 	 * include/asm-generic/tlb.h for more details.
2857 	 *
2858 	 * We do not adopt an rcu_read_lock() here as we also want to block IPIs
2859 	 * that come from THPs splitting.
2860 	 */
2861 	local_irq_save(flags);
2862 	gup_pgd_range(start, end, gup_flags, pages, &nr_pinned);
2863 	local_irq_restore(flags);
2864 
2865 	/*
2866 	 * When pinning pages for DMA there could be a concurrent write protect
2867 	 * from fork() via copy_page_range(), in this case always fail fast GUP.
2868 	 */
2869 	if (gup_flags & FOLL_PIN) {
2870 		if (read_seqcount_retry(&current->mm->write_protect_seq, seq)) {
2871 			unpin_user_pages_lockless(pages, nr_pinned);
2872 			return 0;
2873 		} else {
2874 			sanity_check_pinned_pages(pages, nr_pinned);
2875 		}
2876 	}
2877 	return nr_pinned;
2878 }
2879 
2880 static int internal_get_user_pages_fast(unsigned long start,
2881 					unsigned long nr_pages,
2882 					unsigned int gup_flags,
2883 					struct page **pages)
2884 {
2885 	unsigned long len, end;
2886 	unsigned long nr_pinned;
2887 	int ret;
2888 
2889 	if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM |
2890 				       FOLL_FORCE | FOLL_PIN | FOLL_GET |
2891 				       FOLL_FAST_ONLY | FOLL_NOFAULT)))
2892 		return -EINVAL;
2893 
2894 	if (gup_flags & FOLL_PIN)
2895 		mm_set_has_pinned_flag(&current->mm->flags);
2896 
2897 	if (!(gup_flags & FOLL_FAST_ONLY))
2898 		might_lock_read(&current->mm->mmap_lock);
2899 
2900 	start = untagged_addr(start) & PAGE_MASK;
2901 	len = nr_pages << PAGE_SHIFT;
2902 	if (check_add_overflow(start, len, &end))
2903 		return 0;
2904 	if (unlikely(!access_ok((void __user *)start, len)))
2905 		return -EFAULT;
2906 
2907 	nr_pinned = lockless_pages_from_mm(start, end, gup_flags, pages);
2908 	if (nr_pinned == nr_pages || gup_flags & FOLL_FAST_ONLY)
2909 		return nr_pinned;
2910 
2911 	/* Slow path: try to get the remaining pages with get_user_pages */
2912 	start += nr_pinned << PAGE_SHIFT;
2913 	pages += nr_pinned;
2914 	ret = __gup_longterm_unlocked(start, nr_pages - nr_pinned, gup_flags,
2915 				      pages);
2916 	if (ret < 0) {
2917 		/*
2918 		 * The caller has to unpin the pages we already pinned, so
2919 		 * returning -errno is not an option.
2920 		 */
2921 		if (nr_pinned)
2922 			return nr_pinned;
2923 		return ret;
2924 	}
2925 	return ret + nr_pinned;
2926 }
2927 
2928 /**
2929  * get_user_pages_fast_only() - pin user pages in memory
2930  * @start:      starting user address
2931  * @nr_pages:   number of pages from start to pin
2932  * @gup_flags:  flags modifying pin behaviour
2933  * @pages:      array that receives pointers to the pages pinned.
2934  *              Should be at least nr_pages long.
2935  *
2936  * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
2937  * the regular GUP.
2938  * Note a difference with get_user_pages_fast: this always returns the
2939  * number of pages pinned, 0 if no pages were pinned.
2940  *
2941  * If the architecture does not support this function, simply return with no
2942  * pages pinned.
2943  *
2944  * Careful, careful! COW breaking can go either way, so a non-write
2945  * access can get ambiguous page results. If you call this function without
2946  * 'write' set, you'd better be sure that you're ok with that ambiguity.
2947  */
2948 int get_user_pages_fast_only(unsigned long start, int nr_pages,
2949 			     unsigned int gup_flags, struct page **pages)
2950 {
2951 	int nr_pinned;
2952 	/*
2953 	 * Internally (within mm/gup.c), gup fast variants must set FOLL_GET,
2954 	 * because gup fast is always a "pin with a +1 page refcount" request.
2955 	 *
2956 	 * FOLL_FAST_ONLY is required in order to match the API description of
2957 	 * this routine: no fall back to regular ("slow") GUP.
2958 	 */
2959 	gup_flags |= FOLL_GET | FOLL_FAST_ONLY;
2960 
2961 	nr_pinned = internal_get_user_pages_fast(start, nr_pages, gup_flags,
2962 						 pages);
2963 
2964 	/*
2965 	 * As specified in the API description above, this routine is not
2966 	 * allowed to return negative values. However, the common core
2967 	 * routine internal_get_user_pages_fast() *can* return -errno.
2968 	 * Therefore, correct for that here:
2969 	 */
2970 	if (nr_pinned < 0)
2971 		nr_pinned = 0;
2972 
2973 	return nr_pinned;
2974 }
2975 EXPORT_SYMBOL_GPL(get_user_pages_fast_only);
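
/*
 * Example (illustrative sketch; "addr" and "page" are hypothetical): because
 * this variant never falls back to the regular GUP path (and therefore never
 * takes mmap_lock), it can be attempted opportunistically, as the futex code
 * mentioned earlier in this file does, with the caller falling back to a
 * sleeping path on failure:
 *
 *	if (get_user_pages_fast_only(addr, 1, FOLL_WRITE, &page) != 1)
 *		return -EAGAIN;		// retry via the regular GUP path
 *	// ... use the page ...
 *	put_page(page);
 */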
2976 
2977 /**
2978  * get_user_pages_fast() - pin user pages in memory
2979  * @start:      starting user address
2980  * @nr_pages:   number of pages from start to pin
2981  * @gup_flags:  flags modifying pin behaviour
2982  * @pages:      array that receives pointers to the pages pinned.
2983  *              Should be at least nr_pages long.
2984  *
2985  * Attempt to pin user pages in memory without taking mm->mmap_lock.
2986  * If not successful, it will fall back to taking the lock and
2987  * calling get_user_pages().
2988  *
2989  * Returns number of pages pinned. This may be fewer than the number requested.
2990  * If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns
2991  * -errno.
2992  */
2993 int get_user_pages_fast(unsigned long start, int nr_pages,
2994 			unsigned int gup_flags, struct page **pages)
2995 {
2996 	if (!is_valid_gup_flags(gup_flags))
2997 		return -EINVAL;
2998 
2999 	/*
3000 	 * The caller may or may not have explicitly set FOLL_GET; either way is
3001 	 * OK. However, internally (within mm/gup.c), gup fast variants must set
3002 	 * FOLL_GET, because gup fast is always a "pin with a +1 page refcount"
3003 	 * request.
3004 	 */
3005 	gup_flags |= FOLL_GET;
3006 	return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
3007 }
3008 EXPORT_SYMBOL_GPL(get_user_pages_fast);
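
/*
 * Example (illustrative sketch; "uaddr", "npages", "pages", "nr" and "i" are
 * hypothetical): a short-lived I/O pin, released following the
 * set_page_dirty_lock()/put_page() rules described earlier in this file:
 *
 *	nr = get_user_pages_fast(uaddr, npages, FOLL_WRITE, pages);
 *	if (nr < 0)
 *		return nr;		// no pages pinned
 *	// ... perform the I/O on pages[0..nr-1] ...
 *	for (i = 0; i < nr; i++) {
 *		set_page_dirty_lock(pages[i]);	// the I/O wrote to them
 *		put_page(pages[i]);
 *	}
 */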
3009 
3010 /**
3011  * pin_user_pages_fast() - pin user pages in memory without taking locks
3012  *
3013  * @start:      starting user address
3014  * @nr_pages:   number of pages from start to pin
3015  * @gup_flags:  flags modifying pin behaviour
3016  * @pages:      array that receives pointers to the pages pinned.
3017  *              Should be at least nr_pages long.
3018  *
3019  * Nearly the same as get_user_pages_fast(), except that FOLL_PIN is set. See
3020  * get_user_pages_fast() for documentation on the function arguments, because
3021  * the arguments here are identical.
3022  *
3023  * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
3024  * see Documentation/core-api/pin_user_pages.rst for further details.
3025  */
3026 int pin_user_pages_fast(unsigned long start, int nr_pages,
3027 			unsigned int gup_flags, struct page **pages)
3028 {
3029 	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
3030 	if (WARN_ON_ONCE(gup_flags & FOLL_GET))
3031 		return -EINVAL;
3032 
3033 	if (WARN_ON_ONCE(!pages))
3034 		return -EINVAL;
3035 
3036 	gup_flags |= FOLL_PIN;
3037 	return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
3038 }
3039 EXPORT_SYMBOL_GPL(pin_user_pages_fast);
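
/*
 * Example (illustrative sketch; "uaddr", "npages", "pages" and "nr" are
 * hypothetical): a DMA-style pin, released with unpin_user_pages_dirty_lock()
 * rather than put_page(), as FOLL_PIN requires:
 *
 *	nr = pin_user_pages_fast(uaddr, npages, FOLL_WRITE | FOLL_LONGTERM,
 *				 pages);
 *	if (nr < 0)
 *		return nr;
 *	// ... set up and run the DMA ...
 *	unpin_user_pages_dirty_lock(pages, nr, true);
 */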
3040 
3041 /*
3042  * This is the FOLL_PIN equivalent of get_user_pages_fast_only(). Behavior
3043  * is the same, except that this one sets FOLL_PIN instead of FOLL_GET.
3044  *
3045  * The API rules are the same, too: no negative values may be returned.
3046  */
3047 int pin_user_pages_fast_only(unsigned long start, int nr_pages,
3048 			     unsigned int gup_flags, struct page **pages)
3049 {
3050 	int nr_pinned;
3051 
3052 	/*
3053 	 * FOLL_GET and FOLL_PIN are mutually exclusive. Note that the API
3054 	 * rules require returning 0, rather than -errno:
3055 	 */
3056 	if (WARN_ON_ONCE(gup_flags & FOLL_GET))
3057 		return 0;
3058 
3059 	if (WARN_ON_ONCE(!pages))
3060 		return 0;
3061 	/*
3062 	 * FOLL_FAST_ONLY is required in order to match the API description of
3063 	 * this routine: no fall back to regular ("slow") GUP.
3064 	 */
3065 	gup_flags |= (FOLL_PIN | FOLL_FAST_ONLY);
3066 	nr_pinned = internal_get_user_pages_fast(start, nr_pages, gup_flags,
3067 						 pages);
3068 	/*
3069 	 * This routine is not allowed to return negative values. However,
3070 	 * internal_get_user_pages_fast() *can* return -errno. Therefore,
3071 	 * correct for that here:
3072 	 */
3073 	if (nr_pinned < 0)
3074 		nr_pinned = 0;
3075 
3076 	return nr_pinned;
3077 }
3078 EXPORT_SYMBOL_GPL(pin_user_pages_fast_only);
3079 
3080 /**
3081  * pin_user_pages_remote() - pin pages of a remote process
3082  *
3083  * @mm:		mm_struct of target mm
3084  * @start:	starting user address
3085  * @nr_pages:	number of pages from start to pin
3086  * @gup_flags:	flags modifying lookup behaviour
3087  * @pages:	array that receives pointers to the pages pinned.
3088  *		Should be at least nr_pages long.
3089  * @vmas:	array of pointers to vmas corresponding to each page.
3090  *		Or NULL if the caller does not require them.
3091  * @locked:	pointer to lock flag indicating whether lock is held and
3092  *		subsequently whether VM_FAULT_RETRY functionality can be
3093  *		utilised. Lock must initially be held.
3094  *
3095  * Nearly the same as get_user_pages_remote(), except that FOLL_PIN is set. See
3096  * get_user_pages_remote() for documentation on the function arguments, because
3097  * the arguments here are identical.
3098  *
3099  * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
3100  * see Documentation/core-api/pin_user_pages.rst for details.
3101  */
3102 long pin_user_pages_remote(struct mm_struct *mm,
3103 			   unsigned long start, unsigned long nr_pages,
3104 			   unsigned int gup_flags, struct page **pages,
3105 			   struct vm_area_struct **vmas, int *locked)
3106 {
3107 	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
3108 	if (WARN_ON_ONCE(gup_flags & FOLL_GET))
3109 		return -EINVAL;
3110 
3111 	if (WARN_ON_ONCE(!pages))
3112 		return -EINVAL;
3113 
3114 	gup_flags |= FOLL_PIN;
3115 	return __get_user_pages_remote(mm, start, nr_pages, gup_flags,
3116 				       pages, vmas, locked);
3117 }
3118 EXPORT_SYMBOL(pin_user_pages_remote);
3119 
3120 /**
3121  * pin_user_pages() - pin user pages in memory for use by other devices
3122  *
3123  * @start:	starting user address
3124  * @nr_pages:	number of pages from start to pin
3125  * @gup_flags:	flags modifying lookup behaviour
3126  * @pages:	array that receives pointers to the pages pinned.
3127  *		Should be at least nr_pages long.
3128  * @vmas:	array of pointers to vmas corresponding to each page.
3129  *		Or NULL if the caller does not require them.
3130  *
3131  * Nearly the same as get_user_pages(), except that FOLL_TOUCH is not set, and
3132  * FOLL_PIN is set.
3133  *
3134  * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
3135  * see Documentation/core-api/pin_user_pages.rst for details.
3136  */
3137 long pin_user_pages(unsigned long start, unsigned long nr_pages,
3138 		    unsigned int gup_flags, struct page **pages,
3139 		    struct vm_area_struct **vmas)
3140 {
3141 	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
3142 	if (WARN_ON_ONCE(gup_flags & FOLL_GET))
3143 		return -EINVAL;
3144 
3145 	if (WARN_ON_ONCE(!pages))
3146 		return -EINVAL;
3147 
3148 	gup_flags |= FOLL_PIN;
3149 	return __gup_longterm_locked(current->mm, start, nr_pages,
3150 				     pages, vmas, gup_flags);
3151 }
3152 EXPORT_SYMBOL(pin_user_pages);
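
/*
 * Example (illustrative sketch; "uaddr", "npages", "pages" and "nr" are
 * hypothetical): same release rules as pin_user_pages_fast(), but this
 * variant takes the slow path and, like get_user_pages(), expects the caller
 * to hold mmap_lock:
 *
 *	mmap_read_lock(current->mm);
 *	nr = pin_user_pages(uaddr, npages, FOLL_WRITE | FOLL_LONGTERM,
 *			    pages, NULL);
 *	mmap_read_unlock(current->mm);
 *	if (nr > 0)
 *		unpin_user_pages(pages, nr);	// once no longer needed
 */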
3153 
3154 /*
3155  * pin_user_pages_unlocked() is the FOLL_PIN variant of
3156  * get_user_pages_unlocked(). Behavior is the same, except that this one sets
3157  * FOLL_PIN and rejects FOLL_GET.
3158  */
3159 long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
3160 			     struct page **pages, unsigned int gup_flags)
3161 {
3162 	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
3163 	if (WARN_ON_ONCE(gup_flags & FOLL_GET))
3164 		return -EINVAL;
3165 
3166 	if (WARN_ON_ONCE(!pages))
3167 		return -EINVAL;
3168 
3169 	gup_flags |= FOLL_PIN;
3170 	return get_user_pages_unlocked(start, nr_pages, pages, gup_flags);
3171 }
3172 EXPORT_SYMBOL(pin_user_pages_unlocked);
3173