xref: /openbmc/linux/mm/gup.c (revision a7f7f6248d9740d710fd6bd190293fe5e16410ac)
1  // SPDX-License-Identifier: GPL-2.0-only
2  #include <linux/kernel.h>
3  #include <linux/errno.h>
4  #include <linux/err.h>
5  #include <linux/spinlock.h>
6  
7  #include <linux/mm.h>
8  #include <linux/memremap.h>
9  #include <linux/pagemap.h>
10  #include <linux/rmap.h>
11  #include <linux/swap.h>
12  #include <linux/swapops.h>
13  
14  #include <linux/sched/signal.h>
15  #include <linux/rwsem.h>
16  #include <linux/hugetlb.h>
17  #include <linux/migrate.h>
18  #include <linux/mm_inline.h>
19  #include <linux/sched/mm.h>
20  
21  #include <asm/mmu_context.h>
22  #include <asm/tlbflush.h>
23  
24  #include "internal.h"
25  
26  struct follow_page_context {
27  	struct dev_pagemap *pgmap;
28  	unsigned int page_mask;
29  };
30  
31  static void hpage_pincount_add(struct page *page, int refs)
32  {
33  	VM_BUG_ON_PAGE(!hpage_pincount_available(page), page);
34  	VM_BUG_ON_PAGE(page != compound_head(page), page);
35  
36  	atomic_add(refs, compound_pincount_ptr(page));
37  }
38  
39  static void hpage_pincount_sub(struct page *page, int refs)
40  {
41  	VM_BUG_ON_PAGE(!hpage_pincount_available(page), page);
42  	VM_BUG_ON_PAGE(page != compound_head(page), page);
43  
44  	atomic_sub(refs, compound_pincount_ptr(page));
45  }
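
/*
 * Editorial note (not part of this file): for compound pages large enough to
 * qualify, FOLL_PIN pins are tracked exactly in a dedicated counter reached
 * via compound_pincount_ptr(), rather than being folded into the head page's
 * refcount via GUP_PIN_COUNTING_BIAS. The "order > 1" condition enforced by
 * hpage_pincount_available() ensures the compound page has enough tail pages
 * to hold that counter.
 */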
46  
47  /*
48   * Return the compound head page with ref appropriately incremented,
49   * or NULL if that failed.
50   */
51  static inline struct page *try_get_compound_head(struct page *page, int refs)
52  {
53  	struct page *head = compound_head(page);
54  
55  	if (WARN_ON_ONCE(page_ref_count(head) < 0))
56  		return NULL;
57  	if (unlikely(!page_cache_add_speculative(head, refs)))
58  		return NULL;
59  	return head;
60  }
61  
62  /*
63   * try_grab_compound_head() - attempt to elevate a page's refcount, by a
64   * flags-dependent amount.
65   *
66   * "grab" names in this file mean, "look at flags to decide whether to use
67   * FOLL_PIN or FOLL_GET behavior, when incrementing the page's refcount."
68   *
69   * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the
70   * same time. (That's true throughout the get_user_pages*() and
71   * pin_user_pages*() APIs.) Cases:
72   *
73   *    FOLL_GET: page's refcount will be incremented by 1.
74   *    FOLL_PIN: page's refcount will be incremented by GUP_PIN_COUNTING_BIAS.
75   *
76   * Return: head page (with refcount appropriately incremented) for success, or
77   * NULL upon failure. If neither FOLL_GET nor FOLL_PIN was set, that's
78   * considered failure, and furthermore, a likely bug in the caller, so a warning
79   * is also emitted.
80   */
81  static __maybe_unused struct page *try_grab_compound_head(struct page *page,
82  							  int refs,
83  							  unsigned int flags)
84  {
85  	if (flags & FOLL_GET)
86  		return try_get_compound_head(page, refs);
87  	else if (flags & FOLL_PIN) {
88  		int orig_refs = refs;
89  
90  		/*
91  		 * Can't do FOLL_LONGTERM + FOLL_PIN with CMA in the gup fast
92  		 * path, so fail and let the caller fall back to the slow path.
93  		 */
94  		if (unlikely(flags & FOLL_LONGTERM) &&
95  				is_migrate_cma_page(page))
96  			return NULL;
97  
98  		/*
99  		 * When pinning a compound page of order > 1 (which is what
100  		 * hpage_pincount_available() checks for), use an exact count to
101  		 * track it, via hpage_pincount_add/_sub().
102  		 *
103  		 * However, be sure to *also* increment the normal page refcount
104  		 * field at least once, so that the page really is pinned.
105  		 */
106  		if (!hpage_pincount_available(page))
107  			refs *= GUP_PIN_COUNTING_BIAS;
108  
109  		page = try_get_compound_head(page, refs);
110  		if (!page)
111  			return NULL;
112  
113  		if (hpage_pincount_available(page))
114  			hpage_pincount_add(page, refs);
115  
116  		mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_ACQUIRED,
117  				    orig_refs);
118  
119  		return page;
120  	}
121  
122  	WARN_ON_ONCE(1);
123  	return NULL;
124  }
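
/*
 * Editorial worked example for the FOLL_PIN path above (not part of this
 * file). GUP_PIN_COUNTING_BIAS is 1024 in this kernel, so, assuming that
 * value:
 *
 *  - Pinning one order-0 page: hpage_pincount_available() is false, refs (1)
 *    is scaled by the bias, and page_ref_count() grows by 1024.
 *
 *  - Pinning 3 subpages of a PMD-sized THP: hpage_pincount_available() is
 *    true, so the head page's compound pincount grows by 3 and its
 *    page_ref_count() also grows by 3 (no bias applied).
 *
 * These elevated counts are what page_maybe_dma_pinned() later inspects to
 * decide whether a page is probably pinned for DMA.
 */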
125  
126  /**
127   * try_grab_page() - elevate a page's refcount by a flag-dependent amount
128   *
129   * This might not do anything at all, depending on the flags argument.
130   *
131   * "grab" names in this file mean, "look at flags to decide whether to use
132   * FOLL_PIN or FOLL_GET behavior, when incrementing the page's refcount."
133   *
134   * @page:    pointer to page to be grabbed
135   * @flags:   gup flags: these are the FOLL_* flag values.
136   *
137   * Either FOLL_PIN or FOLL_GET (or neither) may be set, but not both at the same
138   * time. Cases:
139   *
140   *    FOLL_GET: page's refcount will be incremented by 1.
141   *    FOLL_PIN: page's refcount will be incremented by GUP_PIN_COUNTING_BIAS.
142   *
143   * Return: true for success, or if no action was required (if neither FOLL_PIN
144   * nor FOLL_GET was set, nothing is done). False for failure: FOLL_GET or
145   * FOLL_PIN was set, but the page could not be grabbed.
146   */
147  bool __must_check try_grab_page(struct page *page, unsigned int flags)
148  {
149  	WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == (FOLL_GET | FOLL_PIN));
150  
151  	if (flags & FOLL_GET)
152  		return try_get_page(page);
153  	else if (flags & FOLL_PIN) {
154  		int refs = 1;
155  
156  		page = compound_head(page);
157  
158  		if (WARN_ON_ONCE(page_ref_count(page) <= 0))
159  			return false;
160  
161  		if (hpage_pincount_available(page))
162  			hpage_pincount_add(page, 1);
163  		else
164  			refs = GUP_PIN_COUNTING_BIAS;
165  
166  		/*
167  		 * Similar to try_grab_compound_head(): even if using the
168  		 * hpage_pincount_add/_sub() routines, be sure to
169  		 * *also* increment the normal page refcount field at least
170  		 * once, so that the page really is pinned.
171  		 */
172  		page_ref_add(page, refs);
173  
174  		mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_ACQUIRED, 1);
175  	}
176  
177  	return true;
178  }
179  
180  #ifdef CONFIG_DEV_PAGEMAP_OPS
181  static bool __unpin_devmap_managed_user_page(struct page *page)
182  {
183  	int count, refs = 1;
184  
185  	if (!page_is_devmap_managed(page))
186  		return false;
187  
188  	if (hpage_pincount_available(page))
189  		hpage_pincount_sub(page, 1);
190  	else
191  		refs = GUP_PIN_COUNTING_BIAS;
192  
193  	count = page_ref_sub_return(page, refs);
194  
195  	mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_RELEASED, 1);
196  	/*
197  	 * devmap page refcounts are 1-based, rather than 0-based: if
198  	 * refcount is 1, then the page is free and the refcount is
199  	 * stable because nobody holds a reference on the page.
200  	 */
201  	if (count == 1)
202  		free_devmap_managed_page(page);
203  	else if (!count)
204  		__put_page(page);
205  
206  	return true;
207  }
208  #else
209  static bool __unpin_devmap_managed_user_page(struct page *page)
210  {
211  	return false;
212  }
213  #endif /* CONFIG_DEV_PAGEMAP_OPS */
214  
215  /**
216   * unpin_user_page() - release a dma-pinned page
217   * @page:            pointer to page to be released
218   *
219   * Pages that were pinned via pin_user_pages*() must be released via either
220   * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so
221   * that such pages can be separately tracked and uniquely handled. In
222   * particular, interactions with RDMA and filesystems need special handling.
223   */
224  void unpin_user_page(struct page *page)
225  {
226  	int refs = 1;
227  
228  	page = compound_head(page);
229  
230  	/*
231  	 * For devmap managed pages we need to catch refcount transition from
232  	 * GUP_PIN_COUNTING_BIAS to 1: when the refcount reaches one, the
233  	 * page is free and we need to inform the device driver through a
234  	 * callback. See include/linux/memremap.h and HMM for details.
235  	 */
236  	if (__unpin_devmap_managed_user_page(page))
237  		return;
238  
239  	if (hpage_pincount_available(page))
240  		hpage_pincount_sub(page, 1);
241  	else
242  		refs = GUP_PIN_COUNTING_BIAS;
243  
244  	if (page_ref_sub_and_test(page, refs))
245  		__put_page(page);
246  
247  	mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_RELEASED, 1);
248  }
249  EXPORT_SYMBOL(unpin_user_page);
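
/*
 * Editorial usage sketch (not part of this file): the pairing rule described
 * above, shown with pin_user_pages_fast(). The buffer size NR, user_addr, and
 * the surrounding driver code are made up for illustration:
 *
 *	struct page *pages[NR];
 *	int got, i;
 *
 *	got = pin_user_pages_fast(user_addr, NR, FOLL_WRITE, pages);
 *	if (got < 0)
 *		return got;
 *	... program the device to DMA into the pinned pages ...
 *	for (i = 0; i < got; i++)
 *		unpin_user_page(pages[i]);
 *
 * Releasing a pin_user_pages*() page with put_page() instead would corrupt
 * the pin accounting maintained in this file.
 */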
250  
251  /**
252   * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
253   * @pages:  array of pages to be maybe marked dirty, and definitely released.
254   * @npages: number of pages in the @pages array.
255   * @make_dirty: whether to mark the pages dirty
256   *
257   * "gup-pinned page" refers to a page that has had one of the get_user_pages()
258   * variants called on that page.
259   *
260   * For each page in the @pages array, make that page (or its head page, if a
261   * compound page) dirty, if @make_dirty is true, and if the page was previously
262   * listed as clean. In any case, releases all pages using unpin_user_page(),
263   * possibly via unpin_user_pages(), for the non-dirty case.
264   *
265   * Please see the unpin_user_page() documentation for details.
266   *
267   * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
268   * required, then the caller should a) verify that this is really correct,
269   * because _lock() is usually required, and b) hand code it:
270   * set_page_dirty(), unpin_user_page().
271   *
272   */
273  void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
274  				 bool make_dirty)
275  {
276  	unsigned long index;
277  
278  	/*
279  	 * TODO: this can be optimized for huge pages: if a series of pages is
280  	 * physically contiguous and part of the same compound page, then a
281  	 * single operation to the head page should suffice.
282  	 */
283  
284  	if (!make_dirty) {
285  		unpin_user_pages(pages, npages);
286  		return;
287  	}
288  
289  	for (index = 0; index < npages; index++) {
290  		struct page *page = compound_head(pages[index]);
291  		/*
292  		 * Checking PageDirty at this point may race with
293  		 * clear_page_dirty_for_io(), but that's OK. Two key
294  		 * cases:
295  		 *
296  		 * 1) This code sees the page as already dirty, so it
297  		 * skips the call to set_page_dirty(). That could happen
298  		 * because clear_page_dirty_for_io() called
299  		 * page_mkclean(), followed by set_page_dirty().
300  		 * However, now the page is going to get written back,
301  		 * which meets the original intention of setting it
302  		 * dirty, so all is well: clear_page_dirty_for_io() goes
303  		 * on to call TestClearPageDirty(), and write the page
304  		 * back.
305  		 *
306  		 * 2) This code sees the page as clean, so it calls
307  		 * set_page_dirty(). The page stays dirty, despite being
308  		 * written back, so it gets written back again in the
309  		 * next writeback cycle. This is harmless.
310  		 */
311  		if (!PageDirty(page))
312  			set_page_dirty_lock(page);
313  		unpin_user_page(page);
314  	}
315  }
316  EXPORT_SYMBOL(unpin_user_pages_dirty_lock);
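
/*
 * Editorial usage sketch (not part of this file): a typical "the device wrote
 * into these pages" completion path can release everything in one call and
 * let this helper take care of dirtying:
 *
 *	unpin_user_pages_dirty_lock(pages, npinned, true);
 *
 * which is roughly the same as hand-coding
 *
 *	for (i = 0; i < npinned; i++) {
 *		set_page_dirty_lock(pages[i]);
 *		unpin_user_page(pages[i]);
 *	}
 *
 * except that the helper operates on head pages and skips the dirtying step
 * for pages that are already marked dirty.
 */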
317  
318  /**
319   * unpin_user_pages() - release an array of gup-pinned pages.
320   * @pages:  array of pages to be marked dirty and released.
321   * @npages: number of pages in the @pages array.
322   *
323   * For each page in the @pages array, release the page using unpin_user_page().
324   *
325   * Please see the unpin_user_page() documentation for details.
326   */
327  void unpin_user_pages(struct page **pages, unsigned long npages)
328  {
329  	unsigned long index;
330  
331  	/*
332  	 * TODO: this can be optimized for huge pages: if a series of pages is
333  	 * physically contiguous and part of the same compound page, then a
334  	 * single operation to the head page should suffice.
335  	 */
336  	for (index = 0; index < npages; index++)
337  		unpin_user_page(pages[index]);
338  }
339  EXPORT_SYMBOL(unpin_user_pages);
340  
341  #ifdef CONFIG_MMU
342  static struct page *no_page_table(struct vm_area_struct *vma,
343  		unsigned int flags)
344  {
345  	/*
346  	 * When core dumping an enormous anonymous area that nobody
347  	 * has touched so far, we don't want to allocate unnecessary pages or
348  	 * page tables.  Return error instead of NULL to skip handle_mm_fault,
349  	 * then get_dump_page() will return NULL to leave a hole in the dump.
350  	 * But we can only make this optimization where a hole would surely
351  	 * be zero-filled if handle_mm_fault() actually did handle it.
352  	 */
353  	if ((flags & FOLL_DUMP) &&
354  			(vma_is_anonymous(vma) || !vma->vm_ops->fault))
355  		return ERR_PTR(-EFAULT);
356  	return NULL;
357  }
358  
359  static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
360  		pte_t *pte, unsigned int flags)
361  {
362  	/* No page to get reference */
363  	if (flags & FOLL_GET)
364  		return -EFAULT;
365  
366  	if (flags & FOLL_TOUCH) {
367  		pte_t entry = *pte;
368  
369  		if (flags & FOLL_WRITE)
370  			entry = pte_mkdirty(entry);
371  		entry = pte_mkyoung(entry);
372  
373  		if (!pte_same(*pte, entry)) {
374  			set_pte_at(vma->vm_mm, address, pte, entry);
375  			update_mmu_cache(vma, address, pte);
376  		}
377  	}
378  
379  	/* Proper page table entry exists, but no corresponding struct page */
380  	return -EEXIST;
381  }
382  
383  /*
384   * FOLL_FORCE or a forced COW break can write even to unwritable pte's,
385   * but only after we've gone through a COW cycle and they are dirty.
386   */
387  static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
388  {
389  	return pte_write(pte) || ((flags & FOLL_COW) && pte_dirty(pte));
390  }
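
/*
 * Editorial truth table for the helper above (not part of this file): a gup
 * write is allowed through an unwritable pte only in the forced-COW retry
 * case, i.e. when FOLL_COW is set and the pte is already dirty from the COW
 * fault:
 *
 *	pte_write  FOLL_COW  pte_dirty      result
 *	    1          -         -          allowed
 *	    0          1         1          allowed
 *	    0          1         0          refused
 *	    0          0         -          refused
 */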
391  
392  /*
393   * A (separate) COW fault might break the page the other way and
394   * get_user_pages() would return the page from what is now the wrong
395   * VM. So we need to force a COW break at GUP time even for reads.
396   */
397  static inline bool should_force_cow_break(struct vm_area_struct *vma, unsigned int flags)
398  {
399  	return is_cow_mapping(vma->vm_flags) && (flags & (FOLL_GET | FOLL_PIN));
400  }
401  
402  static struct page *follow_page_pte(struct vm_area_struct *vma,
403  		unsigned long address, pmd_t *pmd, unsigned int flags,
404  		struct dev_pagemap **pgmap)
405  {
406  	struct mm_struct *mm = vma->vm_mm;
407  	struct page *page;
408  	spinlock_t *ptl;
409  	pte_t *ptep, pte;
410  	int ret;
411  
412  	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
413  	if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
414  			 (FOLL_PIN | FOLL_GET)))
415  		return ERR_PTR(-EINVAL);
416  retry:
417  	if (unlikely(pmd_bad(*pmd)))
418  		return no_page_table(vma, flags);
419  
420  	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
421  	pte = *ptep;
422  	if (!pte_present(pte)) {
423  		swp_entry_t entry;
424  		/*
425  		 * KSM's break_ksm() relies upon recognizing a ksm page
426  		 * even while it is being migrated, so for that case we
427  		 * need migration_entry_wait().
428  		 */
429  		if (likely(!(flags & FOLL_MIGRATION)))
430  			goto no_page;
431  		if (pte_none(pte))
432  			goto no_page;
433  		entry = pte_to_swp_entry(pte);
434  		if (!is_migration_entry(entry))
435  			goto no_page;
436  		pte_unmap_unlock(ptep, ptl);
437  		migration_entry_wait(mm, pmd, address);
438  		goto retry;
439  	}
440  	if ((flags & FOLL_NUMA) && pte_protnone(pte))
441  		goto no_page;
442  	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
443  		pte_unmap_unlock(ptep, ptl);
444  		return NULL;
445  	}
446  
447  	page = vm_normal_page(vma, address, pte);
448  	if (!page && pte_devmap(pte) && (flags & (FOLL_GET | FOLL_PIN))) {
449  		/*
450  		 * Only return device mapping pages in the FOLL_GET or FOLL_PIN
451  		 * case since they are only valid while holding the pgmap
452  		 * reference.
453  		 */
454  		*pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap);
455  		if (*pgmap)
456  			page = pte_page(pte);
457  		else
458  			goto no_page;
459  	} else if (unlikely(!page)) {
460  		if (flags & FOLL_DUMP) {
461  			/* Avoid special (like zero) pages in core dumps */
462  			page = ERR_PTR(-EFAULT);
463  			goto out;
464  		}
465  
466  		if (is_zero_pfn(pte_pfn(pte))) {
467  			page = pte_page(pte);
468  		} else {
469  			ret = follow_pfn_pte(vma, address, ptep, flags);
470  			page = ERR_PTR(ret);
471  			goto out;
472  		}
473  	}
474  
475  	if (flags & FOLL_SPLIT && PageTransCompound(page)) {
476  		get_page(page);
477  		pte_unmap_unlock(ptep, ptl);
478  		lock_page(page);
479  		ret = split_huge_page(page);
480  		unlock_page(page);
481  		put_page(page);
482  		if (ret)
483  			return ERR_PTR(ret);
484  		goto retry;
485  	}
486  
487  	/* try_grab_page() does nothing unless FOLL_GET or FOLL_PIN is set. */
488  	if (unlikely(!try_grab_page(page, flags))) {
489  		page = ERR_PTR(-ENOMEM);
490  		goto out;
491  	}
492  	/*
493  	 * We need to make the page accessible if and only if we are going
494  	 * to access its content (the FOLL_PIN case).  Please see
495  	 * Documentation/core-api/pin_user_pages.rst for details.
496  	 */
497  	if (flags & FOLL_PIN) {
498  		ret = arch_make_page_accessible(page);
499  		if (ret) {
500  			unpin_user_page(page);
501  			page = ERR_PTR(ret);
502  			goto out;
503  		}
504  	}
505  	if (flags & FOLL_TOUCH) {
506  		if ((flags & FOLL_WRITE) &&
507  		    !pte_dirty(pte) && !PageDirty(page))
508  			set_page_dirty(page);
509  		/*
510  		 * pte_mkyoung() would be more correct here, but atomic care
511  		 * is needed to avoid losing the dirty bit: it is easier to use
512  		 * mark_page_accessed().
513  		 */
514  		mark_page_accessed(page);
515  	}
516  	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
517  		/* Do not mlock pte-mapped THP */
518  		if (PageTransCompound(page))
519  			goto out;
520  
521  		/*
522  		 * The preliminary mapping check is mainly to avoid the
523  		 * pointless overhead of lock_page on the ZERO_PAGE
524  		 * which might bounce very badly if there is contention.
525  		 *
526  		 * If the page is already locked, we don't need to
527  		 * handle it now - vmscan will handle it later if and
528  		 * when it attempts to reclaim the page.
529  		 */
530  		if (page->mapping && trylock_page(page)) {
531  			lru_add_drain();  /* push cached pages to LRU */
532  			/*
533  			 * Because we lock page here, and migration is
534  			 * blocked by the pte's page reference, and we
535  			 * know the page is still mapped, we don't even
536  			 * need to check for file-cache page truncation.
537  			 */
538  			mlock_vma_page(page);
539  			unlock_page(page);
540  		}
541  	}
542  out:
543  	pte_unmap_unlock(ptep, ptl);
544  	return page;
545  no_page:
546  	pte_unmap_unlock(ptep, ptl);
547  	if (!pte_none(pte))
548  		return NULL;
549  	return no_page_table(vma, flags);
550  }
551  
552  static struct page *follow_pmd_mask(struct vm_area_struct *vma,
553  				    unsigned long address, pud_t *pudp,
554  				    unsigned int flags,
555  				    struct follow_page_context *ctx)
556  {
557  	pmd_t *pmd, pmdval;
558  	spinlock_t *ptl;
559  	struct page *page;
560  	struct mm_struct *mm = vma->vm_mm;
561  
562  	pmd = pmd_offset(pudp, address);
563  	/*
564  	 * The READ_ONCE() will stabilize the pmdval in a register or
565  	 * on the stack so that it will stop changing under the code.
566  	 */
567  	pmdval = READ_ONCE(*pmd);
568  	if (pmd_none(pmdval))
569  		return no_page_table(vma, flags);
570  	if (pmd_huge(pmdval) && is_vm_hugetlb_page(vma)) {
571  		page = follow_huge_pmd(mm, address, pmd, flags);
572  		if (page)
573  			return page;
574  		return no_page_table(vma, flags);
575  	}
576  	if (is_hugepd(__hugepd(pmd_val(pmdval)))) {
577  		page = follow_huge_pd(vma, address,
578  				      __hugepd(pmd_val(pmdval)), flags,
579  				      PMD_SHIFT);
580  		if (page)
581  			return page;
582  		return no_page_table(vma, flags);
583  	}
584  retry:
585  	if (!pmd_present(pmdval)) {
586  		if (likely(!(flags & FOLL_MIGRATION)))
587  			return no_page_table(vma, flags);
588  		VM_BUG_ON(thp_migration_supported() &&
589  				  !is_pmd_migration_entry(pmdval));
590  		if (is_pmd_migration_entry(pmdval))
591  			pmd_migration_entry_wait(mm, pmd);
592  		pmdval = READ_ONCE(*pmd);
593  		/*
594  		 * MADV_DONTNEED may convert the pmd to null because
595  		 * mmap_lock is held in read mode
596  		 */
597  		if (pmd_none(pmdval))
598  			return no_page_table(vma, flags);
599  		goto retry;
600  	}
601  	if (pmd_devmap(pmdval)) {
602  		ptl = pmd_lock(mm, pmd);
603  		page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
604  		spin_unlock(ptl);
605  		if (page)
606  			return page;
607  	}
608  	if (likely(!pmd_trans_huge(pmdval)))
609  		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
610  
611  	if ((flags & FOLL_NUMA) && pmd_protnone(pmdval))
612  		return no_page_table(vma, flags);
613  
614  retry_locked:
615  	ptl = pmd_lock(mm, pmd);
616  	if (unlikely(pmd_none(*pmd))) {
617  		spin_unlock(ptl);
618  		return no_page_table(vma, flags);
619  	}
620  	if (unlikely(!pmd_present(*pmd))) {
621  		spin_unlock(ptl);
622  		if (likely(!(flags & FOLL_MIGRATION)))
623  			return no_page_table(vma, flags);
624  		pmd_migration_entry_wait(mm, pmd);
625  		goto retry_locked;
626  	}
627  	if (unlikely(!pmd_trans_huge(*pmd))) {
628  		spin_unlock(ptl);
629  		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
630  	}
631  	if (flags & (FOLL_SPLIT | FOLL_SPLIT_PMD)) {
632  		int ret;
633  		page = pmd_page(*pmd);
634  		if (is_huge_zero_page(page)) {
635  			spin_unlock(ptl);
636  			ret = 0;
637  			split_huge_pmd(vma, pmd, address);
638  			if (pmd_trans_unstable(pmd))
639  				ret = -EBUSY;
640  		} else if (flags & FOLL_SPLIT) {
641  			if (unlikely(!try_get_page(page))) {
642  				spin_unlock(ptl);
643  				return ERR_PTR(-ENOMEM);
644  			}
645  			spin_unlock(ptl);
646  			lock_page(page);
647  			ret = split_huge_page(page);
648  			unlock_page(page);
649  			put_page(page);
650  			if (pmd_none(*pmd))
651  				return no_page_table(vma, flags);
652  		} else {  /* flags & FOLL_SPLIT_PMD */
653  			spin_unlock(ptl);
654  			split_huge_pmd(vma, pmd, address);
655  			ret = pte_alloc(mm, pmd) ? -ENOMEM : 0;
656  		}
657  
658  		return ret ? ERR_PTR(ret) :
659  			follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
660  	}
661  	page = follow_trans_huge_pmd(vma, address, pmd, flags);
662  	spin_unlock(ptl);
663  	ctx->page_mask = HPAGE_PMD_NR - 1;
664  	return page;
665  }
666  
667  static struct page *follow_pud_mask(struct vm_area_struct *vma,
668  				    unsigned long address, p4d_t *p4dp,
669  				    unsigned int flags,
670  				    struct follow_page_context *ctx)
671  {
672  	pud_t *pud;
673  	spinlock_t *ptl;
674  	struct page *page;
675  	struct mm_struct *mm = vma->vm_mm;
676  
677  	pud = pud_offset(p4dp, address);
678  	if (pud_none(*pud))
679  		return no_page_table(vma, flags);
680  	if (pud_huge(*pud) && is_vm_hugetlb_page(vma)) {
681  		page = follow_huge_pud(mm, address, pud, flags);
682  		if (page)
683  			return page;
684  		return no_page_table(vma, flags);
685  	}
686  	if (is_hugepd(__hugepd(pud_val(*pud)))) {
687  		page = follow_huge_pd(vma, address,
688  				      __hugepd(pud_val(*pud)), flags,
689  				      PUD_SHIFT);
690  		if (page)
691  			return page;
692  		return no_page_table(vma, flags);
693  	}
694  	if (pud_devmap(*pud)) {
695  		ptl = pud_lock(mm, pud);
696  		page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap);
697  		spin_unlock(ptl);
698  		if (page)
699  			return page;
700  	}
701  	if (unlikely(pud_bad(*pud)))
702  		return no_page_table(vma, flags);
703  
704  	return follow_pmd_mask(vma, address, pud, flags, ctx);
705  }
706  
707  static struct page *follow_p4d_mask(struct vm_area_struct *vma,
708  				    unsigned long address, pgd_t *pgdp,
709  				    unsigned int flags,
710  				    struct follow_page_context *ctx)
711  {
712  	p4d_t *p4d;
713  	struct page *page;
714  
715  	p4d = p4d_offset(pgdp, address);
716  	if (p4d_none(*p4d))
717  		return no_page_table(vma, flags);
718  	BUILD_BUG_ON(p4d_huge(*p4d));
719  	if (unlikely(p4d_bad(*p4d)))
720  		return no_page_table(vma, flags);
721  
722  	if (is_hugepd(__hugepd(p4d_val(*p4d)))) {
723  		page = follow_huge_pd(vma, address,
724  				      __hugepd(p4d_val(*p4d)), flags,
725  				      P4D_SHIFT);
726  		if (page)
727  			return page;
728  		return no_page_table(vma, flags);
729  	}
730  	return follow_pud_mask(vma, address, p4d, flags, ctx);
731  }
732  
733  /**
734   * follow_page_mask - look up a page descriptor from a user-virtual address
735   * @vma: vm_area_struct mapping @address
736   * @address: virtual address to look up
737   * @flags: flags modifying lookup behaviour
738   * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a
739   *       pointer to output page_mask
740   *
741   * @flags can have FOLL_ flags set, defined in <linux/mm.h>
742   *
743   * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
744   * the device's dev_pagemap metadata to avoid repeating expensive lookups.
745   *
746   * On output, the @ctx->page_mask is set according to the size of the page.
747   *
748   * Return: the mapped (struct page *), %NULL if no mapping exists, or
749   * an error pointer if there is a mapping to something not represented
750   * by a page descriptor (see also vm_normal_page()).
751   */
752  static struct page *follow_page_mask(struct vm_area_struct *vma,
753  			      unsigned long address, unsigned int flags,
754  			      struct follow_page_context *ctx)
755  {
756  	pgd_t *pgd;
757  	struct page *page;
758  	struct mm_struct *mm = vma->vm_mm;
759  
760  	ctx->page_mask = 0;
761  
762  	/* make this handle hugepd */
763  	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
764  	if (!IS_ERR(page)) {
765  		WARN_ON_ONCE(flags & (FOLL_GET | FOLL_PIN));
766  		return page;
767  	}
768  
769  	pgd = pgd_offset(mm, address);
770  
771  	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
772  		return no_page_table(vma, flags);
773  
774  	if (pgd_huge(*pgd)) {
775  		page = follow_huge_pgd(mm, address, pgd, flags);
776  		if (page)
777  			return page;
778  		return no_page_table(vma, flags);
779  	}
780  	if (is_hugepd(__hugepd(pgd_val(*pgd)))) {
781  		page = follow_huge_pd(vma, address,
782  				      __hugepd(pgd_val(*pgd)), flags,
783  				      PGDIR_SHIFT);
784  		if (page)
785  			return page;
786  		return no_page_table(vma, flags);
787  	}
788  
789  	return follow_p4d_mask(vma, address, pgd, flags, ctx);
790  }
791  
792  struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
793  			 unsigned int foll_flags)
794  {
795  	struct follow_page_context ctx = { NULL };
796  	struct page *page;
797  
798  	page = follow_page_mask(vma, address, foll_flags, &ctx);
799  	if (ctx.pgmap)
800  		put_dev_pagemap(ctx.pgmap);
801  	return page;
802  }
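
/*
 * Editorial usage sketch (not part of this file): follow_page() is a lookup
 * helper for other mm/ internals and must be called with mmap_lock held. A
 * caller that wants a reference on whatever currently backs @addr might do,
 * roughly:
 *
 *	struct page *page;
 *
 *	page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
 *	if (IS_ERR_OR_NULL(page))
 *		return page ? PTR_ERR(page) : -ENOENT;
 *	... inspect or migrate the page ...
 *	put_page(page);
 *
 * FOLL_DUMP turns "no page / zero page" into an error rather than NULL, and
 * the FOLL_GET reference must be dropped with put_page(). The flag mix and
 * error handling are illustrative, not copied from a real caller.
 */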
803  
804  static int get_gate_page(struct mm_struct *mm, unsigned long address,
805  		unsigned int gup_flags, struct vm_area_struct **vma,
806  		struct page **page)
807  {
808  	pgd_t *pgd;
809  	p4d_t *p4d;
810  	pud_t *pud;
811  	pmd_t *pmd;
812  	pte_t *pte;
813  	int ret = -EFAULT;
814  
815  	/* user gate pages are read-only */
816  	if (gup_flags & FOLL_WRITE)
817  		return -EFAULT;
818  	if (address > TASK_SIZE)
819  		pgd = pgd_offset_k(address);
820  	else
821  		pgd = pgd_offset_gate(mm, address);
822  	if (pgd_none(*pgd))
823  		return -EFAULT;
824  	p4d = p4d_offset(pgd, address);
825  	if (p4d_none(*p4d))
826  		return -EFAULT;
827  	pud = pud_offset(p4d, address);
828  	if (pud_none(*pud))
829  		return -EFAULT;
830  	pmd = pmd_offset(pud, address);
831  	if (!pmd_present(*pmd))
832  		return -EFAULT;
833  	VM_BUG_ON(pmd_trans_huge(*pmd));
834  	pte = pte_offset_map(pmd, address);
835  	if (pte_none(*pte))
836  		goto unmap;
837  	*vma = get_gate_vma(mm);
838  	if (!page)
839  		goto out;
840  	*page = vm_normal_page(*vma, address, *pte);
841  	if (!*page) {
842  		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
843  			goto unmap;
844  		*page = pte_page(*pte);
845  	}
846  	if (unlikely(!try_get_page(*page))) {
847  		ret = -ENOMEM;
848  		goto unmap;
849  	}
850  out:
851  	ret = 0;
852  unmap:
853  	pte_unmap(pte);
854  	return ret;
855  }
856  
857  /*
858   * mmap_lock must be held on entry.  If @locked != NULL and *@flags
859   * does not include FOLL_NOWAIT, the mmap_lock may be released.  If it
860   * is, *@locked will be set to 0 and -EBUSY returned.
861   */
862  static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
863  		unsigned long address, unsigned int *flags, int *locked)
864  {
865  	unsigned int fault_flags = 0;
866  	vm_fault_t ret;
867  
868  	/* mlock all present pages, but do not fault in new pages */
869  	if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
870  		return -ENOENT;
871  	if (*flags & FOLL_WRITE)
872  		fault_flags |= FAULT_FLAG_WRITE;
873  	if (*flags & FOLL_REMOTE)
874  		fault_flags |= FAULT_FLAG_REMOTE;
875  	if (locked)
876  		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
877  	if (*flags & FOLL_NOWAIT)
878  		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
879  	if (*flags & FOLL_TRIED) {
880  		/*
881  		 * Note: FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_TRIED
882  		 * can co-exist
883  		 */
884  		fault_flags |= FAULT_FLAG_TRIED;
885  	}
886  
887  	ret = handle_mm_fault(vma, address, fault_flags);
888  	if (ret & VM_FAULT_ERROR) {
889  		int err = vm_fault_to_errno(ret, *flags);
890  
891  		if (err)
892  			return err;
893  		BUG();
894  	}
895  
896  	if (tsk) {
897  		if (ret & VM_FAULT_MAJOR)
898  			tsk->maj_flt++;
899  		else
900  			tsk->min_flt++;
901  	}
902  
903  	if (ret & VM_FAULT_RETRY) {
904  		if (locked && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
905  			*locked = 0;
906  		return -EBUSY;
907  	}
908  
909  	/*
910  	 * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
911  	 * necessary, even if maybe_mkwrite decided not to set pte_write. We
912  	 * can thus safely do subsequent page lookups as if they were reads.
913  	 * But only do so when looping for pte_write is futile: in some cases
914  	 * userspace may also be wanting to write to the gotten user page,
915  	 * which a read fault here might prevent (a readonly page might get
916  	 * reCOWed by userspace write).
917  	 */
918  	if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
919  		*flags |= FOLL_COW;
920  	return 0;
921  }
922  
923  static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
924  {
925  	vm_flags_t vm_flags = vma->vm_flags;
926  	int write = (gup_flags & FOLL_WRITE);
927  	int foreign = (gup_flags & FOLL_REMOTE);
928  
929  	if (vm_flags & (VM_IO | VM_PFNMAP))
930  		return -EFAULT;
931  
932  	if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
933  		return -EFAULT;
934  
935  	if (write) {
936  		if (!(vm_flags & VM_WRITE)) {
937  			if (!(gup_flags & FOLL_FORCE))
938  				return -EFAULT;
939  			/*
940  			 * We used to let the write,force case do COW in a
941  			 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
942  			 * set a breakpoint in a read-only mapping of an
943  			 * executable, without corrupting the file (yet only
944  			 * when that file had been opened for writing!).
945  			 * Anon pages in shared mappings are surprising: now
946  			 * just reject it.
947  			 */
948  			if (!is_cow_mapping(vm_flags))
949  				return -EFAULT;
950  		}
951  	} else if (!(vm_flags & VM_READ)) {
952  		if (!(gup_flags & FOLL_FORCE))
953  			return -EFAULT;
954  		/*
955  		 * Is there actually any vma we can reach here which does not
956  		 * have VM_MAYREAD set?
957  		 */
958  		if (!(vm_flags & VM_MAYREAD))
959  			return -EFAULT;
960  	}
961  	/*
962  	 * gups are always data accesses, not instruction
963  	 * fetches, so execute=false here
964  	 */
965  	if (!arch_vma_access_permitted(vma, write, false, foreign))
966  		return -EFAULT;
967  	return 0;
968  }
969  
970  /**
971   * __get_user_pages() - pin user pages in memory
972   * @tsk:	task_struct of target task
973   * @mm:		mm_struct of target mm
974   * @start:	starting user address
975   * @nr_pages:	number of pages from start to pin
976   * @gup_flags:	flags modifying pin behaviour
977   * @pages:	array that receives pointers to the pages pinned.
978   *		Should be at least nr_pages long. Or NULL, if caller
979   *		only intends to ensure the pages are faulted in.
980   * @vmas:	array of pointers to vmas corresponding to each page.
981   *		Or NULL if the caller does not require them.
982   * @locked:     whether we're still with the mmap_lock held
983   * @locked:     whether the mmap_lock is still held
984   * Returns either number of pages pinned (which may be less than the
985   * number requested), or an error. Details about the return value:
986   *
987   * -- If nr_pages is 0, returns 0.
988   * -- If nr_pages is >0, but no pages were pinned, returns -errno.
989   * -- If nr_pages is >0, and some pages were pinned, returns the number of
990   *    pages pinned. Again, this may be less than nr_pages.
991   * -- 0 return value is possible when the fault would need to be retried.
992   *
993   * The caller is responsible for releasing returned @pages, via put_page().
994   *
995   * @vmas are valid only as long as mmap_lock is held.
996   *
997   * Must be called with mmap_lock held.  It may be released.  See below.
998   *
999   * __get_user_pages walks a process's page tables and takes a reference to
1000   * each struct page that each user address corresponds to at a given
1001   * instant. That is, it takes the page that would be accessed if a user
1002   * thread accesses the given user virtual address at that instant.
1003   *
1004   * This does not guarantee that the page exists in the user mappings when
1005   * __get_user_pages returns, and there may even be a completely different
1006   * page there in some cases (eg. if mmapped pagecache has been invalidated
1007   * and subsequently re-faulted). However, it does guarantee that the page
1008   * won't be freed completely. And mostly callers simply care that the page
1009   * contains data that was valid *at some point in time*. Typically, an IO
1010   * or similar operation cannot guarantee anything stronger anyway because
1011   * locks can't be held over the syscall boundary.
1012   *
1013   * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
1014   * the page is written to, set_page_dirty (or set_page_dirty_lock, as
1015   * appropriate) must be called after the page is finished with, and
1016   * before put_page is called.
1017   *
1018   * If @locked != NULL, *@locked will be set to 0 when mmap_lock is
1019   * released by an up_read().  That can happen if @gup_flags does not
1020   * have FOLL_NOWAIT.
1021   *
1022   * A caller using such a combination of @locked and @gup_flags
1023   * must therefore hold the mmap_lock for reading only, and recognize
1024   * when it's been released.  Otherwise, it must be held for either
1025   * reading or writing and will not be released.
1026   *
1027   * In most cases, get_user_pages or get_user_pages_fast should be used
1028   * instead of __get_user_pages. __get_user_pages should be used only if
1029   * you need some special @gup_flags.
1030   */
1031  static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1032  		unsigned long start, unsigned long nr_pages,
1033  		unsigned int gup_flags, struct page **pages,
1034  		struct vm_area_struct **vmas, int *locked)
1035  {
1036  	long ret = 0, i = 0;
1037  	struct vm_area_struct *vma = NULL;
1038  	struct follow_page_context ctx = { NULL };
1039  
1040  	if (!nr_pages)
1041  		return 0;
1042  
1043  	start = untagged_addr(start);
1044  
1045  	VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));
1046  
1047  	/*
1048  	 * If FOLL_FORCE is set then do not force a full fault as the hinting
1049  	 * fault information is unrelated to the reference behaviour of a task
1050  	 * using the address space
1051  	 */
1052  	if (!(gup_flags & FOLL_FORCE))
1053  		gup_flags |= FOLL_NUMA;
1054  
1055  	do {
1056  		struct page *page;
1057  		unsigned int foll_flags = gup_flags;
1058  		unsigned int page_increm;
1059  
1060  		/* first iteration or crossing a vma boundary */
1061  		if (!vma || start >= vma->vm_end) {
1062  			vma = find_extend_vma(mm, start);
1063  			if (!vma && in_gate_area(mm, start)) {
1064  				ret = get_gate_page(mm, start & PAGE_MASK,
1065  						gup_flags, &vma,
1066  						pages ? &pages[i] : NULL);
1067  				if (ret)
1068  					goto out;
1069  				ctx.page_mask = 0;
1070  				goto next_page;
1071  			}
1072  
1073  			if (!vma || check_vma_flags(vma, gup_flags)) {
1074  				ret = -EFAULT;
1075  				goto out;
1076  			}
1077  			if (is_vm_hugetlb_page(vma)) {
1078  				if (should_force_cow_break(vma, foll_flags))
1079  					foll_flags |= FOLL_WRITE;
1080  				i = follow_hugetlb_page(mm, vma, pages, vmas,
1081  						&start, &nr_pages, i,
1082  						foll_flags, locked);
1083  				if (locked && *locked == 0) {
1084  					/*
1085  					 * We've got a VM_FAULT_RETRY
1086  					 * and we've lost mmap_lock.
1087  					 * We must stop here.
1088  					 */
1089  					BUG_ON(gup_flags & FOLL_NOWAIT);
1090  					BUG_ON(ret != 0);
1091  					goto out;
1092  				}
1093  				continue;
1094  			}
1095  		}
1096  
1097  		if (should_force_cow_break(vma, foll_flags))
1098  			foll_flags |= FOLL_WRITE;
1099  
1100  retry:
1101  		/*
1102  		 * If we have a pending SIGKILL, don't keep faulting pages and
1103  		 * potentially allocating memory.
1104  		 */
1105  		if (fatal_signal_pending(current)) {
1106  			ret = -EINTR;
1107  			goto out;
1108  		}
1109  		cond_resched();
1110  
1111  		page = follow_page_mask(vma, start, foll_flags, &ctx);
1112  		if (!page) {
1113  			ret = faultin_page(tsk, vma, start, &foll_flags,
1114  					   locked);
1115  			switch (ret) {
1116  			case 0:
1117  				goto retry;
1118  			case -EBUSY:
1119  				ret = 0;
1120  				fallthrough;
1121  			case -EFAULT:
1122  			case -ENOMEM:
1123  			case -EHWPOISON:
1124  				goto out;
1125  			case -ENOENT:
1126  				goto next_page;
1127  			}
1128  			BUG();
1129  		} else if (PTR_ERR(page) == -EEXIST) {
1130  			/*
1131  			 * Proper page table entry exists, but no corresponding
1132  			 * struct page.
1133  			 */
1134  			goto next_page;
1135  		} else if (IS_ERR(page)) {
1136  			ret = PTR_ERR(page);
1137  			goto out;
1138  		}
1139  		if (pages) {
1140  			pages[i] = page;
1141  			flush_anon_page(vma, page, start);
1142  			flush_dcache_page(page);
1143  			ctx.page_mask = 0;
1144  		}
1145  next_page:
1146  		if (vmas) {
1147  			vmas[i] = vma;
1148  			ctx.page_mask = 0;
1149  		}
1150  		page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
1151  		if (page_increm > nr_pages)
1152  			page_increm = nr_pages;
1153  		i += page_increm;
1154  		start += page_increm * PAGE_SIZE;
1155  		nr_pages -= page_increm;
1156  	} while (nr_pages);
1157  out:
1158  	if (ctx.pgmap)
1159  		put_dev_pagemap(ctx.pgmap);
1160  	return i ? i : ret;
1161  }
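
/*
 * Editorial note on the page_increm arithmetic above (not part of this file).
 * When neither @pages nor @vmas is supplied (e.g. the mlock/populate path),
 * ctx.page_mask survives to the increment step, so after mapping a PMD-sized
 * THP (ctx.page_mask = HPAGE_PMD_NR - 1 = 511 with 4K pages) the loop skips
 * the rest of the huge page in one go. For example, if @start points at
 * subpage index 500 of the THP:
 *
 *	page_increm = 1 + (~(start >> PAGE_SHIFT) & 511) = 1 + 11 = 12
 *
 * covering subpages 500..511 without re-walking the page tables. When an
 * output array is supplied, the mask is cleared and each subpage is returned
 * individually.
 */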
1162  
1163  static bool vma_permits_fault(struct vm_area_struct *vma,
1164  			      unsigned int fault_flags)
1165  {
1166  	bool write   = !!(fault_flags & FAULT_FLAG_WRITE);
1167  	bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
1168  	vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;
1169  
1170  	if (!(vm_flags & vma->vm_flags))
1171  		return false;
1172  
1173  	/*
1174  	 * The architecture might have a hardware protection
1175  	 * mechanism other than read/write that can deny access.
1176  	 *
1177  	 * gup always represents data access, not instruction
1178  	 * fetches, so execute=false here:
1179  	 */
1180  	if (!arch_vma_access_permitted(vma, write, false, foreign))
1181  		return false;
1182  
1183  	return true;
1184  }
1185  
1186  /**
1187   * fixup_user_fault() - manually resolve a user page fault
1188   * @tsk:	the task_struct to use for page fault accounting, or
1189   *		NULL if faults are not to be recorded.
1190   * @mm:		mm_struct of target mm
1191   * @address:	user address
1192   * @fault_flags:flags to pass down to handle_mm_fault()
1193   * @unlocked:	did we unlock the mmap_lock while retrying, maybe NULL if caller
1194   *		does not allow retry. If NULL, the caller must guarantee
1195   *		that fault_flags does not contain FAULT_FLAG_ALLOW_RETRY.
1196   *
1197   * This is meant to be called in the specific scenario where, for locking
1198   * reasons, we try to access user memory in atomic context (within a
1199   * pagefault_disable() section), that access returns -EFAULT, and we want to
1200   * resolve the user fault before trying again.
1201   *
1202   * Typically this is meant to be used by the futex code.
1203   *
1204   * The main difference with get_user_pages() is that this function will
1205   * unconditionally call handle_mm_fault() which will in turn perform all the
1206   * necessary SW fixup of the dirty and young bits in the PTE, while
1207   * get_user_pages() only guarantees to update these in the struct page.
1208   *
1209   * This is important for some architectures where those bits also gate the
1210   * access permission to the page because they are maintained in software.  On
1211   * such architectures, gup() will not be enough to make a subsequent access
1212   * succeed.
1213   *
1214   * This function will not return with an unlocked mmap_lock. So it does not
1215   * have the same semantics wrt the @mm->mmap_lock as does filemap_fault().
1216   */
1217  int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
1218  		     unsigned long address, unsigned int fault_flags,
1219  		     bool *unlocked)
1220  {
1221  	struct vm_area_struct *vma;
1222  	vm_fault_t ret, major = 0;
1223  
1224  	address = untagged_addr(address);
1225  
1226  	if (unlocked)
1227  		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
1228  
1229  retry:
1230  	vma = find_extend_vma(mm, address);
1231  	if (!vma || address < vma->vm_start)
1232  		return -EFAULT;
1233  
1234  	if (!vma_permits_fault(vma, fault_flags))
1235  		return -EFAULT;
1236  
1237  	if ((fault_flags & FAULT_FLAG_KILLABLE) &&
1238  	    fatal_signal_pending(current))
1239  		return -EINTR;
1240  
1241  	ret = handle_mm_fault(vma, address, fault_flags);
1242  	major |= ret & VM_FAULT_MAJOR;
1243  	if (ret & VM_FAULT_ERROR) {
1244  		int err = vm_fault_to_errno(ret, 0);
1245  
1246  		if (err)
1247  			return err;
1248  		BUG();
1249  	}
1250  
1251  	if (ret & VM_FAULT_RETRY) {
1252  		mmap_read_lock(mm);
1253  		*unlocked = true;
1254  		fault_flags |= FAULT_FLAG_TRIED;
1255  		goto retry;
1256  	}
1257  
1258  	if (tsk) {
1259  		if (major)
1260  			tsk->maj_flt++;
1261  		else
1262  			tsk->min_flt++;
1263  	}
1264  	return 0;
1265  }
1266  EXPORT_SYMBOL_GPL(fixup_user_fault);
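
/*
 * Editorial usage sketch (not part of this file), modelled on the futex-style
 * pattern described in the kernel-doc above: the fast path touches user
 * memory under pagefault_disable(), and on -EFAULT the slow path resolves the
 * fault and retries. Roughly:
 *
 *	mmap_read_lock(mm);
 *	ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
 *			       FAULT_FLAG_WRITE, NULL);
 *	mmap_read_unlock(mm);
 *	if (ret)
 *		return ret;
 *	... retry the pagefault_disable()'d access ...
 *
 * Passing NULL for @unlocked is only valid because FAULT_FLAG_ALLOW_RETRY is
 * not requested here; the task_struct argument may also be NULL if fault
 * accounting is not wanted.
 */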
1267  
1268  /*
1269   * Please note that this function, unlike __get_user_pages will not
1270   * return 0 for nr_pages > 0 without FOLL_NOWAIT
1271   */
1272  static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
1273  						struct mm_struct *mm,
1274  						unsigned long start,
1275  						unsigned long nr_pages,
1276  						struct page **pages,
1277  						struct vm_area_struct **vmas,
1278  						int *locked,
1279  						unsigned int flags)
1280  {
1281  	long ret, pages_done;
1282  	bool lock_dropped;
1283  
1284  	if (locked) {
1285  		/* if VM_FAULT_RETRY can be returned, vmas become invalid */
1286  		BUG_ON(vmas);
1287  		/* check caller initialized locked */
1288  		BUG_ON(*locked != 1);
1289  	}
1290  
1291  	/*
1292  	 * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior
1293  	 * is to set FOLL_GET if the caller wants pages[] filled in (but has
1294  	 * carelessly failed to specify FOLL_GET), so keep doing that, but only
1295  	 * for FOLL_GET, not for the newer FOLL_PIN.
1296  	 *
1297  	 * FOLL_PIN always expects pages to be non-null, but no need to assert
1298  	 * that here, as any failures will be obvious enough.
1299  	 */
1300  	if (pages && !(flags & FOLL_PIN))
1301  		flags |= FOLL_GET;
1302  
1303  	pages_done = 0;
1304  	lock_dropped = false;
1305  	for (;;) {
1306  		ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages,
1307  				       vmas, locked);
1308  		if (!locked)
1309  			/* VM_FAULT_RETRY couldn't trigger, bypass */
1310  			return ret;
1311  
1312  		/* VM_FAULT_RETRY cannot return errors */
1313  		if (!*locked) {
1314  			BUG_ON(ret < 0);
1315  			BUG_ON(ret >= nr_pages);
1316  		}
1317  
1318  		if (ret > 0) {
1319  			nr_pages -= ret;
1320  			pages_done += ret;
1321  			if (!nr_pages)
1322  				break;
1323  		}
1324  		if (*locked) {
1325  			/*
1326  			 * VM_FAULT_RETRY didn't trigger or it was a
1327  			 * FOLL_NOWAIT.
1328  			 */
1329  			if (!pages_done)
1330  				pages_done = ret;
1331  			break;
1332  		}
1333  		/*
1334  		 * VM_FAULT_RETRY triggered, so seek to the faulting offset.
1335  		 * For the prefault case (!pages) we only update counts.
1336  		 */
1337  		if (likely(pages))
1338  			pages += ret;
1339  		start += ret << PAGE_SHIFT;
1340  		lock_dropped = true;
1341  
1342  retry:
1343  		/*
1344  		 * Repeat on the address that fired VM_FAULT_RETRY
1345  		 * with both FAULT_FLAG_ALLOW_RETRY and
1346  		 * FAULT_FLAG_TRIED.  Note that GUP can be interrupted
1347  		 * by fatal signals, so we need to check it before we
1348  		 * start trying again otherwise it can loop forever.
1349  		 */
1350  
1351  		if (fatal_signal_pending(current)) {
1352  			if (!pages_done)
1353  				pages_done = -EINTR;
1354  			break;
1355  		}
1356  
1357  		ret = mmap_read_lock_killable(mm);
1358  		if (ret) {
1359  			BUG_ON(ret > 0);
1360  			if (!pages_done)
1361  				pages_done = ret;
1362  			break;
1363  		}
1364  
1365  		*locked = 1;
1366  		ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
1367  				       pages, NULL, locked);
1368  		if (!*locked) {
1369  			/* Continue to retry until we succeed */
1370  			BUG_ON(ret != 0);
1371  			goto retry;
1372  		}
1373  		if (ret != 1) {
1374  			BUG_ON(ret > 1);
1375  			if (!pages_done)
1376  				pages_done = ret;
1377  			break;
1378  		}
1379  		nr_pages--;
1380  		pages_done++;
1381  		if (!nr_pages)
1382  			break;
1383  		if (likely(pages))
1384  			pages++;
1385  		start += PAGE_SIZE;
1386  	}
1387  	if (lock_dropped && *locked) {
1388  		/*
1389  		 * We must let the caller know we temporarily dropped the lock
1390  		 * and so the critical section protected by it was lost.
1391  		 */
1392  		mmap_read_unlock(mm);
1393  		*locked = 0;
1394  	}
1395  	return pages_done;
1396  }
1397  
1398  /**
1399   * populate_vma_page_range() -  populate a range of pages in the vma.
1400   * @vma:   target vma
1401   * @start: start address
1402   * @end:   end address
1403   * @locked: whether the mmap_lock is still held
1404   *
1405   * This takes care of mlocking the pages too if VM_LOCKED is set.
1406   *
1407   * return 0 on success, negative error code on error.
1408   *
1409   * vma->vm_mm->mmap_lock must be held.
1410   *
1411   * If @locked is NULL, it may be held for read or write and will
1412   * be unperturbed.
1413   *
1414   * If @locked is non-NULL, it must be held for read only and may be
1415   * released.  If it's released, *@locked will be set to 0.
1416   */
1417  long populate_vma_page_range(struct vm_area_struct *vma,
1418  		unsigned long start, unsigned long end, int *locked)
1419  {
1420  	struct mm_struct *mm = vma->vm_mm;
1421  	unsigned long nr_pages = (end - start) / PAGE_SIZE;
1422  	int gup_flags;
1423  
1424  	VM_BUG_ON(start & ~PAGE_MASK);
1425  	VM_BUG_ON(end   & ~PAGE_MASK);
1426  	VM_BUG_ON_VMA(start < vma->vm_start, vma);
1427  	VM_BUG_ON_VMA(end   > vma->vm_end, vma);
1428  	mmap_assert_locked(mm);
1429  
1430  	gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
1431  	if (vma->vm_flags & VM_LOCKONFAULT)
1432  		gup_flags &= ~FOLL_POPULATE;
1433  	/*
1434  	 * We want to touch writable mappings with a write fault in order
1435  	 * to break COW, except for shared mappings because these don't COW
1436  	 * and we would not want to dirty them for nothing.
1437  	 */
1438  	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
1439  		gup_flags |= FOLL_WRITE;
1440  
1441  	/*
1442  	 * We want mlock to succeed for regions that have any permissions
1443  	 * other than PROT_NONE.
1444  	 */
1445  	if (vma_is_accessible(vma))
1446  		gup_flags |= FOLL_FORCE;
1447  
1448  	/*
1449  	 * We made sure addr is within a VMA, so the following will
1450  	 * not result in a stack expansion that recurses back here.
1451  	 */
1452  	return __get_user_pages(current, mm, start, nr_pages, gup_flags,
1453  				NULL, NULL, locked);
1454  }
1455  
1456  /*
1457   * __mm_populate - populate and/or mlock pages within a range of address space.
1458   *
1459   * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
1460   * flags. VMAs must be already marked with the desired vm_flags, and
1461   * mmap_lock must not be held.
1462   */
1463  int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
1464  {
1465  	struct mm_struct *mm = current->mm;
1466  	unsigned long end, nstart, nend;
1467  	struct vm_area_struct *vma = NULL;
1468  	int locked = 0;
1469  	long ret = 0;
1470  
1471  	end = start + len;
1472  
1473  	for (nstart = start; nstart < end; nstart = nend) {
1474  		/*
1475  		 * We want to fault in pages for [nstart; end) address range.
1476  		 * Find first corresponding VMA.
1477  		 */
1478  		if (!locked) {
1479  			locked = 1;
1480  			mmap_read_lock(mm);
1481  			vma = find_vma(mm, nstart);
1482  		} else if (nstart >= vma->vm_end)
1483  			vma = vma->vm_next;
1484  		if (!vma || vma->vm_start >= end)
1485  			break;
1486  		/*
1487  		 * Set [nstart; nend) to intersection of desired address
1488  		 * range with the first VMA. Also, skip undesirable VMA types.
1489  		 */
1490  		nend = min(end, vma->vm_end);
1491  		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
1492  			continue;
1493  		if (nstart < vma->vm_start)
1494  			nstart = vma->vm_start;
1495  		/*
1496  		 * Now fault in a range of pages. populate_vma_page_range()
1497  		 * double checks the vma flags, so that it won't mlock pages
1498  		 * if the vma was already munlocked.
1499  		 */
1500  		ret = populate_vma_page_range(vma, nstart, nend, &locked);
1501  		if (ret < 0) {
1502  			if (ignore_errors) {
1503  				ret = 0;
1504  				continue;	/* continue at next VMA */
1505  			}
1506  			break;
1507  		}
1508  		nend = nstart + ret * PAGE_SIZE;
1509  		ret = 0;
1510  	}
1511  	if (locked)
1512  		mmap_read_unlock(mm);
1513  	return ret;	/* 0 or negative error code */
1514  }
1515  
1516  /**
1517   * get_dump_page() - pin user page in memory while writing it to core dump
1518   * @addr: user address
1519   *
1520   * Returns struct page pointer of user page pinned for dump,
1521   * to be freed afterwards by put_page().
1522   *
1523   * Returns NULL on any kind of failure - a hole must then be inserted into
1524   * the corefile, to preserve alignment with its headers; and also returns
1525   * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
1526   * allowing a hole to be left in the corefile to save disk space.
1527   *
1528   * Called without mmap_lock, but after all other threads have been killed.
1529   */
1530  #ifdef CONFIG_ELF_CORE
1531  struct page *get_dump_page(unsigned long addr)
1532  {
1533  	struct vm_area_struct *vma;
1534  	struct page *page;
1535  
1536  	if (__get_user_pages(current, current->mm, addr, 1,
1537  			     FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
1538  			     NULL) < 1)
1539  		return NULL;
1540  	flush_cache_page(vma, addr, page_to_pfn(page));
1541  	return page;
1542  }
1543  #endif /* CONFIG_ELF_CORE */
1544  #else /* CONFIG_MMU */
1545  static long __get_user_pages_locked(struct task_struct *tsk,
1546  		struct mm_struct *mm, unsigned long start,
1547  		unsigned long nr_pages, struct page **pages,
1548  		struct vm_area_struct **vmas, int *locked,
1549  		unsigned int foll_flags)
1550  {
1551  	struct vm_area_struct *vma;
1552  	unsigned long vm_flags;
1553  	int i;
1554  
1555  	/* calculate required read or write permissions.
1556  	 * If FOLL_FORCE is set, we only require the "MAY" flags.
1557  	 */
1558  	vm_flags  = (foll_flags & FOLL_WRITE) ?
1559  			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
1560  	vm_flags &= (foll_flags & FOLL_FORCE) ?
1561  			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
1562  
1563  	for (i = 0; i < nr_pages; i++) {
1564  		vma = find_vma(mm, start);
1565  		if (!vma)
1566  			goto finish_or_fault;
1567  
1568  		/* protect what we can, including chardevs */
1569  		if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
1570  		    !(vm_flags & vma->vm_flags))
1571  			goto finish_or_fault;
1572  
1573  		if (pages) {
1574  			pages[i] = virt_to_page(start);
1575  			if (pages[i])
1576  				get_page(pages[i]);
1577  		}
1578  		if (vmas)
1579  			vmas[i] = vma;
1580  		start = (start + PAGE_SIZE) & PAGE_MASK;
1581  	}
1582  
1583  	return i;
1584  
1585  finish_or_fault:
1586  	return i ? : -EFAULT;
1587  }
1588  #endif /* !CONFIG_MMU */
1589  
1590  #if defined(CONFIG_FS_DAX) || defined (CONFIG_CMA)
1591  static bool check_dax_vmas(struct vm_area_struct **vmas, long nr_pages)
1592  {
1593  	long i;
1594  	struct vm_area_struct *vma_prev = NULL;
1595  
1596  	for (i = 0; i < nr_pages; i++) {
1597  		struct vm_area_struct *vma = vmas[i];
1598  
1599  		if (vma == vma_prev)
1600  			continue;
1601  
1602  		vma_prev = vma;
1603  
1604  		if (vma_is_fsdax(vma))
1605  			return true;
1606  	}
1607  	return false;
1608  }
1609  
1610  #ifdef CONFIG_CMA
1611  static struct page *new_non_cma_page(struct page *page, unsigned long private)
1612  {
1613  	/*
1614  	 * We want to make sure we allocate the new page from the same node
1615  	 * as the source page.
1616  	 */
1617  	int nid = page_to_nid(page);
1618  	/*
1619  	 * Trying to allocate a page for migration. Ignore allocation
1620  	 * failure warnings. We don't force __GFP_THISNODE here because
1621  	 * this node is the node where we have the CMA reservation, and
1622  	 * in some cases such nodes will have very little non-movable
1623  	 * memory available for allocation.
1624  	 */
1625  	gfp_t gfp_mask = GFP_USER | __GFP_NOWARN;
1626  
1627  	if (PageHighMem(page))
1628  		gfp_mask |= __GFP_HIGHMEM;
1629  
1630  #ifdef CONFIG_HUGETLB_PAGE
1631  	if (PageHuge(page)) {
1632  		struct hstate *h = page_hstate(page);
1633  		/*
1634  		 * We don't want to dequeue from the pool because pool pages will
1635  		 * mostly be from the CMA region.
1636  		 */
1637  		return alloc_migrate_huge_page(h, gfp_mask, nid, NULL);
1638  	}
1639  #endif
1640  	if (PageTransHuge(page)) {
1641  		struct page *thp;
1642  		/*
1643  		 * ignore allocation failure warnings
1644  		 */
1645  		gfp_t thp_gfpmask = GFP_TRANSHUGE | __GFP_NOWARN;
1646  
1647  		/*
1648  		 * Remove the movable mask so that we don't allocate from
1649  		 * CMA area again.
1650  		 */
1651  		thp_gfpmask &= ~__GFP_MOVABLE;
1652  		thp = __alloc_pages_node(nid, thp_gfpmask, HPAGE_PMD_ORDER);
1653  		if (!thp)
1654  			return NULL;
1655  		prep_transhuge_page(thp);
1656  		return thp;
1657  	}
1658  
1659  	return __alloc_pages_node(nid, gfp_mask, 0);
1660  }
1661  
1662  static long check_and_migrate_cma_pages(struct task_struct *tsk,
1663  					struct mm_struct *mm,
1664  					unsigned long start,
1665  					unsigned long nr_pages,
1666  					struct page **pages,
1667  					struct vm_area_struct **vmas,
1668  					unsigned int gup_flags)
1669  {
1670  	unsigned long i;
1671  	unsigned long step;
1672  	bool drain_allow = true;
1673  	bool migrate_allow = true;
1674  	LIST_HEAD(cma_page_list);
1675  	long ret = nr_pages;
1676  
1677  check_again:
1678  	for (i = 0; i < nr_pages;) {
1679  
1680  		struct page *head = compound_head(pages[i]);
1681  
1682  		/*
1683  		 * gup may start from a tail page. Advance step by the
1684  		 * remaining part of the compound page.
1685  		 */
1686  		step = compound_nr(head) - (pages[i] - head);
1687  		/*
1688  		 * If we get a page from the CMA zone, since we are going to
1689  		 * be pinning these entries, we might as well move them out
1690  		 * of the CMA zone if possible.
1691  		 */
1692  		if (is_migrate_cma_page(head)) {
1693  			if (PageHuge(head))
1694  				isolate_huge_page(head, &cma_page_list);
1695  			else {
1696  				if (!PageLRU(head) && drain_allow) {
1697  					lru_add_drain_all();
1698  					drain_allow = false;
1699  				}
1700  
1701  				if (!isolate_lru_page(head)) {
1702  					list_add_tail(&head->lru, &cma_page_list);
1703  					mod_node_page_state(page_pgdat(head),
1704  							    NR_ISOLATED_ANON +
1705  							    page_is_file_lru(head),
1706  							    hpage_nr_pages(head));
1707  				}
1708  			}
1709  		}
1710  
1711  		i += step;
1712  	}
1713  
1714  	if (!list_empty(&cma_page_list)) {
1715  		/*
1716  		 * drop the above get_user_pages reference.
1717  		 */
1718  		for (i = 0; i < nr_pages; i++)
1719  			put_page(pages[i]);
1720  
1721  		if (migrate_pages(&cma_page_list, new_non_cma_page,
1722  				  NULL, 0, MIGRATE_SYNC, MR_CONTIG_RANGE)) {
1723  			/*
1724  			 * Some of the pages failed migration. Do get_user_pages
1725  			 * without migration.
1726  			 */
1727  			migrate_allow = false;
1728  
1729  			if (!list_empty(&cma_page_list))
1730  				putback_movable_pages(&cma_page_list);
1731  		}
1732  		/*
1733  		 * We did migrate all the pages. Try to take the page references
1734  		 * again, migrating any new CMA pages which we failed to isolate
1735  		 * earlier.
1736  		 */
1737  		ret = __get_user_pages_locked(tsk, mm, start, nr_pages,
1738  						   pages, vmas, NULL,
1739  						   gup_flags);
1740  
1741  		if ((ret > 0) && migrate_allow) {
1742  			nr_pages = ret;
1743  			drain_allow = true;
1744  			goto check_again;
1745  		}
1746  	}
1747  
1748  	return ret;
1749  }
1750  #else
1751  static long check_and_migrate_cma_pages(struct task_struct *tsk,
1752  					struct mm_struct *mm,
1753  					unsigned long start,
1754  					unsigned long nr_pages,
1755  					struct page **pages,
1756  					struct vm_area_struct **vmas,
1757  					unsigned int gup_flags)
1758  {
1759  	return nr_pages;
1760  }
1761  #endif /* CONFIG_CMA */
1762  
1763  /*
1764   * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which
1765   * allows us to process the FOLL_LONGTERM flag.
1766   */
1767  static long __gup_longterm_locked(struct task_struct *tsk,
1768  				  struct mm_struct *mm,
1769  				  unsigned long start,
1770  				  unsigned long nr_pages,
1771  				  struct page **pages,
1772  				  struct vm_area_struct **vmas,
1773  				  unsigned int gup_flags)
1774  {
1775  	struct vm_area_struct **vmas_tmp = vmas;
1776  	unsigned long flags = 0;
1777  	long rc, i;
1778  
1779  	if (gup_flags & FOLL_LONGTERM) {
1780  		if (!pages)
1781  			return -EINVAL;
1782  
1783  		if (!vmas_tmp) {
1784  			vmas_tmp = kcalloc(nr_pages,
1785  					   sizeof(struct vm_area_struct *),
1786  					   GFP_KERNEL);
1787  			if (!vmas_tmp)
1788  				return -ENOMEM;
1789  		}
1790  		flags = memalloc_nocma_save();
1791  	}
1792  
1793  	rc = __get_user_pages_locked(tsk, mm, start, nr_pages, pages,
1794  				     vmas_tmp, NULL, gup_flags);
1795  
1796  	if (gup_flags & FOLL_LONGTERM) {
1797  		memalloc_nocma_restore(flags);
1798  		if (rc < 0)
1799  			goto out;
1800  
1801  		if (check_dax_vmas(vmas_tmp, rc)) {
1802  			for (i = 0; i < rc; i++)
1803  				put_page(pages[i]);
1804  			rc = -EOPNOTSUPP;
1805  			goto out;
1806  		}
1807  
1808  		rc = check_and_migrate_cma_pages(tsk, mm, start, rc, pages,
1809  						 vmas_tmp, gup_flags);
1810  	}
1811  
1812  out:
1813  	if (vmas_tmp != vmas)
1814  		kfree(vmas_tmp);
1815  	return rc;
1816  }
1817  #else /* !CONFIG_FS_DAX && !CONFIG_CMA */
1818  static __always_inline long __gup_longterm_locked(struct task_struct *tsk,
1819  						  struct mm_struct *mm,
1820  						  unsigned long start,
1821  						  unsigned long nr_pages,
1822  						  struct page **pages,
1823  						  struct vm_area_struct **vmas,
1824  						  unsigned int flags)
1825  {
1826  	return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
1827  				       NULL, flags);
1828  }
1829  #endif /* CONFIG_FS_DAX || CONFIG_CMA */
1830  
1831  #ifdef CONFIG_MMU
1832  static long __get_user_pages_remote(struct task_struct *tsk,
1833  				    struct mm_struct *mm,
1834  				    unsigned long start, unsigned long nr_pages,
1835  				    unsigned int gup_flags, struct page **pages,
1836  				    struct vm_area_struct **vmas, int *locked)
1837  {
1838  	/*
1839  	 * Parts of FOLL_LONGTERM behavior are incompatible with
1840  	 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
1841  	 * vmas. However, this only comes up if locked is set, and there are
1842  	 * callers that do request FOLL_LONGTERM, but do not set locked. So,
1843  	 * allow what we can.
1844  	 */
1845  	if (gup_flags & FOLL_LONGTERM) {
1846  		if (WARN_ON_ONCE(locked))
1847  			return -EINVAL;
1848  		/*
1849  		 * This will check the vmas (even if our vmas arg is NULL)
1850  		 * and return -EOPNOTSUPP if DAX isn't allowed in this case:
1851  		 */
1852  		return __gup_longterm_locked(tsk, mm, start, nr_pages, pages,
1853  					     vmas, gup_flags | FOLL_TOUCH |
1854  					     FOLL_REMOTE);
1855  	}
1856  
1857  	return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
1858  				       locked,
1859  				       gup_flags | FOLL_TOUCH | FOLL_REMOTE);
1860  }
1861  
1862  /**
1863   * get_user_pages_remote() - pin user pages in memory
1864   * @tsk:	the task_struct to use for page fault accounting, or
1865   *		NULL if faults are not to be recorded.
1866   * @mm:		mm_struct of target mm
1867   * @start:	starting user address
1868   * @nr_pages:	number of pages from start to pin
1869   * @gup_flags:	flags modifying lookup behaviour
1870   * @pages:	array that receives pointers to the pages pinned.
1871   *		Should be at least nr_pages long. Or NULL, if caller
1872   *		only intends to ensure the pages are faulted in.
1873   * @vmas:	array of pointers to vmas corresponding to each page.
1874   *		Or NULL if the caller does not require them.
1875   * @locked:	pointer to lock flag indicating whether lock is held and
1876   *		subsequently whether VM_FAULT_RETRY functionality can be
1877   *		utilised. Lock must initially be held.
1878   *
1879   * Returns either number of pages pinned (which may be less than the
1880   * number requested), or an error. Details about the return value:
1881   *
1882   * -- If nr_pages is 0, returns 0.
1883   * -- If nr_pages is >0, but no pages were pinned, returns -errno.
1884   * -- If nr_pages is >0, and some pages were pinned, returns the number of
1885   *    pages pinned. Again, this may be less than nr_pages.
1886   *
1887   * The caller is responsible for releasing returned @pages, via put_page().
1888   *
1889   * @vmas are valid only as long as mmap_lock is held.
1890   *
1891   * Must be called with mmap_lock held for read or write.
1892   *
1893   * get_user_pages_remote walks a process's page tables and takes a reference
1894   * to each struct page that each user address corresponds to at a given
1895   * instant. That is, it takes the page that would be accessed if a user
1896   * thread accesses the given user virtual address at that instant.
1897   *
1898   * This does not guarantee that the page exists in the user mappings when
1899   * get_user_pages_remote returns, and there may even be a completely different
1900   * page there in some cases (e.g. if mmapped pagecache has been invalidated
1901   * and subsequently re-faulted). However, it does guarantee that the page
1902   * won't be freed completely. Mostly, callers simply care that the page
1903   * contains data that was valid *at some point in time*. Typically, an IO
1904   * or similar operation cannot guarantee anything stronger anyway because
1905   * locks can't be held over the syscall boundary.
1906   *
1907   * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
1908   * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
1909   * be called after the page is finished with, and before put_page is called.
1910   *
1911   * get_user_pages_remote is typically used for fewer-copy IO operations,
1912   * to get a handle on the memory by some means other than accesses
1913   * via the user virtual addresses. The pages may be submitted for
1914   * DMA to devices or accessed via their kernel linear mapping (via the
1915   * kmap APIs). Care should be taken to use the correct cache flushing APIs.
1916   *
1917   * See also get_user_pages_fast, for performance critical applications.
1918   *
1919   * get_user_pages_remote should be phased out in favor of
1920   * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
1921   * should use get_user_pages_remote because it cannot pass
1922   * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
1923   */
1924  long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
1925  		unsigned long start, unsigned long nr_pages,
1926  		unsigned int gup_flags, struct page **pages,
1927  		struct vm_area_struct **vmas, int *locked)
1928  {
1929  	/*
1930  	 * FOLL_PIN must only be set internally by the pin_user_pages*() APIs,
1931  	 * never directly by the caller, so enforce that with an assertion:
1932  	 */
1933  	if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
1934  		return -EINVAL;
1935  
1936  	return __get_user_pages_remote(tsk, mm, start, nr_pages, gup_flags,
1937  				       pages, vmas, locked);
1938  }
1939  EXPORT_SYMBOL(get_user_pages_remote);
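/*
 * Illustrative sketch (not part of the original file): one way a caller
 * might use get_user_pages_remote() above, under the required mmap_lock.
 * The names remote_task, remote_mm, user_addr and page are hypothetical.
 *
 *	struct page *page;
 *	long got;
 *
 *	mmap_read_lock(remote_mm);
 *	got = get_user_pages_remote(remote_task, remote_mm, user_addr, 1,
 *				    FOLL_WRITE, &page, NULL, NULL);
 *	mmap_read_unlock(remote_mm);
 *	if (got != 1)
 *		return got < 0 ? got : -EFAULT;
 *	... access the page (e.g. via kmap()), then dirty and release it ...
 *	set_page_dirty_lock(page);
 *	put_page(page);
 */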
1940  
1941  #else /* CONFIG_MMU */
1942  long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
1943  			   unsigned long start, unsigned long nr_pages,
1944  			   unsigned int gup_flags, struct page **pages,
1945  			   struct vm_area_struct **vmas, int *locked)
1946  {
1947  	return 0;
1948  }
1949  
1950  static long __get_user_pages_remote(struct task_struct *tsk,
1951  				    struct mm_struct *mm,
1952  				    unsigned long start, unsigned long nr_pages,
1953  				    unsigned int gup_flags, struct page **pages,
1954  				    struct vm_area_struct **vmas, int *locked)
1955  {
1956  	return 0;
1957  }
1958  #endif /* !CONFIG_MMU */
1959  
1960  /**
1961   * get_user_pages() - pin user pages in memory
1962   * @start:      starting user address
1963   * @nr_pages:   number of pages from start to pin
1964   * @gup_flags:  flags modifying lookup behaviour
1965   * @pages:      array that receives pointers to the pages pinned.
1966   *              Should be at least nr_pages long. Or NULL, if caller
1967   *              only intends to ensure the pages are faulted in.
1968   * @vmas:       array of pointers to vmas corresponding to each page.
1969   *              Or NULL if the caller does not require them.
1970   *
1971   * This is the same as get_user_pages_remote(), just with a
1972   * less-flexible calling convention where we assume that the task
1973   * and mm being operated on are the current task's and don't allow
1974   * passing of a locked parameter.  We also obviously don't pass
1975   * FOLL_REMOTE in here.
1976   */
1977  long get_user_pages(unsigned long start, unsigned long nr_pages,
1978  		unsigned int gup_flags, struct page **pages,
1979  		struct vm_area_struct **vmas)
1980  {
1981  	/*
1982  	 * FOLL_PIN must only be set internally by the pin_user_pages*() APIs,
1983  	 * never directly by the caller, so enforce that with an assertion:
1984  	 */
1985  	if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
1986  		return -EINVAL;
1987  
1988  	return __gup_longterm_locked(current, current->mm, start, nr_pages,
1989  				     pages, vmas, gup_flags | FOLL_TOUCH);
1990  }
1991  EXPORT_SYMBOL(get_user_pages);
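/*
 * Illustrative sketch (not part of the original file): get_user_pages()
 * above operates on current->mm and expects mmap_lock to be held by the
 * caller. The names buf, npages, pages and i are hypothetical.
 *
 *	mmap_read_lock(current->mm);
 *	got = get_user_pages(buf, npages, FOLL_WRITE, pages, NULL);
 *	mmap_read_unlock(current->mm);
 *	if (got < 0)
 *		return got;
 *	... use pages[0..got-1], then drop each reference ...
 *	for (i = 0; i < got; i++)
 *		put_page(pages[i]);
 */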
1992  
1993  /**
1994   * get_user_pages_locked() is suitable to replace the form:
1995   *
1996   *      mmap_read_lock(mm);
1997   *      do_something()
1998   *      get_user_pages(..., pages, NULL);
1999   *      mmap_read_unlock(mm);
2000   *
2001   *  to:
2002   *
2003   *      int locked = 1;
2004   *      mmap_read_lock(mm);
2005   *      do_something()
2006   *      get_user_pages_locked(..., pages, &locked);
2007   *      if (locked)
2008   *          mmap_read_unlock(mm);
2009   *
2010   * @start:      starting user address
2011   * @nr_pages:   number of pages from start to pin
2012   * @gup_flags:  flags modifying lookup behaviour
2013   * @pages:      array that receives pointers to the pages pinned.
2014   *              Should be at least nr_pages long. Or NULL, if caller
2015   *              only intends to ensure the pages are faulted in.
2016   * @locked:     pointer to lock flag indicating whether lock is held and
2017   *              subsequently whether VM_FAULT_RETRY functionality can be
2018   *              utilised. Lock must initially be held.
2019   *
2020   * We can leverage the VM_FAULT_RETRY functionality in the page fault
2021   * paths better by using either get_user_pages_locked() or
2022   * get_user_pages_unlocked().
2023   *
2024   */
2025  long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
2026  			   unsigned int gup_flags, struct page **pages,
2027  			   int *locked)
2028  {
2029  	/*
2030  	 * FIXME: Current FOLL_LONGTERM behavior is incompatible with
2031  	 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
2032  	 * vmas.  As there are no users of this flag in this call we simply
2033  	 * disallow this option for now.
2034  	 */
2035  	if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
2036  		return -EINVAL;
2037  	/*
2038  	 * FOLL_PIN must only be set internally by the pin_user_pages*() APIs,
2039  	 * never directly by the caller, so enforce that:
2040  	 */
2041  	if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
2042  		return -EINVAL;
2043  
2044  	return __get_user_pages_locked(current, current->mm, start, nr_pages,
2045  				       pages, NULL, locked,
2046  				       gup_flags | FOLL_TOUCH);
2047  }
2048  EXPORT_SYMBOL(get_user_pages_locked);
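/*
 * Illustrative sketch (not part of the original file) of the locked-retry
 * pattern shown above, spelled out with the current calling convention. If
 * the fault path had to drop mmap_lock (VM_FAULT_RETRY), "locked" comes back
 * as 0 and the caller must not unlock again. buf and pages are hypothetical.
 *
 *	int locked = 1;
 *
 *	mmap_read_lock(current->mm);
 *	got = get_user_pages_locked(buf, 1, FOLL_WRITE, pages, &locked);
 *	if (locked)
 *		mmap_read_unlock(current->mm);
 */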
2049  
2050  /*
2051   * get_user_pages_unlocked() is suitable to replace the form:
2052   *
2053   *      mmap_read_lock(mm);
2054   *      get_user_pages(..., pages, NULL);
2055   *      mmap_read_unlock(mm);
2056   *
2057   *  with:
2058   *
2059   *      get_user_pages_unlocked(..., pages);
2060   *
2061   * It is functionally equivalent to get_user_pages_fast so
2062   * get_user_pages_fast should be used instead if specific gup_flags
2063   * (e.g. FOLL_FORCE) are not required.
2064   */
2065  long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
2066  			     struct page **pages, unsigned int gup_flags)
2067  {
2068  	struct mm_struct *mm = current->mm;
2069  	int locked = 1;
2070  	long ret;
2071  
2072  	/*
2073  	 * FIXME: Current FOLL_LONGTERM behavior is incompatible with
2074  	 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
2075  	 * vmas.  As there are no users of this flag in this call we simply
2076  	 * disallow this option for now.
2077  	 */
2078  	if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
2079  		return -EINVAL;
2080  
2081  	mmap_read_lock(mm);
2082  	ret = __get_user_pages_locked(current, mm, start, nr_pages, pages, NULL,
2083  				      &locked, gup_flags | FOLL_TOUCH);
2084  	if (locked)
2085  		mmap_read_unlock(mm);
2086  	return ret;
2087  }
2088  EXPORT_SYMBOL(get_user_pages_unlocked);
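/*
 * Illustrative sketch (not part of the original file): with
 * get_user_pages_unlocked() the caller holds no locks at all; the function
 * takes and drops mmap_lock internally. Note the argument order: pages comes
 * before gup_flags. buf and pages are hypothetical names.
 *
 *	got = get_user_pages_unlocked(buf, 1, pages, FOLL_WRITE);
 */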
2089  
2090  /*
2091   * Fast GUP
2092   *
2093   * get_user_pages_fast attempts to pin user pages by walking the page
2094   * tables directly and avoids taking locks. Thus the walker needs to be
2095   * protected from page table pages being freed from under it, and should
2096   * block any THP splits.
2097   *
2098   * One way to achieve this is to have the walker disable interrupts, and
2099   * rely on IPIs from the TLB flushing code blocking before the page table
2100   * pages are freed. This is unsuitable for architectures that do not need
2101   * to broadcast an IPI when invalidating TLBs.
2102   *
2103   * Another way to achieve this is to batch up the page-table-containing pages
2104   * belonging to more than one mm_user, then schedule an rcu_sched callback to
2105   * free those pages. Disabling interrupts allows the fast_gup walker to block
2106   * both the rcu_sched callback and the IPI that we broadcast for splitting THPs
2107   * (which is a relatively rare event). The code below adopts this strategy.
2108   *
2109   * Before activating this code, please be aware that the following assumptions
2110   * are currently made:
2111   *
2112   *  *) Either MMU_GATHER_RCU_TABLE_FREE is enabled and tlb_remove_table() is used
2113   *  to free pages containing page tables, or TLB flushing requires an IPI broadcast.
2114   *
2115   *  *) ptes can be read atomically by the architecture.
2116   *
2117   *  *) access_ok is sufficient to validate userspace address ranges.
2118   *
2119   * The last two assumptions can be relaxed by the addition of helper functions.
2120   *
2121   * This code is based heavily on the PowerPC implementation by Nick Piggin.
2122   */
2123  #ifdef CONFIG_HAVE_FAST_GUP
2124  
2125  static void put_compound_head(struct page *page, int refs, unsigned int flags)
2126  {
2127  	if (flags & FOLL_PIN) {
2128  		mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_RELEASED,
2129  				    refs);
2130  
2131  		if (hpage_pincount_available(page))
2132  			hpage_pincount_sub(page, refs);
2133  		else
2134  			refs *= GUP_PIN_COUNTING_BIAS;
2135  	}
2136  
2137  	VM_BUG_ON_PAGE(page_ref_count(page) < refs, page);
2138  	/*
2139  	 * Calling put_page() for each ref is unnecessarily slow. Only the last
2140  	 * ref needs a put_page().
2141  	 */
2142  	if (refs > 1)
2143  		page_ref_sub(page, refs - 1);
2144  	put_page(page);
2145  }
2146  
2147  #ifdef CONFIG_GUP_GET_PTE_LOW_HIGH
2148  
2149  /*
2150   * WARNING: only to be used in the get_user_pages_fast() implementation.
2151   *
2152   * With get_user_pages_fast(), we walk down the pagetables without taking any
2153   * locks.  For this we would like to load the pointers atomically, but sometimes
2154   * that is not possible (e.g. without expensive cmpxchg8b on x86_32 PAE).  What
2155   * we do have is the guarantee that a PTE will only either go from not present
2156   * to present, or present to not present or both -- it will not switch to a
2157   * completely different present page without a TLB flush in between; something
2158   * that we are blocking by holding interrupts off.
2159   *
2160   * Setting ptes from not present to present goes:
2161   *
2162   *   ptep->pte_high = h;
2163   *   smp_wmb();
2164   *   ptep->pte_low = l;
2165   *
2166   * And present to not present goes:
2167   *
2168   *   ptep->pte_low = 0;
2169   *   smp_wmb();
2170   *   ptep->pte_high = 0;
2171   *
2172   * We must ensure here that the load of pte_low sees 'l' IFF pte_high sees 'h'.
2173   * We load pte_high *after* loading pte_low, which ensures we don't see an older
2174   * value of pte_high.  *Then* we recheck pte_low, which ensures that we haven't
2175   * picked up a changed pte high. We might have gotten rubbish values from
2176   * pte_low and pte_high, but we are guaranteed that pte_low will not have the
2177   * present bit set *unless* it is 'l'. Because get_user_pages_fast() only
2178   * operates on present ptes we're safe.
2179   */
2180  static inline pte_t gup_get_pte(pte_t *ptep)
2181  {
2182  	pte_t pte;
2183  
2184  	do {
2185  		pte.pte_low = ptep->pte_low;
2186  		smp_rmb();
2187  		pte.pte_high = ptep->pte_high;
2188  		smp_rmb();
2189  	} while (unlikely(pte.pte_low != ptep->pte_low));
2190  
2191  	return pte;
2192  }
2193  #else /* CONFIG_GUP_GET_PTE_LOW_HIGH */
2194  /*
2195   * We require that the PTE can be read atomically.
2196   */
2197  static inline pte_t gup_get_pte(pte_t *ptep)
2198  {
2199  	return READ_ONCE(*ptep);
2200  }
2201  #endif /* CONFIG_GUP_GET_PTE_LOW_HIGH */
2202  
2203  static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start,
2204  					    unsigned int flags,
2205  					    struct page **pages)
2206  {
2207  	while ((*nr) - nr_start) {
2208  		struct page *page = pages[--(*nr)];
2209  
2210  		ClearPageReferenced(page);
2211  		if (flags & FOLL_PIN)
2212  			unpin_user_page(page);
2213  		else
2214  			put_page(page);
2215  	}
2216  }
2217  
2218  #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
2219  static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
2220  			 unsigned int flags, struct page **pages, int *nr)
2221  {
2222  	struct dev_pagemap *pgmap = NULL;
2223  	int nr_start = *nr, ret = 0;
2224  	pte_t *ptep, *ptem;
2225  
2226  	ptem = ptep = pte_offset_map(&pmd, addr);
2227  	do {
2228  		pte_t pte = gup_get_pte(ptep);
2229  		struct page *head, *page;
2230  
2231  		/*
2232  		 * Similar to the PMD case below, NUMA hinting must take slow
2233  		 * path using the pte_protnone check.
2234  		 */
2235  		if (pte_protnone(pte))
2236  			goto pte_unmap;
2237  
2238  		if (!pte_access_permitted(pte, flags & FOLL_WRITE))
2239  			goto pte_unmap;
2240  
2241  		if (pte_devmap(pte)) {
2242  			if (unlikely(flags & FOLL_LONGTERM))
2243  				goto pte_unmap;
2244  
2245  			pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
2246  			if (unlikely(!pgmap)) {
2247  				undo_dev_pagemap(nr, nr_start, flags, pages);
2248  				goto pte_unmap;
2249  			}
2250  		} else if (pte_special(pte))
2251  			goto pte_unmap;
2252  
2253  		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
2254  		page = pte_page(pte);
2255  
2256  		head = try_grab_compound_head(page, 1, flags);
2257  		if (!head)
2258  			goto pte_unmap;
2259  
2260  		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
2261  			put_compound_head(head, 1, flags);
2262  			goto pte_unmap;
2263  		}
2264  
2265  		VM_BUG_ON_PAGE(compound_head(page) != head, page);
2266  
2267  		/*
2268  		 * We need to make the page accessible if and only if we are
2269  		 * going to access its content (the FOLL_PIN case).  Please
2270  		 * see Documentation/core-api/pin_user_pages.rst for
2271  		 * details.
2272  		 */
2273  		if (flags & FOLL_PIN) {
2274  			ret = arch_make_page_accessible(page);
2275  			if (ret) {
2276  				unpin_user_page(page);
2277  				goto pte_unmap;
2278  			}
2279  		}
2280  		SetPageReferenced(page);
2281  		pages[*nr] = page;
2282  		(*nr)++;
2283  
2284  	} while (ptep++, addr += PAGE_SIZE, addr != end);
2285  
2286  	ret = 1;
2287  
2288  pte_unmap:
2289  	if (pgmap)
2290  		put_dev_pagemap(pgmap);
2291  	pte_unmap(ptem);
2292  	return ret;
2293  }
2294  #else
2295  
2296  /*
2297   * If we can't determine whether or not a pte is special, then fail immediately
2298   * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
2299   * to be special.
2300   *
2301   * For a futex to be placed on a THP tail page, get_futex_key requires a
2302   * get_user_pages_fast_only implementation that can pin pages. Thus it's still
2303   * useful to have gup_huge_pmd even if we can't operate on ptes.
2304   */
2305  static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
2306  			 unsigned int flags, struct page **pages, int *nr)
2307  {
2308  	return 0;
2309  }
2310  #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
2311  
2312  #if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
2313  static int __gup_device_huge(unsigned long pfn, unsigned long addr,
2314  			     unsigned long end, unsigned int flags,
2315  			     struct page **pages, int *nr)
2316  {
2317  	int nr_start = *nr;
2318  	struct dev_pagemap *pgmap = NULL;
2319  
2320  	do {
2321  		struct page *page = pfn_to_page(pfn);
2322  
2323  		pgmap = get_dev_pagemap(pfn, pgmap);
2324  		if (unlikely(!pgmap)) {
2325  			undo_dev_pagemap(nr, nr_start, flags, pages);
2326  			return 0;
2327  		}
2328  		SetPageReferenced(page);
2329  		pages[*nr] = page;
2330  		if (unlikely(!try_grab_page(page, flags))) {
2331  			undo_dev_pagemap(nr, nr_start, flags, pages);
2332  			return 0;
2333  		}
2334  		(*nr)++;
2335  		pfn++;
2336  	} while (addr += PAGE_SIZE, addr != end);
2337  
2338  	if (pgmap)
2339  		put_dev_pagemap(pgmap);
2340  	return 1;
2341  }
2342  
2343  static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
2344  				 unsigned long end, unsigned int flags,
2345  				 struct page **pages, int *nr)
2346  {
2347  	unsigned long fault_pfn;
2348  	int nr_start = *nr;
2349  
2350  	fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
2351  	if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr))
2352  		return 0;
2353  
2354  	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
2355  		undo_dev_pagemap(nr, nr_start, flags, pages);
2356  		return 0;
2357  	}
2358  	return 1;
2359  }
2360  
2361  static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
2362  				 unsigned long end, unsigned int flags,
2363  				 struct page **pages, int *nr)
2364  {
2365  	unsigned long fault_pfn;
2366  	int nr_start = *nr;
2367  
2368  	fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
2369  	if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr))
2370  		return 0;
2371  
2372  	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
2373  		undo_dev_pagemap(nr, nr_start, flags, pages);
2374  		return 0;
2375  	}
2376  	return 1;
2377  }
2378  #else
2379  static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
2380  				 unsigned long end, unsigned int flags,
2381  				 struct page **pages, int *nr)
2382  {
2383  	BUILD_BUG();
2384  	return 0;
2385  }
2386  
2387  static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
2388  				 unsigned long end, unsigned int flags,
2389  				 struct page **pages, int *nr)
2390  {
2391  	BUILD_BUG();
2392  	return 0;
2393  }
2394  #endif
2395  
2396  static int record_subpages(struct page *page, unsigned long addr,
2397  			   unsigned long end, struct page **pages)
2398  {
2399  	int nr;
2400  
2401  	for (nr = 0; addr != end; addr += PAGE_SIZE)
2402  		pages[nr++] = page++;
2403  
2404  	return nr;
2405  }
2406  
2407  #ifdef CONFIG_ARCH_HAS_HUGEPD
2408  static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
2409  				      unsigned long sz)
2410  {
2411  	unsigned long __boundary = (addr + sz) & ~(sz-1);
2412  	return (__boundary - 1 < end - 1) ? __boundary : end;
2413  }
2414  
2415  static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
2416  		       unsigned long end, unsigned int flags,
2417  		       struct page **pages, int *nr)
2418  {
2419  	unsigned long pte_end;
2420  	struct page *head, *page;
2421  	pte_t pte;
2422  	int refs;
2423  
2424  	pte_end = (addr + sz) & ~(sz-1);
2425  	if (pte_end < end)
2426  		end = pte_end;
2427  
2428  	pte = READ_ONCE(*ptep);
2429  
2430  	if (!pte_access_permitted(pte, flags & FOLL_WRITE))
2431  		return 0;
2432  
2433  	/* hugepages are never "special" */
2434  	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
2435  
2436  	head = pte_page(pte);
2437  	page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
2438  	refs = record_subpages(page, addr, end, pages + *nr);
2439  
2440  	head = try_grab_compound_head(head, refs, flags);
2441  	if (!head)
2442  		return 0;
2443  
2444  	if (unlikely(pte_val(pte) != pte_val(*ptep))) {
2445  		put_compound_head(head, refs, flags);
2446  		return 0;
2447  	}
2448  
2449  	*nr += refs;
2450  	SetPageReferenced(head);
2451  	return 1;
2452  }
2453  
2454  static int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
2455  		unsigned int pdshift, unsigned long end, unsigned int flags,
2456  		struct page **pages, int *nr)
2457  {
2458  	pte_t *ptep;
2459  	unsigned long sz = 1UL << hugepd_shift(hugepd);
2460  	unsigned long next;
2461  
2462  	ptep = hugepte_offset(hugepd, addr, pdshift);
2463  	do {
2464  		next = hugepte_addr_end(addr, end, sz);
2465  		if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr))
2466  			return 0;
2467  	} while (ptep++, addr = next, addr != end);
2468  
2469  	return 1;
2470  }
2471  #else
2472  static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
2473  		unsigned int pdshift, unsigned long end, unsigned int flags,
2474  		struct page **pages, int *nr)
2475  {
2476  	return 0;
2477  }
2478  #endif /* CONFIG_ARCH_HAS_HUGEPD */
2479  
2480  static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
2481  			unsigned long end, unsigned int flags,
2482  			struct page **pages, int *nr)
2483  {
2484  	struct page *head, *page;
2485  	int refs;
2486  
2487  	if (!pmd_access_permitted(orig, flags & FOLL_WRITE))
2488  		return 0;
2489  
2490  	if (pmd_devmap(orig)) {
2491  		if (unlikely(flags & FOLL_LONGTERM))
2492  			return 0;
2493  		return __gup_device_huge_pmd(orig, pmdp, addr, end, flags,
2494  					     pages, nr);
2495  	}
2496  
2497  	page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
2498  	refs = record_subpages(page, addr, end, pages + *nr);
2499  
2500  	head = try_grab_compound_head(pmd_page(orig), refs, flags);
2501  	if (!head)
2502  		return 0;
2503  
2504  	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
2505  		put_compound_head(head, refs, flags);
2506  		return 0;
2507  	}
2508  
2509  	*nr += refs;
2510  	SetPageReferenced(head);
2511  	return 1;
2512  }
2513  
2514  static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
2515  			unsigned long end, unsigned int flags,
2516  			struct page **pages, int *nr)
2517  {
2518  	struct page *head, *page;
2519  	int refs;
2520  
2521  	if (!pud_access_permitted(orig, flags & FOLL_WRITE))
2522  		return 0;
2523  
2524  	if (pud_devmap(orig)) {
2525  		if (unlikely(flags & FOLL_LONGTERM))
2526  			return 0;
2527  		return __gup_device_huge_pud(orig, pudp, addr, end, flags,
2528  					     pages, nr);
2529  	}
2530  
2531  	page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
2532  	refs = record_subpages(page, addr, end, pages + *nr);
2533  
2534  	head = try_grab_compound_head(pud_page(orig), refs, flags);
2535  	if (!head)
2536  		return 0;
2537  
2538  	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
2539  		put_compound_head(head, refs, flags);
2540  		return 0;
2541  	}
2542  
2543  	*nr += refs;
2544  	SetPageReferenced(head);
2545  	return 1;
2546  }
2547  
2548  static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
2549  			unsigned long end, unsigned int flags,
2550  			struct page **pages, int *nr)
2551  {
2552  	int refs;
2553  	struct page *head, *page;
2554  
2555  	if (!pgd_access_permitted(orig, flags & FOLL_WRITE))
2556  		return 0;
2557  
2558  	BUILD_BUG_ON(pgd_devmap(orig));
2559  
2560  	page = pgd_page(orig) + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
2561  	refs = record_subpages(page, addr, end, pages + *nr);
2562  
2563  	head = try_grab_compound_head(pgd_page(orig), refs, flags);
2564  	if (!head)
2565  		return 0;
2566  
2567  	if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
2568  		put_compound_head(head, refs, flags);
2569  		return 0;
2570  	}
2571  
2572  	*nr += refs;
2573  	SetPageReferenced(head);
2574  	return 1;
2575  }
2576  
2577  static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
2578  		unsigned int flags, struct page **pages, int *nr)
2579  {
2580  	unsigned long next;
2581  	pmd_t *pmdp;
2582  
2583  	pmdp = pmd_offset(&pud, addr);
2584  	do {
2585  		pmd_t pmd = READ_ONCE(*pmdp);
2586  
2587  		next = pmd_addr_end(addr, end);
2588  		if (!pmd_present(pmd))
2589  			return 0;
2590  
2591  		if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) ||
2592  			     pmd_devmap(pmd))) {
2593  			/*
2594  			 * NUMA hinting faults need to be handled in the GUP
2595  			 * slowpath for accounting purposes and so that they
2596  			 * can be serialised against THP migration.
2597  			 */
2598  			if (pmd_protnone(pmd))
2599  				return 0;
2600  
2601  			if (!gup_huge_pmd(pmd, pmdp, addr, next, flags,
2602  				pages, nr))
2603  				return 0;
2604  
2605  		} else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
2606  			/*
2607  			 * Architectures can have different formats for the
2608  			 * hugetlbfs pmd and the THP pmd.
2609  			 */
2610  			if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
2611  					 PMD_SHIFT, next, flags, pages, nr))
2612  				return 0;
2613  		} else if (!gup_pte_range(pmd, addr, next, flags, pages, nr))
2614  			return 0;
2615  	} while (pmdp++, addr = next, addr != end);
2616  
2617  	return 1;
2618  }
2619  
2620  static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end,
2621  			 unsigned int flags, struct page **pages, int *nr)
2622  {
2623  	unsigned long next;
2624  	pud_t *pudp;
2625  
2626  	pudp = pud_offset(&p4d, addr);
2627  	do {
2628  		pud_t pud = READ_ONCE(*pudp);
2629  
2630  		next = pud_addr_end(addr, end);
2631  		if (unlikely(!pud_present(pud)))
2632  			return 0;
2633  		if (unlikely(pud_huge(pud))) {
2634  			if (!gup_huge_pud(pud, pudp, addr, next, flags,
2635  					  pages, nr))
2636  				return 0;
2637  		} else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
2638  			if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
2639  					 PUD_SHIFT, next, flags, pages, nr))
2640  				return 0;
2641  		} else if (!gup_pmd_range(pud, addr, next, flags, pages, nr))
2642  			return 0;
2643  	} while (pudp++, addr = next, addr != end);
2644  
2645  	return 1;
2646  }
2647  
2648  static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end,
2649  			 unsigned int flags, struct page **pages, int *nr)
2650  {
2651  	unsigned long next;
2652  	p4d_t *p4dp;
2653  
2654  	p4dp = p4d_offset(&pgd, addr);
2655  	do {
2656  		p4d_t p4d = READ_ONCE(*p4dp);
2657  
2658  		next = p4d_addr_end(addr, end);
2659  		if (p4d_none(p4d))
2660  			return 0;
2661  		BUILD_BUG_ON(p4d_huge(p4d));
2662  		if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) {
2663  			if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr,
2664  					 P4D_SHIFT, next, flags, pages, nr))
2665  				return 0;
2666  		} else if (!gup_pud_range(p4d, addr, next, flags, pages, nr))
2667  			return 0;
2668  	} while (p4dp++, addr = next, addr != end);
2669  
2670  	return 1;
2671  }
2672  
2673  static void gup_pgd_range(unsigned long addr, unsigned long end,
2674  		unsigned int flags, struct page **pages, int *nr)
2675  {
2676  	unsigned long next;
2677  	pgd_t *pgdp;
2678  
2679  	pgdp = pgd_offset(current->mm, addr);
2680  	do {
2681  		pgd_t pgd = READ_ONCE(*pgdp);
2682  
2683  		next = pgd_addr_end(addr, end);
2684  		if (pgd_none(pgd))
2685  			return;
2686  		if (unlikely(pgd_huge(pgd))) {
2687  			if (!gup_huge_pgd(pgd, pgdp, addr, next, flags,
2688  					  pages, nr))
2689  				return;
2690  		} else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
2691  			if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
2692  					 PGDIR_SHIFT, next, flags, pages, nr))
2693  				return;
2694  		} else if (!gup_p4d_range(pgd, addr, next, flags, pages, nr))
2695  			return;
2696  	} while (pgdp++, addr = next, addr != end);
2697  }
2698  #else
2699  static inline void gup_pgd_range(unsigned long addr, unsigned long end,
2700  		unsigned int flags, struct page **pages, int *nr)
2701  {
2702  }
2703  #endif /* CONFIG_HAVE_FAST_GUP */
2704  
2705  #ifndef gup_fast_permitted
2706  /*
2707   * Check if it's allowed to use get_user_pages_fast_only() for the range, or
2708   * we need to fall back to the slow version:
2709   */
2710  static bool gup_fast_permitted(unsigned long start, unsigned long end)
2711  {
2712  	return true;
2713  }
2714  #endif
2715  
2716  static int __gup_longterm_unlocked(unsigned long start, int nr_pages,
2717  				   unsigned int gup_flags, struct page **pages)
2718  {
2719  	int ret;
2720  
2721  	/*
2722  	 * FIXME: FOLL_LONGTERM does not work with
2723  	 * get_user_pages_unlocked() (see comments in that function)
2724  	 */
2725  	if (gup_flags & FOLL_LONGTERM) {
2726  		mmap_read_lock(current->mm);
2727  		ret = __gup_longterm_locked(current, current->mm,
2728  					    start, nr_pages,
2729  					    pages, NULL, gup_flags);
2730  		mmap_read_unlock(current->mm);
2731  	} else {
2732  		ret = get_user_pages_unlocked(start, nr_pages,
2733  					      pages, gup_flags);
2734  	}
2735  
2736  	return ret;
2737  }
2738  
2739  static int internal_get_user_pages_fast(unsigned long start, int nr_pages,
2740  					unsigned int gup_flags,
2741  					struct page **pages)
2742  {
2743  	unsigned long addr, len, end;
2744  	unsigned long flags;
2745  	int nr_pinned = 0, ret = 0;
2746  
2747  	if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM |
2748  				       FOLL_FORCE | FOLL_PIN | FOLL_GET |
2749  				       FOLL_FAST_ONLY)))
2750  		return -EINVAL;
2751  
2752  	if (!(gup_flags & FOLL_FAST_ONLY))
2753  		might_lock_read(&current->mm->mmap_lock);
2754  
2755  	start = untagged_addr(start) & PAGE_MASK;
2756  	addr = start;
2757  	len = (unsigned long) nr_pages << PAGE_SHIFT;
2758  	end = start + len;
2759  
2760  	if (end <= start)
2761  		return 0;
2762  	if (unlikely(!access_ok((void __user *)start, len)))
2763  		return -EFAULT;
2764  
2765  	/*
2766  	 * The FAST_GUP case requires FOLL_WRITE even for pure reads,
2767  	 * because get_user_pages() may need to cause an early COW in
2768  	 * order to avoid confusing the normal COW routines. So only
2769  	 * targets that are already writable are safe to do by just
2770  	 * looking at the page tables.
2771  	 *
2772  	 * NOTE! With FOLL_FAST_ONLY we allow read-only gup_fast() here,
2773  	 * because there is no slow path to fall back on. But you'd
2774  	 * better be careful about possible COW pages - you'll get _a_
2775  	 * COW page, but not necessarily the one you intended to get
2776  	 * depending on what COW event happens after this. COW may break
2777  	 * the page copy in a random direction.
2778  	 *
2779  	 * Disable interrupts. The nested form is used, in order to allow
2780  	 * full, general purpose use of this routine.
2781  	 *
2782  	 * With interrupts disabled, we block page table pages from being
2783  	 * freed from under us. See struct mmu_table_batch comments in
2784  	 * include/asm-generic/tlb.h for more details.
2785  	 *
2786  	 * We do not adopt an rcu_read_lock(.) here as we also want to
2787  	 * block IPIs that come from THPs splitting.
2788  	 */
2789  	if (IS_ENABLED(CONFIG_HAVE_FAST_GUP) && gup_fast_permitted(start, end)) {
2790  		unsigned long fast_flags = gup_flags;
2791  		if (!(gup_flags & FOLL_FAST_ONLY))
2792  			fast_flags |= FOLL_WRITE;
2793  
2794  		local_irq_save(flags);
2795  		gup_pgd_range(addr, end, fast_flags, pages, &nr_pinned);
2796  		local_irq_restore(flags);
2797  		ret = nr_pinned;
2798  	}
2799  
2800  	if (nr_pinned < nr_pages && !(gup_flags & FOLL_FAST_ONLY)) {
2801  		/* Try to get the remaining pages with get_user_pages */
2802  		start += nr_pinned << PAGE_SHIFT;
2803  		pages += nr_pinned;
2804  
2805  		ret = __gup_longterm_unlocked(start, nr_pages - nr_pinned,
2806  					      gup_flags, pages);
2807  
2808  		/* Have to be a bit careful with return values */
2809  		if (nr_pinned > 0) {
2810  			if (ret < 0)
2811  				ret = nr_pinned;
2812  			else
2813  				ret += nr_pinned;
2814  		}
2815  	}
2816  
2817  	return ret;
2818  }
2819  /**
2820   * get_user_pages_fast_only() - pin user pages in memory
2821   * @start:      starting user address
2822   * @nr_pages:   number of pages from start to pin
2823   * @gup_flags:  flags modifying pin behaviour
2824   * @pages:      array that receives pointers to the pages pinned.
2825   *              Should be at least nr_pages long.
2826   *
2827   * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
2828   * the regular GUP.
2829   * Note a difference with get_user_pages_fast: this always returns the
2830   * number of pages pinned, or 0 if no pages were pinned.
2831   *
2832   * If the architecture does not support this function, simply return with no
2833   * pages pinned.
2834   *
2835   * Careful, careful! COW breaking can go either way, so a non-write
2836   * access can get ambiguous page results. If you call this function without
2837   * 'write' set, you'd better be sure that you're ok with that ambiguity.
2838   */
2839  int get_user_pages_fast_only(unsigned long start, int nr_pages,
2840  			     unsigned int gup_flags, struct page **pages)
2841  {
2842  	int nr_pinned;
2843  	/*
2844  	 * Internally (within mm/gup.c), gup fast variants must set FOLL_GET,
2845  	 * because gup fast is always a "pin with a +1 page refcount" request.
2846  	 *
2847  	 * FOLL_FAST_ONLY is required in order to match the API description of
2848  	 * this routine: no fall back to regular ("slow") GUP.
2849  	 */
2850  	gup_flags |= FOLL_GET | FOLL_FAST_ONLY;
2851  
2852  	nr_pinned = internal_get_user_pages_fast(start, nr_pages, gup_flags,
2853  						 pages);
2854  
2855  	/*
2856  	 * As specified in the API description above, this routine is not
2857  	 * allowed to return negative values. However, the common core
2858  	 * routine internal_get_user_pages_fast() *can* return -errno.
2859  	 * Therefore, correct for that here:
2860  	 */
2861  	if (nr_pinned < 0)
2862  		nr_pinned = 0;
2863  
2864  	return nr_pinned;
2865  }
2866  EXPORT_SYMBOL_GPL(get_user_pages_fast_only);
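/*
 * Illustrative sketch (not part of the original file): because it never
 * falls back to the slow path, get_user_pages_fast_only() above can be
 * called where sleeping or taking mmap_lock is not possible; the caller must
 * treat a return of 0 as "try again later from a sleepable context". The
 * names addr and page are hypothetical.
 *
 *	if (get_user_pages_fast_only(addr, 1, FOLL_WRITE, &page) != 1)
 *		return -EAGAIN;
 *	... use the page ...
 *	put_page(page);
 */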
2867  
2868  /**
2869   * get_user_pages_fast() - pin user pages in memory
2870   * @start:      starting user address
2871   * @nr_pages:   number of pages from start to pin
2872   * @gup_flags:  flags modifying pin behaviour
2873   * @pages:      array that receives pointers to the pages pinned.
2874   *              Should be at least nr_pages long.
2875   *
2876   * Attempt to pin user pages in memory without taking mm->mmap_lock.
2877   * If not successful, it will fall back to taking the lock and
2878   * calling get_user_pages().
2879   *
2880   * Returns number of pages pinned. This may be fewer than the number requested.
2881   * If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns
2882   * -errno.
2883   */
2884  int get_user_pages_fast(unsigned long start, int nr_pages,
2885  			unsigned int gup_flags, struct page **pages)
2886  {
2887  	/*
2888  	 * FOLL_PIN must only be set internally by the pin_user_pages*() APIs,
2889  	 * never directly by the caller, so enforce that:
2890  	 */
2891  	if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
2892  		return -EINVAL;
2893  
2894  	/*
2895  	 * The caller may or may not have explicitly set FOLL_GET; either way is
2896  	 * OK. However, internally (within mm/gup.c), gup fast variants must set
2897  	 * FOLL_GET, because gup fast is always a "pin with a +1 page refcount"
2898  	 * request.
2899  	 */
2900  	gup_flags |= FOLL_GET;
2901  	return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
2902  }
2903  EXPORT_SYMBOL_GPL(get_user_pages_fast);
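/*
 * Illustrative sketch (not part of the original file): the common
 * get_user_pages_fast() pattern. The caller does no mmap_lock handling; the
 * slow-path fallback takes and drops the lock internally. buf, npages, pages
 * and i are hypothetical names.
 *
 *	got = get_user_pages_fast(buf, npages, FOLL_WRITE, pages);
 *	if (got < 0)
 *		return got;
 *	if (got < npages) {
 *		... only pages[0..got-1] were pinned; release them and bail ...
 *	}
 *	... use the pages, then drop each reference ...
 *	for (i = 0; i < got; i++)
 *		put_page(pages[i]);
 */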
2904  
2905  /**
2906   * pin_user_pages_fast() - pin user pages in memory without taking locks
2907   *
2908   * @start:      starting user address
2909   * @nr_pages:   number of pages from start to pin
2910   * @gup_flags:  flags modifying pin behaviour
2911   * @pages:      array that receives pointers to the pages pinned.
2912   *              Should be at least nr_pages long.
2913   *
2914   * Nearly the same as get_user_pages_fast(), except that FOLL_PIN is set. See
2915   * get_user_pages_fast() for documentation on the function arguments, because
2916   * the arguments here are identical.
2917   *
2918   * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
2919   * see Documentation/core-api/pin_user_pages.rst for further details.
2920   */
2921  int pin_user_pages_fast(unsigned long start, int nr_pages,
2922  			unsigned int gup_flags, struct page **pages)
2923  {
2924  	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
2925  	if (WARN_ON_ONCE(gup_flags & FOLL_GET))
2926  		return -EINVAL;
2927  
2928  	gup_flags |= FOLL_PIN;
2929  	return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
2930  }
2931  EXPORT_SYMBOL_GPL(pin_user_pages_fast);
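/*
 * Illustrative sketch (not part of the original file): a FOLL_PIN user, for
 * example a driver preparing a user buffer for DMA. Pages pinned this way
 * must be released with unpin_user_page()/unpin_user_pages(), never with
 * put_page(). buf, npages and pages are hypothetical names.
 *
 *	pinned = pin_user_pages_fast(buf, npages, FOLL_WRITE, pages);
 *	if (pinned < 0)
 *		return pinned;
 *	... map the pages for DMA and run the transfer ...
 *	unpin_user_pages(pages, pinned);
 */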
2932  
2933  /*
2934   * This is the FOLL_PIN equivalent of get_user_pages_fast_only(). Behavior
2935   * is the same, except that this one sets FOLL_PIN instead of FOLL_GET.
2936   *
2937   * The API rules are the same, too: no negative values may be returned.
2938   */
2939  int pin_user_pages_fast_only(unsigned long start, int nr_pages,
2940  			     unsigned int gup_flags, struct page **pages)
2941  {
2942  	int nr_pinned;
2943  
2944  	/*
2945  	 * FOLL_GET and FOLL_PIN are mutually exclusive. Note that the API
2946  	 * rules require returning 0, rather than -errno:
2947  	 */
2948  	if (WARN_ON_ONCE(gup_flags & FOLL_GET))
2949  		return 0;
2950  	/*
2951  	 * FOLL_FAST_ONLY is required in order to match the API description of
2952  	 * this routine: no fall back to regular ("slow") GUP.
2953  	 */
2954  	gup_flags |= (FOLL_PIN | FOLL_FAST_ONLY);
2955  	nr_pinned = internal_get_user_pages_fast(start, nr_pages, gup_flags,
2956  						 pages);
2957  	/*
2958  	 * This routine is not allowed to return negative values. However,
2959  	 * internal_get_user_pages_fast() *can* return -errno. Therefore,
2960  	 * correct for that here:
2961  	 */
2962  	if (nr_pinned < 0)
2963  		nr_pinned = 0;
2964  
2965  	return nr_pinned;
2966  }
2967  EXPORT_SYMBOL_GPL(pin_user_pages_fast_only);
2968  
2969  /**
2970   * pin_user_pages_remote() - pin pages of a remote process (task != current)
2971   *
2972   * @tsk:	the task_struct to use for page fault accounting, or
2973   *		NULL if faults are not to be recorded.
2974   * @mm:		mm_struct of target mm
2975   * @start:	starting user address
2976   * @nr_pages:	number of pages from start to pin
2977   * @gup_flags:	flags modifying lookup behaviour
2978   * @pages:	array that receives pointers to the pages pinned.
2979   *		Should be at least nr_pages long. Or NULL, if caller
2980   *		only intends to ensure the pages are faulted in.
2981   * @vmas:	array of pointers to vmas corresponding to each page.
2982   *		Or NULL if the caller does not require them.
2983   * @locked:	pointer to lock flag indicating whether lock is held and
2984   *		subsequently whether VM_FAULT_RETRY functionality can be
2985   *		utilised. Lock must initially be held.
2986   *
2987   * Nearly the same as get_user_pages_remote(), except that FOLL_PIN is set. See
2988   * get_user_pages_remote() for documentation on the function arguments, because
2989   * the arguments here are identical.
2990   *
2991   * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
2992   * see Documentation/core-api/pin_user_pages.rst for details.
2993   */
2994  long pin_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
2995  			   unsigned long start, unsigned long nr_pages,
2996  			   unsigned int gup_flags, struct page **pages,
2997  			   struct vm_area_struct **vmas, int *locked)
2998  {
2999  	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
3000  	if (WARN_ON_ONCE(gup_flags & FOLL_GET))
3001  		return -EINVAL;
3002  
3003  	gup_flags |= FOLL_PIN;
3004  	return __get_user_pages_remote(tsk, mm, start, nr_pages, gup_flags,
3005  				       pages, vmas, locked);
3006  }
3007  EXPORT_SYMBOL(pin_user_pages_remote);
3008  
3009  /**
3010   * pin_user_pages() - pin user pages in memory for use by other devices
3011   *
3012   * @start:	starting user address
3013   * @nr_pages:	number of pages from start to pin
3014   * @gup_flags:	flags modifying lookup behaviour
3015   * @pages:	array that receives pointers to the pages pinned.
3016   *		Should be at least nr_pages long. Or NULL, if caller
3017   *		only intends to ensure the pages are faulted in.
3018   * @vmas:	array of pointers to vmas corresponding to each page.
3019   *		Or NULL if the caller does not require them.
3020   *
3021   * Nearly the same as get_user_pages(), except that FOLL_TOUCH is not set, and
3022   * FOLL_PIN is set.
3023   *
3024   * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
3025   * see Documentation/core-api/pin_user_pages.rst for details.
3026   */
3027  long pin_user_pages(unsigned long start, unsigned long nr_pages,
3028  		    unsigned int gup_flags, struct page **pages,
3029  		    struct vm_area_struct **vmas)
3030  {
3031  	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
3032  	if (WARN_ON_ONCE(gup_flags & FOLL_GET))
3033  		return -EINVAL;
3034  
3035  	gup_flags |= FOLL_PIN;
3036  	return __gup_longterm_locked(current, current->mm, start, nr_pages,
3037  				     pages, vmas, gup_flags);
3038  }
3039  EXPORT_SYMBOL(pin_user_pages);
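/*
 * Illustrative sketch (not part of the original file): pin_user_pages()
 * combined with FOLL_LONGTERM, as a driver might use when registering a
 * long-lived buffer. mmap_lock must be held by the caller, and (assuming the
 * unpin_user_pages_dirty_lock() helper that accompanies the other unpin
 * routines) teardown can dirty and unpin in one call. buf, npages and pages
 * are hypothetical names.
 *
 *	mmap_read_lock(current->mm);
 *	pinned = pin_user_pages(buf, npages, FOLL_WRITE | FOLL_LONGTERM,
 *				pages, NULL);
 *	mmap_read_unlock(current->mm);
 *	if (pinned < 0)
 *		return pinned;
 *	... long-lived use of the buffer ...
 *	unpin_user_pages_dirty_lock(pages, pinned, true);
 */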
3040  
3041  /*
3042   * pin_user_pages_unlocked() is the FOLL_PIN variant of
3043   * get_user_pages_unlocked(). Behavior is the same, except that this one sets
3044   * FOLL_PIN and rejects FOLL_GET.
3045   */
3046  long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
3047  			     struct page **pages, unsigned int gup_flags)
3048  {
3049  	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
3050  	if (WARN_ON_ONCE(gup_flags & FOLL_GET))
3051  		return -EINVAL;
3052  
3053  	gup_flags |= FOLL_PIN;
3054  	return get_user_pages_unlocked(start, nr_pages, pages, gup_flags);
3055  }
3056  EXPORT_SYMBOL(pin_user_pages_unlocked);
3057  
3058  /*
3059   * pin_user_pages_locked() is the FOLL_PIN variant of get_user_pages_locked().
3060   * Behavior is the same, except that this one sets FOLL_PIN and rejects
3061   * FOLL_GET.
3062   */
3063  long pin_user_pages_locked(unsigned long start, unsigned long nr_pages,
3064  			   unsigned int gup_flags, struct page **pages,
3065  			   int *locked)
3066  {
3067  	/*
3068  	 * FIXME: Current FOLL_LONGTERM behavior is incompatible with
3069  	 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
3070  	 * vmas.  As there are no users of this flag in this call we simply
3071  	 * disallow this option for now.
3072  	 */
3073  	if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
3074  		return -EINVAL;
3075  
3076  	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
3077  	if (WARN_ON_ONCE(gup_flags & FOLL_GET))
3078  		return -EINVAL;
3079  
3080  	gup_flags |= FOLL_PIN;
3081  	return __get_user_pages_locked(current, current->mm, start, nr_pages,
3082  				       pages, NULL, locked,
3083  				       gup_flags | FOLL_TOUCH);
3084  }
3085  EXPORT_SYMBOL(pin_user_pages_locked);
3086