/* xref: /openbmc/linux/mm/gup.c (revision 9ee0034b8f49aaaa7e7c2da8db1038915db99c19) */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>

#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/hugetlb.h>

#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "internal.h"

static struct page *no_page_table(struct vm_area_struct *vma,
		unsigned int flags)
{
	/*
	 * When core dumping an enormous anonymous area that nobody
	 * has touched so far, we don't want to allocate unnecessary pages or
	 * page tables.  Return error instead of NULL to skip handle_mm_fault,
	 * then get_dump_page() will return NULL to leave a hole in the dump.
	 * But we can only make this optimization where a hole would surely
	 * be zero-filled if handle_mm_fault() actually did handle it.
	 */
	if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault))
		return ERR_PTR(-EFAULT);
	return NULL;
}

static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
		pte_t *pte, unsigned int flags)
{
	/* No page to get reference */
	if (flags & FOLL_GET)
		return -EFAULT;

	if (flags & FOLL_TOUCH) {
		pte_t entry = *pte;

		if (flags & FOLL_WRITE)
			entry = pte_mkdirty(entry);
		entry = pte_mkyoung(entry);

		if (!pte_same(*pte, entry)) {
			set_pte_at(vma->vm_mm, address, pte, entry);
			update_mmu_cache(vma, address, pte);
		}
	}

	/* Proper page table entry exists, but no corresponding struct page */
	return -EEXIST;
}

static struct page *follow_page_pte(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd, unsigned int flags)
{
	struct mm_struct *mm = vma->vm_mm;
	struct dev_pagemap *pgmap = NULL;
	struct page *page;
	spinlock_t *ptl;
	pte_t *ptep, pte;

retry:
	if (unlikely(pmd_bad(*pmd)))
		return no_page_table(vma, flags);

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!pte_present(pte)) {
		swp_entry_t entry;
		/*
		 * KSM's break_ksm() relies upon recognizing a ksm page
		 * even while it is being migrated, so for that case we
		 * need migration_entry_wait().
		 */
		if (likely(!(flags & FOLL_MIGRATION)))
			goto no_page;
		if (pte_none(pte))
			goto no_page;
		entry = pte_to_swp_entry(pte);
		if (!is_migration_entry(entry))
			goto no_page;
		pte_unmap_unlock(ptep, ptl);
		migration_entry_wait(mm, pmd, address);
		goto retry;
	}
	if ((flags & FOLL_NUMA) && pte_protnone(pte))
		goto no_page;
	if ((flags & FOLL_WRITE) && !pte_write(pte)) {
		pte_unmap_unlock(ptep, ptl);
		return NULL;
	}

	page = vm_normal_page(vma, address, pte);
	if (!page && pte_devmap(pte) && (flags & FOLL_GET)) {
		/*
		 * Only return device mapping pages in the FOLL_GET case since
		 * they are only valid while holding the pgmap reference.
		 */
		pgmap = get_dev_pagemap(pte_pfn(pte), NULL);
		if (pgmap)
			page = pte_page(pte);
		else
			goto no_page;
	} else if (unlikely(!page)) {
		if (flags & FOLL_DUMP) {
			/* Avoid special (like zero) pages in core dumps */
			page = ERR_PTR(-EFAULT);
			goto out;
		}

		if (is_zero_pfn(pte_pfn(pte))) {
			page = pte_page(pte);
		} else {
			int ret;

			ret = follow_pfn_pte(vma, address, ptep, flags);
			page = ERR_PTR(ret);
			goto out;
		}
	}

	if (flags & FOLL_SPLIT && PageTransCompound(page)) {
		int ret;
		get_page(page);
		pte_unmap_unlock(ptep, ptl);
		lock_page(page);
		ret = split_huge_page(page);
		unlock_page(page);
		put_page(page);
		if (ret)
			return ERR_PTR(ret);
		goto retry;
	}

	if (flags & FOLL_GET) {
		get_page(page);

		/* drop the pgmap reference now that we hold the page */
		if (pgmap) {
			put_dev_pagemap(pgmap);
			pgmap = NULL;
		}
	}
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to use
		 * mark_page_accessed().
		 */
		mark_page_accessed(page);
	}
	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		/* Do not mlock pte-mapped THP */
		if (PageTransCompound(page))
			goto out;

		/*
		 * The preliminary mapping check is mainly to avoid the
		 * pointless overhead of lock_page on the ZERO_PAGE
		 * which might bounce very badly if there is contention.
		 *
		 * If the page is already locked, we don't need to
		 * handle it now - vmscan will handle it later if and
		 * when it attempts to reclaim the page.
		 */
		if (page->mapping && trylock_page(page)) {
			lru_add_drain();  /* push cached pages to LRU */
			/*
			 * Because we lock page here, and migration is
			 * blocked by the pte's page reference, and we
			 * know the page is still mapped, we don't even
			 * need to check for file-cache page truncation.
			 */
			mlock_vma_page(page);
			unlock_page(page);
		}
	}
out:
	pte_unmap_unlock(ptep, ptl);
	return page;
no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return NULL;
	return no_page_table(vma, flags);
}

/**
 * follow_page_mask - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 * @page_mask: on output, *page_mask is set according to the size of the page
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * Returns the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      unsigned int *page_mask)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	*page_mask = 0;

	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
	if (!IS_ERR(page)) {
		BUG_ON(flags & FOLL_GET);
		return page;
	}

	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return no_page_table(vma, flags);

	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return no_page_table(vma, flags);
	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pud(mm, address, pud, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (unlikely(pud_bad(*pud)))
		return no_page_table(vma, flags);

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return no_page_table(vma, flags);
	if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pmd(mm, address, pmd, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
		return no_page_table(vma, flags);
	if (pmd_devmap(*pmd)) {
		ptl = pmd_lock(mm, pmd);
		page = follow_devmap_pmd(vma, address, pmd, flags);
		spin_unlock(ptl);
		if (page)
			return page;
	}
	if (likely(!pmd_trans_huge(*pmd)))
		return follow_page_pte(vma, address, pmd, flags);

	ptl = pmd_lock(mm, pmd);
	if (unlikely(!pmd_trans_huge(*pmd))) {
		spin_unlock(ptl);
		return follow_page_pte(vma, address, pmd, flags);
	}
	if (flags & FOLL_SPLIT) {
		int ret;
		page = pmd_page(*pmd);
		if (is_huge_zero_page(page)) {
			spin_unlock(ptl);
			ret = 0;
			split_huge_pmd(vma, pmd, address);
			if (pmd_trans_unstable(pmd))
				ret = -EBUSY;
		} else {
			get_page(page);
			spin_unlock(ptl);
			lock_page(page);
			ret = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			if (pmd_none(*pmd))
				return no_page_table(vma, flags);
		}

		return ret ? ERR_PTR(ret) :
			follow_page_pte(vma, address, pmd, flags);
	}

	page = follow_trans_huge_pmd(vma, address, pmd, flags);
	spin_unlock(ptl);
	*page_mask = HPAGE_PMD_NR - 1;
	return page;
}
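
/*
 * Most callers do not care about the page mask; a minimal wrapper in the
 * style of follow_page() from <linux/mm.h> (shown here only as a sketch)
 * simply discards it:
 *
 *	static inline struct page *follow_page(struct vm_area_struct *vma,
 *			unsigned long address, unsigned int foll_flags)
 *	{
 *		unsigned int unused_page_mask;
 *
 *		return follow_page_mask(vma, address, foll_flags,
 *					&unused_page_mask);
 *	}
 */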

static int get_gate_page(struct mm_struct *mm, unsigned long address,
		unsigned int gup_flags, struct vm_area_struct **vma,
		struct page **page)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret = -EFAULT;

	/* user gate pages are read-only */
	if (gup_flags & FOLL_WRITE)
		return -EFAULT;
	if (address > TASK_SIZE)
		pgd = pgd_offset_k(address);
	else
		pgd = pgd_offset_gate(mm, address);
	BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, address);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return -EFAULT;
	VM_BUG_ON(pmd_trans_huge(*pmd));
	pte = pte_offset_map(pmd, address);
	if (pte_none(*pte))
		goto unmap;
	*vma = get_gate_vma(mm);
	if (!page)
		goto out;
	*page = vm_normal_page(*vma, address, *pte);
	if (!*page) {
		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
			goto unmap;
		*page = pte_page(*pte);
	}
	get_page(*page);
out:
	ret = 0;
unmap:
	pte_unmap(pte);
	return ret;
}

/*
 * mmap_sem must be held on entry.  If @nonblocking != NULL and
 * *@flags does not include FOLL_NOWAIT, the mmap_sem may be released.
 * If it is, *@nonblocking will be set to 0 and -EBUSY returned.
 */
static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
		unsigned long address, unsigned int *flags, int *nonblocking)
{
	unsigned int fault_flags = 0;
	int ret;

	/* mlock all present pages, but do not fault in new pages */
	if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
		return -ENOENT;
	/* For mm_populate(), just skip the stack guard page. */
	if ((*flags & FOLL_POPULATE) &&
			(stack_guard_page_start(vma, address) ||
			 stack_guard_page_end(vma, address + PAGE_SIZE)))
		return -ENOENT;
	if (*flags & FOLL_WRITE)
		fault_flags |= FAULT_FLAG_WRITE;
	if (*flags & FOLL_REMOTE)
		fault_flags |= FAULT_FLAG_REMOTE;
	if (nonblocking)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY;
	if (*flags & FOLL_NOWAIT)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
	if (*flags & FOLL_TRIED) {
		VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY);
		fault_flags |= FAULT_FLAG_TRIED;
	}

	ret = handle_mm_fault(vma, address, fault_flags);
	if (ret & VM_FAULT_ERROR) {
		if (ret & VM_FAULT_OOM)
			return -ENOMEM;
		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
			return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT;
		if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
			return -EFAULT;
		BUG();
	}

	if (tsk) {
		if (ret & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}

	if (ret & VM_FAULT_RETRY) {
		if (nonblocking)
			*nonblocking = 0;
		return -EBUSY;
	}

	/*
	 * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
	 * necessary, even if maybe_mkwrite decided not to set pte_write. We
	 * can thus safely do subsequent page lookups as if they were reads.
	 * But only do so when looping for pte_write is futile: in some cases
	 * userspace may also want to write to the page just obtained, and a
	 * read fault here might prevent that (a read-only page might get
	 * re-COWed by a userspace write).
	 */
	if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
		*flags &= ~FOLL_WRITE;
	return 0;
}

static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
{
	vm_flags_t vm_flags = vma->vm_flags;
	int write = (gup_flags & FOLL_WRITE);
	int foreign = (gup_flags & FOLL_REMOTE);

	if (vm_flags & (VM_IO | VM_PFNMAP))
		return -EFAULT;

	if (write) {
		if (!(vm_flags & VM_WRITE)) {
			if (!(gup_flags & FOLL_FORCE))
				return -EFAULT;
			/*
			 * We used to let the write,force case do COW in a
			 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
			 * set a breakpoint in a read-only mapping of an
			 * executable, without corrupting the file (yet only
			 * when that file had been opened for writing!).
			 * Anon pages in shared mappings are surprising: now
			 * just reject them.
			 */
			if (!is_cow_mapping(vm_flags))
				return -EFAULT;
		}
	} else if (!(vm_flags & VM_READ)) {
		if (!(gup_flags & FOLL_FORCE))
			return -EFAULT;
		/*
		 * Is there actually any vma we can reach here which does not
		 * have VM_MAYREAD set?
		 */
		if (!(vm_flags & VM_MAYREAD))
			return -EFAULT;
	}
	/*
	 * gups are always data accesses, not instruction
	 * fetches, so execute=false here
	 */
	if (!arch_vma_access_permitted(vma, write, false, foreign))
		return -EFAULT;
	return 0;
}
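
/*
 * Illustrative consequence of the rules above (a sketch, assuming @tsk,
 * @mm and @addr are set up by the caller with mmap_sem held for read):
 * a forced write into a read-only private mapping is allowed, which is
 * how ptrace plants breakpoints in read-only text:
 *
 *	struct page *page;
 *	long ret;
 *
 *	ret = get_user_pages_remote(tsk, mm, addr, 1, 1, 1, &page, NULL);
 *
 * This succeeds for a CoW-able (VM_MAYWRITE, !VM_SHARED) mapping and
 * fails with -EFAULT for a read-only shared mapping.
 */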

/**
 * __get_user_pages() - pin user pages in memory
 * @tsk:	task_struct of target task
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 * @nonblocking: whether waiting for disk IO or mmap_sem contention
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held.  It may be released.  See below.
 *
 * __get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * __get_user_pages returns, and there may even be a completely different
 * page there in some cases (e.g. if mmapped pagecache has been invalidated
 * and subsequently re-faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
 * appropriate) must be called after the page is finished with, and
 * before put_page is called.
 *
 * If @nonblocking != NULL, __get_user_pages will not wait for disk IO
 * or mmap_sem contention, and if waiting is needed to pin all pages,
 * *@nonblocking will be set to 0.  Further, if @gup_flags does not
 * include FOLL_NOWAIT, the mmap_sem will be released via up_read() in
 * this case.
 *
 * A caller using such a combination of @nonblocking and @gup_flags
 * must therefore hold the mmap_sem for reading only, and recognize
 * when it's been released.  Otherwise, it must be held for either
 * reading or writing and will not be released.
 *
 * In most cases, get_user_pages or get_user_pages_fast should be used
 * instead of __get_user_pages. __get_user_pages should be used only if
 * you need some special @gup_flags.
 */
long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas, int *nonblocking)
{
	long i = 0;
	unsigned int page_mask;
	struct vm_area_struct *vma = NULL;

	if (!nr_pages)
		return 0;

	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));

	/*
	 * If FOLL_FORCE is set then do not force a full fault as the hinting
	 * fault information is unrelated to the reference behaviour of a task
	 * using the address space
	 */
	if (!(gup_flags & FOLL_FORCE))
		gup_flags |= FOLL_NUMA;

	do {
		struct page *page;
		unsigned int foll_flags = gup_flags;
		unsigned int page_increm;

		/* first iteration, or we have crossed a vma boundary */
		if (!vma || start >= vma->vm_end) {
			vma = find_extend_vma(mm, start);
			if (!vma && in_gate_area(mm, start)) {
				int ret;
				ret = get_gate_page(mm, start & PAGE_MASK,
						gup_flags, &vma,
						pages ? &pages[i] : NULL);
				if (ret)
					return i ? : ret;
				page_mask = 0;
				goto next_page;
			}

			if (!vma || check_vma_flags(vma, gup_flags))
				return i ? : -EFAULT;
			if (is_vm_hugetlb_page(vma)) {
				i = follow_hugetlb_page(mm, vma, pages, vmas,
						&start, &nr_pages, i,
						gup_flags);
				continue;
			}
		}
retry:
		/*
		 * If we have a pending SIGKILL, don't keep faulting pages and
		 * potentially allocating memory.
		 */
		if (unlikely(fatal_signal_pending(current)))
			return i ? i : -ERESTARTSYS;
		cond_resched();
		page = follow_page_mask(vma, start, foll_flags, &page_mask);
		if (!page) {
			int ret;
			ret = faultin_page(tsk, vma, start, &foll_flags,
					nonblocking);
			switch (ret) {
			case 0:
				goto retry;
			case -EFAULT:
			case -ENOMEM:
			case -EHWPOISON:
				return i ? i : ret;
			case -EBUSY:
				return i;
			case -ENOENT:
				goto next_page;
			}
			BUG();
		} else if (PTR_ERR(page) == -EEXIST) {
			/*
			 * Proper page table entry exists, but no corresponding
			 * struct page.
			 */
			goto next_page;
		} else if (IS_ERR(page)) {
			return i ? i : PTR_ERR(page);
		}
		if (pages) {
			pages[i] = page;
			flush_anon_page(vma, page, start);
			flush_dcache_page(page);
			page_mask = 0;
		}
next_page:
		if (vmas) {
			vmas[i] = vma;
			page_mask = 0;
		}
		page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
		if (page_increm > nr_pages)
			page_increm = nr_pages;
		i += page_increm;
		start += page_increm * PAGE_SIZE;
		nr_pages -= page_increm;
	} while (nr_pages);
	return i;
}
EXPORT_SYMBOL(__get_user_pages);
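
/*
 * Illustrative caller (a sketch, with hypothetical @mm, @start and
 * @nr_pages): prefault a range while allowing mmap_sem to be dropped
 * when a fault has to sleep:
 *
 *	int nonblocking = 1;
 *	long got;
 *
 *	down_read(&mm->mmap_sem);
 *	got = __get_user_pages(current, mm, start, nr_pages, FOLL_TOUCH,
 *			       NULL, NULL, &nonblocking);
 *	if (nonblocking)
 *		up_read(&mm->mmap_sem);
 *
 * If nonblocking comes back 0, __get_user_pages() already dropped
 * mmap_sem while waiting, so there is nothing left to unlock.
 */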

bool vma_permits_fault(struct vm_area_struct *vma, unsigned int fault_flags)
{
	bool write   = !!(fault_flags & FAULT_FLAG_WRITE);
	bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
	vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;

	if (!(vm_flags & vma->vm_flags))
		return false;

	/*
	 * The architecture might have a hardware protection
	 * mechanism other than read/write that can deny access.
	 *
	 * gup always represents data access, not instruction
	 * fetches, so execute=false here:
	 */
	if (!arch_vma_access_permitted(vma, write, false, foreign))
		return false;

	return true;
}

/*
 * fixup_user_fault() - manually resolve a user page fault
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @address:	user address
 * @fault_flags:flags to pass down to handle_mm_fault()
 * @unlocked:	did we unlock the mmap_sem while retrying, maybe NULL if caller
 *		does not allow retry
 *
 * This is meant for the specific scenario where, for locking reasons, we
 * try to access user memory in atomic context (within a pagefault_disable()
 * section), that access returns -EFAULT, and we want to resolve the user
 * fault before trying again.
 *
 * Typically this is meant to be used by the futex code.
 *
 * The main difference from get_user_pages() is that this function will
 * unconditionally call handle_mm_fault(), which will in turn perform all the
 * necessary SW fixup of the dirty and young bits in the PTE, while
 * get_user_pages() only guarantees to update these in the struct page.
 *
 * This is important for some architectures where those bits also gate the
 * access permission to the page because they are maintained in software.  On
 * such architectures, gup() will not be enough to make a subsequent access
 * succeed.
 *
 * This function does not return with an unlocked mmap_sem, so it does not
 * have the same semantics wrt @mm->mmap_sem as filemap_fault() does.
 */
int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long address, unsigned int fault_flags,
		     bool *unlocked)
{
	struct vm_area_struct *vma;
	int ret, major = 0;

	if (unlocked)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY;

retry:
	vma = find_extend_vma(mm, address);
	if (!vma || address < vma->vm_start)
		return -EFAULT;

	if (!vma_permits_fault(vma, fault_flags))
		return -EFAULT;

	ret = handle_mm_fault(vma, address, fault_flags);
	major |= ret & VM_FAULT_MAJOR;
	if (ret & VM_FAULT_ERROR) {
		if (ret & VM_FAULT_OOM)
			return -ENOMEM;
		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
			return -EHWPOISON;
		if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
			return -EFAULT;
		BUG();
	}

	if (ret & VM_FAULT_RETRY) {
		down_read(&mm->mmap_sem);
		if (!(fault_flags & FAULT_FLAG_TRIED)) {
			*unlocked = true;
			fault_flags &= ~FAULT_FLAG_ALLOW_RETRY;
			fault_flags |= FAULT_FLAG_TRIED;
			goto retry;
		}
	}

	if (tsk) {
		if (major)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(fixup_user_fault);
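
/*
 * Illustrative futex-style usage (a sketch, modeled on
 * fault_in_user_writeable() in kernel/futex.c): resolve a fault that
 * was hit while accessing @uaddr under pagefault_disable():
 *
 *	down_read(&mm->mmap_sem);
 *	ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
 *			       FAULT_FLAG_WRITE, NULL);
 *	up_read(&mm->mmap_sem);
 */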

static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
						struct mm_struct *mm,
						unsigned long start,
						unsigned long nr_pages,
						int write, int force,
						struct page **pages,
						struct vm_area_struct **vmas,
						int *locked, bool notify_drop,
						unsigned int flags)
{
	long ret, pages_done;
	bool lock_dropped;

	if (locked) {
		/* if VM_FAULT_RETRY can be returned, vmas become invalid */
		BUG_ON(vmas);
		/* check caller initialized locked */
		BUG_ON(*locked != 1);
	}

	if (pages)
		flags |= FOLL_GET;
	if (write)
		flags |= FOLL_WRITE;
	if (force)
		flags |= FOLL_FORCE;

	pages_done = 0;
	lock_dropped = false;
	for (;;) {
		ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages,
				       vmas, locked);
		if (!locked)
			/* VM_FAULT_RETRY couldn't trigger, bypass */
			return ret;

		/* VM_FAULT_RETRY cannot return errors */
		if (!*locked) {
			BUG_ON(ret < 0);
			BUG_ON(ret >= nr_pages);
		}

		if (!pages)
			/* If it's a prefault don't insist harder */
			return ret;

		if (ret > 0) {
			nr_pages -= ret;
			pages_done += ret;
			if (!nr_pages)
				break;
		}
		if (*locked) {
			/* VM_FAULT_RETRY didn't trigger */
			if (!pages_done)
				pages_done = ret;
			break;
		}
		/* VM_FAULT_RETRY triggered, so seek to the faulting offset */
		pages += ret;
		start += ret << PAGE_SHIFT;

		/*
		 * Repeat on the address that fired VM_FAULT_RETRY
		 * without FAULT_FLAG_ALLOW_RETRY but with
		 * FAULT_FLAG_TRIED.
		 */
		*locked = 1;
		lock_dropped = true;
		down_read(&mm->mmap_sem);
		ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
				       pages, NULL, NULL);
		if (ret != 1) {
			BUG_ON(ret > 1);
			if (!pages_done)
				pages_done = ret;
			break;
		}
		nr_pages--;
		pages_done++;
		if (!nr_pages)
			break;
		pages++;
		start += PAGE_SIZE;
	}
	if (notify_drop && lock_dropped && *locked) {
		/*
		 * We must let the caller know we temporarily dropped the lock
		 * and so the critical section protected by it was lost.
		 */
		up_read(&mm->mmap_sem);
		*locked = 0;
	}
	return pages_done;
}

/*
 * We can leverage the VM_FAULT_RETRY functionality in the page fault
 * paths better by using either get_user_pages_locked() or
 * get_user_pages_unlocked().
 *
 * get_user_pages_locked() is suitable to replace the form:
 *
 *      down_read(&mm->mmap_sem);
 *      do_something()
 *      get_user_pages(..., pages, NULL);
 *      up_read(&mm->mmap_sem);
 *
 *  to:
 *
 *      int locked = 1;
 *      down_read(&mm->mmap_sem);
 *      do_something()
 *      get_user_pages_locked(..., pages, &locked);
 *      if (locked)
 *          up_read(&mm->mmap_sem);
 */
long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
			   int write, int force, struct page **pages,
			   int *locked)
{
	return __get_user_pages_locked(current, current->mm, start, nr_pages,
				       write, force, pages, NULL, locked, true,
				       FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages_locked);

/*
 * Same as get_user_pages_unlocked(...., FOLL_TOUCH) but it allows passing
 * additional gup_flags as the last parameter (like FOLL_HWPOISON).
 *
 * NOTE: here FOLL_TOUCH is not set implicitly and must be set by the
 * caller if required (just like with __get_user_pages). "FOLL_GET",
 * "FOLL_WRITE" and "FOLL_FORCE" are set implicitly as needed
 * according to the parameters "pages", "write" and "force"
 * respectively.
 */
__always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
					       unsigned long start, unsigned long nr_pages,
					       int write, int force, struct page **pages,
					       unsigned int gup_flags)
{
	long ret;
	int locked = 1;
	down_read(&mm->mmap_sem);
	ret = __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
				      pages, NULL, &locked, false, gup_flags);
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(__get_user_pages_unlocked);

/*
 * get_user_pages_unlocked() is suitable to replace the form:
 *
 *      down_read(&mm->mmap_sem);
 *      get_user_pages(..., pages, NULL);
 *      up_read(&mm->mmap_sem);
 *
 *  with:
 *
 *      get_user_pages_unlocked(..., pages);
 *
 * It is functionally equivalent to get_user_pages_fast(), so
 * get_user_pages_fast() should be used instead, unless "force" must be
 * set (get_user_pages_fast has no "force" parameter).
 */
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
			     int write, int force, struct page **pages)
{
	return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
					 write, force, pages, FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages_unlocked);

/*
 * get_user_pages_remote() - pin user pages in memory
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to by the caller
 * @force:	whether to force access even when user mapping is currently
 *		protected (but never forces write access to shared mapping).
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held for read or write.
 *
 * get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * get_user_pages returns, and there may even be a completely different
 * page there in some cases (e.g. if mmapped pagecache has been invalidated
 * and subsequently re-faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If write=0, the page must not be written to. If the page is written to,
 * set_page_dirty (or set_page_dirty_lock, as appropriate) must be called
 * after the page is finished with, and before put_page is called.
 *
 * get_user_pages is typically used for fewer-copy IO operations, to get a
 * handle on the memory by some means other than accesses via the user virtual
 * addresses. The pages may be submitted for DMA to devices or accessed via
 * their kernel linear mapping (via the kmap APIs). Care should be taken to
 * use the correct cache flushing APIs.
 *
 * See also get_user_pages_fast, for performance critical applications.
 *
 * get_user_pages should be phased out in favor of
 * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
 * should use get_user_pages because it cannot pass
 * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
 */
long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		int write, int force, struct page **pages,
		struct vm_area_struct **vmas)
{
	return __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
				       pages, vmas, NULL, false,
				       FOLL_TOUCH | FOLL_REMOTE);
}
EXPORT_SYMBOL(get_user_pages_remote);
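
/*
 * Illustrative remote access (a sketch, modeled on __access_remote_vm()
 * in mm/memory.c, with hypothetical @tsk, @mm, @addr, @buf and @len):
 * pin one page of another process's address space, copy from it,
 * release it:
 *
 *	struct page *page;
 *	struct vm_area_struct *vma;
 *
 *	down_read(&mm->mmap_sem);
 *	if (get_user_pages_remote(tsk, mm, addr, 1, 0, 1,
 *				  &page, &vma) == 1) {
 *		void *maddr = kmap(page);
 *
 *		copy_from_user_page(vma, page, addr, buf, maddr, len);
 *		kunmap(page);
 *		put_page(page);
 *	}
 *	up_read(&mm->mmap_sem);
 */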

/*
 * This is the same as get_user_pages_remote(), just with a
 * less-flexible calling convention where we assume that the task
 * and mm being operated on are the current task's.  We also
 * obviously don't pass FOLL_REMOTE in here.
 */
long get_user_pages(unsigned long start, unsigned long nr_pages,
		int write, int force, struct page **pages,
		struct vm_area_struct **vmas)
{
	return __get_user_pages_locked(current, current->mm, start, nr_pages,
				       write, force, pages, vmas, NULL, false,
				       FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages);
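
/*
 * Illustrative direct-IO style usage (a sketch, with hypothetical @buf
 * and @nr): pin a user buffer for writing, then dirty and release each
 * page once the data has landed in it:
 *
 *	struct page **pages = kmalloc_array(nr, sizeof(*pages), GFP_KERNEL);
 *	long i, pinned;
 *
 *	down_read(&current->mm->mmap_sem);
 *	pinned = get_user_pages((unsigned long)buf, nr, 1, 0, pages, NULL);
 *	up_read(&current->mm->mmap_sem);
 *
 *	for (i = 0; i < pinned; i++) {
 *		... fill pages[i] via kmap() or DMA ...
 *		set_page_dirty_lock(pages[i]);
 *		put_page(pages[i]);
 *	}
 */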

/**
 * populate_vma_page_range() -  populate a range of pages in the vma.
 * @vma:   target vma
 * @start: start address
 * @end:   end address
 * @nonblocking: if non-NULL, mmap_sem may be released (see below)
 *
 * This takes care of mlocking the pages too if VM_LOCKED is set.
 *
 * return the number of pages processed, or a negative error code on error.
 *
 * vma->vm_mm->mmap_sem must be held.
 *
 * If @nonblocking is NULL, it may be held for read or write and will
 * be unperturbed.
 *
 * If @nonblocking is non-NULL, it must be held for read only and may be
 * released.  If it's released, *@nonblocking will be set to 0.
 */
long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *nonblocking)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long nr_pages = (end - start) / PAGE_SIZE;
	int gup_flags;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(end   & ~PAGE_MASK);
	VM_BUG_ON_VMA(start < vma->vm_start, vma);
	VM_BUG_ON_VMA(end   > vma->vm_end, vma);
	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);

	gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
	if (vma->vm_flags & VM_LOCKONFAULT)
		gup_flags &= ~FOLL_POPULATE;
	/*
	 * We want to touch writable mappings with a write fault in order
	 * to break COW, except for shared mappings because these don't COW
	 * and we would not want to dirty them for nothing.
	 */
	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
		gup_flags |= FOLL_WRITE;

	/*
	 * We want mlock to succeed for regions that have any permissions
	 * other than PROT_NONE.
	 */
	if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
		gup_flags |= FOLL_FORCE;

	/*
	 * We made sure addr is within a VMA, so the following will
	 * not result in a stack expansion that recurses back here.
	 */
	return __get_user_pages(current, mm, start, nr_pages, gup_flags,
				NULL, NULL, nonblocking);
}

/*
 * __mm_populate - populate and/or mlock pages within a range of address space.
 *
 * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
 * flags. VMAs must be already marked with the desired vm_flags, and
 * mmap_sem must not be held.
 */
int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
{
	struct mm_struct *mm = current->mm;
	unsigned long end, nstart, nend;
	struct vm_area_struct *vma = NULL;
	int locked = 0;
	long ret = 0;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(len != PAGE_ALIGN(len));
	end = start + len;

	for (nstart = start; nstart < end; nstart = nend) {
		/*
		 * We want to fault in pages for [nstart; end) address range.
		 * Find first corresponding VMA.
		 */
		if (!locked) {
			locked = 1;
			down_read(&mm->mmap_sem);
			vma = find_vma(mm, nstart);
		} else if (nstart >= vma->vm_end)
			vma = vma->vm_next;
		if (!vma || vma->vm_start >= end)
			break;
		/*
		 * Set [nstart; nend) to intersection of desired address
		 * range with the first VMA. Also, skip undesirable VMA types.
		 */
		nend = min(end, vma->vm_end);
		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
			continue;
		if (nstart < vma->vm_start)
			nstart = vma->vm_start;
		/*
		 * Now fault in a range of pages. populate_vma_page_range()
		 * double checks the vma flags, so that it won't mlock pages
		 * if the vma was already munlocked.
		 */
		ret = populate_vma_page_range(vma, nstart, nend, &locked);
		if (ret < 0) {
			if (ignore_errors) {
				ret = 0;
				continue;	/* continue at next VMA */
			}
			break;
		}
		nend = nstart + ret * PAGE_SIZE;
		ret = 0;
	}
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;	/* 0 or negative error code */
}

/**
 * get_dump_page() - pin user page in memory while writing it to core dump
 * @addr: user address
 *
 * Returns struct page pointer of user page pinned for dump,
 * to be freed afterwards by put_page().
 *
 * Returns NULL on any kind of failure - a hole must then be inserted into
 * the corefile, to preserve alignment with its headers; and also returns
 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
 * allowing a hole to be left in the corefile to save diskspace.
 *
 * Called without mmap_sem, but after all other threads have been killed.
 */
#ifdef CONFIG_ELF_CORE
struct page *get_dump_page(unsigned long addr)
{
	struct vm_area_struct *vma;
	struct page *page;

	if (__get_user_pages(current, current->mm, addr, 1,
			     FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
			     NULL) < 1)
		return NULL;
	flush_cache_page(vma, addr, page_to_pfn(page));
	return page;
}
#endif /* CONFIG_ELF_CORE */
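
/*
 * Illustrative coredump loop (a sketch, modeled on elf_core_dump() in
 * fs/binfmt_elf.c): emit the page if one exists, otherwise punch a
 * hole of the same size:
 *
 *	struct page *page = get_dump_page(addr);
 *
 *	if (page) {
 *		void *kaddr = kmap(page);
 *
 *		stop = !dump_emit(cprm, kaddr, PAGE_SIZE);
 *		kunmap(page);
 *		put_page(page);
 *	} else {
 *		stop = !dump_skip(cprm, PAGE_SIZE);
 *	}
 */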

/*
 * Generic RCU Fast GUP
 *
 * get_user_pages_fast attempts to pin user pages by walking the page
 * tables directly and avoids taking locks. Thus the walker needs to be
 * protected from page table pages being freed from under it, and should
 * block any THP splits.
 *
 * One way to achieve this is to have the walker disable interrupts, and
 * rely on IPIs from the TLB flushing code blocking before the page table
 * pages are freed. This is unsuitable for architectures that do not need
 * to broadcast an IPI when invalidating TLBs.
 *
 * Another way to achieve this is to batch up the pages containing page
 * tables, which may belong to more than one mm_user, then free them via
 * an rcu_sched callback. Disabling interrupts allows the fast_gup walker
 * to block both the rcu_sched callback and the IPI that we broadcast for
 * splitting THPs (which is a relatively rare event). The code below
 * adopts this strategy.
 *
 * Before activating this code, please be aware that the following assumptions
 * are currently made:
 *
 *  *) HAVE_RCU_TABLE_FREE is enabled, and tlb_remove_table is used to free
 *      pages containing page tables.
 *
 *  *) ptes can be read atomically by the architecture.
 *
 *  *) access_ok is sufficient to validate userspace address ranges.
 *
 * The last two assumptions can be relaxed by the addition of helper functions.
 *
 * This code is based heavily on the PowerPC implementation by Nick Piggin.
 */
#ifdef CONFIG_HAVE_GENERIC_RCU_GUP

#ifdef __HAVE_ARCH_PTE_SPECIAL
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	pte_t *ptep, *ptem;
	int ret = 0;

	ptem = ptep = pte_offset_map(&pmd, addr);
	do {
		/*
		 * In the line below we are assuming that the pte can be read
		 * atomically. If this is not the case for your architecture,
		 * please wrap this in a helper function!
		 *
		 * for an example see gup_get_pte in arch/x86/mm/gup.c
		 */
		pte_t pte = READ_ONCE(*ptep);
		struct page *head, *page;

		/*
		 * Similar to the PMD case below, NUMA hinting must take slow
		 * path using the pte_protnone check.
		 */
		if (!pte_present(pte) || pte_special(pte) ||
			pte_protnone(pte) || (write && !pte_write(pte)))
			goto pte_unmap;

		if (!arch_pte_access_permitted(pte, write))
			goto pte_unmap;

		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);
		head = compound_head(page);

		if (!page_cache_get_speculative(head))
			goto pte_unmap;

		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(head);
			goto pte_unmap;
		}

		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;

	} while (ptep++, addr += PAGE_SIZE, addr != end);

	ret = 1;

pte_unmap:
	pte_unmap(ptem);
	return ret;
}
#else

/*
 * If we can't determine whether or not a pte is special, then fail immediately
 * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
 * to be special.
 *
 * For a futex to be placed on a THP tail page, get_futex_key requires a
 * __get_user_pages_fast implementation that can pin pages. Thus it's still
 * useful to have gup_huge_pmd even if we can't operate on ptes.
 */
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	return 0;
}
#endif /* __HAVE_ARCH_PTE_SPECIAL */

static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page;
	int refs;

	if (write && !pmd_write(orig))
		return 0;

	refs = 0;
	head = pmd_page(orig);
	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	do {
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	return 1;
}

static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page;
	int refs;

	if (write && !pud_write(orig))
		return 0;

	refs = 0;
	head = pud_page(orig);
	page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	do {
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	return 1;
}

static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
			unsigned long end, int write,
			struct page **pages, int *nr)
{
	int refs;
	struct page *head, *page;

	if (write && !pgd_write(orig))
		return 0;

	refs = 0;
	head = pgd_page(orig);
	page = head + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
	do {
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	return 1;
}

static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_offset(&pud, addr);
	do {
		pmd_t pmd = READ_ONCE(*pmdp);

		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd))
			return 0;

		if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd))) {
			/*
			 * NUMA hinting faults need to be handled in the GUP
			 * slowpath for accounting purposes and so that they
			 * can be serialised against THP migration.
			 */
			if (pmd_protnone(pmd))
				return 0;

			if (!gup_huge_pmd(pmd, pmdp, addr, next, write,
				pages, nr))
				return 0;

		} else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
			/*
			 * architectures may use different formats for the
			 * hugetlbfs pmd and the THP pmd
			 */
			if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
					 PMD_SHIFT, next, write, pages, nr))
				return 0;
		} else if (!gup_pte_range(pmd, addr, next, write, pages, nr))
				return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}

static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp;

	pudp = pud_offset(&pgd, addr);
	do {
		pud_t pud = READ_ONCE(*pudp);

		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (unlikely(pud_huge(pud))) {
			if (!gup_huge_pud(pud, pudp, addr, next, write,
					  pages, nr))
				return 0;
		} else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
			if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
					 PUD_SHIFT, next, write, pages, nr))
				return 0;
		} else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
 * the regular GUP. It will only return non-negative values.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next, flags;
	pgd_t *pgdp;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;

	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
					start, len)))
		return 0;

	/*
	 * Disable interrupts.  We use the nested form as we can already have
	 * interrupts disabled by get_futex_key.
	 *
	 * With interrupts disabled, we block page table pages from being
	 * freed from under us. See mmu_gather_tlb in asm-generic/tlb.h
	 * for more details.
	 *
	 * We do not adopt an rcu_read_lock(.) here as we also want to
	 * block IPIs that come from THPs splitting.
	 */

	local_irq_save(flags);
	pgdp = pgd_offset(mm, addr);
	do {
		pgd_t pgd = READ_ONCE(*pgdp);

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			break;
		if (unlikely(pgd_huge(pgd))) {
			if (!gup_huge_pgd(pgd, pgdp, addr, next, write,
					  pages, &nr))
				break;
		} else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
			if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
					 PGDIR_SHIFT, next, write, pages, &nr))
				break;
		} else if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
			break;
	} while (pgdp++, addr = next, addr != end);
	local_irq_restore(flags);

	return nr;
}
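
/*
 * Illustrative best-effort pin (a sketch, following the pattern of KVM's
 * hva_to_pfn_fast()): because this never sleeps and never takes mmap_sem,
 * it is usable from atomic context; callers must be prepared for it to
 * pin nothing and fall back to a sleeping path:
 *
 *	struct page *page;
 *
 *	if (__get_user_pages_fast(addr, 1, 1, &page) == 1) {
 *		... use page, then put_page(page) ...
 *	} else {
 *		... fall back to get_user_pages_unlocked() ...
 *	}
 */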

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Attempt to pin user pages in memory without taking mm->mmap_sem.
 * If not successful, it will fall back to taking the lock and
 * calling get_user_pages().
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 */
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	int nr, ret;

	start &= PAGE_MASK;
	nr = __get_user_pages_fast(start, nr_pages, write, pages);
	ret = nr;

	if (nr < nr_pages) {
		/* Try to get the remaining pages with get_user_pages */
		start += nr << PAGE_SHIFT;
		pages += nr;

		ret = get_user_pages_unlocked(start, nr_pages - nr, write, 0, pages);

		/* Have to be a bit careful with return values */
		if (nr > 0) {
			if (ret < 0)
				ret = nr;
			else
				ret += nr;
		}
	}

	return ret;
}
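
/*
 * Illustrative usage (a sketch, with hypothetical @uaddr, @n and @pages):
 * the common fast-path pin, no mmap_sem handling needed in the caller:
 *
 *	int i, pinned = get_user_pages_fast(uaddr, n, 1, pages);
 *
 *	for (i = 0; i < pinned; i++) {
 *		... access the page ...
 *		set_page_dirty_lock(pages[i]);
 *		put_page(pages[i]);
 *	}
 */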

#endif /* CONFIG_HAVE_GENERIC_RCU_GUP */