// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>

#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/secretmem.h>

#include <linux/sched/signal.h>
#include <linux/rwsem.h>
#include <linux/hugetlb.h>
#include <linux/migrate.h>
#include <linux/mm_inline.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "internal.h"

struct follow_page_context {
	struct dev_pagemap *pgmap;
	unsigned int page_mask;
};

static inline void sanity_check_pinned_pages(struct page **pages,
					     unsigned long npages)
{
	if (!IS_ENABLED(CONFIG_DEBUG_VM))
		return;

	/*
	 * We only pin anonymous pages if they are exclusive. Once pinned, we
	 * can no longer turn them possibly shared and PageAnonExclusive() will
	 * stick around until the page is freed.
	 *
	 * We'd like to verify that our pinned anonymous pages are still mapped
	 * exclusively. The issue with anon THP is that we don't know how
	 * they are/were mapped when pinning them. However, for anon
	 * THP we can assume that either the given page (PTE-mapped THP) or
	 * the head page (PMD-mapped THP) should be PageAnonExclusive(). If
	 * neither is the case, there is certainly something wrong.
	 */
	for (; npages; npages--, pages++) {
		struct page *page = *pages;
		struct folio *folio = page_folio(page);

		if (is_zero_page(page) ||
		    !folio_test_anon(folio))
			continue;
		if (!folio_test_large(folio) || folio_test_hugetlb(folio))
			VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page), page);
		else
			/* Either a PTE-mapped or a PMD-mapped THP. */
			VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page) &&
				       !PageAnonExclusive(page), page);
	}
}

/*
 * Return the folio with ref appropriately incremented,
 * or NULL if that failed.
 */
static inline struct folio *try_get_folio(struct page *page, int refs)
{
	struct folio *folio;

retry:
	folio = page_folio(page);
	if (WARN_ON_ONCE(folio_ref_count(folio) < 0))
		return NULL;
	if (unlikely(!folio_ref_try_add(folio, refs)))
		return NULL;

	/*
	 * At this point we have a stable reference to the folio; but it
	 * could be that between calling page_folio() and the refcount
	 * increment, the folio was split, in which case we'd end up
	 * holding a reference on a folio that has nothing to do with the page
	 * we were given anymore.
	 * So now that the folio is stable, recheck that the page still
	 * belongs to this folio.
	 */
	if (unlikely(page_folio(page) != folio)) {
		if (!put_devmap_managed_page_refs(&folio->page, refs))
			folio_put_refs(folio, refs);
		goto retry;
	}

	return folio;
}

static void gup_put_folio(struct folio *folio, int refs, unsigned int flags)
{
	if (flags & FOLL_PIN) {
		if (is_zero_folio(folio))
			return;
		node_stat_mod_folio(folio, NR_FOLL_PIN_RELEASED, refs);
		if (folio_test_large(folio))
			atomic_sub(refs, &folio->_pincount);
		else
			refs *= GUP_PIN_COUNTING_BIAS;
	}

	if (!put_devmap_managed_page_refs(&folio->page, refs))
		folio_put_refs(folio, refs);
}

/**
 * try_grab_folio() - increment a folio's refcount by a flag-dependent amount
 * @folio:    pointer to folio to be grabbed
 * @refs:     the value to (effectively) add to the folio's refcount
 * @flags:    gup flags: these are the FOLL_* flag values
 *
 * This might not do anything at all, depending on the flags argument.
 *
 * "grab" names in this file mean, "look at flags to decide whether to use
 * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount."
 *
 * Either FOLL_PIN or FOLL_GET (or neither) may be set, but not both at the same
 * time.
 *
 * Return: 0 for success, or if no action was required (if neither FOLL_PIN
 * nor FOLL_GET was set, nothing is done). A negative error code for failure:
 *
 *   -ENOMEM	FOLL_GET or FOLL_PIN was set, but the folio could not
 *		be grabbed.
 *
 * It is called when we have a stable reference for the folio, typically in
 * the GUP slow path.
 */
int __must_check try_grab_folio(struct folio *folio, int refs,
				unsigned int flags)
{
	if (WARN_ON_ONCE(folio_ref_count(folio) <= 0))
		return -ENOMEM;

	if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(&folio->page)))
		return -EREMOTEIO;

	if (flags & FOLL_GET)
		folio_ref_add(folio, refs);
	else if (flags & FOLL_PIN) {
		/*
		 * Don't take a pin on the zero page - it's not going anywhere
		 * and it is used in a *lot* of places.
		 */
		if (is_zero_folio(folio))
			return 0;

		/*
		 * Increment the normal page refcount field at least once,
		 * so that the page really is pinned.
		 */
		if (folio_test_large(folio)) {
			folio_ref_add(folio, refs);
			atomic_add(refs, &folio->_pincount);
		} else {
			folio_ref_add(folio, refs * GUP_PIN_COUNTING_BIAS);
		}

		node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);
	}

	return 0;
}
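
/*
 * Illustrative sketch (added commentary, not from the original file): the two
 * accounting schemes above can be observed via folio_ref_count(). Assuming a
 * small (single-page) folio whose refcount is R, and GUP_PIN_COUNTING_BIAS
 * being 1024:
 *
 *	try_grab_folio(folio, 1, FOLL_GET);	// refcount: R + 1
 *	try_grab_folio(folio, 1, FOLL_PIN);	// refcount: R + 1 + 1024
 *
 * A large folio instead takes plain references and tracks the pin count in
 * folio->_pincount, which is why gup_put_folio() above undoes the two cases
 * differently.
 */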

/**
 * unpin_user_page() - release a dma-pinned page
 * @page: pointer to page to be released
 *
 * Pages that were pinned via pin_user_pages*() must be released via either
 * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so
 * that such pages can be separately tracked and uniquely handled. In
 * particular, interactions with RDMA and filesystems need special handling.
 */
void unpin_user_page(struct page *page)
{
	sanity_check_pinned_pages(&page, 1);
	gup_put_folio(page_folio(page), 1, FOLL_PIN);
}
EXPORT_SYMBOL(unpin_user_page);
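
/*
 * Illustrative usage sketch (an assumption for this comment, not code from
 * this file): a driver that pins user pages as a DMA target and releases
 * them once the transfer completes:
 *
 *	struct page *pages[NR];
 *	long i, got;
 *
 *	got = pin_user_pages_fast(user_addr, NR, FOLL_WRITE, pages);
 *	if (got < 0)
 *		return got;
 *	// ... DMA into the pinned pages ...
 *	for (i = 0; i < got; i++)
 *		unpin_user_page(pages[i]);
 */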

/**
 * folio_add_pin - Try to get an additional pin on a pinned folio
 * @folio: The folio to be pinned
 *
 * Get an additional pin on a folio we already have a pin on.  Makes no change
 * if the folio is a zero_page.
 */
void folio_add_pin(struct folio *folio)
{
	if (is_zero_folio(folio))
		return;

	/*
	 * Similar to try_grab_folio(): be sure to *also* increment the normal
	 * page refcount field at least once, so that the page really is
	 * pinned.
	 */
	if (folio_test_large(folio)) {
		WARN_ON_ONCE(atomic_read(&folio->_pincount) < 1);
		folio_ref_inc(folio);
		atomic_inc(&folio->_pincount);
	} else {
		WARN_ON_ONCE(folio_ref_count(folio) < GUP_PIN_COUNTING_BIAS);
		folio_ref_add(folio, GUP_PIN_COUNTING_BIAS);
	}
}
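
/*
 * Illustrative sketch (an assumption, not from this file): folio_add_pin()
 * lets the holder of an existing pin hand an independent pin to another
 * owner without going back through GUP:
 *
 *	// @folio was already pinned, e.g. via pin_user_pages()
 *	folio_add_pin(folio);			// second, independent pin
 *	// ... later, each owner drops its own pin:
 *	unpin_user_page(&folio->page);
 *	unpin_user_page(&folio->page);
 */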

static inline struct folio *gup_folio_range_next(struct page *start,
		unsigned long npages, unsigned long i, unsigned int *ntails)
{
	struct page *next = nth_page(start, i);
	struct folio *folio = page_folio(next);
	unsigned int nr = 1;

	if (folio_test_large(folio))
		nr = min_t(unsigned int, npages - i,
			   folio_nr_pages(folio) - folio_page_idx(folio, next));

	*ntails = nr;
	return folio;
}

static inline struct folio *gup_folio_next(struct page **list,
		unsigned long npages, unsigned long i, unsigned int *ntails)
{
	struct folio *folio = page_folio(list[i]);
	unsigned int nr;

	for (nr = i + 1; nr < npages; nr++) {
		if (page_folio(list[nr]) != folio)
			break;
	}

	*ntails = nr - i;
	return folio;
}

/**
 * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
 * @pages:  array of pages to be maybe marked dirty, and definitely released.
 * @npages: number of pages in the @pages array.
 * @make_dirty: whether to mark the pages dirty
 *
 * "gup-pinned page" refers to a page that has had one of the get_user_pages()
 * variants called on that page.
 *
 * For each page in the @pages array, make that page (or its head page, if a
 * compound page) dirty, if @make_dirty is true, and if the page was previously
 * listed as clean. In any case, releases all pages using unpin_user_page(),
 * possibly via unpin_user_pages(), for the non-dirty case.
 *
 * Please see the unpin_user_page() documentation for details.
 *
 * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
 * required, then the caller should a) verify that this is really correct,
 * because _lock() is usually required, and b) hand code it:
 * set_page_dirty_lock(), unpin_user_page().
 *
 */
void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
				 bool make_dirty)
{
	unsigned long i;
	struct folio *folio;
	unsigned int nr;

	if (!make_dirty) {
		unpin_user_pages(pages, npages);
		return;
	}

	sanity_check_pinned_pages(pages, npages);
	for (i = 0; i < npages; i += nr) {
		folio = gup_folio_next(pages, npages, i, &nr);
		/*
		 * Checking PageDirty at this point may race with
		 * clear_page_dirty_for_io(), but that's OK. Two key
		 * cases:
		 *
		 * 1) This code sees the page as already dirty, so it
		 * skips the call to set_page_dirty(). That could happen
		 * because clear_page_dirty_for_io() called
		 * page_mkclean(), followed by set_page_dirty().
		 * However, now the page is going to get written back,
		 * which meets the original intention of setting it
		 * dirty, so all is well: clear_page_dirty_for_io() goes
		 * on to call TestClearPageDirty(), and write the page
		 * back.
		 *
		 * 2) This code sees the page as clean, so it calls
		 * set_page_dirty(). The page stays dirty, despite being
		 * written back, so it gets written back again in the
		 * next writeback cycle. This is harmless.
		 */
		if (!folio_test_dirty(folio)) {
			folio_lock(folio);
			folio_mark_dirty(folio);
			folio_unlock(folio);
		}
		gup_put_folio(folio, nr, FOLL_PIN);
	}
}
EXPORT_SYMBOL(unpin_user_pages_dirty_lock);
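
/*
 * Illustrative usage sketch (an assumption, not from this file): a caller
 * that pinned pages as a DMA target marks them dirty on release, since the
 * device wrote to them behind the kernel's back:
 *
 *	got = pin_user_pages_fast(user_addr, NR, FOLL_WRITE, pages);
 *	// ... device DMA writes into the pages ...
 *	unpin_user_pages_dirty_lock(pages, got, true);
 *
 * For a DMA source (the device only read the pages), pass false, or simply
 * call unpin_user_pages().
 */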

/**
 * unpin_user_page_range_dirty_lock() - release and optionally dirty
 * gup-pinned page range
 *
 * @page:  the starting page of a range maybe marked dirty, and definitely released.
 * @npages: number of consecutive pages to release.
 * @make_dirty: whether to mark the pages dirty
 *
 * "gup-pinned page range" refers to a range of pages that has had one of the
 * pin_user_pages() variants called on that page.
 *
 * For the page range defined by [page .. page+npages), make that range (or
 * its head pages, if a compound page) dirty, if @make_dirty is true, and if the
 * page range was previously listed as clean.
 *
 * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
 * required, then the caller should a) verify that this is really correct,
 * because _lock() is usually required, and b) hand code it:
 * set_page_dirty_lock(), unpin_user_page().
 *
 */
void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
				      bool make_dirty)
{
	unsigned long i;
	struct folio *folio;
	unsigned int nr;

	for (i = 0; i < npages; i += nr) {
		folio = gup_folio_range_next(page, npages, i, &nr);
		if (make_dirty && !folio_test_dirty(folio)) {
			folio_lock(folio);
			folio_mark_dirty(folio);
			folio_unlock(folio);
		}
		gup_put_folio(folio, nr, FOLL_PIN);
	}
}
EXPORT_SYMBOL(unpin_user_page_range_dirty_lock);
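
/*
 * Illustrative sketch (an assumption, not from this file): unlike
 * unpin_user_pages_dirty_lock(), no page array is needed here; physically
 * consecutive pages are described by a single (page, npages) pair:
 *
 *	// pages were pinned contiguously, e.g. one pin covering a huge page
 *	unpin_user_page_range_dirty_lock(first_page, npages, true);
 */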

static void unpin_user_pages_lockless(struct page **pages, unsigned long npages)
{
	unsigned long i;
	struct folio *folio;
	unsigned int nr;

	/*
	 * Don't perform any sanity checks because we might have raced with
	 * fork() and some anonymous pages might now actually be shared --
	 * which is why we're unpinning after all.
	 */
	for (i = 0; i < npages; i += nr) {
		folio = gup_folio_next(pages, npages, i, &nr);
		gup_put_folio(folio, nr, FOLL_PIN);
	}
}

/**
 * unpin_user_pages() - release an array of gup-pinned pages.
 * @pages:  array of pages to be marked dirty and released.
 * @npages: number of pages in the @pages array.
 *
 * For each page in the @pages array, release the page using unpin_user_page().
 *
 * Please see the unpin_user_page() documentation for details.
 */
void unpin_user_pages(struct page **pages, unsigned long npages)
{
	unsigned long i;
	struct folio *folio;
	unsigned int nr;

	/*
	 * If this WARN_ON() fires, then the system *might* be leaking pages (by
	 * leaving them pinned), but probably not. More likely, gup/pup returned
	 * a hard -ERRNO error to the caller, who erroneously passed it here.
	 */
	if (WARN_ON(IS_ERR_VALUE(npages)))
		return;

	sanity_check_pinned_pages(pages, npages);
	for (i = 0; i < npages; i += nr) {
		folio = gup_folio_next(pages, npages, i, &nr);
		gup_put_folio(folio, nr, FOLL_PIN);
	}
}
EXPORT_SYMBOL(unpin_user_pages);
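
/*
 * Illustrative sketch (added commentary, not from the original file): the
 * batching done by gup_folio_next() means a pinned THP spanning several
 * consecutive entries of @pages is released with a single gup_put_folio()
 * call:
 *
 *	// pages[0..511] all point into one PMD-sized folio
 *	unpin_user_pages(pages, 512);	// one gup_put_folio(folio, 512, ...)
 */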

/*
 * Set MMF_HAS_PINNED if it is not set yet; once set, it stays there for the
 * mm's lifecycle. Avoid setting the bit unless necessary, or it might cause
 * write cache bouncing on large SMP machines for concurrent pinned gups.
 */
static inline void mm_set_has_pinned_flag(unsigned long *mm_flags)
{
	if (!test_bit(MMF_HAS_PINNED, mm_flags))
		set_bit(MMF_HAS_PINNED, mm_flags);
}

#ifdef CONFIG_MMU
static struct page *no_page_table(struct vm_area_struct *vma,
		unsigned int flags)
{
	/*
	 * When core dumping an enormous anonymous area that nobody
	 * has touched so far, we don't want to allocate unnecessary pages or
	 * page tables.  Return error instead of NULL to skip handle_mm_fault,
	 * then get_dump_page() will return NULL to leave a hole in the dump.
	 * But we can only make this optimization where a hole would surely
	 * be zero-filled if handle_mm_fault() actually did handle it.
	 */
	if ((flags & FOLL_DUMP) &&
	    (vma_is_anonymous(vma) || !vma->vm_ops->fault))
		return ERR_PTR(-EFAULT);
	return NULL;
}

static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
			  pte_t *pte, unsigned int flags)
{
	if (flags & FOLL_TOUCH) {
		pte_t orig_entry = ptep_get(pte);
		pte_t entry = orig_entry;

		if (flags & FOLL_WRITE)
			entry = pte_mkdirty(entry);
		entry = pte_mkyoung(entry);

		if (!pte_same(orig_entry, entry)) {
			set_pte_at(vma->vm_mm, address, pte, entry);
			update_mmu_cache(vma, address, pte);
		}
	}

	/* Proper page table entry exists, but no corresponding struct page */
	return -EEXIST;
}

/* FOLL_FORCE can write to even unwritable PTEs in COW mappings. */
static inline bool can_follow_write_pte(pte_t pte, struct page *page,
					struct vm_area_struct *vma,
					unsigned int flags)
{
	/* If the pte is writable, we can write to the page. */
	if (pte_write(pte))
		return true;

	/* Maybe FOLL_FORCE is set to override it? */
	if (!(flags & FOLL_FORCE))
		return false;

	/* But FOLL_FORCE has no effect on shared mappings */
	if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
		return false;

	/* ... or read-only private ones */
	if (!(vma->vm_flags & VM_MAYWRITE))
		return false;

	/* ... or already writable ones that just need to take a write fault */
	if (vma->vm_flags & VM_WRITE)
		return false;

	/*
	 * See can_change_pte_writable(): we broke COW and could map the page
	 * writable if we have an exclusive anonymous page ...
	 */
	if (!page || !PageAnon(page) || !PageAnonExclusive(page))
		return false;

	/* ... and a write-fault isn't required for other reasons. */
	if (vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte))
		return false;
	return !userfaultfd_pte_wp(vma, pte);
}

static struct page *follow_page_pte(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd, unsigned int flags,
		struct dev_pagemap **pgmap)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	spinlock_t *ptl;
	pte_t *ptep, pte;
	int ret;

	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
	if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
			 (FOLL_PIN | FOLL_GET)))
		return ERR_PTR(-EINVAL);

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (!ptep)
		return no_page_table(vma, flags);
	pte = ptep_get(ptep);
	if (!pte_present(pte))
		goto no_page;
	if (pte_protnone(pte) && !gup_can_follow_protnone(vma, flags))
		goto no_page;

	page = vm_normal_page(vma, address, pte);

	/*
	 * We only care about anon pages in can_follow_write_pte() and don't
	 * have to worry about pte_devmap() because they are never anon.
	 */
	if ((flags & FOLL_WRITE) &&
	    !can_follow_write_pte(pte, page, vma, flags)) {
		page = NULL;
		goto out;
	}

	if (!page && pte_devmap(pte) && (flags & (FOLL_GET | FOLL_PIN))) {
		/*
		 * Only return device mapping pages in the FOLL_GET or FOLL_PIN
		 * case since they are only valid while holding the pgmap
		 * reference.
		 */
		*pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap);
		if (*pgmap)
			page = pte_page(pte);
		else
			goto no_page;
	} else if (unlikely(!page)) {
		if (flags & FOLL_DUMP) {
			/* Avoid special (like zero) pages in core dumps */
			page = ERR_PTR(-EFAULT);
			goto out;
		}

		if (is_zero_pfn(pte_pfn(pte))) {
			page = pte_page(pte);
		} else {
			ret = follow_pfn_pte(vma, address, ptep, flags);
			page = ERR_PTR(ret);
			goto out;
		}
	}

	if (!pte_write(pte) && gup_must_unshare(vma, flags, page)) {
		page = ERR_PTR(-EMLINK);
		goto out;
	}

	VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
		       !PageAnonExclusive(page), page);

	/* try_grab_folio() does nothing unless FOLL_GET or FOLL_PIN is set. */
	ret = try_grab_folio(page_folio(page), 1, flags);
	if (unlikely(ret)) {
		page = ERR_PTR(ret);
		goto out;
	}

	/*
	 * We need to make the page accessible if and only if we are going
	 * to access its content (the FOLL_PIN case).  Please see
	 * Documentation/core-api/pin_user_pages.rst for details.
	 */
	if (flags & FOLL_PIN) {
		ret = arch_make_page_accessible(page);
		if (ret) {
			unpin_user_page(page);
			page = ERR_PTR(ret);
			goto out;
		}
	}
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to use
		 * mark_page_accessed().
		 */
		mark_page_accessed(page);
	}
out:
	pte_unmap_unlock(ptep, ptl);
	return page;
no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return NULL;
	return no_page_table(vma, flags);
}

static struct page *follow_pmd_mask(struct vm_area_struct *vma,
				    unsigned long address, pud_t *pudp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	pmd_t *pmd, pmdval;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	pmd = pmd_offset(pudp, address);
	pmdval = pmdp_get_lockless(pmd);
	if (pmd_none(pmdval))
		return no_page_table(vma, flags);
	if (!pmd_present(pmdval))
		return no_page_table(vma, flags);
	if (pmd_devmap(pmdval)) {
		ptl = pmd_lock(mm, pmd);
		page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
		spin_unlock(ptl);
		if (page)
			return page;
	}
	if (likely(!pmd_trans_huge(pmdval)))
		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);

	if (pmd_protnone(pmdval) && !gup_can_follow_protnone(vma, flags))
		return no_page_table(vma, flags);

	ptl = pmd_lock(mm, pmd);
	if (unlikely(!pmd_present(*pmd))) {
		spin_unlock(ptl);
		return no_page_table(vma, flags);
	}
	if (unlikely(!pmd_trans_huge(*pmd))) {
		spin_unlock(ptl);
		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
	}
	if (flags & FOLL_SPLIT_PMD) {
		spin_unlock(ptl);
		split_huge_pmd(vma, pmd, address);
		/* If pmd was left empty, stuff a page table in there quickly */
		return pte_alloc(mm, pmd) ? ERR_PTR(-ENOMEM) :
			follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
	}
	page = follow_trans_huge_pmd(vma, address, pmd, flags);
	spin_unlock(ptl);
	ctx->page_mask = HPAGE_PMD_NR - 1;
	return page;
}

static struct page *follow_pud_mask(struct vm_area_struct *vma,
				    unsigned long address, p4d_t *p4dp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	pud_t *pud;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	pud = pud_offset(p4dp, address);
	if (pud_none(*pud))
		return no_page_table(vma, flags);
	if (pud_devmap(*pud)) {
		ptl = pud_lock(mm, pud);
		page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap);
		spin_unlock(ptl);
		if (page)
			return page;
	}
	if (unlikely(pud_bad(*pud)))
		return no_page_table(vma, flags);

	return follow_pmd_mask(vma, address, pud, flags, ctx);
}

static struct page *follow_p4d_mask(struct vm_area_struct *vma,
				    unsigned long address, pgd_t *pgdp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	p4d_t *p4d;

	p4d = p4d_offset(pgdp, address);
	if (p4d_none(*p4d))
		return no_page_table(vma, flags);
	BUILD_BUG_ON(p4d_huge(*p4d));
	if (unlikely(p4d_bad(*p4d)))
		return no_page_table(vma, flags);

	return follow_pud_mask(vma, address, p4d, flags, ctx);
}

/**
 * follow_page_mask - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a
 *       pointer to output page_mask
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
 * the device's dev_pagemap metadata to avoid repeating expensive lookups.
 *
 * When getting an anonymous page and the caller has to trigger unsharing
 * of a shared anonymous page first, -EMLINK is returned. The caller should
 * trigger a fault with FAULT_FLAG_UNSHARE set. Note that unsharing is only
 * relevant with FOLL_PIN and !FOLL_WRITE.
 *
 * On output, the @ctx->page_mask is set according to the size of the page.
 *
 * Return: the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
static struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      struct follow_page_context *ctx)
{
	pgd_t *pgd;
	struct mm_struct *mm = vma->vm_mm;

	ctx->page_mask = 0;

	/*
	 * Call hugetlb_follow_page_mask for hugetlb vmas as it will use
	 * special hugetlb page table walking code.  This eliminates the
	 * need to check for hugetlb entries in the general walking code.
	 */
	if (is_vm_hugetlb_page(vma))
		return hugetlb_follow_page_mask(vma, address, flags,
						&ctx->page_mask);

	pgd = pgd_offset(mm, address);

	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return no_page_table(vma, flags);

	return follow_p4d_mask(vma, address, pgd, flags, ctx);
}

struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			 unsigned int foll_flags)
{
	struct follow_page_context ctx = { NULL };
	struct page *page;

	if (vma_is_secretmem(vma))
		return NULL;

	if (WARN_ON_ONCE(foll_flags & FOLL_PIN))
		return NULL;

	/*
	 * We never set FOLL_HONOR_NUMA_FAULT because callers don't expect
	 * to fail on PROT_NONE-mapped pages.
	 */
	page = follow_page_mask(vma, address, foll_flags, &ctx);
	if (ctx.pgmap)
		put_dev_pagemap(ctx.pgmap);
	return page;
}

static int get_gate_page(struct mm_struct *mm, unsigned long address,
		unsigned int gup_flags, struct vm_area_struct **vma,
		struct page **page)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;
	int ret = -EFAULT;

	/* user gate pages are read-only */
	if (gup_flags & FOLL_WRITE)
		return -EFAULT;
	if (address > TASK_SIZE)
		pgd = pgd_offset_k(address);
	else
		pgd = pgd_offset_gate(mm, address);
	if (pgd_none(*pgd))
		return -EFAULT;
	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d))
		return -EFAULT;
	pud = pud_offset(p4d, address);
	if (pud_none(*pud))
		return -EFAULT;
	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return -EFAULT;
	pte = pte_offset_map(pmd, address);
	if (!pte)
		return -EFAULT;
	entry = ptep_get(pte);
	if (pte_none(entry))
		goto unmap;
	*vma = get_gate_vma(mm);
	if (!page)
		goto out;
	*page = vm_normal_page(*vma, address, entry);
	if (!*page) {
		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(entry)))
			goto unmap;
		*page = pte_page(entry);
	}
	ret = try_grab_folio(page_folio(*page), 1, gup_flags);
	if (unlikely(ret))
		goto unmap;
out:
	ret = 0;
unmap:
	pte_unmap(pte);
	return ret;
}

/*
 * mmap_lock must be held on entry.  If @flags has FOLL_UNLOCKABLE but not
 * FOLL_NOWAIT, the mmap_lock may be released.  If it is, *@locked will be set
 * to 0 and -EBUSY returned.
 */
static int faultin_page(struct vm_area_struct *vma,
		unsigned long address, unsigned int *flags, bool unshare,
		int *locked)
{
	unsigned int fault_flags = 0;
	vm_fault_t ret;

	if (*flags & FOLL_NOFAULT)
		return -EFAULT;
	if (*flags & FOLL_WRITE)
		fault_flags |= FAULT_FLAG_WRITE;
	if (*flags & FOLL_REMOTE)
		fault_flags |= FAULT_FLAG_REMOTE;
	if (*flags & FOLL_UNLOCKABLE) {
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
		/*
		 * FAULT_FLAG_INTERRUPTIBLE is opt-in. GUP callers must set
		 * FOLL_INTERRUPTIBLE to enable FAULT_FLAG_INTERRUPTIBLE.
		 * That's because some callers may not be prepared to
		 * handle early exits caused by non-fatal signals.
		 */
		if (*flags & FOLL_INTERRUPTIBLE)
			fault_flags |= FAULT_FLAG_INTERRUPTIBLE;
	}
	if (*flags & FOLL_NOWAIT)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
	if (*flags & FOLL_TRIED) {
		/*
		 * Note: FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_TRIED
		 * can co-exist
		 */
		fault_flags |= FAULT_FLAG_TRIED;
	}
	if (unshare) {
		fault_flags |= FAULT_FLAG_UNSHARE;
		/* FAULT_FLAG_WRITE and FAULT_FLAG_UNSHARE are incompatible */
		VM_BUG_ON(fault_flags & FAULT_FLAG_WRITE);
	}

	ret = handle_mm_fault(vma, address, fault_flags, NULL);

	if (ret & VM_FAULT_COMPLETED) {
		/*
		 * With FAULT_FLAG_RETRY_NOWAIT we'll never release the
		 * mmap lock in the page fault handler. Sanity check this.
		 */
		WARN_ON_ONCE(fault_flags & FAULT_FLAG_RETRY_NOWAIT);
		*locked = 0;

		/*
		 * We should do the same as VM_FAULT_RETRY, but let's not
		 * return -EBUSY since that's not reflecting the reality of
		 * what has happened - we've just fully completed a page
		 * fault, with the mmap lock released.  Use -EAGAIN to show
		 * that we want to take the mmap lock _again_.
		 */
		return -EAGAIN;
	}

	if (ret & VM_FAULT_ERROR) {
		int err = vm_fault_to_errno(ret, *flags);

		if (err)
			return err;
		BUG();
	}

	if (ret & VM_FAULT_RETRY) {
		if (!(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
			*locked = 0;
		return -EBUSY;
	}

	return 0;
}

/*
 * Writing to file-backed mappings which require folio dirty tracking using GUP
 * is a fundamentally broken operation, as kernel write access to GUP mappings
 * does not adhere to the semantics expected by a file system.
 *
 * Consider the following scenario:
 *
 * 1. A folio is written to via GUP which write-faults the memory, notifying
 *    the file system and dirtying the folio.
 * 2. Later, writeback is triggered, resulting in the folio being cleaned and
 *    the PTE being marked read-only.
 * 3. The GUP caller writes to the folio, as it is mapped read/write via the
 *    direct mapping.
 * 4. The GUP caller, now done with the page, unpins it and sets it dirty
 *    (though it does not have to).
 *
 * This results in both data being written to a folio without writenotify, and
 * the folio being dirtied unexpectedly (if the caller decides to do so).
 */
static bool writable_file_mapping_allowed(struct vm_area_struct *vma,
					  unsigned long gup_flags)
{
	/*
	 * If we aren't pinning then no problematic write can occur. A long term
	 * pin is the most egregious case so this is the case we disallow.
	 */
	if ((gup_flags & (FOLL_PIN | FOLL_LONGTERM)) !=
	    (FOLL_PIN | FOLL_LONGTERM))
		return true;

	/*
	 * If the VMA does not require dirty tracking then no problematic write
	 * can occur either.
	 */
	return !vma_needs_dirty_tracking(vma);
}

static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
{
	vm_flags_t vm_flags = vma->vm_flags;
	int write = (gup_flags & FOLL_WRITE);
	int foreign = (gup_flags & FOLL_REMOTE);
	bool vma_anon = vma_is_anonymous(vma);

	if (vm_flags & (VM_IO | VM_PFNMAP))
		return -EFAULT;

	if ((gup_flags & FOLL_ANON) && !vma_anon)
		return -EFAULT;

	if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma))
		return -EOPNOTSUPP;

	if (vma_is_secretmem(vma))
		return -EFAULT;

	if (write) {
		if (!vma_anon &&
		    !writable_file_mapping_allowed(vma, gup_flags))
			return -EFAULT;

		if (!(vm_flags & VM_WRITE) || (vm_flags & VM_SHADOW_STACK)) {
			if (!(gup_flags & FOLL_FORCE))
				return -EFAULT;
			/* hugetlb does not support FOLL_FORCE|FOLL_WRITE. */
			if (is_vm_hugetlb_page(vma))
				return -EFAULT;
			/*
			 * We used to let the write,force case do COW in a
			 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
			 * set a breakpoint in a read-only mapping of an
			 * executable, without corrupting the file (yet only
			 * when that file had been opened for writing!).
			 * Anon pages in shared mappings are surprising: now
			 * just reject it.
			 */
			if (!is_cow_mapping(vm_flags))
				return -EFAULT;
		}
	} else if (!(vm_flags & VM_READ)) {
		if (!(gup_flags & FOLL_FORCE))
			return -EFAULT;
		/*
		 * Is there actually any vma we can reach here which does not
		 * have VM_MAYREAD set?
		 */
		if (!(vm_flags & VM_MAYREAD))
			return -EFAULT;
	}
	/*
	 * gups are always data accesses, not instruction
	 * fetches, so execute=false here
	 */
	if (!arch_vma_access_permitted(vma, write, false, foreign))
		return -EFAULT;
	return 0;
}

/*
 * This is "vma_lookup()", but with a warning if we would have
 * historically expanded the stack in the GUP code.
 */
static struct vm_area_struct *gup_vma_lookup(struct mm_struct *mm,
	 unsigned long addr)
{
#ifdef CONFIG_STACK_GROWSUP
	return vma_lookup(mm, addr);
#else
	static volatile unsigned long next_warn;
	struct vm_area_struct *vma;
	unsigned long now, next;

	vma = find_vma(mm, addr);
	if (!vma || (addr >= vma->vm_start))
		return vma;

	/* Only warn for half-way relevant accesses */
	if (!(vma->vm_flags & VM_GROWSDOWN))
		return NULL;
	if (vma->vm_start - addr > 65536)
		return NULL;

	/* Let's not warn more than once an hour.. */
	now = jiffies; next = next_warn;
	if (next && time_before(now, next))
		return NULL;
	next_warn = now + 60*60*HZ;

	/* Let people know things may have changed. */
	pr_warn("GUP no longer grows the stack in %s (%d): %lx-%lx (%lx)\n",
		current->comm, task_pid_nr(current),
		vma->vm_start, vma->vm_end, addr);
	dump_stack();
	return NULL;
#endif
}

/**
 * __get_user_pages() - pin user pages in memory
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @locked:	whether we're still with the mmap_lock held
 *
 * Returns either number of pages pinned (which may be less than the
 * number requested), or an error. Details about the return value:
 *
 * -- If nr_pages is 0, returns 0.
 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
 * -- If nr_pages is >0, and some pages were pinned, returns the number of
 *    pages pinned. Again, this may be less than nr_pages.
 * -- 0 return value is possible when the fault would need to be retried.
 *
 * The caller is responsible for releasing returned @pages, via put_page().
 *
 * Must be called with mmap_lock held.  It may be released.  See below.
 *
 * __get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * __get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re-faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
 * appropriate) must be called after the page is finished with, and
 * before put_page is called.
 *
 * If FOLL_UNLOCKABLE is set without FOLL_NOWAIT then the mmap_lock may
 * be released. If this happens *@locked will be set to 0 on return.
 *
 * A caller using such a combination of @gup_flags must therefore hold the
 * mmap_lock for reading only, and recognize when it's been released. Otherwise,
 * it must be held for either reading or writing and will not be released.
 *
 * In most cases, get_user_pages or get_user_pages_fast should be used
 * instead of __get_user_pages. __get_user_pages should be used only if
 * you need some special @gup_flags.
 */
static long __get_user_pages(struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		int *locked)
{
	long ret = 0, i = 0;
	struct vm_area_struct *vma = NULL;
	struct follow_page_context ctx = { NULL };

	if (!nr_pages)
		return 0;

	start = untagged_addr_remote(mm, start);

	VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));

	do {
		struct page *page;
		unsigned int foll_flags = gup_flags;
		unsigned int page_increm;

		/* first iteration or cross vma bound */
		if (!vma || start >= vma->vm_end) {
			/*
			 * MADV_POPULATE_(READ|WRITE) wants to handle VMA
			 * lookups+error reporting differently.
			 */
			if (gup_flags & FOLL_MADV_POPULATE) {
				vma = vma_lookup(mm, start);
				if (!vma) {
					ret = -ENOMEM;
					goto out;
				}
				if (check_vma_flags(vma, gup_flags)) {
					ret = -EINVAL;
					goto out;
				}
				goto retry;
			}
			vma = gup_vma_lookup(mm, start);
			if (!vma && in_gate_area(mm, start)) {
				ret = get_gate_page(mm, start & PAGE_MASK,
						gup_flags, &vma,
						pages ? &page : NULL);
				if (ret)
					goto out;
				ctx.page_mask = 0;
				goto next_page;
			}

			if (!vma) {
				ret = -EFAULT;
				goto out;
			}
			ret = check_vma_flags(vma, gup_flags);
			if (ret)
				goto out;
		}
retry:
		/*
		 * If we have a pending SIGKILL, don't keep faulting pages and
		 * potentially allocating memory.
		 */
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		cond_resched();

		page = follow_page_mask(vma, start, foll_flags, &ctx);
		if (!page || PTR_ERR(page) == -EMLINK) {
			ret = faultin_page(vma, start, &foll_flags,
					   PTR_ERR(page) == -EMLINK, locked);
			switch (ret) {
			case 0:
				goto retry;
			case -EBUSY:
			case -EAGAIN:
				ret = 0;
				fallthrough;
			case -EFAULT:
			case -ENOMEM:
			case -EHWPOISON:
				goto out;
			}
			BUG();
		} else if (PTR_ERR(page) == -EEXIST) {
			/*
			 * Proper page table entry exists, but no corresponding
			 * struct page. If the caller expects **pages to be
			 * filled in, bail out now, because that can't be done
			 * for this page.
			 */
			if (pages) {
				ret = PTR_ERR(page);
				goto out;
			}
		} else if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto out;
		}
next_page:
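		/*
		 * Worked example (added for clarity, an illustration rather
		 * than original commentary): for a PMD-mapped THP,
		 * ctx.page_mask is HPAGE_PMD_NR - 1 (511 with 4K pages). If
		 * start points at subpage index k of the THP, then
		 * (~(start >> PAGE_SHIFT) & page_mask) == 511 - k, so
		 * page_increm below becomes 512 - k: the number of subpages
		 * from start to the end of this huge page, capped to
		 * nr_pages just after.
		 */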
		page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
		if (page_increm > nr_pages)
			page_increm = nr_pages;

		if (pages) {
			struct page *subpage;
			unsigned int j;

			/*
			 * This must be a large folio (and doesn't need to
			 * be the whole folio; it can be part of it), do
			 * the refcount work for all the subpages too.
			 *
			 * NOTE: here the page may not be the head page
			 * e.g. when start addr is not thp-size aligned.
			 * try_grab_folio() should have taken care of tail
			 * pages.
			 */
			if (page_increm > 1) {
				struct folio *folio = page_folio(page);

				/*
				 * Since we already hold refcount on the
				 * large folio, this should never fail.
				 */
				if (try_grab_folio(folio, page_increm - 1,
						   foll_flags)) {
					/*
					 * Release the 1st page ref if the
					 * folio is problematic, fail hard.
					 */
					gup_put_folio(folio, 1,
						      foll_flags);
					ret = -EFAULT;
					goto out;
				}
			}

			for (j = 0; j < page_increm; j++) {
				subpage = nth_page(page, j);
				pages[i + j] = subpage;
				flush_anon_page(vma, subpage, start + j * PAGE_SIZE);
				flush_dcache_page(subpage);
			}
		}

		i += page_increm;
		start += page_increm * PAGE_SIZE;
		nr_pages -= page_increm;
	} while (nr_pages);
out:
	if (ctx.pgmap)
		put_dev_pagemap(ctx.pgmap);
	return i ? i : ret;
}

static bool vma_permits_fault(struct vm_area_struct *vma,
			      unsigned int fault_flags)
{
	bool write   = !!(fault_flags & FAULT_FLAG_WRITE);
	bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
	vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;

	if (!(vm_flags & vma->vm_flags))
		return false;

	/*
	 * The architecture might have a hardware protection
	 * mechanism other than read/write that can deny access.
	 *
	 * gup always represents data access, not instruction
	 * fetches, so execute=false here:
	 */
	if (!arch_vma_access_permitted(vma, write, false, foreign))
		return false;

	return true;
}

/**
 * fixup_user_fault() - manually resolve a user page fault
 * @mm:		mm_struct of target mm
 * @address:	user address
 * @fault_flags: flags to pass down to handle_mm_fault()
 * @unlocked:	did we unlock the mmap_lock while retrying, maybe NULL if caller
 *		does not allow retry. If NULL, the caller must guarantee
 *		that fault_flags does not contain FAULT_FLAG_ALLOW_RETRY.
 *
 * This is meant to be called in the specific scenario where for locking reasons
 * we try to access user memory in atomic context (within a pagefault_disable()
 * section), this returns -EFAULT, and we want to resolve the user fault before
 * trying again.
 *
 * Typically this is meant to be used by the futex code.
 *
 * The main difference with get_user_pages() is that this function will
 * unconditionally call handle_mm_fault() which will in turn perform all the
 * necessary SW fixup of the dirty and young bits in the PTE, while
 * get_user_pages() only guarantees to update these in the struct page.
 *
 * This is important for some architectures where those bits also gate the
 * access permission to the page because they are maintained in software.  On
 * such architectures, gup() will not be enough to make a subsequent access
 * succeed.
 *
 * This function will not return with an unlocked mmap_lock.  So it does not
 * have the same semantics wrt the @mm->mmap_lock as does filemap_fault().
 */
int fixup_user_fault(struct mm_struct *mm,
		     unsigned long address, unsigned int fault_flags,
		     bool *unlocked)
{
	struct vm_area_struct *vma;
	vm_fault_t ret;

	address = untagged_addr_remote(mm, address);

	if (unlocked)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

retry:
	vma = gup_vma_lookup(mm, address);
	if (!vma)
		return -EFAULT;

	if (!vma_permits_fault(vma, fault_flags))
		return -EFAULT;

	if ((fault_flags & FAULT_FLAG_KILLABLE) &&
	    fatal_signal_pending(current))
		return -EINTR;

	ret = handle_mm_fault(vma, address, fault_flags, NULL);

	if (ret & VM_FAULT_COMPLETED) {
		/*
		 * NOTE: it's a pity that we need to retake the lock here
		 * to pair with the unlock() in the callers. Ideally we
		 * could tell the callers so they do not need to unlock.
		 */
		mmap_read_lock(mm);
		*unlocked = true;
		return 0;
	}

	if (ret & VM_FAULT_ERROR) {
		int err = vm_fault_to_errno(ret, 0);

		if (err)
			return err;
		BUG();
	}

	if (ret & VM_FAULT_RETRY) {
		mmap_read_lock(mm);
		*unlocked = true;
		fault_flags |= FAULT_FLAG_TRIED;
		goto retry;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(fixup_user_fault);
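
/*
 * Illustrative usage sketch (an assumption, not from this file): the classic
 * caller pattern, as in the futex code, retries an atomic user access after
 * resolving the fault:
 *
 *	bool unlocked = false;
 *
 *	mmap_read_lock(mm);
 *	while (atomic_user_access_fails(uaddr)) {	// hypothetical helper
 *		if (fixup_user_fault(mm, uaddr, FAULT_FLAG_WRITE, &unlocked))
 *			break;		// hard failure, e.g. -EFAULT
 *	}
 *	mmap_read_unlock(mm);
 */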

/*
 * GUP always responds to fatal signals.  When FOLL_INTERRUPTIBLE is
 * specified, it'll also respond to generic signals.  The caller of GUP
 * that has FOLL_INTERRUPTIBLE should take care of the GUP interruption.
 */
static bool gup_signal_pending(unsigned int flags)
{
	if (fatal_signal_pending(current))
		return true;

	if (!(flags & FOLL_INTERRUPTIBLE))
		return false;

	return signal_pending(current);
}

/*
 * Locking: (*locked == 1) means that the mmap_lock has already been acquired by
 * the caller. This function may drop the mmap_lock. If it does so, then it will
 * set (*locked = 0).
 *
 * (*locked == 0) means that the caller expects this function to acquire and
 * drop the mmap_lock. Therefore, the value of *locked will still be zero when
 * the function returns, even though it may have changed temporarily during
 * function execution.
 *
 * Please note that this function, unlike __get_user_pages(), will not return 0
 * for nr_pages > 0, unless FOLL_NOWAIT is used.
 */
static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
						unsigned long start,
						unsigned long nr_pages,
						struct page **pages,
						int *locked,
						unsigned int flags)
{
	long ret, pages_done;
	bool must_unlock = false;

	/*
	 * The internal caller expects GUP to manage the lock internally and the
	 * lock must be released when this returns.
	 */
	if (!*locked) {
		if (mmap_read_lock_killable(mm))
			return -EAGAIN;
		must_unlock = true;
		*locked = 1;
	} else
		mmap_assert_locked(mm);

	if (flags & FOLL_PIN)
		mm_set_has_pinned_flag(&mm->flags);

	/*
	 * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior
	 * is to set FOLL_GET if the caller wants pages[] filled in (but has
	 * carelessly failed to specify FOLL_GET), so keep doing that, but only
	 * for FOLL_GET, not for the newer FOLL_PIN.
	 *
	 * FOLL_PIN always expects pages to be non-null, but no need to assert
	 * that here, as any failures will be obvious enough.
	 */
	if (pages && !(flags & FOLL_PIN))
		flags |= FOLL_GET;

	pages_done = 0;
	for (;;) {
		ret = __get_user_pages(mm, start, nr_pages, flags, pages,
				       locked);
		if (!(flags & FOLL_UNLOCKABLE)) {
			/* VM_FAULT_RETRY couldn't trigger, bypass */
			pages_done = ret;
			break;
		}

		/* VM_FAULT_RETRY or VM_FAULT_COMPLETED cannot return errors */
		if (!*locked) {
			BUG_ON(ret < 0);
			BUG_ON(ret >= nr_pages);
		}

		if (ret > 0) {
			nr_pages -= ret;
			pages_done += ret;
			if (!nr_pages)
				break;
		}
		if (*locked) {
			/*
			 * VM_FAULT_RETRY didn't trigger or it was a
			 * FOLL_NOWAIT.
			 */
			if (!pages_done)
				pages_done = ret;
			break;
		}
		/*
		 * VM_FAULT_RETRY triggered, so seek to the faulting offset.
		 * For the prefault case (!pages) we only update counts.
		 */
		if (likely(pages))
			pages += ret;
		start += ret << PAGE_SHIFT;

		/* The lock was temporarily dropped, so we must unlock later */
		must_unlock = true;

retry:
		/*
		 * Repeat on the address that fired VM_FAULT_RETRY
		 * with both FAULT_FLAG_ALLOW_RETRY and
		 * FAULT_FLAG_TRIED.  Note that GUP can be interrupted
		 * by fatal signals or even common signals, depending on
		 * the caller's request. So we need to check it before we
		 * start trying again otherwise it can loop forever.
		 */
		if (gup_signal_pending(flags)) {
			if (!pages_done)
				pages_done = -EINTR;
			break;
		}

		ret = mmap_read_lock_killable(mm);
		if (ret) {
			BUG_ON(ret > 0);
			if (!pages_done)
				pages_done = ret;
			break;
		}

		*locked = 1;
		ret = __get_user_pages(mm, start, 1, flags | FOLL_TRIED,
				       pages, locked);
		if (!*locked) {
			/* Continue to retry until we succeeded */
			BUG_ON(ret != 0);
			goto retry;
		}
		if (ret != 1) {
			BUG_ON(ret > 1);
			if (!pages_done)
				pages_done = ret;
			break;
		}
		nr_pages--;
		pages_done++;
		if (!nr_pages)
			break;
		if (likely(pages))
			pages++;
		start += PAGE_SIZE;
	}
	if (must_unlock && *locked) {
		/*
		 * We either temporarily dropped the lock, or the caller
		 * requested that we both acquire and drop the lock. Either way,
		 * we must now unlock, and notify the caller of that state.
		 */
		mmap_read_unlock(mm);
		*locked = 0;
	}
	return pages_done;
}

/**
 * populate_vma_page_range() -  populate a range of pages in the vma.
 * @vma:   target vma
 * @start: start address
 * @end:   end address
 * @locked: whether the mmap_lock is still held
 *
 * This takes care of mlocking the pages too if VM_LOCKED is set.
 *
 * Return either number of pages pinned in the vma, or a negative error
 * code on error.
 *
 * vma->vm_mm->mmap_lock must be held.
 *
 * If @locked is NULL, it may be held for read or write and will
 * be unperturbed.
 *
 * If @locked is non-NULL, it must be held for read only and may be
 * released.  If it's released, *@locked will be set to 0.
 */
long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *locked)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long nr_pages = (end - start) / PAGE_SIZE;
	int local_locked = 1;
	int gup_flags;
	long ret;

	VM_BUG_ON(!PAGE_ALIGNED(start));
	VM_BUG_ON(!PAGE_ALIGNED(end));
	VM_BUG_ON_VMA(start < vma->vm_start, vma);
	VM_BUG_ON_VMA(end > vma->vm_end, vma);
	mmap_assert_locked(mm);

	/*
	 * Rightly or wrongly, the VM_LOCKONFAULT case has never used
	 * faultin_page() to break COW, so it has no work to do here.
	 */
	if (vma->vm_flags & VM_LOCKONFAULT)
		return nr_pages;

	gup_flags = FOLL_TOUCH;
	/*
	 * We want to touch writable mappings with a write fault in order
	 * to break COW, except for shared mappings because these don't COW
	 * and we would not want to dirty them for nothing.
	 */
	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
		gup_flags |= FOLL_WRITE;

	/*
	 * We want mlock to succeed for regions that have any permissions
	 * other than PROT_NONE.
	 */
	if (vma_is_accessible(vma))
		gup_flags |= FOLL_FORCE;

	if (locked)
		gup_flags |= FOLL_UNLOCKABLE;

	/*
	 * We made sure addr is within a VMA, so the following will
	 * not result in a stack expansion that recurses back here.
	 */
	ret = __get_user_pages(mm, start, nr_pages, gup_flags,
			       NULL, locked ? locked : &local_locked);
	lru_add_drain();
	return ret;
}
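
/*
 * Illustrative sketch (an assumption, not from this file): this is the helper
 * behind mlock()-style population; a caller walks the VMAs of a range and
 * populates each intersection, the way __mm_populate() below does:
 *
 *	ret = populate_vma_page_range(vma, nstart, nend, &locked);
 *	if (ret < 0)
 *		// decide whether to skip this VMA or fail the whole range
 */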

/*
 * faultin_page_range() - populate (prefault) page tables inside the
 *			  given range readable/writable
 *
 * This takes care of mlocking the pages, too, if VM_LOCKED is set.
 *
 * @mm: the mm to populate page tables in
 * @start: start address
 * @end: end address
 * @write: whether to prefault readable or writable
 * @locked: whether the mmap_lock is still held
 *
 * Returns either number of processed pages in the MM, or a negative error
 * code on error (see __get_user_pages()). Note that this function reports
 * errors related to VMAs, such as incompatible mappings, as expected by
 * MADV_POPULATE_(READ|WRITE).
 *
 * The range must be page-aligned.
 *
 * mm->mmap_lock must be held. If it's released, *@locked will be set to 0.
 */
long faultin_page_range(struct mm_struct *mm, unsigned long start,
			unsigned long end, bool write, int *locked)
{
	unsigned long nr_pages = (end - start) / PAGE_SIZE;
	int gup_flags;
	long ret;

	VM_BUG_ON(!PAGE_ALIGNED(start));
	VM_BUG_ON(!PAGE_ALIGNED(end));
	mmap_assert_locked(mm);

	/*
	 * FOLL_TOUCH: Mark page accessed and thereby young; will also mark
	 *	       the page dirty with FOLL_WRITE -- which doesn't make a
	 *	       difference with !FOLL_FORCE, because the page is writable
	 *	       in the page table.
	 * FOLL_HWPOISON: Return -EHWPOISON instead of -EFAULT when we hit
	 *		  a poisoned page.
	 * !FOLL_FORCE: Require proper access permissions.
	 */
	gup_flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_UNLOCKABLE |
		    FOLL_MADV_POPULATE;
	if (write)
		gup_flags |= FOLL_WRITE;

	ret = __get_user_pages_locked(mm, start, nr_pages, NULL, locked,
				      gup_flags);
	lru_add_drain();
	return ret;
}

/*
 * __mm_populate - populate and/or mlock pages within a range of address space.
 *
 * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
 * flags. VMAs must be already marked with the desired vm_flags, and
 * mmap_lock must not be held.
 */
int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
{
	struct mm_struct *mm = current->mm;
	unsigned long end, nstart, nend;
	struct vm_area_struct *vma = NULL;
	int locked = 0;
	long ret = 0;

	end = start + len;

	for (nstart = start; nstart < end; nstart = nend) {
		/*
		 * We want to fault in pages for [nstart; end) address range.
		 * Find first corresponding VMA.
		 */
		if (!locked) {
			locked = 1;
			mmap_read_lock(mm);
			vma = find_vma_intersection(mm, nstart, end);
		} else if (nstart >= vma->vm_end)
			vma = find_vma_intersection(mm, vma->vm_end, end);

		if (!vma)
			break;
		/*
		 * Set [nstart; nend) to intersection of desired address
		 * range with the first VMA. Also, skip undesirable VMA types.
		 */
		nend = min(end, vma->vm_end);
		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
			continue;
		if (nstart < vma->vm_start)
			nstart = vma->vm_start;
		/*
		 * Now fault in a range of pages. populate_vma_page_range()
		 * double checks the vma flags, so that it won't mlock pages
		 * if the vma was already munlocked.
		 */
		ret = populate_vma_page_range(vma, nstart, nend, &locked);
		if (ret < 0) {
			if (ignore_errors) {
				ret = 0;
				continue;	/* continue at next VMA */
			}
			break;
		}
		nend = nstart + ret * PAGE_SIZE;
		ret = 0;
	}
	if (locked)
		mmap_read_unlock(mm);
	return ret;	/* 0 or negative error code */
}
#else /* CONFIG_MMU */
static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
		unsigned long nr_pages, struct page **pages,
		int *locked, unsigned int foll_flags)
{
	struct vm_area_struct *vma;
	bool must_unlock = false;
	unsigned long vm_flags;
	long i;

	if (!nr_pages)
		return 0;

	/*
	 * The internal caller expects GUP to manage the lock internally and the
	 * lock must be released when this returns.
	 */
	if (!*locked) {
		if (mmap_read_lock_killable(mm))
			return -EAGAIN;
		must_unlock = true;
		*locked = 1;
	}

	/* calculate required read or write permissions.
	 * If FOLL_FORCE is set, we only require the "MAY" flags.
	 */
	vm_flags  = (foll_flags & FOLL_WRITE) ?
			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= (foll_flags & FOLL_FORCE) ?
			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);

	for (i = 0; i < nr_pages; i++) {
		vma = find_vma(mm, start);
		if (!vma)
			break;

		/* protect what we can, including chardevs */
		if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
		    !(vm_flags & vma->vm_flags))
			break;

		if (pages) {
			pages[i] = virt_to_page((void *)start);
			if (pages[i])
				get_page(pages[i]);
		}

		start = (start + PAGE_SIZE) & PAGE_MASK;
	}

	if (must_unlock && *locked) {
		mmap_read_unlock(mm);
		*locked = 0;
	}

	return i ? : -EFAULT;
}
#endif /* !CONFIG_MMU */
1770
1771 /**
1772 * fault_in_writeable - fault in userspace address range for writing
1773 * @uaddr: start of address range
1774 * @size: size of address range
1775 *
1776 * Returns the number of bytes not faulted in (like copy_to_user() and
1777 * copy_from_user()).
1778 */
1779 size_t fault_in_writeable(char __user *uaddr, size_t size)
1780 {
1781 char __user *start = uaddr, *end;
1782
1783 if (unlikely(size == 0))
1784 return 0;
1785 if (!user_write_access_begin(uaddr, size))
1786 return size;
1787 if (!PAGE_ALIGNED(uaddr)) {
1788 unsafe_put_user(0, uaddr, out);
1789 uaddr = (char __user *)PAGE_ALIGN((unsigned long)uaddr);
1790 }
1791 end = (char __user *)PAGE_ALIGN((unsigned long)start + size);
1792 if (unlikely(end < start))
1793 end = NULL;
1794 while (uaddr != end) {
1795 unsafe_put_user(0, uaddr, out);
1796 uaddr += PAGE_SIZE;
1797 }
1798
1799 out:
1800 user_write_access_end();
1801 if (size > uaddr - start)
1802 return size - (uaddr - start);
1803 return 0;
1804 }
1805 EXPORT_SYMBOL(fault_in_writeable);
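
/*
 * Example (an illustrative sketch, not taken from this file): callers
 * typically pair fault_in_writeable() with a copy_to_user() retry loop,
 * since both report the number of bytes *not* done. The helper name and
 * buffer are hypothetical. Note that this loop can fail to make progress
 * on sub-page permission faults (e.g. arm64 MTE); see
 * fault_in_subpage_writeable() below for that case.
 *
 *	static int copy_obj_to_user(char __user *ubuf, const char *obj,
 *				    size_t len)
 *	{
 *		while (len) {
 *			size_t left = copy_to_user(ubuf, obj, len);
 *
 *			if (!left)
 *				return 0;
 *			ubuf += len - left;
 *			obj += len - left;
 *			len = left;
 *			if (fault_in_writeable(ubuf, len) == len)
 *				return -EFAULT;
 *		}
 *		return 0;
 *	}
 */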
1806
1807 /**
1808 * fault_in_subpage_writeable - fault in an address range for writing
1809 * @uaddr: start of address range
1810 * @size: size of address range
1811 *
1812 * Fault in a user address range for writing while checking for permissions at
1813 * sub-page granularity (e.g. arm64 MTE). This function should be used when
1814 * the caller cannot guarantee forward progress of a copy_to_user() loop.
1815 *
1816 * Returns the number of bytes not faulted in (like copy_to_user() and
1817 * copy_from_user()).
1818 */
1819 size_t fault_in_subpage_writeable(char __user *uaddr, size_t size)
1820 {
1821 size_t faulted_in;
1822
1823 /*
1824 * Attempt faulting in at page granularity first for page table
1825 * permission checking. The arch-specific probe_subpage_writeable()
1826 * functions may not check for this.
1827 */
1828 faulted_in = size - fault_in_writeable(uaddr, size);
1829 if (faulted_in)
1830 faulted_in -= probe_subpage_writeable(uaddr, faulted_in);
1831
1832 return size - faulted_in;
1833 }
1834 EXPORT_SYMBOL(fault_in_subpage_writeable);
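
/*
 * Continuing the hypothetical sketch above: on architectures with sub-page
 * permissions, replacing fault_in_writeable() with
 * fault_in_subpage_writeable() in the retry loop guarantees forward
 * progress:
 *
 *	if (fault_in_subpage_writeable(ubuf, len) == len)
 *		return -EFAULT;
 *
 * If the probe cannot make the next byte writable, the loop now bails out
 * with -EFAULT instead of spinning on a copy_to_user() that can never
 * succeed.
 */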
1835
1836 /*
1837 * fault_in_safe_writeable - fault in an address range for writing
1838 * @uaddr: start of address range
1839 * @size: length of address range
1840 *
1841 * Faults in an address range for writing. This is primarily useful when we
1842 * already know that some or all of the pages in the address range aren't in
1843 * memory.
1844 *
1845 * Unlike fault_in_writeable(), this function is non-destructive.
1846 *
1847 * Note that we don't pin or otherwise hold the pages referenced that we fault
1848 * in. There's no guarantee that they'll stay in memory for any duration of
1849 * time.
1850 *
1851 * Returns the number of bytes not faulted in, like copy_to_user() and
1852 * copy_from_user().
1853 */
1854 size_t fault_in_safe_writeable(const char __user *uaddr, size_t size)
1855 {
1856 unsigned long start = (unsigned long)uaddr, end;
1857 struct mm_struct *mm = current->mm;
1858 bool unlocked = false;
1859
1860 if (unlikely(size == 0))
1861 return 0;
1862 end = PAGE_ALIGN(start + size);
1863 if (end < start)
1864 end = 0;
1865
1866 mmap_read_lock(mm);
1867 do {
1868 if (fixup_user_fault(mm, start, FAULT_FLAG_WRITE, &unlocked))
1869 break;
1870 start = (start + PAGE_SIZE) & PAGE_MASK;
1871 } while (start != end);
1872 mmap_read_unlock(mm);
1873
1874 if (size > (unsigned long)uaddr - start)
1875 return size - ((unsigned long)uaddr - start);
1876 return 0;
1877 }
1878 EXPORT_SYMBOL(fault_in_safe_writeable);
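
/*
 * Illustrative sketch (hypothetical caller and read primitive): a read
 * path that fills a user buffer incrementally must not clobber the bytes
 * it already copied out, so it uses this non-destructive variant before
 * retrying:
 *
 *	while ((ret = read_into_ubuf(ubuf, count)) == -EFAULT) {
 *		if (fault_in_safe_writeable(ubuf, count) == count)
 *			break;
 *	}
 *
 * fault_in_writeable() would be wrong here, because its probe writes
 * zeroes into the destination.
 */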
1879
1880 /**
1881 * fault_in_readable - fault in userspace address range for reading
1882 * @uaddr: start of user address range
1883 * @size: size of user address range
1884 *
1885 * Returns the number of bytes not faulted in (like copy_to_user() and
1886 * copy_from_user()).
1887 */
1888 size_t fault_in_readable(const char __user *uaddr, size_t size)
1889 {
1890 const char __user *start = uaddr, *end;
1891 volatile char c;
1892
1893 if (unlikely(size == 0))
1894 return 0;
1895 if (!user_read_access_begin(uaddr, size))
1896 return size;
1897 if (!PAGE_ALIGNED(uaddr)) {
1898 unsafe_get_user(c, uaddr, out);
1899 uaddr = (const char __user *)PAGE_ALIGN((unsigned long)uaddr);
1900 }
1901 end = (const char __user *)PAGE_ALIGN((unsigned long)start + size);
1902 if (unlikely(end < start))
1903 end = NULL;
1904 while (uaddr != end) {
1905 unsafe_get_user(c, uaddr, out);
1906 uaddr += PAGE_SIZE;
1907 }
1908
1909 out:
1910 user_read_access_end();
1911 (void)c;
1912 if (size > uaddr - start)
1913 return size - (uaddr - start);
1914 return 0;
1915 }
1916 EXPORT_SYMBOL(fault_in_readable);
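
/*
 * Illustrative sketch (hypothetical helper): the read-side mirror of the
 * copy_obj_to_user() sketch above pairs fault_in_readable() with
 * copy_from_user():
 *
 *	static int copy_obj_from_user(char *obj, const char __user *ubuf,
 *				      size_t len)
 *	{
 *		while (len) {
 *			size_t left = copy_from_user(obj, ubuf, len);
 *
 *			if (!left)
 *				return 0;
 *			obj += len - left;
 *			ubuf += len - left;
 *			len = left;
 *			if (fault_in_readable(ubuf, len) == len)
 *				return -EFAULT;
 *		}
 *		return 0;
 *	}
 */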
1917
1918 /**
1919 * get_dump_page() - pin user page in memory while writing it to core dump
1920 * @addr: user address
1921 *
1922 * Returns struct page pointer of user page pinned for dump,
1923 * to be freed afterwards by put_page().
1924 *
1925 * Returns NULL on any kind of failure - a hole must then be inserted into
1926 * the corefile, to preserve alignment with its headers; and also returns
1927 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
1928 * allowing a hole to be left in the corefile to save disk space.
1929 *
1930 * Called without mmap_lock (takes and releases the mmap_lock by itself).
1931 */
1932 #ifdef CONFIG_ELF_CORE
1933 struct page *get_dump_page(unsigned long addr)
1934 {
1935 struct page *page;
1936 int locked = 0;
1937 int ret;
1938
1939 ret = __get_user_pages_locked(current->mm, addr, 1, &page, &locked,
1940 FOLL_FORCE | FOLL_DUMP | FOLL_GET);
1941 return (ret == 1) ? page : NULL;
1942 }
1943 #endif /* CONFIG_ELF_CORE */
1944
1945 #ifdef CONFIG_MIGRATION
1946 /*
1947 * Collects the longterm-unpinnable folios from @pages onto @movable_page_list.
1948 */
1949 static void collect_longterm_unpinnable_pages(
1950 struct list_head *movable_page_list,
1951 unsigned long nr_pages,
1952 struct page **pages)
1953 {
1954 struct folio *prev_folio = NULL;
1955 bool drain_allow = true;
1956 unsigned long i;
1957
1958 for (i = 0; i < nr_pages; i++) {
1959 struct folio *folio = page_folio(pages[i]);
1960
1961 if (folio == prev_folio)
1962 continue;
1963 prev_folio = folio;
1964
1965 if (folio_is_longterm_pinnable(folio))
1966 continue;
1967
1968 if (folio_is_device_coherent(folio))
1969 continue;
1970
1971 if (folio_test_hugetlb(folio)) {
1972 isolate_hugetlb(folio, movable_page_list);
1973 continue;
1974 }
1975
1976 if (!folio_test_lru(folio) && drain_allow) {
1977 lru_add_drain_all();
1978 drain_allow = false;
1979 }
1980
1981 if (!folio_isolate_lru(folio))
1982 continue;
1983
1984 list_add_tail(&folio->lru, movable_page_list);
1985 node_stat_mod_folio(folio,
1986 NR_ISOLATED_ANON + folio_is_file_lru(folio),
1987 folio_nr_pages(folio));
1988 }
1989 }
1990
1991 /*
1992 * Unpins all pages, then migrates the device coherent pages and the folios
1993 * on @movable_page_list. Returns -EAGAIN if all pages were successfully
1994 * migrated, or -errno if migration failed (even partially).
1995 */
1996 static int migrate_longterm_unpinnable_pages(
1997 struct list_head *movable_page_list,
1998 unsigned long nr_pages,
1999 struct page **pages)
2000 {
2001 int ret;
2002 unsigned long i;
2003
2004 for (i = 0; i < nr_pages; i++) {
2005 struct folio *folio = page_folio(pages[i]);
2006
2007 if (folio_is_device_coherent(folio)) {
2008 /*
2009 * Migration will fail if the page is pinned, so convert
2010 * the pin on the source page to a normal reference.
2011 */
2012 pages[i] = NULL;
2013 folio_get(folio);
2014 gup_put_folio(folio, 1, FOLL_PIN);
2015
2016 if (migrate_device_coherent_page(&folio->page)) {
2017 ret = -EBUSY;
2018 goto err;
2019 }
2020
2021 continue;
2022 }
2023
2024 /*
2025 * We can't migrate pages with unexpected references, so drop
2026 * the reference obtained by __get_user_pages_locked().
2027 * Pages under migration were added to movable_page_list by
2028 * folio_isolate_lru(), which takes its own reference, so the
2029 * page won't be freed while it is being migrated.
2030 */
2031 unpin_user_page(pages[i]);
2032 pages[i] = NULL;
2033 }
2034
2035 if (!list_empty(movable_page_list)) {
2036 struct migration_target_control mtc = {
2037 .nid = NUMA_NO_NODE,
2038 .gfp_mask = GFP_USER | __GFP_NOWARN,
2039 };
2040
2041 if (migrate_pages(movable_page_list, alloc_migration_target,
2042 NULL, (unsigned long)&mtc, MIGRATE_SYNC,
2043 MR_LONGTERM_PIN, NULL)) {
2044 ret = -ENOMEM;
2045 goto err;
2046 }
2047 }
2048
2049 putback_movable_pages(movable_page_list);
2050
2051 return -EAGAIN;
2052
2053 err:
2054 for (i = 0; i < nr_pages; i++)
2055 if (pages[i])
2056 unpin_user_page(pages[i]);
2057 putback_movable_pages(movable_page_list);
2058
2059 return ret;
2060 }
2061
2062 /*
2063 * Check whether all pages are *allowed* to be pinned. Rather confusingly, all
2064 * pages in the range are required to be pinned via FOLL_PIN, before calling
2065 * this routine.
2066 *
2067 * If any pages in the range are not allowed to be pinned, then this routine
2068 * will migrate those pages away, unpin all the pages in the range and return
2069 * -EAGAIN. The caller should re-pin the entire range with FOLL_PIN and then
2070 * call this routine again.
2071 *
2072 * If an error other than -EAGAIN occurs, this indicates a migration failure.
2073 * The caller should give up, and propagate the error back up the call stack.
2074 *
2075 * If everything is OK and all pages in the range are allowed to be pinned, then
2076 * this routine leaves all pages pinned and returns zero for success.
2077 */
2078 static long check_and_migrate_movable_pages(unsigned long nr_pages,
2079 struct page **pages)
2080 {
2081 LIST_HEAD(movable_page_list);
2082
2083 collect_longterm_unpinnable_pages(&movable_page_list, nr_pages, pages);
2084 if (list_empty(&movable_page_list))
2085 return 0;
2086
2087 return migrate_longterm_unpinnable_pages(&movable_page_list, nr_pages,
2088 pages);
2089 }
2090 #else
2091 static long check_and_migrate_movable_pages(unsigned long nr_pages,
2092 struct page **pages)
2093 {
2094 return 0;
2095 }
2096 #endif /* CONFIG_MIGRATION */
2097
2098 /*
2099 * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which
2100 * allows us to process the FOLL_LONGTERM flag.
2101 */
2102 static long __gup_longterm_locked(struct mm_struct *mm,
2103 unsigned long start,
2104 unsigned long nr_pages,
2105 struct page **pages,
2106 int *locked,
2107 unsigned int gup_flags)
2108 {
2109 unsigned int flags;
2110 long rc, nr_pinned_pages;
2111
2112 if (!(gup_flags & FOLL_LONGTERM))
2113 return __get_user_pages_locked(mm, start, nr_pages, pages,
2114 locked, gup_flags);
2115
2116 flags = memalloc_pin_save();
2117 do {
2118 nr_pinned_pages = __get_user_pages_locked(mm, start, nr_pages,
2119 pages, locked,
2120 gup_flags);
2121 if (nr_pinned_pages <= 0) {
2122 rc = nr_pinned_pages;
2123 break;
2124 }
2125
2126 /* FOLL_LONGTERM implies FOLL_PIN */
2127 rc = check_and_migrate_movable_pages(nr_pinned_pages, pages);
2128 } while (rc == -EAGAIN);
2129 memalloc_pin_restore(flags);
2130 return rc ? rc : nr_pinned_pages;
2131 }
2132
2133 /*
2134 * Check that the given flags are valid for the exported gup/pup interface, and
2135 * update them with the required flags that the caller must have set.
2136 */
2137 static bool is_valid_gup_args(struct page **pages, int *locked,
2138 unsigned int *gup_flags_p, unsigned int to_set)
2139 {
2140 unsigned int gup_flags = *gup_flags_p;
2141
2142 /*
2143 * These flags are not allowed to be specified externally to the gup
2144 * interfaces:
2145 * - FOLL_TOUCH/FOLL_PIN/FOLL_TRIED/FOLL_FAST_ONLY are internal only
2146 * - FOLL_REMOTE is internal only and used by follow_page()
2147 * - FOLL_UNLOCKABLE is internal only and used if locked is !NULL
2148 */
2149 if (WARN_ON_ONCE(gup_flags & INTERNAL_GUP_FLAGS))
2150 return false;
2151
2152 gup_flags |= to_set;
2153 if (locked) {
2154 /* At the external interface locked must be set */
2155 if (WARN_ON_ONCE(*locked != 1))
2156 return false;
2157
2158 gup_flags |= FOLL_UNLOCKABLE;
2159 }
2160
2161 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
2162 if (WARN_ON_ONCE((gup_flags & (FOLL_PIN | FOLL_GET)) ==
2163 (FOLL_PIN | FOLL_GET)))
2164 return false;
2165
2166 /* LONGTERM can only be specified when pinning */
2167 if (WARN_ON_ONCE(!(gup_flags & FOLL_PIN) && (gup_flags & FOLL_LONGTERM)))
2168 return false;
2169
2170 /* Pages input must be given if using GET/PIN */
2171 if (WARN_ON_ONCE((gup_flags & (FOLL_GET | FOLL_PIN)) && !pages))
2172 return false;
2173
2174 /* We want to allow the pgmap to be hot-unplugged at all times */
2175 if (WARN_ON_ONCE((gup_flags & FOLL_LONGTERM) &&
2176 (gup_flags & FOLL_PCI_P2PDMA)))
2177 return false;
2178
2179 *gup_flags_p = gup_flags;
2180 return true;
2181 }
2182
2183 #ifdef CONFIG_MMU
2184 /**
2185 * get_user_pages_remote() - pin user pages in memory
2186 * @mm: mm_struct of target mm
2187 * @start: starting user address
2188 * @nr_pages: number of pages from start to pin
2189 * @gup_flags: flags modifying lookup behaviour
2190 * @pages: array that receives pointers to the pages pinned.
2191 * Should be at least nr_pages long. Or NULL, if caller
2192 * only intends to ensure the pages are faulted in.
2193 * @locked: pointer to lock flag indicating whether lock is held and
2194 * subsequently whether VM_FAULT_RETRY functionality can be
2195 * utilised. Lock must initially be held.
2196 *
2197 * Returns either number of pages pinned (which may be less than the
2198 * number requested), or an error. Details about the return value:
2199 *
2200 * -- If nr_pages is 0, returns 0.
2201 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
2202 * -- If nr_pages is >0, and some pages were pinned, returns the number of
2203 * pages pinned. Again, this may be less than nr_pages.
2204 *
2205 * The caller is responsible for releasing returned @pages, via put_page().
2206 *
2207 * Must be called with mmap_lock held for read or write.
2208 *
2209 * get_user_pages_remote walks a process's page tables and takes a reference
2210 * to each struct page that each user address corresponds to at a given
2211 * instant. That is, it takes the page that would be accessed if a user
2212 * thread accesses the given user virtual address at that instant.
2213 *
2214 * This does not guarantee that the page exists in the user mappings when
2215 * get_user_pages_remote returns, and there may even be a completely different
2216 * page there in some cases (eg. if mmapped pagecache has been invalidated
2217 * and subsequently re-faulted). However it does guarantee that the page
2218 * won't be freed completely. And mostly callers simply care that the page
2219 * contains data that was valid *at some point in time*. Typically, an IO
2220 * or similar operation cannot guarantee anything stronger anyway because
2221 * locks can't be held over the syscall boundary.
2222 *
2223 * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
2224 * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
2225 * be called after the page is finished with, and before put_page is called.
2226 *
2227 * get_user_pages_remote is typically used for fewer-copy IO operations,
2228 * to get a handle on the memory by some means other than accesses
2229 * via the user virtual addresses. The pages may be submitted for
2230 * DMA to devices or accessed via their kernel linear mapping (via the
2231 * kmap APIs). Care should be taken to use the correct cache flushing APIs.
2232 *
2233 * See also get_user_pages_fast, for performance critical applications.
2234 *
2235 * get_user_pages_remote should be phased out in favor of
2236 * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
2237 * should use get_user_pages_remote because it cannot pass
2238 * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
2239 */
2240 long get_user_pages_remote(struct mm_struct *mm,
2241 unsigned long start, unsigned long nr_pages,
2242 unsigned int gup_flags, struct page **pages,
2243 int *locked)
2244 {
2245 int local_locked = 1;
2246
2247 if (!is_valid_gup_args(pages, locked, &gup_flags,
2248 FOLL_TOUCH | FOLL_REMOTE))
2249 return -EINVAL;
2250
2251 return __get_user_pages_locked(mm, start, nr_pages, pages,
2252 locked ? locked : &local_locked,
2253 gup_flags);
2254 }
2255 EXPORT_SYMBOL(get_user_pages_remote);
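
/*
 * Illustrative sketch (hypothetical helper, in the spirit of
 * access_process_vm()): read @len bytes, all within one page, from another
 * process's address space:
 *
 *	static int read_remote(struct mm_struct *mm, unsigned long addr,
 *			       void *buf, size_t len)
 *	{
 *		struct page *page;
 *		void *kaddr;
 *		long ret;
 *
 *		mmap_read_lock(mm);
 *		ret = get_user_pages_remote(mm, addr & PAGE_MASK, 1, 0,
 *					    &page, NULL);
 *		mmap_read_unlock(mm);
 *		if (ret != 1)
 *			return ret < 0 ? (int)ret : -EFAULT;
 *
 *		kaddr = kmap_local_page(page);
 *		memcpy(buf, kaddr + offset_in_page(addr), len);
 *		kunmap_local(kaddr);
 *		put_page(page);
 *		return 0;
 *	}
 */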
2256
2257 #else /* CONFIG_MMU */
2258 long get_user_pages_remote(struct mm_struct *mm,
2259 unsigned long start, unsigned long nr_pages,
2260 unsigned int gup_flags, struct page **pages,
2261 int *locked)
2262 {
2263 return 0;
2264 }
2265 #endif /* !CONFIG_MMU */
2266
2267 /**
2268 * get_user_pages() - pin user pages in memory
2269 * @start: starting user address
2270 * @nr_pages: number of pages from start to pin
2271 * @gup_flags: flags modifying lookup behaviour
2272 * @pages: array that receives pointers to the pages pinned.
2273 * Should be at least nr_pages long. Or NULL, if caller
2274 * only intends to ensure the pages are faulted in.
2275 *
2276 * This is the same as get_user_pages_remote(), just with a less-flexible
2277 * calling convention where we assume that the mm being operated on belongs to
2278 * the current task, and doesn't allow passing of a locked parameter. We also
2279 * obviously don't pass FOLL_REMOTE in here.
2280 */
2281 long get_user_pages(unsigned long start, unsigned long nr_pages,
2282 unsigned int gup_flags, struct page **pages)
2283 {
2284 int locked = 1;
2285
2286 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_TOUCH))
2287 return -EINVAL;
2288
2289 return __get_user_pages_locked(current->mm, start, nr_pages, pages,
2290 &locked, gup_flags);
2291 }
2292 EXPORT_SYMBOL(get_user_pages);
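
/*
 * Illustrative sketch (hypothetical helper): writing through a page
 * returned by get_user_pages() follows the dirtying rule documented for
 * get_user_pages_remote() above -- set_page_dirty_lock() after the write,
 * put_page() last:
 *
 *	static int example_zero_user_page(unsigned long addr)
 *	{
 *		struct page *page;
 *		void *kaddr;
 *		long got;
 *
 *		got = get_user_pages(addr & PAGE_MASK, 1, FOLL_WRITE, &page);
 *		if (got != 1)
 *			return got < 0 ? (int)got : -EFAULT;
 *
 *		kaddr = kmap_local_page(page);
 *		memset(kaddr, 0, PAGE_SIZE);
 *		kunmap_local(kaddr);
 *		set_page_dirty_lock(page);
 *		put_page(page);
 *		return 0;
 *	}
 */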
2293
2294 /*
2295 * get_user_pages_unlocked() is suitable to replace the form:
2296 *
2297 * mmap_read_lock(mm);
2298 * get_user_pages(mm, ..., pages, NULL);
2299 * mmap_read_unlock(mm);
2300 *
2301 * with:
2302 *
2303 * get_user_pages_unlocked(mm, ..., pages);
2304 *
2305 * It is functionally equivalent to get_user_pages_fast, so
2306 * get_user_pages_fast should be used instead if specific gup_flags
2307 * (e.g. FOLL_FORCE) are not required.
2308 */
2309 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
2310 struct page **pages, unsigned int gup_flags)
2311 {
2312 int locked = 0;
2313
2314 if (!is_valid_gup_args(pages, NULL, &gup_flags,
2315 FOLL_TOUCH | FOLL_UNLOCKABLE))
2316 return -EINVAL;
2317
2318 return __get_user_pages_locked(current->mm, start, nr_pages, pages,
2319 &locked, gup_flags);
2320 }
2321 EXPORT_SYMBOL(get_user_pages_unlocked);
2322
2323 /*
2324 * Fast GUP
2325 *
2326 * get_user_pages_fast attempts to pin user pages by walking the page
2327 * tables directly and avoids taking locks. Thus the walker needs to be
2328 * protected from page table pages being freed from under it, and should
2329 * block any THP splits.
2330 *
2331 * One way to achieve this is to have the walker disable interrupts, and
2332 * rely on IPIs from the TLB flushing code blocking before the page table
2333 * pages are freed. This is unsuitable for architectures that do not need
2334 * to broadcast an IPI when invalidating TLBs.
2335 *
2336 * Another way to achieve this is to batch up the pages that contain page
2337 * tables belonging to an mm with more than one mm_user, then free them via
2338 * an rcu_sched callback. Disabling interrupts allows the fast_gup walker to
2339 * block both the rcu_sched callback and the IPI that we broadcast for
2340 * splitting THPs (which is a relatively rare event). The code below adopts this strategy.
2341 *
2342 * Before activating this code, please be aware that the following assumptions
2343 * are currently made:
2344 *
2345 * *) Either MMU_GATHER_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to
2346 * free pages containing page tables or TLB flushing requires IPI broadcast.
2347 *
2348 * *) ptes can be read atomically by the architecture.
2349 *
2350 * *) access_ok is sufficient to validate userspace address ranges.
2351 *
2352 * The last two assumptions can be relaxed by the addition of helper functions.
2353 *
2354 * This code is based heavily on the PowerPC implementation by Nick Piggin.
2355 */
2356 #ifdef CONFIG_HAVE_FAST_GUP
2357
2358 /*
2359 * Used in the GUP-fast path to determine whether a pin is permitted for a
2360 * specific folio.
2361 *
2362 * This call assumes the caller has pinned the folio, that the lowest page table
2363 * level still points to this folio, and that interrupts have been disabled.
2364 *
2365 * Writing to pinned file-backed dirty tracked folios is inherently problematic
2366 * (see comment describing the writable_file_mapping_allowed() function). We
2367 * therefore try to avoid the most egregious case of a long-term mapping doing
2368 * so.
2369 *
2370 * This function cannot be as thorough as that one as the VMA is not available
2371 * in the fast path, so instead we whitelist known good cases and if in doubt,
2372 * fall back to the slow path.
2373 */
2374 static bool folio_fast_pin_allowed(struct folio *folio, unsigned int flags)
2375 {
2376 struct address_space *mapping;
2377 unsigned long mapping_flags;
2378
2379 /*
2380 * If we aren't pinning then no problematic write can occur. A long term
2381 * pin is the most egregious case so this is the one we disallow.
2382 */
2383 if ((flags & (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) !=
2384 (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE))
2385 return true;
2386
2387 /* The folio is pinned, so we can safely access folio fields. */
2388
2389 if (WARN_ON_ONCE(folio_test_slab(folio)))
2390 return false;
2391
2392 /* hugetlb mappings do not require dirty-tracking. */
2393 if (folio_test_hugetlb(folio))
2394 return true;
2395
2396 /*
2397 * GUP-fast disables IRQs. When IRQs are disabled, RCU grace periods
2398 * cannot proceed, which means no actions performed under RCU can
2399 * proceed either.
2400 *
2401 * inodes and thus their mappings are freed under RCU, which means the
2402 * mapping cannot be freed beneath us and thus we can safely dereference
2403 * it.
2404 */
2405 lockdep_assert_irqs_disabled();
2406
2407 /*
2408 * However, there may be operations which _alter_ the mapping, so ensure
2409 * we read it once and only once.
2410 */
2411 mapping = READ_ONCE(folio->mapping);
2412
2413 /*
2414 * The mapping may have been truncated, in any case we cannot determine
2415 * if this mapping is safe - fall back to slow path to determine how to
2416 * proceed.
2417 */
2418 if (!mapping)
2419 return false;
2420
2421 /* Anonymous folios pose no problem. */
2422 mapping_flags = (unsigned long)mapping & PAGE_MAPPING_FLAGS;
2423 if (mapping_flags)
2424 return mapping_flags & PAGE_MAPPING_ANON;
2425
2426 /*
2427 * At this point, we know the mapping is non-null and points to an
2428 * address_space object. The only remaining whitelisted file system is
2429 * shmem.
2430 */
2431 return shmem_mapping(mapping);
2432 }
2433
2434 static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start,
2435 unsigned int flags,
2436 struct page **pages)
2437 {
2438 while ((*nr) - nr_start) {
2439 struct page *page = pages[--(*nr)];
2440
2441 ClearPageReferenced(page);
2442 if (flags & FOLL_PIN)
2443 unpin_user_page(page);
2444 else
2445 put_page(page);
2446 }
2447 }
2448
2449 /**
2450 * try_grab_folio_fast() - Attempt to get or pin a folio in fast path.
2451 * @page: pointer to page to be grabbed
2452 * @refs: the value to (effectively) add to the folio's refcount
2453 * @flags: gup flags: these are the FOLL_* flag values.
2454 *
2455 * "grab" names in this file mean, "look at flags to decide whether to use
2456 * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount.
2457 *
2458 * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the
2459 * same time. (That's true throughout the get_user_pages*() and
2460 * pin_user_pages*() APIs.) Cases:
2461 *
2462 * FOLL_GET: folio's refcount will be incremented by @refs.
2463 *
2464 * FOLL_PIN on large folios: folio's refcount will be incremented by
2465 * @refs, and its pincount will be incremented by @refs.
2466 *
2467 * FOLL_PIN on single-page folios: folio's refcount will be incremented by
2468 * @refs * GUP_PIN_COUNTING_BIAS.
2469 *
2470 * Return: The folio containing @page (with refcount appropriately
2471 * incremented) for success, or NULL upon failure. If neither FOLL_GET
2472 * nor FOLL_PIN was set, that's considered failure, and furthermore,
2473 * a likely bug in the caller, so a warning is also emitted.
2474 *
2475 * It uses add-ref-unless-zero to elevate the folio refcount, and must only
2476 * be called on the fast path.
2477 */
2478 static struct folio *try_grab_folio_fast(struct page *page, int refs,
2479 unsigned int flags)
2480 {
2481 struct folio *folio;
2482
2483 /* Raise warn if it is not called in fast GUP */
2484 VM_WARN_ON_ONCE(!irqs_disabled());
2485
2486 if (WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == 0))
2487 return NULL;
2488
2489 if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)))
2490 return NULL;
2491
2492 if (flags & FOLL_GET)
2493 return try_get_folio(page, refs);
2494
2495 /* FOLL_PIN is set */
2496
2497 /*
2498 * Don't take a pin on the zero page - it's not going anywhere
2499 * and it is used in a *lot* of places.
2500 */
2501 if (is_zero_page(page))
2502 return page_folio(page);
2503
2504 folio = try_get_folio(page, refs);
2505 if (!folio)
2506 return NULL;
2507
2508 /*
2509 * Can't take the FOLL_LONGTERM + FOLL_PIN gup fast path if the
2510 * folio is not in a long-term pinnable zone, so fail and let the
2511 * caller fall back to the slow path.
2512 */
2513 if (unlikely((flags & FOLL_LONGTERM) &&
2514 !folio_is_longterm_pinnable(folio))) {
2515 if (!put_devmap_managed_page_refs(&folio->page, refs))
2516 folio_put_refs(folio, refs);
2517 return NULL;
2518 }
2519
2520 /*
2521 * When pinning a large folio, use an exact count to track it.
2522 *
2523 * However, be sure to *also* increment the normal folio
2524 * refcount field at least once, so that the folio really
2525 * is pinned. That's why the refcount from the earlier
2526 * try_get_folio() is left intact.
2527 */
2528 if (folio_test_large(folio))
2529 atomic_add(refs, &folio->_pincount);
2530 else
2531 folio_ref_add(folio,
2532 refs * (GUP_PIN_COUNTING_BIAS - 1));
2533 /*
2534 * Adjust the pincount before re-checking the PTE for changes.
2535 * This is essentially a smp_mb() and is paired with a memory
2536 * barrier in folio_try_share_anon_rmap_*().
2537 */
2538 smp_mb__after_atomic();
2539
2540 node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);
2541
2542 return folio;
2543 }
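
/*
 * Worked example of the refcount arithmetic above, with
 * GUP_PIN_COUNTING_BIAS == 1024: three FOLL_PIN grabs of a single-page
 * folio add 3 * 1024 == 3072 to its refcount, which is what
 * folio_maybe_dma_pinned() keys off. For a large folio, the same three
 * pins add only 3 to the refcount, plus 3 to folio->_pincount, which
 * tracks pins exactly.
 */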
2544
2545 #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
2546 /*
2547 * Fast-gup relies on pte change detection to avoid concurrent pgtable
2548 * operations.
2549 *
2550 * To pin the page, fast-gup needs to do below in order:
2551 * (1) pin the page (by prefetching pte), then (2) check pte not changed.
2552 *
2553 * For the rest of pgtable operations where pgtable updates can be racy
2554 * with fast-gup, we need to do (1) clear pte, then (2) check whether page
2555 * is pinned.
2556 *
2557 * Above will work for all pte-level operations, including THP split.
2558 *
2559 * For THP collapse, it's a bit more complicated because fast-gup may be
2560 * walking a pgtable page that is being freed (pte is still valid but pmd
2561 * can be cleared already). To avoid race in such condition, we need to
2562 * also check pmd here to make sure pmd doesn't change (corresponds to
2563 * pmdp_collapse_flush() in the THP collapse code path).
2564 */
2565 static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
2566 unsigned long end, unsigned int flags,
2567 struct page **pages, int *nr)
2568 {
2569 struct dev_pagemap *pgmap = NULL;
2570 int nr_start = *nr, ret = 0;
2571 pte_t *ptep, *ptem;
2572
2573 ptem = ptep = pte_offset_map(&pmd, addr);
2574 if (!ptep)
2575 return 0;
2576 do {
2577 pte_t pte = ptep_get_lockless(ptep);
2578 struct page *page;
2579 struct folio *folio;
2580
2581 /*
2582 * Always fall back to ordinary GUP on PROT_NONE-mapped pages:
2583 * pte_access_permitted() should reject these pages either way;
2584 * otherwise, GUP-fast might succeed in cases where ordinary GUP
2585 * would fail due to VMA access permissions.
2587 */
2588 if (pte_protnone(pte))
2589 goto pte_unmap;
2590
2591 if (!pte_access_permitted(pte, flags & FOLL_WRITE))
2592 goto pte_unmap;
2593
2594 if (pte_devmap(pte)) {
2595 if (unlikely(flags & FOLL_LONGTERM))
2596 goto pte_unmap;
2597
2598 pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
2599 if (unlikely(!pgmap)) {
2600 undo_dev_pagemap(nr, nr_start, flags, pages);
2601 goto pte_unmap;
2602 }
2603 } else if (pte_special(pte))
2604 goto pte_unmap;
2605
2606 VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
2607 page = pte_page(pte);
2608
2609 folio = try_grab_folio_fast(page, 1, flags);
2610 if (!folio)
2611 goto pte_unmap;
2612
2613 if (unlikely(folio_is_secretmem(folio))) {
2614 gup_put_folio(folio, 1, flags);
2615 goto pte_unmap;
2616 }
2617
2618 if (unlikely(pmd_val(pmd) != pmd_val(*pmdp)) ||
2619 unlikely(pte_val(pte) != pte_val(ptep_get(ptep)))) {
2620 gup_put_folio(folio, 1, flags);
2621 goto pte_unmap;
2622 }
2623
2624 if (!folio_fast_pin_allowed(folio, flags)) {
2625 gup_put_folio(folio, 1, flags);
2626 goto pte_unmap;
2627 }
2628
2629 if (!pte_write(pte) && gup_must_unshare(NULL, flags, page)) {
2630 gup_put_folio(folio, 1, flags);
2631 goto pte_unmap;
2632 }
2633
2634 /*
2635 * We need to make the page accessible if and only if we are
2636 * going to access its content (the FOLL_PIN case). Please
2637 * see Documentation/core-api/pin_user_pages.rst for
2638 * details.
2639 */
2640 if (flags & FOLL_PIN) {
2641 ret = arch_make_page_accessible(page);
2642 if (ret) {
2643 gup_put_folio(folio, 1, flags);
2644 goto pte_unmap;
2645 }
2646 }
2647 folio_set_referenced(folio);
2648 pages[*nr] = page;
2649 (*nr)++;
2650 } while (ptep++, addr += PAGE_SIZE, addr != end);
2651
2652 ret = 1;
2653
2654 pte_unmap:
2655 if (pgmap)
2656 put_dev_pagemap(pgmap);
2657 pte_unmap(ptem);
2658 return ret;
2659 }
2660 #else
2661
2662 /*
2663 * If we can't determine whether or not a pte is special, then fail immediately
2664 * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
2665 * to be special.
2666 *
2667 * For a futex to be placed on a THP tail page, get_futex_key requires a
2668 * get_user_pages_fast_only implementation that can pin pages. Thus it's still
2669 * useful to have gup_huge_pmd even if we can't operate on ptes.
2670 */
2671 static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
2672 unsigned long end, unsigned int flags,
2673 struct page **pages, int *nr)
2674 {
2675 return 0;
2676 }
2677 #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
2678
2679 #if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
2680 static int __gup_device_huge(unsigned long pfn, unsigned long addr,
2681 unsigned long end, unsigned int flags,
2682 struct page **pages, int *nr)
2683 {
2684 int nr_start = *nr;
2685 struct dev_pagemap *pgmap = NULL;
2686
2687 do {
2688 struct page *page = pfn_to_page(pfn);
2689
2690 pgmap = get_dev_pagemap(pfn, pgmap);
2691 if (unlikely(!pgmap)) {
2692 undo_dev_pagemap(nr, nr_start, flags, pages);
2693 break;
2694 }
2695
2696 if (!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)) {
2697 undo_dev_pagemap(nr, nr_start, flags, pages);
2698 break;
2699 }
2700
2701 SetPageReferenced(page);
2702 pages[*nr] = page;
2703 if (unlikely(try_grab_folio(page_folio(page), 1, flags))) {
2704 undo_dev_pagemap(nr, nr_start, flags, pages);
2705 break;
2706 }
2707 (*nr)++;
2708 pfn++;
2709 } while (addr += PAGE_SIZE, addr != end);
2710
2711 put_dev_pagemap(pgmap);
2712 return addr == end;
2713 }
2714
2715 static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
2716 unsigned long end, unsigned int flags,
2717 struct page **pages, int *nr)
2718 {
2719 unsigned long fault_pfn;
2720 int nr_start = *nr;
2721
2722 fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
2723 if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr))
2724 return 0;
2725
2726 if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
2727 undo_dev_pagemap(nr, nr_start, flags, pages);
2728 return 0;
2729 }
2730 return 1;
2731 }
2732
2733 static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
2734 unsigned long end, unsigned int flags,
2735 struct page **pages, int *nr)
2736 {
2737 unsigned long fault_pfn;
2738 int nr_start = *nr;
2739
2740 fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
2741 if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr))
2742 return 0;
2743
2744 if (unlikely(pud_val(orig) != pud_val(*pudp))) {
2745 undo_dev_pagemap(nr, nr_start, flags, pages);
2746 return 0;
2747 }
2748 return 1;
2749 }
2750 #else
2751 static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
2752 unsigned long end, unsigned int flags,
2753 struct page **pages, int *nr)
2754 {
2755 BUILD_BUG();
2756 return 0;
2757 }
2758
2759 static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
2760 unsigned long end, unsigned int flags,
2761 struct page **pages, int *nr)
2762 {
2763 BUILD_BUG();
2764 return 0;
2765 }
2766 #endif
2767
2768 static int record_subpages(struct page *page, unsigned long addr,
2769 unsigned long end, struct page **pages)
2770 {
2771 int nr;
2772
2773 for (nr = 0; addr != end; nr++, addr += PAGE_SIZE)
2774 pages[nr] = nth_page(page, nr);
2775
2776 return nr;
2777 }
2778
2779 #ifdef CONFIG_ARCH_HAS_HUGEPD
2780 static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
2781 unsigned long sz)
2782 {
2783 unsigned long __boundary = (addr + sz) & ~(sz-1);
2784 return (__boundary - 1 < end - 1) ? __boundary : end;
2785 }
2786
2787 static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
2788 unsigned long end, unsigned int flags,
2789 struct page **pages, int *nr)
2790 {
2791 unsigned long pte_end;
2792 struct page *page;
2793 struct folio *folio;
2794 pte_t pte;
2795 int refs;
2796
2797 pte_end = (addr + sz) & ~(sz-1);
2798 if (pte_end < end)
2799 end = pte_end;
2800
2801 pte = huge_ptep_get(ptep);
2802
2803 if (!pte_access_permitted(pte, flags & FOLL_WRITE))
2804 return 0;
2805
2806 /* hugepages are never "special" */
2807 VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
2808
2809 page = nth_page(pte_page(pte), (addr & (sz - 1)) >> PAGE_SHIFT);
2810 refs = record_subpages(page, addr, end, pages + *nr);
2811
2812 folio = try_grab_folio_fast(page, refs, flags);
2813 if (!folio)
2814 return 0;
2815
2816 if (unlikely(pte_val(pte) != pte_val(ptep_get(ptep)))) {
2817 gup_put_folio(folio, refs, flags);
2818 return 0;
2819 }
2820
2821 if (!folio_fast_pin_allowed(folio, flags)) {
2822 gup_put_folio(folio, refs, flags);
2823 return 0;
2824 }
2825
2826 if (!pte_write(pte) && gup_must_unshare(NULL, flags, &folio->page)) {
2827 gup_put_folio(folio, refs, flags);
2828 return 0;
2829 }
2830
2831 *nr += refs;
2832 folio_set_referenced(folio);
2833 return 1;
2834 }
2835
2836 static int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
2837 unsigned int pdshift, unsigned long end, unsigned int flags,
2838 struct page **pages, int *nr)
2839 {
2840 pte_t *ptep;
2841 unsigned long sz = 1UL << hugepd_shift(hugepd);
2842 unsigned long next;
2843
2844 ptep = hugepte_offset(hugepd, addr, pdshift);
2845 do {
2846 next = hugepte_addr_end(addr, end, sz);
2847 if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr))
2848 return 0;
2849 } while (ptep++, addr = next, addr != end);
2850
2851 return 1;
2852 }
2853 #else
2854 static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
2855 unsigned int pdshift, unsigned long end, unsigned int flags,
2856 struct page **pages, int *nr)
2857 {
2858 return 0;
2859 }
2860 #endif /* CONFIG_ARCH_HAS_HUGEPD */
2861
2862 static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
2863 unsigned long end, unsigned int flags,
2864 struct page **pages, int *nr)
2865 {
2866 struct page *page;
2867 struct folio *folio;
2868 int refs;
2869
2870 if (!pmd_access_permitted(orig, flags & FOLL_WRITE))
2871 return 0;
2872
2873 if (pmd_devmap(orig)) {
2874 if (unlikely(flags & FOLL_LONGTERM))
2875 return 0;
2876 return __gup_device_huge_pmd(orig, pmdp, addr, end, flags,
2877 pages, nr);
2878 }
2879
2880 page = nth_page(pmd_page(orig), (addr & ~PMD_MASK) >> PAGE_SHIFT);
2881 refs = record_subpages(page, addr, end, pages + *nr);
2882
2883 folio = try_grab_folio_fast(page, refs, flags);
2884 if (!folio)
2885 return 0;
2886
2887 if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
2888 gup_put_folio(folio, refs, flags);
2889 return 0;
2890 }
2891
2892 if (!folio_fast_pin_allowed(folio, flags)) {
2893 gup_put_folio(folio, refs, flags);
2894 return 0;
2895 }
2896 if (!pmd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
2897 gup_put_folio(folio, refs, flags);
2898 return 0;
2899 }
2900
2901 *nr += refs;
2902 folio_set_referenced(folio);
2903 return 1;
2904 }
2905
2906 static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
2907 unsigned long end, unsigned int flags,
2908 struct page **pages, int *nr)
2909 {
2910 struct page *page;
2911 struct folio *folio;
2912 int refs;
2913
2914 if (!pud_access_permitted(orig, flags & FOLL_WRITE))
2915 return 0;
2916
2917 if (pud_devmap(orig)) {
2918 if (unlikely(flags & FOLL_LONGTERM))
2919 return 0;
2920 return __gup_device_huge_pud(orig, pudp, addr, end, flags,
2921 pages, nr);
2922 }
2923
2924 page = nth_page(pud_page(orig), (addr & ~PUD_MASK) >> PAGE_SHIFT);
2925 refs = record_subpages(page, addr, end, pages + *nr);
2926
2927 folio = try_grab_folio_fast(page, refs, flags);
2928 if (!folio)
2929 return 0;
2930
2931 if (unlikely(pud_val(orig) != pud_val(*pudp))) {
2932 gup_put_folio(folio, refs, flags);
2933 return 0;
2934 }
2935
2936 if (!folio_fast_pin_allowed(folio, flags)) {
2937 gup_put_folio(folio, refs, flags);
2938 return 0;
2939 }
2940
2941 if (!pud_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
2942 gup_put_folio(folio, refs, flags);
2943 return 0;
2944 }
2945
2946 *nr += refs;
2947 folio_set_referenced(folio);
2948 return 1;
2949 }
2950
2951 static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
2952 unsigned long end, unsigned int flags,
2953 struct page **pages, int *nr)
2954 {
2955 int refs;
2956 struct page *page;
2957 struct folio *folio;
2958
2959 if (!pgd_access_permitted(orig, flags & FOLL_WRITE))
2960 return 0;
2961
2962 BUILD_BUG_ON(pgd_devmap(orig));
2963
2964 page = nth_page(pgd_page(orig), (addr & ~PGDIR_MASK) >> PAGE_SHIFT);
2965 refs = record_subpages(page, addr, end, pages + *nr);
2966
2967 folio = try_grab_folio_fast(page, refs, flags);
2968 if (!folio)
2969 return 0;
2970
2971 if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
2972 gup_put_folio(folio, refs, flags);
2973 return 0;
2974 }
2975
2976 if (!pgd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
2977 gup_put_folio(folio, refs, flags);
2978 return 0;
2979 }
2980
2981 if (!folio_fast_pin_allowed(folio, flags)) {
2982 gup_put_folio(folio, refs, flags);
2983 return 0;
2984 }
2985
2986 *nr += refs;
2987 folio_set_referenced(folio);
2988 return 1;
2989 }
2990
2991 static int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned long end,
2992 unsigned int flags, struct page **pages, int *nr)
2993 {
2994 unsigned long next;
2995 pmd_t *pmdp;
2996
2997 pmdp = pmd_offset_lockless(pudp, pud, addr);
2998 do {
2999 pmd_t pmd = pmdp_get_lockless(pmdp);
3000
3001 next = pmd_addr_end(addr, end);
3002 if (!pmd_present(pmd))
3003 return 0;
3004
3005 if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) ||
3006 pmd_devmap(pmd))) {
3007 /* See gup_pte_range() */
3008 if (pmd_protnone(pmd))
3009 return 0;
3010
3011 if (!gup_huge_pmd(pmd, pmdp, addr, next, flags,
3012 pages, nr))
3013 return 0;
3014
3015 } else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
3016 /*
3017 * architectures may use different formats for the
3018 * hugetlbfs pmd and the THP pmd
3019 */
3020 if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
3021 PMD_SHIFT, next, flags, pages, nr))
3022 return 0;
3023 } else if (!gup_pte_range(pmd, pmdp, addr, next, flags, pages, nr))
3024 return 0;
3025 } while (pmdp++, addr = next, addr != end);
3026
3027 return 1;
3028 }
3029
3030 static int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned long end,
3031 unsigned int flags, struct page **pages, int *nr)
3032 {
3033 unsigned long next;
3034 pud_t *pudp;
3035
3036 pudp = pud_offset_lockless(p4dp, p4d, addr);
3037 do {
3038 pud_t pud = READ_ONCE(*pudp);
3039
3040 next = pud_addr_end(addr, end);
3041 if (unlikely(!pud_present(pud)))
3042 return 0;
3043 if (unlikely(pud_huge(pud) || pud_devmap(pud))) {
3044 if (!gup_huge_pud(pud, pudp, addr, next, flags,
3045 pages, nr))
3046 return 0;
3047 } else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
3048 if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
3049 PUD_SHIFT, next, flags, pages, nr))
3050 return 0;
3051 } else if (!gup_pmd_range(pudp, pud, addr, next, flags, pages, nr))
3052 return 0;
3053 } while (pudp++, addr = next, addr != end);
3054
3055 return 1;
3056 }
3057
3058 static int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, unsigned long end,
3059 unsigned int flags, struct page **pages, int *nr)
3060 {
3061 unsigned long next;
3062 p4d_t *p4dp;
3063
3064 p4dp = p4d_offset_lockless(pgdp, pgd, addr);
3065 do {
3066 p4d_t p4d = READ_ONCE(*p4dp);
3067
3068 next = p4d_addr_end(addr, end);
3069 if (p4d_none(p4d))
3070 return 0;
3071 BUILD_BUG_ON(p4d_huge(p4d));
3072 if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) {
3073 if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr,
3074 P4D_SHIFT, next, flags, pages, nr))
3075 return 0;
3076 } else if (!gup_pud_range(p4dp, p4d, addr, next, flags, pages, nr))
3077 return 0;
3078 } while (p4dp++, addr = next, addr != end);
3079
3080 return 1;
3081 }
3082
3083 static void gup_pgd_range(unsigned long addr, unsigned long end,
3084 unsigned int flags, struct page **pages, int *nr)
3085 {
3086 unsigned long next;
3087 pgd_t *pgdp;
3088
3089 pgdp = pgd_offset(current->mm, addr);
3090 do {
3091 pgd_t pgd = READ_ONCE(*pgdp);
3092
3093 next = pgd_addr_end(addr, end);
3094 if (pgd_none(pgd))
3095 return;
3096 if (unlikely(pgd_huge(pgd))) {
3097 if (!gup_huge_pgd(pgd, pgdp, addr, next, flags,
3098 pages, nr))
3099 return;
3100 } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
3101 if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
3102 PGDIR_SHIFT, next, flags, pages, nr))
3103 return;
3104 } else if (!gup_p4d_range(pgdp, pgd, addr, next, flags, pages, nr))
3105 return;
3106 } while (pgdp++, addr = next, addr != end);
3107 }
3108 #else
3109 static inline void gup_pgd_range(unsigned long addr, unsigned long end,
3110 unsigned int flags, struct page **pages, int *nr)
3111 {
3112 }
3113 #endif /* CONFIG_HAVE_FAST_GUP */
3114
3115 #ifndef gup_fast_permitted
3116 /*
3117 * Check if it's allowed to use get_user_pages_fast_only() for the range, or
3118 * we need to fall back to the slow version:
3119 */
3120 static bool gup_fast_permitted(unsigned long start, unsigned long end)
3121 {
3122 return true;
3123 }
3124 #endif
3125
3126 static unsigned long lockless_pages_from_mm(unsigned long start,
3127 unsigned long end,
3128 unsigned int gup_flags,
3129 struct page **pages)
3130 {
3131 unsigned long flags;
3132 int nr_pinned = 0;
3133 unsigned seq;
3134
3135 if (!IS_ENABLED(CONFIG_HAVE_FAST_GUP) ||
3136 !gup_fast_permitted(start, end))
3137 return 0;
3138
3139 if (gup_flags & FOLL_PIN) {
3140 seq = raw_read_seqcount(&current->mm->write_protect_seq);
3141 if (seq & 1)
3142 return 0;
3143 }
3144
3145 /*
3146 * Disable interrupts. The nested form is used, in order to allow full,
3147 * general purpose use of this routine.
3148 *
3149 * With interrupts disabled, we block page table pages from being freed
3150 * from under us. See struct mmu_table_batch comments in
3151 * include/asm-generic/tlb.h for more details.
3152 *
3153 * We do not adopt an rcu_read_lock() here as we also want to block IPIs
3154 * that come from THPs splitting.
3155 */
3156 local_irq_save(flags);
3157 gup_pgd_range(start, end, gup_flags, pages, &nr_pinned);
3158 local_irq_restore(flags);
3159
3160 /*
3161 * When pinning pages for DMA there could be a concurrent write protect
3162 * from fork() via copy_page_range(); in that case, always fail fast GUP.
3163 */
3164 if (gup_flags & FOLL_PIN) {
3165 if (read_seqcount_retry(&current->mm->write_protect_seq, seq)) {
3166 unpin_user_pages_lockless(pages, nr_pinned);
3167 return 0;
3168 } else {
3169 sanity_check_pinned_pages(pages, nr_pinned);
3170 }
3171 }
3172 return nr_pinned;
3173 }
3174
3175 static int internal_get_user_pages_fast(unsigned long start,
3176 unsigned long nr_pages,
3177 unsigned int gup_flags,
3178 struct page **pages)
3179 {
3180 unsigned long len, end;
3181 unsigned long nr_pinned;
3182 int locked = 0;
3183 int ret;
3184
3185 if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM |
3186 FOLL_FORCE | FOLL_PIN | FOLL_GET |
3187 FOLL_FAST_ONLY | FOLL_NOFAULT |
3188 FOLL_PCI_P2PDMA | FOLL_HONOR_NUMA_FAULT)))
3189 return -EINVAL;
3190
3191 if (gup_flags & FOLL_PIN)
3192 mm_set_has_pinned_flag(&current->mm->flags);
3193
3194 if (!(gup_flags & FOLL_FAST_ONLY))
3195 might_lock_read(&current->mm->mmap_lock);
3196
3197 start = untagged_addr(start) & PAGE_MASK;
3198 len = nr_pages << PAGE_SHIFT;
3199 if (check_add_overflow(start, len, &end))
3200 return -EOVERFLOW;
3201 if (end > TASK_SIZE_MAX)
3202 return -EFAULT;
3203 if (unlikely(!access_ok((void __user *)start, len)))
3204 return -EFAULT;
3205
3206 nr_pinned = lockless_pages_from_mm(start, end, gup_flags, pages);
3207 if (nr_pinned == nr_pages || gup_flags & FOLL_FAST_ONLY)
3208 return nr_pinned;
3209
3210 /* Slow path: try to get the remaining pages with get_user_pages */
3211 start += nr_pinned << PAGE_SHIFT;
3212 pages += nr_pinned;
3213 ret = __gup_longterm_locked(current->mm, start, nr_pages - nr_pinned,
3214 pages, &locked,
3215 gup_flags | FOLL_TOUCH | FOLL_UNLOCKABLE);
3216 if (ret < 0) {
3217 /*
3218 * The caller has to unpin the pages we already pinned so
3219 * returning -errno is not an option
3220 */
3221 if (nr_pinned)
3222 return nr_pinned;
3223 return ret;
3224 }
3225 return ret + nr_pinned;
3226 }
3227
3228 /**
3229 * get_user_pages_fast_only() - pin user pages in memory
3230 * @start: starting user address
3231 * @nr_pages: number of pages from start to pin
3232 * @gup_flags: flags modifying pin behaviour
3233 * @pages: array that receives pointers to the pages pinned.
3234 * Should be at least nr_pages long.
3235 *
3236 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
3237 * the regular GUP.
3238 *
3239 * If the architecture does not support this function, simply return with no
3240 * pages pinned.
3241 *
3242 * Careful, careful! COW breaking can go either way, so a non-write
3243 * access can get ambiguous page results. If you call this function without
3244 * 'write' set, you'd better be sure that you're ok with that ambiguity.
3245 */
3246 int get_user_pages_fast_only(unsigned long start, int nr_pages,
3247 unsigned int gup_flags, struct page **pages)
3248 {
3249 /*
3250 * Internally (within mm/gup.c), gup fast variants must set FOLL_GET,
3251 * because gup fast is always a "pin with a +1 page refcount" request.
3252 *
3253 * FOLL_FAST_ONLY is required in order to match the API description of
3254 * this routine: no fall back to regular ("slow") GUP.
3255 */
3256 if (!is_valid_gup_args(pages, NULL, &gup_flags,
3257 FOLL_GET | FOLL_FAST_ONLY))
3258 return -EINVAL;
3259
3260 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
3261 }
3262 EXPORT_SYMBOL_GPL(get_user_pages_fast_only);
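
/*
 * Illustrative sketch (hypothetical caller): because this variant never
 * sleeps and never falls back to the slow path, it can be used where
 * blocking is forbidden, in the spirit of the futex key code:
 *
 *	if (get_user_pages_fast_only(uaddr, 1, FOLL_WRITE, &page) != 1)
 *		return -EAGAIN;
 *
 * On -EAGAIN the caller leaves its atomic section and retries with a
 * variant that is allowed to fault, such as get_user_pages_fast().
 */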
3263
3264 /**
3265 * get_user_pages_fast() - pin user pages in memory
3266 * @start: starting user address
3267 * @nr_pages: number of pages from start to pin
3268 * @gup_flags: flags modifying pin behaviour
3269 * @pages: array that receives pointers to the pages pinned.
3270 * Should be at least nr_pages long.
3271 *
3272 * Attempt to pin user pages in memory without taking mm->mmap_lock.
3273 * If not successful, it will fall back to taking the lock and
3274 * calling get_user_pages().
3275 *
3276 * Returns number of pages pinned. This may be fewer than the number requested.
3277 * If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns
3278 * -errno.
3279 */
3280 int get_user_pages_fast(unsigned long start, int nr_pages,
3281 unsigned int gup_flags, struct page **pages)
3282 {
3283 /*
3284 * The caller may or may not have explicitly set FOLL_GET; either way is
3285 * OK. However, internally (within mm/gup.c), gup fast variants must set
3286 * FOLL_GET, because gup fast is always a "pin with a +1 page refcount"
3287 * request.
3288 */
3289 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_GET))
3290 return -EINVAL;
3291 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
3292 }
3293 EXPORT_SYMBOL_GPL(get_user_pages_fast);
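
/*
 * Illustrative sketch (hypothetical caller): the return value may be any
 * value in [1, nr_pages], so a caller that needs all-or-nothing must
 * release a partial batch itself:
 *
 *	nr = get_user_pages_fast(start, nr_pages, FOLL_WRITE, pages);
 *	if (nr < 0)
 *		return nr;
 *	if (nr != nr_pages) {
 *		while (nr--)
 *			put_page(pages[nr]);
 *		return -EFAULT;
 *	}
 */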
3294
3295 /**
3296 * pin_user_pages_fast() - pin user pages in memory without taking locks
3297 *
3298 * @start: starting user address
3299 * @nr_pages: number of pages from start to pin
3300 * @gup_flags: flags modifying pin behaviour
3301 * @pages: array that receives pointers to the pages pinned.
3302 * Should be at least nr_pages long.
3303 *
3304 * Nearly the same as get_user_pages_fast(), except that FOLL_PIN is set. See
3305 * get_user_pages_fast() for documentation on the function arguments, because
3306 * the arguments here are identical.
3307 *
3308 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
3309 * see Documentation/core-api/pin_user_pages.rst for further details.
3310 *
3311 * Note that if a zero_page is amongst the returned pages, it will not have
3312 * pins in it and unpin_user_page() will not remove pins from it.
3313 */
3314 int pin_user_pages_fast(unsigned long start, int nr_pages,
3315 unsigned int gup_flags, struct page **pages)
3316 {
3317 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN))
3318 return -EINVAL;
3319 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
3320 }
3321 EXPORT_SYMBOL_GPL(pin_user_pages_fast);
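
/*
 * Illustrative sketch (hypothetical caller), in the spirit of the pin/unpin
 * life cycle from Documentation/core-api/pin_user_pages.rst: pin for a
 * device transfer, then release with the unpin API (never put_page()),
 * marking pages dirty if the device wrote to them:
 *
 *	pinned = pin_user_pages_fast(start, nr_pages,
 *				     FOLL_WRITE | FOLL_LONGTERM, pages);
 *	if (pinned < 0)
 *		return pinned;
 *
 *	(program the device, wait for the transfer to complete)
 *
 *	unpin_user_pages_dirty_lock(pages, pinned, true);
 */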
3322
3323 /**
3324 * pin_user_pages_remote() - pin pages of a remote process
3325 *
3326 * @mm: mm_struct of target mm
3327 * @start: starting user address
3328 * @nr_pages: number of pages from start to pin
3329 * @gup_flags: flags modifying lookup behaviour
3330 * @pages: array that receives pointers to the pages pinned.
3331 * Should be at least nr_pages long.
3332 * @locked: pointer to lock flag indicating whether lock is held and
3333 * subsequently whether VM_FAULT_RETRY functionality can be
3334 * utilised. Lock must initially be held.
3335 *
3336 * Nearly the same as get_user_pages_remote(), except that FOLL_PIN is set. See
3337 * get_user_pages_remote() for documentation on the function arguments, because
3338 * the arguments here are identical.
3339 *
3340 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
3341 * see Documentation/core-api/pin_user_pages.rst for details.
3342 *
3343 * Note that if a zero_page is amongst the returned pages, it will not have
3344 * pins in it and unpin_user_page*() will not remove pins from it.
3345 */
3346 long pin_user_pages_remote(struct mm_struct *mm,
3347 unsigned long start, unsigned long nr_pages,
3348 unsigned int gup_flags, struct page **pages,
3349 int *locked)
3350 {
3351 int local_locked = 1;
3352
3353 if (!is_valid_gup_args(pages, locked, &gup_flags,
3354 FOLL_PIN | FOLL_TOUCH | FOLL_REMOTE))
3355 return 0;
3356 return __gup_longterm_locked(mm, start, nr_pages, pages,
3357 locked ? locked : &local_locked,
3358 gup_flags);
3359 }
3360 EXPORT_SYMBOL(pin_user_pages_remote);
3361
3362 /**
3363 * pin_user_pages() - pin user pages in memory for use by other devices
3364 *
3365 * @start: starting user address
3366 * @nr_pages: number of pages from start to pin
3367 * @gup_flags: flags modifying lookup behaviour
3368 * @pages: array that receives pointers to the pages pinned.
3369 * Should be at least nr_pages long.
3370 *
3371 * Nearly the same as get_user_pages(), except that FOLL_TOUCH is not set, and
3372 * FOLL_PIN is set.
3373 *
3374 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
3375 * see Documentation/core-api/pin_user_pages.rst for details.
3376 *
3377 * Note that if a zero_page is amongst the returned pages, it will not have
3378 * pins in it and unpin_user_page*() will not remove pins from it.
3379 */
3380 long pin_user_pages(unsigned long start, unsigned long nr_pages,
3381 unsigned int gup_flags, struct page **pages)
3382 {
3383 int locked = 1;
3384
3385 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN))
3386 return 0;
3387 return __gup_longterm_locked(current->mm, start, nr_pages,
3388 pages, &locked, gup_flags);
3389 }
3390 EXPORT_SYMBOL(pin_user_pages);
3391
3392 /*
3393 * pin_user_pages_unlocked() is the FOLL_PIN variant of
3394 * get_user_pages_unlocked(). Behavior is the same, except that this one sets
3395 * FOLL_PIN and rejects FOLL_GET.
3396 *
3397 * Note that if a zero_page is amongst the returned pages, it will not have
3398 * pins in it and unpin_user_page*() will not remove pins from it.
3399 */
3400 long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
3401 struct page **pages, unsigned int gup_flags)
3402 {
3403 int locked = 0;
3404
3405 if (!is_valid_gup_args(pages, NULL, &gup_flags,
3406 FOLL_PIN | FOLL_TOUCH | FOLL_UNLOCKABLE))
3407 return 0;
3408
3409 return __gup_longterm_locked(current->mm, start, nr_pages, pages,
3410 &locked, gup_flags);
3411 }
3412 EXPORT_SYMBOL(pin_user_pages_unlocked);
3413