// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>

#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/secretmem.h>

#include <linux/sched/signal.h>
#include <linux/rwsem.h>
#include <linux/hugetlb.h>
#include <linux/migrate.h>
#include <linux/mm_inline.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "internal.h"

struct follow_page_context {
	struct dev_pagemap *pgmap;
	unsigned int page_mask;
};

static inline void sanity_check_pinned_pages(struct page **pages,
					     unsigned long npages)
{
	if (!IS_ENABLED(CONFIG_DEBUG_VM))
		return;

	/*
	 * We only pin anonymous pages if they are exclusive. Once pinned, we
	 * can no longer turn them possibly shared and PageAnonExclusive() will
	 * stick around until the page is freed.
	 *
	 * We'd like to verify that our pinned anonymous pages are still mapped
	 * exclusively. The issue with anon THP is that we don't know how
	 * they are/were mapped when pinning them. However, for anon
	 * THP we can assume that either the given page (PTE-mapped THP) or
	 * the head page (PMD-mapped THP) should be PageAnonExclusive(). If
	 * neither is the case, there is certainly something wrong.
	 */
	for (; npages; npages--, pages++) {
		struct page *page = *pages;
		struct folio *folio = page_folio(page);

		if (is_zero_page(page) ||
		    !folio_test_anon(folio))
			continue;
		if (!folio_test_large(folio) || folio_test_hugetlb(folio))
			VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page), page);
		else
			/* Either a PTE-mapped or a PMD-mapped THP. */
			VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page) &&
				       !PageAnonExclusive(page), page);
	}
}

/*
 * Return the folio with ref appropriately incremented,
 * or NULL if that failed.
 */
static inline struct folio *try_get_folio(struct page *page, int refs)
{
	struct folio *folio;

retry:
	folio = page_folio(page);
	if (WARN_ON_ONCE(folio_ref_count(folio) < 0))
		return NULL;
	if (unlikely(!folio_ref_try_add(folio, refs)))
		return NULL;

	/*
	 * At this point we have a stable reference to the folio; but it
	 * could be that between calling page_folio() and the refcount
	 * increment, the folio was split, in which case we'd end up
	 * holding a reference on a folio that has nothing to do with the page
	 * we were given anymore.
	 * So now that the folio is stable, recheck that the page still
	 * belongs to this folio.
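	 *
	 * A minimal sketch of the resulting contract (illustrative only):
	 * the caller either gets back page_folio(page) with @refs references
	 * added, or NULL with no reference taken at all:
	 *
	 *	folio = try_get_folio(page, 1);
	 *	if (!folio)
	 *		return 0;	lost a race against a split or free
	 *	(from here on, folio == page_folio(page) is stable)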
90 */ 91 if (unlikely(page_folio(page) != folio)) { 92 if (!put_devmap_managed_page_refs(&folio->page, refs)) 93 folio_put_refs(folio, refs); 94 goto retry; 95 } 96 97 return folio; 98 } 99 100 static void gup_put_folio(struct folio *folio, int refs, unsigned int flags) 101 { 102 if (flags & FOLL_PIN) { 103 if (is_zero_folio(folio)) 104 return; 105 node_stat_mod_folio(folio, NR_FOLL_PIN_RELEASED, refs); 106 if (folio_test_large(folio)) 107 atomic_sub(refs, &folio->_pincount); 108 else 109 refs *= GUP_PIN_COUNTING_BIAS; 110 } 111 112 if (!put_devmap_managed_page_refs(&folio->page, refs)) 113 folio_put_refs(folio, refs); 114 } 115 116 /** 117 * try_grab_folio() - add a folio's refcount by a flag-dependent amount 118 * @folio: pointer to folio to be grabbed 119 * @refs: the value to (effectively) add to the folio's refcount 120 * @flags: gup flags: these are the FOLL_* flag values 121 * 122 * This might not do anything at all, depending on the flags argument. 123 * 124 * "grab" names in this file mean, "look at flags to decide whether to use 125 * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount. 126 * 127 * Either FOLL_PIN or FOLL_GET (or neither) may be set, but not both at the same 128 * time. 129 * 130 * Return: 0 for success, or if no action was required (if neither FOLL_PIN 131 * nor FOLL_GET was set, nothing is done). A negative error code for failure: 132 * 133 * -ENOMEM FOLL_GET or FOLL_PIN was set, but the folio could not 134 * be grabbed. 135 * 136 * It is called when we have a stable reference for the folio, typically in 137 * GUP slow path. 138 */ 139 int __must_check try_grab_folio(struct folio *folio, int refs, 140 unsigned int flags) 141 { 142 if (WARN_ON_ONCE(folio_ref_count(folio) <= 0)) 143 return -ENOMEM; 144 145 if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(&folio->page))) 146 return -EREMOTEIO; 147 148 if (flags & FOLL_GET) 149 folio_ref_add(folio, refs); 150 else if (flags & FOLL_PIN) { 151 /* 152 * Don't take a pin on the zero page - it's not going anywhere 153 * and it is used in a *lot* of places. 154 */ 155 if (is_zero_folio(folio)) 156 return 0; 157 158 /* 159 * Increment the normal page refcount field at least once, 160 * so that the page really is pinned. 161 */ 162 if (folio_test_large(folio)) { 163 folio_ref_add(folio, refs); 164 atomic_add(refs, &folio->_pincount); 165 } else { 166 folio_ref_add(folio, refs * GUP_PIN_COUNTING_BIAS); 167 } 168 169 node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs); 170 } 171 172 return 0; 173 } 174 175 /** 176 * unpin_user_page() - release a dma-pinned page 177 * @page: pointer to page to be released 178 * 179 * Pages that were pinned via pin_user_pages*() must be released via either 180 * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so 181 * that such pages can be separately tracked and uniquely handled. In 182 * particular, interactions with RDMA and filesystems need special handling. 183 */ 184 void unpin_user_page(struct page *page) 185 { 186 sanity_check_pinned_pages(&page, 1); 187 gup_put_folio(page_folio(page), 1, FOLL_PIN); 188 } 189 EXPORT_SYMBOL(unpin_user_page); 190 191 /** 192 * folio_add_pin - Try to get an additional pin on a pinned folio 193 * @folio: The folio to be pinned 194 * 195 * Get an additional pin on a folio we already have a pin on. Makes no change 196 * if the folio is a zero_page. 
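 *
 * A minimal usage sketch (illustrative; assumes the caller already holds
 * one pin on @folio, e.g. taken via pin_user_pages()):
 *
 *	folio_add_pin(folio);			take a second pin
 *	...
 *	unpin_user_page(&folio->page);		drop the extra pin again later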
197 */ 198 void folio_add_pin(struct folio *folio) 199 { 200 if (is_zero_folio(folio)) 201 return; 202 203 /* 204 * Similar to try_grab_folio(): be sure to *also* increment the normal 205 * page refcount field at least once, so that the page really is 206 * pinned. 207 */ 208 if (folio_test_large(folio)) { 209 WARN_ON_ONCE(atomic_read(&folio->_pincount) < 1); 210 folio_ref_inc(folio); 211 atomic_inc(&folio->_pincount); 212 } else { 213 WARN_ON_ONCE(folio_ref_count(folio) < GUP_PIN_COUNTING_BIAS); 214 folio_ref_add(folio, GUP_PIN_COUNTING_BIAS); 215 } 216 } 217 218 static inline struct folio *gup_folio_range_next(struct page *start, 219 unsigned long npages, unsigned long i, unsigned int *ntails) 220 { 221 struct page *next = nth_page(start, i); 222 struct folio *folio = page_folio(next); 223 unsigned int nr = 1; 224 225 if (folio_test_large(folio)) 226 nr = min_t(unsigned int, npages - i, 227 folio_nr_pages(folio) - folio_page_idx(folio, next)); 228 229 *ntails = nr; 230 return folio; 231 } 232 233 static inline struct folio *gup_folio_next(struct page **list, 234 unsigned long npages, unsigned long i, unsigned int *ntails) 235 { 236 struct folio *folio = page_folio(list[i]); 237 unsigned int nr; 238 239 for (nr = i + 1; nr < npages; nr++) { 240 if (page_folio(list[nr]) != folio) 241 break; 242 } 243 244 *ntails = nr - i; 245 return folio; 246 } 247 248 /** 249 * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages 250 * @pages: array of pages to be maybe marked dirty, and definitely released. 251 * @npages: number of pages in the @pages array. 252 * @make_dirty: whether to mark the pages dirty 253 * 254 * "gup-pinned page" refers to a page that has had one of the get_user_pages() 255 * variants called on that page. 256 * 257 * For each page in the @pages array, make that page (or its head page, if a 258 * compound page) dirty, if @make_dirty is true, and if the page was previously 259 * listed as clean. In any case, releases all pages using unpin_user_page(), 260 * possibly via unpin_user_pages(), for the non-dirty case. 261 * 262 * Please see the unpin_user_page() documentation for details. 263 * 264 * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is 265 * required, then the caller should a) verify that this is really correct, 266 * because _lock() is usually required, and b) hand code it: 267 * set_page_dirty_lock(), unpin_user_page(). 268 * 269 */ 270 void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages, 271 bool make_dirty) 272 { 273 unsigned long i; 274 struct folio *folio; 275 unsigned int nr; 276 277 if (!make_dirty) { 278 unpin_user_pages(pages, npages); 279 return; 280 } 281 282 sanity_check_pinned_pages(pages, npages); 283 for (i = 0; i < npages; i += nr) { 284 folio = gup_folio_next(pages, npages, i, &nr); 285 /* 286 * Checking PageDirty at this point may race with 287 * clear_page_dirty_for_io(), but that's OK. Two key 288 * cases: 289 * 290 * 1) This code sees the page as already dirty, so it 291 * skips the call to set_page_dirty(). That could happen 292 * because clear_page_dirty_for_io() called 293 * page_mkclean(), followed by set_page_dirty(). 294 * However, now the page is going to get written back, 295 * which meets the original intention of setting it 296 * dirty, so all is well: clear_page_dirty_for_io() goes 297 * on to call TestClearPageDirty(), and write the page 298 * back. 299 * 300 * 2) This code sees the page as clean, so it calls 301 * set_page_dirty(). 
The page stays dirty, despite being 302 * written back, so it gets written back again in the 303 * next writeback cycle. This is harmless. 304 */ 305 if (!folio_test_dirty(folio)) { 306 folio_lock(folio); 307 folio_mark_dirty(folio); 308 folio_unlock(folio); 309 } 310 gup_put_folio(folio, nr, FOLL_PIN); 311 } 312 } 313 EXPORT_SYMBOL(unpin_user_pages_dirty_lock); 314 315 /** 316 * unpin_user_page_range_dirty_lock() - release and optionally dirty 317 * gup-pinned page range 318 * 319 * @page: the starting page of a range maybe marked dirty, and definitely released. 320 * @npages: number of consecutive pages to release. 321 * @make_dirty: whether to mark the pages dirty 322 * 323 * "gup-pinned page range" refers to a range of pages that has had one of the 324 * pin_user_pages() variants called on that page. 325 * 326 * For the page ranges defined by [page .. page+npages], make that range (or 327 * its head pages, if a compound page) dirty, if @make_dirty is true, and if the 328 * page range was previously listed as clean. 329 * 330 * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is 331 * required, then the caller should a) verify that this is really correct, 332 * because _lock() is usually required, and b) hand code it: 333 * set_page_dirty_lock(), unpin_user_page(). 334 * 335 */ 336 void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages, 337 bool make_dirty) 338 { 339 unsigned long i; 340 struct folio *folio; 341 unsigned int nr; 342 343 for (i = 0; i < npages; i += nr) { 344 folio = gup_folio_range_next(page, npages, i, &nr); 345 if (make_dirty && !folio_test_dirty(folio)) { 346 folio_lock(folio); 347 folio_mark_dirty(folio); 348 folio_unlock(folio); 349 } 350 gup_put_folio(folio, nr, FOLL_PIN); 351 } 352 } 353 EXPORT_SYMBOL(unpin_user_page_range_dirty_lock); 354 355 static void unpin_user_pages_lockless(struct page **pages, unsigned long npages) 356 { 357 unsigned long i; 358 struct folio *folio; 359 unsigned int nr; 360 361 /* 362 * Don't perform any sanity checks because we might have raced with 363 * fork() and some anonymous pages might now actually be shared -- 364 * which is why we're unpinning after all. 365 */ 366 for (i = 0; i < npages; i += nr) { 367 folio = gup_folio_next(pages, npages, i, &nr); 368 gup_put_folio(folio, nr, FOLL_PIN); 369 } 370 } 371 372 /** 373 * unpin_user_pages() - release an array of gup-pinned pages. 374 * @pages: array of pages to be marked dirty and released. 375 * @npages: number of pages in the @pages array. 376 * 377 * For each page in the @pages array, release the page using unpin_user_page(). 378 * 379 * Please see the unpin_user_page() documentation for details. 380 */ 381 void unpin_user_pages(struct page **pages, unsigned long npages) 382 { 383 unsigned long i; 384 struct folio *folio; 385 unsigned int nr; 386 387 /* 388 * If this WARN_ON() fires, then the system *might* be leaking pages (by 389 * leaving them pinned), but probably not. More likely, gup/pup returned 390 * a hard -ERRNO error to the caller, who erroneously passed it here. 391 */ 392 if (WARN_ON(IS_ERR_VALUE(npages))) 393 return; 394 395 sanity_check_pinned_pages(pages, npages); 396 for (i = 0; i < npages; i += nr) { 397 folio = gup_folio_next(pages, npages, i, &nr); 398 gup_put_folio(folio, nr, FOLL_PIN); 399 } 400 } 401 EXPORT_SYMBOL(unpin_user_pages); 402 403 /* 404 * Set the MMF_HAS_PINNED if not set yet; after set it'll be there for the mm's 405 * lifecycle. 
Avoid setting the bit unless necessary, or it might cause write 406 * cache bouncing on large SMP machines for concurrent pinned gups. 407 */ 408 static inline void mm_set_has_pinned_flag(unsigned long *mm_flags) 409 { 410 if (!test_bit(MMF_HAS_PINNED, mm_flags)) 411 set_bit(MMF_HAS_PINNED, mm_flags); 412 } 413 414 #ifdef CONFIG_MMU 415 static struct page *no_page_table(struct vm_area_struct *vma, 416 unsigned int flags) 417 { 418 /* 419 * When core dumping an enormous anonymous area that nobody 420 * has touched so far, we don't want to allocate unnecessary pages or 421 * page tables. Return error instead of NULL to skip handle_mm_fault, 422 * then get_dump_page() will return NULL to leave a hole in the dump. 423 * But we can only make this optimization where a hole would surely 424 * be zero-filled if handle_mm_fault() actually did handle it. 425 */ 426 if ((flags & FOLL_DUMP) && 427 (vma_is_anonymous(vma) || !vma->vm_ops->fault)) 428 return ERR_PTR(-EFAULT); 429 return NULL; 430 } 431 432 static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address, 433 pte_t *pte, unsigned int flags) 434 { 435 if (flags & FOLL_TOUCH) { 436 pte_t orig_entry = ptep_get(pte); 437 pte_t entry = orig_entry; 438 439 if (flags & FOLL_WRITE) 440 entry = pte_mkdirty(entry); 441 entry = pte_mkyoung(entry); 442 443 if (!pte_same(orig_entry, entry)) { 444 set_pte_at(vma->vm_mm, address, pte, entry); 445 update_mmu_cache(vma, address, pte); 446 } 447 } 448 449 /* Proper page table entry exists, but no corresponding struct page */ 450 return -EEXIST; 451 } 452 453 /* FOLL_FORCE can write to even unwritable PTEs in COW mappings. */ 454 static inline bool can_follow_write_pte(pte_t pte, struct page *page, 455 struct vm_area_struct *vma, 456 unsigned int flags) 457 { 458 /* If the pte is writable, we can write to the page. */ 459 if (pte_write(pte)) 460 return true; 461 462 /* Maybe FOLL_FORCE is set to override it? */ 463 if (!(flags & FOLL_FORCE)) 464 return false; 465 466 /* But FOLL_FORCE has no effect on shared mappings */ 467 if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED)) 468 return false; 469 470 /* ... or read-only private ones */ 471 if (!(vma->vm_flags & VM_MAYWRITE)) 472 return false; 473 474 /* ... or already writable ones that just need to take a write fault */ 475 if (vma->vm_flags & VM_WRITE) 476 return false; 477 478 /* 479 * See can_change_pte_writable(): we broke COW and could map the page 480 * writable if we have an exclusive anonymous page ... 481 */ 482 if (!page || !PageAnon(page) || !PageAnonExclusive(page)) 483 return false; 484 485 /* ... and a write-fault isn't required for other reasons. */ 486 if (vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte)) 487 return false; 488 return !userfaultfd_pte_wp(vma, pte); 489 } 490 491 static struct page *follow_page_pte(struct vm_area_struct *vma, 492 unsigned long address, pmd_t *pmd, unsigned int flags, 493 struct dev_pagemap **pgmap) 494 { 495 struct mm_struct *mm = vma->vm_mm; 496 struct page *page; 497 spinlock_t *ptl; 498 pte_t *ptep, pte; 499 int ret; 500 501 /* FOLL_GET and FOLL_PIN are mutually exclusive. 
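	 *
	 * Illustrative examples of @flags combinations seen here (not
	 * exhaustive):
	 *
	 *	FOLL_GET | FOLL_WRITE	get_user_pages()-style reference
	 *	FOLL_PIN | FOLL_WRITE	pin_user_pages()-style pin
	 *	FOLL_PIN | FOLL_GET	rejected just below with -EINVAL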
*/ 502 if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) == 503 (FOLL_PIN | FOLL_GET))) 504 return ERR_PTR(-EINVAL); 505 506 ptep = pte_offset_map_lock(mm, pmd, address, &ptl); 507 if (!ptep) 508 return no_page_table(vma, flags); 509 pte = ptep_get(ptep); 510 if (!pte_present(pte)) 511 goto no_page; 512 if (pte_protnone(pte) && !gup_can_follow_protnone(vma, flags)) 513 goto no_page; 514 515 page = vm_normal_page(vma, address, pte); 516 517 /* 518 * We only care about anon pages in can_follow_write_pte() and don't 519 * have to worry about pte_devmap() because they are never anon. 520 */ 521 if ((flags & FOLL_WRITE) && 522 !can_follow_write_pte(pte, page, vma, flags)) { 523 page = NULL; 524 goto out; 525 } 526 527 if (!page && pte_devmap(pte) && (flags & (FOLL_GET | FOLL_PIN))) { 528 /* 529 * Only return device mapping pages in the FOLL_GET or FOLL_PIN 530 * case since they are only valid while holding the pgmap 531 * reference. 532 */ 533 *pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap); 534 if (*pgmap) 535 page = pte_page(pte); 536 else 537 goto no_page; 538 } else if (unlikely(!page)) { 539 if (flags & FOLL_DUMP) { 540 /* Avoid special (like zero) pages in core dumps */ 541 page = ERR_PTR(-EFAULT); 542 goto out; 543 } 544 545 if (is_zero_pfn(pte_pfn(pte))) { 546 page = pte_page(pte); 547 } else { 548 ret = follow_pfn_pte(vma, address, ptep, flags); 549 page = ERR_PTR(ret); 550 goto out; 551 } 552 } 553 554 if (!pte_write(pte) && gup_must_unshare(vma, flags, page)) { 555 page = ERR_PTR(-EMLINK); 556 goto out; 557 } 558 559 VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) && 560 !PageAnonExclusive(page), page); 561 562 /* try_grab_folio() does nothing unless FOLL_GET or FOLL_PIN is set. */ 563 ret = try_grab_folio(page_folio(page), 1, flags); 564 if (unlikely(ret)) { 565 page = ERR_PTR(ret); 566 goto out; 567 } 568 569 /* 570 * We need to make the page accessible if and only if we are going 571 * to access its content (the FOLL_PIN case). Please see 572 * Documentation/core-api/pin_user_pages.rst for details. 573 */ 574 if (flags & FOLL_PIN) { 575 ret = arch_make_page_accessible(page); 576 if (ret) { 577 unpin_user_page(page); 578 page = ERR_PTR(ret); 579 goto out; 580 } 581 } 582 if (flags & FOLL_TOUCH) { 583 if ((flags & FOLL_WRITE) && 584 !pte_dirty(pte) && !PageDirty(page)) 585 set_page_dirty(page); 586 /* 587 * pte_mkyoung() would be more correct here, but atomic care 588 * is needed to avoid losing the dirty bit: it is easier to use 589 * mark_page_accessed(). 
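		 *
		 * Net effect of FOLL_TOUCH | FOLL_WRITE, informally:
		 *
		 *	set_page_dirty(page);		only if neither the pte
		 *					nor the page was dirty
		 *	mark_page_accessed(page);	LRU-level reference, not
		 *					the pte young bit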
590 */ 591 mark_page_accessed(page); 592 } 593 out: 594 pte_unmap_unlock(ptep, ptl); 595 return page; 596 no_page: 597 pte_unmap_unlock(ptep, ptl); 598 if (!pte_none(pte)) 599 return NULL; 600 return no_page_table(vma, flags); 601 } 602 603 static struct page *follow_pmd_mask(struct vm_area_struct *vma, 604 unsigned long address, pud_t *pudp, 605 unsigned int flags, 606 struct follow_page_context *ctx) 607 { 608 pmd_t *pmd, pmdval; 609 spinlock_t *ptl; 610 struct page *page; 611 struct mm_struct *mm = vma->vm_mm; 612 613 pmd = pmd_offset(pudp, address); 614 pmdval = pmdp_get_lockless(pmd); 615 if (pmd_none(pmdval)) 616 return no_page_table(vma, flags); 617 if (!pmd_present(pmdval)) 618 return no_page_table(vma, flags); 619 if (pmd_devmap(pmdval)) { 620 ptl = pmd_lock(mm, pmd); 621 page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap); 622 spin_unlock(ptl); 623 if (page) 624 return page; 625 } 626 if (likely(!pmd_trans_huge(pmdval))) 627 return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); 628 629 if (pmd_protnone(pmdval) && !gup_can_follow_protnone(vma, flags)) 630 return no_page_table(vma, flags); 631 632 ptl = pmd_lock(mm, pmd); 633 if (unlikely(!pmd_present(*pmd))) { 634 spin_unlock(ptl); 635 return no_page_table(vma, flags); 636 } 637 if (unlikely(!pmd_trans_huge(*pmd))) { 638 spin_unlock(ptl); 639 return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); 640 } 641 if (flags & FOLL_SPLIT_PMD) { 642 spin_unlock(ptl); 643 split_huge_pmd(vma, pmd, address); 644 /* If pmd was left empty, stuff a page table in there quickly */ 645 return pte_alloc(mm, pmd) ? ERR_PTR(-ENOMEM) : 646 follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); 647 } 648 page = follow_trans_huge_pmd(vma, address, pmd, flags); 649 spin_unlock(ptl); 650 ctx->page_mask = HPAGE_PMD_NR - 1; 651 return page; 652 } 653 654 static struct page *follow_pud_mask(struct vm_area_struct *vma, 655 unsigned long address, p4d_t *p4dp, 656 unsigned int flags, 657 struct follow_page_context *ctx) 658 { 659 pud_t *pud; 660 spinlock_t *ptl; 661 struct page *page; 662 struct mm_struct *mm = vma->vm_mm; 663 664 pud = pud_offset(p4dp, address); 665 if (pud_none(*pud)) 666 return no_page_table(vma, flags); 667 if (pud_devmap(*pud)) { 668 ptl = pud_lock(mm, pud); 669 page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap); 670 spin_unlock(ptl); 671 if (page) 672 return page; 673 } 674 if (unlikely(pud_bad(*pud))) 675 return no_page_table(vma, flags); 676 677 return follow_pmd_mask(vma, address, pud, flags, ctx); 678 } 679 680 static struct page *follow_p4d_mask(struct vm_area_struct *vma, 681 unsigned long address, pgd_t *pgdp, 682 unsigned int flags, 683 struct follow_page_context *ctx) 684 { 685 p4d_t *p4d; 686 687 p4d = p4d_offset(pgdp, address); 688 if (p4d_none(*p4d)) 689 return no_page_table(vma, flags); 690 BUILD_BUG_ON(p4d_huge(*p4d)); 691 if (unlikely(p4d_bad(*p4d))) 692 return no_page_table(vma, flags); 693 694 return follow_pud_mask(vma, address, p4d, flags, ctx); 695 } 696 697 /** 698 * follow_page_mask - look up a page descriptor from a user-virtual address 699 * @vma: vm_area_struct mapping @address 700 * @address: virtual address to look up 701 * @flags: flags modifying lookup behaviour 702 * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a 703 * pointer to output page_mask 704 * 705 * @flags can have FOLL_ flags set, defined in <linux/mm.h> 706 * 707 * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches 708 * the device's dev_pagemap metadata to avoid repeating 
expensive lookups. 709 * 710 * When getting an anonymous page and the caller has to trigger unsharing 711 * of a shared anonymous page first, -EMLINK is returned. The caller should 712 * trigger a fault with FAULT_FLAG_UNSHARE set. Note that unsharing is only 713 * relevant with FOLL_PIN and !FOLL_WRITE. 714 * 715 * On output, the @ctx->page_mask is set according to the size of the page. 716 * 717 * Return: the mapped (struct page *), %NULL if no mapping exists, or 718 * an error pointer if there is a mapping to something not represented 719 * by a page descriptor (see also vm_normal_page()). 720 */ 721 static struct page *follow_page_mask(struct vm_area_struct *vma, 722 unsigned long address, unsigned int flags, 723 struct follow_page_context *ctx) 724 { 725 pgd_t *pgd; 726 struct mm_struct *mm = vma->vm_mm; 727 728 ctx->page_mask = 0; 729 730 /* 731 * Call hugetlb_follow_page_mask for hugetlb vmas as it will use 732 * special hugetlb page table walking code. This eliminates the 733 * need to check for hugetlb entries in the general walking code. 734 */ 735 if (is_vm_hugetlb_page(vma)) 736 return hugetlb_follow_page_mask(vma, address, flags, 737 &ctx->page_mask); 738 739 pgd = pgd_offset(mm, address); 740 741 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) 742 return no_page_table(vma, flags); 743 744 return follow_p4d_mask(vma, address, pgd, flags, ctx); 745 } 746 747 struct page *follow_page(struct vm_area_struct *vma, unsigned long address, 748 unsigned int foll_flags) 749 { 750 struct follow_page_context ctx = { NULL }; 751 struct page *page; 752 753 if (vma_is_secretmem(vma)) 754 return NULL; 755 756 if (WARN_ON_ONCE(foll_flags & FOLL_PIN)) 757 return NULL; 758 759 /* 760 * We never set FOLL_HONOR_NUMA_FAULT because callers don't expect 761 * to fail on PROT_NONE-mapped pages. 762 */ 763 page = follow_page_mask(vma, address, foll_flags, &ctx); 764 if (ctx.pgmap) 765 put_dev_pagemap(ctx.pgmap); 766 return page; 767 } 768 769 static int get_gate_page(struct mm_struct *mm, unsigned long address, 770 unsigned int gup_flags, struct vm_area_struct **vma, 771 struct page **page) 772 { 773 pgd_t *pgd; 774 p4d_t *p4d; 775 pud_t *pud; 776 pmd_t *pmd; 777 pte_t *pte; 778 pte_t entry; 779 int ret = -EFAULT; 780 781 /* user gate pages are read-only */ 782 if (gup_flags & FOLL_WRITE) 783 return -EFAULT; 784 if (address > TASK_SIZE) 785 pgd = pgd_offset_k(address); 786 else 787 pgd = pgd_offset_gate(mm, address); 788 if (pgd_none(*pgd)) 789 return -EFAULT; 790 p4d = p4d_offset(pgd, address); 791 if (p4d_none(*p4d)) 792 return -EFAULT; 793 pud = pud_offset(p4d, address); 794 if (pud_none(*pud)) 795 return -EFAULT; 796 pmd = pmd_offset(pud, address); 797 if (!pmd_present(*pmd)) 798 return -EFAULT; 799 pte = pte_offset_map(pmd, address); 800 if (!pte) 801 return -EFAULT; 802 entry = ptep_get(pte); 803 if (pte_none(entry)) 804 goto unmap; 805 *vma = get_gate_vma(mm); 806 if (!page) 807 goto out; 808 *page = vm_normal_page(*vma, address, entry); 809 if (!*page) { 810 if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(entry))) 811 goto unmap; 812 *page = pte_page(entry); 813 } 814 ret = try_grab_folio(page_folio(*page), 1, gup_flags); 815 if (unlikely(ret)) 816 goto unmap; 817 out: 818 ret = 0; 819 unmap: 820 pte_unmap(pte); 821 return ret; 822 } 823 824 /* 825 * mmap_lock must be held on entry. If @flags has FOLL_UNLOCKABLE but not 826 * FOLL_NOWAIT, the mmap_lock may be released. If it is, *@locked will be set 827 * to 0 and -EBUSY returned. 
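 *
 * A simplified sketch of how __get_user_pages() consumes the result
 * (illustrative, error handling trimmed):
 *
 *	ret = faultin_page(vma, start, &foll_flags, false, locked);
 *	if (ret == 0)
 *		goto retry;		fault handled, walk the table again
 *	if (ret == -EBUSY || ret == -EAGAIN)
 *		ret = 0;		lock was dropped, stop early
 *	goto out;			other errors are passed up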
828 */ 829 static int faultin_page(struct vm_area_struct *vma, 830 unsigned long address, unsigned int *flags, bool unshare, 831 int *locked) 832 { 833 unsigned int fault_flags = 0; 834 vm_fault_t ret; 835 836 if (*flags & FOLL_NOFAULT) 837 return -EFAULT; 838 if (*flags & FOLL_WRITE) 839 fault_flags |= FAULT_FLAG_WRITE; 840 if (*flags & FOLL_REMOTE) 841 fault_flags |= FAULT_FLAG_REMOTE; 842 if (*flags & FOLL_UNLOCKABLE) { 843 fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 844 /* 845 * FAULT_FLAG_INTERRUPTIBLE is opt-in. GUP callers must set 846 * FOLL_INTERRUPTIBLE to enable FAULT_FLAG_INTERRUPTIBLE. 847 * That's because some callers may not be prepared to 848 * handle early exits caused by non-fatal signals. 849 */ 850 if (*flags & FOLL_INTERRUPTIBLE) 851 fault_flags |= FAULT_FLAG_INTERRUPTIBLE; 852 } 853 if (*flags & FOLL_NOWAIT) 854 fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT; 855 if (*flags & FOLL_TRIED) { 856 /* 857 * Note: FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_TRIED 858 * can co-exist 859 */ 860 fault_flags |= FAULT_FLAG_TRIED; 861 } 862 if (unshare) { 863 fault_flags |= FAULT_FLAG_UNSHARE; 864 /* FAULT_FLAG_WRITE and FAULT_FLAG_UNSHARE are incompatible */ 865 VM_BUG_ON(fault_flags & FAULT_FLAG_WRITE); 866 } 867 868 ret = handle_mm_fault(vma, address, fault_flags, NULL); 869 870 if (ret & VM_FAULT_COMPLETED) { 871 /* 872 * With FAULT_FLAG_RETRY_NOWAIT we'll never release the 873 * mmap lock in the page fault handler. Sanity check this. 874 */ 875 WARN_ON_ONCE(fault_flags & FAULT_FLAG_RETRY_NOWAIT); 876 *locked = 0; 877 878 /* 879 * We should do the same as VM_FAULT_RETRY, but let's not 880 * return -EBUSY since that's not reflecting the reality of 881 * what has happened - we've just fully completed a page 882 * fault, with the mmap lock released. Use -EAGAIN to show 883 * that we want to take the mmap lock _again_. 884 */ 885 return -EAGAIN; 886 } 887 888 if (ret & VM_FAULT_ERROR) { 889 int err = vm_fault_to_errno(ret, *flags); 890 891 if (err) 892 return err; 893 BUG(); 894 } 895 896 if (ret & VM_FAULT_RETRY) { 897 if (!(fault_flags & FAULT_FLAG_RETRY_NOWAIT)) 898 *locked = 0; 899 return -EBUSY; 900 } 901 902 return 0; 903 } 904 905 /* 906 * Writing to file-backed mappings which require folio dirty tracking using GUP 907 * is a fundamentally broken operation, as kernel write access to GUP mappings 908 * do not adhere to the semantics expected by a file system. 909 * 910 * Consider the following scenario:- 911 * 912 * 1. A folio is written to via GUP which write-faults the memory, notifying 913 * the file system and dirtying the folio. 914 * 2. Later, writeback is triggered, resulting in the folio being cleaned and 915 * the PTE being marked read-only. 916 * 3. The GUP caller writes to the folio, as it is mapped read/write via the 917 * direct mapping. 918 * 4. The GUP caller, now done with the page, unpins it and sets it dirty 919 * (though it does not have to). 920 * 921 * This results in both data being written to a folio without writenotify, and 922 * the folio being dirtied unexpectedly (if the caller decides to do so). 923 */ 924 static bool writable_file_mapping_allowed(struct vm_area_struct *vma, 925 unsigned long gup_flags) 926 { 927 /* 928 * If we aren't pinning then no problematic write can occur. A long term 929 * pin is the most egregious case so this is the case we disallow. 
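	 *
	 * Example outcomes (illustrative):
	 *
	 *	pin_user_pages(addr, 1, FOLL_WRITE | FOLL_LONGTERM, &page)
	 *		fails with -EFAULT on a shared mapping of a file that
	 *		needs dirty tracking
	 *	get_user_pages(addr, 1, FOLL_WRITE, &page)
	 *		is allowed through: no FOLL_PIN, so this helper
	 *		returns true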
930 */ 931 if ((gup_flags & (FOLL_PIN | FOLL_LONGTERM)) != 932 (FOLL_PIN | FOLL_LONGTERM)) 933 return true; 934 935 /* 936 * If the VMA does not require dirty tracking then no problematic write 937 * can occur either. 938 */ 939 return !vma_needs_dirty_tracking(vma); 940 } 941 942 static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) 943 { 944 vm_flags_t vm_flags = vma->vm_flags; 945 int write = (gup_flags & FOLL_WRITE); 946 int foreign = (gup_flags & FOLL_REMOTE); 947 bool vma_anon = vma_is_anonymous(vma); 948 949 if (vm_flags & (VM_IO | VM_PFNMAP)) 950 return -EFAULT; 951 952 if ((gup_flags & FOLL_ANON) && !vma_anon) 953 return -EFAULT; 954 955 if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma)) 956 return -EOPNOTSUPP; 957 958 if (vma_is_secretmem(vma)) 959 return -EFAULT; 960 961 if (write) { 962 if (!vma_anon && 963 !writable_file_mapping_allowed(vma, gup_flags)) 964 return -EFAULT; 965 966 if (!(vm_flags & VM_WRITE) || (vm_flags & VM_SHADOW_STACK)) { 967 if (!(gup_flags & FOLL_FORCE)) 968 return -EFAULT; 969 /* hugetlb does not support FOLL_FORCE|FOLL_WRITE. */ 970 if (is_vm_hugetlb_page(vma)) 971 return -EFAULT; 972 /* 973 * We used to let the write,force case do COW in a 974 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could 975 * set a breakpoint in a read-only mapping of an 976 * executable, without corrupting the file (yet only 977 * when that file had been opened for writing!). 978 * Anon pages in shared mappings are surprising: now 979 * just reject it. 980 */ 981 if (!is_cow_mapping(vm_flags)) 982 return -EFAULT; 983 } 984 } else if (!(vm_flags & VM_READ)) { 985 if (!(gup_flags & FOLL_FORCE)) 986 return -EFAULT; 987 /* 988 * Is there actually any vma we can reach here which does not 989 * have VM_MAYREAD set? 990 */ 991 if (!(vm_flags & VM_MAYREAD)) 992 return -EFAULT; 993 } 994 /* 995 * gups are always data accesses, not instruction 996 * fetches, so execute=false here 997 */ 998 if (!arch_vma_access_permitted(vma, write, false, foreign)) 999 return -EFAULT; 1000 return 0; 1001 } 1002 1003 /* 1004 * This is "vma_lookup()", but with a warning if we would have 1005 * historically expanded the stack in the GUP code. 1006 */ 1007 static struct vm_area_struct *gup_vma_lookup(struct mm_struct *mm, 1008 unsigned long addr) 1009 { 1010 #ifdef CONFIG_STACK_GROWSUP 1011 return vma_lookup(mm, addr); 1012 #else 1013 static volatile unsigned long next_warn; 1014 struct vm_area_struct *vma; 1015 unsigned long now, next; 1016 1017 vma = find_vma(mm, addr); 1018 if (!vma || (addr >= vma->vm_start)) 1019 return vma; 1020 1021 /* Only warn for half-way relevant accesses */ 1022 if (!(vma->vm_flags & VM_GROWSDOWN)) 1023 return NULL; 1024 if (vma->vm_start - addr > 65536) 1025 return NULL; 1026 1027 /* Let's not warn more than once an hour.. */ 1028 now = jiffies; next = next_warn; 1029 if (next && time_before(now, next)) 1030 return NULL; 1031 next_warn = now + 60*60*HZ; 1032 1033 /* Let people know things may have changed. */ 1034 pr_warn("GUP no longer grows the stack in %s (%d): %lx-%lx (%lx)\n", 1035 current->comm, task_pid_nr(current), 1036 vma->vm_start, vma->vm_end, addr); 1037 dump_stack(); 1038 return NULL; 1039 #endif 1040 } 1041 1042 /** 1043 * __get_user_pages() - pin user pages in memory 1044 * @mm: mm_struct of target mm 1045 * @start: starting user address 1046 * @nr_pages: number of pages from start to pin 1047 * @gup_flags: flags modifying pin behaviour 1048 * @pages: array that receives pointers to the pages pinned. 
1049 * Should be at least nr_pages long. Or NULL, if caller 1050 * only intends to ensure the pages are faulted in. 1051 * @locked: whether we're still with the mmap_lock held 1052 * 1053 * Returns either number of pages pinned (which may be less than the 1054 * number requested), or an error. Details about the return value: 1055 * 1056 * -- If nr_pages is 0, returns 0. 1057 * -- If nr_pages is >0, but no pages were pinned, returns -errno. 1058 * -- If nr_pages is >0, and some pages were pinned, returns the number of 1059 * pages pinned. Again, this may be less than nr_pages. 1060 * -- 0 return value is possible when the fault would need to be retried. 1061 * 1062 * The caller is responsible for releasing returned @pages, via put_page(). 1063 * 1064 * Must be called with mmap_lock held. It may be released. See below. 1065 * 1066 * __get_user_pages walks a process's page tables and takes a reference to 1067 * each struct page that each user address corresponds to at a given 1068 * instant. That is, it takes the page that would be accessed if a user 1069 * thread accesses the given user virtual address at that instant. 1070 * 1071 * This does not guarantee that the page exists in the user mappings when 1072 * __get_user_pages returns, and there may even be a completely different 1073 * page there in some cases (eg. if mmapped pagecache has been invalidated 1074 * and subsequently re-faulted). However it does guarantee that the page 1075 * won't be freed completely. And mostly callers simply care that the page 1076 * contains data that was valid *at some point in time*. Typically, an IO 1077 * or similar operation cannot guarantee anything stronger anyway because 1078 * locks can't be held over the syscall boundary. 1079 * 1080 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If 1081 * the page is written to, set_page_dirty (or set_page_dirty_lock, as 1082 * appropriate) must be called after the page is finished with, and 1083 * before put_page is called. 1084 * 1085 * If FOLL_UNLOCKABLE is set without FOLL_NOWAIT then the mmap_lock may 1086 * be released. If this happens *@locked will be set to 0 on return. 1087 * 1088 * A caller using such a combination of @gup_flags must therefore hold the 1089 * mmap_lock for reading only, and recognize when it's been released. Otherwise, 1090 * it must be held for either reading or writing and will not be released. 1091 * 1092 * In most cases, get_user_pages or get_user_pages_fast should be used 1093 * instead of __get_user_pages. __get_user_pages should be used only if 1094 * you need some special @gup_flags. 1095 */ 1096 static long __get_user_pages(struct mm_struct *mm, 1097 unsigned long start, unsigned long nr_pages, 1098 unsigned int gup_flags, struct page **pages, 1099 int *locked) 1100 { 1101 long ret = 0, i = 0; 1102 struct vm_area_struct *vma = NULL; 1103 struct follow_page_context ctx = { NULL }; 1104 1105 if (!nr_pages) 1106 return 0; 1107 1108 start = untagged_addr_remote(mm, start); 1109 1110 VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN))); 1111 1112 do { 1113 struct page *page; 1114 unsigned int foll_flags = gup_flags; 1115 unsigned int page_increm; 1116 1117 /* first iteration or cross vma bound */ 1118 if (!vma || start >= vma->vm_end) { 1119 /* 1120 * MADV_POPULATE_(READ|WRITE) wants to handle VMA 1121 * lookups+error reporting differently. 
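			 *
			 * In this file, the only caller that sets
			 * FOLL_MADV_POPULATE is faultin_page_range(), roughly:
			 *
			 *	gup_flags = FOLL_TOUCH | FOLL_HWPOISON |
			 *		    FOLL_UNLOCKABLE | FOLL_MADV_POPULATE;
			 *	__get_user_pages_locked(mm, start, nr_pages,
			 *				NULL, locked, gup_flags);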
1122 */ 1123 if (gup_flags & FOLL_MADV_POPULATE) { 1124 vma = vma_lookup(mm, start); 1125 if (!vma) { 1126 ret = -ENOMEM; 1127 goto out; 1128 } 1129 if (check_vma_flags(vma, gup_flags)) { 1130 ret = -EINVAL; 1131 goto out; 1132 } 1133 goto retry; 1134 } 1135 vma = gup_vma_lookup(mm, start); 1136 if (!vma && in_gate_area(mm, start)) { 1137 ret = get_gate_page(mm, start & PAGE_MASK, 1138 gup_flags, &vma, 1139 pages ? &page : NULL); 1140 if (ret) 1141 goto out; 1142 ctx.page_mask = 0; 1143 goto next_page; 1144 } 1145 1146 if (!vma) { 1147 ret = -EFAULT; 1148 goto out; 1149 } 1150 ret = check_vma_flags(vma, gup_flags); 1151 if (ret) 1152 goto out; 1153 } 1154 retry: 1155 /* 1156 * If we have a pending SIGKILL, don't keep faulting pages and 1157 * potentially allocating memory. 1158 */ 1159 if (fatal_signal_pending(current)) { 1160 ret = -EINTR; 1161 goto out; 1162 } 1163 cond_resched(); 1164 1165 page = follow_page_mask(vma, start, foll_flags, &ctx); 1166 if (!page || PTR_ERR(page) == -EMLINK) { 1167 ret = faultin_page(vma, start, &foll_flags, 1168 PTR_ERR(page) == -EMLINK, locked); 1169 switch (ret) { 1170 case 0: 1171 goto retry; 1172 case -EBUSY: 1173 case -EAGAIN: 1174 ret = 0; 1175 fallthrough; 1176 case -EFAULT: 1177 case -ENOMEM: 1178 case -EHWPOISON: 1179 goto out; 1180 } 1181 BUG(); 1182 } else if (PTR_ERR(page) == -EEXIST) { 1183 /* 1184 * Proper page table entry exists, but no corresponding 1185 * struct page. If the caller expects **pages to be 1186 * filled in, bail out now, because that can't be done 1187 * for this page. 1188 */ 1189 if (pages) { 1190 ret = PTR_ERR(page); 1191 goto out; 1192 } 1193 } else if (IS_ERR(page)) { 1194 ret = PTR_ERR(page); 1195 goto out; 1196 } 1197 next_page: 1198 page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask); 1199 if (page_increm > nr_pages) 1200 page_increm = nr_pages; 1201 1202 if (pages) { 1203 struct page *subpage; 1204 unsigned int j; 1205 1206 /* 1207 * This must be a large folio (and doesn't need to 1208 * be the whole folio; it can be part of it), do 1209 * the refcount work for all the subpages too. 1210 * 1211 * NOTE: here the page may not be the head page 1212 * e.g. when start addr is not thp-size aligned. 1213 * try_grab_folio() should have taken care of tail 1214 * pages. 1215 */ 1216 if (page_increm > 1) { 1217 struct folio *folio = page_folio(page); 1218 1219 /* 1220 * Since we already hold refcount on the 1221 * large folio, this should never fail. 1222 */ 1223 if (try_grab_folio(folio, page_increm - 1, 1224 foll_flags)) { 1225 /* 1226 * Release the 1st page ref if the 1227 * folio is problematic, fail hard. 1228 */ 1229 gup_put_folio(folio, 1, 1230 foll_flags); 1231 ret = -EFAULT; 1232 goto out; 1233 } 1234 } 1235 1236 for (j = 0; j < page_increm; j++) { 1237 subpage = nth_page(page, j); 1238 pages[i + j] = subpage; 1239 flush_anon_page(vma, subpage, start + j * PAGE_SIZE); 1240 flush_dcache_page(subpage); 1241 } 1242 } 1243 1244 i += page_increm; 1245 start += page_increm * PAGE_SIZE; 1246 nr_pages -= page_increm; 1247 } while (nr_pages); 1248 out: 1249 if (ctx.pgmap) 1250 put_dev_pagemap(ctx.pgmap); 1251 return i ? i : ret; 1252 } 1253 1254 static bool vma_permits_fault(struct vm_area_struct *vma, 1255 unsigned int fault_flags) 1256 { 1257 bool write = !!(fault_flags & FAULT_FLAG_WRITE); 1258 bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE); 1259 vm_flags_t vm_flags = write ? 
VM_WRITE : VM_READ; 1260 1261 if (!(vm_flags & vma->vm_flags)) 1262 return false; 1263 1264 /* 1265 * The architecture might have a hardware protection 1266 * mechanism other than read/write that can deny access. 1267 * 1268 * gup always represents data access, not instruction 1269 * fetches, so execute=false here: 1270 */ 1271 if (!arch_vma_access_permitted(vma, write, false, foreign)) 1272 return false; 1273 1274 return true; 1275 } 1276 1277 /** 1278 * fixup_user_fault() - manually resolve a user page fault 1279 * @mm: mm_struct of target mm 1280 * @address: user address 1281 * @fault_flags:flags to pass down to handle_mm_fault() 1282 * @unlocked: did we unlock the mmap_lock while retrying, maybe NULL if caller 1283 * does not allow retry. If NULL, the caller must guarantee 1284 * that fault_flags does not contain FAULT_FLAG_ALLOW_RETRY. 1285 * 1286 * This is meant to be called in the specific scenario where for locking reasons 1287 * we try to access user memory in atomic context (within a pagefault_disable() 1288 * section), this returns -EFAULT, and we want to resolve the user fault before 1289 * trying again. 1290 * 1291 * Typically this is meant to be used by the futex code. 1292 * 1293 * The main difference with get_user_pages() is that this function will 1294 * unconditionally call handle_mm_fault() which will in turn perform all the 1295 * necessary SW fixup of the dirty and young bits in the PTE, while 1296 * get_user_pages() only guarantees to update these in the struct page. 1297 * 1298 * This is important for some architectures where those bits also gate the 1299 * access permission to the page because they are maintained in software. On 1300 * such architectures, gup() will not be enough to make a subsequent access 1301 * succeed. 1302 * 1303 * This function will not return with an unlocked mmap_lock. So it has not the 1304 * same semantics wrt the @mm->mmap_lock as does filemap_fault(). 1305 */ 1306 int fixup_user_fault(struct mm_struct *mm, 1307 unsigned long address, unsigned int fault_flags, 1308 bool *unlocked) 1309 { 1310 struct vm_area_struct *vma; 1311 vm_fault_t ret; 1312 1313 address = untagged_addr_remote(mm, address); 1314 1315 if (unlocked) 1316 fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 1317 1318 retry: 1319 vma = gup_vma_lookup(mm, address); 1320 if (!vma) 1321 return -EFAULT; 1322 1323 if (!vma_permits_fault(vma, fault_flags)) 1324 return -EFAULT; 1325 1326 if ((fault_flags & FAULT_FLAG_KILLABLE) && 1327 fatal_signal_pending(current)) 1328 return -EINTR; 1329 1330 ret = handle_mm_fault(vma, address, fault_flags, NULL); 1331 1332 if (ret & VM_FAULT_COMPLETED) { 1333 /* 1334 * NOTE: it's a pity that we need to retake the lock here 1335 * to pair with the unlock() in the callers. Ideally we 1336 * could tell the callers so they do not need to unlock. 1337 */ 1338 mmap_read_lock(mm); 1339 *unlocked = true; 1340 return 0; 1341 } 1342 1343 if (ret & VM_FAULT_ERROR) { 1344 int err = vm_fault_to_errno(ret, 0); 1345 1346 if (err) 1347 return err; 1348 BUG(); 1349 } 1350 1351 if (ret & VM_FAULT_RETRY) { 1352 mmap_read_lock(mm); 1353 *unlocked = true; 1354 fault_flags |= FAULT_FLAG_TRIED; 1355 goto retry; 1356 } 1357 1358 return 0; 1359 } 1360 EXPORT_SYMBOL_GPL(fixup_user_fault); 1361 1362 /* 1363 * GUP always responds to fatal signals. When FOLL_INTERRUPTIBLE is 1364 * specified, it'll also respond to generic signals. The caller of GUP 1365 * that has FOLL_INTERRUPTIBLE should take care of the GUP interruption. 
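 *
 * Behaviour sketch (illustrative):
 *
 *	SIGKILL pending		-> returns true, regardless of flags
 *	SIGINT pending		-> returns true only with FOLL_INTERRUPTIBLE
 *	no signal pending	-> returns false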
1366 */ 1367 static bool gup_signal_pending(unsigned int flags) 1368 { 1369 if (fatal_signal_pending(current)) 1370 return true; 1371 1372 if (!(flags & FOLL_INTERRUPTIBLE)) 1373 return false; 1374 1375 return signal_pending(current); 1376 } 1377 1378 /* 1379 * Locking: (*locked == 1) means that the mmap_lock has already been acquired by 1380 * the caller. This function may drop the mmap_lock. If it does so, then it will 1381 * set (*locked = 0). 1382 * 1383 * (*locked == 0) means that the caller expects this function to acquire and 1384 * drop the mmap_lock. Therefore, the value of *locked will still be zero when 1385 * the function returns, even though it may have changed temporarily during 1386 * function execution. 1387 * 1388 * Please note that this function, unlike __get_user_pages(), will not return 0 1389 * for nr_pages > 0, unless FOLL_NOWAIT is used. 1390 */ 1391 static __always_inline long __get_user_pages_locked(struct mm_struct *mm, 1392 unsigned long start, 1393 unsigned long nr_pages, 1394 struct page **pages, 1395 int *locked, 1396 unsigned int flags) 1397 { 1398 long ret, pages_done; 1399 bool must_unlock = false; 1400 1401 /* 1402 * The internal caller expects GUP to manage the lock internally and the 1403 * lock must be released when this returns. 1404 */ 1405 if (!*locked) { 1406 if (mmap_read_lock_killable(mm)) 1407 return -EAGAIN; 1408 must_unlock = true; 1409 *locked = 1; 1410 } 1411 else 1412 mmap_assert_locked(mm); 1413 1414 if (flags & FOLL_PIN) 1415 mm_set_has_pinned_flag(&mm->flags); 1416 1417 /* 1418 * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior 1419 * is to set FOLL_GET if the caller wants pages[] filled in (but has 1420 * carelessly failed to specify FOLL_GET), so keep doing that, but only 1421 * for FOLL_GET, not for the newer FOLL_PIN. 1422 * 1423 * FOLL_PIN always expects pages to be non-null, but no need to assert 1424 * that here, as any failures will be obvious enough. 1425 */ 1426 if (pages && !(flags & FOLL_PIN)) 1427 flags |= FOLL_GET; 1428 1429 pages_done = 0; 1430 for (;;) { 1431 ret = __get_user_pages(mm, start, nr_pages, flags, pages, 1432 locked); 1433 if (!(flags & FOLL_UNLOCKABLE)) { 1434 /* VM_FAULT_RETRY couldn't trigger, bypass */ 1435 pages_done = ret; 1436 break; 1437 } 1438 1439 /* VM_FAULT_RETRY or VM_FAULT_COMPLETED cannot return errors */ 1440 if (!*locked) { 1441 BUG_ON(ret < 0); 1442 BUG_ON(ret >= nr_pages); 1443 } 1444 1445 if (ret > 0) { 1446 nr_pages -= ret; 1447 pages_done += ret; 1448 if (!nr_pages) 1449 break; 1450 } 1451 if (*locked) { 1452 /* 1453 * VM_FAULT_RETRY didn't trigger or it was a 1454 * FOLL_NOWAIT. 1455 */ 1456 if (!pages_done) 1457 pages_done = ret; 1458 break; 1459 } 1460 /* 1461 * VM_FAULT_RETRY triggered, so seek to the faulting offset. 1462 * For the prefault case (!pages) we only update counts. 1463 */ 1464 if (likely(pages)) 1465 pages += ret; 1466 start += ret << PAGE_SHIFT; 1467 1468 /* The lock was temporarily dropped, so we must unlock later */ 1469 must_unlock = true; 1470 1471 retry: 1472 /* 1473 * Repeat on the address that fired VM_FAULT_RETRY 1474 * with both FAULT_FLAG_ALLOW_RETRY and 1475 * FAULT_FLAG_TRIED. Note that GUP can be interrupted 1476 * by fatal signals of even common signals, depending on 1477 * the caller's request. So we need to check it before we 1478 * start trying again otherwise it can loop forever. 
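		 *
		 * The retry step below then boils down to (simplified):
		 *
		 *	if (gup_signal_pending(flags))
		 *		break;			possibly with -EINTR
		 *	mmap_read_lock_killable(mm);
		 *	__get_user_pages(mm, start, 1, flags | FOLL_TRIED,
		 *			 pages, locked);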
		 */
		if (gup_signal_pending(flags)) {
			if (!pages_done)
				pages_done = -EINTR;
			break;
		}

		ret = mmap_read_lock_killable(mm);
		if (ret) {
			BUG_ON(ret > 0);
			if (!pages_done)
				pages_done = ret;
			break;
		}

		*locked = 1;
		ret = __get_user_pages(mm, start, 1, flags | FOLL_TRIED,
				       pages, locked);
		if (!*locked) {
			/* Continue to retry until we succeeded */
			BUG_ON(ret != 0);
			goto retry;
		}
		if (ret != 1) {
			BUG_ON(ret > 1);
			if (!pages_done)
				pages_done = ret;
			break;
		}
		nr_pages--;
		pages_done++;
		if (!nr_pages)
			break;
		if (likely(pages))
			pages++;
		start += PAGE_SIZE;
	}
	if (must_unlock && *locked) {
		/*
		 * We either temporarily dropped the lock, or the caller
		 * requested that we both acquire and drop the lock. Either way,
		 * we must now unlock, and notify the caller of that state.
		 */
		mmap_read_unlock(mm);
		*locked = 0;
	}
	return pages_done;
}

/**
 * populate_vma_page_range() - populate a range of pages in the vma.
 * @vma: target vma
 * @start: start address
 * @end: end address
 * @locked: whether the mmap_lock is still held
 *
 * This takes care of mlocking the pages too if VM_LOCKED is set.
 *
 * Returns either the number of pages pinned in the vma, or a negative error
 * code on error.
 *
 * vma->vm_mm->mmap_lock must be held.
 *
 * If @locked is NULL, it may be held for read or write and will
 * be unperturbed.
 *
 * If @locked is non-NULL, it must be held for read only and may be
 * released. If it's released, *@locked will be set to 0.
 */
long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *locked)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long nr_pages = (end - start) / PAGE_SIZE;
	int local_locked = 1;
	int gup_flags;
	long ret;

	VM_BUG_ON(!PAGE_ALIGNED(start));
	VM_BUG_ON(!PAGE_ALIGNED(end));
	VM_BUG_ON_VMA(start < vma->vm_start, vma);
	VM_BUG_ON_VMA(end > vma->vm_end, vma);
	mmap_assert_locked(mm);

	/*
	 * Rightly or wrongly, the VM_LOCKONFAULT case has never used
	 * faultin_page() to break COW, so it has no work to do here.
	 */
	if (vma->vm_flags & VM_LOCKONFAULT)
		return nr_pages;

	gup_flags = FOLL_TOUCH;
	/*
	 * We want to touch writable mappings with a write fault in order
	 * to break COW, except for shared mappings because these don't COW
	 * and we would not want to dirty them for nothing.
	 */
	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
		gup_flags |= FOLL_WRITE;

	/*
	 * We want mlock to succeed for regions that have any permissions
	 * other than PROT_NONE.
	 */
	if (vma_is_accessible(vma))
		gup_flags |= FOLL_FORCE;

	if (locked)
		gup_flags |= FOLL_UNLOCKABLE;

	/*
	 * We made sure addr is within a VMA, so the following will
	 * not result in a stack expansion that recurses back here.
	 */
	ret = __get_user_pages(mm, start, nr_pages, gup_flags,
			       NULL, locked ? locked : &local_locked);
	lru_add_drain();
	return ret;
}

/*
 * faultin_page_range() - populate (prefault) page tables inside the
 * given range readable/writable
 *
 * This takes care of mlocking the pages, too, if VM_LOCKED is set.
1604 * 1605 * @mm: the mm to populate page tables in 1606 * @start: start address 1607 * @end: end address 1608 * @write: whether to prefault readable or writable 1609 * @locked: whether the mmap_lock is still held 1610 * 1611 * Returns either number of processed pages in the MM, or a negative error 1612 * code on error (see __get_user_pages()). Note that this function reports 1613 * errors related to VMAs, such as incompatible mappings, as expected by 1614 * MADV_POPULATE_(READ|WRITE). 1615 * 1616 * The range must be page-aligned. 1617 * 1618 * mm->mmap_lock must be held. If it's released, *@locked will be set to 0. 1619 */ 1620 long faultin_page_range(struct mm_struct *mm, unsigned long start, 1621 unsigned long end, bool write, int *locked) 1622 { 1623 unsigned long nr_pages = (end - start) / PAGE_SIZE; 1624 int gup_flags; 1625 long ret; 1626 1627 VM_BUG_ON(!PAGE_ALIGNED(start)); 1628 VM_BUG_ON(!PAGE_ALIGNED(end)); 1629 mmap_assert_locked(mm); 1630 1631 /* 1632 * FOLL_TOUCH: Mark page accessed and thereby young; will also mark 1633 * the page dirty with FOLL_WRITE -- which doesn't make a 1634 * difference with !FOLL_FORCE, because the page is writable 1635 * in the page table. 1636 * FOLL_HWPOISON: Return -EHWPOISON instead of -EFAULT when we hit 1637 * a poisoned page. 1638 * !FOLL_FORCE: Require proper access permissions. 1639 */ 1640 gup_flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_UNLOCKABLE | 1641 FOLL_MADV_POPULATE; 1642 if (write) 1643 gup_flags |= FOLL_WRITE; 1644 1645 ret = __get_user_pages_locked(mm, start, nr_pages, NULL, locked, 1646 gup_flags); 1647 lru_add_drain(); 1648 return ret; 1649 } 1650 1651 /* 1652 * __mm_populate - populate and/or mlock pages within a range of address space. 1653 * 1654 * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap 1655 * flags. VMAs must be already marked with the desired vm_flags, and 1656 * mmap_lock must not be held. 1657 */ 1658 int __mm_populate(unsigned long start, unsigned long len, int ignore_errors) 1659 { 1660 struct mm_struct *mm = current->mm; 1661 unsigned long end, nstart, nend; 1662 struct vm_area_struct *vma = NULL; 1663 int locked = 0; 1664 long ret = 0; 1665 1666 end = start + len; 1667 1668 for (nstart = start; nstart < end; nstart = nend) { 1669 /* 1670 * We want to fault in pages for [nstart; end) address range. 1671 * Find first corresponding VMA. 1672 */ 1673 if (!locked) { 1674 locked = 1; 1675 mmap_read_lock(mm); 1676 vma = find_vma_intersection(mm, nstart, end); 1677 } else if (nstart >= vma->vm_end) 1678 vma = find_vma_intersection(mm, vma->vm_end, end); 1679 1680 if (!vma) 1681 break; 1682 /* 1683 * Set [nstart; nend) to intersection of desired address 1684 * range with the first VMA. Also, skip undesirable VMA types. 1685 */ 1686 nend = min(end, vma->vm_end); 1687 if (vma->vm_flags & (VM_IO | VM_PFNMAP)) 1688 continue; 1689 if (nstart < vma->vm_start) 1690 nstart = vma->vm_start; 1691 /* 1692 * Now fault in a range of pages. populate_vma_page_range() 1693 * double checks the vma flags, so that it won't mlock pages 1694 * if the vma was already munlocked. 
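		 *
		 * Typical entry points into this loop (illustrative):
		 *
		 *	mlock(addr, len)		-> __mm_populate(addr, len, 0)
		 *	mmap(..., MAP_POPULATE, ...)	-> mm_populate() ->
		 *					   __mm_populate(addr, len, 1)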
1695 */ 1696 ret = populate_vma_page_range(vma, nstart, nend, &locked); 1697 if (ret < 0) { 1698 if (ignore_errors) { 1699 ret = 0; 1700 continue; /* continue at next VMA */ 1701 } 1702 break; 1703 } 1704 nend = nstart + ret * PAGE_SIZE; 1705 ret = 0; 1706 } 1707 if (locked) 1708 mmap_read_unlock(mm); 1709 return ret; /* 0 or negative error code */ 1710 } 1711 #else /* CONFIG_MMU */ 1712 static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start, 1713 unsigned long nr_pages, struct page **pages, 1714 int *locked, unsigned int foll_flags) 1715 { 1716 struct vm_area_struct *vma; 1717 bool must_unlock = false; 1718 unsigned long vm_flags; 1719 long i; 1720 1721 if (!nr_pages) 1722 return 0; 1723 1724 /* 1725 * The internal caller expects GUP to manage the lock internally and the 1726 * lock must be released when this returns. 1727 */ 1728 if (!*locked) { 1729 if (mmap_read_lock_killable(mm)) 1730 return -EAGAIN; 1731 must_unlock = true; 1732 *locked = 1; 1733 } 1734 1735 /* calculate required read or write permissions. 1736 * If FOLL_FORCE is set, we only require the "MAY" flags. 1737 */ 1738 vm_flags = (foll_flags & FOLL_WRITE) ? 1739 (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD); 1740 vm_flags &= (foll_flags & FOLL_FORCE) ? 1741 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE); 1742 1743 for (i = 0; i < nr_pages; i++) { 1744 vma = find_vma(mm, start); 1745 if (!vma) 1746 break; 1747 1748 /* protect what we can, including chardevs */ 1749 if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) || 1750 !(vm_flags & vma->vm_flags)) 1751 break; 1752 1753 if (pages) { 1754 pages[i] = virt_to_page((void *)start); 1755 if (pages[i]) 1756 get_page(pages[i]); 1757 } 1758 1759 start = (start + PAGE_SIZE) & PAGE_MASK; 1760 } 1761 1762 if (must_unlock && *locked) { 1763 mmap_read_unlock(mm); 1764 *locked = 0; 1765 } 1766 1767 return i ? : -EFAULT; 1768 } 1769 #endif /* !CONFIG_MMU */ 1770 1771 /** 1772 * fault_in_writeable - fault in userspace address range for writing 1773 * @uaddr: start of address range 1774 * @size: size of address range 1775 * 1776 * Returns the number of bytes not faulted in (like copy_to_user() and 1777 * copy_from_user()). 1778 */ 1779 size_t fault_in_writeable(char __user *uaddr, size_t size) 1780 { 1781 char __user *start = uaddr, *end; 1782 1783 if (unlikely(size == 0)) 1784 return 0; 1785 if (!user_write_access_begin(uaddr, size)) 1786 return size; 1787 if (!PAGE_ALIGNED(uaddr)) { 1788 unsafe_put_user(0, uaddr, out); 1789 uaddr = (char __user *)PAGE_ALIGN((unsigned long)uaddr); 1790 } 1791 end = (char __user *)PAGE_ALIGN((unsigned long)start + size); 1792 if (unlikely(end < start)) 1793 end = NULL; 1794 while (uaddr != end) { 1795 unsafe_put_user(0, uaddr, out); 1796 uaddr += PAGE_SIZE; 1797 } 1798 1799 out: 1800 user_write_access_end(); 1801 if (size > uaddr - start) 1802 return size - (uaddr - start); 1803 return 0; 1804 } 1805 EXPORT_SYMBOL(fault_in_writeable); 1806 1807 /** 1808 * fault_in_subpage_writeable - fault in an address range for writing 1809 * @uaddr: start of address range 1810 * @size: size of address range 1811 * 1812 * Fault in a user address range for writing while checking for permissions at 1813 * sub-page granularity (e.g. arm64 MTE). This function should be used when 1814 * the caller cannot guarantee forward progress of a copy_to_user() loop. 1815 * 1816 * Returns the number of bytes not faulted in (like copy_to_user() and 1817 * copy_from_user()). 
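 *
 * A minimal caller sketch (illustrative):
 *
 *	if (fault_in_subpage_writeable(uaddr, len))
 *		return -EFAULT;		part of the range is not writable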
1818 */ 1819 size_t fault_in_subpage_writeable(char __user *uaddr, size_t size) 1820 { 1821 size_t faulted_in; 1822 1823 /* 1824 * Attempt faulting in at page granularity first for page table 1825 * permission checking. The arch-specific probe_subpage_writeable() 1826 * functions may not check for this. 1827 */ 1828 faulted_in = size - fault_in_writeable(uaddr, size); 1829 if (faulted_in) 1830 faulted_in -= probe_subpage_writeable(uaddr, faulted_in); 1831 1832 return size - faulted_in; 1833 } 1834 EXPORT_SYMBOL(fault_in_subpage_writeable); 1835 1836 /* 1837 * fault_in_safe_writeable - fault in an address range for writing 1838 * @uaddr: start of address range 1839 * @size: length of address range 1840 * 1841 * Faults in an address range for writing. This is primarily useful when we 1842 * already know that some or all of the pages in the address range aren't in 1843 * memory. 1844 * 1845 * Unlike fault_in_writeable(), this function is non-destructive. 1846 * 1847 * Note that we don't pin or otherwise hold the pages referenced that we fault 1848 * in. There's no guarantee that they'll stay in memory for any duration of 1849 * time. 1850 * 1851 * Returns the number of bytes not faulted in, like copy_to_user() and 1852 * copy_from_user(). 1853 */ 1854 size_t fault_in_safe_writeable(const char __user *uaddr, size_t size) 1855 { 1856 unsigned long start = (unsigned long)uaddr, end; 1857 struct mm_struct *mm = current->mm; 1858 bool unlocked = false; 1859 1860 if (unlikely(size == 0)) 1861 return 0; 1862 end = PAGE_ALIGN(start + size); 1863 if (end < start) 1864 end = 0; 1865 1866 mmap_read_lock(mm); 1867 do { 1868 if (fixup_user_fault(mm, start, FAULT_FLAG_WRITE, &unlocked)) 1869 break; 1870 start = (start + PAGE_SIZE) & PAGE_MASK; 1871 } while (start != end); 1872 mmap_read_unlock(mm); 1873 1874 if (size > (unsigned long)uaddr - start) 1875 return size - ((unsigned long)uaddr - start); 1876 return 0; 1877 } 1878 EXPORT_SYMBOL(fault_in_safe_writeable); 1879 1880 /** 1881 * fault_in_readable - fault in userspace address range for reading 1882 * @uaddr: start of user address range 1883 * @size: size of user address range 1884 * 1885 * Returns the number of bytes not faulted in (like copy_to_user() and 1886 * copy_from_user()). 1887 */ 1888 size_t fault_in_readable(const char __user *uaddr, size_t size) 1889 { 1890 const char __user *start = uaddr, *end; 1891 volatile char c; 1892 1893 if (unlikely(size == 0)) 1894 return 0; 1895 if (!user_read_access_begin(uaddr, size)) 1896 return size; 1897 if (!PAGE_ALIGNED(uaddr)) { 1898 unsafe_get_user(c, uaddr, out); 1899 uaddr = (const char __user *)PAGE_ALIGN((unsigned long)uaddr); 1900 } 1901 end = (const char __user *)PAGE_ALIGN((unsigned long)start + size); 1902 if (unlikely(end < start)) 1903 end = NULL; 1904 while (uaddr != end) { 1905 unsafe_get_user(c, uaddr, out); 1906 uaddr += PAGE_SIZE; 1907 } 1908 1909 out: 1910 user_read_access_end(); 1911 (void)c; 1912 if (size > uaddr - start) 1913 return size - (uaddr - start); 1914 return 0; 1915 } 1916 EXPORT_SYMBOL(fault_in_readable); 1917 1918 /** 1919 * get_dump_page() - pin user page in memory while writing it to core dump 1920 * @addr: user address 1921 * 1922 * Returns struct page pointer of user page pinned for dump, 1923 * to be freed afterwards by put_page(). 
1924 * 1925 * Returns NULL on any kind of failure - a hole must then be inserted into 1926 * the corefile, to preserve alignment with its headers; and also returns 1927 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found - 1928 * allowing a hole to be left in the corefile to save disk space. 1929 * 1930 * Called without mmap_lock (takes and releases the mmap_lock by itself). 1931 */ 1932 #ifdef CONFIG_ELF_CORE 1933 struct page *get_dump_page(unsigned long addr) 1934 { 1935 struct page *page; 1936 int locked = 0; 1937 int ret; 1938 1939 ret = __get_user_pages_locked(current->mm, addr, 1, &page, &locked, 1940 FOLL_FORCE | FOLL_DUMP | FOLL_GET); 1941 return (ret == 1) ? page : NULL; 1942 } 1943 #endif /* CONFIG_ELF_CORE */ 1944 1945 #ifdef CONFIG_MIGRATION 1946 /* 1947 * Returns the number of collected pages. Return value is always >= 0. 1948 */ 1949 static unsigned long collect_longterm_unpinnable_pages( 1950 struct list_head *movable_page_list, 1951 unsigned long nr_pages, 1952 struct page **pages) 1953 { 1954 unsigned long i, collected = 0; 1955 struct folio *prev_folio = NULL; 1956 bool drain_allow = true; 1957 1958 for (i = 0; i < nr_pages; i++) { 1959 struct folio *folio = page_folio(pages[i]); 1960 1961 if (folio == prev_folio) 1962 continue; 1963 prev_folio = folio; 1964 1965 if (folio_is_longterm_pinnable(folio)) 1966 continue; 1967 1968 collected++; 1969 1970 if (folio_is_device_coherent(folio)) 1971 continue; 1972 1973 if (folio_test_hugetlb(folio)) { 1974 isolate_hugetlb(folio, movable_page_list); 1975 continue; 1976 } 1977 1978 if (!folio_test_lru(folio) && drain_allow) { 1979 lru_add_drain_all(); 1980 drain_allow = false; 1981 } 1982 1983 if (!folio_isolate_lru(folio)) 1984 continue; 1985 1986 list_add_tail(&folio->lru, movable_page_list); 1987 node_stat_mod_folio(folio, 1988 NR_ISOLATED_ANON + folio_is_file_lru(folio), 1989 folio_nr_pages(folio)); 1990 } 1991 1992 return collected; 1993 } 1994 1995 /* 1996 * Unpins all pages and migrates device coherent pages and movable_page_list. 1997 * Returns -EAGAIN if all pages were successfully migrated or -errno for failure 1998 * (or partial success). 1999 */ 2000 static int migrate_longterm_unpinnable_pages( 2001 struct list_head *movable_page_list, 2002 unsigned long nr_pages, 2003 struct page **pages) 2004 { 2005 int ret; 2006 unsigned long i; 2007 2008 for (i = 0; i < nr_pages; i++) { 2009 struct folio *folio = page_folio(pages[i]); 2010 2011 if (folio_is_device_coherent(folio)) { 2012 /* 2013 * Migration will fail if the page is pinned, so convert 2014 * the pin on the source page to a normal reference. 2015 */ 2016 pages[i] = NULL; 2017 folio_get(folio); 2018 gup_put_folio(folio, 1, FOLL_PIN); 2019 2020 if (migrate_device_coherent_page(&folio->page)) { 2021 ret = -EBUSY; 2022 goto err; 2023 } 2024 2025 continue; 2026 } 2027 2028 /* 2029 * We can't migrate pages with unexpected references, so drop 2030 * the reference obtained by __get_user_pages_locked(). 2031 * Migrating pages have been added to movable_page_list after 2032 * calling folio_isolate_lru() which takes a reference so the 2033 * page won't be freed if it's migrating. 
2034 */ 2035 unpin_user_page(pages[i]); 2036 pages[i] = NULL; 2037 } 2038 2039 if (!list_empty(movable_page_list)) { 2040 struct migration_target_control mtc = { 2041 .nid = NUMA_NO_NODE, 2042 .gfp_mask = GFP_USER | __GFP_NOWARN, 2043 }; 2044 2045 if (migrate_pages(movable_page_list, alloc_migration_target, 2046 NULL, (unsigned long)&mtc, MIGRATE_SYNC, 2047 MR_LONGTERM_PIN, NULL)) { 2048 ret = -ENOMEM; 2049 goto err; 2050 } 2051 } 2052 2053 putback_movable_pages(movable_page_list); 2054 2055 return -EAGAIN; 2056 2057 err: 2058 for (i = 0; i < nr_pages; i++) 2059 if (pages[i]) 2060 unpin_user_page(pages[i]); 2061 putback_movable_pages(movable_page_list); 2062 2063 return ret; 2064 } 2065 2066 /* 2067 * Check whether all pages are *allowed* to be pinned. Rather confusingly, all 2068 * pages in the range are required to be pinned via FOLL_PIN, before calling 2069 * this routine. 2070 * 2071 * If any pages in the range are not allowed to be pinned, then this routine 2072 * will migrate those pages away, unpin all the pages in the range and return 2073 * -EAGAIN. The caller should re-pin the entire range with FOLL_PIN and then 2074 * call this routine again. 2075 * 2076 * If an error other than -EAGAIN occurs, this indicates a migration failure. 2077 * The caller should give up, and propagate the error back up the call stack. 2078 * 2079 * If everything is OK and all pages in the range are allowed to be pinned, then 2080 * this routine leaves all pages pinned and returns zero for success. 2081 */ 2082 static long check_and_migrate_movable_pages(unsigned long nr_pages, 2083 struct page **pages) 2084 { 2085 unsigned long collected; 2086 LIST_HEAD(movable_page_list); 2087 2088 collected = collect_longterm_unpinnable_pages(&movable_page_list, 2089 nr_pages, pages); 2090 if (!collected) 2091 return 0; 2092 2093 return migrate_longterm_unpinnable_pages(&movable_page_list, nr_pages, 2094 pages); 2095 } 2096 #else 2097 static long check_and_migrate_movable_pages(unsigned long nr_pages, 2098 struct page **pages) 2099 { 2100 return 0; 2101 } 2102 #endif /* CONFIG_MIGRATION */ 2103 2104 /* 2105 * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which 2106 * allows us to process the FOLL_LONGTERM flag. 2107 */ 2108 static long __gup_longterm_locked(struct mm_struct *mm, 2109 unsigned long start, 2110 unsigned long nr_pages, 2111 struct page **pages, 2112 int *locked, 2113 unsigned int gup_flags) 2114 { 2115 unsigned int flags; 2116 long rc, nr_pinned_pages; 2117 2118 if (!(gup_flags & FOLL_LONGTERM)) 2119 return __get_user_pages_locked(mm, start, nr_pages, pages, 2120 locked, gup_flags); 2121 2122 flags = memalloc_pin_save(); 2123 do { 2124 nr_pinned_pages = __get_user_pages_locked(mm, start, nr_pages, 2125 pages, locked, 2126 gup_flags); 2127 if (nr_pinned_pages <= 0) { 2128 rc = nr_pinned_pages; 2129 break; 2130 } 2131 2132 /* FOLL_LONGTERM implies FOLL_PIN */ 2133 rc = check_and_migrate_movable_pages(nr_pinned_pages, pages); 2134 } while (rc == -EAGAIN); 2135 memalloc_pin_restore(flags); 2136 return rc ? rc : nr_pinned_pages; 2137 } 2138 2139 /* 2140 * Check that the given flags are valid for the exported gup/pup interface, and 2141 * update them with the required flags that the caller must have set. 
2142 */ 2143 static bool is_valid_gup_args(struct page **pages, int *locked, 2144 unsigned int *gup_flags_p, unsigned int to_set) 2145 { 2146 unsigned int gup_flags = *gup_flags_p; 2147 2148 /* 2149 * These flags not allowed to be specified externally to the gup 2150 * interfaces: 2151 * - FOLL_TOUCH/FOLL_PIN/FOLL_TRIED/FOLL_FAST_ONLY are internal only 2152 * - FOLL_REMOTE is internal only and used on follow_page() 2153 * - FOLL_UNLOCKABLE is internal only and used if locked is !NULL 2154 */ 2155 if (WARN_ON_ONCE(gup_flags & INTERNAL_GUP_FLAGS)) 2156 return false; 2157 2158 gup_flags |= to_set; 2159 if (locked) { 2160 /* At the external interface locked must be set */ 2161 if (WARN_ON_ONCE(*locked != 1)) 2162 return false; 2163 2164 gup_flags |= FOLL_UNLOCKABLE; 2165 } 2166 2167 /* FOLL_GET and FOLL_PIN are mutually exclusive. */ 2168 if (WARN_ON_ONCE((gup_flags & (FOLL_PIN | FOLL_GET)) == 2169 (FOLL_PIN | FOLL_GET))) 2170 return false; 2171 2172 /* LONGTERM can only be specified when pinning */ 2173 if (WARN_ON_ONCE(!(gup_flags & FOLL_PIN) && (gup_flags & FOLL_LONGTERM))) 2174 return false; 2175 2176 /* Pages input must be given if using GET/PIN */ 2177 if (WARN_ON_ONCE((gup_flags & (FOLL_GET | FOLL_PIN)) && !pages)) 2178 return false; 2179 2180 /* We want to allow the pgmap to be hot-unplugged at all times */ 2181 if (WARN_ON_ONCE((gup_flags & FOLL_LONGTERM) && 2182 (gup_flags & FOLL_PCI_P2PDMA))) 2183 return false; 2184 2185 *gup_flags_p = gup_flags; 2186 return true; 2187 } 2188 2189 #ifdef CONFIG_MMU 2190 /** 2191 * get_user_pages_remote() - pin user pages in memory 2192 * @mm: mm_struct of target mm 2193 * @start: starting user address 2194 * @nr_pages: number of pages from start to pin 2195 * @gup_flags: flags modifying lookup behaviour 2196 * @pages: array that receives pointers to the pages pinned. 2197 * Should be at least nr_pages long. Or NULL, if caller 2198 * only intends to ensure the pages are faulted in. 2199 * @locked: pointer to lock flag indicating whether lock is held and 2200 * subsequently whether VM_FAULT_RETRY functionality can be 2201 * utilised. Lock must initially be held. 2202 * 2203 * Returns either number of pages pinned (which may be less than the 2204 * number requested), or an error. Details about the return value: 2205 * 2206 * -- If nr_pages is 0, returns 0. 2207 * -- If nr_pages is >0, but no pages were pinned, returns -errno. 2208 * -- If nr_pages is >0, and some pages were pinned, returns the number of 2209 * pages pinned. Again, this may be less than nr_pages. 2210 * 2211 * The caller is responsible for releasing returned @pages, via put_page(). 2212 * 2213 * Must be called with mmap_lock held for read or write. 2214 * 2215 * get_user_pages_remote walks a process's page tables and takes a reference 2216 * to each struct page that each user address corresponds to at a given 2217 * instant. That is, it takes the page that would be accessed if a user 2218 * thread accesses the given user virtual address at that instant. 2219 * 2220 * This does not guarantee that the page exists in the user mappings when 2221 * get_user_pages_remote returns, and there may even be a completely different 2222 * page there in some cases (eg. if mmapped pagecache has been invalidated 2223 * and subsequently re-faulted). However it does guarantee that the page 2224 * won't be freed completely. And mostly callers simply care that the page 2225 * contains data that was valid *at some point in time*. 
Typically, an IO 2226 * or similar operation cannot guarantee anything stronger anyway because 2227 * locks can't be held over the syscall boundary. 2228 * 2229 * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page 2230 * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must 2231 * be called after the page is finished with, and before put_page is called. 2232 * 2233 * get_user_pages_remote is typically used for fewer-copy IO operations, 2234 * to get a handle on the memory by some means other than accesses 2235 * via the user virtual addresses. The pages may be submitted for 2236 * DMA to devices or accessed via their kernel linear mapping (via the 2237 * kmap APIs). Care should be taken to use the correct cache flushing APIs. 2238 * 2239 * See also get_user_pages_fast, for performance critical applications. 2240 * 2241 * get_user_pages_remote should be phased out in favor of 2242 * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing 2243 * should use get_user_pages_remote because it cannot pass 2244 * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault. 2245 */ 2246 long get_user_pages_remote(struct mm_struct *mm, 2247 unsigned long start, unsigned long nr_pages, 2248 unsigned int gup_flags, struct page **pages, 2249 int *locked) 2250 { 2251 int local_locked = 1; 2252 2253 if (!is_valid_gup_args(pages, locked, &gup_flags, 2254 FOLL_TOUCH | FOLL_REMOTE)) 2255 return -EINVAL; 2256 2257 return __get_user_pages_locked(mm, start, nr_pages, pages, 2258 locked ? locked : &local_locked, 2259 gup_flags); 2260 } 2261 EXPORT_SYMBOL(get_user_pages_remote); 2262 2263 #else /* CONFIG_MMU */ 2264 long get_user_pages_remote(struct mm_struct *mm, 2265 unsigned long start, unsigned long nr_pages, 2266 unsigned int gup_flags, struct page **pages, 2267 int *locked) 2268 { 2269 return 0; 2270 } 2271 #endif /* !CONFIG_MMU */ 2272 2273 /** 2274 * get_user_pages() - pin user pages in memory 2275 * @start: starting user address 2276 * @nr_pages: number of pages from start to pin 2277 * @gup_flags: flags modifying lookup behaviour 2278 * @pages: array that receives pointers to the pages pinned. 2279 * Should be at least nr_pages long. Or NULL, if caller 2280 * only intends to ensure the pages are faulted in. 2281 * 2282 * This is the same as get_user_pages_remote(), just with a less-flexible 2283 * calling convention where we assume that the mm being operated on belongs to 2284 * the current task, and doesn't allow passing of a locked parameter. We also 2285 * obviously don't pass FOLL_REMOTE in here. 2286 */ 2287 long get_user_pages(unsigned long start, unsigned long nr_pages, 2288 unsigned int gup_flags, struct page **pages) 2289 { 2290 int locked = 1; 2291 2292 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_TOUCH)) 2293 return -EINVAL; 2294 2295 return __get_user_pages_locked(current->mm, start, nr_pages, pages, 2296 &locked, gup_flags); 2297 } 2298 EXPORT_SYMBOL(get_user_pages); 2299 2300 /* 2301 * get_user_pages_unlocked() is suitable to replace the form: 2302 * 2303 * mmap_read_lock(mm); 2304 * get_user_pages(mm, ..., pages, NULL); 2305 * mmap_read_unlock(mm); 2306 * 2307 * with: 2308 * 2309 * get_user_pages_unlocked(mm, ..., pages); 2310 * 2311 * It is functionally equivalent to get_user_pages_fast so 2312 * get_user_pages_fast should be used instead if specific gup_flags 2313 * (e.g. FOLL_FORCE) are not required. 
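 *
 * Illustrative sketch only, with hypothetical caller variables (start,
 * nr_pages, pages): a caller that writes to the pages is expected to mark
 * them dirty before dropping its references, as described above for
 * get_user_pages_remote():
 *
 *	long i, got;
 *
 *	got = get_user_pages_unlocked(start, nr_pages, pages, FOLL_WRITE);
 *	for (i = 0; i < got; i++) {
 *		... write to the page, e.g. via kmap_local_page() ...
 *		set_page_dirty_lock(pages[i]);
 *		put_page(pages[i]);
 *	}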
2314 */ 2315 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, 2316 struct page **pages, unsigned int gup_flags) 2317 { 2318 int locked = 0; 2319 2320 if (!is_valid_gup_args(pages, NULL, &gup_flags, 2321 FOLL_TOUCH | FOLL_UNLOCKABLE)) 2322 return -EINVAL; 2323 2324 return __get_user_pages_locked(current->mm, start, nr_pages, pages, 2325 &locked, gup_flags); 2326 } 2327 EXPORT_SYMBOL(get_user_pages_unlocked); 2328 2329 /* 2330 * Fast GUP 2331 * 2332 * get_user_pages_fast attempts to pin user pages by walking the page 2333 * tables directly and avoids taking locks. Thus the walker needs to be 2334 * protected from page table pages being freed from under it, and should 2335 * block any THP splits. 2336 * 2337 * One way to achieve this is to have the walker disable interrupts, and 2338 * rely on IPIs from the TLB flushing code blocking before the page table 2339 * pages are freed. This is unsuitable for architectures that do not need 2340 * to broadcast an IPI when invalidating TLBs. 2341 * 2342 * Another way to achieve this is to batch up page table containing pages 2343 * belonging to more than one mm_user, then rcu_sched a callback to free those 2344 * pages. Disabling interrupts will allow the fast_gup walker to both block 2345 * the rcu_sched callback, and an IPI that we broadcast for splitting THPs 2346 * (which is a relatively rare event). The code below adopts this strategy. 2347 * 2348 * Before activating this code, please be aware that the following assumptions 2349 * are currently made: 2350 * 2351 * *) Either MMU_GATHER_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to 2352 * free pages containing page tables or TLB flushing requires IPI broadcast. 2353 * 2354 * *) ptes can be read atomically by the architecture. 2355 * 2356 * *) access_ok is sufficient to validate userspace address ranges. 2357 * 2358 * The last two assumptions can be relaxed by the addition of helper functions. 2359 * 2360 * This code is based heavily on the PowerPC implementation by Nick Piggin. 2361 */ 2362 #ifdef CONFIG_HAVE_FAST_GUP 2363 2364 /* 2365 * Used in the GUP-fast path to determine whether a pin is permitted for a 2366 * specific folio. 2367 * 2368 * This call assumes the caller has pinned the folio, that the lowest page table 2369 * level still points to this folio, and that interrupts have been disabled. 2370 * 2371 * Writing to pinned file-backed dirty tracked folios is inherently problematic 2372 * (see comment describing the writable_file_mapping_allowed() function). We 2373 * therefore try to avoid the most egregious case of a long-term mapping doing 2374 * so. 2375 * 2376 * This function cannot be as thorough as that one as the VMA is not available 2377 * in the fast path, so instead we whitelist known good cases and if in doubt, 2378 * fall back to the slow path. 2379 */ 2380 static bool folio_fast_pin_allowed(struct folio *folio, unsigned int flags) 2381 { 2382 struct address_space *mapping; 2383 unsigned long mapping_flags; 2384 2385 /* 2386 * If we aren't pinning then no problematic write can occur. A long term 2387 * pin is the most egregious case so this is the one we disallow. 2388 */ 2389 if ((flags & (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) != 2390 (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) 2391 return true; 2392 2393 /* The folio is pinned, so we can safely access folio fields. */ 2394 2395 if (WARN_ON_ONCE(folio_test_slab(folio))) 2396 return false; 2397 2398 /* hugetlb mappings do not require dirty-tracking. 
*/ 2399 if (folio_test_hugetlb(folio)) 2400 return true; 2401 2402 /* 2403 * GUP-fast disables IRQs. When IRQS are disabled, RCU grace periods 2404 * cannot proceed, which means no actions performed under RCU can 2405 * proceed either. 2406 * 2407 * inodes and thus their mappings are freed under RCU, which means the 2408 * mapping cannot be freed beneath us and thus we can safely dereference 2409 * it. 2410 */ 2411 lockdep_assert_irqs_disabled(); 2412 2413 /* 2414 * However, there may be operations which _alter_ the mapping, so ensure 2415 * we read it once and only once. 2416 */ 2417 mapping = READ_ONCE(folio->mapping); 2418 2419 /* 2420 * The mapping may have been truncated, in any case we cannot determine 2421 * if this mapping is safe - fall back to slow path to determine how to 2422 * proceed. 2423 */ 2424 if (!mapping) 2425 return false; 2426 2427 /* Anonymous folios pose no problem. */ 2428 mapping_flags = (unsigned long)mapping & PAGE_MAPPING_FLAGS; 2429 if (mapping_flags) 2430 return mapping_flags & PAGE_MAPPING_ANON; 2431 2432 /* 2433 * At this point, we know the mapping is non-null and points to an 2434 * address_space object. The only remaining whitelisted file system is 2435 * shmem. 2436 */ 2437 return shmem_mapping(mapping); 2438 } 2439 2440 static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start, 2441 unsigned int flags, 2442 struct page **pages) 2443 { 2444 while ((*nr) - nr_start) { 2445 struct page *page = pages[--(*nr)]; 2446 2447 ClearPageReferenced(page); 2448 if (flags & FOLL_PIN) 2449 unpin_user_page(page); 2450 else 2451 put_page(page); 2452 } 2453 } 2454 2455 /** 2456 * try_grab_folio_fast() - Attempt to get or pin a folio in fast path. 2457 * @page: pointer to page to be grabbed 2458 * @refs: the value to (effectively) add to the folio's refcount 2459 * @flags: gup flags: these are the FOLL_* flag values. 2460 * 2461 * "grab" names in this file mean, "look at flags to decide whether to use 2462 * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount. 2463 * 2464 * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the 2465 * same time. (That's true throughout the get_user_pages*() and 2466 * pin_user_pages*() APIs.) Cases: 2467 * 2468 * FOLL_GET: folio's refcount will be incremented by @refs. 2469 * 2470 * FOLL_PIN on large folios: folio's refcount will be incremented by 2471 * @refs, and its pincount will be incremented by @refs. 2472 * 2473 * FOLL_PIN on single-page folios: folio's refcount will be incremented by 2474 * @refs * GUP_PIN_COUNTING_BIAS. 2475 * 2476 * Return: The folio containing @page (with refcount appropriately 2477 * incremented) for success, or NULL upon failure. If neither FOLL_GET 2478 * nor FOLL_PIN was set, that's considered failure, and furthermore, 2479 * a likely bug in the caller, so a warning is also emitted. 2480 * 2481 * It uses add ref unless zero to elevate the folio refcount and must be called 2482 * in fast path only. 
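 *
 * As a worked example of the cases above, assuming the usual
 * GUP_PIN_COUNTING_BIAS of 1024: grabbing one page of an order-0 folio with
 * FOLL_PIN and @refs == 1 raises the folio refcount by 1024; grabbing one
 * page of a large folio raises the refcount by 1 and _pincount by 1; with
 * FOLL_GET the refcount is simply raised by 1 in either case.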
2483 */ 2484 static struct folio *try_grab_folio_fast(struct page *page, int refs, 2485 unsigned int flags) 2486 { 2487 struct folio *folio; 2488 2489 /* Raise warn if it is not called in fast GUP */ 2490 VM_WARN_ON_ONCE(!irqs_disabled()); 2491 2492 if (WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == 0)) 2493 return NULL; 2494 2495 if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page))) 2496 return NULL; 2497 2498 if (flags & FOLL_GET) 2499 return try_get_folio(page, refs); 2500 2501 /* FOLL_PIN is set */ 2502 2503 /* 2504 * Don't take a pin on the zero page - it's not going anywhere 2505 * and it is used in a *lot* of places. 2506 */ 2507 if (is_zero_page(page)) 2508 return page_folio(page); 2509 2510 folio = try_get_folio(page, refs); 2511 if (!folio) 2512 return NULL; 2513 2514 /* 2515 * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a 2516 * right zone, so fail and let the caller fall back to the slow 2517 * path. 2518 */ 2519 if (unlikely((flags & FOLL_LONGTERM) && 2520 !folio_is_longterm_pinnable(folio))) { 2521 if (!put_devmap_managed_page_refs(&folio->page, refs)) 2522 folio_put_refs(folio, refs); 2523 return NULL; 2524 } 2525 2526 /* 2527 * When pinning a large folio, use an exact count to track it. 2528 * 2529 * However, be sure to *also* increment the normal folio 2530 * refcount field at least once, so that the folio really 2531 * is pinned. That's why the refcount from the earlier 2532 * try_get_folio() is left intact. 2533 */ 2534 if (folio_test_large(folio)) 2535 atomic_add(refs, &folio->_pincount); 2536 else 2537 folio_ref_add(folio, 2538 refs * (GUP_PIN_COUNTING_BIAS - 1)); 2539 /* 2540 * Adjust the pincount before re-checking the PTE for changes. 2541 * This is essentially a smp_mb() and is paired with a memory 2542 * barrier in folio_try_share_anon_rmap_*(). 2543 */ 2544 smp_mb__after_atomic(); 2545 2546 node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs); 2547 2548 return folio; 2549 } 2550 2551 #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL 2552 /* 2553 * Fast-gup relies on pte change detection to avoid concurrent pgtable 2554 * operations. 2555 * 2556 * To pin the page, fast-gup needs to do below in order: 2557 * (1) pin the page (by prefetching pte), then (2) check pte not changed. 2558 * 2559 * For the rest of pgtable operations where pgtable updates can be racy 2560 * with fast-gup, we need to do (1) clear pte, then (2) check whether page 2561 * is pinned. 2562 * 2563 * Above will work for all pte-level operations, including THP split. 2564 * 2565 * For THP collapse, it's a bit more complicated because fast-gup may be 2566 * walking a pgtable page that is being freed (pte is still valid but pmd 2567 * can be cleared already). To avoid race in such condition, we need to 2568 * also check pmd here to make sure pmd doesn't change (corresponds to 2569 * pmdp_collapse_flush() in the THP collapse code path). 
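 *
 * Schematically (an ordering sketch, not literal code):
 *
 *	fast-gup			pgtable updater (e.g. THP split)
 *	(1) pin the page		(1) clear the pte (and/or pmd)
 *	(2) recheck pte/pmd unchanged	(2) check whether the page is pinned
 *
 * Whichever side loses the race observes the other side's update and backs
 * off, so fast-gup never ends up holding a pin that the concurrent pgtable
 * update did not see.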
2570 */ 2571 static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr, 2572 unsigned long end, unsigned int flags, 2573 struct page **pages, int *nr) 2574 { 2575 struct dev_pagemap *pgmap = NULL; 2576 int nr_start = *nr, ret = 0; 2577 pte_t *ptep, *ptem; 2578 2579 ptem = ptep = pte_offset_map(&pmd, addr); 2580 if (!ptep) 2581 return 0; 2582 do { 2583 pte_t pte = ptep_get_lockless(ptep); 2584 struct page *page; 2585 struct folio *folio; 2586 2587 /* 2588 * Always fallback to ordinary GUP on PROT_NONE-mapped pages: 2589 * pte_access_permitted() better should reject these pages 2590 * either way: otherwise, GUP-fast might succeed in 2591 * cases where ordinary GUP would fail due to VMA access 2592 * permissions. 2593 */ 2594 if (pte_protnone(pte)) 2595 goto pte_unmap; 2596 2597 if (!pte_access_permitted(pte, flags & FOLL_WRITE)) 2598 goto pte_unmap; 2599 2600 if (pte_devmap(pte)) { 2601 if (unlikely(flags & FOLL_LONGTERM)) 2602 goto pte_unmap; 2603 2604 pgmap = get_dev_pagemap(pte_pfn(pte), pgmap); 2605 if (unlikely(!pgmap)) { 2606 undo_dev_pagemap(nr, nr_start, flags, pages); 2607 goto pte_unmap; 2608 } 2609 } else if (pte_special(pte)) 2610 goto pte_unmap; 2611 2612 VM_BUG_ON(!pfn_valid(pte_pfn(pte))); 2613 page = pte_page(pte); 2614 2615 folio = try_grab_folio_fast(page, 1, flags); 2616 if (!folio) 2617 goto pte_unmap; 2618 2619 if (unlikely(folio_is_secretmem(folio))) { 2620 gup_put_folio(folio, 1, flags); 2621 goto pte_unmap; 2622 } 2623 2624 if (unlikely(pmd_val(pmd) != pmd_val(*pmdp)) || 2625 unlikely(pte_val(pte) != pte_val(ptep_get(ptep)))) { 2626 gup_put_folio(folio, 1, flags); 2627 goto pte_unmap; 2628 } 2629 2630 if (!folio_fast_pin_allowed(folio, flags)) { 2631 gup_put_folio(folio, 1, flags); 2632 goto pte_unmap; 2633 } 2634 2635 if (!pte_write(pte) && gup_must_unshare(NULL, flags, page)) { 2636 gup_put_folio(folio, 1, flags); 2637 goto pte_unmap; 2638 } 2639 2640 /* 2641 * We need to make the page accessible if and only if we are 2642 * going to access its content (the FOLL_PIN case). Please 2643 * see Documentation/core-api/pin_user_pages.rst for 2644 * details. 2645 */ 2646 if (flags & FOLL_PIN) { 2647 ret = arch_make_page_accessible(page); 2648 if (ret) { 2649 gup_put_folio(folio, 1, flags); 2650 goto pte_unmap; 2651 } 2652 } 2653 folio_set_referenced(folio); 2654 pages[*nr] = page; 2655 (*nr)++; 2656 } while (ptep++, addr += PAGE_SIZE, addr != end); 2657 2658 ret = 1; 2659 2660 pte_unmap: 2661 if (pgmap) 2662 put_dev_pagemap(pgmap); 2663 pte_unmap(ptem); 2664 return ret; 2665 } 2666 #else 2667 2668 /* 2669 * If we can't determine whether or not a pte is special, then fail immediately 2670 * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not 2671 * to be special. 2672 * 2673 * For a futex to be placed on a THP tail page, get_futex_key requires a 2674 * get_user_pages_fast_only implementation that can pin pages. Thus it's still 2675 * useful to have gup_huge_pmd even if we can't operate on ptes. 
2676 */ 2677 static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr, 2678 unsigned long end, unsigned int flags, 2679 struct page **pages, int *nr) 2680 { 2681 return 0; 2682 } 2683 #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */ 2684 2685 #if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE) 2686 static int __gup_device_huge(unsigned long pfn, unsigned long addr, 2687 unsigned long end, unsigned int flags, 2688 struct page **pages, int *nr) 2689 { 2690 int nr_start = *nr; 2691 struct dev_pagemap *pgmap = NULL; 2692 2693 do { 2694 struct page *page = pfn_to_page(pfn); 2695 2696 pgmap = get_dev_pagemap(pfn, pgmap); 2697 if (unlikely(!pgmap)) { 2698 undo_dev_pagemap(nr, nr_start, flags, pages); 2699 break; 2700 } 2701 2702 if (!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)) { 2703 undo_dev_pagemap(nr, nr_start, flags, pages); 2704 break; 2705 } 2706 2707 SetPageReferenced(page); 2708 pages[*nr] = page; 2709 if (unlikely(try_grab_folio(page_folio(page), 1, flags))) { 2710 undo_dev_pagemap(nr, nr_start, flags, pages); 2711 break; 2712 } 2713 (*nr)++; 2714 pfn++; 2715 } while (addr += PAGE_SIZE, addr != end); 2716 2717 put_dev_pagemap(pgmap); 2718 return addr == end; 2719 } 2720 2721 static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, 2722 unsigned long end, unsigned int flags, 2723 struct page **pages, int *nr) 2724 { 2725 unsigned long fault_pfn; 2726 int nr_start = *nr; 2727 2728 fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); 2729 if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr)) 2730 return 0; 2731 2732 if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) { 2733 undo_dev_pagemap(nr, nr_start, flags, pages); 2734 return 0; 2735 } 2736 return 1; 2737 } 2738 2739 static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, 2740 unsigned long end, unsigned int flags, 2741 struct page **pages, int *nr) 2742 { 2743 unsigned long fault_pfn; 2744 int nr_start = *nr; 2745 2746 fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); 2747 if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr)) 2748 return 0; 2749 2750 if (unlikely(pud_val(orig) != pud_val(*pudp))) { 2751 undo_dev_pagemap(nr, nr_start, flags, pages); 2752 return 0; 2753 } 2754 return 1; 2755 } 2756 #else 2757 static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, 2758 unsigned long end, unsigned int flags, 2759 struct page **pages, int *nr) 2760 { 2761 BUILD_BUG(); 2762 return 0; 2763 } 2764 2765 static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr, 2766 unsigned long end, unsigned int flags, 2767 struct page **pages, int *nr) 2768 { 2769 BUILD_BUG(); 2770 return 0; 2771 } 2772 #endif 2773 2774 static int record_subpages(struct page *page, unsigned long addr, 2775 unsigned long end, struct page **pages) 2776 { 2777 int nr; 2778 2779 for (nr = 0; addr != end; nr++, addr += PAGE_SIZE) 2780 pages[nr] = nth_page(page, nr); 2781 2782 return nr; 2783 } 2784 2785 #ifdef CONFIG_ARCH_HAS_HUGEPD 2786 static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end, 2787 unsigned long sz) 2788 { 2789 unsigned long __boundary = (addr + sz) & ~(sz-1); 2790 return (__boundary - 1 < end - 1) ? 
__boundary : end; 2791 } 2792 2793 static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr, 2794 unsigned long end, unsigned int flags, 2795 struct page **pages, int *nr) 2796 { 2797 unsigned long pte_end; 2798 struct page *page; 2799 struct folio *folio; 2800 pte_t pte; 2801 int refs; 2802 2803 pte_end = (addr + sz) & ~(sz-1); 2804 if (pte_end < end) 2805 end = pte_end; 2806 2807 pte = huge_ptep_get(ptep); 2808 2809 if (!pte_access_permitted(pte, flags & FOLL_WRITE)) 2810 return 0; 2811 2812 /* hugepages are never "special" */ 2813 VM_BUG_ON(!pfn_valid(pte_pfn(pte))); 2814 2815 page = nth_page(pte_page(pte), (addr & (sz - 1)) >> PAGE_SHIFT); 2816 refs = record_subpages(page, addr, end, pages + *nr); 2817 2818 folio = try_grab_folio_fast(page, refs, flags); 2819 if (!folio) 2820 return 0; 2821 2822 if (unlikely(pte_val(pte) != pte_val(ptep_get(ptep)))) { 2823 gup_put_folio(folio, refs, flags); 2824 return 0; 2825 } 2826 2827 if (!folio_fast_pin_allowed(folio, flags)) { 2828 gup_put_folio(folio, refs, flags); 2829 return 0; 2830 } 2831 2832 if (!pte_write(pte) && gup_must_unshare(NULL, flags, &folio->page)) { 2833 gup_put_folio(folio, refs, flags); 2834 return 0; 2835 } 2836 2837 *nr += refs; 2838 folio_set_referenced(folio); 2839 return 1; 2840 } 2841 2842 static int gup_huge_pd(hugepd_t hugepd, unsigned long addr, 2843 unsigned int pdshift, unsigned long end, unsigned int flags, 2844 struct page **pages, int *nr) 2845 { 2846 pte_t *ptep; 2847 unsigned long sz = 1UL << hugepd_shift(hugepd); 2848 unsigned long next; 2849 2850 ptep = hugepte_offset(hugepd, addr, pdshift); 2851 do { 2852 next = hugepte_addr_end(addr, end, sz); 2853 if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr)) 2854 return 0; 2855 } while (ptep++, addr = next, addr != end); 2856 2857 return 1; 2858 } 2859 #else 2860 static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr, 2861 unsigned int pdshift, unsigned long end, unsigned int flags, 2862 struct page **pages, int *nr) 2863 { 2864 return 0; 2865 } 2866 #endif /* CONFIG_ARCH_HAS_HUGEPD */ 2867 2868 static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, 2869 unsigned long end, unsigned int flags, 2870 struct page **pages, int *nr) 2871 { 2872 struct page *page; 2873 struct folio *folio; 2874 int refs; 2875 2876 if (!pmd_access_permitted(orig, flags & FOLL_WRITE)) 2877 return 0; 2878 2879 if (pmd_devmap(orig)) { 2880 if (unlikely(flags & FOLL_LONGTERM)) 2881 return 0; 2882 return __gup_device_huge_pmd(orig, pmdp, addr, end, flags, 2883 pages, nr); 2884 } 2885 2886 page = nth_page(pmd_page(orig), (addr & ~PMD_MASK) >> PAGE_SHIFT); 2887 refs = record_subpages(page, addr, end, pages + *nr); 2888 2889 folio = try_grab_folio_fast(page, refs, flags); 2890 if (!folio) 2891 return 0; 2892 2893 if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) { 2894 gup_put_folio(folio, refs, flags); 2895 return 0; 2896 } 2897 2898 if (!folio_fast_pin_allowed(folio, flags)) { 2899 gup_put_folio(folio, refs, flags); 2900 return 0; 2901 } 2902 if (!pmd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) { 2903 gup_put_folio(folio, refs, flags); 2904 return 0; 2905 } 2906 2907 *nr += refs; 2908 folio_set_referenced(folio); 2909 return 1; 2910 } 2911 2912 static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, 2913 unsigned long end, unsigned int flags, 2914 struct page **pages, int *nr) 2915 { 2916 struct page *page; 2917 struct folio *folio; 2918 int refs; 2919 2920 if (!pud_access_permitted(orig, flags & FOLL_WRITE)) 2921 return 0; 
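	/*
	 * Mirrors gup_huge_pmd(): a devmap PUD is handled via the
	 * dev_pagemap path below, and FOLL_LONGTERM pins of devmap memory
	 * are always punted to the slow path.
	 */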
2922 2923 if (pud_devmap(orig)) { 2924 if (unlikely(flags & FOLL_LONGTERM)) 2925 return 0; 2926 return __gup_device_huge_pud(orig, pudp, addr, end, flags, 2927 pages, nr); 2928 } 2929 2930 page = nth_page(pud_page(orig), (addr & ~PUD_MASK) >> PAGE_SHIFT); 2931 refs = record_subpages(page, addr, end, pages + *nr); 2932 2933 folio = try_grab_folio_fast(page, refs, flags); 2934 if (!folio) 2935 return 0; 2936 2937 if (unlikely(pud_val(orig) != pud_val(*pudp))) { 2938 gup_put_folio(folio, refs, flags); 2939 return 0; 2940 } 2941 2942 if (!folio_fast_pin_allowed(folio, flags)) { 2943 gup_put_folio(folio, refs, flags); 2944 return 0; 2945 } 2946 2947 if (!pud_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) { 2948 gup_put_folio(folio, refs, flags); 2949 return 0; 2950 } 2951 2952 *nr += refs; 2953 folio_set_referenced(folio); 2954 return 1; 2955 } 2956 2957 static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr, 2958 unsigned long end, unsigned int flags, 2959 struct page **pages, int *nr) 2960 { 2961 int refs; 2962 struct page *page; 2963 struct folio *folio; 2964 2965 if (!pgd_access_permitted(orig, flags & FOLL_WRITE)) 2966 return 0; 2967 2968 BUILD_BUG_ON(pgd_devmap(orig)); 2969 2970 page = nth_page(pgd_page(orig), (addr & ~PGDIR_MASK) >> PAGE_SHIFT); 2971 refs = record_subpages(page, addr, end, pages + *nr); 2972 2973 folio = try_grab_folio_fast(page, refs, flags); 2974 if (!folio) 2975 return 0; 2976 2977 if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) { 2978 gup_put_folio(folio, refs, flags); 2979 return 0; 2980 } 2981 2982 if (!pgd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) { 2983 gup_put_folio(folio, refs, flags); 2984 return 0; 2985 } 2986 2987 if (!folio_fast_pin_allowed(folio, flags)) { 2988 gup_put_folio(folio, refs, flags); 2989 return 0; 2990 } 2991 2992 *nr += refs; 2993 folio_set_referenced(folio); 2994 return 1; 2995 } 2996 2997 static int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned long end, 2998 unsigned int flags, struct page **pages, int *nr) 2999 { 3000 unsigned long next; 3001 pmd_t *pmdp; 3002 3003 pmdp = pmd_offset_lockless(pudp, pud, addr); 3004 do { 3005 pmd_t pmd = pmdp_get_lockless(pmdp); 3006 3007 next = pmd_addr_end(addr, end); 3008 if (!pmd_present(pmd)) 3009 return 0; 3010 3011 if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) || 3012 pmd_devmap(pmd))) { 3013 /* See gup_pte_range() */ 3014 if (pmd_protnone(pmd)) 3015 return 0; 3016 3017 if (!gup_huge_pmd(pmd, pmdp, addr, next, flags, 3018 pages, nr)) 3019 return 0; 3020 3021 } else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) { 3022 /* 3023 * architecture have different format for hugetlbfs 3024 * pmd format and THP pmd format 3025 */ 3026 if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr, 3027 PMD_SHIFT, next, flags, pages, nr)) 3028 return 0; 3029 } else if (!gup_pte_range(pmd, pmdp, addr, next, flags, pages, nr)) 3030 return 0; 3031 } while (pmdp++, addr = next, addr != end); 3032 3033 return 1; 3034 } 3035 3036 static int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned long end, 3037 unsigned int flags, struct page **pages, int *nr) 3038 { 3039 unsigned long next; 3040 pud_t *pudp; 3041 3042 pudp = pud_offset_lockless(p4dp, p4d, addr); 3043 do { 3044 pud_t pud = READ_ONCE(*pudp); 3045 3046 next = pud_addr_end(addr, end); 3047 if (unlikely(!pud_present(pud))) 3048 return 0; 3049 if (unlikely(pud_huge(pud) || pud_devmap(pud))) { 3050 if (!gup_huge_pud(pud, pudp, addr, next, flags, 3051 pages, nr)) 3052 return 0; 3053 } else if 
(unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
3054 if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
3055 PUD_SHIFT, next, flags, pages, nr))
3056 return 0;
3057 } else if (!gup_pmd_range(pudp, pud, addr, next, flags, pages, nr))
3058 return 0;
3059 } while (pudp++, addr = next, addr != end);
3060
3061 return 1;
3062 }
3063
3064 static int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, unsigned long end,
3065 unsigned int flags, struct page **pages, int *nr)
3066 {
3067 unsigned long next;
3068 p4d_t *p4dp;
3069
3070 p4dp = p4d_offset_lockless(pgdp, pgd, addr);
3071 do {
3072 p4d_t p4d = READ_ONCE(*p4dp);
3073
3074 next = p4d_addr_end(addr, end);
3075 if (p4d_none(p4d))
3076 return 0;
3077 BUILD_BUG_ON(p4d_huge(p4d));
3078 if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) {
3079 if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr,
3080 P4D_SHIFT, next, flags, pages, nr))
3081 return 0;
3082 } else if (!gup_pud_range(p4dp, p4d, addr, next, flags, pages, nr))
3083 return 0;
3084 } while (p4dp++, addr = next, addr != end);
3085
3086 return 1;
3087 }
3088
3089 static void gup_pgd_range(unsigned long addr, unsigned long end,
3090 unsigned int flags, struct page **pages, int *nr)
3091 {
3092 unsigned long next;
3093 pgd_t *pgdp;
3094
3095 pgdp = pgd_offset(current->mm, addr);
3096 do {
3097 pgd_t pgd = READ_ONCE(*pgdp);
3098
3099 next = pgd_addr_end(addr, end);
3100 if (pgd_none(pgd))
3101 return;
3102 if (unlikely(pgd_huge(pgd))) {
3103 if (!gup_huge_pgd(pgd, pgdp, addr, next, flags,
3104 pages, nr))
3105 return;
3106 } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
3107 if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
3108 PGDIR_SHIFT, next, flags, pages, nr))
3109 return;
3110 } else if (!gup_p4d_range(pgdp, pgd, addr, next, flags, pages, nr))
3111 return;
3112 } while (pgdp++, addr = next, addr != end);
3113 }
3114 #else
3115 static inline void gup_pgd_range(unsigned long addr, unsigned long end,
3116 unsigned int flags, struct page **pages, int *nr)
3117 {
3118 }
3119 #endif /* CONFIG_HAVE_FAST_GUP */
3120
3121 #ifndef gup_fast_permitted
3122 /*
3123 * Check if it's allowed to use get_user_pages_fast_only() for the range, or
3124 * we need to fall back to the slow version:
3125 */
3126 static bool gup_fast_permitted(unsigned long start, unsigned long end)
3127 {
3128 return true;
3129 }
3130 #endif
3131
3132 static unsigned long lockless_pages_from_mm(unsigned long start,
3133 unsigned long end,
3134 unsigned int gup_flags,
3135 struct page **pages)
3136 {
3137 unsigned long flags;
3138 int nr_pinned = 0;
3139 unsigned seq;
3140
3141 if (!IS_ENABLED(CONFIG_HAVE_FAST_GUP) ||
3142 !gup_fast_permitted(start, end))
3143 return 0;
3144
3145 if (gup_flags & FOLL_PIN) {
3146 seq = raw_read_seqcount(&current->mm->write_protect_seq);
3147 if (seq & 1)
3148 return 0;
3149 }
3150
3151 /*
3152 * Disable interrupts. The nested form is used, in order to allow full,
3153 * general purpose use of this routine.
3154 *
3155 * With interrupts disabled, we block page table pages from being freed
3156 * from under us. See struct mmu_table_batch comments in
3157 * include/asm-generic/tlb.h for more details.
3158 *
3159 * We do not adopt an rcu_read_lock() here as we also want to block IPIs
3160 * that come from THPs splitting.
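 *
 * The write_protect_seq read above and the retry check below pair with the
 * write-side seqcount section in fork()'s copy_page_range(): if the count
 * changed while we were walking the page tables with FOLL_PIN, everything
 * pinned here is dropped again and 0 is returned, pushing the caller to the
 * slow path (or, for FOLL_FAST_ONLY, simply reporting failure).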
3161 */
3162 local_irq_save(flags);
3163 gup_pgd_range(start, end, gup_flags, pages, &nr_pinned);
3164 local_irq_restore(flags);
3165
3166 /*
3167 * When pinning pages for DMA there could be a concurrent write protect
3168 * from fork() via copy_page_range(), in this case always fail fast GUP.
3169 */
3170 if (gup_flags & FOLL_PIN) {
3171 if (read_seqcount_retry(&current->mm->write_protect_seq, seq)) {
3172 unpin_user_pages_lockless(pages, nr_pinned);
3173 return 0;
3174 } else {
3175 sanity_check_pinned_pages(pages, nr_pinned);
3176 }
3177 }
3178 return nr_pinned;
3179 }
3180
3181 static int internal_get_user_pages_fast(unsigned long start,
3182 unsigned long nr_pages,
3183 unsigned int gup_flags,
3184 struct page **pages)
3185 {
3186 unsigned long len, end;
3187 unsigned long nr_pinned;
3188 int locked = 0;
3189 int ret;
3190
3191 if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM |
3192 FOLL_FORCE | FOLL_PIN | FOLL_GET |
3193 FOLL_FAST_ONLY | FOLL_NOFAULT |
3194 FOLL_PCI_P2PDMA | FOLL_HONOR_NUMA_FAULT)))
3195 return -EINVAL;
3196
3197 if (gup_flags & FOLL_PIN)
3198 mm_set_has_pinned_flag(&current->mm->flags);
3199
3200 if (!(gup_flags & FOLL_FAST_ONLY))
3201 might_lock_read(&current->mm->mmap_lock);
3202
3203 start = untagged_addr(start) & PAGE_MASK;
3204 len = nr_pages << PAGE_SHIFT;
3205 if (check_add_overflow(start, len, &end))
3206 return -EOVERFLOW;
3207 if (end > TASK_SIZE_MAX)
3208 return -EFAULT;
3209 if (unlikely(!access_ok((void __user *)start, len)))
3210 return -EFAULT;
3211
3212 nr_pinned = lockless_pages_from_mm(start, end, gup_flags, pages);
3213 if (nr_pinned == nr_pages || gup_flags & FOLL_FAST_ONLY)
3214 return nr_pinned;
3215
3216 /* Slow path: try to get the remaining pages with get_user_pages */
3217 start += nr_pinned << PAGE_SHIFT;
3218 pages += nr_pinned;
3219 ret = __gup_longterm_locked(current->mm, start, nr_pages - nr_pinned,
3220 pages, &locked,
3221 gup_flags | FOLL_TOUCH | FOLL_UNLOCKABLE);
3222 if (ret < 0) {
3223 /*
3224 * The caller has to unpin the pages we already pinned so
3225 * returning -errno is not an option
3226 */
3227 if (nr_pinned)
3228 return nr_pinned;
3229 return ret;
3230 }
3231 return ret + nr_pinned;
3232 }
3233
3234 /**
3235 * get_user_pages_fast_only() - pin user pages in memory
3236 * @start: starting user address
3237 * @nr_pages: number of pages from start to pin
3238 * @gup_flags: flags modifying pin behaviour
3239 * @pages: array that receives pointers to the pages pinned.
3240 * Should be at least nr_pages long.
3241 *
3242 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
3243 * the regular GUP.
3244 *
3245 * If the architecture does not support this function, simply return with no
3246 * pages pinned.
3247 *
3248 * Careful, careful! COW breaking can go either way, so a non-write
3249 * access can get ambiguous page results. If you call this function without
3250 * 'write' set, you'd better be sure that you're ok with that ambiguity.
3251 */
3252 int get_user_pages_fast_only(unsigned long start, int nr_pages,
3253 unsigned int gup_flags, struct page **pages)
3254 {
3255 /*
3256 * Internally (within mm/gup.c), gup fast variants must set FOLL_GET,
3257 * because gup fast is always a "pin with a +1 page refcount" request.
3258 *
3259 * FOLL_FAST_ONLY is required in order to match the API description of
3260 * this routine: no fall back to regular ("slow") GUP.
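 *
 * That combination is what makes this entry point usable from callers that
 * cannot afford to take the mmap_lock or fault anything in (the futex key
 * code mentioned earlier in this file is the classic example): it either
 * pins the pages via the lockless walk or it fails, never falling back to
 * the slow path.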
3261 */ 3262 if (!is_valid_gup_args(pages, NULL, &gup_flags, 3263 FOLL_GET | FOLL_FAST_ONLY)) 3264 return -EINVAL; 3265 3266 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages); 3267 } 3268 EXPORT_SYMBOL_GPL(get_user_pages_fast_only); 3269 3270 /** 3271 * get_user_pages_fast() - pin user pages in memory 3272 * @start: starting user address 3273 * @nr_pages: number of pages from start to pin 3274 * @gup_flags: flags modifying pin behaviour 3275 * @pages: array that receives pointers to the pages pinned. 3276 * Should be at least nr_pages long. 3277 * 3278 * Attempt to pin user pages in memory without taking mm->mmap_lock. 3279 * If not successful, it will fall back to taking the lock and 3280 * calling get_user_pages(). 3281 * 3282 * Returns number of pages pinned. This may be fewer than the number requested. 3283 * If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns 3284 * -errno. 3285 */ 3286 int get_user_pages_fast(unsigned long start, int nr_pages, 3287 unsigned int gup_flags, struct page **pages) 3288 { 3289 /* 3290 * The caller may or may not have explicitly set FOLL_GET; either way is 3291 * OK. However, internally (within mm/gup.c), gup fast variants must set 3292 * FOLL_GET, because gup fast is always a "pin with a +1 page refcount" 3293 * request. 3294 */ 3295 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_GET)) 3296 return -EINVAL; 3297 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages); 3298 } 3299 EXPORT_SYMBOL_GPL(get_user_pages_fast); 3300 3301 /** 3302 * pin_user_pages_fast() - pin user pages in memory without taking locks 3303 * 3304 * @start: starting user address 3305 * @nr_pages: number of pages from start to pin 3306 * @gup_flags: flags modifying pin behaviour 3307 * @pages: array that receives pointers to the pages pinned. 3308 * Should be at least nr_pages long. 3309 * 3310 * Nearly the same as get_user_pages_fast(), except that FOLL_PIN is set. See 3311 * get_user_pages_fast() for documentation on the function arguments, because 3312 * the arguments here are identical. 3313 * 3314 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please 3315 * see Documentation/core-api/pin_user_pages.rst for further details. 3316 * 3317 * Note that if a zero_page is amongst the returned pages, it will not have 3318 * pins in it and unpin_user_page() will not remove pins from it. 3319 */ 3320 int pin_user_pages_fast(unsigned long start, int nr_pages, 3321 unsigned int gup_flags, struct page **pages) 3322 { 3323 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN)) 3324 return -EINVAL; 3325 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages); 3326 } 3327 EXPORT_SYMBOL_GPL(pin_user_pages_fast); 3328 3329 /** 3330 * pin_user_pages_remote() - pin pages of a remote process 3331 * 3332 * @mm: mm_struct of target mm 3333 * @start: starting user address 3334 * @nr_pages: number of pages from start to pin 3335 * @gup_flags: flags modifying lookup behaviour 3336 * @pages: array that receives pointers to the pages pinned. 3337 * Should be at least nr_pages long. 3338 * @locked: pointer to lock flag indicating whether lock is held and 3339 * subsequently whether VM_FAULT_RETRY functionality can be 3340 * utilised. Lock must initially be held. 3341 * 3342 * Nearly the same as get_user_pages_remote(), except that FOLL_PIN is set. See 3343 * get_user_pages_remote() for documentation on the function arguments, because 3344 * the arguments here are identical. 
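 *
 * Illustrative sketch only (loosely modelled on how drivers pin another
 * process's buffer for DMA; names and error handling are hypothetical):
 *
 *	long pinned;
 *
 *	mmap_read_lock(mm);
 *	pinned = pin_user_pages_remote(mm, start, nr_pages,
 *				       FOLL_WRITE | FOLL_LONGTERM, pages, NULL);
 *	mmap_read_unlock(mm);
 *	...
 *	if (pinned > 0)
 *		unpin_user_pages_dirty_lock(pages, pinned, true);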
3345 * 3346 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please 3347 * see Documentation/core-api/pin_user_pages.rst for details. 3348 * 3349 * Note that if a zero_page is amongst the returned pages, it will not have 3350 * pins in it and unpin_user_page*() will not remove pins from it. 3351 */ 3352 long pin_user_pages_remote(struct mm_struct *mm, 3353 unsigned long start, unsigned long nr_pages, 3354 unsigned int gup_flags, struct page **pages, 3355 int *locked) 3356 { 3357 int local_locked = 1; 3358 3359 if (!is_valid_gup_args(pages, locked, &gup_flags, 3360 FOLL_PIN | FOLL_TOUCH | FOLL_REMOTE)) 3361 return 0; 3362 return __gup_longterm_locked(mm, start, nr_pages, pages, 3363 locked ? locked : &local_locked, 3364 gup_flags); 3365 } 3366 EXPORT_SYMBOL(pin_user_pages_remote); 3367 3368 /** 3369 * pin_user_pages() - pin user pages in memory for use by other devices 3370 * 3371 * @start: starting user address 3372 * @nr_pages: number of pages from start to pin 3373 * @gup_flags: flags modifying lookup behaviour 3374 * @pages: array that receives pointers to the pages pinned. 3375 * Should be at least nr_pages long. 3376 * 3377 * Nearly the same as get_user_pages(), except that FOLL_TOUCH is not set, and 3378 * FOLL_PIN is set. 3379 * 3380 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please 3381 * see Documentation/core-api/pin_user_pages.rst for details. 3382 * 3383 * Note that if a zero_page is amongst the returned pages, it will not have 3384 * pins in it and unpin_user_page*() will not remove pins from it. 3385 */ 3386 long pin_user_pages(unsigned long start, unsigned long nr_pages, 3387 unsigned int gup_flags, struct page **pages) 3388 { 3389 int locked = 1; 3390 3391 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN)) 3392 return 0; 3393 return __gup_longterm_locked(current->mm, start, nr_pages, 3394 pages, &locked, gup_flags); 3395 } 3396 EXPORT_SYMBOL(pin_user_pages); 3397 3398 /* 3399 * pin_user_pages_unlocked() is the FOLL_PIN variant of 3400 * get_user_pages_unlocked(). Behavior is the same, except that this one sets 3401 * FOLL_PIN and rejects FOLL_GET. 3402 * 3403 * Note that if a zero_page is amongst the returned pages, it will not have 3404 * pins in it and unpin_user_page*() will not remove pins from it. 3405 */ 3406 long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages, 3407 struct page **pages, unsigned int gup_flags) 3408 { 3409 int locked = 0; 3410 3411 if (!is_valid_gup_args(pages, NULL, &gup_flags, 3412 FOLL_PIN | FOLL_TOUCH | FOLL_UNLOCKABLE)) 3413 return 0; 3414 3415 return __gup_longterm_locked(current->mm, start, nr_pages, pages, 3416 &locked, gup_flags); 3417 } 3418 EXPORT_SYMBOL(pin_user_pages_unlocked); 3419
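/*
 * Illustrative sketch only, not an additional API: the FOLL_PIN variants above
 * are used in pairs with the unpin_user_page*() helpers. A driver preparing a
 * user buffer for DMA might do, with hypothetical names (uaddr, nr, pages):
 *
 *	long pinned = pin_user_pages_fast(uaddr, nr, FOLL_WRITE | FOLL_LONGTERM,
 *					  pages);
 *
 *	if (pinned > 0) {
 *		... map the pages for DMA and run the transfer ...
 *		unpin_user_pages_dirty_lock(pages, pinned, true);
 *	}
 *
 * See Documentation/core-api/pin_user_pages.rst for the full rules.
 */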