1 // SPDX-License-Identifier: GPL-2.0-only 2 #include <linux/kernel.h> 3 #include <linux/errno.h> 4 #include <linux/err.h> 5 #include <linux/spinlock.h> 6 7 #include <linux/mm.h> 8 #include <linux/memremap.h> 9 #include <linux/pagemap.h> 10 #include <linux/rmap.h> 11 #include <linux/swap.h> 12 #include <linux/swapops.h> 13 #include <linux/secretmem.h> 14 15 #include <linux/sched/signal.h> 16 #include <linux/rwsem.h> 17 #include <linux/hugetlb.h> 18 #include <linux/migrate.h> 19 #include <linux/mm_inline.h> 20 #include <linux/sched/mm.h> 21 #include <linux/shmem_fs.h> 22 23 #include <asm/mmu_context.h> 24 #include <asm/tlbflush.h> 25 26 #include "internal.h" 27 28 struct follow_page_context { 29 struct dev_pagemap *pgmap; 30 unsigned int page_mask; 31 }; 32 33 static inline void sanity_check_pinned_pages(struct page **pages, 34 unsigned long npages) 35 { 36 if (!IS_ENABLED(CONFIG_DEBUG_VM)) 37 return; 38 39 /* 40 * We only pin anonymous pages if they are exclusive. Once pinned, we 41 * can no longer turn them possibly shared and PageAnonExclusive() will 42 * stick around until the page is freed. 43 * 44 * We'd like to verify that our pinned anonymous pages are still mapped 45 * exclusively. The issue with anon THP is that we don't know how 46 * they are/were mapped when pinning them. However, for anon 47 * THP we can assume that either the given page (PTE-mapped THP) or 48 * the head page (PMD-mapped THP) should be PageAnonExclusive(). If 49 * neither is the case, there is certainly something wrong. 50 */ 51 for (; npages; npages--, pages++) { 52 struct page *page = *pages; 53 struct folio *folio = page_folio(page); 54 55 if (is_zero_page(page) || 56 !folio_test_anon(folio)) 57 continue; 58 if (!folio_test_large(folio) || folio_test_hugetlb(folio)) 59 VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page), page); 60 else 61 /* Either a PTE-mapped or a PMD-mapped THP. */ 62 VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page) && 63 !PageAnonExclusive(page), page); 64 } 65 } 66 67 /* 68 * Return the folio with ref appropriately incremented, 69 * or NULL if that failed. 70 */ 71 static inline struct folio *try_get_folio(struct page *page, int refs) 72 { 73 struct folio *folio; 74 75 retry: 76 folio = page_folio(page); 77 if (WARN_ON_ONCE(folio_ref_count(folio) < 0)) 78 return NULL; 79 if (unlikely(!folio_ref_try_add_rcu(folio, refs))) 80 return NULL; 81 82 /* 83 * At this point we have a stable reference to the folio; but it 84 * could be that between calling page_folio() and the refcount 85 * increment, the folio was split, in which case we'd end up 86 * holding a reference on a folio that has nothing to do with the page 87 * we were given anymore. 88 * So now that the folio is stable, recheck that the page still 89 * belongs to this folio. 90 */ 91 if (unlikely(page_folio(page) != folio)) { 92 if (!put_devmap_managed_page_refs(&folio->page, refs)) 93 folio_put_refs(folio, refs); 94 goto retry; 95 } 96 97 return folio; 98 } 99 100 /** 101 * try_grab_folio() - Attempt to get or pin a folio. 102 * @page: pointer to page to be grabbed 103 * @refs: the value to (effectively) add to the folio's refcount 104 * @flags: gup flags: these are the FOLL_* flag values. 105 * 106 * "grab" names in this file mean, "look at flags to decide whether to use 107 * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount. 108 * 109 * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the 110 * same time. (That's true throughout the get_user_pages*() and 111 * pin_user_pages*() APIs.) Cases: 112 * 113 * FOLL_GET: folio's refcount will be incremented by @refs. 114 * 115 * FOLL_PIN on large folios: folio's refcount will be incremented by 116 * @refs, and its pincount will be incremented by @refs. 117 * 118 * FOLL_PIN on single-page folios: folio's refcount will be incremented by 119 * @refs * GUP_PIN_COUNTING_BIAS. 120 * 121 * Return: The folio containing @page (with refcount appropriately 122 * incremented) for success, or NULL upon failure. If neither FOLL_GET 123 * nor FOLL_PIN was set, that's considered failure, and furthermore, 124 * a likely bug in the caller, so a warning is also emitted. 125 */ 126 struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags) 127 { 128 struct folio *folio; 129 130 if (WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == 0)) 131 return NULL; 132 133 if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page))) 134 return NULL; 135 136 if (flags & FOLL_GET) 137 return try_get_folio(page, refs); 138 139 /* FOLL_PIN is set */ 140 141 /* 142 * Don't take a pin on the zero page - it's not going anywhere 143 * and it is used in a *lot* of places. 144 */ 145 if (is_zero_page(page)) 146 return page_folio(page); 147 148 folio = try_get_folio(page, refs); 149 if (!folio) 150 return NULL; 151 152 /* 153 * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a 154 * right zone, so fail and let the caller fall back to the slow 155 * path. 156 */ 157 if (unlikely((flags & FOLL_LONGTERM) && 158 !folio_is_longterm_pinnable(folio))) { 159 if (!put_devmap_managed_page_refs(&folio->page, refs)) 160 folio_put_refs(folio, refs); 161 return NULL; 162 } 163 164 /* 165 * When pinning a large folio, use an exact count to track it. 166 * 167 * However, be sure to *also* increment the normal folio 168 * refcount field at least once, so that the folio really 169 * is pinned. That's why the refcount from the earlier 170 * try_get_folio() is left intact. 171 */ 172 if (folio_test_large(folio)) 173 atomic_add(refs, &folio->_pincount); 174 else 175 folio_ref_add(folio, 176 refs * (GUP_PIN_COUNTING_BIAS - 1)); 177 /* 178 * Adjust the pincount before re-checking the PTE for changes. 179 * This is essentially a smp_mb() and is paired with a memory 180 * barrier in page_try_share_anon_rmap(). 181 */ 182 smp_mb__after_atomic(); 183 184 node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs); 185 186 return folio; 187 } 188 189 static void gup_put_folio(struct folio *folio, int refs, unsigned int flags) 190 { 191 if (flags & FOLL_PIN) { 192 if (is_zero_folio(folio)) 193 return; 194 node_stat_mod_folio(folio, NR_FOLL_PIN_RELEASED, refs); 195 if (folio_test_large(folio)) 196 atomic_sub(refs, &folio->_pincount); 197 else 198 refs *= GUP_PIN_COUNTING_BIAS; 199 } 200 201 if (!put_devmap_managed_page_refs(&folio->page, refs)) 202 folio_put_refs(folio, refs); 203 } 204 205 /** 206 * try_grab_page() - elevate a page's refcount by a flag-dependent amount 207 * @page: pointer to page to be grabbed 208 * @flags: gup flags: these are the FOLL_* flag values. 209 * 210 * This might not do anything at all, depending on the flags argument. 211 * 212 * "grab" names in this file mean, "look at flags to decide whether to use 213 * FOLL_PIN or FOLL_GET behavior, when incrementing the page's refcount. 214 * 215 * Either FOLL_PIN or FOLL_GET (or neither) may be set, but not both at the same 216 * time. Cases: please see the try_grab_folio() documentation, with 217 * "refs=1". 218 * 219 * Return: 0 for success, or if no action was required (if neither FOLL_PIN 220 * nor FOLL_GET was set, nothing is done). A negative error code for failure: 221 * 222 * -ENOMEM FOLL_GET or FOLL_PIN was set, but the page could not 223 * be grabbed. 224 */ 225 int __must_check try_grab_page(struct page *page, unsigned int flags) 226 { 227 struct folio *folio = page_folio(page); 228 229 if (WARN_ON_ONCE(folio_ref_count(folio) <= 0)) 230 return -ENOMEM; 231 232 if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page))) 233 return -EREMOTEIO; 234 235 if (flags & FOLL_GET) 236 folio_ref_inc(folio); 237 else if (flags & FOLL_PIN) { 238 /* 239 * Don't take a pin on the zero page - it's not going anywhere 240 * and it is used in a *lot* of places. 241 */ 242 if (is_zero_page(page)) 243 return 0; 244 245 /* 246 * Similar to try_grab_folio(): be sure to *also* 247 * increment the normal page refcount field at least once, 248 * so that the page really is pinned. 249 */ 250 if (folio_test_large(folio)) { 251 folio_ref_add(folio, 1); 252 atomic_add(1, &folio->_pincount); 253 } else { 254 folio_ref_add(folio, GUP_PIN_COUNTING_BIAS); 255 } 256 257 node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, 1); 258 } 259 260 return 0; 261 } 262 263 /** 264 * unpin_user_page() - release a dma-pinned page 265 * @page: pointer to page to be released 266 * 267 * Pages that were pinned via pin_user_pages*() must be released via either 268 * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so 269 * that such pages can be separately tracked and uniquely handled. In 270 * particular, interactions with RDMA and filesystems need special handling. 271 */ 272 void unpin_user_page(struct page *page) 273 { 274 sanity_check_pinned_pages(&page, 1); 275 gup_put_folio(page_folio(page), 1, FOLL_PIN); 276 } 277 EXPORT_SYMBOL(unpin_user_page); 278 279 /** 280 * folio_add_pin - Try to get an additional pin on a pinned folio 281 * @folio: The folio to be pinned 282 * 283 * Get an additional pin on a folio we already have a pin on. Makes no change 284 * if the folio is a zero_page. 285 */ 286 void folio_add_pin(struct folio *folio) 287 { 288 if (is_zero_folio(folio)) 289 return; 290 291 /* 292 * Similar to try_grab_folio(): be sure to *also* increment the normal 293 * page refcount field at least once, so that the page really is 294 * pinned. 295 */ 296 if (folio_test_large(folio)) { 297 WARN_ON_ONCE(atomic_read(&folio->_pincount) < 1); 298 folio_ref_inc(folio); 299 atomic_inc(&folio->_pincount); 300 } else { 301 WARN_ON_ONCE(folio_ref_count(folio) < GUP_PIN_COUNTING_BIAS); 302 folio_ref_add(folio, GUP_PIN_COUNTING_BIAS); 303 } 304 } 305 306 static inline struct folio *gup_folio_range_next(struct page *start, 307 unsigned long npages, unsigned long i, unsigned int *ntails) 308 { 309 struct page *next = nth_page(start, i); 310 struct folio *folio = page_folio(next); 311 unsigned int nr = 1; 312 313 if (folio_test_large(folio)) 314 nr = min_t(unsigned int, npages - i, 315 folio_nr_pages(folio) - folio_page_idx(folio, next)); 316 317 *ntails = nr; 318 return folio; 319 } 320 321 static inline struct folio *gup_folio_next(struct page **list, 322 unsigned long npages, unsigned long i, unsigned int *ntails) 323 { 324 struct folio *folio = page_folio(list[i]); 325 unsigned int nr; 326 327 for (nr = i + 1; nr < npages; nr++) { 328 if (page_folio(list[nr]) != folio) 329 break; 330 } 331 332 *ntails = nr - i; 333 return folio; 334 } 335 336 /** 337 * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages 338 * @pages: array of pages to be maybe marked dirty, and definitely released. 339 * @npages: number of pages in the @pages array. 340 * @make_dirty: whether to mark the pages dirty 341 * 342 * "gup-pinned page" refers to a page that has had one of the get_user_pages() 343 * variants called on that page. 344 * 345 * For each page in the @pages array, make that page (or its head page, if a 346 * compound page) dirty, if @make_dirty is true, and if the page was previously 347 * listed as clean. In any case, releases all pages using unpin_user_page(), 348 * possibly via unpin_user_pages(), for the non-dirty case. 349 * 350 * Please see the unpin_user_page() documentation for details. 351 * 352 * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is 353 * required, then the caller should a) verify that this is really correct, 354 * because _lock() is usually required, and b) hand code it: 355 * set_page_dirty_lock(), unpin_user_page(). 356 * 357 */ 358 void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages, 359 bool make_dirty) 360 { 361 unsigned long i; 362 struct folio *folio; 363 unsigned int nr; 364 365 if (!make_dirty) { 366 unpin_user_pages(pages, npages); 367 return; 368 } 369 370 sanity_check_pinned_pages(pages, npages); 371 for (i = 0; i < npages; i += nr) { 372 folio = gup_folio_next(pages, npages, i, &nr); 373 /* 374 * Checking PageDirty at this point may race with 375 * clear_page_dirty_for_io(), but that's OK. Two key 376 * cases: 377 * 378 * 1) This code sees the page as already dirty, so it 379 * skips the call to set_page_dirty(). That could happen 380 * because clear_page_dirty_for_io() called 381 * page_mkclean(), followed by set_page_dirty(). 382 * However, now the page is going to get written back, 383 * which meets the original intention of setting it 384 * dirty, so all is well: clear_page_dirty_for_io() goes 385 * on to call TestClearPageDirty(), and write the page 386 * back. 387 * 388 * 2) This code sees the page as clean, so it calls 389 * set_page_dirty(). The page stays dirty, despite being 390 * written back, so it gets written back again in the 391 * next writeback cycle. This is harmless. 392 */ 393 if (!folio_test_dirty(folio)) { 394 folio_lock(folio); 395 folio_mark_dirty(folio); 396 folio_unlock(folio); 397 } 398 gup_put_folio(folio, nr, FOLL_PIN); 399 } 400 } 401 EXPORT_SYMBOL(unpin_user_pages_dirty_lock); 402 403 /** 404 * unpin_user_page_range_dirty_lock() - release and optionally dirty 405 * gup-pinned page range 406 * 407 * @page: the starting page of a range maybe marked dirty, and definitely released. 408 * @npages: number of consecutive pages to release. 409 * @make_dirty: whether to mark the pages dirty 410 * 411 * "gup-pinned page range" refers to a range of pages that has had one of the 412 * pin_user_pages() variants called on that page. 413 * 414 * For the page ranges defined by [page .. page+npages], make that range (or 415 * its head pages, if a compound page) dirty, if @make_dirty is true, and if the 416 * page range was previously listed as clean. 417 * 418 * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is 419 * required, then the caller should a) verify that this is really correct, 420 * because _lock() is usually required, and b) hand code it: 421 * set_page_dirty_lock(), unpin_user_page(). 422 * 423 */ 424 void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages, 425 bool make_dirty) 426 { 427 unsigned long i; 428 struct folio *folio; 429 unsigned int nr; 430 431 for (i = 0; i < npages; i += nr) { 432 folio = gup_folio_range_next(page, npages, i, &nr); 433 if (make_dirty && !folio_test_dirty(folio)) { 434 folio_lock(folio); 435 folio_mark_dirty(folio); 436 folio_unlock(folio); 437 } 438 gup_put_folio(folio, nr, FOLL_PIN); 439 } 440 } 441 EXPORT_SYMBOL(unpin_user_page_range_dirty_lock); 442 443 static void unpin_user_pages_lockless(struct page **pages, unsigned long npages) 444 { 445 unsigned long i; 446 struct folio *folio; 447 unsigned int nr; 448 449 /* 450 * Don't perform any sanity checks because we might have raced with 451 * fork() and some anonymous pages might now actually be shared -- 452 * which is why we're unpinning after all. 453 */ 454 for (i = 0; i < npages; i += nr) { 455 folio = gup_folio_next(pages, npages, i, &nr); 456 gup_put_folio(folio, nr, FOLL_PIN); 457 } 458 } 459 460 /** 461 * unpin_user_pages() - release an array of gup-pinned pages. 462 * @pages: array of pages to be marked dirty and released. 463 * @npages: number of pages in the @pages array. 464 * 465 * For each page in the @pages array, release the page using unpin_user_page(). 466 * 467 * Please see the unpin_user_page() documentation for details. 468 */ 469 void unpin_user_pages(struct page **pages, unsigned long npages) 470 { 471 unsigned long i; 472 struct folio *folio; 473 unsigned int nr; 474 475 /* 476 * If this WARN_ON() fires, then the system *might* be leaking pages (by 477 * leaving them pinned), but probably not. More likely, gup/pup returned 478 * a hard -ERRNO error to the caller, who erroneously passed it here. 479 */ 480 if (WARN_ON(IS_ERR_VALUE(npages))) 481 return; 482 483 sanity_check_pinned_pages(pages, npages); 484 for (i = 0; i < npages; i += nr) { 485 folio = gup_folio_next(pages, npages, i, &nr); 486 gup_put_folio(folio, nr, FOLL_PIN); 487 } 488 } 489 EXPORT_SYMBOL(unpin_user_pages); 490 491 /* 492 * Set the MMF_HAS_PINNED if not set yet; after set it'll be there for the mm's 493 * lifecycle. Avoid setting the bit unless necessary, or it might cause write 494 * cache bouncing on large SMP machines for concurrent pinned gups. 495 */ 496 static inline void mm_set_has_pinned_flag(unsigned long *mm_flags) 497 { 498 if (!test_bit(MMF_HAS_PINNED, mm_flags)) 499 set_bit(MMF_HAS_PINNED, mm_flags); 500 } 501 502 #ifdef CONFIG_MMU 503 static struct page *no_page_table(struct vm_area_struct *vma, 504 unsigned int flags) 505 { 506 /* 507 * When core dumping an enormous anonymous area that nobody 508 * has touched so far, we don't want to allocate unnecessary pages or 509 * page tables. Return error instead of NULL to skip handle_mm_fault, 510 * then get_dump_page() will return NULL to leave a hole in the dump. 511 * But we can only make this optimization where a hole would surely 512 * be zero-filled if handle_mm_fault() actually did handle it. 513 */ 514 if ((flags & FOLL_DUMP) && 515 (vma_is_anonymous(vma) || !vma->vm_ops->fault)) 516 return ERR_PTR(-EFAULT); 517 return NULL; 518 } 519 520 static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address, 521 pte_t *pte, unsigned int flags) 522 { 523 if (flags & FOLL_TOUCH) { 524 pte_t orig_entry = ptep_get(pte); 525 pte_t entry = orig_entry; 526 527 if (flags & FOLL_WRITE) 528 entry = pte_mkdirty(entry); 529 entry = pte_mkyoung(entry); 530 531 if (!pte_same(orig_entry, entry)) { 532 set_pte_at(vma->vm_mm, address, pte, entry); 533 update_mmu_cache(vma, address, pte); 534 } 535 } 536 537 /* Proper page table entry exists, but no corresponding struct page */ 538 return -EEXIST; 539 } 540 541 /* FOLL_FORCE can write to even unwritable PTEs in COW mappings. */ 542 static inline bool can_follow_write_pte(pte_t pte, struct page *page, 543 struct vm_area_struct *vma, 544 unsigned int flags) 545 { 546 /* If the pte is writable, we can write to the page. */ 547 if (pte_write(pte)) 548 return true; 549 550 /* Maybe FOLL_FORCE is set to override it? */ 551 if (!(flags & FOLL_FORCE)) 552 return false; 553 554 /* But FOLL_FORCE has no effect on shared mappings */ 555 if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED)) 556 return false; 557 558 /* ... or read-only private ones */ 559 if (!(vma->vm_flags & VM_MAYWRITE)) 560 return false; 561 562 /* ... or already writable ones that just need to take a write fault */ 563 if (vma->vm_flags & VM_WRITE) 564 return false; 565 566 /* 567 * See can_change_pte_writable(): we broke COW and could map the page 568 * writable if we have an exclusive anonymous page ... 569 */ 570 if (!page || !PageAnon(page) || !PageAnonExclusive(page)) 571 return false; 572 573 /* ... and a write-fault isn't required for other reasons. */ 574 if (vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte)) 575 return false; 576 return !userfaultfd_pte_wp(vma, pte); 577 } 578 579 static struct page *follow_page_pte(struct vm_area_struct *vma, 580 unsigned long address, pmd_t *pmd, unsigned int flags, 581 struct dev_pagemap **pgmap) 582 { 583 struct mm_struct *mm = vma->vm_mm; 584 struct page *page; 585 spinlock_t *ptl; 586 pte_t *ptep, pte; 587 int ret; 588 589 /* FOLL_GET and FOLL_PIN are mutually exclusive. */ 590 if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) == 591 (FOLL_PIN | FOLL_GET))) 592 return ERR_PTR(-EINVAL); 593 594 ptep = pte_offset_map_lock(mm, pmd, address, &ptl); 595 if (!ptep) 596 return no_page_table(vma, flags); 597 pte = ptep_get(ptep); 598 if (!pte_present(pte)) 599 goto no_page; 600 if (pte_protnone(pte) && !gup_can_follow_protnone(vma, flags)) 601 goto no_page; 602 603 page = vm_normal_page(vma, address, pte); 604 605 /* 606 * We only care about anon pages in can_follow_write_pte() and don't 607 * have to worry about pte_devmap() because they are never anon. 608 */ 609 if ((flags & FOLL_WRITE) && 610 !can_follow_write_pte(pte, page, vma, flags)) { 611 page = NULL; 612 goto out; 613 } 614 615 if (!page && pte_devmap(pte) && (flags & (FOLL_GET | FOLL_PIN))) { 616 /* 617 * Only return device mapping pages in the FOLL_GET or FOLL_PIN 618 * case since they are only valid while holding the pgmap 619 * reference. 620 */ 621 *pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap); 622 if (*pgmap) 623 page = pte_page(pte); 624 else 625 goto no_page; 626 } else if (unlikely(!page)) { 627 if (flags & FOLL_DUMP) { 628 /* Avoid special (like zero) pages in core dumps */ 629 page = ERR_PTR(-EFAULT); 630 goto out; 631 } 632 633 if (is_zero_pfn(pte_pfn(pte))) { 634 page = pte_page(pte); 635 } else { 636 ret = follow_pfn_pte(vma, address, ptep, flags); 637 page = ERR_PTR(ret); 638 goto out; 639 } 640 } 641 642 if (!pte_write(pte) && gup_must_unshare(vma, flags, page)) { 643 page = ERR_PTR(-EMLINK); 644 goto out; 645 } 646 647 VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) && 648 !PageAnonExclusive(page), page); 649 650 /* try_grab_page() does nothing unless FOLL_GET or FOLL_PIN is set. */ 651 ret = try_grab_page(page, flags); 652 if (unlikely(ret)) { 653 page = ERR_PTR(ret); 654 goto out; 655 } 656 657 /* 658 * We need to make the page accessible if and only if we are going 659 * to access its content (the FOLL_PIN case). Please see 660 * Documentation/core-api/pin_user_pages.rst for details. 661 */ 662 if (flags & FOLL_PIN) { 663 ret = arch_make_page_accessible(page); 664 if (ret) { 665 unpin_user_page(page); 666 page = ERR_PTR(ret); 667 goto out; 668 } 669 } 670 if (flags & FOLL_TOUCH) { 671 if ((flags & FOLL_WRITE) && 672 !pte_dirty(pte) && !PageDirty(page)) 673 set_page_dirty(page); 674 /* 675 * pte_mkyoung() would be more correct here, but atomic care 676 * is needed to avoid losing the dirty bit: it is easier to use 677 * mark_page_accessed(). 678 */ 679 mark_page_accessed(page); 680 } 681 out: 682 pte_unmap_unlock(ptep, ptl); 683 return page; 684 no_page: 685 pte_unmap_unlock(ptep, ptl); 686 if (!pte_none(pte)) 687 return NULL; 688 return no_page_table(vma, flags); 689 } 690 691 static struct page *follow_pmd_mask(struct vm_area_struct *vma, 692 unsigned long address, pud_t *pudp, 693 unsigned int flags, 694 struct follow_page_context *ctx) 695 { 696 pmd_t *pmd, pmdval; 697 spinlock_t *ptl; 698 struct page *page; 699 struct mm_struct *mm = vma->vm_mm; 700 701 pmd = pmd_offset(pudp, address); 702 pmdval = pmdp_get_lockless(pmd); 703 if (pmd_none(pmdval)) 704 return no_page_table(vma, flags); 705 if (!pmd_present(pmdval)) 706 return no_page_table(vma, flags); 707 if (pmd_devmap(pmdval)) { 708 ptl = pmd_lock(mm, pmd); 709 page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap); 710 spin_unlock(ptl); 711 if (page) 712 return page; 713 } 714 if (likely(!pmd_trans_huge(pmdval))) 715 return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); 716 717 if (pmd_protnone(pmdval) && !gup_can_follow_protnone(vma, flags)) 718 return no_page_table(vma, flags); 719 720 ptl = pmd_lock(mm, pmd); 721 if (unlikely(!pmd_present(*pmd))) { 722 spin_unlock(ptl); 723 return no_page_table(vma, flags); 724 } 725 if (unlikely(!pmd_trans_huge(*pmd))) { 726 spin_unlock(ptl); 727 return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); 728 } 729 if (flags & FOLL_SPLIT_PMD) { 730 spin_unlock(ptl); 731 split_huge_pmd(vma, pmd, address); 732 /* If pmd was left empty, stuff a page table in there quickly */ 733 return pte_alloc(mm, pmd) ? ERR_PTR(-ENOMEM) : 734 follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); 735 } 736 page = follow_trans_huge_pmd(vma, address, pmd, flags); 737 spin_unlock(ptl); 738 ctx->page_mask = HPAGE_PMD_NR - 1; 739 return page; 740 } 741 742 static struct page *follow_pud_mask(struct vm_area_struct *vma, 743 unsigned long address, p4d_t *p4dp, 744 unsigned int flags, 745 struct follow_page_context *ctx) 746 { 747 pud_t *pud; 748 spinlock_t *ptl; 749 struct page *page; 750 struct mm_struct *mm = vma->vm_mm; 751 752 pud = pud_offset(p4dp, address); 753 if (pud_none(*pud)) 754 return no_page_table(vma, flags); 755 if (pud_devmap(*pud)) { 756 ptl = pud_lock(mm, pud); 757 page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap); 758 spin_unlock(ptl); 759 if (page) 760 return page; 761 } 762 if (unlikely(pud_bad(*pud))) 763 return no_page_table(vma, flags); 764 765 return follow_pmd_mask(vma, address, pud, flags, ctx); 766 } 767 768 static struct page *follow_p4d_mask(struct vm_area_struct *vma, 769 unsigned long address, pgd_t *pgdp, 770 unsigned int flags, 771 struct follow_page_context *ctx) 772 { 773 p4d_t *p4d; 774 775 p4d = p4d_offset(pgdp, address); 776 if (p4d_none(*p4d)) 777 return no_page_table(vma, flags); 778 BUILD_BUG_ON(p4d_huge(*p4d)); 779 if (unlikely(p4d_bad(*p4d))) 780 return no_page_table(vma, flags); 781 782 return follow_pud_mask(vma, address, p4d, flags, ctx); 783 } 784 785 /** 786 * follow_page_mask - look up a page descriptor from a user-virtual address 787 * @vma: vm_area_struct mapping @address 788 * @address: virtual address to look up 789 * @flags: flags modifying lookup behaviour 790 * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a 791 * pointer to output page_mask 792 * 793 * @flags can have FOLL_ flags set, defined in <linux/mm.h> 794 * 795 * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches 796 * the device's dev_pagemap metadata to avoid repeating expensive lookups. 797 * 798 * When getting an anonymous page and the caller has to trigger unsharing 799 * of a shared anonymous page first, -EMLINK is returned. The caller should 800 * trigger a fault with FAULT_FLAG_UNSHARE set. Note that unsharing is only 801 * relevant with FOLL_PIN and !FOLL_WRITE. 802 * 803 * On output, the @ctx->page_mask is set according to the size of the page. 804 * 805 * Return: the mapped (struct page *), %NULL if no mapping exists, or 806 * an error pointer if there is a mapping to something not represented 807 * by a page descriptor (see also vm_normal_page()). 808 */ 809 static struct page *follow_page_mask(struct vm_area_struct *vma, 810 unsigned long address, unsigned int flags, 811 struct follow_page_context *ctx) 812 { 813 pgd_t *pgd; 814 struct mm_struct *mm = vma->vm_mm; 815 816 ctx->page_mask = 0; 817 818 /* 819 * Call hugetlb_follow_page_mask for hugetlb vmas as it will use 820 * special hugetlb page table walking code. This eliminates the 821 * need to check for hugetlb entries in the general walking code. 822 */ 823 if (is_vm_hugetlb_page(vma)) 824 return hugetlb_follow_page_mask(vma, address, flags, 825 &ctx->page_mask); 826 827 pgd = pgd_offset(mm, address); 828 829 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) 830 return no_page_table(vma, flags); 831 832 return follow_p4d_mask(vma, address, pgd, flags, ctx); 833 } 834 835 struct page *follow_page(struct vm_area_struct *vma, unsigned long address, 836 unsigned int foll_flags) 837 { 838 struct follow_page_context ctx = { NULL }; 839 struct page *page; 840 841 if (vma_is_secretmem(vma)) 842 return NULL; 843 844 if (WARN_ON_ONCE(foll_flags & FOLL_PIN)) 845 return NULL; 846 847 /* 848 * We never set FOLL_HONOR_NUMA_FAULT because callers don't expect 849 * to fail on PROT_NONE-mapped pages. 850 */ 851 page = follow_page_mask(vma, address, foll_flags, &ctx); 852 if (ctx.pgmap) 853 put_dev_pagemap(ctx.pgmap); 854 return page; 855 } 856 857 static int get_gate_page(struct mm_struct *mm, unsigned long address, 858 unsigned int gup_flags, struct vm_area_struct **vma, 859 struct page **page) 860 { 861 pgd_t *pgd; 862 p4d_t *p4d; 863 pud_t *pud; 864 pmd_t *pmd; 865 pte_t *pte; 866 pte_t entry; 867 int ret = -EFAULT; 868 869 /* user gate pages are read-only */ 870 if (gup_flags & FOLL_WRITE) 871 return -EFAULT; 872 if (address > TASK_SIZE) 873 pgd = pgd_offset_k(address); 874 else 875 pgd = pgd_offset_gate(mm, address); 876 if (pgd_none(*pgd)) 877 return -EFAULT; 878 p4d = p4d_offset(pgd, address); 879 if (p4d_none(*p4d)) 880 return -EFAULT; 881 pud = pud_offset(p4d, address); 882 if (pud_none(*pud)) 883 return -EFAULT; 884 pmd = pmd_offset(pud, address); 885 if (!pmd_present(*pmd)) 886 return -EFAULT; 887 pte = pte_offset_map(pmd, address); 888 if (!pte) 889 return -EFAULT; 890 entry = ptep_get(pte); 891 if (pte_none(entry)) 892 goto unmap; 893 *vma = get_gate_vma(mm); 894 if (!page) 895 goto out; 896 *page = vm_normal_page(*vma, address, entry); 897 if (!*page) { 898 if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(entry))) 899 goto unmap; 900 *page = pte_page(entry); 901 } 902 ret = try_grab_page(*page, gup_flags); 903 if (unlikely(ret)) 904 goto unmap; 905 out: 906 ret = 0; 907 unmap: 908 pte_unmap(pte); 909 return ret; 910 } 911 912 /* 913 * mmap_lock must be held on entry. If @flags has FOLL_UNLOCKABLE but not 914 * FOLL_NOWAIT, the mmap_lock may be released. If it is, *@locked will be set 915 * to 0 and -EBUSY returned. 916 */ 917 static int faultin_page(struct vm_area_struct *vma, 918 unsigned long address, unsigned int *flags, bool unshare, 919 int *locked) 920 { 921 unsigned int fault_flags = 0; 922 vm_fault_t ret; 923 924 if (*flags & FOLL_NOFAULT) 925 return -EFAULT; 926 if (*flags & FOLL_WRITE) 927 fault_flags |= FAULT_FLAG_WRITE; 928 if (*flags & FOLL_REMOTE) 929 fault_flags |= FAULT_FLAG_REMOTE; 930 if (*flags & FOLL_UNLOCKABLE) { 931 fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 932 /* 933 * FAULT_FLAG_INTERRUPTIBLE is opt-in. GUP callers must set 934 * FOLL_INTERRUPTIBLE to enable FAULT_FLAG_INTERRUPTIBLE. 935 * That's because some callers may not be prepared to 936 * handle early exits caused by non-fatal signals. 937 */ 938 if (*flags & FOLL_INTERRUPTIBLE) 939 fault_flags |= FAULT_FLAG_INTERRUPTIBLE; 940 } 941 if (*flags & FOLL_NOWAIT) 942 fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT; 943 if (*flags & FOLL_TRIED) { 944 /* 945 * Note: FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_TRIED 946 * can co-exist 947 */ 948 fault_flags |= FAULT_FLAG_TRIED; 949 } 950 if (unshare) { 951 fault_flags |= FAULT_FLAG_UNSHARE; 952 /* FAULT_FLAG_WRITE and FAULT_FLAG_UNSHARE are incompatible */ 953 VM_BUG_ON(fault_flags & FAULT_FLAG_WRITE); 954 } 955 956 ret = handle_mm_fault(vma, address, fault_flags, NULL); 957 958 if (ret & VM_FAULT_COMPLETED) { 959 /* 960 * With FAULT_FLAG_RETRY_NOWAIT we'll never release the 961 * mmap lock in the page fault handler. Sanity check this. 962 */ 963 WARN_ON_ONCE(fault_flags & FAULT_FLAG_RETRY_NOWAIT); 964 *locked = 0; 965 966 /* 967 * We should do the same as VM_FAULT_RETRY, but let's not 968 * return -EBUSY since that's not reflecting the reality of 969 * what has happened - we've just fully completed a page 970 * fault, with the mmap lock released. Use -EAGAIN to show 971 * that we want to take the mmap lock _again_. 972 */ 973 return -EAGAIN; 974 } 975 976 if (ret & VM_FAULT_ERROR) { 977 int err = vm_fault_to_errno(ret, *flags); 978 979 if (err) 980 return err; 981 BUG(); 982 } 983 984 if (ret & VM_FAULT_RETRY) { 985 if (!(fault_flags & FAULT_FLAG_RETRY_NOWAIT)) 986 *locked = 0; 987 return -EBUSY; 988 } 989 990 return 0; 991 } 992 993 /* 994 * Writing to file-backed mappings which require folio dirty tracking using GUP 995 * is a fundamentally broken operation, as kernel write access to GUP mappings 996 * do not adhere to the semantics expected by a file system. 997 * 998 * Consider the following scenario:- 999 * 1000 * 1. A folio is written to via GUP which write-faults the memory, notifying 1001 * the file system and dirtying the folio. 1002 * 2. Later, writeback is triggered, resulting in the folio being cleaned and 1003 * the PTE being marked read-only. 1004 * 3. The GUP caller writes to the folio, as it is mapped read/write via the 1005 * direct mapping. 1006 * 4. The GUP caller, now done with the page, unpins it and sets it dirty 1007 * (though it does not have to). 1008 * 1009 * This results in both data being written to a folio without writenotify, and 1010 * the folio being dirtied unexpectedly (if the caller decides to do so). 1011 */ 1012 static bool writable_file_mapping_allowed(struct vm_area_struct *vma, 1013 unsigned long gup_flags) 1014 { 1015 /* 1016 * If we aren't pinning then no problematic write can occur. A long term 1017 * pin is the most egregious case so this is the case we disallow. 1018 */ 1019 if ((gup_flags & (FOLL_PIN | FOLL_LONGTERM)) != 1020 (FOLL_PIN | FOLL_LONGTERM)) 1021 return true; 1022 1023 /* 1024 * If the VMA does not require dirty tracking then no problematic write 1025 * can occur either. 1026 */ 1027 return !vma_needs_dirty_tracking(vma); 1028 } 1029 1030 static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) 1031 { 1032 vm_flags_t vm_flags = vma->vm_flags; 1033 int write = (gup_flags & FOLL_WRITE); 1034 int foreign = (gup_flags & FOLL_REMOTE); 1035 bool vma_anon = vma_is_anonymous(vma); 1036 1037 if (vm_flags & (VM_IO | VM_PFNMAP)) 1038 return -EFAULT; 1039 1040 if ((gup_flags & FOLL_ANON) && !vma_anon) 1041 return -EFAULT; 1042 1043 if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma)) 1044 return -EOPNOTSUPP; 1045 1046 if (vma_is_secretmem(vma)) 1047 return -EFAULT; 1048 1049 if (write) { 1050 if (!vma_anon && 1051 !writable_file_mapping_allowed(vma, gup_flags)) 1052 return -EFAULT; 1053 1054 if (!(vm_flags & VM_WRITE) || (vm_flags & VM_SHADOW_STACK)) { 1055 if (!(gup_flags & FOLL_FORCE)) 1056 return -EFAULT; 1057 /* hugetlb does not support FOLL_FORCE|FOLL_WRITE. */ 1058 if (is_vm_hugetlb_page(vma)) 1059 return -EFAULT; 1060 /* 1061 * We used to let the write,force case do COW in a 1062 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could 1063 * set a breakpoint in a read-only mapping of an 1064 * executable, without corrupting the file (yet only 1065 * when that file had been opened for writing!). 1066 * Anon pages in shared mappings are surprising: now 1067 * just reject it. 1068 */ 1069 if (!is_cow_mapping(vm_flags)) 1070 return -EFAULT; 1071 } 1072 } else if (!(vm_flags & VM_READ)) { 1073 if (!(gup_flags & FOLL_FORCE)) 1074 return -EFAULT; 1075 /* 1076 * Is there actually any vma we can reach here which does not 1077 * have VM_MAYREAD set? 1078 */ 1079 if (!(vm_flags & VM_MAYREAD)) 1080 return -EFAULT; 1081 } 1082 /* 1083 * gups are always data accesses, not instruction 1084 * fetches, so execute=false here 1085 */ 1086 if (!arch_vma_access_permitted(vma, write, false, foreign)) 1087 return -EFAULT; 1088 return 0; 1089 } 1090 1091 /* 1092 * This is "vma_lookup()", but with a warning if we would have 1093 * historically expanded the stack in the GUP code. 1094 */ 1095 static struct vm_area_struct *gup_vma_lookup(struct mm_struct *mm, 1096 unsigned long addr) 1097 { 1098 #ifdef CONFIG_STACK_GROWSUP 1099 return vma_lookup(mm, addr); 1100 #else 1101 static volatile unsigned long next_warn; 1102 struct vm_area_struct *vma; 1103 unsigned long now, next; 1104 1105 vma = find_vma(mm, addr); 1106 if (!vma || (addr >= vma->vm_start)) 1107 return vma; 1108 1109 /* Only warn for half-way relevant accesses */ 1110 if (!(vma->vm_flags & VM_GROWSDOWN)) 1111 return NULL; 1112 if (vma->vm_start - addr > 65536) 1113 return NULL; 1114 1115 /* Let's not warn more than once an hour.. */ 1116 now = jiffies; next = next_warn; 1117 if (next && time_before(now, next)) 1118 return NULL; 1119 next_warn = now + 60*60*HZ; 1120 1121 /* Let people know things may have changed. */ 1122 pr_warn("GUP no longer grows the stack in %s (%d): %lx-%lx (%lx)\n", 1123 current->comm, task_pid_nr(current), 1124 vma->vm_start, vma->vm_end, addr); 1125 dump_stack(); 1126 return NULL; 1127 #endif 1128 } 1129 1130 /** 1131 * __get_user_pages() - pin user pages in memory 1132 * @mm: mm_struct of target mm 1133 * @start: starting user address 1134 * @nr_pages: number of pages from start to pin 1135 * @gup_flags: flags modifying pin behaviour 1136 * @pages: array that receives pointers to the pages pinned. 1137 * Should be at least nr_pages long. Or NULL, if caller 1138 * only intends to ensure the pages are faulted in. 1139 * @locked: whether we're still with the mmap_lock held 1140 * 1141 * Returns either number of pages pinned (which may be less than the 1142 * number requested), or an error. Details about the return value: 1143 * 1144 * -- If nr_pages is 0, returns 0. 1145 * -- If nr_pages is >0, but no pages were pinned, returns -errno. 1146 * -- If nr_pages is >0, and some pages were pinned, returns the number of 1147 * pages pinned. Again, this may be less than nr_pages. 1148 * -- 0 return value is possible when the fault would need to be retried. 1149 * 1150 * The caller is responsible for releasing returned @pages, via put_page(). 1151 * 1152 * Must be called with mmap_lock held. It may be released. See below. 1153 * 1154 * __get_user_pages walks a process's page tables and takes a reference to 1155 * each struct page that each user address corresponds to at a given 1156 * instant. That is, it takes the page that would be accessed if a user 1157 * thread accesses the given user virtual address at that instant. 1158 * 1159 * This does not guarantee that the page exists in the user mappings when 1160 * __get_user_pages returns, and there may even be a completely different 1161 * page there in some cases (eg. if mmapped pagecache has been invalidated 1162 * and subsequently re-faulted). However it does guarantee that the page 1163 * won't be freed completely. And mostly callers simply care that the page 1164 * contains data that was valid *at some point in time*. Typically, an IO 1165 * or similar operation cannot guarantee anything stronger anyway because 1166 * locks can't be held over the syscall boundary. 1167 * 1168 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If 1169 * the page is written to, set_page_dirty (or set_page_dirty_lock, as 1170 * appropriate) must be called after the page is finished with, and 1171 * before put_page is called. 1172 * 1173 * If FOLL_UNLOCKABLE is set without FOLL_NOWAIT then the mmap_lock may 1174 * be released. If this happens *@locked will be set to 0 on return. 1175 * 1176 * A caller using such a combination of @gup_flags must therefore hold the 1177 * mmap_lock for reading only, and recognize when it's been released. Otherwise, 1178 * it must be held for either reading or writing and will not be released. 1179 * 1180 * In most cases, get_user_pages or get_user_pages_fast should be used 1181 * instead of __get_user_pages. __get_user_pages should be used only if 1182 * you need some special @gup_flags. 1183 */ 1184 static long __get_user_pages(struct mm_struct *mm, 1185 unsigned long start, unsigned long nr_pages, 1186 unsigned int gup_flags, struct page **pages, 1187 int *locked) 1188 { 1189 long ret = 0, i = 0; 1190 struct vm_area_struct *vma = NULL; 1191 struct follow_page_context ctx = { NULL }; 1192 1193 if (!nr_pages) 1194 return 0; 1195 1196 start = untagged_addr_remote(mm, start); 1197 1198 VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN))); 1199 1200 do { 1201 struct page *page; 1202 unsigned int foll_flags = gup_flags; 1203 unsigned int page_increm; 1204 1205 /* first iteration or cross vma bound */ 1206 if (!vma || start >= vma->vm_end) { 1207 vma = gup_vma_lookup(mm, start); 1208 if (!vma && in_gate_area(mm, start)) { 1209 ret = get_gate_page(mm, start & PAGE_MASK, 1210 gup_flags, &vma, 1211 pages ? &page : NULL); 1212 if (ret) 1213 goto out; 1214 ctx.page_mask = 0; 1215 goto next_page; 1216 } 1217 1218 if (!vma) { 1219 ret = -EFAULT; 1220 goto out; 1221 } 1222 ret = check_vma_flags(vma, gup_flags); 1223 if (ret) 1224 goto out; 1225 } 1226 retry: 1227 /* 1228 * If we have a pending SIGKILL, don't keep faulting pages and 1229 * potentially allocating memory. 1230 */ 1231 if (fatal_signal_pending(current)) { 1232 ret = -EINTR; 1233 goto out; 1234 } 1235 cond_resched(); 1236 1237 page = follow_page_mask(vma, start, foll_flags, &ctx); 1238 if (!page || PTR_ERR(page) == -EMLINK) { 1239 ret = faultin_page(vma, start, &foll_flags, 1240 PTR_ERR(page) == -EMLINK, locked); 1241 switch (ret) { 1242 case 0: 1243 goto retry; 1244 case -EBUSY: 1245 case -EAGAIN: 1246 ret = 0; 1247 fallthrough; 1248 case -EFAULT: 1249 case -ENOMEM: 1250 case -EHWPOISON: 1251 goto out; 1252 } 1253 BUG(); 1254 } else if (PTR_ERR(page) == -EEXIST) { 1255 /* 1256 * Proper page table entry exists, but no corresponding 1257 * struct page. If the caller expects **pages to be 1258 * filled in, bail out now, because that can't be done 1259 * for this page. 1260 */ 1261 if (pages) { 1262 ret = PTR_ERR(page); 1263 goto out; 1264 } 1265 } else if (IS_ERR(page)) { 1266 ret = PTR_ERR(page); 1267 goto out; 1268 } 1269 next_page: 1270 page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask); 1271 if (page_increm > nr_pages) 1272 page_increm = nr_pages; 1273 1274 if (pages) { 1275 struct page *subpage; 1276 unsigned int j; 1277 1278 /* 1279 * This must be a large folio (and doesn't need to 1280 * be the whole folio; it can be part of it), do 1281 * the refcount work for all the subpages too. 1282 * 1283 * NOTE: here the page may not be the head page 1284 * e.g. when start addr is not thp-size aligned. 1285 * try_grab_folio() should have taken care of tail 1286 * pages. 1287 */ 1288 if (page_increm > 1) { 1289 struct folio *folio; 1290 1291 /* 1292 * Since we already hold refcount on the 1293 * large folio, this should never fail. 1294 */ 1295 folio = try_grab_folio(page, page_increm - 1, 1296 foll_flags); 1297 if (WARN_ON_ONCE(!folio)) { 1298 /* 1299 * Release the 1st page ref if the 1300 * folio is problematic, fail hard. 1301 */ 1302 gup_put_folio(page_folio(page), 1, 1303 foll_flags); 1304 ret = -EFAULT; 1305 goto out; 1306 } 1307 } 1308 1309 for (j = 0; j < page_increm; j++) { 1310 subpage = nth_page(page, j); 1311 pages[i + j] = subpage; 1312 flush_anon_page(vma, subpage, start + j * PAGE_SIZE); 1313 flush_dcache_page(subpage); 1314 } 1315 } 1316 1317 i += page_increm; 1318 start += page_increm * PAGE_SIZE; 1319 nr_pages -= page_increm; 1320 } while (nr_pages); 1321 out: 1322 if (ctx.pgmap) 1323 put_dev_pagemap(ctx.pgmap); 1324 return i ? i : ret; 1325 } 1326 1327 static bool vma_permits_fault(struct vm_area_struct *vma, 1328 unsigned int fault_flags) 1329 { 1330 bool write = !!(fault_flags & FAULT_FLAG_WRITE); 1331 bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE); 1332 vm_flags_t vm_flags = write ? VM_WRITE : VM_READ; 1333 1334 if (!(vm_flags & vma->vm_flags)) 1335 return false; 1336 1337 /* 1338 * The architecture might have a hardware protection 1339 * mechanism other than read/write that can deny access. 1340 * 1341 * gup always represents data access, not instruction 1342 * fetches, so execute=false here: 1343 */ 1344 if (!arch_vma_access_permitted(vma, write, false, foreign)) 1345 return false; 1346 1347 return true; 1348 } 1349 1350 /** 1351 * fixup_user_fault() - manually resolve a user page fault 1352 * @mm: mm_struct of target mm 1353 * @address: user address 1354 * @fault_flags:flags to pass down to handle_mm_fault() 1355 * @unlocked: did we unlock the mmap_lock while retrying, maybe NULL if caller 1356 * does not allow retry. If NULL, the caller must guarantee 1357 * that fault_flags does not contain FAULT_FLAG_ALLOW_RETRY. 1358 * 1359 * This is meant to be called in the specific scenario where for locking reasons 1360 * we try to access user memory in atomic context (within a pagefault_disable() 1361 * section), this returns -EFAULT, and we want to resolve the user fault before 1362 * trying again. 1363 * 1364 * Typically this is meant to be used by the futex code. 1365 * 1366 * The main difference with get_user_pages() is that this function will 1367 * unconditionally call handle_mm_fault() which will in turn perform all the 1368 * necessary SW fixup of the dirty and young bits in the PTE, while 1369 * get_user_pages() only guarantees to update these in the struct page. 1370 * 1371 * This is important for some architectures where those bits also gate the 1372 * access permission to the page because they are maintained in software. On 1373 * such architectures, gup() will not be enough to make a subsequent access 1374 * succeed. 1375 * 1376 * This function will not return with an unlocked mmap_lock. So it has not the 1377 * same semantics wrt the @mm->mmap_lock as does filemap_fault(). 1378 */ 1379 int fixup_user_fault(struct mm_struct *mm, 1380 unsigned long address, unsigned int fault_flags, 1381 bool *unlocked) 1382 { 1383 struct vm_area_struct *vma; 1384 vm_fault_t ret; 1385 1386 address = untagged_addr_remote(mm, address); 1387 1388 if (unlocked) 1389 fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 1390 1391 retry: 1392 vma = gup_vma_lookup(mm, address); 1393 if (!vma) 1394 return -EFAULT; 1395 1396 if (!vma_permits_fault(vma, fault_flags)) 1397 return -EFAULT; 1398 1399 if ((fault_flags & FAULT_FLAG_KILLABLE) && 1400 fatal_signal_pending(current)) 1401 return -EINTR; 1402 1403 ret = handle_mm_fault(vma, address, fault_flags, NULL); 1404 1405 if (ret & VM_FAULT_COMPLETED) { 1406 /* 1407 * NOTE: it's a pity that we need to retake the lock here 1408 * to pair with the unlock() in the callers. Ideally we 1409 * could tell the callers so they do not need to unlock. 1410 */ 1411 mmap_read_lock(mm); 1412 *unlocked = true; 1413 return 0; 1414 } 1415 1416 if (ret & VM_FAULT_ERROR) { 1417 int err = vm_fault_to_errno(ret, 0); 1418 1419 if (err) 1420 return err; 1421 BUG(); 1422 } 1423 1424 if (ret & VM_FAULT_RETRY) { 1425 mmap_read_lock(mm); 1426 *unlocked = true; 1427 fault_flags |= FAULT_FLAG_TRIED; 1428 goto retry; 1429 } 1430 1431 return 0; 1432 } 1433 EXPORT_SYMBOL_GPL(fixup_user_fault); 1434 1435 /* 1436 * GUP always responds to fatal signals. When FOLL_INTERRUPTIBLE is 1437 * specified, it'll also respond to generic signals. The caller of GUP 1438 * that has FOLL_INTERRUPTIBLE should take care of the GUP interruption. 1439 */ 1440 static bool gup_signal_pending(unsigned int flags) 1441 { 1442 if (fatal_signal_pending(current)) 1443 return true; 1444 1445 if (!(flags & FOLL_INTERRUPTIBLE)) 1446 return false; 1447 1448 return signal_pending(current); 1449 } 1450 1451 /* 1452 * Locking: (*locked == 1) means that the mmap_lock has already been acquired by 1453 * the caller. This function may drop the mmap_lock. If it does so, then it will 1454 * set (*locked = 0). 1455 * 1456 * (*locked == 0) means that the caller expects this function to acquire and 1457 * drop the mmap_lock. Therefore, the value of *locked will still be zero when 1458 * the function returns, even though it may have changed temporarily during 1459 * function execution. 1460 * 1461 * Please note that this function, unlike __get_user_pages(), will not return 0 1462 * for nr_pages > 0, unless FOLL_NOWAIT is used. 1463 */ 1464 static __always_inline long __get_user_pages_locked(struct mm_struct *mm, 1465 unsigned long start, 1466 unsigned long nr_pages, 1467 struct page **pages, 1468 int *locked, 1469 unsigned int flags) 1470 { 1471 long ret, pages_done; 1472 bool must_unlock = false; 1473 1474 /* 1475 * The internal caller expects GUP to manage the lock internally and the 1476 * lock must be released when this returns. 1477 */ 1478 if (!*locked) { 1479 if (mmap_read_lock_killable(mm)) 1480 return -EAGAIN; 1481 must_unlock = true; 1482 *locked = 1; 1483 } 1484 else 1485 mmap_assert_locked(mm); 1486 1487 if (flags & FOLL_PIN) 1488 mm_set_has_pinned_flag(&mm->flags); 1489 1490 /* 1491 * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior 1492 * is to set FOLL_GET if the caller wants pages[] filled in (but has 1493 * carelessly failed to specify FOLL_GET), so keep doing that, but only 1494 * for FOLL_GET, not for the newer FOLL_PIN. 1495 * 1496 * FOLL_PIN always expects pages to be non-null, but no need to assert 1497 * that here, as any failures will be obvious enough. 1498 */ 1499 if (pages && !(flags & FOLL_PIN)) 1500 flags |= FOLL_GET; 1501 1502 pages_done = 0; 1503 for (;;) { 1504 ret = __get_user_pages(mm, start, nr_pages, flags, pages, 1505 locked); 1506 if (!(flags & FOLL_UNLOCKABLE)) { 1507 /* VM_FAULT_RETRY couldn't trigger, bypass */ 1508 pages_done = ret; 1509 break; 1510 } 1511 1512 /* VM_FAULT_RETRY or VM_FAULT_COMPLETED cannot return errors */ 1513 if (!*locked) { 1514 BUG_ON(ret < 0); 1515 BUG_ON(ret >= nr_pages); 1516 } 1517 1518 if (ret > 0) { 1519 nr_pages -= ret; 1520 pages_done += ret; 1521 if (!nr_pages) 1522 break; 1523 } 1524 if (*locked) { 1525 /* 1526 * VM_FAULT_RETRY didn't trigger or it was a 1527 * FOLL_NOWAIT. 1528 */ 1529 if (!pages_done) 1530 pages_done = ret; 1531 break; 1532 } 1533 /* 1534 * VM_FAULT_RETRY triggered, so seek to the faulting offset. 1535 * For the prefault case (!pages) we only update counts. 1536 */ 1537 if (likely(pages)) 1538 pages += ret; 1539 start += ret << PAGE_SHIFT; 1540 1541 /* The lock was temporarily dropped, so we must unlock later */ 1542 must_unlock = true; 1543 1544 retry: 1545 /* 1546 * Repeat on the address that fired VM_FAULT_RETRY 1547 * with both FAULT_FLAG_ALLOW_RETRY and 1548 * FAULT_FLAG_TRIED. Note that GUP can be interrupted 1549 * by fatal signals of even common signals, depending on 1550 * the caller's request. So we need to check it before we 1551 * start trying again otherwise it can loop forever. 1552 */ 1553 if (gup_signal_pending(flags)) { 1554 if (!pages_done) 1555 pages_done = -EINTR; 1556 break; 1557 } 1558 1559 ret = mmap_read_lock_killable(mm); 1560 if (ret) { 1561 BUG_ON(ret > 0); 1562 if (!pages_done) 1563 pages_done = ret; 1564 break; 1565 } 1566 1567 *locked = 1; 1568 ret = __get_user_pages(mm, start, 1, flags | FOLL_TRIED, 1569 pages, locked); 1570 if (!*locked) { 1571 /* Continue to retry until we succeeded */ 1572 BUG_ON(ret != 0); 1573 goto retry; 1574 } 1575 if (ret != 1) { 1576 BUG_ON(ret > 1); 1577 if (!pages_done) 1578 pages_done = ret; 1579 break; 1580 } 1581 nr_pages--; 1582 pages_done++; 1583 if (!nr_pages) 1584 break; 1585 if (likely(pages)) 1586 pages++; 1587 start += PAGE_SIZE; 1588 } 1589 if (must_unlock && *locked) { 1590 /* 1591 * We either temporarily dropped the lock, or the caller 1592 * requested that we both acquire and drop the lock. Either way, 1593 * we must now unlock, and notify the caller of that state. 1594 */ 1595 mmap_read_unlock(mm); 1596 *locked = 0; 1597 } 1598 return pages_done; 1599 } 1600 1601 /** 1602 * populate_vma_page_range() - populate a range of pages in the vma. 1603 * @vma: target vma 1604 * @start: start address 1605 * @end: end address 1606 * @locked: whether the mmap_lock is still held 1607 * 1608 * This takes care of mlocking the pages too if VM_LOCKED is set. 1609 * 1610 * Return either number of pages pinned in the vma, or a negative error 1611 * code on error. 1612 * 1613 * vma->vm_mm->mmap_lock must be held. 1614 * 1615 * If @locked is NULL, it may be held for read or write and will 1616 * be unperturbed. 1617 * 1618 * If @locked is non-NULL, it must held for read only and may be 1619 * released. If it's released, *@locked will be set to 0. 1620 */ 1621 long populate_vma_page_range(struct vm_area_struct *vma, 1622 unsigned long start, unsigned long end, int *locked) 1623 { 1624 struct mm_struct *mm = vma->vm_mm; 1625 unsigned long nr_pages = (end - start) / PAGE_SIZE; 1626 int local_locked = 1; 1627 int gup_flags; 1628 long ret; 1629 1630 VM_BUG_ON(!PAGE_ALIGNED(start)); 1631 VM_BUG_ON(!PAGE_ALIGNED(end)); 1632 VM_BUG_ON_VMA(start < vma->vm_start, vma); 1633 VM_BUG_ON_VMA(end > vma->vm_end, vma); 1634 mmap_assert_locked(mm); 1635 1636 /* 1637 * Rightly or wrongly, the VM_LOCKONFAULT case has never used 1638 * faultin_page() to break COW, so it has no work to do here. 1639 */ 1640 if (vma->vm_flags & VM_LOCKONFAULT) 1641 return nr_pages; 1642 1643 gup_flags = FOLL_TOUCH; 1644 /* 1645 * We want to touch writable mappings with a write fault in order 1646 * to break COW, except for shared mappings because these don't COW 1647 * and we would not want to dirty them for nothing. 1648 */ 1649 if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE) 1650 gup_flags |= FOLL_WRITE; 1651 1652 /* 1653 * We want mlock to succeed for regions that have any permissions 1654 * other than PROT_NONE. 1655 */ 1656 if (vma_is_accessible(vma)) 1657 gup_flags |= FOLL_FORCE; 1658 1659 if (locked) 1660 gup_flags |= FOLL_UNLOCKABLE; 1661 1662 /* 1663 * We made sure addr is within a VMA, so the following will 1664 * not result in a stack expansion that recurses back here. 1665 */ 1666 ret = __get_user_pages(mm, start, nr_pages, gup_flags, 1667 NULL, locked ? locked : &local_locked); 1668 lru_add_drain(); 1669 return ret; 1670 } 1671 1672 /* 1673 * faultin_vma_page_range() - populate (prefault) page tables inside the 1674 * given VMA range readable/writable 1675 * 1676 * This takes care of mlocking the pages, too, if VM_LOCKED is set. 1677 * 1678 * @vma: target vma 1679 * @start: start address 1680 * @end: end address 1681 * @write: whether to prefault readable or writable 1682 * @locked: whether the mmap_lock is still held 1683 * 1684 * Returns either number of processed pages in the vma, or a negative error 1685 * code on error (see __get_user_pages()). 1686 * 1687 * vma->vm_mm->mmap_lock must be held. The range must be page-aligned and 1688 * covered by the VMA. If it's released, *@locked will be set to 0. 1689 */ 1690 long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start, 1691 unsigned long end, bool write, int *locked) 1692 { 1693 struct mm_struct *mm = vma->vm_mm; 1694 unsigned long nr_pages = (end - start) / PAGE_SIZE; 1695 int gup_flags; 1696 long ret; 1697 1698 VM_BUG_ON(!PAGE_ALIGNED(start)); 1699 VM_BUG_ON(!PAGE_ALIGNED(end)); 1700 VM_BUG_ON_VMA(start < vma->vm_start, vma); 1701 VM_BUG_ON_VMA(end > vma->vm_end, vma); 1702 mmap_assert_locked(mm); 1703 1704 /* 1705 * FOLL_TOUCH: Mark page accessed and thereby young; will also mark 1706 * the page dirty with FOLL_WRITE -- which doesn't make a 1707 * difference with !FOLL_FORCE, because the page is writable 1708 * in the page table. 1709 * FOLL_HWPOISON: Return -EHWPOISON instead of -EFAULT when we hit 1710 * a poisoned page. 1711 * !FOLL_FORCE: Require proper access permissions. 1712 */ 1713 gup_flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_UNLOCKABLE; 1714 if (write) 1715 gup_flags |= FOLL_WRITE; 1716 1717 /* 1718 * We want to report -EINVAL instead of -EFAULT for any permission 1719 * problems or incompatible mappings. 1720 */ 1721 if (check_vma_flags(vma, gup_flags)) 1722 return -EINVAL; 1723 1724 ret = __get_user_pages(mm, start, nr_pages, gup_flags, 1725 NULL, locked); 1726 lru_add_drain(); 1727 return ret; 1728 } 1729 1730 /* 1731 * __mm_populate - populate and/or mlock pages within a range of address space. 1732 * 1733 * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap 1734 * flags. VMAs must be already marked with the desired vm_flags, and 1735 * mmap_lock must not be held. 1736 */ 1737 int __mm_populate(unsigned long start, unsigned long len, int ignore_errors) 1738 { 1739 struct mm_struct *mm = current->mm; 1740 unsigned long end, nstart, nend; 1741 struct vm_area_struct *vma = NULL; 1742 int locked = 0; 1743 long ret = 0; 1744 1745 end = start + len; 1746 1747 for (nstart = start; nstart < end; nstart = nend) { 1748 /* 1749 * We want to fault in pages for [nstart; end) address range. 1750 * Find first corresponding VMA. 1751 */ 1752 if (!locked) { 1753 locked = 1; 1754 mmap_read_lock(mm); 1755 vma = find_vma_intersection(mm, nstart, end); 1756 } else if (nstart >= vma->vm_end) 1757 vma = find_vma_intersection(mm, vma->vm_end, end); 1758 1759 if (!vma) 1760 break; 1761 /* 1762 * Set [nstart; nend) to intersection of desired address 1763 * range with the first VMA. Also, skip undesirable VMA types. 1764 */ 1765 nend = min(end, vma->vm_end); 1766 if (vma->vm_flags & (VM_IO | VM_PFNMAP)) 1767 continue; 1768 if (nstart < vma->vm_start) 1769 nstart = vma->vm_start; 1770 /* 1771 * Now fault in a range of pages. populate_vma_page_range() 1772 * double checks the vma flags, so that it won't mlock pages 1773 * if the vma was already munlocked. 1774 */ 1775 ret = populate_vma_page_range(vma, nstart, nend, &locked); 1776 if (ret < 0) { 1777 if (ignore_errors) { 1778 ret = 0; 1779 continue; /* continue at next VMA */ 1780 } 1781 break; 1782 } 1783 nend = nstart + ret * PAGE_SIZE; 1784 ret = 0; 1785 } 1786 if (locked) 1787 mmap_read_unlock(mm); 1788 return ret; /* 0 or negative error code */ 1789 } 1790 #else /* CONFIG_MMU */ 1791 static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start, 1792 unsigned long nr_pages, struct page **pages, 1793 int *locked, unsigned int foll_flags) 1794 { 1795 struct vm_area_struct *vma; 1796 bool must_unlock = false; 1797 unsigned long vm_flags; 1798 long i; 1799 1800 if (!nr_pages) 1801 return 0; 1802 1803 /* 1804 * The internal caller expects GUP to manage the lock internally and the 1805 * lock must be released when this returns. 1806 */ 1807 if (!*locked) { 1808 if (mmap_read_lock_killable(mm)) 1809 return -EAGAIN; 1810 must_unlock = true; 1811 *locked = 1; 1812 } 1813 1814 /* calculate required read or write permissions. 1815 * If FOLL_FORCE is set, we only require the "MAY" flags. 1816 */ 1817 vm_flags = (foll_flags & FOLL_WRITE) ? 1818 (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD); 1819 vm_flags &= (foll_flags & FOLL_FORCE) ? 1820 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE); 1821 1822 for (i = 0; i < nr_pages; i++) { 1823 vma = find_vma(mm, start); 1824 if (!vma) 1825 break; 1826 1827 /* protect what we can, including chardevs */ 1828 if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) || 1829 !(vm_flags & vma->vm_flags)) 1830 break; 1831 1832 if (pages) { 1833 pages[i] = virt_to_page((void *)start); 1834 if (pages[i]) 1835 get_page(pages[i]); 1836 } 1837 1838 start = (start + PAGE_SIZE) & PAGE_MASK; 1839 } 1840 1841 if (must_unlock && *locked) { 1842 mmap_read_unlock(mm); 1843 *locked = 0; 1844 } 1845 1846 return i ? : -EFAULT; 1847 } 1848 #endif /* !CONFIG_MMU */ 1849 1850 /** 1851 * fault_in_writeable - fault in userspace address range for writing 1852 * @uaddr: start of address range 1853 * @size: size of address range 1854 * 1855 * Returns the number of bytes not faulted in (like copy_to_user() and 1856 * copy_from_user()). 1857 */ 1858 size_t fault_in_writeable(char __user *uaddr, size_t size) 1859 { 1860 char __user *start = uaddr, *end; 1861 1862 if (unlikely(size == 0)) 1863 return 0; 1864 if (!user_write_access_begin(uaddr, size)) 1865 return size; 1866 if (!PAGE_ALIGNED(uaddr)) { 1867 unsafe_put_user(0, uaddr, out); 1868 uaddr = (char __user *)PAGE_ALIGN((unsigned long)uaddr); 1869 } 1870 end = (char __user *)PAGE_ALIGN((unsigned long)start + size); 1871 if (unlikely(end < start)) 1872 end = NULL; 1873 while (uaddr != end) { 1874 unsafe_put_user(0, uaddr, out); 1875 uaddr += PAGE_SIZE; 1876 } 1877 1878 out: 1879 user_write_access_end(); 1880 if (size > uaddr - start) 1881 return size - (uaddr - start); 1882 return 0; 1883 } 1884 EXPORT_SYMBOL(fault_in_writeable); 1885 1886 /** 1887 * fault_in_subpage_writeable - fault in an address range for writing 1888 * @uaddr: start of address range 1889 * @size: size of address range 1890 * 1891 * Fault in a user address range for writing while checking for permissions at 1892 * sub-page granularity (e.g. arm64 MTE). This function should be used when 1893 * the caller cannot guarantee forward progress of a copy_to_user() loop. 1894 * 1895 * Returns the number of bytes not faulted in (like copy_to_user() and 1896 * copy_from_user()). 1897 */ 1898 size_t fault_in_subpage_writeable(char __user *uaddr, size_t size) 1899 { 1900 size_t faulted_in; 1901 1902 /* 1903 * Attempt faulting in at page granularity first for page table 1904 * permission checking. The arch-specific probe_subpage_writeable() 1905 * functions may not check for this. 1906 */ 1907 faulted_in = size - fault_in_writeable(uaddr, size); 1908 if (faulted_in) 1909 faulted_in -= probe_subpage_writeable(uaddr, faulted_in); 1910 1911 return size - faulted_in; 1912 } 1913 EXPORT_SYMBOL(fault_in_subpage_writeable); 1914 1915 /* 1916 * fault_in_safe_writeable - fault in an address range for writing 1917 * @uaddr: start of address range 1918 * @size: length of address range 1919 * 1920 * Faults in an address range for writing. This is primarily useful when we 1921 * already know that some or all of the pages in the address range aren't in 1922 * memory. 1923 * 1924 * Unlike fault_in_writeable(), this function is non-destructive. 1925 * 1926 * Note that we don't pin or otherwise hold the pages referenced that we fault 1927 * in. There's no guarantee that they'll stay in memory for any duration of 1928 * time. 1929 * 1930 * Returns the number of bytes not faulted in, like copy_to_user() and 1931 * copy_from_user(). 1932 */ 1933 size_t fault_in_safe_writeable(const char __user *uaddr, size_t size) 1934 { 1935 unsigned long start = (unsigned long)uaddr, end; 1936 struct mm_struct *mm = current->mm; 1937 bool unlocked = false; 1938 1939 if (unlikely(size == 0)) 1940 return 0; 1941 end = PAGE_ALIGN(start + size); 1942 if (end < start) 1943 end = 0; 1944 1945 mmap_read_lock(mm); 1946 do { 1947 if (fixup_user_fault(mm, start, FAULT_FLAG_WRITE, &unlocked)) 1948 break; 1949 start = (start + PAGE_SIZE) & PAGE_MASK; 1950 } while (start != end); 1951 mmap_read_unlock(mm); 1952 1953 if (size > (unsigned long)uaddr - start) 1954 return size - ((unsigned long)uaddr - start); 1955 return 0; 1956 } 1957 EXPORT_SYMBOL(fault_in_safe_writeable); 1958 1959 /** 1960 * fault_in_readable - fault in userspace address range for reading 1961 * @uaddr: start of user address range 1962 * @size: size of user address range 1963 * 1964 * Returns the number of bytes not faulted in (like copy_to_user() and 1965 * copy_from_user()). 1966 */ 1967 size_t fault_in_readable(const char __user *uaddr, size_t size) 1968 { 1969 const char __user *start = uaddr, *end; 1970 volatile char c; 1971 1972 if (unlikely(size == 0)) 1973 return 0; 1974 if (!user_read_access_begin(uaddr, size)) 1975 return size; 1976 if (!PAGE_ALIGNED(uaddr)) { 1977 unsafe_get_user(c, uaddr, out); 1978 uaddr = (const char __user *)PAGE_ALIGN((unsigned long)uaddr); 1979 } 1980 end = (const char __user *)PAGE_ALIGN((unsigned long)start + size); 1981 if (unlikely(end < start)) 1982 end = NULL; 1983 while (uaddr != end) { 1984 unsafe_get_user(c, uaddr, out); 1985 uaddr += PAGE_SIZE; 1986 } 1987 1988 out: 1989 user_read_access_end(); 1990 (void)c; 1991 if (size > uaddr - start) 1992 return size - (uaddr - start); 1993 return 0; 1994 } 1995 EXPORT_SYMBOL(fault_in_readable); 1996 1997 /** 1998 * get_dump_page() - pin user page in memory while writing it to core dump 1999 * @addr: user address 2000 * 2001 * Returns struct page pointer of user page pinned for dump, 2002 * to be freed afterwards by put_page(). 2003 * 2004 * Returns NULL on any kind of failure - a hole must then be inserted into 2005 * the corefile, to preserve alignment with its headers; and also returns 2006 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found - 2007 * allowing a hole to be left in the corefile to save disk space. 2008 * 2009 * Called without mmap_lock (takes and releases the mmap_lock by itself). 2010 */ 2011 #ifdef CONFIG_ELF_CORE 2012 struct page *get_dump_page(unsigned long addr) 2013 { 2014 struct page *page; 2015 int locked = 0; 2016 int ret; 2017 2018 ret = __get_user_pages_locked(current->mm, addr, 1, &page, &locked, 2019 FOLL_FORCE | FOLL_DUMP | FOLL_GET); 2020 return (ret == 1) ? page : NULL; 2021 } 2022 #endif /* CONFIG_ELF_CORE */ 2023 2024 #ifdef CONFIG_MIGRATION 2025 /* 2026 * Returns the number of collected pages. Return value is always >= 0. 2027 */ 2028 static unsigned long collect_longterm_unpinnable_pages( 2029 struct list_head *movable_page_list, 2030 unsigned long nr_pages, 2031 struct page **pages) 2032 { 2033 unsigned long i, collected = 0; 2034 struct folio *prev_folio = NULL; 2035 bool drain_allow = true; 2036 2037 for (i = 0; i < nr_pages; i++) { 2038 struct folio *folio = page_folio(pages[i]); 2039 2040 if (folio == prev_folio) 2041 continue; 2042 prev_folio = folio; 2043 2044 if (folio_is_longterm_pinnable(folio)) 2045 continue; 2046 2047 collected++; 2048 2049 if (folio_is_device_coherent(folio)) 2050 continue; 2051 2052 if (folio_test_hugetlb(folio)) { 2053 isolate_hugetlb(folio, movable_page_list); 2054 continue; 2055 } 2056 2057 if (!folio_test_lru(folio) && drain_allow) { 2058 lru_add_drain_all(); 2059 drain_allow = false; 2060 } 2061 2062 if (!folio_isolate_lru(folio)) 2063 continue; 2064 2065 list_add_tail(&folio->lru, movable_page_list); 2066 node_stat_mod_folio(folio, 2067 NR_ISOLATED_ANON + folio_is_file_lru(folio), 2068 folio_nr_pages(folio)); 2069 } 2070 2071 return collected; 2072 } 2073 2074 /* 2075 * Unpins all pages and migrates device coherent pages and movable_page_list. 2076 * Returns -EAGAIN if all pages were successfully migrated or -errno for failure 2077 * (or partial success). 2078 */ 2079 static int migrate_longterm_unpinnable_pages( 2080 struct list_head *movable_page_list, 2081 unsigned long nr_pages, 2082 struct page **pages) 2083 { 2084 int ret; 2085 unsigned long i; 2086 2087 for (i = 0; i < nr_pages; i++) { 2088 struct folio *folio = page_folio(pages[i]); 2089 2090 if (folio_is_device_coherent(folio)) { 2091 /* 2092 * Migration will fail if the page is pinned, so convert 2093 * the pin on the source page to a normal reference. 2094 */ 2095 pages[i] = NULL; 2096 folio_get(folio); 2097 gup_put_folio(folio, 1, FOLL_PIN); 2098 2099 if (migrate_device_coherent_page(&folio->page)) { 2100 ret = -EBUSY; 2101 goto err; 2102 } 2103 2104 continue; 2105 } 2106 2107 /* 2108 * We can't migrate pages with unexpected references, so drop 2109 * the reference obtained by __get_user_pages_locked(). 2110 * Migrating pages have been added to movable_page_list after 2111 * calling folio_isolate_lru() which takes a reference so the 2112 * page won't be freed if it's migrating. 2113 */ 2114 unpin_user_page(pages[i]); 2115 pages[i] = NULL; 2116 } 2117 2118 if (!list_empty(movable_page_list)) { 2119 struct migration_target_control mtc = { 2120 .nid = NUMA_NO_NODE, 2121 .gfp_mask = GFP_USER | __GFP_NOWARN, 2122 }; 2123 2124 if (migrate_pages(movable_page_list, alloc_migration_target, 2125 NULL, (unsigned long)&mtc, MIGRATE_SYNC, 2126 MR_LONGTERM_PIN, NULL)) { 2127 ret = -ENOMEM; 2128 goto err; 2129 } 2130 } 2131 2132 putback_movable_pages(movable_page_list); 2133 2134 return -EAGAIN; 2135 2136 err: 2137 for (i = 0; i < nr_pages; i++) 2138 if (pages[i]) 2139 unpin_user_page(pages[i]); 2140 putback_movable_pages(movable_page_list); 2141 2142 return ret; 2143 } 2144 2145 /* 2146 * Check whether all pages are *allowed* to be pinned. Rather confusingly, all 2147 * pages in the range are required to be pinned via FOLL_PIN, before calling 2148 * this routine. 2149 * 2150 * If any pages in the range are not allowed to be pinned, then this routine 2151 * will migrate those pages away, unpin all the pages in the range and return 2152 * -EAGAIN. The caller should re-pin the entire range with FOLL_PIN and then 2153 * call this routine again. 2154 * 2155 * If an error other than -EAGAIN occurs, this indicates a migration failure. 2156 * The caller should give up, and propagate the error back up the call stack. 2157 * 2158 * If everything is OK and all pages in the range are allowed to be pinned, then 2159 * this routine leaves all pages pinned and returns zero for success. 2160 */ 2161 static long check_and_migrate_movable_pages(unsigned long nr_pages, 2162 struct page **pages) 2163 { 2164 unsigned long collected; 2165 LIST_HEAD(movable_page_list); 2166 2167 collected = collect_longterm_unpinnable_pages(&movable_page_list, 2168 nr_pages, pages); 2169 if (!collected) 2170 return 0; 2171 2172 return migrate_longterm_unpinnable_pages(&movable_page_list, nr_pages, 2173 pages); 2174 } 2175 #else 2176 static long check_and_migrate_movable_pages(unsigned long nr_pages, 2177 struct page **pages) 2178 { 2179 return 0; 2180 } 2181 #endif /* CONFIG_MIGRATION */ 2182 2183 /* 2184 * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which 2185 * allows us to process the FOLL_LONGTERM flag. 2186 */ 2187 static long __gup_longterm_locked(struct mm_struct *mm, 2188 unsigned long start, 2189 unsigned long nr_pages, 2190 struct page **pages, 2191 int *locked, 2192 unsigned int gup_flags) 2193 { 2194 unsigned int flags; 2195 long rc, nr_pinned_pages; 2196 2197 if (!(gup_flags & FOLL_LONGTERM)) 2198 return __get_user_pages_locked(mm, start, nr_pages, pages, 2199 locked, gup_flags); 2200 2201 flags = memalloc_pin_save(); 2202 do { 2203 nr_pinned_pages = __get_user_pages_locked(mm, start, nr_pages, 2204 pages, locked, 2205 gup_flags); 2206 if (nr_pinned_pages <= 0) { 2207 rc = nr_pinned_pages; 2208 break; 2209 } 2210 2211 /* FOLL_LONGTERM implies FOLL_PIN */ 2212 rc = check_and_migrate_movable_pages(nr_pinned_pages, pages); 2213 } while (rc == -EAGAIN); 2214 memalloc_pin_restore(flags); 2215 return rc ? rc : nr_pinned_pages; 2216 } 2217 2218 /* 2219 * Check that the given flags are valid for the exported gup/pup interface, and 2220 * update them with the required flags that the caller must have set. 2221 */ 2222 static bool is_valid_gup_args(struct page **pages, int *locked, 2223 unsigned int *gup_flags_p, unsigned int to_set) 2224 { 2225 unsigned int gup_flags = *gup_flags_p; 2226 2227 /* 2228 * These flags not allowed to be specified externally to the gup 2229 * interfaces: 2230 * - FOLL_PIN/FOLL_TRIED/FOLL_FAST_ONLY are internal only 2231 * - FOLL_REMOTE is internal only and used on follow_page() 2232 * - FOLL_UNLOCKABLE is internal only and used if locked is !NULL 2233 */ 2234 if (WARN_ON_ONCE(gup_flags & (FOLL_PIN | FOLL_TRIED | FOLL_UNLOCKABLE | 2235 FOLL_REMOTE | FOLL_FAST_ONLY))) 2236 return false; 2237 2238 gup_flags |= to_set; 2239 if (locked) { 2240 /* At the external interface locked must be set */ 2241 if (WARN_ON_ONCE(*locked != 1)) 2242 return false; 2243 2244 gup_flags |= FOLL_UNLOCKABLE; 2245 } 2246 2247 /* FOLL_GET and FOLL_PIN are mutually exclusive. */ 2248 if (WARN_ON_ONCE((gup_flags & (FOLL_PIN | FOLL_GET)) == 2249 (FOLL_PIN | FOLL_GET))) 2250 return false; 2251 2252 /* LONGTERM can only be specified when pinning */ 2253 if (WARN_ON_ONCE(!(gup_flags & FOLL_PIN) && (gup_flags & FOLL_LONGTERM))) 2254 return false; 2255 2256 /* Pages input must be given if using GET/PIN */ 2257 if (WARN_ON_ONCE((gup_flags & (FOLL_GET | FOLL_PIN)) && !pages)) 2258 return false; 2259 2260 /* We want to allow the pgmap to be hot-unplugged at all times */ 2261 if (WARN_ON_ONCE((gup_flags & FOLL_LONGTERM) && 2262 (gup_flags & FOLL_PCI_P2PDMA))) 2263 return false; 2264 2265 *gup_flags_p = gup_flags; 2266 return true; 2267 } 2268 2269 #ifdef CONFIG_MMU 2270 /** 2271 * get_user_pages_remote() - pin user pages in memory 2272 * @mm: mm_struct of target mm 2273 * @start: starting user address 2274 * @nr_pages: number of pages from start to pin 2275 * @gup_flags: flags modifying lookup behaviour 2276 * @pages: array that receives pointers to the pages pinned. 2277 * Should be at least nr_pages long. Or NULL, if caller 2278 * only intends to ensure the pages are faulted in. 2279 * @locked: pointer to lock flag indicating whether lock is held and 2280 * subsequently whether VM_FAULT_RETRY functionality can be 2281 * utilised. Lock must initially be held. 2282 * 2283 * Returns either number of pages pinned (which may be less than the 2284 * number requested), or an error. Details about the return value: 2285 * 2286 * -- If nr_pages is 0, returns 0. 2287 * -- If nr_pages is >0, but no pages were pinned, returns -errno. 2288 * -- If nr_pages is >0, and some pages were pinned, returns the number of 2289 * pages pinned. Again, this may be less than nr_pages. 2290 * 2291 * The caller is responsible for releasing returned @pages, via put_page(). 2292 * 2293 * Must be called with mmap_lock held for read or write. 2294 * 2295 * get_user_pages_remote walks a process's page tables and takes a reference 2296 * to each struct page that each user address corresponds to at a given 2297 * instant. That is, it takes the page that would be accessed if a user 2298 * thread accesses the given user virtual address at that instant. 2299 * 2300 * This does not guarantee that the page exists in the user mappings when 2301 * get_user_pages_remote returns, and there may even be a completely different 2302 * page there in some cases (eg. if mmapped pagecache has been invalidated 2303 * and subsequently re-faulted). However it does guarantee that the page 2304 * won't be freed completely. And mostly callers simply care that the page 2305 * contains data that was valid *at some point in time*. Typically, an IO 2306 * or similar operation cannot guarantee anything stronger anyway because 2307 * locks can't be held over the syscall boundary. 2308 * 2309 * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page 2310 * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must 2311 * be called after the page is finished with, and before put_page is called. 2312 * 2313 * get_user_pages_remote is typically used for fewer-copy IO operations, 2314 * to get a handle on the memory by some means other than accesses 2315 * via the user virtual addresses. The pages may be submitted for 2316 * DMA to devices or accessed via their kernel linear mapping (via the 2317 * kmap APIs). Care should be taken to use the correct cache flushing APIs. 2318 * 2319 * See also get_user_pages_fast, for performance critical applications. 2320 * 2321 * get_user_pages_remote should be phased out in favor of 2322 * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing 2323 * should use get_user_pages_remote because it cannot pass 2324 * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault. 2325 */ 2326 long get_user_pages_remote(struct mm_struct *mm, 2327 unsigned long start, unsigned long nr_pages, 2328 unsigned int gup_flags, struct page **pages, 2329 int *locked) 2330 { 2331 int local_locked = 1; 2332 2333 if (!is_valid_gup_args(pages, locked, &gup_flags, 2334 FOLL_TOUCH | FOLL_REMOTE)) 2335 return -EINVAL; 2336 2337 return __get_user_pages_locked(mm, start, nr_pages, pages, 2338 locked ? locked : &local_locked, 2339 gup_flags); 2340 } 2341 EXPORT_SYMBOL(get_user_pages_remote); 2342 2343 #else /* CONFIG_MMU */ 2344 long get_user_pages_remote(struct mm_struct *mm, 2345 unsigned long start, unsigned long nr_pages, 2346 unsigned int gup_flags, struct page **pages, 2347 int *locked) 2348 { 2349 return 0; 2350 } 2351 #endif /* !CONFIG_MMU */ 2352 2353 /** 2354 * get_user_pages() - pin user pages in memory 2355 * @start: starting user address 2356 * @nr_pages: number of pages from start to pin 2357 * @gup_flags: flags modifying lookup behaviour 2358 * @pages: array that receives pointers to the pages pinned. 2359 * Should be at least nr_pages long. Or NULL, if caller 2360 * only intends to ensure the pages are faulted in. 2361 * 2362 * This is the same as get_user_pages_remote(), just with a less-flexible 2363 * calling convention where we assume that the mm being operated on belongs to 2364 * the current task, and doesn't allow passing of a locked parameter. We also 2365 * obviously don't pass FOLL_REMOTE in here. 2366 */ 2367 long get_user_pages(unsigned long start, unsigned long nr_pages, 2368 unsigned int gup_flags, struct page **pages) 2369 { 2370 int locked = 1; 2371 2372 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_TOUCH)) 2373 return -EINVAL; 2374 2375 return __get_user_pages_locked(current->mm, start, nr_pages, pages, 2376 &locked, gup_flags); 2377 } 2378 EXPORT_SYMBOL(get_user_pages); 2379 2380 /* 2381 * get_user_pages_unlocked() is suitable to replace the form: 2382 * 2383 * mmap_read_lock(mm); 2384 * get_user_pages(mm, ..., pages, NULL); 2385 * mmap_read_unlock(mm); 2386 * 2387 * with: 2388 * 2389 * get_user_pages_unlocked(mm, ..., pages); 2390 * 2391 * It is functionally equivalent to get_user_pages_fast so 2392 * get_user_pages_fast should be used instead if specific gup_flags 2393 * (e.g. FOLL_FORCE) are not required. 2394 */ 2395 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, 2396 struct page **pages, unsigned int gup_flags) 2397 { 2398 int locked = 0; 2399 2400 if (!is_valid_gup_args(pages, NULL, &gup_flags, 2401 FOLL_TOUCH | FOLL_UNLOCKABLE)) 2402 return -EINVAL; 2403 2404 return __get_user_pages_locked(current->mm, start, nr_pages, pages, 2405 &locked, gup_flags); 2406 } 2407 EXPORT_SYMBOL(get_user_pages_unlocked); 2408 2409 /* 2410 * Fast GUP 2411 * 2412 * get_user_pages_fast attempts to pin user pages by walking the page 2413 * tables directly and avoids taking locks. Thus the walker needs to be 2414 * protected from page table pages being freed from under it, and should 2415 * block any THP splits. 2416 * 2417 * One way to achieve this is to have the walker disable interrupts, and 2418 * rely on IPIs from the TLB flushing code blocking before the page table 2419 * pages are freed. This is unsuitable for architectures that do not need 2420 * to broadcast an IPI when invalidating TLBs. 2421 * 2422 * Another way to achieve this is to batch up page table containing pages 2423 * belonging to more than one mm_user, then rcu_sched a callback to free those 2424 * pages. Disabling interrupts will allow the fast_gup walker to both block 2425 * the rcu_sched callback, and an IPI that we broadcast for splitting THPs 2426 * (which is a relatively rare event). The code below adopts this strategy. 2427 * 2428 * Before activating this code, please be aware that the following assumptions 2429 * are currently made: 2430 * 2431 * *) Either MMU_GATHER_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to 2432 * free pages containing page tables or TLB flushing requires IPI broadcast. 2433 * 2434 * *) ptes can be read atomically by the architecture. 2435 * 2436 * *) access_ok is sufficient to validate userspace address ranges. 2437 * 2438 * The last two assumptions can be relaxed by the addition of helper functions. 2439 * 2440 * This code is based heavily on the PowerPC implementation by Nick Piggin. 2441 */ 2442 #ifdef CONFIG_HAVE_FAST_GUP 2443 2444 /* 2445 * Used in the GUP-fast path to determine whether a pin is permitted for a 2446 * specific folio. 2447 * 2448 * This call assumes the caller has pinned the folio, that the lowest page table 2449 * level still points to this folio, and that interrupts have been disabled. 2450 * 2451 * Writing to pinned file-backed dirty tracked folios is inherently problematic 2452 * (see comment describing the writable_file_mapping_allowed() function). We 2453 * therefore try to avoid the most egregious case of a long-term mapping doing 2454 * so. 2455 * 2456 * This function cannot be as thorough as that one as the VMA is not available 2457 * in the fast path, so instead we whitelist known good cases and if in doubt, 2458 * fall back to the slow path. 2459 */ 2460 static bool folio_fast_pin_allowed(struct folio *folio, unsigned int flags) 2461 { 2462 struct address_space *mapping; 2463 unsigned long mapping_flags; 2464 2465 /* 2466 * If we aren't pinning then no problematic write can occur. A long term 2467 * pin is the most egregious case so this is the one we disallow. 2468 */ 2469 if ((flags & (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) != 2470 (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) 2471 return true; 2472 2473 /* The folio is pinned, so we can safely access folio fields. */ 2474 2475 if (WARN_ON_ONCE(folio_test_slab(folio))) 2476 return false; 2477 2478 /* hugetlb mappings do not require dirty-tracking. */ 2479 if (folio_test_hugetlb(folio)) 2480 return true; 2481 2482 /* 2483 * GUP-fast disables IRQs. When IRQS are disabled, RCU grace periods 2484 * cannot proceed, which means no actions performed under RCU can 2485 * proceed either. 2486 * 2487 * inodes and thus their mappings are freed under RCU, which means the 2488 * mapping cannot be freed beneath us and thus we can safely dereference 2489 * it. 2490 */ 2491 lockdep_assert_irqs_disabled(); 2492 2493 /* 2494 * However, there may be operations which _alter_ the mapping, so ensure 2495 * we read it once and only once. 2496 */ 2497 mapping = READ_ONCE(folio->mapping); 2498 2499 /* 2500 * The mapping may have been truncated, in any case we cannot determine 2501 * if this mapping is safe - fall back to slow path to determine how to 2502 * proceed. 2503 */ 2504 if (!mapping) 2505 return false; 2506 2507 /* Anonymous folios pose no problem. */ 2508 mapping_flags = (unsigned long)mapping & PAGE_MAPPING_FLAGS; 2509 if (mapping_flags) 2510 return mapping_flags & PAGE_MAPPING_ANON; 2511 2512 /* 2513 * At this point, we know the mapping is non-null and points to an 2514 * address_space object. The only remaining whitelisted file system is 2515 * shmem. 2516 */ 2517 return shmem_mapping(mapping); 2518 } 2519 2520 static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start, 2521 unsigned int flags, 2522 struct page **pages) 2523 { 2524 while ((*nr) - nr_start) { 2525 struct page *page = pages[--(*nr)]; 2526 2527 ClearPageReferenced(page); 2528 if (flags & FOLL_PIN) 2529 unpin_user_page(page); 2530 else 2531 put_page(page); 2532 } 2533 } 2534 2535 #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL 2536 /* 2537 * Fast-gup relies on pte change detection to avoid concurrent pgtable 2538 * operations. 2539 * 2540 * To pin the page, fast-gup needs to do below in order: 2541 * (1) pin the page (by prefetching pte), then (2) check pte not changed. 2542 * 2543 * For the rest of pgtable operations where pgtable updates can be racy 2544 * with fast-gup, we need to do (1) clear pte, then (2) check whether page 2545 * is pinned. 2546 * 2547 * Above will work for all pte-level operations, including THP split. 2548 * 2549 * For THP collapse, it's a bit more complicated because fast-gup may be 2550 * walking a pgtable page that is being freed (pte is still valid but pmd 2551 * can be cleared already). To avoid race in such condition, we need to 2552 * also check pmd here to make sure pmd doesn't change (corresponds to 2553 * pmdp_collapse_flush() in the THP collapse code path). 2554 */ 2555 static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr, 2556 unsigned long end, unsigned int flags, 2557 struct page **pages, int *nr) 2558 { 2559 struct dev_pagemap *pgmap = NULL; 2560 int nr_start = *nr, ret = 0; 2561 pte_t *ptep, *ptem; 2562 2563 ptem = ptep = pte_offset_map(&pmd, addr); 2564 if (!ptep) 2565 return 0; 2566 do { 2567 pte_t pte = ptep_get_lockless(ptep); 2568 struct page *page; 2569 struct folio *folio; 2570 2571 /* 2572 * Always fallback to ordinary GUP on PROT_NONE-mapped pages: 2573 * pte_access_permitted() better should reject these pages 2574 * either way: otherwise, GUP-fast might succeed in 2575 * cases where ordinary GUP would fail due to VMA access 2576 * permissions. 2577 */ 2578 if (pte_protnone(pte)) 2579 goto pte_unmap; 2580 2581 if (!pte_access_permitted(pte, flags & FOLL_WRITE)) 2582 goto pte_unmap; 2583 2584 if (pte_devmap(pte)) { 2585 if (unlikely(flags & FOLL_LONGTERM)) 2586 goto pte_unmap; 2587 2588 pgmap = get_dev_pagemap(pte_pfn(pte), pgmap); 2589 if (unlikely(!pgmap)) { 2590 undo_dev_pagemap(nr, nr_start, flags, pages); 2591 goto pte_unmap; 2592 } 2593 } else if (pte_special(pte)) 2594 goto pte_unmap; 2595 2596 VM_BUG_ON(!pfn_valid(pte_pfn(pte))); 2597 page = pte_page(pte); 2598 2599 folio = try_grab_folio(page, 1, flags); 2600 if (!folio) 2601 goto pte_unmap; 2602 2603 if (unlikely(folio_is_secretmem(folio))) { 2604 gup_put_folio(folio, 1, flags); 2605 goto pte_unmap; 2606 } 2607 2608 if (unlikely(pmd_val(pmd) != pmd_val(*pmdp)) || 2609 unlikely(pte_val(pte) != pte_val(ptep_get(ptep)))) { 2610 gup_put_folio(folio, 1, flags); 2611 goto pte_unmap; 2612 } 2613 2614 if (!folio_fast_pin_allowed(folio, flags)) { 2615 gup_put_folio(folio, 1, flags); 2616 goto pte_unmap; 2617 } 2618 2619 if (!pte_write(pte) && gup_must_unshare(NULL, flags, page)) { 2620 gup_put_folio(folio, 1, flags); 2621 goto pte_unmap; 2622 } 2623 2624 /* 2625 * We need to make the page accessible if and only if we are 2626 * going to access its content (the FOLL_PIN case). Please 2627 * see Documentation/core-api/pin_user_pages.rst for 2628 * details. 2629 */ 2630 if (flags & FOLL_PIN) { 2631 ret = arch_make_page_accessible(page); 2632 if (ret) { 2633 gup_put_folio(folio, 1, flags); 2634 goto pte_unmap; 2635 } 2636 } 2637 folio_set_referenced(folio); 2638 pages[*nr] = page; 2639 (*nr)++; 2640 } while (ptep++, addr += PAGE_SIZE, addr != end); 2641 2642 ret = 1; 2643 2644 pte_unmap: 2645 if (pgmap) 2646 put_dev_pagemap(pgmap); 2647 pte_unmap(ptem); 2648 return ret; 2649 } 2650 #else 2651 2652 /* 2653 * If we can't determine whether or not a pte is special, then fail immediately 2654 * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not 2655 * to be special. 2656 * 2657 * For a futex to be placed on a THP tail page, get_futex_key requires a 2658 * get_user_pages_fast_only implementation that can pin pages. Thus it's still 2659 * useful to have gup_huge_pmd even if we can't operate on ptes. 2660 */ 2661 static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr, 2662 unsigned long end, unsigned int flags, 2663 struct page **pages, int *nr) 2664 { 2665 return 0; 2666 } 2667 #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */ 2668 2669 #if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE) 2670 static int __gup_device_huge(unsigned long pfn, unsigned long addr, 2671 unsigned long end, unsigned int flags, 2672 struct page **pages, int *nr) 2673 { 2674 int nr_start = *nr; 2675 struct dev_pagemap *pgmap = NULL; 2676 2677 do { 2678 struct page *page = pfn_to_page(pfn); 2679 2680 pgmap = get_dev_pagemap(pfn, pgmap); 2681 if (unlikely(!pgmap)) { 2682 undo_dev_pagemap(nr, nr_start, flags, pages); 2683 break; 2684 } 2685 2686 if (!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)) { 2687 undo_dev_pagemap(nr, nr_start, flags, pages); 2688 break; 2689 } 2690 2691 SetPageReferenced(page); 2692 pages[*nr] = page; 2693 if (unlikely(try_grab_page(page, flags))) { 2694 undo_dev_pagemap(nr, nr_start, flags, pages); 2695 break; 2696 } 2697 (*nr)++; 2698 pfn++; 2699 } while (addr += PAGE_SIZE, addr != end); 2700 2701 put_dev_pagemap(pgmap); 2702 return addr == end; 2703 } 2704 2705 static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, 2706 unsigned long end, unsigned int flags, 2707 struct page **pages, int *nr) 2708 { 2709 unsigned long fault_pfn; 2710 int nr_start = *nr; 2711 2712 fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); 2713 if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr)) 2714 return 0; 2715 2716 if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) { 2717 undo_dev_pagemap(nr, nr_start, flags, pages); 2718 return 0; 2719 } 2720 return 1; 2721 } 2722 2723 static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, 2724 unsigned long end, unsigned int flags, 2725 struct page **pages, int *nr) 2726 { 2727 unsigned long fault_pfn; 2728 int nr_start = *nr; 2729 2730 fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); 2731 if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr)) 2732 return 0; 2733 2734 if (unlikely(pud_val(orig) != pud_val(*pudp))) { 2735 undo_dev_pagemap(nr, nr_start, flags, pages); 2736 return 0; 2737 } 2738 return 1; 2739 } 2740 #else 2741 static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, 2742 unsigned long end, unsigned int flags, 2743 struct page **pages, int *nr) 2744 { 2745 BUILD_BUG(); 2746 return 0; 2747 } 2748 2749 static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr, 2750 unsigned long end, unsigned int flags, 2751 struct page **pages, int *nr) 2752 { 2753 BUILD_BUG(); 2754 return 0; 2755 } 2756 #endif 2757 2758 static int record_subpages(struct page *page, unsigned long addr, 2759 unsigned long end, struct page **pages) 2760 { 2761 int nr; 2762 2763 for (nr = 0; addr != end; nr++, addr += PAGE_SIZE) 2764 pages[nr] = nth_page(page, nr); 2765 2766 return nr; 2767 } 2768 2769 #ifdef CONFIG_ARCH_HAS_HUGEPD 2770 static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end, 2771 unsigned long sz) 2772 { 2773 unsigned long __boundary = (addr + sz) & ~(sz-1); 2774 return (__boundary - 1 < end - 1) ? __boundary : end; 2775 } 2776 2777 static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr, 2778 unsigned long end, unsigned int flags, 2779 struct page **pages, int *nr) 2780 { 2781 unsigned long pte_end; 2782 struct page *page; 2783 struct folio *folio; 2784 pte_t pte; 2785 int refs; 2786 2787 pte_end = (addr + sz) & ~(sz-1); 2788 if (pte_end < end) 2789 end = pte_end; 2790 2791 pte = huge_ptep_get(ptep); 2792 2793 if (!pte_access_permitted(pte, flags & FOLL_WRITE)) 2794 return 0; 2795 2796 /* hugepages are never "special" */ 2797 VM_BUG_ON(!pfn_valid(pte_pfn(pte))); 2798 2799 page = nth_page(pte_page(pte), (addr & (sz - 1)) >> PAGE_SHIFT); 2800 refs = record_subpages(page, addr, end, pages + *nr); 2801 2802 folio = try_grab_folio(page, refs, flags); 2803 if (!folio) 2804 return 0; 2805 2806 if (unlikely(pte_val(pte) != pte_val(ptep_get(ptep)))) { 2807 gup_put_folio(folio, refs, flags); 2808 return 0; 2809 } 2810 2811 if (!folio_fast_pin_allowed(folio, flags)) { 2812 gup_put_folio(folio, refs, flags); 2813 return 0; 2814 } 2815 2816 if (!pte_write(pte) && gup_must_unshare(NULL, flags, &folio->page)) { 2817 gup_put_folio(folio, refs, flags); 2818 return 0; 2819 } 2820 2821 *nr += refs; 2822 folio_set_referenced(folio); 2823 return 1; 2824 } 2825 2826 static int gup_huge_pd(hugepd_t hugepd, unsigned long addr, 2827 unsigned int pdshift, unsigned long end, unsigned int flags, 2828 struct page **pages, int *nr) 2829 { 2830 pte_t *ptep; 2831 unsigned long sz = 1UL << hugepd_shift(hugepd); 2832 unsigned long next; 2833 2834 ptep = hugepte_offset(hugepd, addr, pdshift); 2835 do { 2836 next = hugepte_addr_end(addr, end, sz); 2837 if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr)) 2838 return 0; 2839 } while (ptep++, addr = next, addr != end); 2840 2841 return 1; 2842 } 2843 #else 2844 static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr, 2845 unsigned int pdshift, unsigned long end, unsigned int flags, 2846 struct page **pages, int *nr) 2847 { 2848 return 0; 2849 } 2850 #endif /* CONFIG_ARCH_HAS_HUGEPD */ 2851 2852 static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, 2853 unsigned long end, unsigned int flags, 2854 struct page **pages, int *nr) 2855 { 2856 struct page *page; 2857 struct folio *folio; 2858 int refs; 2859 2860 if (!pmd_access_permitted(orig, flags & FOLL_WRITE)) 2861 return 0; 2862 2863 if (pmd_devmap(orig)) { 2864 if (unlikely(flags & FOLL_LONGTERM)) 2865 return 0; 2866 return __gup_device_huge_pmd(orig, pmdp, addr, end, flags, 2867 pages, nr); 2868 } 2869 2870 page = nth_page(pmd_page(orig), (addr & ~PMD_MASK) >> PAGE_SHIFT); 2871 refs = record_subpages(page, addr, end, pages + *nr); 2872 2873 folio = try_grab_folio(page, refs, flags); 2874 if (!folio) 2875 return 0; 2876 2877 if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) { 2878 gup_put_folio(folio, refs, flags); 2879 return 0; 2880 } 2881 2882 if (!folio_fast_pin_allowed(folio, flags)) { 2883 gup_put_folio(folio, refs, flags); 2884 return 0; 2885 } 2886 if (!pmd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) { 2887 gup_put_folio(folio, refs, flags); 2888 return 0; 2889 } 2890 2891 *nr += refs; 2892 folio_set_referenced(folio); 2893 return 1; 2894 } 2895 2896 static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, 2897 unsigned long end, unsigned int flags, 2898 struct page **pages, int *nr) 2899 { 2900 struct page *page; 2901 struct folio *folio; 2902 int refs; 2903 2904 if (!pud_access_permitted(orig, flags & FOLL_WRITE)) 2905 return 0; 2906 2907 if (pud_devmap(orig)) { 2908 if (unlikely(flags & FOLL_LONGTERM)) 2909 return 0; 2910 return __gup_device_huge_pud(orig, pudp, addr, end, flags, 2911 pages, nr); 2912 } 2913 2914 page = nth_page(pud_page(orig), (addr & ~PUD_MASK) >> PAGE_SHIFT); 2915 refs = record_subpages(page, addr, end, pages + *nr); 2916 2917 folio = try_grab_folio(page, refs, flags); 2918 if (!folio) 2919 return 0; 2920 2921 if (unlikely(pud_val(orig) != pud_val(*pudp))) { 2922 gup_put_folio(folio, refs, flags); 2923 return 0; 2924 } 2925 2926 if (!folio_fast_pin_allowed(folio, flags)) { 2927 gup_put_folio(folio, refs, flags); 2928 return 0; 2929 } 2930 2931 if (!pud_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) { 2932 gup_put_folio(folio, refs, flags); 2933 return 0; 2934 } 2935 2936 *nr += refs; 2937 folio_set_referenced(folio); 2938 return 1; 2939 } 2940 2941 static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr, 2942 unsigned long end, unsigned int flags, 2943 struct page **pages, int *nr) 2944 { 2945 int refs; 2946 struct page *page; 2947 struct folio *folio; 2948 2949 if (!pgd_access_permitted(orig, flags & FOLL_WRITE)) 2950 return 0; 2951 2952 BUILD_BUG_ON(pgd_devmap(orig)); 2953 2954 page = nth_page(pgd_page(orig), (addr & ~PGDIR_MASK) >> PAGE_SHIFT); 2955 refs = record_subpages(page, addr, end, pages + *nr); 2956 2957 folio = try_grab_folio(page, refs, flags); 2958 if (!folio) 2959 return 0; 2960 2961 if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) { 2962 gup_put_folio(folio, refs, flags); 2963 return 0; 2964 } 2965 2966 if (!pgd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) { 2967 gup_put_folio(folio, refs, flags); 2968 return 0; 2969 } 2970 2971 if (!folio_fast_pin_allowed(folio, flags)) { 2972 gup_put_folio(folio, refs, flags); 2973 return 0; 2974 } 2975 2976 *nr += refs; 2977 folio_set_referenced(folio); 2978 return 1; 2979 } 2980 2981 static int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned long end, 2982 unsigned int flags, struct page **pages, int *nr) 2983 { 2984 unsigned long next; 2985 pmd_t *pmdp; 2986 2987 pmdp = pmd_offset_lockless(pudp, pud, addr); 2988 do { 2989 pmd_t pmd = pmdp_get_lockless(pmdp); 2990 2991 next = pmd_addr_end(addr, end); 2992 if (!pmd_present(pmd)) 2993 return 0; 2994 2995 if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) || 2996 pmd_devmap(pmd))) { 2997 /* See gup_pte_range() */ 2998 if (pmd_protnone(pmd)) 2999 return 0; 3000 3001 if (!gup_huge_pmd(pmd, pmdp, addr, next, flags, 3002 pages, nr)) 3003 return 0; 3004 3005 } else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) { 3006 /* 3007 * architecture have different format for hugetlbfs 3008 * pmd format and THP pmd format 3009 */ 3010 if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr, 3011 PMD_SHIFT, next, flags, pages, nr)) 3012 return 0; 3013 } else if (!gup_pte_range(pmd, pmdp, addr, next, flags, pages, nr)) 3014 return 0; 3015 } while (pmdp++, addr = next, addr != end); 3016 3017 return 1; 3018 } 3019 3020 static int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned long end, 3021 unsigned int flags, struct page **pages, int *nr) 3022 { 3023 unsigned long next; 3024 pud_t *pudp; 3025 3026 pudp = pud_offset_lockless(p4dp, p4d, addr); 3027 do { 3028 pud_t pud = READ_ONCE(*pudp); 3029 3030 next = pud_addr_end(addr, end); 3031 if (unlikely(!pud_present(pud))) 3032 return 0; 3033 if (unlikely(pud_huge(pud) || pud_devmap(pud))) { 3034 if (!gup_huge_pud(pud, pudp, addr, next, flags, 3035 pages, nr)) 3036 return 0; 3037 } else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) { 3038 if (!gup_huge_pd(__hugepd(pud_val(pud)), addr, 3039 PUD_SHIFT, next, flags, pages, nr)) 3040 return 0; 3041 } else if (!gup_pmd_range(pudp, pud, addr, next, flags, pages, nr)) 3042 return 0; 3043 } while (pudp++, addr = next, addr != end); 3044 3045 return 1; 3046 } 3047 3048 static int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, unsigned long end, 3049 unsigned int flags, struct page **pages, int *nr) 3050 { 3051 unsigned long next; 3052 p4d_t *p4dp; 3053 3054 p4dp = p4d_offset_lockless(pgdp, pgd, addr); 3055 do { 3056 p4d_t p4d = READ_ONCE(*p4dp); 3057 3058 next = p4d_addr_end(addr, end); 3059 if (p4d_none(p4d)) 3060 return 0; 3061 BUILD_BUG_ON(p4d_huge(p4d)); 3062 if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) { 3063 if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr, 3064 P4D_SHIFT, next, flags, pages, nr)) 3065 return 0; 3066 } else if (!gup_pud_range(p4dp, p4d, addr, next, flags, pages, nr)) 3067 return 0; 3068 } while (p4dp++, addr = next, addr != end); 3069 3070 return 1; 3071 } 3072 3073 static void gup_pgd_range(unsigned long addr, unsigned long end, 3074 unsigned int flags, struct page **pages, int *nr) 3075 { 3076 unsigned long next; 3077 pgd_t *pgdp; 3078 3079 pgdp = pgd_offset(current->mm, addr); 3080 do { 3081 pgd_t pgd = READ_ONCE(*pgdp); 3082 3083 next = pgd_addr_end(addr, end); 3084 if (pgd_none(pgd)) 3085 return; 3086 if (unlikely(pgd_huge(pgd))) { 3087 if (!gup_huge_pgd(pgd, pgdp, addr, next, flags, 3088 pages, nr)) 3089 return; 3090 } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) { 3091 if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr, 3092 PGDIR_SHIFT, next, flags, pages, nr)) 3093 return; 3094 } else if (!gup_p4d_range(pgdp, pgd, addr, next, flags, pages, nr)) 3095 return; 3096 } while (pgdp++, addr = next, addr != end); 3097 } 3098 #else 3099 static inline void gup_pgd_range(unsigned long addr, unsigned long end, 3100 unsigned int flags, struct page **pages, int *nr) 3101 { 3102 } 3103 #endif /* CONFIG_HAVE_FAST_GUP */ 3104 3105 #ifndef gup_fast_permitted 3106 /* 3107 * Check if it's allowed to use get_user_pages_fast_only() for the range, or 3108 * we need to fall back to the slow version: 3109 */ 3110 static bool gup_fast_permitted(unsigned long start, unsigned long end) 3111 { 3112 return true; 3113 } 3114 #endif 3115 3116 static unsigned long lockless_pages_from_mm(unsigned long start, 3117 unsigned long end, 3118 unsigned int gup_flags, 3119 struct page **pages) 3120 { 3121 unsigned long flags; 3122 int nr_pinned = 0; 3123 unsigned seq; 3124 3125 if (!IS_ENABLED(CONFIG_HAVE_FAST_GUP) || 3126 !gup_fast_permitted(start, end)) 3127 return 0; 3128 3129 if (gup_flags & FOLL_PIN) { 3130 seq = raw_read_seqcount(¤t->mm->write_protect_seq); 3131 if (seq & 1) 3132 return 0; 3133 } 3134 3135 /* 3136 * Disable interrupts. The nested form is used, in order to allow full, 3137 * general purpose use of this routine. 3138 * 3139 * With interrupts disabled, we block page table pages from being freed 3140 * from under us. See struct mmu_table_batch comments in 3141 * include/asm-generic/tlb.h for more details. 3142 * 3143 * We do not adopt an rcu_read_lock() here as we also want to block IPIs 3144 * that come from THPs splitting. 3145 */ 3146 local_irq_save(flags); 3147 gup_pgd_range(start, end, gup_flags, pages, &nr_pinned); 3148 local_irq_restore(flags); 3149 3150 /* 3151 * When pinning pages for DMA there could be a concurrent write protect 3152 * from fork() via copy_page_range(), in this case always fail fast GUP. 3153 */ 3154 if (gup_flags & FOLL_PIN) { 3155 if (read_seqcount_retry(¤t->mm->write_protect_seq, seq)) { 3156 unpin_user_pages_lockless(pages, nr_pinned); 3157 return 0; 3158 } else { 3159 sanity_check_pinned_pages(pages, nr_pinned); 3160 } 3161 } 3162 return nr_pinned; 3163 } 3164 3165 static int internal_get_user_pages_fast(unsigned long start, 3166 unsigned long nr_pages, 3167 unsigned int gup_flags, 3168 struct page **pages) 3169 { 3170 unsigned long len, end; 3171 unsigned long nr_pinned; 3172 int locked = 0; 3173 int ret; 3174 3175 if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM | 3176 FOLL_FORCE | FOLL_PIN | FOLL_GET | 3177 FOLL_FAST_ONLY | FOLL_NOFAULT | 3178 FOLL_PCI_P2PDMA | FOLL_HONOR_NUMA_FAULT))) 3179 return -EINVAL; 3180 3181 if (gup_flags & FOLL_PIN) 3182 mm_set_has_pinned_flag(¤t->mm->flags); 3183 3184 if (!(gup_flags & FOLL_FAST_ONLY)) 3185 might_lock_read(¤t->mm->mmap_lock); 3186 3187 start = untagged_addr(start) & PAGE_MASK; 3188 len = nr_pages << PAGE_SHIFT; 3189 if (check_add_overflow(start, len, &end)) 3190 return -EOVERFLOW; 3191 if (end > TASK_SIZE_MAX) 3192 return -EFAULT; 3193 if (unlikely(!access_ok((void __user *)start, len))) 3194 return -EFAULT; 3195 3196 nr_pinned = lockless_pages_from_mm(start, end, gup_flags, pages); 3197 if (nr_pinned == nr_pages || gup_flags & FOLL_FAST_ONLY) 3198 return nr_pinned; 3199 3200 /* Slow path: try to get the remaining pages with get_user_pages */ 3201 start += nr_pinned << PAGE_SHIFT; 3202 pages += nr_pinned; 3203 ret = __gup_longterm_locked(current->mm, start, nr_pages - nr_pinned, 3204 pages, &locked, 3205 gup_flags | FOLL_TOUCH | FOLL_UNLOCKABLE); 3206 if (ret < 0) { 3207 /* 3208 * The caller has to unpin the pages we already pinned so 3209 * returning -errno is not an option 3210 */ 3211 if (nr_pinned) 3212 return nr_pinned; 3213 return ret; 3214 } 3215 return ret + nr_pinned; 3216 } 3217 3218 /** 3219 * get_user_pages_fast_only() - pin user pages in memory 3220 * @start: starting user address 3221 * @nr_pages: number of pages from start to pin 3222 * @gup_flags: flags modifying pin behaviour 3223 * @pages: array that receives pointers to the pages pinned. 3224 * Should be at least nr_pages long. 3225 * 3226 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to 3227 * the regular GUP. 3228 * 3229 * If the architecture does not support this function, simply return with no 3230 * pages pinned. 3231 * 3232 * Careful, careful! COW breaking can go either way, so a non-write 3233 * access can get ambiguous page results. If you call this function without 3234 * 'write' set, you'd better be sure that you're ok with that ambiguity. 3235 */ 3236 int get_user_pages_fast_only(unsigned long start, int nr_pages, 3237 unsigned int gup_flags, struct page **pages) 3238 { 3239 /* 3240 * Internally (within mm/gup.c), gup fast variants must set FOLL_GET, 3241 * because gup fast is always a "pin with a +1 page refcount" request. 3242 * 3243 * FOLL_FAST_ONLY is required in order to match the API description of 3244 * this routine: no fall back to regular ("slow") GUP. 3245 */ 3246 if (!is_valid_gup_args(pages, NULL, &gup_flags, 3247 FOLL_GET | FOLL_FAST_ONLY)) 3248 return -EINVAL; 3249 3250 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages); 3251 } 3252 EXPORT_SYMBOL_GPL(get_user_pages_fast_only); 3253 3254 /** 3255 * get_user_pages_fast() - pin user pages in memory 3256 * @start: starting user address 3257 * @nr_pages: number of pages from start to pin 3258 * @gup_flags: flags modifying pin behaviour 3259 * @pages: array that receives pointers to the pages pinned. 3260 * Should be at least nr_pages long. 3261 * 3262 * Attempt to pin user pages in memory without taking mm->mmap_lock. 3263 * If not successful, it will fall back to taking the lock and 3264 * calling get_user_pages(). 3265 * 3266 * Returns number of pages pinned. This may be fewer than the number requested. 3267 * If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns 3268 * -errno. 3269 */ 3270 int get_user_pages_fast(unsigned long start, int nr_pages, 3271 unsigned int gup_flags, struct page **pages) 3272 { 3273 /* 3274 * The caller may or may not have explicitly set FOLL_GET; either way is 3275 * OK. However, internally (within mm/gup.c), gup fast variants must set 3276 * FOLL_GET, because gup fast is always a "pin with a +1 page refcount" 3277 * request. 3278 */ 3279 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_GET)) 3280 return -EINVAL; 3281 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages); 3282 } 3283 EXPORT_SYMBOL_GPL(get_user_pages_fast); 3284 3285 /** 3286 * pin_user_pages_fast() - pin user pages in memory without taking locks 3287 * 3288 * @start: starting user address 3289 * @nr_pages: number of pages from start to pin 3290 * @gup_flags: flags modifying pin behaviour 3291 * @pages: array that receives pointers to the pages pinned. 3292 * Should be at least nr_pages long. 3293 * 3294 * Nearly the same as get_user_pages_fast(), except that FOLL_PIN is set. See 3295 * get_user_pages_fast() for documentation on the function arguments, because 3296 * the arguments here are identical. 3297 * 3298 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please 3299 * see Documentation/core-api/pin_user_pages.rst for further details. 3300 * 3301 * Note that if a zero_page is amongst the returned pages, it will not have 3302 * pins in it and unpin_user_page() will not remove pins from it. 3303 */ 3304 int pin_user_pages_fast(unsigned long start, int nr_pages, 3305 unsigned int gup_flags, struct page **pages) 3306 { 3307 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN)) 3308 return -EINVAL; 3309 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages); 3310 } 3311 EXPORT_SYMBOL_GPL(pin_user_pages_fast); 3312 3313 /** 3314 * pin_user_pages_remote() - pin pages of a remote process 3315 * 3316 * @mm: mm_struct of target mm 3317 * @start: starting user address 3318 * @nr_pages: number of pages from start to pin 3319 * @gup_flags: flags modifying lookup behaviour 3320 * @pages: array that receives pointers to the pages pinned. 3321 * Should be at least nr_pages long. 3322 * @locked: pointer to lock flag indicating whether lock is held and 3323 * subsequently whether VM_FAULT_RETRY functionality can be 3324 * utilised. Lock must initially be held. 3325 * 3326 * Nearly the same as get_user_pages_remote(), except that FOLL_PIN is set. See 3327 * get_user_pages_remote() for documentation on the function arguments, because 3328 * the arguments here are identical. 3329 * 3330 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please 3331 * see Documentation/core-api/pin_user_pages.rst for details. 3332 * 3333 * Note that if a zero_page is amongst the returned pages, it will not have 3334 * pins in it and unpin_user_page*() will not remove pins from it. 3335 */ 3336 long pin_user_pages_remote(struct mm_struct *mm, 3337 unsigned long start, unsigned long nr_pages, 3338 unsigned int gup_flags, struct page **pages, 3339 int *locked) 3340 { 3341 int local_locked = 1; 3342 3343 if (!is_valid_gup_args(pages, locked, &gup_flags, 3344 FOLL_PIN | FOLL_TOUCH | FOLL_REMOTE)) 3345 return 0; 3346 return __gup_longterm_locked(mm, start, nr_pages, pages, 3347 locked ? locked : &local_locked, 3348 gup_flags); 3349 } 3350 EXPORT_SYMBOL(pin_user_pages_remote); 3351 3352 /** 3353 * pin_user_pages() - pin user pages in memory for use by other devices 3354 * 3355 * @start: starting user address 3356 * @nr_pages: number of pages from start to pin 3357 * @gup_flags: flags modifying lookup behaviour 3358 * @pages: array that receives pointers to the pages pinned. 3359 * Should be at least nr_pages long. 3360 * 3361 * Nearly the same as get_user_pages(), except that FOLL_TOUCH is not set, and 3362 * FOLL_PIN is set. 3363 * 3364 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please 3365 * see Documentation/core-api/pin_user_pages.rst for details. 3366 * 3367 * Note that if a zero_page is amongst the returned pages, it will not have 3368 * pins in it and unpin_user_page*() will not remove pins from it. 3369 */ 3370 long pin_user_pages(unsigned long start, unsigned long nr_pages, 3371 unsigned int gup_flags, struct page **pages) 3372 { 3373 int locked = 1; 3374 3375 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN)) 3376 return 0; 3377 return __gup_longterm_locked(current->mm, start, nr_pages, 3378 pages, &locked, gup_flags); 3379 } 3380 EXPORT_SYMBOL(pin_user_pages); 3381 3382 /* 3383 * pin_user_pages_unlocked() is the FOLL_PIN variant of 3384 * get_user_pages_unlocked(). Behavior is the same, except that this one sets 3385 * FOLL_PIN and rejects FOLL_GET. 3386 * 3387 * Note that if a zero_page is amongst the returned pages, it will not have 3388 * pins in it and unpin_user_page*() will not remove pins from it. 3389 */ 3390 long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages, 3391 struct page **pages, unsigned int gup_flags) 3392 { 3393 int locked = 0; 3394 3395 if (!is_valid_gup_args(pages, NULL, &gup_flags, 3396 FOLL_PIN | FOLL_TOUCH | FOLL_UNLOCKABLE)) 3397 return 0; 3398 3399 return __gup_longterm_locked(current->mm, start, nr_pages, pages, 3400 &locked, gup_flags); 3401 } 3402 EXPORT_SYMBOL(pin_user_pages_unlocked); 3403