// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>

#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <linux/sched/signal.h>
#include <linux/rwsem.h>
#include <linux/hugetlb.h>
#include <linux/migrate.h>
#include <linux/mm_inline.h>
#include <linux/sched/mm.h>

#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "internal.h"

struct follow_page_context {
	struct dev_pagemap *pgmap;
	unsigned int page_mask;
};

typedef int (*set_dirty_func_t)(struct page *page);

static void __put_user_pages_dirty(struct page **pages,
				   unsigned long npages,
				   set_dirty_func_t sdf)
{
	unsigned long index;

	for (index = 0; index < npages; index++) {
		struct page *page = compound_head(pages[index]);

		/*
		 * Checking PageDirty at this point may race with
		 * clear_page_dirty_for_io(), but that's OK. Two key cases:
		 *
		 * 1) This code sees the page as already dirty, so it skips
		 * the call to sdf(). That could happen because
		 * clear_page_dirty_for_io() called page_mkclean(),
		 * followed by set_page_dirty(). However, now the page is
		 * going to get written back, which meets the original
		 * intention of setting it dirty, so all is well:
		 * clear_page_dirty_for_io() goes on to call
		 * TestClearPageDirty(), and write the page back.
		 *
		 * 2) This code sees the page as clean, so it calls sdf().
		 * The page stays dirty, despite being written back, so it
		 * gets written back again in the next writeback cycle.
		 * This is harmless.
		 */
		if (!PageDirty(page))
			sdf(page);

		put_user_page(page);
	}
}

/**
 * put_user_pages_dirty() - release and dirty an array of gup-pinned pages
 * @pages:  array of pages to be marked dirty and released.
 * @npages: number of pages in the @pages array.
 *
 * "gup-pinned page" refers to a page that has had one of the get_user_pages()
 * variants called on that page.
 *
 * For each page in the @pages array, make that page (or its head page, if a
 * compound page) dirty, if it was previously listed as clean. Then, release
 * the page using put_user_page().
 *
 * Please see the put_user_page() documentation for details.
 *
 * set_page_dirty(), which does not lock the page, is used here.
 * Therefore, it is the caller's responsibility to ensure that this is
 * safe. If not, then put_user_pages_dirty_lock() should be called instead.
 */
void put_user_pages_dirty(struct page **pages, unsigned long npages)
{
	__put_user_pages_dirty(pages, npages, set_page_dirty);
}
EXPORT_SYMBOL(put_user_pages_dirty);
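/*
 * Example (illustrative sketch only; the function below is hypothetical,
 * not part of this file): a driver that pinned user pages with one of the
 * get_user_pages() variants for a device-to-memory transfer would release
 * them like this once the DMA completes, since the device may have written
 * to the pages:
 *
 *	static void example_dma_done(struct page **pages, unsigned long npages)
 *	{
 *		put_user_pages_dirty(pages, npages);
 *	}
 */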
/**
 * put_user_pages_dirty_lock() - release and dirty an array of gup-pinned pages
 * @pages:  array of pages to be marked dirty and released.
 * @npages: number of pages in the @pages array.
 *
 * For each page in the @pages array, make that page (or its head page, if a
 * compound page) dirty, if it was previously listed as clean. Then, release
 * the page using put_user_page().
 *
 * Please see the put_user_page() documentation for details.
 *
 * This is just like put_user_pages_dirty(), except that it invokes
 * set_page_dirty_lock(), instead of set_page_dirty().
 */
void put_user_pages_dirty_lock(struct page **pages, unsigned long npages)
{
	__put_user_pages_dirty(pages, npages, set_page_dirty_lock);
}
EXPORT_SYMBOL(put_user_pages_dirty_lock);

/**
 * put_user_pages() - release an array of gup-pinned pages.
 * @pages:  array of pages to be released.
 * @npages: number of pages in the @pages array.
 *
 * For each page in the @pages array, release the page using put_user_page().
 *
 * Please see the put_user_page() documentation for details.
 */
void put_user_pages(struct page **pages, unsigned long npages)
{
	unsigned long index;

	/*
	 * TODO: this can be optimized for huge pages: if a series of pages is
	 * physically contiguous and part of the same compound page, then a
	 * single operation to the head page should suffice.
	 */
	for (index = 0; index < npages; index++)
		put_user_page(pages[index]);
}
EXPORT_SYMBOL(put_user_pages);
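/*
 * Example (illustrative sketch; the function below is hypothetical): for a
 * pin that was only read from, no dirtying is needed on release:
 *
 *	static void example_read_done(struct page **pages, unsigned long npages)
 *	{
 *		put_user_pages(pages, npages);
 *	}
 */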
static struct page *no_page_table(struct vm_area_struct *vma,
		unsigned int flags)
{
	/*
	 * When core dumping an enormous anonymous area that nobody
	 * has touched so far, we don't want to allocate unnecessary pages or
	 * page tables.  Return error instead of NULL to skip handle_mm_fault,
	 * then get_dump_page() will return NULL to leave a hole in the dump.
	 * But we can only make this optimization where a hole would surely
	 * be zero-filled if handle_mm_fault() actually did handle it.
	 */
	if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault))
		return ERR_PTR(-EFAULT);
	return NULL;
}

static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
		pte_t *pte, unsigned int flags)
{
	/* No page to get reference */
	if (flags & FOLL_GET)
		return -EFAULT;

	if (flags & FOLL_TOUCH) {
		pte_t entry = *pte;

		if (flags & FOLL_WRITE)
			entry = pte_mkdirty(entry);
		entry = pte_mkyoung(entry);

		if (!pte_same(*pte, entry)) {
			set_pte_at(vma->vm_mm, address, pte, entry);
			update_mmu_cache(vma, address, pte);
		}
	}

	/* Proper page table entry exists, but no corresponding struct page */
	return -EEXIST;
}

/*
 * FOLL_FORCE can write to even unwritable pte's, but only
 * after we've gone through a COW cycle and they are dirty.
 */
static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
{
	return pte_write(pte) ||
		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
}

static struct page *follow_page_pte(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd, unsigned int flags,
		struct dev_pagemap **pgmap)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	spinlock_t *ptl;
	pte_t *ptep, pte;

retry:
	if (unlikely(pmd_bad(*pmd)))
		return no_page_table(vma, flags);

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!pte_present(pte)) {
		swp_entry_t entry;
		/*
		 * KSM's break_ksm() relies upon recognizing a ksm page
		 * even while it is being migrated, so for that case we
		 * need migration_entry_wait().
		 */
		if (likely(!(flags & FOLL_MIGRATION)))
			goto no_page;
		if (pte_none(pte))
			goto no_page;
		entry = pte_to_swp_entry(pte);
		if (!is_migration_entry(entry))
			goto no_page;
		pte_unmap_unlock(ptep, ptl);
		migration_entry_wait(mm, pmd, address);
		goto retry;
	}
	if ((flags & FOLL_NUMA) && pte_protnone(pte))
		goto no_page;
	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
		pte_unmap_unlock(ptep, ptl);
		return NULL;
	}

	page = vm_normal_page(vma, address, pte);
	if (!page && pte_devmap(pte) && (flags & FOLL_GET)) {
		/*
		 * Only return device mapping pages in the FOLL_GET case since
		 * they are only valid while holding the pgmap reference.
		 */
		*pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap);
		if (*pgmap)
			page = pte_page(pte);
		else
			goto no_page;
	} else if (unlikely(!page)) {
		if (flags & FOLL_DUMP) {
			/* Avoid special (like zero) pages in core dumps */
			page = ERR_PTR(-EFAULT);
			goto out;
		}

		if (is_zero_pfn(pte_pfn(pte))) {
			page = pte_page(pte);
		} else {
			int ret;

			ret = follow_pfn_pte(vma, address, ptep, flags);
			page = ERR_PTR(ret);
			goto out;
		}
	}

	if (flags & FOLL_SPLIT && PageTransCompound(page)) {
		int ret;
		get_page(page);
		pte_unmap_unlock(ptep, ptl);
		lock_page(page);
		ret = split_huge_page(page);
		unlock_page(page);
		put_page(page);
		if (ret)
			return ERR_PTR(ret);
		goto retry;
	}

	if (flags & FOLL_GET) {
		if (unlikely(!try_get_page(page))) {
			page = ERR_PTR(-ENOMEM);
			goto out;
		}
	}
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to use
		 * mark_page_accessed().
		 */
		mark_page_accessed(page);
	}
	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		/* Do not mlock pte-mapped THP */
		if (PageTransCompound(page))
			goto out;

		/*
		 * The preliminary mapping check is mainly to avoid the
		 * pointless overhead of lock_page on the ZERO_PAGE
		 * which might bounce very badly if there is contention.
		 *
		 * If the page is already locked, we don't need to
		 * handle it now - vmscan will handle it later if and
		 * when it attempts to reclaim the page.
		 */
		if (page->mapping && trylock_page(page)) {
			lru_add_drain();	/* push cached pages to LRU */
			/*
			 * Because we lock page here, and migration is
			 * blocked by the pte's page reference, and we
			 * know the page is still mapped, we don't even
			 * need to check for file-cache page truncation.
			 */
			mlock_vma_page(page);
			unlock_page(page);
		}
	}
out:
	pte_unmap_unlock(ptep, ptl);
	return page;
no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return NULL;
	return no_page_table(vma, flags);
}
static struct page *follow_pmd_mask(struct vm_area_struct *vma,
				    unsigned long address, pud_t *pudp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	pmd_t *pmd, pmdval;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	pmd = pmd_offset(pudp, address);
	/*
	 * The READ_ONCE() will stabilize the pmdval in a register or
	 * on the stack so that it will stop changing under the code.
	 */
	pmdval = READ_ONCE(*pmd);
	if (pmd_none(pmdval))
		return no_page_table(vma, flags);
	if (pmd_huge(pmdval) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pmd(mm, address, pmd, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (is_hugepd(__hugepd(pmd_val(pmdval)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(pmd_val(pmdval)), flags,
				      PMD_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
retry:
	if (!pmd_present(pmdval)) {
		if (likely(!(flags & FOLL_MIGRATION)))
			return no_page_table(vma, flags);
		VM_BUG_ON(thp_migration_supported() &&
			  !is_pmd_migration_entry(pmdval));
		if (is_pmd_migration_entry(pmdval))
			pmd_migration_entry_wait(mm, pmd);
		pmdval = READ_ONCE(*pmd);
		/*
		 * MADV_DONTNEED may convert the pmd to null because
		 * mmap_sem is held in read mode
		 */
		if (pmd_none(pmdval))
			return no_page_table(vma, flags);
		goto retry;
	}
	if (pmd_devmap(pmdval)) {
		ptl = pmd_lock(mm, pmd);
		page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
		spin_unlock(ptl);
		if (page)
			return page;
	}
	if (likely(!pmd_trans_huge(pmdval)))
		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);

	if ((flags & FOLL_NUMA) && pmd_protnone(pmdval))
		return no_page_table(vma, flags);

retry_locked:
	ptl = pmd_lock(mm, pmd);
	if (unlikely(pmd_none(*pmd))) {
		spin_unlock(ptl);
		return no_page_table(vma, flags);
	}
	if (unlikely(!pmd_present(*pmd))) {
		spin_unlock(ptl);
		if (likely(!(flags & FOLL_MIGRATION)))
			return no_page_table(vma, flags);
		pmd_migration_entry_wait(mm, pmd);
		goto retry_locked;
	}
	if (unlikely(!pmd_trans_huge(*pmd))) {
		spin_unlock(ptl);
		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
	}
	if (flags & FOLL_SPLIT) {
		int ret;
		page = pmd_page(*pmd);
		if (is_huge_zero_page(page)) {
			spin_unlock(ptl);
			ret = 0;
			split_huge_pmd(vma, pmd, address);
			if (pmd_trans_unstable(pmd))
				ret = -EBUSY;
		} else {
			if (unlikely(!try_get_page(page))) {
				spin_unlock(ptl);
				return ERR_PTR(-ENOMEM);
			}
			spin_unlock(ptl);
			lock_page(page);
			ret = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			if (pmd_none(*pmd))
				return no_page_table(vma, flags);
		}
		return ret ? ERR_PTR(ret) :
			follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
	}
	page = follow_trans_huge_pmd(vma, address, pmd, flags);
	spin_unlock(ptl);
	ctx->page_mask = HPAGE_PMD_NR - 1;
	return page;
}

static struct page *follow_pud_mask(struct vm_area_struct *vma,
				    unsigned long address, p4d_t *p4dp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	pud_t *pud;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	pud = pud_offset(p4dp, address);
	if (pud_none(*pud))
		return no_page_table(vma, flags);
	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pud(mm, address, pud, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (is_hugepd(__hugepd(pud_val(*pud)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(pud_val(*pud)), flags,
				      PUD_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (pud_devmap(*pud)) {
		ptl = pud_lock(mm, pud);
		page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap);
		spin_unlock(ptl);
		if (page)
			return page;
	}
	if (unlikely(pud_bad(*pud)))
		return no_page_table(vma, flags);

	return follow_pmd_mask(vma, address, pud, flags, ctx);
}

static struct page *follow_p4d_mask(struct vm_area_struct *vma,
				    unsigned long address, pgd_t *pgdp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	p4d_t *p4d;
	struct page *page;

	p4d = p4d_offset(pgdp, address);
	if (p4d_none(*p4d))
		return no_page_table(vma, flags);
	BUILD_BUG_ON(p4d_huge(*p4d));
	if (unlikely(p4d_bad(*p4d)))
		return no_page_table(vma, flags);

	if (is_hugepd(__hugepd(p4d_val(*p4d)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(p4d_val(*p4d)), flags,
				      P4D_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	return follow_pud_mask(vma, address, p4d, flags, ctx);
}
/**
 * follow_page_mask - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a
 *       pointer to output page_mask
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
 * the device's dev_pagemap metadata to avoid repeating expensive lookups.
 *
 * On output, the @ctx->page_mask is set according to the size of the page.
 *
 * Return: the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      struct follow_page_context *ctx)
{
	pgd_t *pgd;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	ctx->page_mask = 0;

	/* make this handle hugepd */
	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
	if (!IS_ERR(page)) {
		BUG_ON(flags & FOLL_GET);
		return page;
	}

	pgd = pgd_offset(mm, address);

	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return no_page_table(vma, flags);

	if (pgd_huge(*pgd)) {
		page = follow_huge_pgd(mm, address, pgd, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (is_hugepd(__hugepd(pgd_val(*pgd)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(pgd_val(*pgd)), flags,
				      PGDIR_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}

	return follow_p4d_mask(vma, address, pgd, flags, ctx);
}

struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			 unsigned int foll_flags)
{
	struct follow_page_context ctx = { NULL };
	struct page *page;

	page = follow_page_mask(vma, address, foll_flags, &ctx);
	if (ctx.pgmap)
		put_dev_pagemap(ctx.pgmap);
	return page;
}
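/*
 * Example (sketch; "mm", "vma", "addr" and the locking context come from a
 * hypothetical caller): follow_page() must be called with mmap_sem held,
 * and with FOLL_GET the caller owns a page reference on success:
 *
 *	struct page *page;
 *
 *	down_read(&mm->mmap_sem);
 *	page = follow_page(vma, addr, FOLL_GET);
 *	if (!IS_ERR_OR_NULL(page))
 *		put_page(page);
 *	up_read(&mm->mmap_sem);
 */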
static int get_gate_page(struct mm_struct *mm, unsigned long address,
		unsigned int gup_flags, struct vm_area_struct **vma,
		struct page **page)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret = -EFAULT;

	/* user gate pages are read-only */
	if (gup_flags & FOLL_WRITE)
		return -EFAULT;
	if (address > TASK_SIZE)
		pgd = pgd_offset_k(address);
	else
		pgd = pgd_offset_gate(mm, address);
	BUG_ON(pgd_none(*pgd));
	p4d = p4d_offset(pgd, address);
	BUG_ON(p4d_none(*p4d));
	pud = pud_offset(p4d, address);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return -EFAULT;
	VM_BUG_ON(pmd_trans_huge(*pmd));
	pte = pte_offset_map(pmd, address);
	if (pte_none(*pte))
		goto unmap;
	*vma = get_gate_vma(mm);
	if (!page)
		goto out;
	*page = vm_normal_page(*vma, address, *pte);
	if (!*page) {
		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
			goto unmap;
		*page = pte_page(*pte);

		/*
		 * This should never happen (a device public page in the gate
		 * area).
		 */
		if (is_device_public_page(*page))
			goto unmap;
	}
	if (unlikely(!try_get_page(*page))) {
		ret = -ENOMEM;
		goto unmap;
	}
out:
	ret = 0;
unmap:
	pte_unmap(pte);
	return ret;
}

/*
 * mmap_sem must be held on entry.  If @nonblocking != NULL and
 * *@flags does not include FOLL_NOWAIT, the mmap_sem may be released.
 * If it is, *@nonblocking will be set to 0 and -EBUSY returned.
 */
static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
		unsigned long address, unsigned int *flags, int *nonblocking)
{
	unsigned int fault_flags = 0;
	vm_fault_t ret;

	/* mlock all present pages, but do not fault in new pages */
	if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
		return -ENOENT;
	if (*flags & FOLL_WRITE)
		fault_flags |= FAULT_FLAG_WRITE;
	if (*flags & FOLL_REMOTE)
		fault_flags |= FAULT_FLAG_REMOTE;
	if (nonblocking)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY;
	if (*flags & FOLL_NOWAIT)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
	if (*flags & FOLL_TRIED) {
		VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY);
		fault_flags |= FAULT_FLAG_TRIED;
	}

	ret = handle_mm_fault(vma, address, fault_flags);
	if (ret & VM_FAULT_ERROR) {
		int err = vm_fault_to_errno(ret, *flags);

		if (err)
			return err;
		BUG();
	}

	if (tsk) {
		if (ret & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}

	if (ret & VM_FAULT_RETRY) {
		if (nonblocking && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
			*nonblocking = 0;
		return -EBUSY;
	}

	/*
	 * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
	 * necessary, even if maybe_mkwrite decided not to set pte_write. We
	 * can thus safely do subsequent page lookups as if they were reads.
	 * But only do so when looping for pte_write is futile: in some cases
	 * userspace may also be wanting to write to the gotten user page,
	 * which a read fault here might prevent (a readonly page might get
	 * reCOWed by userspace write).
	 */
	if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
		*flags |= FOLL_COW;
	return 0;
}

static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
{
	vm_flags_t vm_flags = vma->vm_flags;
	int write = (gup_flags & FOLL_WRITE);
	int foreign = (gup_flags & FOLL_REMOTE);

	if (vm_flags & (VM_IO | VM_PFNMAP))
		return -EFAULT;

	if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
		return -EFAULT;

	if (write) {
		if (!(vm_flags & VM_WRITE)) {
			if (!(gup_flags & FOLL_FORCE))
				return -EFAULT;
			/*
			 * We used to let the write,force case do COW in a
			 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
			 * set a breakpoint in a read-only mapping of an
			 * executable, without corrupting the file (yet only
			 * when that file had been opened for writing!).
			 * Anon pages in shared mappings are surprising: now
			 * just reject it.
			 */
			if (!is_cow_mapping(vm_flags))
				return -EFAULT;
		}
	} else if (!(vm_flags & VM_READ)) {
		if (!(gup_flags & FOLL_FORCE))
			return -EFAULT;
		/*
		 * Is there actually any vma we can reach here which does not
		 * have VM_MAYREAD set?
		 */
		if (!(vm_flags & VM_MAYREAD))
			return -EFAULT;
	}
	/*
	 * gups are always data accesses, not instruction
	 * fetches, so execute=false here
	 */
	if (!arch_vma_access_permitted(vma, write, false, foreign))
		return -EFAULT;
	return 0;
}
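/*
 * Example (sketch; "tsk", "mm", "addr" and "page" come from a hypothetical
 * caller): a forced write into a read-only private mapping, as ptrace does
 * when planting a breakpoint, passes check_vma_flags() above because
 * FOLL_FORCE is set and the vma is a COW mapping:
 *
 *	ret = get_user_pages_remote(tsk, mm, addr, 1,
 *				    FOLL_WRITE | FOLL_FORCE, &page,
 *				    NULL, NULL);
 */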
/**
 * __get_user_pages() - pin user pages in memory
 * @tsk:	task_struct of target task
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 * @nonblocking: whether waiting for disk IO or mmap_sem contention
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held.  It may be released.  See below.
 *
 * __get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * __get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re-faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
 * appropriate) must be called after the page is finished with, and
 * before put_page is called.
 *
 * If @nonblocking != NULL, __get_user_pages will not wait for disk IO
 * or mmap_sem contention, and if waiting is needed to pin all pages,
 * *@nonblocking will be set to 0.  Further, if @gup_flags does not
 * include FOLL_NOWAIT, the mmap_sem will be released via up_read() in
 * this case.
 *
 * A caller using such a combination of @nonblocking and @gup_flags
 * must therefore hold the mmap_sem for reading only, and recognize
 * when it's been released.  Otherwise, it must be held for either
 * reading or writing and will not be released.
 *
 * In most cases, get_user_pages or get_user_pages_fast should be used
 * instead of __get_user_pages. __get_user_pages should be used only if
 * you need some special @gup_flags.
 */
static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas, int *nonblocking)
{
	long ret = 0, i = 0;
	struct vm_area_struct *vma = NULL;
	struct follow_page_context ctx = { NULL };

	if (!nr_pages)
		return 0;

	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));

	/*
	 * If FOLL_FORCE is set then do not force a full fault as the hinting
	 * fault information is unrelated to the reference behaviour of a task
	 * using the address space
	 */
	if (!(gup_flags & FOLL_FORCE))
		gup_flags |= FOLL_NUMA;

	do {
		struct page *page;
		unsigned int foll_flags = gup_flags;
		unsigned int page_increm;

		/* first iteration or cross vma bound */
		if (!vma || start >= vma->vm_end) {
			vma = find_extend_vma(mm, start);
			if (!vma && in_gate_area(mm, start)) {
				ret = get_gate_page(mm, start & PAGE_MASK,
						gup_flags, &vma,
						pages ? &pages[i] : NULL);
				if (ret)
					goto out;
				ctx.page_mask = 0;
				goto next_page;
			}

			if (!vma || check_vma_flags(vma, gup_flags)) {
				ret = -EFAULT;
				goto out;
			}
			if (is_vm_hugetlb_page(vma)) {
				i = follow_hugetlb_page(mm, vma, pages, vmas,
						&start, &nr_pages, i,
						gup_flags, nonblocking);
				continue;
			}
		}
retry:
		/*
		 * If we have a pending SIGKILL, don't keep faulting pages and
		 * potentially allocating memory.
		 */
		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			goto out;
		}
		cond_resched();

		page = follow_page_mask(vma, start, foll_flags, &ctx);
		if (!page) {
			ret = faultin_page(tsk, vma, start, &foll_flags,
					nonblocking);
			switch (ret) {
			case 0:
				goto retry;
			case -EBUSY:
				ret = 0;
				/* FALLTHRU */
			case -EFAULT:
			case -ENOMEM:
			case -EHWPOISON:
				goto out;
			case -ENOENT:
				goto next_page;
			}
			BUG();
		} else if (PTR_ERR(page) == -EEXIST) {
			/*
			 * Proper page table entry exists, but no corresponding
			 * struct page.
			 */
			goto next_page;
		} else if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto out;
		}
		if (pages) {
			pages[i] = page;
			flush_anon_page(vma, page, start);
			flush_dcache_page(page);
			ctx.page_mask = 0;
		}
next_page:
		if (vmas) {
			vmas[i] = vma;
			ctx.page_mask = 0;
		}
		page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
		if (page_increm > nr_pages)
			page_increm = nr_pages;
		i += page_increm;
		start += page_increm * PAGE_SIZE;
		nr_pages -= page_increm;
	} while (nr_pages);
out:
	if (ctx.pgmap)
		put_dev_pagemap(ctx.pgmap);
	return i ? i : ret;
}
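/*
 * Example (sketch; "mm", "start" and "pages" come from a hypothetical
 * caller): the @nonblocking protocol described above. The caller holds
 * mmap_sem for read; if the flag comes back 0, the lock was dropped inside
 * __get_user_pages() and must not be released again:
 *
 *	int locked = 1;
 *
 *	down_read(&mm->mmap_sem);
 *	ret = __get_user_pages(current, mm, start, 1, FOLL_GET,
 *			       pages, NULL, &locked);
 *	if (locked)
 *		up_read(&mm->mmap_sem);
 */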
static bool vma_permits_fault(struct vm_area_struct *vma,
			      unsigned int fault_flags)
{
	bool write   = !!(fault_flags & FAULT_FLAG_WRITE);
	bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
	vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;

	if (!(vm_flags & vma->vm_flags))
		return false;

	/*
	 * The architecture might have a hardware protection
	 * mechanism other than read/write that can deny access.
	 *
	 * gup always represents data access, not instruction
	 * fetches, so execute=false here:
	 */
	if (!arch_vma_access_permitted(vma, write, false, foreign))
		return false;

	return true;
}

/*
 * fixup_user_fault() - manually resolve a user page fault
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @address:	user address
 * @fault_flags: flags to pass down to handle_mm_fault()
 * @unlocked:	did we unlock the mmap_sem while retrying, maybe NULL if caller
 *		does not allow retry
 *
 * This is meant to be called in the specific scenario where for locking reasons
 * we try to access user memory in atomic context (within a pagefault_disable()
 * section), this returns -EFAULT, and we want to resolve the user fault before
 * trying again.
 *
 * Typically this is meant to be used by the futex code.
 *
 * The main difference with get_user_pages() is that this function will
 * unconditionally call handle_mm_fault() which will in turn perform all the
 * necessary SW fixup of the dirty and young bits in the PTE, while
 * get_user_pages() only guarantees to update these in the struct page.
 *
 * This is important for some architectures where those bits also gate the
 * access permission to the page because they are maintained in software. On
 * such architectures, gup() will not be enough to make a subsequent access
 * succeed.
 *
 * This function will not return with an unlocked mmap_sem. So it does not
 * have the same semantics wrt the @mm->mmap_sem as filemap_fault() does.
 */
int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long address, unsigned int fault_flags,
		     bool *unlocked)
{
	struct vm_area_struct *vma;
	vm_fault_t ret, major = 0;

	if (unlocked)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY;

retry:
	vma = find_extend_vma(mm, address);
	if (!vma || address < vma->vm_start)
		return -EFAULT;

	if (!vma_permits_fault(vma, fault_flags))
		return -EFAULT;

	ret = handle_mm_fault(vma, address, fault_flags);
	major |= ret & VM_FAULT_MAJOR;
	if (ret & VM_FAULT_ERROR) {
		int err = vm_fault_to_errno(ret, 0);

		if (err)
			return err;
		BUG();
	}

	if (ret & VM_FAULT_RETRY) {
		down_read(&mm->mmap_sem);
		if (!(fault_flags & FAULT_FLAG_TRIED)) {
			*unlocked = true;
			fault_flags &= ~FAULT_FLAG_ALLOW_RETRY;
			fault_flags |= FAULT_FLAG_TRIED;
			goto retry;
		}
	}

	if (tsk) {
		if (major)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(fixup_user_fault);
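/*
 * Example (sketch; "mm" and "address" come from a hypothetical caller):
 * resolving a fault that was hit inside a pagefault_disable() section, the
 * way the futex code does:
 *
 *	bool unlocked = false;
 *	int ret;
 *
 *	down_read(&mm->mmap_sem);
 *	ret = fixup_user_fault(current, mm, address, FAULT_FLAG_WRITE,
 *			       &unlocked);
 *	up_read(&mm->mmap_sem);
 */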
static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
						struct mm_struct *mm,
						unsigned long start,
						unsigned long nr_pages,
						struct page **pages,
						struct vm_area_struct **vmas,
						int *locked,
						unsigned int flags)
{
	long ret, pages_done;
	bool lock_dropped;

	if (locked) {
		/* if VM_FAULT_RETRY can be returned, vmas become invalid */
		BUG_ON(vmas);
		/* check caller initialized locked */
		BUG_ON(*locked != 1);
	}

	if (pages)
		flags |= FOLL_GET;

	pages_done = 0;
	lock_dropped = false;
	for (;;) {
		ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages,
				       vmas, locked);
		if (!locked)
			/* VM_FAULT_RETRY couldn't trigger, bypass */
			return ret;

		/* VM_FAULT_RETRY cannot return errors */
		if (!*locked) {
			BUG_ON(ret < 0);
			BUG_ON(ret >= nr_pages);
		}

		if (!pages)
			/* If it's a prefault don't insist harder */
			return ret;

		if (ret > 0) {
			nr_pages -= ret;
			pages_done += ret;
			if (!nr_pages)
				break;
		}
		if (*locked) {
			/*
			 * VM_FAULT_RETRY didn't trigger or it was a
			 * FOLL_NOWAIT.
			 */
			if (!pages_done)
				pages_done = ret;
			break;
		}
		/* VM_FAULT_RETRY triggered, so seek to the faulting offset */
		pages += ret;
		start += ret << PAGE_SHIFT;

		/*
		 * Repeat on the address that fired VM_FAULT_RETRY
		 * without FAULT_FLAG_ALLOW_RETRY but with
		 * FAULT_FLAG_TRIED.
		 */
		*locked = 1;
		lock_dropped = true;
		down_read(&mm->mmap_sem);
		ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
				       pages, NULL, NULL);
		if (ret != 1) {
			BUG_ON(ret > 1);
			if (!pages_done)
				pages_done = ret;
			break;
		}
		nr_pages--;
		pages_done++;
		if (!nr_pages)
			break;
		pages++;
		start += PAGE_SIZE;
	}
	if (lock_dropped && *locked) {
		/*
		 * We must let the caller know we temporarily dropped the lock
		 * and so the critical section protected by it was lost.
		 */
		up_read(&mm->mmap_sem);
		*locked = 0;
	}
	return pages_done;
}

/*
 * We can leverage the VM_FAULT_RETRY functionality in the page fault
 * paths better by using either get_user_pages_locked() or
 * get_user_pages_unlocked().
 *
 * get_user_pages_locked() is suitable to replace the form:
 *
 *      down_read(&mm->mmap_sem);
 *      do_something()
 *      get_user_pages(tsk, mm, ..., pages, NULL);
 *      up_read(&mm->mmap_sem);
 *
 *  to:
 *
 *      int locked = 1;
 *      down_read(&mm->mmap_sem);
 *      do_something()
 *      get_user_pages_locked(tsk, mm, ..., pages, &locked);
 *      if (locked)
 *          up_read(&mm->mmap_sem);
 */
long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
			   unsigned int gup_flags, struct page **pages,
			   int *locked)
{
	/*
	 * FIXME: Current FOLL_LONGTERM behavior is incompatible with
	 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
	 * vmas.  As there are no users of this flag in this call we simply
	 * disallow this option for now.
	 */
	if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
		return -EINVAL;

	return __get_user_pages_locked(current, current->mm, start, nr_pages,
				       pages, NULL, locked,
				       gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages_locked);
/*
 * get_user_pages_unlocked() is suitable to replace the form:
 *
 *      down_read(&mm->mmap_sem);
 *      get_user_pages(tsk, mm, ..., pages, NULL);
 *      up_read(&mm->mmap_sem);
 *
 *  with:
 *
 *      get_user_pages_unlocked(tsk, mm, ..., pages);
 *
 * It is functionally equivalent to get_user_pages_fast so
 * get_user_pages_fast should be used instead if specific gup_flags
 * (e.g. FOLL_FORCE) are not required.
 */
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
			     struct page **pages, unsigned int gup_flags)
{
	struct mm_struct *mm = current->mm;
	int locked = 1;
	long ret;

	/*
	 * FIXME: Current FOLL_LONGTERM behavior is incompatible with
	 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
	 * vmas.  As there are no users of this flag in this call we simply
	 * disallow this option for now.
	 */
	if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
		return -EINVAL;

	down_read(&mm->mmap_sem);
	ret = __get_user_pages_locked(current, mm, start, nr_pages, pages, NULL,
				      &locked, gup_flags | FOLL_TOUCH);
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(get_user_pages_unlocked);
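/*
 * Example (sketch; "uaddr", "npages" and "pages" come from a hypothetical
 * caller): pinning a user buffer for writing without the caller touching
 * mmap_sem at all:
 *
 *	ret = get_user_pages_unlocked(uaddr, npages, pages, FOLL_WRITE);
 */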
/*
 * get_user_pages_remote() - pin user pages in memory
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying lookup behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 * @locked:	pointer to lock flag indicating whether lock is held and
 *		subsequently whether VM_FAULT_RETRY functionality can be
 *		utilised. Lock must initially be held.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held for read or write.
 *
 * get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re-faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
 * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
 * be called after the page is finished with, and before put_page is called.
 *
 * get_user_pages is typically used for fewer-copy IO operations, to get a
 * handle on the memory by some means other than accesses via the user virtual
 * addresses. The pages may be submitted for DMA to devices or accessed via
 * their kernel linear mapping (via the kmap APIs). Care should be taken to
 * use the correct cache flushing APIs.
 *
 * See also get_user_pages_fast, for performance critical applications.
 *
 * get_user_pages should be phased out in favor of
 * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
 * should use get_user_pages because it cannot pass
 * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
 */
long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas, int *locked)
{
	/*
	 * FIXME: Current FOLL_LONGTERM behavior is incompatible with
	 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
	 * vmas.  As there are no users of this flag in this call we simply
	 * disallow this option for now.
	 */
	if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
		return -EINVAL;

	return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
				       locked,
				       gup_flags | FOLL_TOUCH | FOLL_REMOTE);
}
EXPORT_SYMBOL(get_user_pages_remote);

#if defined(CONFIG_FS_DAX) || defined(CONFIG_CMA)
static bool check_dax_vmas(struct vm_area_struct **vmas, long nr_pages)
{
	long i;
	struct vm_area_struct *vma_prev = NULL;

	for (i = 0; i < nr_pages; i++) {
		struct vm_area_struct *vma = vmas[i];

		if (vma == vma_prev)
			continue;

		vma_prev = vma;

		if (vma_is_fsdax(vma))
			return true;
	}
	return false;
}
#ifdef CONFIG_CMA
static struct page *new_non_cma_page(struct page *page, unsigned long private)
{
	/*
	 * We want to make sure we allocate the new page from the same node
	 * as the source page.
	 */
	int nid = page_to_nid(page);
	/*
	 * Trying to allocate a page for migration. Ignore allocation
	 * failure warnings. We don't force __GFP_THISNODE here because
	 * this node here is the node where we have CMA reservation and
	 * in some cases these nodes will have very little non-movable
	 * memory available for allocation.
	 */
	gfp_t gfp_mask = GFP_USER | __GFP_NOWARN;

	if (PageHighMem(page))
		gfp_mask |= __GFP_HIGHMEM;

#ifdef CONFIG_HUGETLB_PAGE
	if (PageHuge(page)) {
		struct hstate *h = page_hstate(page);
		/*
		 * We don't want to dequeue from the pool because pool pages
		 * will mostly be from the CMA region.
		 */
		return alloc_migrate_huge_page(h, gfp_mask, nid, NULL);
	}
#endif
	if (PageTransHuge(page)) {
		struct page *thp;
		/*
		 * ignore allocation failure warnings
		 */
		gfp_t thp_gfpmask = GFP_TRANSHUGE | __GFP_NOWARN;

		/*
		 * Remove the movable mask so that we don't allocate from
		 * CMA area again.
		 */
		thp_gfpmask &= ~__GFP_MOVABLE;
		thp = __alloc_pages_node(nid, thp_gfpmask, HPAGE_PMD_ORDER);
		if (!thp)
			return NULL;
		prep_transhuge_page(thp);
		return thp;
	}

	return __alloc_pages_node(nid, gfp_mask, 0);
}

static long check_and_migrate_cma_pages(struct task_struct *tsk,
					struct mm_struct *mm,
					unsigned long start,
					unsigned long nr_pages,
					struct page **pages,
					struct vm_area_struct **vmas,
					unsigned int gup_flags)
{
	long i;
	bool drain_allow = true;
	bool migrate_allow = true;
	LIST_HEAD(cma_page_list);

check_again:
	for (i = 0; i < nr_pages; i++) {
		/*
		 * If we get a page from the CMA zone, since we are going to
		 * be pinning these entries, we might as well move them out
		 * of the CMA zone if possible.
		 */
		if (is_migrate_cma_page(pages[i])) {

			struct page *head = compound_head(pages[i]);

			if (PageHuge(head)) {
				isolate_huge_page(head, &cma_page_list);
			} else {
				if (!PageLRU(head) && drain_allow) {
					lru_add_drain_all();
					drain_allow = false;
				}

				if (!isolate_lru_page(head)) {
					list_add_tail(&head->lru, &cma_page_list);
					mod_node_page_state(page_pgdat(head),
							    NR_ISOLATED_ANON +
							    page_is_file_cache(head),
							    hpage_nr_pages(head));
				}
			}
		}
	}

	if (!list_empty(&cma_page_list)) {
		/*
		 * drop the above get_user_pages reference.
		 */
		for (i = 0; i < nr_pages; i++)
			put_page(pages[i]);

		if (migrate_pages(&cma_page_list, new_non_cma_page,
				  NULL, 0, MIGRATE_SYNC, MR_CONTIG_RANGE)) {
			/*
			 * some of the pages failed migration. Do get_user_pages
			 * without migration.
			 */
			migrate_allow = false;

			if (!list_empty(&cma_page_list))
				putback_movable_pages(&cma_page_list);
		}
		/*
		 * We did migrate all the pages; try to get the page references
		 * again, migrating any new CMA pages which we failed to isolate
		 * earlier.
		 */
		nr_pages = __get_user_pages_locked(tsk, mm, start, nr_pages,
						   pages, vmas, NULL,
						   gup_flags);

		if ((nr_pages > 0) && migrate_allow) {
			drain_allow = true;
			goto check_again;
		}
	}

	return nr_pages;
}
#else
static long check_and_migrate_cma_pages(struct task_struct *tsk,
					struct mm_struct *mm,
					unsigned long start,
					unsigned long nr_pages,
					struct page **pages,
					struct vm_area_struct **vmas,
					unsigned int gup_flags)
{
	return nr_pages;
}
#endif
/*
 * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which
 * allows us to process the FOLL_LONGTERM flag.
 */
static long __gup_longterm_locked(struct task_struct *tsk,
				  struct mm_struct *mm,
				  unsigned long start,
				  unsigned long nr_pages,
				  struct page **pages,
				  struct vm_area_struct **vmas,
				  unsigned int gup_flags)
{
	struct vm_area_struct **vmas_tmp = vmas;
	unsigned long flags = 0;
	long rc, i;

	if (gup_flags & FOLL_LONGTERM) {
		if (!pages)
			return -EINVAL;

		if (!vmas_tmp) {
			vmas_tmp = kcalloc(nr_pages,
					   sizeof(struct vm_area_struct *),
					   GFP_KERNEL);
			if (!vmas_tmp)
				return -ENOMEM;
		}
		flags = memalloc_nocma_save();
	}

	rc = __get_user_pages_locked(tsk, mm, start, nr_pages, pages,
				     vmas_tmp, NULL, gup_flags);

	if (gup_flags & FOLL_LONGTERM) {
		memalloc_nocma_restore(flags);
		if (rc < 0)
			goto out;

		if (check_dax_vmas(vmas_tmp, rc)) {
			for (i = 0; i < rc; i++)
				put_page(pages[i]);
			rc = -EOPNOTSUPP;
			goto out;
		}

		rc = check_and_migrate_cma_pages(tsk, mm, start, rc, pages,
						 vmas_tmp, gup_flags);
	}

out:
	if (vmas_tmp != vmas)
		kfree(vmas_tmp);
	return rc;
}
#else /* !CONFIG_FS_DAX && !CONFIG_CMA */
static __always_inline long __gup_longterm_locked(struct task_struct *tsk,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long nr_pages,
						  struct page **pages,
						  struct vm_area_struct **vmas,
						  unsigned int flags)
{
	return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
				       NULL, flags);
}
#endif /* CONFIG_FS_DAX || CONFIG_CMA */

/*
 * This is the same as get_user_pages_remote(), just with a
 * less-flexible calling convention where we assume that the task
 * and mm being operated on are the current task's and don't allow
 * passing of a locked parameter.  We also obviously don't pass
 * FOLL_REMOTE in here.
 */
long get_user_pages(unsigned long start, unsigned long nr_pages,
		    unsigned int gup_flags, struct page **pages,
		    struct vm_area_struct **vmas)
{
	return __gup_longterm_locked(current, current->mm, start, nr_pages,
				     pages, vmas, gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages);
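/*
 * Example (sketch; "uaddr", "npages" and "pages" come from a hypothetical
 * caller): a long-lived pin, e.g. for RDMA-style usage, passes
 * FOLL_LONGTERM so that FS DAX mappings are rejected and CMA pages are
 * migrated away before the pin is taken:
 *
 *	ret = get_user_pages(uaddr, npages, FOLL_WRITE | FOLL_LONGTERM,
 *			     pages, NULL);
 */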
/**
 * populate_vma_page_range() -  populate a range of pages in the vma.
 * @vma:   target vma
 * @start: start address
 * @end:   end address
 * @nonblocking: cleared to 0 if the mmap_sem is released, or NULL (see below)
 *
 * This takes care of mlocking the pages too if VM_LOCKED is set.
 *
 * return 0 on success, negative error code on error.
 *
 * vma->vm_mm->mmap_sem must be held.
 *
 * If @nonblocking is NULL, it may be held for read or write and will
 * be unperturbed.
 *
 * If @nonblocking is non-NULL, it must be held for read only and may be
 * released.  If it's released, *@nonblocking will be set to 0.
 */
long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *nonblocking)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long nr_pages = (end - start) / PAGE_SIZE;
	int gup_flags;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(end & ~PAGE_MASK);
	VM_BUG_ON_VMA(start < vma->vm_start, vma);
	VM_BUG_ON_VMA(end > vma->vm_end, vma);
	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);

	gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
	if (vma->vm_flags & VM_LOCKONFAULT)
		gup_flags &= ~FOLL_POPULATE;
	/*
	 * We want to touch writable mappings with a write fault in order
	 * to break COW, except for shared mappings because these don't COW
	 * and we would not want to dirty them for nothing.
	 */
	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
		gup_flags |= FOLL_WRITE;

	/*
	 * We want mlock to succeed for regions that have any permissions
	 * other than PROT_NONE.
	 */
	if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
		gup_flags |= FOLL_FORCE;

	/*
	 * We made sure addr is within a VMA, so the following will
	 * not result in a stack expansion that recurses back here.
	 */
	return __get_user_pages(current, mm, start, nr_pages, gup_flags,
				NULL, NULL, nonblocking);
}

/*
 * __mm_populate - populate and/or mlock pages within a range of address space.
 *
 * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
 * flags. VMAs must be already marked with the desired vm_flags, and
 * mmap_sem must not be held.
 */
int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
{
	struct mm_struct *mm = current->mm;
	unsigned long end, nstart, nend;
	struct vm_area_struct *vma = NULL;
	int locked = 0;
	long ret = 0;

	end = start + len;

	for (nstart = start; nstart < end; nstart = nend) {
		/*
		 * We want to fault in pages for [nstart; end) address range.
		 * Find first corresponding VMA.
		 */
		if (!locked) {
			locked = 1;
			down_read(&mm->mmap_sem);
			vma = find_vma(mm, nstart);
		} else if (nstart >= vma->vm_end)
			vma = vma->vm_next;
		if (!vma || vma->vm_start >= end)
			break;
		/*
		 * Set [nstart; nend) to intersection of desired address
		 * range with the first VMA. Also, skip undesirable VMA types.
		 */
		nend = min(end, vma->vm_end);
		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
			continue;
		if (nstart < vma->vm_start)
			nstart = vma->vm_start;
		/*
		 * Now fault in a range of pages. populate_vma_page_range()
		 * double checks the vma flags, so that it won't mlock pages
		 * if the vma was already munlocked.
		 */
		ret = populate_vma_page_range(vma, nstart, nend, &locked);
		if (ret < 0) {
			if (ignore_errors) {
				ret = 0;
				continue;	/* continue at next VMA */
			}
			break;
		}
		nend = nstart + ret * PAGE_SIZE;
		ret = 0;
	}
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;	/* 0 or negative error code */
}
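/*
 * Example (sketch; "start" and "len" are page-aligned values from a
 * hypothetical caller): mlock()-style population of an already-mapped
 * range, ignoring per-VMA errors the way MAP_POPULATE does:
 *
 *	error = __mm_populate(start, len, 1);
 */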
/**
 * get_dump_page() - pin user page in memory while writing it to core dump
 * @addr: user address
 *
 * Returns struct page pointer of user page pinned for dump,
 * to be freed afterwards by put_page().
 *
 * Returns NULL on any kind of failure - a hole must then be inserted into
 * the corefile, to preserve alignment with its headers; and also returns
 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
 * allowing a hole to be left in the corefile to save diskspace.
 *
 * Called without mmap_sem, but after all other threads have been killed.
 */
#ifdef CONFIG_ELF_CORE
struct page *get_dump_page(unsigned long addr)
{
	struct vm_area_struct *vma;
	struct page *page;

	if (__get_user_pages(current, current->mm, addr, 1,
			     FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
			     NULL) < 1)
		return NULL;
	flush_cache_page(vma, addr, page_to_pfn(page));
	return page;
}
#endif /* CONFIG_ELF_CORE */

/*
 * Generic Fast GUP
 *
 * get_user_pages_fast attempts to pin user pages by walking the page
 * tables directly and avoids taking locks. Thus the walker needs to be
 * protected from page table pages being freed from under it, and should
 * block any THP splits.
 *
 * One way to achieve this is to have the walker disable interrupts, and
 * rely on IPIs from the TLB flushing code blocking before the page table
 * pages are freed. This is unsuitable for architectures that do not need
 * to broadcast an IPI when invalidating TLBs.
 *
 * Another way to achieve this is to batch up page table containing pages
 * belonging to more than one mm_user, then rcu_sched a callback to free those
 * pages. Disabling interrupts will allow the fast_gup walker to both block
 * the rcu_sched callback, and an IPI that we broadcast for splitting THPs
 * (which is a relatively rare event). The code below adopts this strategy.
 *
 * Before activating this code, please be aware that the following assumptions
 * are currently made:
 *
 *  *) Either HAVE_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to
 *  free pages containing page tables or TLB flushing requires IPI broadcast.
 *
 *  *) ptes can be read atomically by the architecture.
 *
 *  *) access_ok is sufficient to validate userspace address ranges.
 *
 * The last two assumptions can be relaxed by the addition of helper functions.
 *
 * This code is based heavily on the PowerPC implementation by Nick Piggin.
 */
#ifdef CONFIG_HAVE_GENERIC_GUP

#ifndef gup_get_pte
/*
 * We assume that the PTE can be read atomically. If this is not the case for
 * your architecture, please provide the helper.
 */
static inline pte_t gup_get_pte(pte_t *ptep)
{
	return READ_ONCE(*ptep);
}
#endif

static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages)
{
	while ((*nr) - nr_start) {
		struct page *page = pages[--(*nr)];

		ClearPageReferenced(page);
		put_page(page);
	}
}
/*
 * Return the compound head page with ref appropriately incremented,
 * or NULL if that failed.
 */
static inline struct page *try_get_compound_head(struct page *page, int refs)
{
	struct page *head = compound_head(page);
	if (WARN_ON_ONCE(page_ref_count(head) < 0))
		return NULL;
	if (unlikely(!page_cache_add_speculative(head, refs)))
		return NULL;
	return head;
}

#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
			 unsigned int flags, struct page **pages, int *nr)
{
	struct dev_pagemap *pgmap = NULL;
	int nr_start = *nr, ret = 0;
	pte_t *ptep, *ptem;

	ptem = ptep = pte_offset_map(&pmd, addr);
	do {
		pte_t pte = gup_get_pte(ptep);
		struct page *head, *page;

		/*
		 * Similar to the PMD case below, NUMA hinting must take slow
		 * path using the pte_protnone check.
		 */
		if (pte_protnone(pte))
			goto pte_unmap;

		if (!pte_access_permitted(pte, flags & FOLL_WRITE))
			goto pte_unmap;

		if (pte_devmap(pte)) {
			if (unlikely(flags & FOLL_LONGTERM))
				goto pte_unmap;

			pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
			if (unlikely(!pgmap)) {
				undo_dev_pagemap(nr, nr_start, pages);
				goto pte_unmap;
			}
		} else if (pte_special(pte))
			goto pte_unmap;

		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);

		head = try_get_compound_head(page, 1);
		if (!head)
			goto pte_unmap;

		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(head);
			goto pte_unmap;
		}

		VM_BUG_ON_PAGE(compound_head(page) != head, page);

		SetPageReferenced(page);
		pages[*nr] = page;
		(*nr)++;

	} while (ptep++, addr += PAGE_SIZE, addr != end);

	ret = 1;

pte_unmap:
	if (pgmap)
		put_dev_pagemap(pgmap);
	pte_unmap(ptem);
	return ret;
}
#else

/*
 * If we can't determine whether or not a pte is special, then fail immediately
 * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
 * to be special.
 *
 * For a futex to be placed on a THP tail page, get_futex_key requires a
 * __get_user_pages_fast implementation that can pin pages. Thus it's still
 * useful to have gup_huge_pmd even if we can't operate on ptes.
 */
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
			 unsigned int flags, struct page **pages, int *nr)
{
	return 0;
}
#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */

#if defined(__HAVE_ARCH_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
static int __gup_device_huge(unsigned long pfn, unsigned long addr,
		unsigned long end, struct page **pages, int *nr)
{
	int nr_start = *nr;
	struct dev_pagemap *pgmap = NULL;

	do {
		struct page *page = pfn_to_page(pfn);

		pgmap = get_dev_pagemap(pfn, pgmap);
		if (unlikely(!pgmap)) {
			undo_dev_pagemap(nr, nr_start, pages);
			return 0;
		}
		SetPageReferenced(page);
		pages[*nr] = page;
		get_page(page);
		(*nr)++;
		pfn++;
	} while (addr += PAGE_SIZE, addr != end);

	if (pgmap)
		put_dev_pagemap(pgmap);
	return 1;
}

static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
		unsigned long end, struct page **pages, int *nr)
{
	unsigned long fault_pfn;
	int nr_start = *nr;

	fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	if (!__gup_device_huge(fault_pfn, addr, end, pages, nr))
		return 0;

	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
		undo_dev_pagemap(nr, nr_start, pages);
		return 0;
	}
	return 1;
}

static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
		unsigned long end, struct page **pages, int *nr)
{
	unsigned long fault_pfn;
	int nr_start = *nr;

	fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	if (!__gup_device_huge(fault_pfn, addr, end, pages, nr))
		return 0;

	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
		undo_dev_pagemap(nr, nr_start, pages);
		return 0;
	}
	return 1;
}
#else
static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
		unsigned long end, struct page **pages, int *nr)
{
	BUILD_BUG();
	return 0;
}

static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
		unsigned long end, struct page **pages, int *nr)
{
	BUILD_BUG();
	return 0;
}
#endif

static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
		unsigned long end, unsigned int flags, struct page **pages, int *nr)
{
	struct page *head, *page;
	int refs;

	if (!pmd_access_permitted(orig, flags & FOLL_WRITE))
		return 0;

	if (pmd_devmap(orig)) {
		if (unlikely(flags & FOLL_LONGTERM))
			return 0;
		return __gup_device_huge_pmd(orig, pmdp, addr, end, pages, nr);
	}

	refs = 0;
	page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	do {
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	head = try_get_compound_head(pmd_page(orig), refs);
	if (!head) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	SetPageReferenced(head);
	return 1;
}
static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
		unsigned long end, unsigned int flags, struct page **pages, int *nr)
{
	struct page *head, *page;
	int refs;

	if (!pud_access_permitted(orig, flags & FOLL_WRITE))
		return 0;

	if (pud_devmap(orig)) {
		if (unlikely(flags & FOLL_LONGTERM))
			return 0;
		return __gup_device_huge_pud(orig, pudp, addr, end, pages, nr);
	}

	refs = 0;
	page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	do {
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	head = try_get_compound_head(pud_page(orig), refs);
	if (!head) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	SetPageReferenced(head);
	return 1;
}

static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
			unsigned long end, unsigned int flags,
			struct page **pages, int *nr)
{
	int refs;
	struct page *head, *page;

	if (!pgd_access_permitted(orig, flags & FOLL_WRITE))
		return 0;

	BUILD_BUG_ON(pgd_devmap(orig));
	refs = 0;
	page = pgd_page(orig) + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
	do {
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	head = try_get_compound_head(pgd_page(orig), refs);
	if (!head) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	SetPageReferenced(head);
	return 1;
}
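
/*
 * Note on the per-level walkers below (commentary only): each
 * gup_*_range() returns 0 as soon as it sees an entry it cannot handle
 * locklessly (non-present, protnone, devmap under FOLL_LONGTERM, or a
 * racing update). That stops the fast walk, but pages already recorded
 * in pages[0..*nr) stay pinned; get_user_pages_fast() then resumes at
 * start + (*nr << PAGE_SHIFT) via the slow path.
 */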
static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
		unsigned int flags, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_offset(&pud, addr);
	do {
		pmd_t pmd = READ_ONCE(*pmdp);

		next = pmd_addr_end(addr, end);
		if (!pmd_present(pmd))
			return 0;

		if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) ||
			     pmd_devmap(pmd))) {
			/*
			 * NUMA hinting faults need to be handled in the GUP
			 * slowpath for accounting purposes and so that they
			 * can be serialised against THP migration.
			 */
			if (pmd_protnone(pmd))
				return 0;

			if (!gup_huge_pmd(pmd, pmdp, addr, next, flags,
					pages, nr))
				return 0;

		} else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
			/*
			 * Architectures may use different formats for
			 * hugetlbfs pmds and THP pmds.
			 */
			if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
					 PMD_SHIFT, next, flags, pages, nr))
				return 0;
		} else if (!gup_pte_range(pmd, addr, next, flags, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}

static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end,
			 unsigned int flags, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp;

	pudp = pud_offset(&p4d, addr);
	do {
		pud_t pud = READ_ONCE(*pudp);

		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (unlikely(pud_huge(pud))) {
			if (!gup_huge_pud(pud, pudp, addr, next, flags,
					  pages, nr))
				return 0;
		} else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
			if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
					 PUD_SHIFT, next, flags, pages, nr))
				return 0;
		} else if (!gup_pmd_range(pud, addr, next, flags, pages, nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}

static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end,
			 unsigned int flags, struct page **pages, int *nr)
{
	unsigned long next;
	p4d_t *p4dp;

	p4dp = p4d_offset(&pgd, addr);
	do {
		p4d_t p4d = READ_ONCE(*p4dp);

		next = p4d_addr_end(addr, end);
		if (p4d_none(p4d))
			return 0;
		BUILD_BUG_ON(p4d_huge(p4d));
		if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) {
			if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr,
					 P4D_SHIFT, next, flags, pages, nr))
				return 0;
		} else if (!gup_pud_range(p4d, addr, next, flags, pages, nr))
			return 0;
	} while (p4dp++, addr = next, addr != end);

	return 1;
}

static void gup_pgd_range(unsigned long addr, unsigned long end,
		unsigned int flags, struct page **pages, int *nr)
{
	unsigned long next;
	pgd_t *pgdp;

	pgdp = pgd_offset(current->mm, addr);
	do {
		pgd_t pgd = READ_ONCE(*pgdp);

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			return;
		if (unlikely(pgd_huge(pgd))) {
			if (!gup_huge_pgd(pgd, pgdp, addr, next, flags,
					  pages, nr))
				return;
		} else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
			if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
					 PGDIR_SHIFT, next, flags, pages, nr))
				return;
		} else if (!gup_p4d_range(pgd, addr, next, flags, pages, nr))
			return;
	} while (pgdp++, addr = next, addr != end);
}

#ifndef gup_fast_permitted
/*
 * Check if it's allowed to use __get_user_pages_fast() for the range, or
 * whether we need to fall back to the slow version:
 */
bool gup_fast_permitted(unsigned long start, int nr_pages)
{
	unsigned long len, end;

	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;
	return end >= start;
}
#endif
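
/*
 * Sketch of an architecture override (hypothetical, for illustration
 * only): an arch can replace the generic range check by defining the
 * gup_fast_permitted macro in its headers, e.g. to also reject ranges
 * that extend past the user address space:
 *
 *	#define gup_fast_permitted gup_fast_permitted
 *	static inline bool gup_fast_permitted(unsigned long start, int nr_pages)
 *	{
 *		unsigned long len = (unsigned long)nr_pages << PAGE_SHIFT;
 *		unsigned long end = start + len;
 *
 *		return end >= start && end <= TASK_SIZE_MAX;
 *	}
 */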
/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 * Unlike get_user_pages_fast(), this always returns the number of pages
 * pinned: 0 if no pages were pinned, never a negative error code.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
{
	unsigned long len, end;
	unsigned long flags;
	int nr = 0;

	start &= PAGE_MASK;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;

	if (unlikely(!access_ok((void __user *)start, len)))
		return 0;

	/*
	 * Disable interrupts. We use the nested form as we can already have
	 * interrupts disabled by get_futex_key.
	 *
	 * With interrupts disabled, we block page table pages from being
	 * freed from under us. See struct mmu_table_batch comments in
	 * include/asm-generic/tlb.h for more details.
	 *
	 * We do not adopt an rcu_read_lock() here as we also want to
	 * block IPIs that come from THPs splitting.
	 */

	if (gup_fast_permitted(start, nr_pages)) {
		local_irq_save(flags);
		gup_pgd_range(start, end, write ? FOLL_WRITE : 0, pages, &nr);
		local_irq_restore(flags);
	}

	return nr;
}

static int __gup_longterm_unlocked(unsigned long start, int nr_pages,
				   unsigned int gup_flags, struct page **pages)
{
	int ret;

	/*
	 * FIXME: FOLL_LONGTERM does not work with
	 * get_user_pages_unlocked() (see comments in that function)
	 */
	if (gup_flags & FOLL_LONGTERM) {
		down_read(&current->mm->mmap_sem);
		ret = __gup_longterm_locked(current, current->mm,
					    start, nr_pages,
					    pages, NULL, gup_flags);
		up_read(&current->mm->mmap_sem);
	} else {
		ret = get_user_pages_unlocked(start, nr_pages,
					      pages, gup_flags);
	}

	return ret;
}

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Attempt to pin user pages in memory without taking mm->mmap_sem.
 * If not successful, it will fall back to taking the lock and
 * calling get_user_pages().
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 */
int get_user_pages_fast(unsigned long start, int nr_pages,
			unsigned int gup_flags, struct page **pages)
{
	unsigned long addr, len, end;
	int nr = 0, ret = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;

	if (nr_pages <= 0)
		return 0;

	if (unlikely(!access_ok((void __user *)start, len)))
		return -EFAULT;

	if (gup_fast_permitted(start, nr_pages)) {
		local_irq_disable();
		gup_pgd_range(addr, end, gup_flags, pages, &nr);
		local_irq_enable();
		ret = nr;
	}

	if (nr < nr_pages) {
		/* Try to get the remaining pages with get_user_pages */
		start += nr << PAGE_SHIFT;
		pages += nr;

		ret = __gup_longterm_unlocked(start, nr_pages - nr,
					      gup_flags, pages);

		/* Have to be a bit careful with return values */
		if (nr > 0) {
			if (ret < 0)
				ret = nr;
			else
				ret += nr;
		}
	}

	return ret;
}

#endif /* CONFIG_HAVE_GENERIC_GUP */
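
/*
 * Usage example (illustrative only, not part of this file): a typical
 * driver pattern pins pages, lets a device DMA into them, then dirties
 * and releases them. "buf", "npages" and "pages" are hypothetical
 * caller state:
 *
 *	int nr = get_user_pages_fast(buf, npages, FOLL_WRITE, pages);
 *
 *	if (nr < 0)
 *		return nr;
 *	... let the device DMA into the nr pinned pages ...
 *	put_user_pages_dirty_lock(pages, nr);
 *
 * A real caller must also handle nr < npages, i.e. a partial pin.
 */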