#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/hugetlb.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "internal.h"

static struct page *no_page_table(struct vm_area_struct *vma,
		unsigned int flags)
{
	/*
	 * When core dumping an enormous anonymous area that nobody
	 * has touched so far, we don't want to allocate unnecessary pages or
	 * page tables.  Return error instead of NULL to skip handle_mm_fault,
	 * then get_dump_page() will return NULL to leave a hole in the dump.
	 * But we can only make this optimization where a hole would surely
	 * be zero-filled if handle_mm_fault() actually did handle it.
	 */
	if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault))
		return ERR_PTR(-EFAULT);
	return NULL;
}

static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
		pte_t *pte, unsigned int flags)
{
	/* No page to get reference */
	if (flags & FOLL_GET)
		return -EFAULT;

	if (flags & FOLL_TOUCH) {
		pte_t entry = *pte;

		if (flags & FOLL_WRITE)
			entry = pte_mkdirty(entry);
		entry = pte_mkyoung(entry);

		if (!pte_same(*pte, entry)) {
			set_pte_at(vma->vm_mm, address, pte, entry);
			update_mmu_cache(vma, address, pte);
		}
	}

	/* Proper page table entry exists, but no corresponding struct page */
	return -EEXIST;
}

static struct page *follow_page_pte(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd, unsigned int flags)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	spinlock_t *ptl;
	pte_t *ptep, pte;

retry:
	if (unlikely(pmd_bad(*pmd)))
		return no_page_table(vma, flags);

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!pte_present(pte)) {
		swp_entry_t entry;
		/*
		 * KSM's break_ksm() relies upon recognizing a ksm page
		 * even while it is being migrated, so for that case we
		 * need migration_entry_wait().
		 */
		if (likely(!(flags & FOLL_MIGRATION)))
			goto no_page;
		if (pte_none(pte))
			goto no_page;
		entry = pte_to_swp_entry(pte);
		if (!is_migration_entry(entry))
			goto no_page;
		pte_unmap_unlock(ptep, ptl);
		migration_entry_wait(mm, pmd, address);
		goto retry;
	}
	if ((flags & FOLL_NUMA) && pte_protnone(pte))
		goto no_page;
	if ((flags & FOLL_WRITE) && !pte_write(pte)) {
		pte_unmap_unlock(ptep, ptl);
		return NULL;
	}

	page = vm_normal_page(vma, address, pte);
	if (unlikely(!page)) {
		if (flags & FOLL_DUMP) {
			/* Avoid special (like zero) pages in core dumps */
			page = ERR_PTR(-EFAULT);
			goto out;
		}

		if (is_zero_pfn(pte_pfn(pte))) {
			page = pte_page(pte);
		} else {
			int ret;

			ret = follow_pfn_pte(vma, address, ptep, flags);
			page = ERR_PTR(ret);
			goto out;
		}
	}

	if (flags & FOLL_GET)
		get_page_foll(page);
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to use
		 * mark_page_accessed().
		 */
		mark_page_accessed(page);
	}
	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		/*
		 * The preliminary mapping check is mainly to avoid the
		 * pointless overhead of lock_page on the ZERO_PAGE
		 * which might bounce very badly if there is contention.
		 *
		 * If the page is already locked, we don't need to
		 * handle it now - vmscan will handle it later if and
		 * when it attempts to reclaim the page.
		 */
		if (page->mapping && trylock_page(page)) {
			lru_add_drain();	/* push cached pages to LRU */
			/*
			 * Because we lock page here, and migration is
			 * blocked by the pte's page reference, and we
			 * know the page is still mapped, we don't even
			 * need to check for file-cache page truncation.
			 */
			mlock_vma_page(page);
			unlock_page(page);
		}
	}
out:
	pte_unmap_unlock(ptep, ptl);
	return page;
no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return NULL;
	return no_page_table(vma, flags);
}

/**
 * follow_page_mask - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 * @page_mask: on output, *page_mask is set according to the size of the page
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * Returns the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      unsigned int *page_mask)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	*page_mask = 0;

	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
	if (!IS_ERR(page)) {
		BUG_ON(flags & FOLL_GET);
		return page;
	}

	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return no_page_table(vma, flags);

	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return no_page_table(vma, flags);
	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pud(mm, address, pud, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (unlikely(pud_bad(*pud)))
		return no_page_table(vma, flags);

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return no_page_table(vma, flags);
	if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pmd(mm, address, pmd, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
		return no_page_table(vma, flags);
	if (pmd_trans_huge(*pmd)) {
		if (flags & FOLL_SPLIT) {
			split_huge_page_pmd(vma, address, pmd);
			return follow_page_pte(vma, address, pmd, flags);
		}
		ptl = pmd_lock(mm, pmd);
		if (likely(pmd_trans_huge(*pmd))) {
			if (unlikely(pmd_trans_splitting(*pmd))) {
				spin_unlock(ptl);
				wait_split_huge_page(vma->anon_vma, pmd);
			} else {
				page = follow_trans_huge_pmd(vma, address,
							     pmd, flags);
				spin_unlock(ptl);
				*page_mask = HPAGE_PMD_NR - 1;
				return page;
			}
		} else
			spin_unlock(ptl);
	}
	return follow_page_pte(vma, address, pmd, flags);
}
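
/*
 * A minimal sketch of how a caller might use follow_page_mask(); this is
 * illustrative only and not lifted from an in-tree user.  It assumes the
 * caller holds mmap_sem and has already looked up "vma" for "addr":
 *
 *	unsigned int page_mask;
 *	struct page *page;
 *
 *	page = follow_page_mask(vma, addr, FOLL_GET, &page_mask);
 *	if (!page || IS_ERR(page))
 *		return -EFAULT;
 *	... inspect the page ...
 *	put_page(page);
 *
 * A NULL return only means nothing is mapped right now; a real caller
 * would normally fault the page in and retry, which is exactly what
 * __get_user_pages() below does via faultin_page().
 */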

static int get_gate_page(struct mm_struct *mm, unsigned long address,
		unsigned int gup_flags, struct vm_area_struct **vma,
		struct page **page)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret = -EFAULT;

	/* user gate pages are read-only */
	if (gup_flags & FOLL_WRITE)
		return -EFAULT;
	if (address > TASK_SIZE)
		pgd = pgd_offset_k(address);
	else
		pgd = pgd_offset_gate(mm, address);
	BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, address);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return -EFAULT;
	VM_BUG_ON(pmd_trans_huge(*pmd));
	pte = pte_offset_map(pmd, address);
	if (pte_none(*pte))
		goto unmap;
	*vma = get_gate_vma(mm);
	if (!page)
		goto out;
	*page = vm_normal_page(*vma, address, *pte);
	if (!*page) {
		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
			goto unmap;
		*page = pte_page(*pte);
	}
	get_page(*page);
out:
	ret = 0;
unmap:
	pte_unmap(pte);
	return ret;
}

/*
 * mmap_sem must be held on entry.  If @nonblocking != NULL and
 * *@flags does not include FOLL_NOWAIT, the mmap_sem may be released.
 * If it is, *@nonblocking will be set to 0 and -EBUSY returned.
 */
static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
		unsigned long address, unsigned int *flags, int *nonblocking)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned int fault_flags = 0;
	int ret;

	/* mlock all present pages, but do not fault in new pages */
	if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
		return -ENOENT;
	/* For mm_populate(), just skip the stack guard page. */
	if ((*flags & FOLL_POPULATE) &&
			(stack_guard_page_start(vma, address) ||
			 stack_guard_page_end(vma, address + PAGE_SIZE)))
		return -ENOENT;
	if (*flags & FOLL_WRITE)
		fault_flags |= FAULT_FLAG_WRITE;
	if (nonblocking)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY;
	if (*flags & FOLL_NOWAIT)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
	if (*flags & FOLL_TRIED) {
		VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY);
		fault_flags |= FAULT_FLAG_TRIED;
	}

	ret = handle_mm_fault(mm, vma, address, fault_flags);
	if (ret & VM_FAULT_ERROR) {
		if (ret & VM_FAULT_OOM)
			return -ENOMEM;
		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
			return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT;
		if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
			return -EFAULT;
		BUG();
	}

	if (tsk) {
		if (ret & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}

	if (ret & VM_FAULT_RETRY) {
		if (nonblocking)
			*nonblocking = 0;
		return -EBUSY;
	}

	/*
	 * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
	 * necessary, even if maybe_mkwrite decided not to set pte_write. We
	 * can thus safely do subsequent page lookups as if they were reads.
	 * But only do so when looping for pte_write is futile: in some cases
	 * userspace may also be wanting to write to the gotten user page,
	 * which a read fault here might prevent (a readonly page might get
	 * reCOWed by userspace write).
	 */
	if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
		*flags &= ~FOLL_WRITE;
	return 0;
}

static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
{
	vm_flags_t vm_flags = vma->vm_flags;

	if (vm_flags & (VM_IO | VM_PFNMAP))
		return -EFAULT;

	if (gup_flags & FOLL_WRITE) {
		if (!(vm_flags & VM_WRITE)) {
			if (!(gup_flags & FOLL_FORCE))
				return -EFAULT;
			/*
			 * We used to let the write,force case do COW in a
			 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
			 * set a breakpoint in a read-only mapping of an
			 * executable, without corrupting the file (yet only
			 * when that file had been opened for writing!).
			 * Anon pages in shared mappings are surprising: now
			 * just reject it.
			 */
			if (!is_cow_mapping(vm_flags)) {
				WARN_ON_ONCE(vm_flags & VM_MAYWRITE);
				return -EFAULT;
			}
		}
	} else if (!(vm_flags & VM_READ)) {
		if (!(gup_flags & FOLL_FORCE))
			return -EFAULT;
		/*
		 * Is there actually any vma we can reach here which does not
		 * have VM_MAYREAD set?
		 */
		if (!(vm_flags & VM_MAYREAD))
			return -EFAULT;
	}
	return 0;
}
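
/*
 * For illustration (a sketch, not an in-tree caller): the classic user of
 * the FOLL_FORCE exception above is ptrace-style access, e.g. poking a
 * breakpoint into a read-only private mapping of an executable.  Passing
 * force=1 (which becomes FOLL_FORCE) lets check_vma_flags() accept the
 * !VM_WRITE vma, and the write fault then breaks COW into a private
 * anonymous page.  "child", "addr" and "page" are placeholders, and the
 * "1, 1" arguments below are write=1 and force=1:
 *
 *	down_read(&mm->mmap_sem);
 *	got = get_user_pages(child, mm, addr & PAGE_MASK, 1,
 *			     1, 1, &page, NULL);
 *	up_read(&mm->mmap_sem);
 *	if (got == 1) {
 *		... patch the page through kmap() ...
 *		set_page_dirty_lock(page);
 *		put_page(page);
 *	}
 */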

/**
 * __get_user_pages() - pin user pages in memory
 * @tsk:	task_struct of target task
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 * @nonblocking: whether waiting for disk IO or mmap_sem contention
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held.  It may be released.  See below.
 *
 * __get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * __get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re-faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
 * appropriate) must be called after the page is finished with, and
 * before put_page is called.
 *
 * If @nonblocking != NULL, __get_user_pages will not wait for disk IO
 * or mmap_sem contention, and if waiting is needed to pin all pages,
 * *@nonblocking will be set to 0.  Further, if @gup_flags does not
 * include FOLL_NOWAIT, the mmap_sem will be released via up_read() in
 * this case.
 *
 * A caller using such a combination of @nonblocking and @gup_flags
 * must therefore hold the mmap_sem for reading only, and recognize
 * when it's been released.  Otherwise, it must be held for either
 * reading or writing and will not be released.
 *
 * In most cases, get_user_pages or get_user_pages_fast should be used
 * instead of __get_user_pages. __get_user_pages should be used only if
 * you need some special @gup_flags.
 */
long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas, int *nonblocking)
{
	long i = 0;
	unsigned int page_mask;
	struct vm_area_struct *vma = NULL;

	if (!nr_pages)
		return 0;

	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));

	/*
	 * If FOLL_FORCE is set then do not force a full fault as the hinting
	 * fault information is unrelated to the reference behaviour of a task
	 * using the address space
	 */
	if (!(gup_flags & FOLL_FORCE))
		gup_flags |= FOLL_NUMA;

	do {
		struct page *page;
		unsigned int foll_flags = gup_flags;
		unsigned int page_increm;

		/* first iteration or cross vma bound */
		if (!vma || start >= vma->vm_end) {
			vma = find_extend_vma(mm, start);
			if (!vma && in_gate_area(mm, start)) {
				int ret;
				ret = get_gate_page(mm, start & PAGE_MASK,
						gup_flags, &vma,
						pages ? &pages[i] : NULL);
				if (ret)
					return i ? : ret;
				page_mask = 0;
				goto next_page;
			}

			if (!vma || check_vma_flags(vma, gup_flags))
				return i ? : -EFAULT;
			if (is_vm_hugetlb_page(vma)) {
				i = follow_hugetlb_page(mm, vma, pages, vmas,
						&start, &nr_pages, i,
						gup_flags);
				continue;
			}
		}
retry:
		/*
		 * If we have a pending SIGKILL, don't keep faulting pages and
		 * potentially allocating memory.
		 */
		if (unlikely(fatal_signal_pending(current)))
			return i ? i : -ERESTARTSYS;
		cond_resched();
		page = follow_page_mask(vma, start, foll_flags, &page_mask);
		if (!page) {
			int ret;
			ret = faultin_page(tsk, vma, start, &foll_flags,
					nonblocking);
			switch (ret) {
			case 0:
				goto retry;
			case -EFAULT:
			case -ENOMEM:
			case -EHWPOISON:
				return i ? i : ret;
			case -EBUSY:
				return i;
			case -ENOENT:
				goto next_page;
			}
			BUG();
		} else if (PTR_ERR(page) == -EEXIST) {
			/*
			 * Proper page table entry exists, but no corresponding
			 * struct page.
			 */
			goto next_page;
		} else if (IS_ERR(page)) {
			return i ? i : PTR_ERR(page);
		}
		if (pages) {
			pages[i] = page;
			flush_anon_page(vma, page, start);
			flush_dcache_page(page);
			page_mask = 0;
		}
next_page:
		if (vmas) {
			vmas[i] = vma;
			page_mask = 0;
		}
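		/*
		 * page_increm below computes how many of the remaining
		 * requested pages fall within the page just looked up.
		 * page_mask is 0 for a normal page, giving an increment of 1.
		 * For a THP, follow_page_mask() set page_mask to
		 * HPAGE_PMD_NR - 1 (e.g. 511 for 2MB pages): if start points
		 * at subpage 509, ~(start >> PAGE_SHIFT) & page_mask is 2 and
		 * page_increm is 3, stepping over the rest of the huge page.
		 * Note that both branches above reset page_mask to 0, so the
		 * multi-page step is only taken when neither pages nor vmas
		 * are being collected (i.e. pure prefaulting).
		 */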
		page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
		if (page_increm > nr_pages)
			page_increm = nr_pages;
		i += page_increm;
		start += page_increm * PAGE_SIZE;
		nr_pages -= page_increm;
	} while (nr_pages);
	return i;
}
EXPORT_SYMBOL(__get_user_pages);
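
/*
 * A minimal sketch (not an in-tree caller) of the @nonblocking protocol
 * documented above: the caller holds mmap_sem for reading and must notice
 * when __get_user_pages() has dropped it.
 *
 *	int locked = 1;
 *	long got;
 *
 *	down_read(&mm->mmap_sem);
 *	got = __get_user_pages(tsk, mm, start, nr_pages,
 *			       FOLL_TOUCH | FOLL_GET, pages, NULL, &locked);
 *	if (locked)
 *		up_read(&mm->mmap_sem);
 *
 * If "locked" is now 0, __get_user_pages() already did the up_read() for
 * us.  __get_user_pages_locked() below wraps essentially this pattern,
 * including the FOLL_TRIED retry after the lock was dropped.
 */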

/*
 * fixup_user_fault() - manually resolve a user page fault
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @address:	user address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * This is meant to be called in the specific scenario where, for locking
 * reasons, we try to access user memory in atomic context (within a
 * pagefault_disable() section), this returns -EFAULT, and we want to
 * resolve the user fault before trying again.
 *
 * Typically this is meant to be used by the futex code.
 *
 * The main difference with get_user_pages() is that this function will
 * unconditionally call handle_mm_fault() which will in turn perform all the
 * necessary SW fixup of the dirty and young bits in the PTE, while
 * get_user_pages() only guarantees to update these in the struct page.
 *
 * This is important for some architectures where those bits also gate the
 * access permission to the page because they are maintained in software.  On
 * such architectures, gup() will not be enough to make a subsequent access
 * succeed.
 *
 * This has the same semantics wrt the @mm->mmap_sem as does filemap_fault().
 */
int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long address, unsigned int fault_flags)
{
	struct vm_area_struct *vma;
	vm_flags_t vm_flags;
	int ret;

	vma = find_extend_vma(mm, address);
	if (!vma || address < vma->vm_start)
		return -EFAULT;

	vm_flags = (fault_flags & FAULT_FLAG_WRITE) ? VM_WRITE : VM_READ;
	if (!(vm_flags & vma->vm_flags))
		return -EFAULT;

	ret = handle_mm_fault(mm, vma, address, fault_flags);
	if (ret & VM_FAULT_ERROR) {
		if (ret & VM_FAULT_OOM)
			return -ENOMEM;
		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
			return -EHWPOISON;
		if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
			return -EFAULT;
		BUG();
	}
	if (tsk) {
		if (ret & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}
	return 0;
}
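
/*
 * A rough sketch of the intended calling pattern (modelled on what the
 * futex code needs, not copied from it): attempt the access with page
 * faults disabled, and when that fails resolve the fault outside the
 * atomic section and retry.  "uaddr", "val" and "mm" are placeholders:
 *
 *	for (;;) {
 *		pagefault_disable();
 *		ret = __copy_to_user_inatomic(uaddr, &val, sizeof(val));
 *		pagefault_enable();
 *		if (!ret)
 *			break;
 *
 *		down_read(&mm->mmap_sem);
 *		ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
 *				       FAULT_FLAG_WRITE);
 *		up_read(&mm->mmap_sem);
 *		if (ret)
 *			return ret;
 *	}
 */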

static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
						struct mm_struct *mm,
						unsigned long start,
						unsigned long nr_pages,
						int write, int force,
						struct page **pages,
						struct vm_area_struct **vmas,
						int *locked, bool notify_drop,
						unsigned int flags)
{
	long ret, pages_done;
	bool lock_dropped;

	if (locked) {
		/* if VM_FAULT_RETRY can be returned, vmas become invalid */
		BUG_ON(vmas);
		/* check caller initialized locked */
		BUG_ON(*locked != 1);
	}

	if (pages)
		flags |= FOLL_GET;
	if (write)
		flags |= FOLL_WRITE;
	if (force)
		flags |= FOLL_FORCE;

	pages_done = 0;
	lock_dropped = false;
	for (;;) {
		ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages,
				       vmas, locked);
		if (!locked)
			/* VM_FAULT_RETRY couldn't trigger, bypass */
			return ret;

		/* VM_FAULT_RETRY cannot return errors */
		if (!*locked) {
			BUG_ON(ret < 0);
			BUG_ON(ret >= nr_pages);
		}

		if (!pages)
			/* If it's a prefault don't insist harder */
			return ret;

		if (ret > 0) {
			nr_pages -= ret;
			pages_done += ret;
			if (!nr_pages)
				break;
		}
		if (*locked) {
			/* VM_FAULT_RETRY didn't trigger */
			if (!pages_done)
				pages_done = ret;
			break;
		}
		/* VM_FAULT_RETRY triggered, so seek to the faulting offset */
		pages += ret;
		start += ret << PAGE_SHIFT;

		/*
		 * Repeat on the address that fired VM_FAULT_RETRY
		 * without FAULT_FLAG_ALLOW_RETRY but with
		 * FAULT_FLAG_TRIED.
		 */
		*locked = 1;
		lock_dropped = true;
		down_read(&mm->mmap_sem);
		ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
				       pages, NULL, NULL);
		if (ret != 1) {
			BUG_ON(ret > 1);
			if (!pages_done)
				pages_done = ret;
			break;
		}
		nr_pages--;
		pages_done++;
		if (!nr_pages)
			break;
		pages++;
		start += PAGE_SIZE;
	}
	if (notify_drop && lock_dropped && *locked) {
		/*
		 * We must let the caller know we temporarily dropped the lock
		 * and so the critical section protected by it was lost.
		 */
		up_read(&mm->mmap_sem);
		*locked = 0;
	}
	return pages_done;
}

/*
 * We can leverage the VM_FAULT_RETRY functionality in the page fault
 * paths better by using either get_user_pages_locked() or
 * get_user_pages_unlocked().
 *
 * get_user_pages_locked() is suitable to replace the form:
 *
 *	down_read(&mm->mmap_sem);
 *	do_something()
 *	get_user_pages(tsk, mm, ..., pages, NULL);
 *	up_read(&mm->mmap_sem);
 *
 * to:
 *
 *	int locked = 1;
 *	down_read(&mm->mmap_sem);
 *	do_something()
 *	get_user_pages_locked(tsk, mm, ..., pages, &locked);
 *	if (locked)
 *		up_read(&mm->mmap_sem);
 */
long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
			   unsigned long start, unsigned long nr_pages,
			   int write, int force, struct page **pages,
			   int *locked)
{
	return __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
				       pages, NULL, locked, true, FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages_locked);

/*
 * Same as get_user_pages_unlocked(...., FOLL_TOUCH) but it allows passing
 * additional gup_flags as the last parameter (like FOLL_HWPOISON).
 *
 * NOTE: here FOLL_TOUCH is not set implicitly and must be set by the
 * caller if required (just like with __get_user_pages). "FOLL_GET",
 * "FOLL_WRITE" and "FOLL_FORCE" are set implicitly as needed
 * according to the parameters "pages", "write", "force"
 * respectively.
 */
__always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
					       unsigned long start, unsigned long nr_pages,
					       int write, int force, struct page **pages,
					       unsigned int gup_flags)
{
	long ret;
	int locked = 1;

	down_read(&mm->mmap_sem);
	ret = __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
				      pages, NULL, &locked, false, gup_flags);
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(__get_user_pages_unlocked);
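
/*
 * For illustration only (a sketch, not lifted from an in-tree caller): a
 * user that wants poisoned pages reported as -EHWPOISON rather than a
 * plain -EFAULT could call
 *
 *	npinned = __get_user_pages_unlocked(current, current->mm, addr, 1,
 *					    write, 0, &page,
 *					    FOLL_TOUCH | FOLL_HWPOISON);
 *
 * Note that FOLL_TOUCH has to be passed explicitly here, unlike with the
 * get_user_pages_unlocked() wrapper below.
 */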

/*
 * get_user_pages_unlocked() is suitable to replace the form:
 *
 *	down_read(&mm->mmap_sem);
 *	get_user_pages(tsk, mm, ..., pages, NULL);
 *	up_read(&mm->mmap_sem);
 *
 * with:
 *
 *	get_user_pages_unlocked(tsk, mm, ..., pages);
 *
 * It is functionally equivalent to get_user_pages_fast so
 * get_user_pages_fast should be used instead if the two parameters
 * "tsk" and "mm" are respectively equal to current and current->mm,
 * or if "force" shall be set to 1 (get_user_pages_fast misses the
 * "force" parameter).
 */
long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
			     unsigned long start, unsigned long nr_pages,
			     int write, int force, struct page **pages)
{
	return __get_user_pages_unlocked(tsk, mm, start, nr_pages, write,
					 force, pages, FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages_unlocked);

/*
 * get_user_pages() - pin user pages in memory
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to by the caller
 * @force:	whether to force access even when user mapping is currently
 *		protected (but never forces write access to shared mapping).
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held for read or write.
 *
 * get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re-faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If write=0, the page must not be written to. If the page is written to,
 * set_page_dirty (or set_page_dirty_lock, as appropriate) must be called
 * after the page is finished with, and before put_page is called.
 *
 * get_user_pages is typically used for fewer-copy IO operations, to get a
 * handle on the memory by some means other than accesses via the user virtual
 * addresses. The pages may be submitted for DMA to devices or accessed via
 * their kernel linear mapping (via the kmap APIs). Care should be taken to
 * use the correct cache flushing APIs.
 *
 * See also get_user_pages_fast, for performance critical applications.
 *
 * get_user_pages should be phased out in favor of
 * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
 * should use get_user_pages because it cannot pass
 * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
 */
long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages, int write,
		int force, struct page **pages, struct vm_area_struct **vmas)
{
	return __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
				       pages, vmas, NULL, false, FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages);
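
/*
 * A minimal sketch of the pin/use/release protocol described above, in the
 * style of a fewer-copy IO driver (illustrative only; the actual IO and
 * most error handling are omitted, and "npages"/"pages" are placeholders):
 *
 *	down_read(&current->mm->mmap_sem);
 *	got = get_user_pages(current, current->mm, start, npages,
 *			     1, 0, pages, NULL);
 *	up_read(&current->mm->mmap_sem);
 *	if (got < 0)
 *		return got;
 *
 *	... perform DMA to, or kmap() accesses on, pages[0..got-1] ...
 *
 *	for (i = 0; i < got; i++) {
 *		set_page_dirty_lock(pages[i]);
 *		put_page(pages[i]);
 *	}
 *
 * got may be smaller than npages, which a real caller must handle; the
 * set_page_dirty_lock() is only needed because write=1 was passed and the
 * pages were actually written to.
 */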

/**
 * populate_vma_page_range() - populate a range of pages in the vma.
 * @vma:   target vma
 * @start: start address
 * @end:   end address
 * @nonblocking:
 *
 * This takes care of mlocking the pages too if VM_LOCKED is set.
 *
 * Returns the number of pages faulted in on success, or a negative error
 * code on error.
 *
 * vma->vm_mm->mmap_sem must be held.
 *
 * If @nonblocking is NULL, it may be held for read or write and will
 * be unperturbed.
 *
 * If @nonblocking is non-NULL, it must be held for read only and may be
 * released.  If it's released, *@nonblocking will be set to 0.
 */
long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *nonblocking)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long nr_pages = (end - start) / PAGE_SIZE;
	int gup_flags;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(end & ~PAGE_MASK);
	VM_BUG_ON_VMA(start < vma->vm_start, vma);
	VM_BUG_ON_VMA(end > vma->vm_end, vma);
	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);

	gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
	if (vma->vm_flags & VM_LOCKONFAULT)
		gup_flags &= ~FOLL_POPULATE;

	/*
	 * We want to touch writable mappings with a write fault in order
	 * to break COW, except for shared mappings because these don't COW
	 * and we would not want to dirty them for nothing.
	 */
	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
		gup_flags |= FOLL_WRITE;

	/*
	 * We want mlock to succeed for regions that have any permissions
	 * other than PROT_NONE.
	 */
	if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
		gup_flags |= FOLL_FORCE;

	/*
	 * We made sure addr is within a VMA, so the following will
	 * not result in a stack expansion that recurses back here.
	 */
	return __get_user_pages(current, mm, start, nr_pages, gup_flags,
				NULL, NULL, nonblocking);
}
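
/*
 * As a worked example of the flag selection above (illustrative only): for
 * an mlock()ed private anonymous vma with PROT_READ|PROT_WRITE and without
 * VM_LOCKONFAULT, gup_flags ends up as
 *
 *	FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK | FOLL_WRITE | FOLL_FORCE
 *
 * so every page is faulted in, COW is broken by the write fault, and the
 * pages are mlocked.  For a PROT_NONE vma neither FOLL_WRITE nor FOLL_FORCE
 * is added, and check_vma_flags() makes __get_user_pages() fail with
 * -EFAULT instead of populating the range.
 */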

/*
 * __mm_populate - populate and/or mlock pages within a range of address space.
 *
 * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
 * flags. VMAs must be already marked with the desired vm_flags, and
 * mmap_sem must not be held.
 */
int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
{
	struct mm_struct *mm = current->mm;
	unsigned long end, nstart, nend;
	struct vm_area_struct *vma = NULL;
	int locked = 0;
	long ret = 0;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(len != PAGE_ALIGN(len));
	end = start + len;

	for (nstart = start; nstart < end; nstart = nend) {
		/*
		 * We want to fault in pages for [nstart; end) address range.
		 * Find first corresponding VMA.
		 */
		if (!locked) {
			locked = 1;
			down_read(&mm->mmap_sem);
			vma = find_vma(mm, nstart);
		} else if (nstart >= vma->vm_end)
			vma = vma->vm_next;
		if (!vma || vma->vm_start >= end)
			break;
		/*
		 * Set [nstart; nend) to intersection of desired address
		 * range with the first VMA. Also, skip undesirable VMA types.
		 */
		nend = min(end, vma->vm_end);
		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
			continue;
		if (nstart < vma->vm_start)
			nstart = vma->vm_start;
		/*
		 * Now fault in a range of pages. populate_vma_page_range()
		 * double checks the vma flags, so that it won't mlock pages
		 * if the vma was already munlocked.
		 */
		ret = populate_vma_page_range(vma, nstart, nend, &locked);
		if (ret < 0) {
			if (ignore_errors) {
				ret = 0;
				continue;	/* continue at next VMA */
			}
			break;
		}
		nend = nstart + ret * PAGE_SIZE;
		ret = 0;
	}
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;	/* 0 or negative error code */
}

/**
 * get_dump_page() - pin user page in memory while writing it to core dump
 * @addr: user address
 *
 * Returns struct page pointer of user page pinned for dump,
 * to be freed afterwards by page_cache_release() or put_page().
 *
 * Returns NULL on any kind of failure - a hole must then be inserted into
 * the corefile, to preserve alignment with its headers; and also returns
 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
 * allowing a hole to be left in the corefile to save diskspace.
 *
 * Called without mmap_sem, but after all other threads have been killed.
 */
#ifdef CONFIG_ELF_CORE
struct page *get_dump_page(unsigned long addr)
{
	struct vm_area_struct *vma;
	struct page *page;

	if (__get_user_pages(current, current->mm, addr, 1,
			     FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
			     NULL) < 1)
		return NULL;
	flush_cache_page(vma, addr, page_to_pfn(page));
	return page;
}
#endif /* CONFIG_ELF_CORE */

/*
 * Generic RCU Fast GUP
 *
 * get_user_pages_fast attempts to pin user pages by walking the page
 * tables directly and avoids taking locks. Thus the walker needs to be
 * protected from page table pages being freed from under it, and should
 * block any THP splits.
 *
 * One way to achieve this is to have the walker disable interrupts, and
 * rely on IPIs from the TLB flushing code blocking before the page table
 * pages are freed. This is unsuitable for architectures that do not need
 * to broadcast an IPI when invalidating TLBs.
 *
 * Another way to achieve this is to batch up the pages containing page
 * tables belonging to more than one mm_user, then rcu_sched a callback to
 * free those pages. Disabling interrupts allows the fast_gup walker to
 * block both the rcu_sched callback and an IPI that we broadcast for
 * splitting THPs (which is a relatively rare event). The code below adopts
 * this strategy.
 *
 * Before activating this code, please be aware that the following assumptions
 * are currently made:
 *
 *  *) HAVE_RCU_TABLE_FREE is enabled, and tlb_remove_table is used to free
 *     pages containing page tables.
 *
 *  *) THP splits will broadcast an IPI; this can be achieved by overriding
 *     pmdp_splitting_flush.
 *
 *  *) ptes can be read atomically by the architecture.
 *
 *  *) access_ok is sufficient to validate userspace address ranges.
 *
 * The last two assumptions can be relaxed by the addition of helper
 * functions.
 *
 * This code is based heavily on the PowerPC implementation by Nick Piggin.
 */
#ifdef CONFIG_HAVE_GENERIC_RCU_GUP

#ifdef __HAVE_ARCH_PTE_SPECIAL
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	pte_t *ptep, *ptem;
	int ret = 0;

	ptem = ptep = pte_offset_map(&pmd, addr);
	do {
		/*
		 * In the line below we are assuming that the pte can be read
		 * atomically. If this is not the case for your architecture,
		 * please wrap this in a helper function!
		 *
		 * for an example see gup_get_pte in arch/x86/mm/gup.c
		 */
		pte_t pte = READ_ONCE(*ptep);
		struct page *page;

		/*
		 * Similar to the PMD case below, NUMA hinting must take slow
		 * path using the pte_protnone check.
		 */
		if (!pte_present(pte) || pte_special(pte) ||
		    pte_protnone(pte) || (write && !pte_write(pte)))
			goto pte_unmap;

		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);

		if (!page_cache_get_speculative(page))
			goto pte_unmap;

		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(page);
			goto pte_unmap;
		}

		pages[*nr] = page;
		(*nr)++;

	} while (ptep++, addr += PAGE_SIZE, addr != end);

	ret = 1;

pte_unmap:
	pte_unmap(ptem);
	return ret;
}
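
/*
 * For reference, a rough sketch of the kind of helper the comment above
 * asks for, loosely modelled on the PAE version of gup_get_pte() in
 * arch/x86/mm/gup.c.  It assumes a hypothetical architecture whose pte_t
 * is made of two 32-bit halves (pte_low/pte_high) with the present bit in
 * the low half; it is not meant to be used as-is:
 *
 *	static inline pte_t gup_get_pte(pte_t *ptep)
 *	{
 *		pte_t pte;
 *
 *		do {
 *			pte.pte_low = ptep->pte_low;
 *			smp_rmb();
 *			pte.pte_high = ptep->pte_high;
 *			smp_rmb();
 *		} while (unlikely(pte.pte_low != ptep->pte_low));
 *
 *		return pte;
 *	}
 */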

#else

/*
 * If we can't determine whether or not a pte is special, then fail immediately
 * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
 * to be special.
 *
 * For a futex to be placed on a THP tail page, get_futex_key requires a
 * __get_user_pages_fast implementation that can pin pages. Thus it's still
 * useful to have gup_huge_pmd even if we can't operate on ptes.
 */
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	return 0;
}
#endif /* __HAVE_ARCH_PTE_SPECIAL */

static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page, *tail;
	int refs;

	if (write && !pmd_write(orig))
		return 0;

	refs = 0;
	head = pmd_page(orig);
	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	tail = page;
	do {
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	/*
	 * Any tail pages need their mapcount reference taken before we
	 * return. (This allows the THP code to bump their ref count when
	 * they are split into base pages).
	 */
	while (refs--) {
		if (PageTail(tail))
			get_huge_page_tail(tail);
		tail++;
	}

	return 1;
}

static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page, *tail;
	int refs;

	if (write && !pud_write(orig))
		return 0;

	refs = 0;
	head = pud_page(orig);
	page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	tail = page;
	do {
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	while (refs--) {
		if (PageTail(tail))
			get_huge_page_tail(tail);
		tail++;
	}

	return 1;
}

static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
			unsigned long end, int write,
			struct page **pages, int *nr)
{
	int refs;
	struct page *head, *page, *tail;

	if (write && !pgd_write(orig))
		return 0;

	refs = 0;
	head = pgd_page(orig);
	page = head + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
	tail = page;
	do {
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	while (refs--) {
		if (PageTail(tail))
			get_huge_page_tail(tail);
		tail++;
	}

	return 1;
}

static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_offset(&pud, addr);
	do {
		pmd_t pmd = READ_ONCE(*pmdp);

		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd) || pmd_trans_splitting(pmd))
			return 0;

		if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd))) {
			/*
			 * NUMA hinting faults need to be handled in the GUP
			 * slowpath for accounting purposes and so that they
			 * can be serialised against THP migration.
			 */
			if (pmd_protnone(pmd))
				return 0;

			if (!gup_huge_pmd(pmd, pmdp, addr, next, write,
					pages, nr))
				return 0;

		} else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
			/*
			 * Architectures can have a different pmd format for
			 * hugetlbfs pmds than for THP pmds.
			 */
			if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
					 PMD_SHIFT, next, write, pages, nr))
				return 0;
		} else if (!gup_pte_range(pmd, addr, next, write, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}

static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp;

	pudp = pud_offset(&pgd, addr);
	do {
		pud_t pud = READ_ONCE(*pudp);

		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (unlikely(pud_huge(pud))) {
			if (!gup_huge_pud(pud, pudp, addr, next, write,
					  pages, nr))
				return 0;
		} else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
			if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
					 PUD_SHIFT, next, write, pages, nr))
				return 0;
		} else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.  It will only return non-negative values.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next, flags;
	pgd_t *pgdp;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;

	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
					start, len)))
		return 0;

	/*
	 * Disable interrupts.  We use the nested form as we can already have
	 * interrupts disabled by get_futex_key.
	 *
	 * With interrupts disabled, we block page table pages from being
	 * freed from under us. See mmu_gather_tlb in asm-generic/tlb.h
	 * for more details.
	 *
	 * We do not adopt an rcu_read_lock(.) here as we also want to
	 * block IPIs that come from THPs splitting.
	 */

	local_irq_save(flags);
	pgdp = pgd_offset(mm, addr);
	do {
		pgd_t pgd = READ_ONCE(*pgdp);

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			break;
		if (unlikely(pgd_huge(pgd))) {
			if (!gup_huge_pgd(pgd, pgdp, addr, next, write,
					  pages, &nr))
				break;
		} else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
			if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
					 PGDIR_SHIFT, next, write, pages, &nr))
				break;
		} else if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
			break;
	} while (pgdp++, addr = next, addr != end);
	local_irq_restore(flags);

	return nr;
}
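
/*
 * A minimal sketch of how an atomic-context caller might use the IRQ-safe
 * variant above (modelled on what the futex code needs, not copied from
 * it).  __get_user_pages_fast() never sleeps and never returns an error,
 * so getting fewer pages than requested is simply treated as a miss:
 *
 *	if (__get_user_pages_fast(address, 1, 1, &page) != 1)
 *		return -EFAULT;
 *	... use page ...
 *	put_page(page);
 *
 * where the -EFAULT return tells the caller to retry via a path that may
 * sleep, for instance get_user_pages_fast() below.
 */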

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Attempt to pin user pages in memory without taking mm->mmap_sem.
 * If not successful, it will fall back to taking the lock and
 * calling get_user_pages().
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 */
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	struct mm_struct *mm = current->mm;
	int nr, ret;

	start &= PAGE_MASK;
	nr = __get_user_pages_fast(start, nr_pages, write, pages);
	ret = nr;

	if (nr < nr_pages) {
		/* Try to get the remaining pages with get_user_pages */
		start += nr << PAGE_SHIFT;
		pages += nr;

		ret = get_user_pages_unlocked(current, mm, start,
					      nr_pages - nr, write, 0, pages);

		/* Have to be a bit careful with return values */
		if (nr > 0) {
			if (ret < 0)
				ret = nr;
			else
				ret += nr;
		}
	}

	return ret;
}

#endif /* CONFIG_HAVE_GENERIC_RCU_GUP */