Lines Matching +full:cross +full:- +full:arm64 +full:- +full:user
1 // SPDX-License-Identifier: GPL-2.0-only
47 * THP we can assume that either the given page (PTE-mapped THP) or in sanity_check_pinned_pages()
48 * the head page (PMD-mapped THP) should be PageAnonExclusive(). If in sanity_check_pinned_pages()
51 for (; npages; npages--, pages++) { in sanity_check_pinned_pages()
59 VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page), page); in sanity_check_pinned_pages()
61 /* Either a PTE-mapped or a PMD-mapped THP. */ in sanity_check_pinned_pages()
62 VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page) && in sanity_check_pinned_pages()
92 if (!put_devmap_managed_page_refs(&folio->page, refs)) in try_get_folio()
107 atomic_sub(refs, &folio->_pincount); in gup_put_folio()
112 if (!put_devmap_managed_page_refs(&folio->page, refs)) in gup_put_folio()
117 * try_grab_folio() - add a folio's refcount by a flag-dependent amount
133 * -ENOMEM FOLL_GET or FOLL_PIN was set, but the folio could not
143 return -ENOMEM; in try_grab_folio()
145 if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(&folio->page))) in try_grab_folio()
146 return -EREMOTEIO; in try_grab_folio()
152 * Don't take a pin on the zero page - it's not going anywhere in try_grab_folio()
164 atomic_add(refs, &folio->_pincount); in try_grab_folio()
176 * unpin_user_page() - release a dma-pinned page
192 * folio_add_pin - Try to get an additional pin on a pinned folio
209 WARN_ON_ONCE(atomic_read(&folio->_pincount) < 1); in folio_add_pin()
211 atomic_inc(&folio->_pincount); in folio_add_pin()
226 nr = min_t(unsigned int, npages - i, in gup_folio_range_next()
227 folio_nr_pages(folio) - folio_page_idx(folio, next)); in gup_folio_range_next()
244 *ntails = nr - i; in gup_folio_next()
249 * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
254 * "gup-pinned page" refers to a page that has had one of the get_user_pages()
260 * possibly via unpin_user_pages(), for the non-dirty case.
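A minimal caller-side sketch of the convention described above, assuming a driver-style caller (the function name and surrounding context are invented for illustration, not taken from this file):

#include <linux/errno.h>
#include <linux/mm.h>

/* Pin a user buffer for a device that writes into it, then release the
 * pins.  Pages the device wrote must be dirtied on release. */
static int demo_pin_for_device_write(unsigned long uaddr, int nr_pages,
                                     struct page **pages)
{
        int pinned;

        pinned = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE, pages);
        if (pinned <= 0)
                return pinned ? pinned : -EFAULT;

        /* ... hand the pinned pages to the device and wait for it ... */

        /* The device wrote the data: mark dirty while unpinning. */
        unpin_user_pages_dirty_lock(pages, pinned, true);

        /* If the device had only read from the pages, the non-dirty
         * release would be: unpin_user_pages(pages, pinned); */
        return 0;
}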
316 * unpin_user_page_range_dirty_lock() - release and optionally dirty
317 * gup-pinned page range
323 * "gup-pinned page range" refers to a range of pages that has had one of the
363 * fork() and some anonymous pages might now actually be shared -- in unpin_user_pages_lockless()
373 * unpin_user_pages() - release an array of gup-pinned pages.
390 * a hard -ERRNO error to the caller, who erroneously passed it here. in unpin_user_pages()
424 * be zero-filled if handle_mm_fault() actually did handle it. in no_page_table()
427 (vma_is_anonymous(vma) || !vma->vm_ops->fault)) in no_page_table()
428 return ERR_PTR(-EFAULT); in no_page_table()
444 set_pte_at(vma->vm_mm, address, pte, entry); in follow_pfn_pte()
450 return -EEXIST; in follow_pfn_pte()
467 if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED)) in can_follow_write_pte()
470 /* ... or read-only private ones */ in can_follow_write_pte()
471 if (!(vma->vm_flags & VM_MAYWRITE)) in can_follow_write_pte()
475 if (vma->vm_flags & VM_WRITE) in can_follow_write_pte()
485 /* ... and a write-fault isn't required for other reasons. */ in can_follow_write_pte()
495 struct mm_struct *mm = vma->vm_mm; in follow_page_pte()
504 return ERR_PTR(-EINVAL); in follow_page_pte()
541 page = ERR_PTR(-EFAULT); in follow_page_pte()
555 page = ERR_PTR(-EMLINK); in follow_page_pte()
572 * Documentation/core-api/pin_user_pages.rst for details. in follow_page_pte()
611 struct mm_struct *mm = vma->vm_mm; in follow_pmd_mask()
621 page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap); in follow_pmd_mask()
627 return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); in follow_pmd_mask()
639 return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); in follow_pmd_mask()
645 return pte_alloc(mm, pmd) ? ERR_PTR(-ENOMEM) : in follow_pmd_mask()
646 follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); in follow_pmd_mask()
650 ctx->page_mask = HPAGE_PMD_NR - 1; in follow_pmd_mask()
662 struct mm_struct *mm = vma->vm_mm; in follow_pud_mask()
669 page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap); in follow_pud_mask()
698 * follow_page_mask - look up a page descriptor from a user-virtual address
707 * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
711 * of a shared anonymous page first, -EMLINK is returned. The caller should
715 * On output, the @ctx->page_mask is set according to the size of the page.
726 struct mm_struct *mm = vma->vm_mm; in follow_page_mask()
728 ctx->page_mask = 0; in follow_page_mask()
737 &ctx->page_mask); in follow_page_mask()
761 * to fail on PROT_NONE-mapped pages. in follow_page()
779 int ret = -EFAULT; in get_gate_page()
781 /* user gate pages are read-only */ in get_gate_page()
783 return -EFAULT; in get_gate_page()
789 return -EFAULT; in get_gate_page()
792 return -EFAULT; in get_gate_page()
795 return -EFAULT; in get_gate_page()
798 return -EFAULT; in get_gate_page()
801 return -EFAULT; in get_gate_page()
827 * to 0 and -EBUSY returned.
837 return -EFAULT; in faultin_page()
845 * FAULT_FLAG_INTERRUPTIBLE is opt-in. GUP callers must set in faultin_page()
848 * handle early exits caused by non-fatal signals. in faultin_page()
858 * can co-exist in faultin_page()
880 * return -EBUSY since that's not reflecting the reality of in faultin_page()
881 * what has happened - we've just fully completed a page in faultin_page()
882 * fault, with the mmap lock released. Use -EAGAIN to show in faultin_page()
885 return -EAGAIN; in faultin_page()
899 return -EBUSY; in faultin_page()
906 * Writing to file-backed mappings which require folio dirty tracking using GUP
910 * Consider the following scenario:-
912 * 1. A folio is written to via GUP which write-faults the memory, notifying
915 * the PTE being marked read-only.
944 vm_flags_t vm_flags = vma->vm_flags; in check_vma_flags()
950 return -EFAULT; in check_vma_flags()
953 return -EFAULT; in check_vma_flags()
956 return -EOPNOTSUPP; in check_vma_flags()
959 return -EFAULT; in check_vma_flags()
964 return -EFAULT; in check_vma_flags()
968 return -EFAULT; in check_vma_flags()
971 return -EFAULT; in check_vma_flags()
975 * set a breakpoint in a read-only mapping of an in check_vma_flags()
982 return -EFAULT; in check_vma_flags()
986 return -EFAULT; in check_vma_flags()
992 return -EFAULT; in check_vma_flags()
999 return -EFAULT; in check_vma_flags()
1018 if (!vma || (addr >= vma->vm_start)) in gup_vma_lookup()
1021 /* Only warn for half-way relevant accesses */ in gup_vma_lookup()
1022 if (!(vma->vm_flags & VM_GROWSDOWN)) in gup_vma_lookup()
1024 if (vma->vm_start - addr > 65536) in gup_vma_lookup()
1034 pr_warn("GUP no longer grows the stack in %s (%d): %lx-%lx (%lx)\n", in gup_vma_lookup()
1035 current->comm, task_pid_nr(current), in gup_vma_lookup()
1036 vma->vm_start, vma->vm_end, addr); in gup_vma_lookup()
1043 * __get_user_pages() - pin user pages in memory
1045 * @start: starting user address
1056 * -- If nr_pages is 0, returns 0.
1057 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
1058 * -- If nr_pages is >0, and some pages were pinned, returns the number of
1060 * -- 0 return value is possible when the fault would need to be retried.
1067 * each struct page that each user address corresponds to at a given
1068 * instant. That is, it takes the page that would be accessed if a user
1069 * thread accesses the given user virtual address at that instant.
1071 * This does not guarantee that the page exists in the user mappings when
1074 * and subsequently re-faulted). However it does guarantee that the page
1117 /* first iteration or cross vma bound */ in __get_user_pages()
1118 if (!vma || start >= vma->vm_end) { in __get_user_pages()
1126 ret = -ENOMEM; in __get_user_pages()
1130 ret = -EINVAL; in __get_user_pages()
1147 ret = -EFAULT; in __get_user_pages()
1160 ret = -EINTR; in __get_user_pages()
1166 if (!page || PTR_ERR(page) == -EMLINK) { in __get_user_pages()
1168 PTR_ERR(page) == -EMLINK, locked); in __get_user_pages()
1172 case -EBUSY: in __get_user_pages()
1173 case -EAGAIN: in __get_user_pages()
1176 case -EFAULT: in __get_user_pages()
1177 case -ENOMEM: in __get_user_pages()
1178 case -EHWPOISON: in __get_user_pages()
1182 } else if (PTR_ERR(page) == -EEXIST) { in __get_user_pages()
1212 * e.g. when start addr is not thp-size aligned. in __get_user_pages()
1223 if (try_grab_folio(folio, page_increm - 1, in __get_user_pages()
1231 ret = -EFAULT; in __get_user_pages()
1246 nr_pages -= page_increm; in __get_user_pages()
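The return-value rules quoted in the kernel-doc above (0 for nr_pages == 0, -errno when nothing was pinned, otherwise the number of pages actually pinned, which may be short) mean callers that need the whole range must loop. A hedged sketch of such a wrapper, using the public pin_user_pages_fast() rather than this internal helper, with an invented name:

#include <linux/errno.h>
#include <linux/mm.h>

static int demo_pin_all_or_fail(unsigned long start, int nr_pages,
                                unsigned int gup_flags, struct page **pages)
{
        int pinned = 0;

        while (pinned < nr_pages) {
                int ret = pin_user_pages_fast(start + (unsigned long)pinned * PAGE_SIZE,
                                              nr_pages - pinned, gup_flags,
                                              pages + pinned);
                if (ret <= 0) {
                        /* Nothing more could be pinned: undo and report. */
                        unpin_user_pages(pages, pinned);
                        return ret ? ret : -EFAULT;
                }
                pinned += ret;
        }
        return pinned;
}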
1261 if (!(vm_flags & vma->vm_flags)) in vma_permits_fault()
1278 * fixup_user_fault() - manually resolve a user page fault
1280 * @address: user address
1287 * we try to access user memory in atomic context (within a pagefault_disable()
1288 * section), this returns -EFAULT, and we want to resolve the user fault before
1304 * same semantics wrt the @mm->mmap_lock as does filemap_fault().
1321 return -EFAULT; in fixup_user_fault()
1324 return -EFAULT; in fixup_user_fault()
1328 return -EINTR; in fixup_user_fault()
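The scenario described above (an access under pagefault_disable() fails with -EFAULT, the fault is resolved explicitly, and the access is retried) is easiest to see as code. A sketch in the style of the futex fault path, with invented names and a write access as the example:

#include <linux/mm.h>
#include <linux/uaccess.h>

static int demo_write_user_word(u32 __user *uaddr, u32 val)
{
        struct mm_struct *mm = current->mm;
        int ret;

        for (;;) {
                /* Atomic attempt: no page faults may be taken here. */
                pagefault_disable();
                ret = __put_user(val, uaddr);
                pagefault_enable();
                if (!ret)
                        return 0;

                /* The access faulted: resolve the fault, then retry. */
                mmap_read_lock(mm);
                ret = fixup_user_fault(mm, (unsigned long)uaddr,
                                       FAULT_FLAG_WRITE, NULL);
                mmap_read_unlock(mm);
                if (ret)
                        return ret;
        }
}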
1407 return -EAGAIN; in __get_user_pages_locked()
1415 mm_set_has_pinned_flag(&mm->flags); in __get_user_pages_locked()
1423 * FOLL_PIN always expects pages to be non-null, but no need to assert in __get_user_pages_locked()
1446 nr_pages -= ret; in __get_user_pages_locked()
1482 pages_done = -EINTR; in __get_user_pages_locked()
1508 nr_pages--; in __get_user_pages_locked()
1529 * populate_vma_page_range() - populate a range of pages in the vma.
1540 * vma->vm_mm->mmap_lock must be held.
1545 * If @locked is non-NULL, it must be held for read only and may be in populate_vma_page_range()
1551 struct mm_struct *mm = vma->vm_mm; in populate_vma_page_range()
1552 unsigned long nr_pages = (end - start) / PAGE_SIZE; in populate_vma_page_range()
1559 VM_BUG_ON_VMA(start < vma->vm_start, vma); in populate_vma_page_range()
1560 VM_BUG_ON_VMA(end > vma->vm_end, vma); in populate_vma_page_range()
1567 if (vma->vm_flags & VM_LOCKONFAULT) in populate_vma_page_range()
1576 if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE) in populate_vma_page_range()
1600 * faultin_page_range() - populate (prefault) page tables inside the
1616 * The range must be page-aligned.
1618 * mm->mmap_lock must be held. If it's released, *@locked will be set to 0.
1623 unsigned long nr_pages = (end - start) / PAGE_SIZE; in faultin_page_range()
1633 * the page dirty with FOLL_WRITE -- which doesn't make a in faultin_page_range()
1636 * FOLL_HWPOISON: Return -EHWPOISON instead of -EFAULT when we hit in faultin_page_range()
1652 * __mm_populate - populate and/or mlock pages within a range of address space.
1660 struct mm_struct *mm = current->mm; in __mm_populate()
1677 } else if (nstart >= vma->vm_end) in __mm_populate()
1678 vma = find_vma_intersection(mm, vma->vm_end, end); in __mm_populate()
1686 nend = min(end, vma->vm_end); in __mm_populate()
1687 if (vma->vm_flags & (VM_IO | VM_PFNMAP)) in __mm_populate()
1689 if (nstart < vma->vm_start) in __mm_populate()
1690 nstart = vma->vm_start; in __mm_populate()
1730 return -EAGAIN; in __get_user_pages_locked()
1749 if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) || in __get_user_pages_locked()
1750 !(vm_flags & vma->vm_flags)) in __get_user_pages_locked()
1767 return i ? : -EFAULT; in __get_user_pages_locked()
1772 * fault_in_writeable - fault in userspace address range for writing
1801 if (size > uaddr - start) in fault_in_writeable()
1802 return size - (uaddr - start); in fault_in_writeable()
1808 * fault_in_subpage_writeable - fault in an address range for writing
1812 * Fault in a user address range for writing while checking for permissions at
1813 * sub-page granularity (e.g. arm64 MTE). This function should be used when
1825 * permission checking. The arch-specific probe_subpage_writeable() in fault_in_subpage_writeable()
1828 faulted_in = size - fault_in_writeable(uaddr, size); in fault_in_subpage_writeable()
1830 faulted_in -= probe_subpage_writeable(uaddr, faulted_in); in fault_in_subpage_writeable()
1832 return size - faulted_in; in fault_in_subpage_writeable()
1837 * fault_in_safe_writeable - fault in an address range for writing
1845 * Unlike fault_in_writeable(), this function is non-destructive.
1857 struct mm_struct *mm = current->mm; in fault_in_safe_writeable()
1874 if (size > (unsigned long)uaddr - start) in fault_in_safe_writeable()
1875 return size - ((unsigned long)uaddr - start); in fault_in_safe_writeable()
1881 * fault_in_readable - fault in userspace address range for reading
1882 * @uaddr: start of user address range
1883 * @size: size of user address range
1912 if (size > uaddr - start) in fault_in_readable()
1913 return size - (uaddr - start); in fault_in_readable()
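fault_in_readable(), like fault_in_writeable() and the sub-page variant above, returns the number of bytes that could NOT be faulted in, so 0 means the whole range was touched. A hedged usage sketch (names invented) of pre-faulting a user buffer so that a later copy is unlikely to fault, the classic pattern in filesystem write paths:

#include <linux/errno.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>

static int demo_prefault_and_copy(void *dst, const char __user *src, size_t len)
{
        if (fault_in_readable(src, len))
                return -EFAULT;         /* part of the range is not readable */

        /*
         * The pages may still be reclaimed or unmapped before the copy, so
         * real callers typically retry this whole sequence if the copy fails.
         */
        if (copy_from_user(dst, src, len))
                return -EFAULT;

        return 0;
}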
1919 * get_dump_page() - pin user page in memory while writing it to core dump
1920 * @addr: user address
1922 * Returns struct page pointer of user page pinned for dump,
1925 * Returns NULL on any kind of failure - a hole must then be inserted into
1927 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
1939 ret = __get_user_pages_locked(current->mm, addr, 1, &page, &locked, in get_dump_page()
1986 list_add_tail(&folio->lru, movable_page_list); in collect_longterm_unpinnable_pages()
1997 * Returns -EAGAIN if all pages were successfully migrated or -errno for failure
2020 if (migrate_device_coherent_page(&folio->page)) { in migrate_longterm_unpinnable_pages()
2021 ret = -EBUSY; in migrate_longterm_unpinnable_pages()
2048 ret = -ENOMEM; in migrate_longterm_unpinnable_pages()
2055 return -EAGAIN; in migrate_longterm_unpinnable_pages()
2073 * -EAGAIN. The caller should re-pin the entire range with FOLL_PIN and then
2076 * If an error other than -EAGAIN occurs, this indicates a migration failure.
2134 } while (rc == -EAGAIN); in __gup_longterm_locked()
2151 * - FOLL_TOUCH/FOLL_PIN/FOLL_TRIED/FOLL_FAST_ONLY are internal only in is_valid_gup_args()
2152 * - FOLL_REMOTE is internal only and used on follow_page() in is_valid_gup_args()
2153 * - FOLL_UNLOCKABLE is internal only and used if locked is !NULL in is_valid_gup_args()
2180 /* We want to allow the pgmap to be hot-unplugged at all times */ in is_valid_gup_args()
2191 * get_user_pages_remote() - pin user pages in memory
2193 * @start: starting user address
2206 * -- If nr_pages is 0, returns 0.
2207 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
2208 * -- If nr_pages is >0, and some pages were pinned, returns the number of
2216 * to each struct page that each user address corresponds to at a given
2217 * instant. That is, it takes the page that would be accessed if a user
2218 * thread accesses the given user virtual address at that instant.
2220 * This does not guarantee that the page exists in the user mappings when
2223 * and subsequently re-faulted). However it does guarantee that the page
2233 * get_user_pages_remote is typically used for fewer-copy IO operations,
2235 * via the user virtual addresses. The pages may be submitted for
2255 return -EINVAL; in get_user_pages_remote()
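A hedged sketch of the remote-mm usage mentioned above (ptrace/access_remote_vm-style readout of one page of another process's address space). The helper name is invented, error handling is abbreviated, and the six-argument signature without a vmas parameter is assumed:

#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>

static int demo_peek_remote(struct mm_struct *mm, unsigned long addr,
                            void *buf, size_t len)
{
        struct page *page;
        int locked = 1;
        void *kaddr;
        long got;

        if (len > PAGE_SIZE - offset_in_page(addr))
                return -EINVAL;         /* keep the example to one page */

        if (mmap_read_lock_killable(mm))
                return -EINTR;
        got = get_user_pages_remote(mm, addr & PAGE_MASK, 1, 0, &page, &locked);
        if (locked)
                mmap_read_unlock(mm);   /* GUP may have dropped the lock itself */
        if (got != 1)
                return got < 0 ? got : -EFAULT;

        kaddr = kmap_local_page(page);
        memcpy(buf, kaddr + offset_in_page(addr), len);
        kunmap_local(kaddr);
        put_page(page);                 /* release the FOLL_GET reference */
        return 0;
}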
2274 * get_user_pages() - pin user pages in memory
2275 * @start: starting user address
2282 * This is the same as get_user_pages_remote(), just with a less-flexible
2293 return -EINVAL; in get_user_pages()
2295 return __get_user_pages_locked(current->mm, start, nr_pages, pages, in get_user_pages()
2322 return -EINVAL; in get_user_pages_unlocked()
2324 return __get_user_pages_locked(current->mm, start, nr_pages, pages, in get_user_pages_unlocked()
2332 * get_user_pages_fast attempts to pin user pages by walking the page
2365 * Used in the GUP-fast path to determine whether a pin is permitted for a
2371 * Writing to pinned file-backed dirty tracked folios is inherently problematic
2373 * therefore try to avoid the most egregious case of a long-term mapping doing
2398 /* hugetlb mappings do not require dirty-tracking. */ in folio_fast_pin_allowed()
2403 * GUP-fast disables IRQs. When IRQS are disabled, RCU grace periods in folio_fast_pin_allowed()
2417 mapping = READ_ONCE(folio->mapping); in folio_fast_pin_allowed()
2421 * if this mapping is safe - fall back to slow path to determine how to in folio_fast_pin_allowed()
2433 * At this point, we know the mapping is non-null and points to an in folio_fast_pin_allowed()
2444 while ((*nr) - nr_start) { in undo_dev_pagemap()
2445 struct page *page = pages[--(*nr)]; in undo_dev_pagemap()
2456 * try_grab_folio_fast() - Attempt to get or pin a folio in fast path.
2473 * FOLL_PIN on single-page folios: folio's refcount will be incremented by
2504 * Don't take a pin on the zero page - it's not going anywhere in try_grab_folio_fast()
2521 if (!put_devmap_managed_page_refs(&folio->page, refs)) in try_grab_folio_fast()
2535 atomic_add(refs, &folio->_pincount); in try_grab_folio_fast()
2538 refs * (GUP_PIN_COUNTING_BIAS - 1)); in try_grab_folio_fast()
2540 * Adjust the pincount before re-checking the PTE for changes. in try_grab_folio_fast()
2553 * Fast-gup relies on pte change detection to avoid concurrent pgtable
2556 * To pin the page, fast-gup needs to do below in order:
2560 * with fast-gup, we need to do (1) clear pte, then (2) check whether page
2563 * Above will work for all pte-level operations, including THP split.
2565 * For THP collapse, it's a bit more complicated because fast-gup may be
2588 * Always fallback to ordinary GUP on PROT_NONE-mapped pages: in gup_pte_range()
2590 * either way: otherwise, GUP-fast might succeed in in gup_pte_range()
2643 * see Documentation/core-api/pin_user_pages.rst for in gup_pte_range()
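A condensed illustration of the ordering described in the comment above: (1) take a speculative reference on the page, then (2) re-read the PTE and back off if it changed. This is not the kernel's gup_pte_range(); the names and the simplified refcounting are assumptions for the sketch:

#include <linux/mm.h>
#include <linux/pgtable.h>

static bool demo_pin_pte_lockless(pte_t *ptep, struct page **pagep)
{
        pte_t pte = ptep_get_lockless(ptep);
        struct folio *folio;

        if (!pte_present(pte))
                return false;

        folio = page_folio(pte_page(pte));
        if (!folio_try_get(folio))              /* (1) speculative ref */
                return false;

        if (unlikely(pte_val(ptep_get(ptep)) != pte_val(pte))) {
                folio_put(folio);               /* (2) PTE changed: back off */
                return false;
        }

        *pagep = pte_page(pte);         /* the PTE was stable across the grab */
        return true;
}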
2789 unsigned long __boundary = (addr + sz) & ~(sz-1); in hugepte_addr_end()
2790 return (__boundary - 1 < end - 1) ? __boundary : end; in hugepte_addr_end()
2803 pte_end = (addr + sz) & ~(sz-1); in gup_hugepte()
2815 page = nth_page(pte_page(pte), (addr & (sz - 1)) >> PAGE_SHIFT); in gup_hugepte()
2832 if (!pte_write(pte) && gup_must_unshare(NULL, flags, &folio->page)) { in gup_hugepte()
2902 if (!pmd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) { in gup_huge_pmd()
2947 if (!pud_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) { in gup_huge_pud()
2982 if (!pgd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) { in gup_huge_pgd()
3095 pgdp = pgd_offset(current->mm, addr); in gup_pgd_range()
3146 seq = raw_read_seqcount(&current->mm->write_protect_seq); in lockless_pages_from_mm()
3157 * include/asm-generic/tlb.h for more details. in lockless_pages_from_mm()
3171 if (read_seqcount_retry(&current->mm->write_protect_seq, seq)) { in lockless_pages_from_mm()
3195 return -EINVAL; in internal_get_user_pages_fast()
3198 mm_set_has_pinned_flag(&current->mm->flags); in internal_get_user_pages_fast()
3201 might_lock_read(&current->mm->mmap_lock); in internal_get_user_pages_fast()
3206 return -EOVERFLOW; in internal_get_user_pages_fast()
3208 return -EFAULT; in internal_get_user_pages_fast()
3210 return -EFAULT; in internal_get_user_pages_fast()
3219 ret = __gup_longterm_locked(current->mm, start, nr_pages - nr_pinned, in internal_get_user_pages_fast()
3225 * returning -errno is not an option in internal_get_user_pages_fast()
3235 * get_user_pages_fast_only() - pin user pages in memory
3236 * @start: starting user address
3242 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
3248 * Careful, careful! COW breaking can go either way, so a non-write
3264 return -EINVAL; in get_user_pages_fast_only()
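The "fast only" variant never falls back to the sleeping slow path, which is why it is safe where blocking is not allowed. A hedged sketch of the usual two-step pattern (similar in spirit to KVM's page-fault path; the wrapper name and the can_sleep parameter are invented):

#include <linux/mm.h>

static struct page *demo_get_page(unsigned long addr, bool write, bool can_sleep)
{
        unsigned int flags = write ? FOLL_WRITE : 0;
        struct page *page;

        /* Never takes mmap_lock and never sleeps; may simply fail. */
        if (get_user_pages_fast_only(addr, 1, flags, &page) == 1)
                return page;

        if (!can_sleep)
                return NULL;

        /* Sleepable fallback that may fault the page in. */
        if (get_user_pages_fast(addr, 1, flags, &page) == 1)
                return page;

        return NULL;    /* caller releases a returned page with put_page() */
}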
3271 * get_user_pages_fast() - pin user pages in memory
3272 * @start: starting user address
3278 * Attempt to pin user pages in memory without taking mm->mmap_lock.
3284 * -errno.
3296 return -EINVAL; in get_user_pages_fast()
3302 * pin_user_pages_fast() - pin user pages in memory without taking locks
3304 * @start: starting user address
3315 * see Documentation/core-api/pin_user_pages.rst for further details.
3324 return -EINVAL; in pin_user_pages_fast()
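For long-lived pins (for example DMA buffers registered once and reused), Documentation/core-api/pin_user_pages.rst recommends the FOLL_PIN-based API together with FOLL_LONGTERM; pin_user_pages*() set FOLL_PIN internally, and the caller adds FOLL_LONGTERM. A sketch with an invented structure and function names:

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>

struct demo_umem {
        struct page **pages;
        int npages;
};

static int demo_umem_pin(struct demo_umem *umem, unsigned long start, int npages)
{
        int pinned;

        umem->pages = kvcalloc(npages, sizeof(*umem->pages), GFP_KERNEL);
        if (!umem->pages)
                return -ENOMEM;

        pinned = pin_user_pages_fast(start, npages,
                                     FOLL_WRITE | FOLL_LONGTERM, umem->pages);
        if (pinned != npages) {
                if (pinned > 0)
                        unpin_user_pages(umem->pages, pinned);
                kvfree(umem->pages);
                return pinned < 0 ? pinned : -EFAULT;
        }
        umem->npages = pinned;
        return 0;
}

static void demo_umem_unpin(struct demo_umem *umem, bool dirtied)
{
        unpin_user_pages_dirty_lock(umem->pages, umem->npages, dirtied);
        kvfree(umem->pages);
}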
3330 * pin_user_pages_remote() - pin pages of a remote process
3333 * @start: starting user address
3347 * see Documentation/core-api/pin_user_pages.rst for details.
3369 * pin_user_pages() - pin user pages in memory for use by other devices
3371 * @start: starting user address
3381 * see Documentation/core-api/pin_user_pages.rst for details.
3393 return __gup_longterm_locked(current->mm, start, nr_pages, in pin_user_pages()
3415 return __gup_longterm_locked(current->mm, start, nr_pages, pages, in pin_user_pages_unlocked()