Lines matching refs: pages
(Cross-reference of mm/gup.c: each entry gives the source line number, the matching line, and the enclosing function; "argument" marks lines where pages appears as a function parameter.)

33 static inline void sanity_check_pinned_pages(struct page **pages, in sanity_check_pinned_pages() argument
51 for (; npages; npages--, pages++) { in sanity_check_pinned_pages()
52 struct page *page = *pages; in sanity_check_pinned_pages()
270 void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages, in unpin_user_pages_dirty_lock() argument
278 unpin_user_pages(pages, npages); in unpin_user_pages_dirty_lock()
282 sanity_check_pinned_pages(pages, npages); in unpin_user_pages_dirty_lock()
284 folio = gup_folio_next(pages, npages, i, &nr); in unpin_user_pages_dirty_lock()
355 static void unpin_user_pages_lockless(struct page **pages, unsigned long npages) in unpin_user_pages_lockless() argument
367 folio = gup_folio_next(pages, npages, i, &nr); in unpin_user_pages_lockless()
381 void unpin_user_pages(struct page **pages, unsigned long npages) in unpin_user_pages() argument
395 sanity_check_pinned_pages(pages, npages); in unpin_user_pages()
397 folio = gup_folio_next(pages, npages, i, &nr); in unpin_user_pages()
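
The unpin_user_pages*() entries above are the release side of FOLL_PIN. A minimal sketch of the usual pairing with pin_user_pages_fast() follows; the demo_pin_and_release() wrapper, its parameters, and the elided "use the pages" step are illustrative assumptions, not taken from the source:

    #include <linux/mm.h>
    #include <linux/slab.h>

    /* Pin a user buffer, use it, then dirty and unpin it in one call. */
    static int demo_pin_and_release(unsigned long uaddr, int nr_pages, bool wrote)
    {
            struct page **pages;
            int pinned;

            pages = kvmalloc_array(nr_pages, sizeof(*pages), GFP_KERNEL);
            if (!pages)
                    return -ENOMEM;

            pinned = pin_user_pages_fast(uaddr, nr_pages,
                                         wrote ? FOLL_WRITE : 0, pages);
            if (pinned < 0) {
                    kvfree(pages);
                    return pinned;
            }

            /* ... access pages[0..pinned-1], e.g. map them for DMA ... */

            /* Marks the pages dirty (if requested) and drops the pin references. */
            unpin_user_pages_dirty_lock(pages, pinned, wrote);
            kvfree(pages);
            return pinned;
    }
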
1098 unsigned int gup_flags, struct page **pages, in __get_user_pages() argument
1110 VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN))); in __get_user_pages()
1139 pages ? &page : NULL); in __get_user_pages()
1189 if (pages) { in __get_user_pages()
1202 if (pages) { in __get_user_pages()
1238 pages[i + j] = subpage; in __get_user_pages()
1394 struct page **pages, in __get_user_pages_locked() argument
1426 if (pages && !(flags & FOLL_PIN)) in __get_user_pages_locked()
1431 ret = __get_user_pages(mm, start, nr_pages, flags, pages, in __get_user_pages_locked()
1464 if (likely(pages)) in __get_user_pages_locked()
1465 pages += ret; in __get_user_pages_locked()
1496 pages, locked); in __get_user_pages_locked()
1512 if (likely(pages)) in __get_user_pages_locked()
1513 pages++; in __get_user_pages_locked()
1713 unsigned long nr_pages, struct page **pages, in __get_user_pages_locked() argument
1753 if (pages) { in __get_user_pages_locked()
1754 pages[i] = virt_to_page((void *)start); in __get_user_pages_locked()
1755 if (pages[i]) in __get_user_pages_locked()
1756 get_page(pages[i]); in __get_user_pages_locked()
1952 struct page **pages) in collect_longterm_unpinnable_pages() argument
1959 struct folio *folio = page_folio(pages[i]); in collect_longterm_unpinnable_pages()
2003 struct page **pages) in migrate_longterm_unpinnable_pages() argument
2009 struct folio *folio = page_folio(pages[i]); in migrate_longterm_unpinnable_pages()
2016 pages[i] = NULL; in migrate_longterm_unpinnable_pages()
2035 unpin_user_page(pages[i]); in migrate_longterm_unpinnable_pages()
2036 pages[i] = NULL; in migrate_longterm_unpinnable_pages()
2059 if (pages[i]) in migrate_longterm_unpinnable_pages()
2060 unpin_user_page(pages[i]); in migrate_longterm_unpinnable_pages()
2083 struct page **pages) in check_and_migrate_movable_pages() argument
2089 nr_pages, pages); in check_and_migrate_movable_pages()
2094 pages); in check_and_migrate_movable_pages()
2098 struct page **pages) in check_and_migrate_movable_pages() argument
2111 struct page **pages, in __gup_longterm_locked() argument
2119 return __get_user_pages_locked(mm, start, nr_pages, pages, in __gup_longterm_locked()
2125 pages, locked, in __gup_longterm_locked()
2133 rc = check_and_migrate_movable_pages(nr_pinned_pages, pages); in __gup_longterm_locked()
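
collect_longterm_unpinnable_pages(), migrate_longterm_unpinnable_pages(), check_and_migrate_movable_pages() and __gup_longterm_locked() above implement FOLL_LONGTERM: pages that must not stay pinned indefinitely (CMA, ZONE_MOVABLE, device-coherent) are migrated before the pin is returned. A hedged sketch of a caller that ends up on this path, using the no-vmas signatures shown in this listing; demo_longterm_pin() and the memory-registration use case are illustrative:

    #include <linux/mm.h>

    /* Long-term pin (e.g. for memory registration); release with unpin_user_pages(). */
    static long demo_longterm_pin(unsigned long uaddr, unsigned long nr_pages,
                                  struct page **pages)
    {
            /*
             * FOLL_LONGTERM sends pin_user_pages() through __gup_longterm_locked(),
             * which retries via check_and_migrate_movable_pages() until every
             * pinned page may legitimately remain pinned long term.
             */
            return pin_user_pages(uaddr, nr_pages, FOLL_WRITE | FOLL_LONGTERM, pages);
    }
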
2143 static bool is_valid_gup_args(struct page **pages, int *locked, in is_valid_gup_args() argument
2177 if (WARN_ON_ONCE((gup_flags & (FOLL_GET | FOLL_PIN)) && !pages)) in is_valid_gup_args()
2248 unsigned int gup_flags, struct page **pages, in get_user_pages_remote() argument
2253 if (!is_valid_gup_args(pages, locked, &gup_flags, in get_user_pages_remote()
2257 return __get_user_pages_locked(mm, start, nr_pages, pages, in get_user_pages_remote()
2266 unsigned int gup_flags, struct page **pages, in get_user_pages_remote() argument
2288 unsigned int gup_flags, struct page **pages) in get_user_pages() argument
2292 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_TOUCH)) in get_user_pages()
2295 return __get_user_pages_locked(current->mm, start, nr_pages, pages, in get_user_pages()
2316 struct page **pages, unsigned int gup_flags) in get_user_pages_unlocked() argument
2320 if (!is_valid_gup_args(pages, NULL, &gup_flags, in get_user_pages_unlocked()
2324 return __get_user_pages_locked(current->mm, start, nr_pages, pages, in get_user_pages_unlocked()
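
get_user_pages_remote(), get_user_pages() and get_user_pages_unlocked() are the FOLL_GET entry points listed above. A minimal sketch of the remote variant, which walks another process's mm; demo_get_remote_page() and the single-page request are illustrative, while the *locked protocol (the helper may drop mmap_lock and report that through the pointer) mirrors the calls shown here:

    #include <linux/mm.h>
    #include <linux/err.h>

    /* Grab one page of a foreign address space; caller later drops it with put_page(). */
    static struct page *demo_get_remote_page(struct mm_struct *mm, unsigned long uaddr)
    {
            struct page *page;
            int locked = 1;
            long ret;

            if (mmap_read_lock_killable(mm))
                    return ERR_PTR(-EINTR);

            ret = get_user_pages_remote(mm, uaddr, 1, FOLL_WRITE, &page, &locked);
            if (locked)
                    mmap_read_unlock(mm);   /* skip if the helper already dropped it */

            return ret == 1 ? page : ERR_PTR(ret < 0 ? ret : -EFAULT);
    }
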
2442 struct page **pages) in undo_dev_pagemap() argument
2445 struct page *page = pages[--(*nr)]; in undo_dev_pagemap()
2573 struct page **pages, int *nr) in gup_pte_range() argument
2606 undo_dev_pagemap(nr, nr_start, flags, pages); in gup_pte_range()
2654 pages[*nr] = page; in gup_pte_range()
2679 struct page **pages, int *nr) in gup_pte_range() argument
2688 struct page **pages, int *nr) in __gup_device_huge() argument
2698 undo_dev_pagemap(nr, nr_start, flags, pages); in __gup_device_huge()
2703 undo_dev_pagemap(nr, nr_start, flags, pages); in __gup_device_huge()
2708 pages[*nr] = page; in __gup_device_huge()
2710 undo_dev_pagemap(nr, nr_start, flags, pages); in __gup_device_huge()
2723 struct page **pages, int *nr) in __gup_device_huge_pmd() argument
2729 if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr)) in __gup_device_huge_pmd()
2733 undo_dev_pagemap(nr, nr_start, flags, pages); in __gup_device_huge_pmd()
2741 struct page **pages, int *nr) in __gup_device_huge_pud() argument
2747 if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr)) in __gup_device_huge_pud()
2751 undo_dev_pagemap(nr, nr_start, flags, pages); in __gup_device_huge_pud()
2759 struct page **pages, int *nr) in __gup_device_huge_pmd() argument
2767 struct page **pages, int *nr) in __gup_device_huge_pud() argument
2775 unsigned long end, struct page **pages) in record_subpages() argument
2780 pages[nr] = nth_page(page, nr); in record_subpages()
2795 struct page **pages, int *nr) in gup_hugepte() argument
2816 refs = record_subpages(page, addr, end, pages + *nr); in gup_hugepte()
2844 struct page **pages, int *nr) in gup_huge_pd() argument
2853 if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr)) in gup_huge_pd()
2862 struct page **pages, int *nr) in gup_huge_pd() argument
2870 struct page **pages, int *nr) in gup_huge_pmd() argument
2883 pages, nr); in gup_huge_pmd()
2887 refs = record_subpages(page, addr, end, pages + *nr); in gup_huge_pmd()
2914 struct page **pages, int *nr) in gup_huge_pud() argument
2927 pages, nr); in gup_huge_pud()
2931 refs = record_subpages(page, addr, end, pages + *nr); in gup_huge_pud()
2959 struct page **pages, int *nr) in gup_huge_pgd() argument
2971 refs = record_subpages(page, addr, end, pages + *nr); in gup_huge_pgd()
2998 unsigned int flags, struct page **pages, int *nr) in gup_pmd_range() argument
3018 pages, nr)) in gup_pmd_range()
3027 PMD_SHIFT, next, flags, pages, nr)) in gup_pmd_range()
3029 } else if (!gup_pte_range(pmd, pmdp, addr, next, flags, pages, nr)) in gup_pmd_range()
3037 unsigned int flags, struct page **pages, int *nr) in gup_pud_range() argument
3051 pages, nr)) in gup_pud_range()
3055 PUD_SHIFT, next, flags, pages, nr)) in gup_pud_range()
3057 } else if (!gup_pmd_range(pudp, pud, addr, next, flags, pages, nr)) in gup_pud_range()
3065 unsigned int flags, struct page **pages, int *nr) in gup_p4d_range() argument
3080 P4D_SHIFT, next, flags, pages, nr)) in gup_p4d_range()
3082 } else if (!gup_pud_range(p4dp, p4d, addr, next, flags, pages, nr)) in gup_p4d_range()
3090 unsigned int flags, struct page **pages, int *nr) in gup_pgd_range() argument
3104 pages, nr)) in gup_pgd_range()
3108 PGDIR_SHIFT, next, flags, pages, nr)) in gup_pgd_range()
3110 } else if (!gup_p4d_range(pgdp, pgd, addr, next, flags, pages, nr)) in gup_pgd_range()
3116 unsigned int flags, struct page **pages, int *nr) in gup_pgd_range() argument
3135 struct page **pages) in lockless_pages_from_mm() argument
3163 gup_pgd_range(start, end, gup_flags, pages, &nr_pinned); in lockless_pages_from_mm()
3172 unpin_user_pages_lockless(pages, nr_pinned); in lockless_pages_from_mm()
3175 sanity_check_pinned_pages(pages, nr_pinned); in lockless_pages_from_mm()
3184 struct page **pages) in internal_get_user_pages_fast() argument
3212 nr_pinned = lockless_pages_from_mm(start, end, gup_flags, pages); in internal_get_user_pages_fast()
3218 pages += nr_pinned; in internal_get_user_pages_fast()
3220 pages, &locked, in internal_get_user_pages_fast()
3253 unsigned int gup_flags, struct page **pages) in get_user_pages_fast_only() argument
3262 if (!is_valid_gup_args(pages, NULL, &gup_flags, in get_user_pages_fast_only()
3266 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages); in get_user_pages_fast_only()
3287 unsigned int gup_flags, struct page **pages) in get_user_pages_fast() argument
3295 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_GET)) in get_user_pages_fast()
3297 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages); in get_user_pages_fast()
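
get_user_pages_fast() and get_user_pages_fast_only() wrap internal_get_user_pages_fast(): first the lockless walk in lockless_pages_from_mm(), then a slow-path fallback for whatever was not pinned. A short usage sketch; demo_gup_fast() is hypothetical, and because these helpers take FOLL_GET references rather than FOLL_PIN, each page is dropped with put_page():

    #include <linux/mm.h>

    static int demo_gup_fast(unsigned long uaddr, int nr_pages, struct page **pages)
    {
            int got, i;

            got = get_user_pages_fast(uaddr, nr_pages, FOLL_WRITE, pages);
            if (got < 0)
                    return got;

            /* ... use pages[0..got-1]; got may be fewer than nr_pages ... */

            for (i = 0; i < got; i++)
                    put_page(pages[i]);
            return got;
    }
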
3321 unsigned int gup_flags, struct page **pages) in pin_user_pages_fast() argument
3323 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN)) in pin_user_pages_fast()
3325 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages); in pin_user_pages_fast()
3354 unsigned int gup_flags, struct page **pages, in pin_user_pages_remote() argument
3359 if (!is_valid_gup_args(pages, locked, &gup_flags, in pin_user_pages_remote()
3362 return __gup_longterm_locked(mm, start, nr_pages, pages, in pin_user_pages_remote()
3387 unsigned int gup_flags, struct page **pages) in pin_user_pages() argument
3391 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN)) in pin_user_pages()
3394 pages, &locked, gup_flags); in pin_user_pages()
3407 struct page **pages, unsigned int gup_flags) in pin_user_pages_unlocked() argument
3411 if (!is_valid_gup_args(pages, NULL, &gup_flags, in pin_user_pages_unlocked()
3415 return __gup_longterm_locked(current->mm, start, nr_pages, pages, in pin_user_pages_unlocked()
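
pin_user_pages(), pin_user_pages_remote() and pin_user_pages_unlocked() all funnel into __gup_longterm_locked(), as the entries above show. A closing sketch to contrast the release rule with the FOLL_GET example earlier; demo_pin_unlocked() is illustrative:

    #include <linux/mm.h>

    /* pin_user_pages_unlocked() takes and drops mmap_lock itself. */
    static long demo_pin_unlocked(unsigned long uaddr, unsigned long nr_pages,
                                  struct page **pages)
    {
            long pinned = pin_user_pages_unlocked(uaddr, nr_pages, pages, FOLL_WRITE);

            if (pinned < 0)
                    return pinned;

            /* ... use the pages ... */

            /* FOLL_PIN pages must be released with unpin_*, never put_page(). */
            unpin_user_pages(pages, pinned);
            return pinned;
    }
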