Lines matching refs:nr (mm/gup.c)

311 unsigned int nr = 1; in gup_folio_range_next() local
314 nr = min_t(unsigned int, npages - i, in gup_folio_range_next()
317 *ntails = nr; in gup_folio_range_next()
325 unsigned int nr; in gup_folio_next() local
327 for (nr = i + 1; nr < npages; nr++) { in gup_folio_next()
328 if (page_folio(list[nr]) != folio) in gup_folio_next()
332 *ntails = nr - i; in gup_folio_next()
363 unsigned int nr; in unpin_user_pages_dirty_lock() local
371 for (i = 0; i < npages; i += nr) { in unpin_user_pages_dirty_lock()
372 folio = gup_folio_next(pages, npages, i, &nr); in unpin_user_pages_dirty_lock()
398 gup_put_folio(folio, nr, FOLL_PIN); in unpin_user_pages_dirty_lock()
429 unsigned int nr; in unpin_user_page_range_dirty_lock() local
431 for (i = 0; i < npages; i += nr) { in unpin_user_page_range_dirty_lock()
432 folio = gup_folio_range_next(page, npages, i, &nr); in unpin_user_page_range_dirty_lock()
438 gup_put_folio(folio, nr, FOLL_PIN); in unpin_user_page_range_dirty_lock()
447 unsigned int nr; in unpin_user_pages_lockless() local
454 for (i = 0; i < npages; i += nr) { in unpin_user_pages_lockless()
455 folio = gup_folio_next(pages, npages, i, &nr); in unpin_user_pages_lockless()
456 gup_put_folio(folio, nr, FOLL_PIN); in unpin_user_pages_lockless()
473 unsigned int nr; in unpin_user_pages() local
484 for (i = 0; i < npages; i += nr) { in unpin_user_pages()
485 folio = gup_folio_next(pages, npages, i, &nr); in unpin_user_pages()
486 gup_put_folio(folio, nr, FOLL_PIN); in unpin_user_pages()
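
The unpin_user_pages*() entries above all share one loop shape: nr receives, from gup_folio_next() (or gup_folio_range_next()), the number of consecutive entries of pages[] that belong to the same folio, the loop index advances by that amount, and gup_put_folio() drops that many FOLL_PIN references in one go. A minimal sketch of the pattern, assuming the kernel-internal helpers named in the listing:

/*
 * Sketch of the batching pattern shared by the unpin_user_pages*()
 * variants above: gup_folio_next() reports via &nr how many consecutive
 * entries of pages[] starting at index i belong to the same folio, and
 * gup_put_folio() then drops that many FOLL_PIN references at once.
 */
static void unpin_pages_sketch(struct page **pages, unsigned long npages)
{
	unsigned long i;
	struct folio *folio;
	unsigned int nr;

	for (i = 0; i < npages; i += nr) {
		folio = gup_folio_next(pages, npages, i, &nr);
		gup_put_folio(folio, nr, FOLL_PIN);
	}
}

Advancing by nr means each folio's reference count is touched once per run of pages rather than once per page.
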
2529 static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start, in undo_dev_pagemap() argument
2533 while ((*nr) - nr_start) { in undo_dev_pagemap()
2534 struct page *page = pages[--(*nr)]; in undo_dev_pagemap()
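
undo_dev_pagemap() is the rollback for the fast-GUP device paths: it walks pages[] backwards from *nr down to nr_start, releasing every page recorded since the nr_start snapshot, so the caller can return 0 and let the slow path take over. A reconstruction of that loop from the fragments above; the release call is an assumption, not code taken from the listing:

static void undo_dev_pagemap_sketch(int *nr, int nr_start,
				    unsigned int flags, struct page **pages)
{
	/* drop everything recorded since the nr_start snapshot */
	while ((*nr) - nr_start) {
		struct page *page = pages[--(*nr)];

		/* assumed release step; the real code honours flags (pin vs. get) */
		gup_put_folio(page_folio(page), 1, flags);
	}
}
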
2566 struct page **pages, int *nr) in gup_pte_range() argument
2569 int nr_start = *nr, ret = 0; in gup_pte_range()
2599 undo_dev_pagemap(nr, nr_start, flags, pages); in gup_pte_range()
2647 pages[*nr] = page; in gup_pte_range()
2648 (*nr)++; in gup_pte_range()
2672 struct page **pages, int *nr) in gup_pte_range() argument
2681 struct page **pages, int *nr) in __gup_device_huge() argument
2683 int nr_start = *nr; in __gup_device_huge()
2691 undo_dev_pagemap(nr, nr_start, flags, pages); in __gup_device_huge()
2696 undo_dev_pagemap(nr, nr_start, flags, pages); in __gup_device_huge()
2701 pages[*nr] = page; in __gup_device_huge()
2703 undo_dev_pagemap(nr, nr_start, flags, pages); in __gup_device_huge()
2706 (*nr)++; in __gup_device_huge()
2716 struct page **pages, int *nr) in __gup_device_huge_pmd() argument
2719 int nr_start = *nr; in __gup_device_huge_pmd()
2722 if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr)) in __gup_device_huge_pmd()
2726 undo_dev_pagemap(nr, nr_start, flags, pages); in __gup_device_huge_pmd()
2734 struct page **pages, int *nr) in __gup_device_huge_pud() argument
2737 int nr_start = *nr; in __gup_device_huge_pud()
2740 if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr)) in __gup_device_huge_pud()
2744 undo_dev_pagemap(nr, nr_start, flags, pages); in __gup_device_huge_pud()
2752 struct page **pages, int *nr) in __gup_device_huge_pmd() argument
2760 struct page **pages, int *nr) in __gup_device_huge_pud() argument
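
__gup_device_huge() shows how nr_start pairs with undo_dev_pagemap(): *nr is snapshotted before the loop, each successfully recorded page is stored at pages[*nr] and *nr is incremented, and a missing dev_pagemap (or a failed grab) rolls the whole batch back. A simplified sketch under those assumptions, with the actual pinning step elided:

static int gup_device_huge_sketch(unsigned long pfn, unsigned long addr,
				  unsigned long end, unsigned int flags,
				  struct page **pages, int *nr)
{
	int nr_start = *nr;	/* snapshot for rollback */
	struct dev_pagemap *pgmap = NULL;
	int ret = 1;

	do {
		struct page *page = pfn_to_page(pfn);

		pgmap = get_dev_pagemap(pfn, pgmap);
		if (unlikely(!pgmap)) {
			undo_dev_pagemap(nr, nr_start, flags, pages);
			ret = 0;
			break;
		}
		/* taking the actual pin on the page is elided here */
		pages[*nr] = page;
		(*nr)++;
		pfn++;
	} while (addr += PAGE_SIZE, addr != end);

	put_dev_pagemap(pgmap);
	return ret;
}
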
2770 int nr; in record_subpages() local
2772 for (nr = 0; addr != end; nr++, addr += PAGE_SIZE) in record_subpages()
2773 pages[nr] = nth_page(page, nr); in record_subpages()
2775 return nr; in record_subpages()
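
record_subpages() can be read off the listing almost verbatim: it fills pages[] with one pointer per PAGE_SIZE step between addr and end and returns the count, which the huge-page helpers below then add to *nr. A reconstruction from the matched lines; the parameter list is inferred from the call sites (record_subpages(page, addr, end, pages + *nr)):

static int record_subpages(struct page *page, unsigned long addr,
			   unsigned long end, struct page **pages)
{
	int nr;

	/* one entry per base page covered by [addr, end) */
	for (nr = 0; addr != end; nr++, addr += PAGE_SIZE)
		pages[nr] = nth_page(page, nr);

	return nr;
}
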
2788 struct page **pages, int *nr) in gup_hugepte() argument
2809 refs = record_subpages(page, addr, end, pages + *nr); in gup_hugepte()
2830 *nr += refs; in gup_hugepte()
2837 struct page **pages, int *nr) in gup_huge_pd() argument
2846 if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr)) in gup_huge_pd()
2855 struct page **pages, int *nr) in gup_huge_pd() argument
2863 struct page **pages, int *nr) in gup_huge_pmd() argument
2876 pages, nr); in gup_huge_pmd()
2880 refs = record_subpages(page, addr, end, pages + *nr); in gup_huge_pmd()
2900 *nr += refs; in gup_huge_pmd()
2907 struct page **pages, int *nr) in gup_huge_pud() argument
2920 pages, nr); in gup_huge_pud()
2924 refs = record_subpages(page, addr, end, pages + *nr); in gup_huge_pud()
2945 *nr += refs; in gup_huge_pud()
2952 struct page **pages, int *nr) in gup_huge_pgd() argument
2964 refs = record_subpages(page, addr, end, pages + *nr); in gup_huge_pgd()
2985 *nr += refs; in gup_huge_pgd()
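
gup_hugepte(), gup_huge_pmd(), gup_huge_pud() and gup_huge_pgd() all handle nr the same way: record the candidate subpages at pages + *nr, take a reference on the folio, re-check that the page-table entry has not changed, and only then commit with *nr += refs. A sketch of that sequence; try_grab_folio() and the re-check are stand-ins for the real kernel steps, not code taken from the listing:

static int gup_huge_sketch(struct page *page, unsigned long addr,
			   unsigned long end, unsigned int flags,
			   struct page **pages, int *nr)
{
	struct folio *folio;
	int refs;

	/* stage the subpages after whatever has already been collected */
	refs = record_subpages(page, addr, end, pages + *nr);

	folio = try_grab_folio(page, refs, flags);	/* assumed helper */
	if (!folio)
		return 0;

	/* the real helpers re-read the pte/pmd/pud here and drop the refs on change */

	*nr += refs;	/* commit the batch only once everything has succeeded */
	return 1;
}
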
2991 unsigned int flags, struct page **pages, int *nr) in gup_pmd_range() argument
3011 pages, nr)) in gup_pmd_range()
3020 PMD_SHIFT, next, flags, pages, nr)) in gup_pmd_range()
3022 } else if (!gup_pte_range(pmd, pmdp, addr, next, flags, pages, nr)) in gup_pmd_range()
3030 unsigned int flags, struct page **pages, int *nr) in gup_pud_range() argument
3044 pages, nr)) in gup_pud_range()
3048 PUD_SHIFT, next, flags, pages, nr)) in gup_pud_range()
3050 } else if (!gup_pmd_range(pudp, pud, addr, next, flags, pages, nr)) in gup_pud_range()
3058 unsigned int flags, struct page **pages, int *nr) in gup_p4d_range() argument
3073 P4D_SHIFT, next, flags, pages, nr)) in gup_p4d_range()
3075 } else if (!gup_pud_range(p4dp, p4d, addr, next, flags, pages, nr)) in gup_p4d_range()
3083 unsigned int flags, struct page **pages, int *nr) in gup_pgd_range() argument
3097 pages, nr)) in gup_pgd_range()
3101 PGDIR_SHIFT, next, flags, pages, nr)) in gup_pgd_range()
3103 } else if (!gup_p4d_range(pgdp, pgd, addr, next, flags, pages, nr)) in gup_pgd_range()
3109 unsigned int flags, struct page **pages, int *nr) in gup_pgd_range() argument
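
Finally, the gup_*_range() entries show how the same int *nr is threaded through every level of the lockless page-table walk: each level iterates its entries over [addr, end), descends with the same pages/nr pair, and a zero return from below aborts the walk so the caller falls back to the slow path with *nr pages already collected. One level of that walk, heavily simplified (the huge-entry and devmap branches are elided, and pmd_offset() stands in for the lockless variant the real code uses):

static int gup_pmd_range_sketch(pud_t *pudp, pud_t pud, unsigned long addr,
				unsigned long end, unsigned int flags,
				struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_offset(&pud, addr);
	do {
		pmd_t pmd = READ_ONCE(*pmdp);

		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd))
			return 0;

		/* huge-pmd / devmap cases elided; see gup_huge_pmd() above */
		if (!gup_pte_range(pmd, pmdp, addr, next, flags, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}
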