Lines matching refs: fbatch (identifier search results; all four matched functions are from mm/truncate.c in the Linux kernel)

In truncate_folio_batch_exceptionals():
    60    struct folio_batch *fbatch, pgoff_t *indices)    (fbatch: argument)
    69    for (j = 0; j < folio_batch_count(fbatch); j++)
    70            if (xa_is_value(fbatch->folios[j]))
    73    if (j == folio_batch_count(fbatch))
    82    for (i = j; i < folio_batch_count(fbatch); i++) {
    83            struct folio *folio = fbatch->folios[i];
    87            fbatch->folios[j++] = folio;
   105    fbatch->nr = j;
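
The matches above show the batch-compaction idiom used by truncate_folio_batch_exceptionals(): scan the folio_batch for exceptional (xa_is_value()) entries, then slide the remaining real folios down over them and trim fbatch->nr so only real folios are left for the caller. Below is a minimal userspace analogue of just that compaction step; struct small_batch, MAX_BATCH, is_value() and drop_value_entries() are illustrative stand-ins, not kernel API, and the real kernel function also processes the value entries it skips here.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_BATCH 31    /* roughly PAGEVEC_SIZE-sized, purely illustrative */

    /* Illustrative stand-in for struct folio_batch: entries[] may mix real
     * pointers with tagged "value" entries, nr counts the used slots. */
    struct small_batch {
        unsigned int nr;
        void *entries[MAX_BATCH];
    };

    /* Stand-in for xa_is_value(): here a value entry has its low bit set. */
    static bool is_value(const void *entry)
    {
        return (uintptr_t)entry & 1;
    }

    /* Compact the batch in place, dropping value entries, mirroring the
     * two-index (i reads, j writes) loop of truncate_folio_batch_exceptionals(). */
    static void drop_value_entries(struct small_batch *batch)
    {
        unsigned int i, j;

        for (j = 0; j < batch->nr; j++)     /* find the first value entry */
            if (is_value(batch->entries[j]))
                break;
        if (j == batch->nr)                 /* none found: nothing to do */
            return;

        for (i = j; i < batch->nr; i++) {
            void *entry = batch->entries[i];

            if (is_value(entry))
                continue;                   /* the kernel handles these here */
            batch->entries[j++] = entry;    /* keep real entries, packed */
        }
        batch->nr = j;                      /* trim the batch */
    }

    int main(void)
    {
        int a, b, c;
        struct small_batch batch = {
            .nr = 4,
            .entries = { &a, (void *)1, &b, &c },
        };

        drop_value_entries(&batch);
        printf("%u real entries kept\n", batch.nr);
        return 0;
    }

Running this prints "3 real entries kept": the tagged entry is squeezed out and the three real pointers stay packed at the front.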
In truncate_inode_pages_range():
   334    struct folio_batch fbatch;    (fbatch: local)
   361    folio_batch_init(&fbatch);
   364                    &fbatch, indices)) {
   365            truncate_folio_batch_exceptionals(mapping, &fbatch, indices);
   366            for (i = 0; i < folio_batch_count(&fbatch); i++)
   367                    truncate_cleanup_folio(fbatch.folios[i]);
   368            delete_from_page_cache_batch(mapping, &fbatch);
   369            for (i = 0; i < folio_batch_count(&fbatch); i++)
   370                    folio_unlock(fbatch.folios[i]);
   371            folio_batch_release(&fbatch);
   403    if (!find_get_entries(mapping, &index, end - 1, &fbatch,
   413    for (i = 0; i < folio_batch_count(&fbatch); i++) {
   414            struct folio *folio = fbatch.folios[i];
   427    truncate_folio_batch_exceptionals(mapping, &fbatch, indices);
   428    folio_batch_release(&fbatch);
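
Taken together, the truncate_inode_pages_range() matches trace the usual batched page-cache loop: initialise the batch once, fill it with find_lock_entries(), strip exceptional entries, clean up and batch-delete the locked folios, unlock them, and release the batch before going round again. The sketch below is reconstructed only from the matched lines: line 364 is just the tail of the find_lock_entries() call, so its exact bounds are inferred, and the locals start, end, index and the PAGEVEC_SIZE-sized indices[] array are assumed from context. Treat it as an outline rather than a buildable excerpt.

    struct folio_batch fbatch;
    pgoff_t indices[PAGEVEC_SIZE];
    pgoff_t index = start;
    int i;

    folio_batch_init(&fbatch);
    while (find_lock_entries(mapping, &index, end - 1,
                             &fbatch, indices)) {    /* line 364 shows only this tail */
        /* Handle shadow/value entries first, leaving only real folios. */
        truncate_folio_batch_exceptionals(mapping, &fbatch, indices);
        for (i = 0; i < folio_batch_count(&fbatch); i++)
            truncate_cleanup_folio(fbatch.folios[i]);
        /* One batched removal from the page cache instead of N single deletes. */
        delete_from_page_cache_batch(mapping, &fbatch);
        for (i = 0; i < folio_batch_count(&fbatch); i++)
            folio_unlock(fbatch.folios[i]);
        folio_batch_release(&fbatch);
    }

A second pass (the matches at lines 403-428) then uses find_get_entries() for whatever the fast path could not handle, processes each folio individually, and again finishes with truncate_folio_batch_exceptionals() and folio_batch_release().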
In mapping_try_invalidate():
   501    struct folio_batch fbatch;    (fbatch: local)
   507    folio_batch_init(&fbatch);
   508    while (find_lock_entries(mapping, &index, end, &fbatch, indices)) {
   509            for (i = 0; i < folio_batch_count(&fbatch); i++) {
   510                    struct folio *folio = fbatch.folios[i];
   534            folio_batch_remove_exceptionals(&fbatch);
   535            folio_batch_release(&fbatch);
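
mapping_try_invalidate() follows the same lifecycle, but it only tries to invalidate each folio rather than truncating it, and nothing in its loop consumes the xarray value entries, so it strips them with folio_batch_remove_exceptionals() before folio_batch_release() drops the references taken by find_lock_entries() (release must only ever see real folios). A skeleton assembled from the matches; the per-folio body at lines 511-533 is not shown in the listing and is left as a placeholder:

    struct folio_batch fbatch;
    pgoff_t indices[PAGEVEC_SIZE];
    int i;

    folio_batch_init(&fbatch);
    while (find_lock_entries(mapping, &index, end, &fbatch, indices)) {
        for (i = 0; i < folio_batch_count(&fbatch); i++) {
            struct folio *folio = fbatch.folios[i];

            /* Lines 511-533 (elided in the match list): skip or handle
             * value entries and try to evict the locked folio. */
        }
        /* Drop value entries so release only sees real folios ... */
        folio_batch_remove_exceptionals(&fbatch);
        /* ... then drop the references taken by find_lock_entries(). */
        folio_batch_release(&fbatch);
    }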
In invalidate_inode_pages2_range():
   622    struct folio_batch fbatch;    (fbatch: local)
   632    folio_batch_init(&fbatch);
   634    while (find_get_entries(mapping, &index, end, &fbatch, indices)) {
   635            for (i = 0; i < folio_batch_count(&fbatch); i++) {
   636                    struct folio *folio = fbatch.folios[i];
   678            folio_batch_remove_exceptionals(&fbatch);
   679            folio_batch_release(&fbatch);
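
invalidate_inode_pages2_range() reuses the same batch lifecycle but fills the batch with find_get_entries(), which takes references without locking, so each folio has to be locked and dealt with inside the per-folio body that the listing elides (lines 637-677); the tail is identical to mapping_try_invalidate(): remove the exceptional entries, then release the batch. The matched skeleton, with the elided body as a placeholder:

    struct folio_batch fbatch;
    pgoff_t indices[PAGEVEC_SIZE];
    int i;

    folio_batch_init(&fbatch);
    while (find_get_entries(mapping, &index, end, &fbatch, indices)) {
        for (i = 0; i < folio_batch_count(&fbatch); i++) {
            struct folio *folio = fbatch.folios[i];

            /* Lines 637-677 (elided in the match list): handle value
             * entries, then lock and invalidate the folio. */
        }
        folio_batch_remove_exceptionals(&fbatch);
        folio_batch_release(&fbatch);
    }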