Searched refs:fbatch (Results 1 – 25 of 31) sorted by relevance

/openbmc/linux/include/linux/
pagevec.h
40 static inline void folio_batch_init(struct folio_batch *fbatch) in folio_batch_init() argument
42 fbatch->nr = 0; in folio_batch_init()
43 fbatch->percpu_pvec_drained = false; in folio_batch_init()
46 static inline void folio_batch_reinit(struct folio_batch *fbatch) in folio_batch_reinit() argument
48 fbatch->nr = 0; in folio_batch_reinit()
51 static inline unsigned int folio_batch_count(struct folio_batch *fbatch) in folio_batch_count() argument
53 return fbatch->nr; in folio_batch_count()
56 static inline unsigned int folio_batch_space(struct folio_batch *fbatch) in folio_batch_space() argument
58 return PAGEVEC_SIZE - fbatch->nr; in folio_batch_space()
71 static inline unsigned folio_batch_add(struct folio_batch *fbatch, in folio_batch_add() argument
[all …]
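The pagevec.h helpers above are typically used together: initialise a batch, add folios until folio_batch_add() reports no space left, then release. A minimal sketch of that fill-and-release pattern follows; example_release_folios() is a hypothetical helper, not part of the sources listed, and it assumes the caller holds a reference on each folio it passes in.

#include <linux/pagevec.h>

static void example_release_folios(struct folio **folios, unsigned int nr)
{
	struct folio_batch fbatch;
	unsigned int i;

	folio_batch_init(&fbatch);
	for (i = 0; i < nr; i++) {
		/* folio_batch_add() returns the slots left; 0 means the batch is full */
		if (!folio_batch_add(&fbatch, folios[i]))
			folio_batch_release(&fbatch);
	}
	/* drop the references still queued in the batch */
	if (folio_batch_count(&fbatch))
		folio_batch_release(&fbatch);
}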
pagemap.h
862 pgoff_t end, struct folio_batch *fbatch);
864 pgoff_t *start, pgoff_t end, struct folio_batch *fbatch);
866 pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch);
1222 struct folio_batch *fbatch);
/openbmc/linux/mm/
swap.c
53 struct folio_batch fbatch; member
195 static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn) in folio_batch_move_lru() argument
201 for (i = 0; i < folio_batch_count(fbatch); i++) { in folio_batch_move_lru()
202 struct folio *folio = fbatch->folios[i]; in folio_batch_move_lru()
216 folios_put(fbatch->folios, folio_batch_count(fbatch)); in folio_batch_move_lru()
217 folio_batch_reinit(fbatch); in folio_batch_move_lru()
220 static void folio_batch_add_and_move(struct folio_batch *fbatch, in folio_batch_add_and_move() argument
223 if (folio_batch_add(fbatch, folio) && !folio_test_large(folio) && in folio_batch_add_and_move()
226 folio_batch_move_lru(fbatch, move_fn); in folio_batch_add_and_move()
250 struct folio_batch *fbatch; in folio_rotate_reclaimable() local
[all …]
truncate.c
60 struct folio_batch *fbatch, pgoff_t *indices) in truncate_folio_batch_exceptionals() argument
69 for (j = 0; j < folio_batch_count(fbatch); j++) in truncate_folio_batch_exceptionals()
70 if (xa_is_value(fbatch->folios[j])) in truncate_folio_batch_exceptionals()
73 if (j == folio_batch_count(fbatch)) in truncate_folio_batch_exceptionals()
82 for (i = j; i < folio_batch_count(fbatch); i++) { in truncate_folio_batch_exceptionals()
83 struct folio *folio = fbatch->folios[i]; in truncate_folio_batch_exceptionals()
87 fbatch->folios[j++] = folio; in truncate_folio_batch_exceptionals()
105 fbatch->nr = j; in truncate_folio_batch_exceptionals()
334 struct folio_batch fbatch; in truncate_inode_pages_range() local
361 folio_batch_init(&fbatch); in truncate_inode_pages_range()
[all …]
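The truncate.c hits above compact the batch in place: slots holding xarray value entries (shadow or swap entries) are skipped and fbatch->nr is rewritten to the number of real folios kept. A hedged sketch of that compaction step, with a hypothetical helper name:

#include <linux/pagevec.h>
#include <linux/xarray.h>

static void example_drop_value_entries(struct folio_batch *fbatch)
{
	unsigned int i, j = 0;

	for (i = 0; i < folio_batch_count(fbatch); i++) {
		struct folio *folio = fbatch->folios[i];

		/* skip slots that hold xarray values rather than folios */
		if (xa_is_value(folio))
			continue;
		fbatch->folios[j++] = folio;
	}
	fbatch->nr = j;
}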
mlock.c
33 struct folio_batch fbatch; member
186 static void mlock_folio_batch(struct folio_batch *fbatch) in mlock_folio_batch() argument
193 for (i = 0; i < folio_batch_count(fbatch); i++) { in mlock_folio_batch()
194 folio = fbatch->folios[i]; in mlock_folio_batch()
197 fbatch->folios[i] = folio; in mlock_folio_batch()
209 folios_put(fbatch->folios, folio_batch_count(fbatch)); in mlock_folio_batch()
210 folio_batch_reinit(fbatch); in mlock_folio_batch()
215 struct folio_batch *fbatch; in mlock_drain_local() local
218 fbatch = this_cpu_ptr(&mlock_fbatch.fbatch); in mlock_drain_local()
219 if (folio_batch_count(fbatch)) in mlock_drain_local()
[all …]
filemap.c
269 * @fbatch: batch of folios to delete
272 * @fbatch from the mapping. The function expects @fbatch to be sorted
274 * It tolerates holes in @fbatch (mapping entries at those indices are not
280 struct folio_batch *fbatch) in page_cache_delete_batch() argument
282 XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index); in page_cache_delete_batch()
289 if (i >= folio_batch_count(fbatch)) in page_cache_delete_batch()
302 if (folio != fbatch->folios[i]) { in page_cache_delete_batch()
304 fbatch->folios[i]->index, folio); in page_cache_delete_batch()
321 struct folio_batch *fbatch) in delete_from_page_cache_batch() argument
508 struct folio_batch fbatch; __filemap_fdatawait_range() local
2051 find_get_entries(struct address_space *mapping, pgoff_t *start, pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices) find_get_entries() argument
2097 find_lock_entries(struct address_space *mapping, pgoff_t *start, pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices) find_lock_entries() argument
2162 filemap_get_folios(struct address_space *mapping, pgoff_t *start, pgoff_t end, struct folio_batch *fbatch) filemap_get_folios() argument
2215 filemap_get_folios_contig(struct address_space *mapping, pgoff_t *start, pgoff_t end, struct folio_batch *fbatch) filemap_get_folios_contig() argument
2286 filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start, pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch) filemap_get_folios_tag() argument
2356 filemap_get_read_batch(struct address_space *mapping, pgoff_t index, pgoff_t max, struct folio_batch *fbatch) filemap_get_read_batch() argument
2506 filemap_create_folio(struct file *file, struct address_space *mapping, pgoff_t index, struct folio_batch *fbatch) filemap_create_folio() argument
2562 filemap_get_pages(struct kiocb *iocb, size_t count, struct folio_batch *fbatch, bool need_uptodate) filemap_get_pages() argument
2650 struct folio_batch fbatch; filemap_read() local
2927 struct folio_batch fbatch; filemap_splice_read() local
[all …]
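filemap_get_folios() and the variants defined above fill the caller's batch (at most PAGEVEC_SIZE entries from a freshly initialised batch), take a reference on each folio, and advance *start past the last folio returned, which is why the shmem, hugetlbfs and ext4 callers below loop until the function returns 0 and release the batch on every pass. A sketch of that loop, assuming a hypothetical example_walk_mapping() helper:

#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched.h>

static void example_walk_mapping(struct address_space *mapping)
{
	struct folio_batch fbatch;
	pgoff_t index = 0;
	unsigned int i;

	folio_batch_init(&fbatch);
	while (filemap_get_folios(mapping, &index, ULONG_MAX, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];

			/* inspect or act on each folio; references are held by the batch */
			(void)folio;
		}
		/* drop the references and reset the batch for the next pass */
		folio_batch_release(&fbatch);
		cond_resched();
	}
}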
shmem.c
939 struct folio_batch fbatch; in shmem_unlock_mapping() local
942 folio_batch_init(&fbatch); in shmem_unlock_mapping()
947 filemap_get_folios(mapping, &index, ~0UL, &fbatch)) { in shmem_unlock_mapping()
948 check_move_unevictable_folios(&fbatch); in shmem_unlock_mapping()
949 folio_batch_release(&fbatch); in shmem_unlock_mapping()
993 struct folio_batch fbatch; in shmem_undo_range() local
1007 folio_batch_init(&fbatch); in shmem_undo_range()
1010 &fbatch, indices)) { in shmem_undo_range()
1011 for (i = 0; i < folio_batch_count(&fbatch); i++) { in shmem_undo_range()
1012 folio = fbatch.folios[i]; in shmem_undo_range()
[all …]
page-writeback.c
2423 struct folio_batch fbatch; in write_cache_pages() local
2431 folio_batch_init(&fbatch); in write_cache_pages()
2452 tag, &fbatch); in write_cache_pages()
2458 struct folio *folio = fbatch.folios[i]; in write_cache_pages()
2537 folio_batch_release(&fbatch); in write_cache_pages()
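write_cache_pages() above, like the nilfs, gfs2, ceph and f2fs writeback paths further down, uses the tagged variant: filemap_get_folios_tag() only returns folios whose page-cache slots carry the requested mark, here PAGECACHE_TAG_DIRTY. A reduced sketch of that pattern; example_writeback_range() is a hypothetical helper, and locking plus the actual writeout are omitted.

#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched.h>

static void example_writeback_range(struct address_space *mapping,
				    pgoff_t start, pgoff_t end)
{
	struct folio_batch fbatch;
	unsigned int i, nr;

	folio_batch_init(&fbatch);
	while ((nr = filemap_get_folios_tag(mapping, &start, end,
					    PAGECACHE_TAG_DIRTY, &fbatch))) {
		for (i = 0; i < nr; i++) {
			struct folio *folio = fbatch.folios[i];

			/* lock the folio, write it back, then unlock it */
			(void)folio;
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
}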
/openbmc/linux/fs/nilfs2/
page.c
244 struct folio_batch fbatch; in nilfs_copy_dirty_pages() local
249 folio_batch_init(&fbatch); in nilfs_copy_dirty_pages()
252 PAGECACHE_TAG_DIRTY, &fbatch)) in nilfs_copy_dirty_pages()
255 for (i = 0; i < folio_batch_count(&fbatch); i++) { in nilfs_copy_dirty_pages()
256 struct folio *folio = fbatch.folios[i], *dfolio; in nilfs_copy_dirty_pages()
280 folio_batch_release(&fbatch); in nilfs_copy_dirty_pages()
299 struct folio_batch fbatch; in nilfs_copy_back_pages() local
303 folio_batch_init(&fbatch); in nilfs_copy_back_pages()
305 n = filemap_get_folios(smap, &start, ~0UL, &fbatch); in nilfs_copy_back_pages()
309 for (i = 0; i < folio_batch_count(&fbatch); i++) { in nilfs_copy_back_pages()
[all …]
segment.c
702 struct folio_batch fbatch; in nilfs_lookup_dirty_data_buffers() local
716 folio_batch_init(&fbatch); in nilfs_lookup_dirty_data_buffers()
720 PAGECACHE_TAG_DIRTY, &fbatch)) in nilfs_lookup_dirty_data_buffers()
723 for (i = 0; i < folio_batch_count(&fbatch); i++) { in nilfs_lookup_dirty_data_buffers()
725 struct folio *folio = fbatch.folios[i]; in nilfs_lookup_dirty_data_buffers()
748 folio_batch_release(&fbatch); in nilfs_lookup_dirty_data_buffers()
754 folio_batch_release(&fbatch); in nilfs_lookup_dirty_data_buffers()
764 struct folio_batch fbatch; in nilfs_lookup_dirty_node_buffers() local
771 folio_batch_init(&fbatch); in nilfs_lookup_dirty_node_buffers()
774 (pgoff_t)-1, PAGECACHE_TAG_DIRTY, &fbatch)) { in nilfs_lookup_dirty_node_buffers()
[all …]
/openbmc/linux/fs/ramfs/
file-nommu.c
208 struct folio_batch fbatch; in ramfs_nommu_get_unmapped_area() local
224 folio_batch_init(&fbatch); in ramfs_nommu_get_unmapped_area()
228 ULONG_MAX, &fbatch); in ramfs_nommu_get_unmapped_area()
235 ret = (unsigned long) folio_address(fbatch.folios[0]); in ramfs_nommu_get_unmapped_area()
236 pfn = folio_pfn(fbatch.folios[0]); in ramfs_nommu_get_unmapped_area()
240 if (pfn + nr_pages != folio_pfn(fbatch.folios[loop])) { in ramfs_nommu_get_unmapped_area()
244 nr_pages += folio_nr_pages(fbatch.folios[loop]); in ramfs_nommu_get_unmapped_area()
250 folio_batch_release(&fbatch); in ramfs_nommu_get_unmapped_area()
256 folio_batch_release(&fbatch); in ramfs_nommu_get_unmapped_area()
/openbmc/linux/drivers/gpu/drm/
drm_gem.c
505 static void drm_gem_check_release_batch(struct folio_batch *fbatch) in drm_gem_check_release_batch() argument
507 check_move_unevictable_folios(fbatch); in drm_gem_check_release_batch()
508 __folio_batch_release(fbatch); in drm_gem_check_release_batch()
542 struct folio_batch fbatch; in drm_gem_get_pages() local
589 folio_batch_init(&fbatch); in drm_gem_get_pages()
593 if (!folio_batch_add(&fbatch, f)) in drm_gem_get_pages()
594 drm_gem_check_release_batch(&fbatch); in drm_gem_get_pages()
597 if (fbatch.nr) in drm_gem_get_pages()
598 drm_gem_check_release_batch(&fbatch); in drm_gem_get_pages()
617 struct folio_batch fbatch; in drm_gem_put_pages() local
[all …]
/openbmc/linux/fs/afs/
write.c
470 struct folio_batch fbatch; in afs_extend_writeback() local
481 folio_batch_init(&fbatch); in afs_extend_writeback()
540 if (!folio_batch_add(&fbatch, folio)) in afs_extend_writeback()
553 if (!folio_batch_count(&fbatch)) in afs_extend_writeback()
556 for (i = 0; i < folio_batch_count(&fbatch); i++) { in afs_extend_writeback()
557 folio = fbatch.folios[i]; in afs_extend_writeback()
570 folio_batch_release(&fbatch); in afs_extend_writeback()
709 struct folio_batch fbatch; in afs_writepages_region() local
715 folio_batch_init(&fbatch); in afs_writepages_region()
721 PAGECACHE_TAG_DIRTY, &fbatch); in afs_writepages_region()
[all …]
/openbmc/linux/drivers/gpu/drm/i915/gem/
i915_gem_shmem.c
25 static void check_release_folio_batch(struct folio_batch *fbatch) in check_release_folio_batch() argument
27 check_move_unevictable_folios(fbatch); in check_release_folio_batch()
28 __folio_batch_release(fbatch); in check_release_folio_batch()
36 struct folio_batch fbatch; in shmem_sg_free_table() local
42 folio_batch_init(&fbatch); in shmem_sg_free_table()
54 if (!folio_batch_add(&fbatch, folio)) in shmem_sg_free_table()
55 check_release_folio_batch(&fbatch); in shmem_sg_free_table()
57 if (fbatch.nr) in shmem_sg_free_table()
58 check_release_folio_batch(&fbatch); in shmem_sg_free_table()
/openbmc/linux/fs/gfs2/
aops.c
209 struct folio_batch *fbatch, in gfs2_write_jdata_batch() argument
218 int nr_folios = folio_batch_count(fbatch); in gfs2_write_jdata_batch()
221 nr_pages += folio_nr_pages(fbatch->folios[i]); in gfs2_write_jdata_batch()
229 struct folio *folio = fbatch->folios[i]; in gfs2_write_jdata_batch()
312 struct folio_batch fbatch; in gfs2_write_cache_jdata() local
322 folio_batch_init(&fbatch); in gfs2_write_cache_jdata()
349 tag, &fbatch); in gfs2_write_cache_jdata()
353 ret = gfs2_write_jdata_batch(mapping, wbc, &fbatch, in gfs2_write_cache_jdata()
359 folio_batch_release(&fbatch); in gfs2_write_cache_jdata()
/openbmc/linux/fs/ceph/
addr.c
939 struct folio_batch fbatch; in ceph_writepages_start() local
969 folio_batch_init(&fbatch); in ceph_writepages_start()
1036 end, tag, &fbatch); in ceph_writepages_start()
1041 page = &fbatch.folios[i]->page; in ceph_writepages_start()
1181 fbatch.folios[i] = NULL; in ceph_writepages_start()
1192 if (!fbatch.folios[j]) in ceph_writepages_start()
1195 fbatch.folios[n] = fbatch.folios[j]; in ceph_writepages_start()
1198 fbatch.nr = n; in ceph_writepages_start()
1203 folio_batch_release(&fbatch); in ceph_writepages_start()
1354 dout("folio_batch release on %d folios (%p)\n", (int)fbatch.nr, in ceph_writepages_start()
[all …]
/openbmc/linux/fs/f2fs/
node.c
1557 struct folio_batch fbatch; in last_fsync_dnode() local
1561 folio_batch_init(&fbatch); in last_fsync_dnode()
1566 &fbatch))) { in last_fsync_dnode()
1570 struct page *page = &fbatch.folios[i]->page; in last_fsync_dnode()
1574 folio_batch_release(&fbatch); in last_fsync_dnode()
1605 folio_batch_release(&fbatch); in last_fsync_dnode()
1773 struct folio_batch fbatch; in f2fs_fsync_node_pages() local
1787 folio_batch_init(&fbatch); in f2fs_fsync_node_pages()
1792 &fbatch))) { in f2fs_fsync_node_pages()
1796 struct page *page = &fbatch.folios[i]->page; in f2fs_fsync_node_pages()
[all …]
checkpoint.c
410 struct folio_batch fbatch; in f2fs_sync_meta_pages() local
418 folio_batch_init(&fbatch); in f2fs_sync_meta_pages()
424 PAGECACHE_TAG_DIRTY, &fbatch))) { in f2fs_sync_meta_pages()
428 struct folio *folio = fbatch.folios[i]; in f2fs_sync_meta_pages()
432 folio_nr_pages(fbatch.folios[i-1])) { in f2fs_sync_meta_pages()
433 folio_batch_release(&fbatch); in f2fs_sync_meta_pages()
465 folio_batch_release(&fbatch); in f2fs_sync_meta_pages()
/openbmc/linux/drivers/gpu/drm/i915/
i915_gpu_error.c
190 static void pool_fini(struct folio_batch *fbatch) in pool_fini() argument
192 folio_batch_release(fbatch); in pool_fini()
195 static int pool_refill(struct folio_batch *fbatch, gfp_t gfp) in pool_refill() argument
197 while (folio_batch_space(fbatch)) { in pool_refill()
204 folio_batch_add(fbatch, folio); in pool_refill()
210 static int pool_init(struct folio_batch *fbatch, gfp_t gfp) in pool_init() argument
214 folio_batch_init(fbatch); in pool_init()
216 err = pool_refill(fbatch, gfp); in pool_init()
218 pool_fini(fbatch); in pool_init()
223 static void *pool_alloc(struct folio_batch *fbatch, gfp_t gfp) in pool_alloc() argument
[all …]
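i915_gpu_error.c uses a folio_batch in a different role: as a small pool of pre-allocated order-0 folios that pool_alloc() then hands out one at a time. A sketch of the refill step under that assumption, with a hypothetical helper name and error handling trimmed:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/pagevec.h>

static int example_pool_refill(struct folio_batch *fbatch, gfp_t gfp)
{
	/* top the batch up until no slots are left */
	while (folio_batch_space(fbatch)) {
		struct folio *folio = folio_alloc(gfp, 0);

		if (!folio)
			return -ENOMEM;
		folio_batch_add(fbatch, folio);
	}
	return 0;
}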
/openbmc/linux/fs/btrfs/tests/
extent-io-tests.c
25 struct folio_batch fbatch; in process_page_range() local
32 folio_batch_init(&fbatch); in process_page_range()
36 end_index, &fbatch); in process_page_range()
38 struct folio *folio = fbatch.folios[i]; in process_page_range()
48 folio_batch_release(&fbatch); in process_page_range()
/openbmc/linux/fs/btrfs/
extent_io.c
214 struct folio_batch fbatch; in __process_pages_contig() local
217 folio_batch_init(&fbatch); in __process_pages_contig()
222 end_index, &fbatch); in __process_pages_contig()
224 struct folio *folio = fbatch.folios[i]; in __process_pages_contig()
229 folio_batch_release(&fbatch); in __process_pages_contig()
260 struct folio_batch fbatch; in lock_delalloc_pages() local
265 folio_batch_init(&fbatch); in lock_delalloc_pages()
270 end_index, &fbatch); in lock_delalloc_pages()
275 struct page *page = &fbatch.folios[i]->page; in lock_delalloc_pages()
293 folio_batch_release(&fbatch); in lock_delalloc_pages()
[all …]
compression.c
195 struct folio_batch fbatch; in end_compressed_writeback() local
203 folio_batch_init(&fbatch); in end_compressed_writeback()
206 &fbatch); in end_compressed_writeback()
212 struct folio *folio = fbatch.folios[i]; in end_compressed_writeback()
217 folio_batch_release(&fbatch); in end_compressed_writeback()
/openbmc/linux/fs/hugetlbfs/
inode.c
677 struct folio_batch fbatch; in remove_inode_hugepages() local
682 folio_batch_init(&fbatch); in remove_inode_hugepages()
684 while (filemap_get_folios(mapping, &next, end - 1, &fbatch)) { in remove_inode_hugepages()
685 for (i = 0; i < folio_batch_count(&fbatch); ++i) { in remove_inode_hugepages()
686 struct folio *folio = fbatch.folios[i]; in remove_inode_hugepages()
702 folio_batch_release(&fbatch); in remove_inode_hugepages()
/openbmc/linux/fs/ext4/
inode.c
1582 struct folio_batch fbatch; in mpage_release_unused_pages() local
1607 folio_batch_init(&fbatch); in mpage_release_unused_pages()
1609 nr = filemap_get_folios(mapping, &index, end, &fbatch); in mpage_release_unused_pages()
1613 struct folio *folio = fbatch.folios[i]; in mpage_release_unused_pages()
1630 folio_batch_release(&fbatch); in mpage_release_unused_pages()
2114 struct folio_batch fbatch; in mpage_map_and_submit_buffers() local
2129 folio_batch_init(&fbatch); in mpage_map_and_submit_buffers()
2131 nr = filemap_get_folios(inode->i_mapping, &start, end, &fbatch); in mpage_map_and_submit_buffers()
2135 struct folio *folio = fbatch.folios[i]; in mpage_map_and_submit_buffers()
2152 folio_batch_release(&fbatch); in mpage_map_and_submit_buffers()
[all …]
/openbmc/linux/fs/
buffer.c
1704 struct folio_batch fbatch; in clean_bdev_aliases() local
1712 folio_batch_init(&fbatch); in clean_bdev_aliases()
1713 while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) { in clean_bdev_aliases()
1714 count = folio_batch_count(&fbatch); in clean_bdev_aliases()
1716 struct folio *folio = fbatch.folios[i]; in clean_bdev_aliases()
1745 folio_batch_release(&fbatch); in clean_bdev_aliases()
