
Searched refs:fbatch (Results 1 – 25 of 31) sorted by relevance

/openbmc/linux/include/linux/
pagevec.h
40 static inline void folio_batch_init(struct folio_batch *fbatch) in folio_batch_init() argument
42 fbatch->nr = 0; in folio_batch_init()
43 fbatch->percpu_pvec_drained = false; in folio_batch_init()
46 static inline void folio_batch_reinit(struct folio_batch *fbatch) in folio_batch_reinit() argument
48 fbatch->nr = 0; in folio_batch_reinit()
53 return fbatch->nr; in folio_batch_count()
58 return PAGEVEC_SIZE - fbatch->nr; in folio_batch_space()
74 fbatch->folios[fbatch->nr++] = folio; in folio_batch_add()
75 return folio_batch_space(fbatch); in folio_batch_add()
82 if (folio_batch_count(fbatch)) in folio_batch_release()
[all …]
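
The pagevec.h hits above are the whole folio_batch API: folio_batch_init() zeroes the count, folio_batch_add() stores a folio and returns the slots still free, and folio_batch_release() drops the references of everything collected. A minimal sketch of the usual fill-then-release loop, using only the helpers shown above; gather_folio() is a hypothetical callback standing in for whatever produces the folios:

#include <linux/pagevec.h>	/* struct folio_batch and its inline helpers */

static void consume_folios(struct folio *(*gather_folio)(void))
{
	struct folio_batch fbatch;
	struct folio *folio;

	folio_batch_init(&fbatch);	/* nr = 0, percpu_pvec_drained = false */
	while ((folio = gather_folio())) {
		/*
		 * folio_batch_add() stores the folio and returns the space
		 * left; 0 means the batch is now full, so flush it.
		 */
		if (!folio_batch_add(&fbatch, folio))
			folio_batch_release(&fbatch);	/* put refs, reset nr */
	}
	folio_batch_release(&fbatch);	/* flush the remainder; no-op if empty */
}
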
pagemap.h
847 pgoff_t end, struct folio_batch *fbatch);
849 pgoff_t *start, pgoff_t end, struct folio_batch *fbatch);
851 pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch);
1207 struct folio_batch *fbatch);
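
The pagemap.h declarations at lines 847–851 are the batched page-cache lookups that most of the callers below rely on: filemap_get_folios(), filemap_get_folios_contig() and filemap_get_folios_tag() each fill a caller-supplied folio_batch and advance the start index past what they returned. A simplified sketch of the canonical lookup loop, as used by hugetlbfs and fs/buffer.c further down (the per-folio work is left as a placeholder):

#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched.h>	/* cond_resched() */

/* Visit every folio cached in [start, end] of a mapping, one batch at a time. */
static void walk_mapping_range(struct address_space *mapping,
			       pgoff_t start, pgoff_t end)
{
	struct folio_batch fbatch;
	unsigned int i;

	folio_batch_init(&fbatch);
	while (filemap_get_folios(mapping, &start, end, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];

			folio_lock(folio);
			/* ... per-folio work goes here ... */
			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);	/* drop the batch's references */
		cond_resched();
	}
}
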
/openbmc/linux/mm/
swap.c
53 struct folio_batch fbatch; member
224 folios_put(fbatch->folios, folio_batch_count(fbatch)); in folio_batch_move_lru()
225 folio_batch_reinit(fbatch); in folio_batch_move_lru()
258 struct folio_batch *fbatch; in folio_rotate_reclaimable() local
263 fbatch = this_cpu_ptr(&lru_rotate.fbatch); in folio_rotate_reclaimable()
356 struct folio_batch *fbatch; in folio_activate() local
386 struct folio_batch *fbatch; in __lru_cache_activate_folio() local
503 struct folio_batch *fbatch; in folio_add_lru() local
654 fbatch = &per_cpu(lru_rotate.fbatch, cpu); in lru_add_drain_cpu()
1062 release_pages(fbatch->folios, folio_batch_count(fbatch)); in __folio_batch_release()
[all …]
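
swap.c keeps these batches per CPU: the fbatch at line 53 is a member of a per-CPU struct, folio_rotate_reclaimable() picks up the local instance with this_cpu_ptr() (line 263), and lru_add_drain_cpu() reaches another CPU's instance with per_cpu() (line 654). A loose sketch of that arrangement with hypothetical names (my_fbatches, my_queue_folio, my_flush_batch); the local_lock usage is assumed to match what current kernels do around lru_rotate:

#include <linux/local_lock.h>
#include <linux/mm.h>		/* folios_put() */
#include <linux/pagevec.h>
#include <linux/percpu.h>

/* Hypothetical per-CPU batch, mirroring the lru_rotate pattern in swap.c. */
struct my_fbatches {
	local_lock_t lock;
	struct folio_batch fbatch;
};
static DEFINE_PER_CPU(struct my_fbatches, my_fbatches) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

/*
 * Flush a full batch.  The real folio_batch_move_lru() hands the folios to
 * the LRU before putting them; this sketch only drops the references.
 */
static void my_flush_batch(struct folio_batch *fbatch)
{
	folios_put(fbatch->folios, folio_batch_count(fbatch));
	folio_batch_reinit(fbatch);
}

static void my_queue_folio(struct folio *folio)
{
	struct folio_batch *fbatch;
	unsigned long flags;

	local_lock_irqsave(&my_fbatches.lock, flags);
	fbatch = this_cpu_ptr(&my_fbatches.fbatch);	/* this CPU's batch */
	if (!folio_batch_add(fbatch, folio))		/* full after adding? */
		my_flush_batch(fbatch);
	local_unlock_irqrestore(&my_fbatches.lock, flags);
}

/* Drain one CPU's batch; the real lru_add_drain_cpu() is careful to run this
 * on the owning CPU (or for an offlined CPU) so the access does not race. */
static void my_drain_cpu(int cpu)
{
	struct folio_batch *fbatch = &per_cpu(my_fbatches.fbatch, cpu);

	if (folio_batch_count(fbatch))
		my_flush_batch(fbatch);
}
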
truncate.c
87 fbatch->folios[j++] = folio; in truncate_folio_batch_exceptionals()
105 fbatch->nr = j; in truncate_folio_batch_exceptionals()
334 struct folio_batch fbatch; in truncate_inode_pages_range() local
361 folio_batch_init(&fbatch); in truncate_inode_pages_range()
364 &fbatch, indices)) { in truncate_inode_pages_range()
371 folio_batch_release(&fbatch); in truncate_inode_pages_range()
428 folio_batch_release(&fbatch); in truncate_inode_pages_range()
501 struct folio_batch fbatch; in mapping_try_invalidate() local
507 folio_batch_init(&fbatch); in mapping_try_invalidate()
622 struct folio_batch fbatch; in invalidate_inode_pages2_range() local
[all …]
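
The truncate.c hits at lines 87 and 105 show a batch being compacted in place: truncate_folio_batch_exceptionals() walks the entries the lookup stored, keeps the real folios, and shrinks fbatch->nr to the number kept, because find_lock_entries()/find_get_entries() may hand back shadow or DAX value entries mixed in with folios. A hedged sketch of that in-place filtering (drop_value_entries() is a made-up name, and the real function also clears the value entries from the mapping):

#include <linux/pagevec.h>
#include <linux/xarray.h>	/* xa_is_value() marks shadow/DAX entries */

static void drop_value_entries(struct folio_batch *fbatch)
{
	unsigned int i, j;

	for (i = 0, j = 0; i < folio_batch_count(fbatch); i++) {
		struct folio *folio = fbatch->folios[i];

		if (xa_is_value(folio))
			continue;		/* value entry, not a folio: skip */
		fbatch->folios[j++] = folio;	/* keep, compacting leftwards (line 87) */
	}
	fbatch->nr = j;				/* shrink the batch (line 105) */
}
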
mlock.c
33 struct folio_batch fbatch; member
194 folio = fbatch->folios[i]; in mlock_folio_batch()
197 fbatch->folios[i] = folio; in mlock_folio_batch()
209 folios_put(fbatch->folios, folio_batch_count(fbatch)); in mlock_folio_batch()
210 folio_batch_reinit(fbatch); in mlock_folio_batch()
215 struct folio_batch *fbatch; in mlock_drain_local() local
218 fbatch = this_cpu_ptr(&mlock_fbatch.fbatch); in mlock_drain_local()
229 fbatch = &per_cpu(mlock_fbatch.fbatch, cpu); in mlock_drain_remote()
248 fbatch = this_cpu_ptr(&mlock_fbatch.fbatch); in mlock_folio()
274 fbatch = this_cpu_ptr(&mlock_fbatch.fbatch); in mlock_new_folio()
[all …]
filemap.c
508 struct folio_batch fbatch; in __filemap_fdatawait_range() local
511 folio_batch_init(&fbatch); in __filemap_fdatawait_range()
528 folio_batch_release(&fbatch); in __filemap_fdatawait_range()
2570 folio = fbatch->folios[folio_batch_count(fbatch) - 1]; in filemap_get_pages()
2590 if (likely(--fbatch->nr)) in filemap_get_pages()
2624 struct folio_batch fbatch; in filemap_read() local
2636 folio_batch_init(&fbatch); in filemap_read()
2689 fbatch.folios[0])) in filemap_read()
2726 folio_batch_init(&fbatch); in filemap_read()
2901 struct folio_batch fbatch; in filemap_splice_read() local
[all …]
shmem.c
928 struct folio_batch fbatch; in shmem_unlock_mapping() local
931 folio_batch_init(&fbatch); in shmem_unlock_mapping()
938 folio_batch_release(&fbatch); in shmem_unlock_mapping()
982 struct folio_batch fbatch; in shmem_undo_range() local
996 folio_batch_init(&fbatch); in shmem_undo_range()
999 &fbatch, indices)) { in shmem_undo_range()
1001 folio = fbatch.folios[i]; in shmem_undo_range()
1016 folio_batch_release(&fbatch); in shmem_undo_range()
1070 folio = fbatch.folios[i]; in shmem_undo_range()
1363 struct folio_batch fbatch; in shmem_unuse_inode() local
[all …]
page-writeback.c
2401 struct folio_batch fbatch; in write_cache_pages() local
2409 folio_batch_init(&fbatch); in write_cache_pages()
2430 tag, &fbatch); in write_cache_pages()
2436 struct folio *folio = fbatch.folios[i]; in write_cache_pages()
2515 folio_batch_release(&fbatch); in write_cache_pages()
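
write_cache_pages() above is the template that gfs2, ceph, f2fs, btrfs and ext4 below all follow: look up a batch of dirty folios by tag, write each one, release the batch, and repeat until the range is exhausted. A stripped-down sketch of that loop; write_one_folio() is a hypothetical stand-in for the real lock/clear-dirty/submit sequence:

#include <linux/fs.h>		/* PAGECACHE_TAG_DIRTY */
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched.h>	/* cond_resched() */

/* Hypothetical per-folio writeback; the real code locks the folio, clears
 * the dirty tag and submits the I/O.  Stubbed out here. */
static int write_one_folio(struct folio *folio)
{
	return 0;
}

static int write_dirty_range(struct address_space *mapping,
			     pgoff_t index, pgoff_t end)
{
	struct folio_batch fbatch;
	unsigned int i, nr;
	int ret = 0;

	folio_batch_init(&fbatch);
	while (index <= end) {
		nr = filemap_get_folios_tag(mapping, &index, end,
					    PAGECACHE_TAG_DIRTY, &fbatch);
		if (!nr)
			break;			/* no more dirty folios in range */
		for (i = 0; i < nr && !ret; i++)
			ret = write_one_folio(fbatch.folios[i]);
		folio_batch_release(&fbatch);	/* drop the references */
		cond_resched();
		if (ret)
			break;
	}
	return ret;
}
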
/openbmc/linux/fs/nilfs2/
page.c
243 struct folio_batch fbatch; in nilfs_copy_dirty_pages() local
248 folio_batch_init(&fbatch); in nilfs_copy_dirty_pages()
279 folio_batch_release(&fbatch); in nilfs_copy_dirty_pages()
298 struct folio_batch fbatch; in nilfs_copy_back_pages() local
302 folio_batch_init(&fbatch); in nilfs_copy_back_pages()
361 struct folio_batch fbatch; in nilfs_clear_dirty_pages() local
365 folio_batch_init(&fbatch); in nilfs_clear_dirty_pages()
495 struct folio_batch fbatch; in nilfs_find_uncommitted_extent() local
503 folio_batch_init(&fbatch); in nilfs_find_uncommitted_extent()
507 &fbatch); in nilfs_find_uncommitted_extent()
[all …]
segment.c
702 struct folio_batch fbatch; in nilfs_lookup_dirty_data_buffers() local
716 folio_batch_init(&fbatch); in nilfs_lookup_dirty_data_buffers()
720 PAGECACHE_TAG_DIRTY, &fbatch)) in nilfs_lookup_dirty_data_buffers()
725 struct folio *folio = fbatch.folios[i]; in nilfs_lookup_dirty_data_buffers()
748 folio_batch_release(&fbatch); in nilfs_lookup_dirty_data_buffers()
754 folio_batch_release(&fbatch); in nilfs_lookup_dirty_data_buffers()
764 struct folio_batch fbatch; in nilfs_lookup_dirty_node_buffers() local
771 folio_batch_init(&fbatch); in nilfs_lookup_dirty_node_buffers()
774 (pgoff_t)-1, PAGECACHE_TAG_DIRTY, &fbatch)) { in nilfs_lookup_dirty_node_buffers()
776 bh = head = folio_buffers(fbatch.folios[i]); in nilfs_lookup_dirty_node_buffers()
[all …]
/openbmc/linux/fs/ramfs/
file-nommu.c
208 struct folio_batch fbatch; in ramfs_nommu_get_unmapped_area() local
224 folio_batch_init(&fbatch); in ramfs_nommu_get_unmapped_area()
228 ULONG_MAX, &fbatch); in ramfs_nommu_get_unmapped_area()
235 ret = (unsigned long) folio_address(fbatch.folios[0]); in ramfs_nommu_get_unmapped_area()
236 pfn = folio_pfn(fbatch.folios[0]); in ramfs_nommu_get_unmapped_area()
240 if (pfn + nr_pages != folio_pfn(fbatch.folios[loop])) { in ramfs_nommu_get_unmapped_area()
244 nr_pages += folio_nr_pages(fbatch.folios[loop]); in ramfs_nommu_get_unmapped_area()
250 folio_batch_release(&fbatch); in ramfs_nommu_get_unmapped_area()
256 folio_batch_release(&fbatch); in ramfs_nommu_get_unmapped_area()
/openbmc/linux/drivers/gpu/drm/
drm_gem.c
507 check_move_unevictable_folios(fbatch); in drm_gem_check_release_batch()
508 __folio_batch_release(fbatch); in drm_gem_check_release_batch()
542 struct folio_batch fbatch; in drm_gem_get_pages() local
589 folio_batch_init(&fbatch); in drm_gem_get_pages()
593 if (!folio_batch_add(&fbatch, f)) in drm_gem_get_pages()
597 if (fbatch.nr) in drm_gem_get_pages()
598 drm_gem_check_release_batch(&fbatch); in drm_gem_get_pages()
617 struct folio_batch fbatch; in drm_gem_put_pages() local
630 folio_batch_init(&fbatch); in drm_gem_put_pages()
645 if (!folio_batch_add(&fbatch, folio)) in drm_gem_put_pages()
[all …]
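
drm_gem_get_pages() and drm_gem_put_pages() show the flush-when-full variant of the same idea: folios are accumulated until folio_batch_add() reports no space left, each flush runs check_move_unevictable_folios() before __folio_batch_release(), and a final flush handles whatever is left in the batch. A hedged sketch of that release path; the folios[]/nr_folios parameters are illustrative:

#include <linux/pagevec.h>
#include <linux/swap.h>		/* check_move_unevictable_folios() */

static void release_folio_array(struct folio **folios, unsigned long nr_folios)
{
	struct folio_batch fbatch;
	unsigned long i;

	folio_batch_init(&fbatch);
	for (i = 0; i < nr_folios; i++) {
		if (!folio_batch_add(&fbatch, folios[i])) {
			/* batch full: fix up unevictable LRU state, then
			 * drop the references and reset the batch */
			check_move_unevictable_folios(&fbatch);
			__folio_batch_release(&fbatch);
		}
	}
	if (folio_batch_count(&fbatch)) {	/* flush the partial tail */
		check_move_unevictable_folios(&fbatch);
		__folio_batch_release(&fbatch);
	}
}
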
/openbmc/linux/drivers/gpu/drm/i915/gem/
i915_gem_shmem.c
25 static void check_release_folio_batch(struct folio_batch *fbatch) in check_release_folio_batch() argument
27 check_move_unevictable_folios(fbatch); in check_release_folio_batch()
28 __folio_batch_release(fbatch); in check_release_folio_batch()
36 struct folio_batch fbatch; in shmem_sg_free_table() local
42 folio_batch_init(&fbatch); in shmem_sg_free_table()
54 if (!folio_batch_add(&fbatch, folio)) in shmem_sg_free_table()
55 check_release_folio_batch(&fbatch); in shmem_sg_free_table()
57 if (fbatch.nr) in shmem_sg_free_table()
58 check_release_folio_batch(&fbatch); in shmem_sg_free_table()
/openbmc/linux/fs/afs/
write.c
470 struct folio_batch fbatch; in afs_extend_writeback() local
481 folio_batch_init(&fbatch); in afs_extend_writeback()
553 if (!folio_batch_count(&fbatch)) in afs_extend_writeback()
557 folio = fbatch.folios[i]; in afs_extend_writeback()
570 folio_batch_release(&fbatch); in afs_extend_writeback()
709 struct folio_batch fbatch; in afs_writepages_region() local
715 folio_batch_init(&fbatch); in afs_writepages_region()
726 folio = fbatch.folios[i]; in afs_writepages_region()
740 folio_batch_release(&fbatch); in afs_writepages_region()
785 folio_batch_release(&fbatch); in afs_writepages_region()
[all …]
/openbmc/linux/fs/gfs2/
aops.c
209 struct folio_batch *fbatch, in gfs2_write_jdata_batch() argument
218 int nr_folios = folio_batch_count(fbatch); in gfs2_write_jdata_batch()
221 nr_pages += folio_nr_pages(fbatch->folios[i]); in gfs2_write_jdata_batch()
229 struct folio *folio = fbatch->folios[i]; in gfs2_write_jdata_batch()
312 struct folio_batch fbatch; in gfs2_write_cache_jdata() local
322 folio_batch_init(&fbatch); in gfs2_write_cache_jdata()
349 tag, &fbatch); in gfs2_write_cache_jdata()
353 ret = gfs2_write_jdata_batch(mapping, wbc, &fbatch, in gfs2_write_cache_jdata()
359 folio_batch_release(&fbatch); in gfs2_write_cache_jdata()
/openbmc/linux/fs/ceph/
addr.c
935 struct folio_batch fbatch; in ceph_writepages_start() local
965 folio_batch_init(&fbatch); in ceph_writepages_start()
1032 end, tag, &fbatch); in ceph_writepages_start()
1177 fbatch.folios[i] = NULL; in ceph_writepages_start()
1188 if (!fbatch.folios[j]) in ceph_writepages_start()
1191 fbatch.folios[n] = fbatch.folios[j]; in ceph_writepages_start()
1194 fbatch.nr = n; in ceph_writepages_start()
1199 folio_batch_release(&fbatch); in ceph_writepages_start()
1351 fbatch.nr ? fbatch.folios[0] : NULL); in ceph_writepages_start()
1352 folio_batch_release(&fbatch); in ceph_writepages_start()
[all …]
/openbmc/linux/fs/f2fs/
node.c
1542 struct folio_batch fbatch; in last_fsync_dnode() local
1546 folio_batch_init(&fbatch); in last_fsync_dnode()
1551 &fbatch))) { in last_fsync_dnode()
1758 struct folio_batch fbatch; in f2fs_fsync_node_pages() local
1772 folio_batch_init(&fbatch); in f2fs_fsync_node_pages()
1777 &fbatch))) { in f2fs_fsync_node_pages()
1917 struct folio_batch fbatch; in f2fs_flush_inline_data() local
1920 folio_batch_init(&fbatch); in f2fs_flush_inline_data()
1924 &fbatch))) { in f2fs_flush_inline_data()
1965 struct folio_batch fbatch; in f2fs_sync_node_pages() local
[all …]
checkpoint.c
410 struct folio_batch fbatch; in f2fs_sync_meta_pages() local
418 folio_batch_init(&fbatch); in f2fs_sync_meta_pages()
424 PAGECACHE_TAG_DIRTY, &fbatch))) { in f2fs_sync_meta_pages()
428 struct folio *folio = fbatch.folios[i]; in f2fs_sync_meta_pages()
432 folio_nr_pages(fbatch.folios[i-1])) { in f2fs_sync_meta_pages()
433 folio_batch_release(&fbatch); in f2fs_sync_meta_pages()
465 folio_batch_release(&fbatch); in f2fs_sync_meta_pages()
/openbmc/linux/drivers/gpu/drm/i915/
i915_gpu_error.c
190 static void pool_fini(struct folio_batch *fbatch) in pool_fini() argument
192 folio_batch_release(fbatch); in pool_fini()
197 while (folio_batch_space(fbatch)) { in pool_refill()
204 folio_batch_add(fbatch, folio); in pool_refill()
214 folio_batch_init(fbatch); in pool_init()
216 err = pool_refill(fbatch, gfp); in pool_init()
218 pool_fini(fbatch); in pool_init()
228 if (!folio && folio_batch_count(fbatch)) in pool_alloc()
229 folio = fbatch->folios[--fbatch->nr]; in pool_alloc()
238 if (folio_batch_space(fbatch)) in pool_free()
[all …]
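
i915_gpu_error.c uses a folio_batch not for page-cache iteration but as a small preallocated pool: pool_refill() keeps adding freshly allocated folios while folio_batch_space() says there is room, pool_alloc() pops the most recently stored folio with fbatch->folios[--fbatch->nr] when the allocator fails, and pool_fini() just releases whatever is left. A rough sketch of that pool idea with hypothetical my_pool_* names:

#include <linux/gfp.h>		/* folio_alloc() */
#include <linux/pagevec.h>

/* Fill the pool up to PAGEVEC_SIZE folios; -ENOMEM if allocation fails. */
static int my_pool_refill(struct folio_batch *fbatch, gfp_t gfp)
{
	while (folio_batch_space(fbatch)) {
		struct folio *folio = folio_alloc(gfp, 0);

		if (!folio)
			return -ENOMEM;
		folio_batch_add(fbatch, folio);
	}
	return 0;
}

/* Take one folio, preferring the allocator and falling back to the pool. */
static struct folio *my_pool_alloc(struct folio_batch *fbatch, gfp_t gfp)
{
	struct folio *folio = folio_alloc(gfp, 0);

	if (!folio && folio_batch_count(fbatch))
		folio = fbatch->folios[--fbatch->nr];	/* pop from the pool */
	return folio;
}

/* Drop whatever the pool still holds. */
static void my_pool_fini(struct folio_batch *fbatch)
{
	folio_batch_release(fbatch);
}
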
/openbmc/linux/fs/btrfs/tests/
extent-io-tests.c
24 struct folio_batch fbatch; in process_page_range() local
31 folio_batch_init(&fbatch); in process_page_range()
35 end_index, &fbatch); in process_page_range()
37 struct folio *folio = fbatch.folios[i]; in process_page_range()
47 folio_batch_release(&fbatch); in process_page_range()
/openbmc/linux/fs/btrfs/
extent_io.c
214 struct folio_batch fbatch; in __process_pages_contig() local
217 folio_batch_init(&fbatch); in __process_pages_contig()
222 end_index, &fbatch); in __process_pages_contig()
260 struct folio_batch fbatch; in lock_delalloc_pages() local
265 folio_batch_init(&fbatch); in lock_delalloc_pages()
270 end_index, &fbatch); in lock_delalloc_pages()
1869 struct folio_batch fbatch; in btree_write_cache_pages() local
1876 folio_batch_init(&fbatch); in btree_write_cache_pages()
1900 tag, &fbatch))) { in btree_write_cache_pages()
1997 struct folio_batch fbatch; in extent_write_cache_pages() local
[all …]
compression.c
195 struct folio_batch fbatch; in end_compressed_writeback() local
203 folio_batch_init(&fbatch); in end_compressed_writeback()
206 &fbatch); in end_compressed_writeback()
212 struct folio *folio = fbatch.folios[i]; in end_compressed_writeback()
217 folio_batch_release(&fbatch); in end_compressed_writeback()
/openbmc/linux/fs/hugetlbfs/
inode.c
677 struct folio_batch fbatch; in remove_inode_hugepages() local
682 folio_batch_init(&fbatch); in remove_inode_hugepages()
684 while (filemap_get_folios(mapping, &next, end - 1, &fbatch)) { in remove_inode_hugepages()
685 for (i = 0; i < folio_batch_count(&fbatch); ++i) { in remove_inode_hugepages()
686 struct folio *folio = fbatch.folios[i]; in remove_inode_hugepages()
702 folio_batch_release(&fbatch); in remove_inode_hugepages()
/openbmc/linux/fs/ext4/
inode.c
1549 struct folio_batch fbatch; in mpage_release_unused_pages() local
1574 folio_batch_init(&fbatch); in mpage_release_unused_pages()
1597 folio_batch_release(&fbatch); in mpage_release_unused_pages()
2090 struct folio_batch fbatch; in mpage_map_and_submit_buffers() local
2105 folio_batch_init(&fbatch); in mpage_map_and_submit_buffers()
2128 folio_batch_release(&fbatch); in mpage_map_and_submit_buffers()
2135 folio_batch_release(&fbatch); in mpage_map_and_submit_buffers()
2373 struct folio_batch fbatch; in mpage_prepare_extent_to_map() local
2398 folio_batch_init(&fbatch); in mpage_prepare_extent_to_map()
2401 tag, &fbatch); in mpage_prepare_extent_to_map()
[all …]
/openbmc/linux/fs/
buffer.c
1704 struct folio_batch fbatch; in clean_bdev_aliases() local
1712 folio_batch_init(&fbatch); in clean_bdev_aliases()
1713 while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) { in clean_bdev_aliases()
1714 count = folio_batch_count(&fbatch); in clean_bdev_aliases()
1716 struct folio *folio = fbatch.folios[i]; in clean_bdev_aliases()
1745 folio_batch_release(&fbatch); in clean_bdev_aliases()
