/openbmc/linux/fs/ramfs/file-nommu.c
   235  ret = (unsigned long) folio_address(fbatch.folios[0]);  in ramfs_nommu_get_unmapped_area()
   236  pfn = folio_pfn(fbatch.folios[0]);  in ramfs_nommu_get_unmapped_area()
   240  if (pfn + nr_pages != folio_pfn(fbatch.folios[loop])) {  in ramfs_nommu_get_unmapped_area()
   244  nr_pages += folio_nr_pages(fbatch.folios[loop]);  in ramfs_nommu_get_unmapped_area()
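The file-nommu.c hits above implement a physical-contiguity check over a folio batch: each folio's first PFN must follow the pages accumulated so far. A minimal sketch of that pattern, with illustrative variable names rather than the file's actual code:

    unsigned long pfn = folio_pfn(fbatch.folios[0]);
    unsigned long nr_pages = folio_nr_pages(fbatch.folios[0]);
    unsigned int loop;

    for (loop = 1; loop < folio_batch_count(&fbatch); loop++) {
            /* every folio must start where the previous run ended */
            if (pfn + nr_pages != folio_pfn(fbatch.folios[loop]))
                    break;          /* not physically contiguous */
            nr_pages += folio_nr_pages(fbatch.folios[loop]);
    }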
/openbmc/linux/mm/truncate.c
    70  if (xa_is_value(fbatch->folios[j]))  in truncate_folio_batch_exceptionals()
    83  struct folio *folio = fbatch->folios[i];  in truncate_folio_batch_exceptionals()
    87  fbatch->folios[j++] = folio;  in truncate_folio_batch_exceptionals()
   367  truncate_cleanup_folio(fbatch.folios[i]);  in truncate_inode_pages_range()
   370  folio_unlock(fbatch.folios[i]);  in truncate_inode_pages_range()
   414  struct folio *folio = fbatch.folios[i];  in truncate_inode_pages_range()
   510  struct folio *folio = fbatch.folios[i];  in mapping_try_invalidate()
   636  struct folio *folio = fbatch.folios[i];  in invalidate_inode_pages2_range()
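The first three truncate.c hits show the usual way a folio batch is compacted in place: shadow (value) entries are filtered out and real folios are repacked at the front. A simplified sketch, assuming the exceptional entries themselves have already been dealt with:

    unsigned int i, j;

    for (i = 0, j = 0; i < folio_batch_count(fbatch); i++) {
            struct folio *folio = fbatch->folios[i];

            if (xa_is_value(folio))
                    continue;               /* squeeze out the shadow entry */
            fbatch->folios[j++] = folio;    /* keep real folios, packed */
    }
    fbatch->nr = j;                         /* shrink the batch */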
/openbmc/linux/mm/swap.c
   202  struct folio *folio = fbatch->folios[i];  in folio_batch_move_lru()
   216  folios_put(fbatch->folios, folio_batch_count(fbatch));  in folio_batch_move_lru()
   395  struct folio *batch_folio = fbatch->folios[i];  in __lru_cache_activate_folio()
  1042  release_pages(fbatch->folios, folio_batch_count(fbatch));  in __folio_batch_release()
  1061  struct folio *folio = fbatch->folios[i];  in folio_batch_remove_exceptionals()
  1063  fbatch->folios[j++] = folio;  in folio_batch_remove_exceptionals()
/openbmc/linux/mm/migrate.c
  1830  LIST_HEAD(folios);  in migrate_pages_sync()
  1836  reason, &folios, split_folios, &astats,  in migrate_pages_sync()
  1844  list_splice_tail(&folios, ret_folios);  in migrate_pages_sync()
  1854  list_splice_tail_init(&folios, from);  in migrate_pages_sync()
  1856  list_move(from->next, &folios);  in migrate_pages_sync()
  1857  rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,  in migrate_pages_sync()
  1860  list_splice_tail_init(&folios, ret_folios);  in migrate_pages_sync()
  1902  LIST_HEAD(folios);  in migrate_pages()
  1930  list_cut_before(&folios, from, &folio2->lru);  in migrate_pages()
  1932  list_splice_init(from, &folios);  in migrate_pages()
  [all …]
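The migrate_pages_sync() hits trace a private-list pattern: carve work onto a local list, run a batch, and if the batch fails, requeue and retry one folio at a time. A hedged sketch of that list handling only, with do_batch() as a hypothetical stand-in for migrate_pages_batch() and its many real arguments:

    static int migrate_list_sketch(struct list_head *from,
                                   struct list_head *ret_folios)
    {
            LIST_HEAD(folios);
            int rc;

            list_splice_init(from, &folios);        /* take the whole input */
            rc = do_batch(&folios);                 /* hypothetical batch worker */
            if (rc >= 0) {
                    list_splice_tail(&folios, ret_folios);
                    return rc;
            }
            /* batch failed: requeue and retry one folio at a time */
            list_splice_tail_init(&folios, from);
            while (!list_empty(from)) {
                    list_move(from->next, &folios);
                    rc = do_batch(&folios);
                    list_splice_tail_init(&folios, ret_folios);
            }
            return rc;
    }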
/openbmc/linux/mm/filemap.c
   282  XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index);  in page_cache_delete_batch()
   302  if (folio != fbatch->folios[i]) {  in page_cache_delete_batch()
   304  fbatch->folios[i]->index, folio);  in page_cache_delete_batch()
   331  struct folio *folio = fbatch->folios[i];  in delete_from_page_cache_batch()
   343  filemap_free_folio(mapping, fbatch->folios[i]);  in delete_from_page_cache_batch()
   523  struct folio *folio = fbatch.folios[i];  in __filemap_fdatawait_range()
  2068  folio = fbatch->folios[idx];  in find_get_entries()
  2132  folio = fbatch->folios[idx];  in find_lock_entries()
  2260  folio = fbatch->folios[nr - 1];  in filemap_get_folios_contig()
  2596  folio = fbatch->folios[folio_batch_count(fbatch) - 1];  in filemap_get_pages()
  [all …]
/openbmc/linux/mm/mlock.c
   194  folio = fbatch->folios[i];  in mlock_folio_batch()
   197  fbatch->folios[i] = folio;  in mlock_folio_batch()
   209  folios_put(fbatch->folios, folio_batch_count(fbatch));  in mlock_folio_batch()
/openbmc/linux/mm/vmscan.c
  4358  struct list_head *head = &lrugen->folios[old_gen][type][zone];  in inc_min_seq()
  4369  list_move_tail(&folio->lru, &lrugen->folios[new_gen][type][zone]);  in inc_min_seq()
  4397  if (!list_empty(&lrugen->folios[gen][type][zone]))  in try_to_inc_min_seq()
  4944  list_move(&folio->lru, &lrugen->folios[gen][type][zone]);  in sort_folio()
  4953  list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]);  in sort_folio()
  4963  list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]);  in sort_folio()
  4971  list_move(&folio->lru, &lrugen->folios[gen][type][zone]);  in sort_folio()
  5036  struct list_head *head = &lrugen->folios[gen][type][zone];  in scan_folios()
  5636  if (!list_empty(&lrugen->folios[gen][type][zone]))  in state_is_valid()
  5681  struct list_head *head = &lruvec->lrugen.folios[gen][type][zone];  in drain_evictable()
  [all …]
/openbmc/linux/include/linux/pagevec.h
    31  struct folio *folios[PAGEVEC_SIZE];  member
    74  fbatch->folios[fbatch->nr++] = folio;  in folio_batch_add()
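pagevec.h is where the folios[] array itself lives: struct folio_batch holds up to PAGEVEC_SIZE folio pointers, and folio_batch_add() appends to it. Most of the filesystem hits in this listing follow the same consumption pattern, sketched below under the assumption that mapping, start, and end exist in the caller:

    struct folio_batch fbatch;
    pgoff_t index = start;
    unsigned int i;

    folio_batch_init(&fbatch);
    while (filemap_get_folios(mapping, &index, end, &fbatch)) {
            for (i = 0; i < folio_batch_count(&fbatch); i++) {
                    struct folio *folio = fbatch.folios[i];

                    /* ... per-folio work ... */
            }
            folio_batch_release(&fbatch);   /* drop the batch's references */
            cond_resched();
    }

Bounding each pass at PAGEVEC_SIZE entries (15 in recent kernels) keeps lock hold times short while still amortizing the cost of the page-cache lookup.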
/openbmc/linux/include/linux/mm_inline.h
   264  list_add_tail(&folio->lru, &lrugen->folios[gen][type][zone]);  in lru_gen_add_folio()
   266  list_add(&folio->lru, &lrugen->folios[gen][type][zone]);  in lru_gen_add_folio()
/openbmc/linux/include/linux/mm.h
  1517  struct folio **folios;  member
  1535  static inline void folios_put(struct folio **folios, unsigned int nr)  in folios_put() argument
  1537  release_pages(folios, nr);  in folios_put()
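Because folios_put() takes a bare pointer array plus a count, a filled folio_batch can be drained by passing its folios[] member directly, which is exactly what the swap.c and mlock.c hits above do. A minimal sketch (folio_batch_reinit() from pagevec.h resets the count so the batch can be refilled):

    folios_put(fbatch.folios, folio_batch_count(&fbatch));
    folio_batch_reinit(&fbatch);            /* empty the batch for reuse */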
/openbmc/linux/Documentation/mm/unevictable-lru.rst
    13  folios.
    28  folios and to hide these folios from vmscan. This mechanism is based on a patch
    72  The Unevictable LRU infrastructure maintains unevictable folios as if they were
    75  (1) We get to "treat unevictable folios just like we treat other folios in the
    80  (2) We want to be able to migrate unevictable folios between nodes for memory
    82  can only migrate folios that it can successfully isolate from the LRU
    84  maintain folios elsewhere than on an LRU-like list, where they can be
    88  anonymous, swap-backed folios. This differentiation is only important
    89  while the folios are, in fact, evictable.
   171  list. Instead, vmscan will do this if and when it encounters the folios during
  [all …]
/openbmc/linux/Documentation/mm/multigen_lru.rst
    92  truncated generation number is an index to ``lrugen->folios[]``. The
    96  ``lrugen->folios[]``; otherwise it stores zero.
   100  generations, tiers do not have dedicated ``lrugen->folios[]``. In
   131  increments ``min_seq`` when ``lrugen->folios[]`` indexed by
   226  since each node and memcg combination has an LRU of folios (see
   232  the active/inactive LRU (of folios):
   255  The multi-gen LRU (of folios) can be disassembled into the following
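As the documentation lines above say, a generation's sequence number is truncated to index ``lrugen->folios[]``, which is how the vmscan.c and mm_inline.h hits arrive at ``folios[gen][type][zone]``. A sketch of that indexing, assuming lrugen, seq, type, and zone are in scope (lru_gen_from_seq() is the mm_inline.h helper that does the truncation):

    int gen = lru_gen_from_seq(seq);        /* seq % MAX_NR_GENS */
    struct list_head *head = &lrugen->folios[gen][type][zone];
    struct folio *folio;

    list_for_each_entry(folio, head, lru) {
            /* walk this generation's folios for the given type/zone */
    }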
/openbmc/linux/fs/nilfs2/page.c
   256  struct folio *folio = fbatch.folios[i], *dfolio;  in nilfs_copy_dirty_pages()
   310  struct folio *folio = fbatch.folios[i], *dfolio;  in nilfs_copy_back_pages()
   371  struct folio *folio = fbatch.folios[i];  in nilfs_clear_dirty_pages()
   516  folio = fbatch.folios[i];  in nilfs_find_uncommitted_extent()
/openbmc/linux/fs/smb/client/cifsencrypt.c
   106  struct folio *folios[16], *folio;  in cifs_shash_xarray() local
   119  nr = xa_extract(iter->xarray, (void **)folios, index, last,  in cifs_shash_xarray()
   120  ARRAY_SIZE(folios), XA_PRESENT);  in cifs_shash_xarray()
   125  folio = folios[i];  in cifs_shash_xarray()
   144  } while (nr == ARRAY_SIZE(folios));  in cifs_shash_xarray()
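cifs_shash_xarray() batches folio pointers out of an xarray with xa_extract(), looping while each extraction fills the whole array (a full array means there may be more entries). A simplified sketch with error handling omitted; advancing index with folio_next_index() is an assumption about the loop, not a verbatim copy:

    struct folio *folios[16], *folio;
    unsigned int nr, i;

    do {
            nr = xa_extract(xarray, (void **)folios, index, last,
                            ARRAY_SIZE(folios), XA_PRESENT);
            for (i = 0; i < nr; i++) {
                    folio = folios[i];
                    /* process this folio's data ... */
                    index = folio_next_index(folio);    /* advance the cursor */
            }
    } while (nr == ARRAY_SIZE(folios));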
/openbmc/linux/Documentation/filesystems/netfs_library.rst
   104  * Handle folios that span multiple pages.
   109  don't match folio sizes or folio alignments and that may cross folios.
   363  it transferred. The filesystem also should not deal with setting folios
   367  Note that the helpers have the folios locked, but not pinned. It is
   391  [Optional] This is called after the folios in the request have all been
   438  * Once the data is read, the folios that have been fully read/cleared:
   446  * Any folios that need writing to the cache will then have DIO writes issued.
   450  * Writes to the cache will proceed asynchronously and the folios will have the
/openbmc/linux/Documentation/filesystems/fsverity.rst
   633  ``->readahead()`` methods must be modified to verify folios before
   689  verification. Finally, folios where no decryption or verity error
   690  occurred are marked Uptodate, and the folios are unlocked.
   856  - To prevent bypassing verification, folios must not be marked
   858  filesystem is responsible for marking folios Uptodate via
/openbmc/linux/Documentation/filesystems/vfs.rst
   702  on dirty pages, and ->release_folio on clean folios with the private
   859  If the mapping does not support large folios, the folio will
   869  In normal operation, folios are read through the ->readahead()
   976  release_folio is called on folios with private data to tell the
   986  some or all folios in an address_space. This can happen
   991  and needs to be certain that all folios are invalidated, then
  1031  some filesystems have more complex state (unstable folios in NFS
/openbmc/linux/Documentation/core-api/pin_user_pages.rst
    58  For large folios, the GUP_PIN_COUNTING_BIAS scheme is not used. Instead,
    62  This approach for large folios avoids the counting upper limit problems
    68  This also means that huge pages and large folios do not suffer
   273  fields, and to better report on large folios in general. Specifically,
   274  for large folios, the exact pincount is reported.
/openbmc/linux/fs/ceph/addr.c
  1041  page = &fbatch.folios[i]->page;  in ceph_writepages_start()
  1181  fbatch.folios[i] = NULL;  in ceph_writepages_start()
  1192  if (!fbatch.folios[j])  in ceph_writepages_start()
  1195  fbatch.folios[n] = fbatch.folios[j];  in ceph_writepages_start()
  1355  fbatch.nr ? fbatch.folios[0] : NULL);  in ceph_writepages_start()
  1378  page = &fbatch.folios[i]->page;  in ceph_writepages_start()
/openbmc/linux/fs/gfs2/aops.c
   221  nr_pages += folio_nr_pages(fbatch->folios[i]);  in gfs2_write_jdata_batch()
   229  struct folio *folio = fbatch->folios[i];  in gfs2_write_jdata_batch()
/openbmc/linux/fs/afs/write.c
   557  folio = fbatch.folios[i];  in afs_extend_writeback()
   726  folio = fbatch.folios[i];  in afs_writepages_region()
/openbmc/linux/fs/btrfs/tests/extent-io-tests.c
    38  struct folio *folio = fbatch.folios[i];  in process_page_range()
/openbmc/linux/fs/f2fs/node.c
  1570  struct page *page = &fbatch.folios[i]->page;  in last_fsync_dnode()
  1796  struct page *page = &fbatch.folios[i]->page;  in f2fs_fsync_node_pages()
  1943  struct page *page = &fbatch.folios[i]->page;  in f2fs_flush_inline_data()
  1997  struct page *page = &fbatch.folios[i]->page;  in f2fs_sync_node_pages()
/openbmc/linux/fs/f2fs/checkpoint.c
   428  struct folio *folio = fbatch.folios[i];  in f2fs_sync_meta_pages()
   432  folio_nr_pages(fbatch.folios[i-1])) {  in f2fs_sync_meta_pages()
/openbmc/linux/fs/btrfs/extent_io.c
   224  struct folio *folio = fbatch.folios[i];  in __process_pages_contig()
   275  struct page *page = &fbatch.folios[i]->page;  in lock_delalloc_pages()
  1904  struct folio *folio = fbatch.folios[i];  in btree_write_cache_pages()
  2061  struct folio *folio = fbatch.folios[i];  in extent_write_cache_pages()