Lines Matching refs:folio (uses of struct folio in mm/migrate.c; the leading number on each entry is the line number in that file)

62 struct folio *folio = folio_get_nontail_page(page); in isolate_movable_page() local
74 if (!folio) in isolate_movable_page()
77 if (unlikely(folio_test_slab(folio))) in isolate_movable_page()
86 if (unlikely(!__folio_test_movable(folio))) in isolate_movable_page()
90 if (unlikely(folio_test_slab(folio))) in isolate_movable_page()
104 if (unlikely(!folio_trylock(folio))) in isolate_movable_page()
107 if (!folio_test_movable(folio) || folio_test_isolated(folio)) in isolate_movable_page()
110 mops = folio_movable_ops(folio); in isolate_movable_page()
111 VM_BUG_ON_FOLIO(!mops, folio); in isolate_movable_page()
113 if (!mops->isolate_page(&folio->page, mode)) in isolate_movable_page()
117 WARN_ON_ONCE(folio_test_isolated(folio)); in isolate_movable_page()
118 folio_set_isolated(folio); in isolate_movable_page()
119 folio_unlock(folio); in isolate_movable_page()
124 folio_unlock(folio); in isolate_movable_page()
126 folio_put(folio); in isolate_movable_page()
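The block above is isolate_movable_page(): take a reference that refuses tail pages, filter out slab and non-movable folios (the slab test is repeated because a page can be freed and reused as slab between checks), then, under the folio lock, ask the owning driver's movable_operations to isolate the folio and mark it isolated. Below is a condensed sketch of that control flow. This and the sketches further down are reconstructions from the listed lines, not the verbatim kernel source; the *_sketch names are illustrative stand-ins, memory barriers and exact labels are elided, and each sketch assumes the usual mm-internal headers such as <linux/migrate.h>.

static bool isolate_movable_page_sketch(struct page *page, isolate_mode_t mode)
{
	struct folio *folio = folio_get_nontail_page(page);
	const struct movable_operations *mops;

	if (!folio)
		return false;		/* already freed, or a tail page */
	if (unlikely(folio_test_slab(folio)))
		goto out_put;
	if (unlikely(!__folio_test_movable(folio)))
		goto out_put;		/* not a driver-movable page */
	if (unlikely(folio_test_slab(folio)))
		goto out_put;		/* recheck: may have become slab meanwhile */

	if (unlikely(!folio_trylock(folio)))
		goto out_put;
	if (!folio_test_movable(folio) || folio_test_isolated(folio))
		goto out_unlock;

	mops = folio_movable_ops(folio);
	VM_BUG_ON_FOLIO(!mops, folio);
	if (!mops->isolate_page(&folio->page, mode))
		goto out_unlock;

	WARN_ON_ONCE(folio_test_isolated(folio));
	folio_set_isolated(folio);
	folio_unlock(folio);
	return true;			/* caller now owns the isolated folio and its ref */

out_unlock:
	folio_unlock(folio);
out_put:
	folio_put(folio);
	return false;
}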
131 static void putback_movable_folio(struct folio *folio) in putback_movable_folio() argument
133 const struct movable_operations *mops = folio_movable_ops(folio); in putback_movable_folio()
135 mops->putback_page(&folio->page); in putback_movable_folio()
136 folio_clear_isolated(folio); in putback_movable_folio()
149 struct folio *folio; in putback_movable_pages() local
150 struct folio *folio2; in putback_movable_pages()
152 list_for_each_entry_safe(folio, folio2, l, lru) { in putback_movable_pages()
153 if (unlikely(folio_test_hugetlb(folio))) { in putback_movable_pages()
154 folio_putback_active_hugetlb(folio); in putback_movable_pages()
157 list_del(&folio->lru); in putback_movable_pages()
163 if (unlikely(__folio_test_movable(folio))) { in putback_movable_pages()
164 VM_BUG_ON_FOLIO(!folio_test_isolated(folio), folio); in putback_movable_pages()
165 folio_lock(folio); in putback_movable_pages()
166 if (folio_test_movable(folio)) in putback_movable_pages()
167 putback_movable_folio(folio); in putback_movable_pages()
169 folio_clear_isolated(folio); in putback_movable_pages()
170 folio_unlock(folio); in putback_movable_pages()
171 folio_put(folio); in putback_movable_pages()
173 node_stat_mod_folio(folio, NR_ISOLATED_ANON + in putback_movable_pages()
174 folio_is_file_lru(folio), -folio_nr_pages(folio)); in putback_movable_pages()
175 folio_putback_lru(folio); in putback_movable_pages()
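putback_movable_folio() and the loop in putback_movable_pages() undo a failed isolation: hugetlb folios go back via folio_putback_active_hugetlb(), driver-movable folios are returned through mops->putback_page() (or merely un-flagged if the driver state went away), and plain LRU folios have their NR_ISOLATED node counters reversed before folio_putback_lru(). A sketch assembled from the lines above:

static void putback_movable_folio_sketch(struct folio *folio)
{
	const struct movable_operations *mops = folio_movable_ops(folio);

	mops->putback_page(&folio->page);
	folio_clear_isolated(folio);
}

static void putback_movable_pages_sketch(struct list_head *l)
{
	struct folio *folio;
	struct folio *folio2;

	list_for_each_entry_safe(folio, folio2, l, lru) {
		if (unlikely(folio_test_hugetlb(folio))) {
			folio_putback_active_hugetlb(folio);
			continue;
		}
		list_del(&folio->lru);
		if (unlikely(__folio_test_movable(folio))) {
			folio_lock(folio);
			if (folio_test_movable(folio))
				putback_movable_folio_sketch(folio);
			else
				folio_clear_isolated(folio);
			folio_unlock(folio);
			folio_put(folio);
		} else {
			/* reverse the isolation accounting, then back to the LRU */
			node_stat_mod_folio(folio, NR_ISOLATED_ANON +
					folio_is_file_lru(folio), -folio_nr_pages(folio));
			folio_putback_lru(folio);
		}
	}
}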
183 static bool remove_migration_pte(struct folio *folio, in remove_migration_pte() argument
197 if (folio_test_large(folio) && !folio_test_hugetlb(folio)) in remove_migration_pte()
199 new = folio_page(folio, idx); in remove_migration_pte()
204 VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) || in remove_migration_pte()
205 !folio_test_pmd_mappable(folio), folio); in remove_migration_pte()
211 folio_get(folio); in remove_migration_pte()
220 if (folio_test_dirty(folio) && is_migration_entry_dirty(entry)) in remove_migration_pte()
227 if (folio_test_anon(folio) && !is_readable_migration_entry(entry)) in remove_migration_pte()
245 if (folio_test_hugetlb(folio)) { in remove_migration_pte()
251 if (folio_test_anon(folio)) in remove_migration_pte()
261 if (folio_test_anon(folio)) in remove_migration_pte()
285 void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked) in remove_migration_ptes()
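remove_migration_pte() is the rmap_one callback that rewrites one migration entry back into a working PTE (preserving the dirty and writable bits recorded in the entry, per lines 220-227 above), and remove_migration_ptes() simply drives it over every mapping of the folio. The driver is short enough to sketch nearly verbatim, using the standard rmap_walk_control API:

void remove_migration_ptes_sketch(struct folio *src, struct folio *dst, bool locked)
{
	struct rmap_walk_control rwc = {
		.rmap_one = remove_migration_pte,	/* the callback listed above at line 183 */
		.arg = src,
	};

	if (locked)
		rmap_walk_locked(dst, &rwc);
	else
		rmap_walk(dst, &rwc);
}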
379 struct folio *folio) in folio_expected_refs() argument
385 refs += folio_nr_pages(folio); in folio_expected_refs()
386 if (folio_test_private(folio)) in folio_expected_refs()
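folio_expected_refs() computes the refcount a folio should have when nobody else holds a transient reference: one for the migration code itself, one per page-cache slot, and one more for fs-private data. A sketch reconstructed from lines 379-386; the early return for a NULL mapping is filled in from context and is an assumption:

static int folio_expected_refs_sketch(struct address_space *mapping,
		struct folio *folio)
{
	int refs = 1;			/* the migration code's own reference */

	if (!mapping)
		return refs;		/* anon folio outside the swap cache */

	refs += folio_nr_pages(folio);	/* one per page-cache slot */
	if (folio_test_private(folio))
		refs++;			/* buffer heads / fs-private data */

	return refs;
}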
401 struct folio *newfolio, struct folio *folio, int extra_count) in folio_migrate_mapping() argument
403 XA_STATE(xas, &mapping->i_pages, folio_index(folio)); in folio_migrate_mapping()
406 int expected_count = folio_expected_refs(mapping, folio) + extra_count; in folio_migrate_mapping()
407 long nr = folio_nr_pages(folio); in folio_migrate_mapping()
412 if (folio_ref_count(folio) != expected_count) in folio_migrate_mapping()
416 newfolio->index = folio->index; in folio_migrate_mapping()
417 newfolio->mapping = folio->mapping; in folio_migrate_mapping()
418 if (folio_test_swapbacked(folio)) in folio_migrate_mapping()
424 oldzone = folio_zone(folio); in folio_migrate_mapping()
428 if (!folio_ref_freeze(folio, expected_count)) { in folio_migrate_mapping()
437 newfolio->index = folio->index; in folio_migrate_mapping()
438 newfolio->mapping = folio->mapping; in folio_migrate_mapping()
440 if (folio_test_swapbacked(folio)) { in folio_migrate_mapping()
442 if (folio_test_swapcache(folio)) { in folio_migrate_mapping()
444 newfolio->private = folio_get_private(folio); in folio_migrate_mapping()
448 VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio); in folio_migrate_mapping()
453 dirty = folio_test_dirty(folio); in folio_migrate_mapping()
455 folio_clear_dirty(folio); in folio_migrate_mapping()
470 folio_ref_unfreeze(folio, expected_count - nr); in folio_migrate_mapping()
489 memcg = folio_memcg(folio); in folio_migrate_mapping()
495 if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) { in folio_migrate_mapping()
499 if (folio_test_pmd_mappable(folio)) { in folio_migrate_mapping()
505 if (folio_test_swapcache(folio)) { in folio_migrate_mapping()
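folio_migrate_mapping() is the core of the block above: it atomically moves a folio's place in the page cache (or swap cache) over to newfolio by freezing the refcount at the expected value, copying index/mapping/swap state, storing newfolio into the xarray slots, and unfreezing the old folio minus the cache references. A heavily condensed sketch; the memcg/NR_FILE_PAGES/NR_SHMEM accounting visible at lines 489-505 is reduced to a comment:

int folio_migrate_mapping_sketch(struct address_space *mapping,
		struct folio *newfolio, struct folio *folio, int extra_count)
{
	XA_STATE(xas, &mapping->i_pages, folio_index(folio));
	int expected_count = folio_expected_refs(mapping, folio) + extra_count;
	long nr = folio_nr_pages(folio);

	if (!mapping) {
		/* anon, not in swap cache: no cache entries to transfer */
		if (folio_ref_count(folio) != expected_count)
			return -EAGAIN;
		newfolio->index = folio->index;
		newfolio->mapping = folio->mapping;
		if (folio_test_swapbacked(folio))
			__folio_set_swapbacked(newfolio);
		return MIGRATEPAGE_SUCCESS;
	}

	xas_lock_irq(&xas);
	if (!folio_ref_freeze(folio, expected_count)) {
		/* someone holds a transient reference; retry later */
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	newfolio->index = folio->index;
	newfolio->mapping = folio->mapping;
	if (folio_test_swapbacked(folio)) {
		__folio_set_swapbacked(newfolio);
		if (folio_test_swapcache(folio)) {
			folio_set_swapcache(newfolio);
			newfolio->private = folio_get_private(folio);
		}
	}

	/* dirtiness travels with the data */
	if (folio_test_dirty(folio)) {
		folio_clear_dirty(folio);
		folio_set_dirty(newfolio);
	}

	folio_ref_add(newfolio, nr);	/* the cache's references, moved over */
	xas_store(&xas, newfolio);

	/* drop the cache references from the old folio */
	folio_ref_unfreeze(folio, expected_count - nr);
	xas_unlock_irq(&xas);

	/* ... NR_FILE_PAGES/NR_SHMEM/memcg stats are shifted between zones here ... */
	return MIGRATEPAGE_SUCCESS;
}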
528 struct folio *dst, struct folio *src) in migrate_huge_page_move_mapping()
557 void folio_migrate_flags(struct folio *newfolio, struct folio *folio) in folio_migrate_flags() argument
561 if (folio_test_error(folio)) in folio_migrate_flags()
563 if (folio_test_referenced(folio)) in folio_migrate_flags()
565 if (folio_test_uptodate(folio)) in folio_migrate_flags()
567 if (folio_test_clear_active(folio)) { in folio_migrate_flags()
568 VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio); in folio_migrate_flags()
570 } else if (folio_test_clear_unevictable(folio)) in folio_migrate_flags()
572 if (folio_test_workingset(folio)) in folio_migrate_flags()
574 if (folio_test_checked(folio)) in folio_migrate_flags()
582 if (folio_test_mappedtodisk(folio)) in folio_migrate_flags()
586 if (folio_test_dirty(folio)) in folio_migrate_flags()
589 if (folio_test_young(folio)) in folio_migrate_flags()
591 if (folio_test_idle(folio)) in folio_migrate_flags()
598 cpupid = page_cpupid_xchg_last(&folio->page, -1); in folio_migrate_flags()
605 bool f_toptier = node_is_toptier(page_to_nid(&folio->page)); in folio_migrate_flags()
613 folio_migrate_ksm(newfolio, folio); in folio_migrate_flags()
618 if (folio_test_swapcache(folio)) in folio_migrate_flags()
619 folio_clear_swapcache(folio); in folio_migrate_flags()
620 folio_clear_private(folio); in folio_migrate_flags()
623 if (!folio_test_hugetlb(folio)) in folio_migrate_flags()
624 folio->private = NULL; in folio_migrate_flags()
638 if (folio_test_readahead(folio)) in folio_migrate_flags()
641 folio_copy_owner(newfolio, folio); in folio_migrate_flags()
643 if (!folio_test_hugetlb(folio)) in folio_migrate_flags()
644 mem_cgroup_migrate(folio, newfolio); in folio_migrate_flags()
648 void folio_migrate_copy(struct folio *newfolio, struct folio *folio) in folio_migrate_copy() argument
650 folio_copy(newfolio, folio); in folio_migrate_copy()
651 folio_migrate_flags(newfolio, folio); in folio_migrate_copy()
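folio_migrate_flags() mirrors the remaining state flag by flag onto the new folio (plus cpupid, page-idle bits, KSM stable-tree linkage, page-owner, and the memcg charge), and folio_migrate_copy() is just folio_copy() followed by folio_migrate_flags(). An excerpt-style sketch of the recurring test-old/set-new pattern; the real function handles many more flags than shown:

static void folio_migrate_flags_sketch(struct folio *newfolio, struct folio *folio)
{
	if (folio_test_referenced(folio))
		folio_set_referenced(newfolio);
	if (folio_test_uptodate(folio))
		folio_mark_uptodate(newfolio);
	if (folio_test_dirty(folio))
		folio_set_dirty(newfolio);	/* for paths that skipped the mapping transfer */

	/* once transferred, the old folio's cache state is cleared */
	if (folio_test_swapcache(folio))
		folio_clear_swapcache(folio);
	folio_clear_private(folio);
	if (!folio_test_hugetlb(folio))
		folio->private = NULL;

	/* the cgroup charge follows the data */
	if (!folio_test_hugetlb(folio))
		mem_cgroup_migrate(folio, newfolio);
}

static void folio_migrate_copy_sketch(struct folio *newfolio, struct folio *folio)
{
	folio_copy(newfolio, folio);		/* copy the page contents */
	folio_migrate_flags_sketch(newfolio, folio);
}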
659 int migrate_folio_extra(struct address_space *mapping, struct folio *dst, in migrate_folio_extra()
660 struct folio *src, enum migrate_mode mode, int extra_count) in migrate_folio_extra()
690 int migrate_folio(struct address_space *mapping, struct folio *dst, in migrate_folio()
691 struct folio *src, enum migrate_mode mode) in migrate_folio()
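migrate_folio() is the generic a_ops->migrate_folio for mappings without private data: it delegates to migrate_folio_extra() with an extra_count of 0, which moves the mapping and then either copies contents plus flags or, in MIGRATE_SYNC_NO_COPY mode (where the caller copies the data itself, e.g. via DMA, in kernels of this era), flags only. A sketch of that pairing:

int migrate_folio_extra_sketch(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode, int extra_count)
{
	int rc;

	BUG_ON(folio_test_writeback(src));	/* writeback must be complete */

	rc = folio_migrate_mapping(mapping, dst, src, extra_count);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(dst, src);	/* data + flags */
	else
		folio_migrate_flags(dst, src);	/* flags only */
	return rc;
}

int migrate_folio_sketch(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode)
{
	return migrate_folio_extra_sketch(mapping, dst, src, mode, 0);
}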
732 struct folio *dst, struct folio *src, enum migrate_mode mode, in __buffer_migrate_folio()
824 struct folio *dst, struct folio *src, enum migrate_mode mode) in buffer_migrate_folio()
845 struct folio *dst, struct folio *src, enum migrate_mode mode) in buffer_migrate_folio_norefs()
853 struct folio *dst, struct folio *src, enum migrate_mode mode) in filemap_migrate_folio()
875 static int writeout(struct address_space *mapping, struct folio *folio) in writeout() argument
890 if (!folio_clear_dirty_for_io(folio)) in writeout()
902 remove_migration_ptes(folio, folio, false); in writeout()
904 rc = mapping->a_ops->writepage(&folio->page, &wbc); in writeout()
908 folio_lock(folio); in writeout()
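writeout() is the escape hatch for dirty folios whose filesystem cannot migrate them: migration is abandoned, the migration entries are pointed back at the folio itself (line 902 passes the same folio as src and dst), and the folio is pushed to disk so that a later pass finds it clean. A condensed sketch; the full writeback_control setup is abbreviated:

static int writeout_sketch(struct address_space *mapping, struct folio *folio)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		/* range and for_reclaim fields elided */
	};
	int rc;

	if (!mapping->a_ops->writepage)
		return -EINVAL;		/* no write method for this mapping */

	if (!folio_clear_dirty_for_io(folio))
		return -EAGAIN;		/* someone else already triggered writeback */

	/* undo the migration entries: they point back at this same folio */
	remove_migration_ptes(folio, folio, false);

	rc = mapping->a_ops->writepage(&folio->page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		folio_lock(folio);	/* writepage unlocked the folio; relock */

	return (rc < 0) ? -EIO : -EAGAIN;	/* never "success": migration is retried later */
}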
917 struct folio *dst, struct folio *src, enum migrate_mode mode) in fallback_migrate_folio()
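fallback_migrate_folio() ties the fallback path together: dirty folios are only written out in fully synchronous migration modes (the real code also admits MIGRATE_SYNC_NO_COPY; the sketch checks only MIGRATE_SYNC), folios holding fs-private buffers must release them first, and everything else funnels into plain migrate_folio(). A sketch under those assumptions:

static int fallback_migrate_folio_sketch(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	if (folio_test_dirty(src)) {
		if (mode != MIGRATE_SYNC)
			return -EBUSY;	/* only write back in full-sync mode */
		return writeout(mapping, src);
	}

	/* buffers are fs-managed; drop them or give up for now */
	if (folio_test_private(src) &&
	    !filemap_release_folio(src, GFP_KERNEL))
		return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;

	return migrate_folio(mapping, dst, src, mode);
}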
952 static int move_to_new_folio(struct folio *dst, struct folio *src, in move_to_new_folio()
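move_to_new_folio() is the dispatcher sitting above all of these: LRU folios are routed to the mapping's migrate_folio method, or to migrate_folio()/fallback_migrate_folio() when there is no mapping or no method, while driver-movable folios go to their movable_operations. A condensed sketch omitting the assertions and post-success cleanup:

static int move_to_new_folio_sketch(struct folio *dst, struct folio *src,
		enum migrate_mode mode)
{
	bool is_lru = !__folio_test_movable(src);

	if (likely(is_lru)) {
		struct address_space *mapping = folio_mapping(src);

		if (!mapping)
			return migrate_folio(mapping, dst, src, mode);
		if (mapping->a_ops->migrate_folio)
			return mapping->a_ops->migrate_folio(mapping, dst, src, mode);
		return fallback_migrate_folio(mapping, dst, src, mode);
	}

	/* driver-movable folio: the driver moves its own data */
	return folio_movable_ops(src)->migrate_page(&dst->page, &src->page, mode);
}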
1039 static void __migrate_folio_record(struct folio *dst, in __migrate_folio_record()
1046 static void __migrate_folio_extract(struct folio *dst, in __migrate_folio_extract()
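__migrate_folio_record() and __migrate_folio_extract() stash per-folio migration state between the unmap and move phases in fields of the destination folio, which is not yet visible to anyone else. The exact encoding differs across kernel versions (line 1812's old_page_state & PAGE_WAS_MAPPED reflects a packed single-field encoding); the sketch below shows the simpler two-field variant and is illustrative only:

static void __migrate_folio_record_sketch(struct folio *dst,
		int page_was_mapped, struct anon_vma *anon_vma)
{
	dst->mapping = (void *)(unsigned long)page_was_mapped;
	dst->private = anon_vma;
}

static void __migrate_folio_extract_sketch(struct folio *dst,
		int *page_was_mapped, struct anon_vma **anon_vmap)
{
	*anon_vmap = dst->private;
	*page_was_mapped = (unsigned long)dst->mapping;
	dst->mapping = NULL;
	dst->private = NULL;
}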
1058 static void migrate_folio_undo_src(struct folio *src, in migrate_folio_undo_src()
1076 static void migrate_folio_undo_dst(struct folio *dst, bool locked, in migrate_folio_undo_dst()
1088 static void migrate_folio_done(struct folio *src, in migrate_folio_done()
1108 struct folio *src, struct folio **dstp, enum migrate_mode mode, in migrate_folio_unmap()
1111 struct folio *dst; in migrate_folio_unmap()
1269 struct folio *src, struct folio *dst, in migrate_folio_move()
1366 struct folio *src, int force, enum migrate_mode mode, in unmap_and_move_huge_page()
1369 struct folio *dst; in unmap_and_move_huge_page()
1478 static inline int try_split_folio(struct folio *folio, struct list_head *split_folios) in try_split_folio() argument
1482 folio_lock(folio); in try_split_folio()
1483 rc = split_folio_to_list(folio, split_folios); in try_split_folio()
1484 folio_unlock(folio); in try_split_folio()
1486 list_move_tail(&folio->lru, split_folios); in try_split_folio()
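try_split_folio() is almost fully reproduced by the lines above: split the large folio under its lock, and on success (split_folio_to_list() returning 0) the now-small head folio joins its former tails on split_folios. Filled in as a sketch:

static int try_split_folio_sketch(struct folio *folio,
		struct list_head *split_folios)
{
	int rc;

	folio_lock(folio);
	rc = split_folio_to_list(folio, split_folios);	/* tails land on the list */
	folio_unlock(folio);
	if (!rc)
		list_move_tail(&folio->lru, split_folios);	/* and so does the head */

	return rc;
}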
1528 struct folio *folio, *folio2; in migrate_hugetlbs() local
1535 list_for_each_entry_safe(folio, folio2, from, lru) { in migrate_hugetlbs()
1536 if (!folio_test_hugetlb(folio)) in migrate_hugetlbs()
1539 nr_pages = folio_nr_pages(folio); in migrate_hugetlbs()
1550 if (!hugepage_migration_supported(folio_hstate(folio))) { in migrate_hugetlbs()
1553 list_move_tail(&folio->lru, ret_folios); in migrate_hugetlbs()
1559 folio, pass > 2, mode, in migrate_hugetlbs()
1628 struct folio *folio, *folio2, *dst = NULL, *dst2; in migrate_pages_batch() local
1642 list_for_each_entry_safe(folio, folio2, from, lru) { in migrate_pages_batch()
1643 is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio); in migrate_pages_batch()
1644 nr_pages = folio_nr_pages(folio); in migrate_pages_batch()
1661 if (!try_split_folio(folio, split_folios)) { in migrate_pages_batch()
1666 list_move_tail(&folio->lru, ret_folios); in migrate_pages_batch()
1671 private, folio, &dst, mode, reason, in migrate_pages_batch()
1691 if (folio_test_large(folio) && !nosplit) { in migrate_pages_batch()
1692 int ret = try_split_folio(folio, split_folios); in migrate_pages_batch()
1731 list_move_tail(&folio->lru, &unmap_folios); in migrate_pages_batch()
1761 dst = list_first_entry(&dst_folios, struct folio, lru); in migrate_pages_batch()
1763 list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) { in migrate_pages_batch()
1764 is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio); in migrate_pages_batch()
1765 nr_pages = folio_nr_pages(folio); in migrate_pages_batch()
1770 folio, dst, mode, in migrate_pages_batch()
1805 dst = list_first_entry(&dst_folios, struct folio, lru); in migrate_pages_batch()
1807 list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) { in migrate_pages_batch()
1812 migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED, in migrate_pages_batch()
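migrate_pages_batch() runs in two phases, visible in the lines above: a first loop unmaps every source folio (splitting large folios when necessary and allowed, lines 1691-1692) and collects the pairs on unmap_folios/dst_folios; a second loop walks both lists in lockstep (lines 1761-1770) and moves the contents; a cleanup loop (lines 1805-1812) undoes whatever failed via migrate_folio_undo_src()/migrate_folio_undo_dst(). The structural sketch below uses hypothetical unmap_one()/move_one() callbacks in place of migrate_folio_unmap()/migrate_folio_move(), whose full signatures do not appear in this listing:

static void migrate_pages_batch_sketch(struct list_head *from,
		int (*unmap_one)(struct folio *src, struct folio **dstp),
		int (*move_one)(struct folio *src, struct folio *dst))
{
	LIST_HEAD(unmap_folios);
	LIST_HEAD(dst_folios);
	struct folio *folio, *folio2, *dst, *dst2;

	/* phase 1: unmap each source folio and remember its destination */
	list_for_each_entry_safe(folio, folio2, from, lru) {
		struct folio *d;

		if (unmap_one(folio, &d))
			continue;	/* failed or retried folios handled elsewhere */
		list_move_tail(&folio->lru, &unmap_folios);
		list_add_tail(&d->lru, &dst_folios);
	}

	if (list_empty(&unmap_folios))
		return;

	/* phase 2: walk source and destination lists in lockstep */
	dst = list_first_entry(&dst_folios, struct folio, lru);
	dst2 = list_next_entry(dst, lru);
	list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
		move_one(folio, dst);
		dst = dst2;
		dst2 = list_next_entry(dst, lru);
	}
}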
1901 struct folio *folio, *folio2; in migrate_pages() local
1918 list_for_each_entry_safe(folio, folio2, from, lru) { in migrate_pages()
1920 if (folio_test_hugetlb(folio)) { in migrate_pages()
1921 list_move_tail(&folio->lru, &ret_folios); in migrate_pages()
1925 nr_pages += folio_nr_pages(folio); in migrate_pages()
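migrate_pages() itself mostly carves the work into bounded batches: hugetlb folios are handled separately by migrate_hugetlbs() (lines 1528-1559 above, which also skip hstates without migration support), and the remaining folios are fed to migrate_pages_batch() once the running page count reaches the kernel's cap, NR_MAX_BATCHED_MIGRATION in this era of the code. The batch-cutting loop from lines 1918-1925, wrapped in a hypothetical helper for self-containment:

static long cut_batch_sketch(struct list_head *from, struct list_head *ret_folios)
{
	struct folio *folio, *folio2;
	long nr_pages = 0;

	list_for_each_entry_safe(folio, folio2, from, lru) {
		/* hugetlb folios are migrated by migrate_hugetlbs() instead */
		if (folio_test_hugetlb(folio)) {
			list_move_tail(&folio->lru, ret_folios);
			continue;
		}
		nr_pages += folio_nr_pages(folio);
		if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
			break;	/* batch is full; the caller splices it off and migrates it */
	}
	return nr_pages;
}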
1991 struct folio *alloc_migration_target(struct folio *src, unsigned long private) in alloc_migration_target()
2487 static struct folio *alloc_misplaced_dst_folio(struct folio *src, in alloc_misplaced_dst_folio()