Lines matching refs:folio — cross-reference hits for the folio identifier in the reverse-mapping (rmap) code. Each entry gives the source line number, the matching line, and the enclosing function; a trailing "argument" or "local" marks hits that declare a function parameter or a local variable.

494 struct anon_vma *folio_get_anon_vma(struct folio *folio)  in folio_get_anon_vma()  argument
500 anon_mapping = (unsigned long)READ_ONCE(folio->mapping); in folio_get_anon_vma()
503 if (!folio_mapped(folio)) in folio_get_anon_vma()
519 if (!folio_mapped(folio)) { in folio_get_anon_vma()
538 struct anon_vma *folio_lock_anon_vma_read(struct folio *folio, in folio_lock_anon_vma_read() argument
546 anon_mapping = (unsigned long)READ_ONCE(folio->mapping); in folio_lock_anon_vma_read()
549 if (!folio_mapped(folio)) in folio_lock_anon_vma_read()
560 if (!folio_mapped(folio)) { in folio_lock_anon_vma_read()
579 if (!folio_mapped(folio)) { in folio_lock_anon_vma_read()
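
Both helpers above decode the anon_vma pointer packed into folio->mapping of an anonymous folio: folio_get_anon_vma() pins it with a reference, while folio_lock_anon_vma_read() also takes the root rwsem for reading. A minimal sketch of the caller pattern the refcounting variant expects; example_walk_anon_vma() and its error handling are illustrative assumptions, not kernel code:

static int example_walk_anon_vma(struct folio *folio)
{
        struct anon_vma *anon_vma;

        /* Returns NULL once the folio is no longer anon or no longer mapped. */
        anon_vma = folio_get_anon_vma(folio);
        if (!anon_vma)
                return -EBUSY;

        anon_vma_lock_write(anon_vma);          /* exclude concurrent rmap walks */
        /* ... visit the VMAs hanging off anon_vma->rb_root ... */
        anon_vma_unlock_write(anon_vma);

        put_anon_vma(anon_vma);                 /* drop the reference taken above */
        return 0;
}
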
747 struct folio *folio = page_folio(page); in page_address_in_vma() local
748 if (folio_test_anon(folio)) { in page_address_in_vma()
749 struct anon_vma *page__anon_vma = folio_anon_vma(folio); in page_address_in_vma()
759 } else if (vma->vm_file->f_mapping != folio->mapping) { in page_address_in_vma()
804 static bool folio_referenced_one(struct folio *folio, in folio_referenced_one() argument
808 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); in folio_referenced_one()
815 (!folio_test_large(folio) || !pvmw.pte)) { in folio_referenced_one()
817 mlock_vma_folio(folio, vma, !pvmw.pte); in folio_referenced_one()
846 folio_clear_idle(folio); in folio_referenced_one()
847 if (folio_test_clear_young(folio)) in folio_referenced_one()
897 int folio_referenced(struct folio *folio, int is_locked, in folio_referenced() argument
902 .mapcount = folio_mapcount(folio), in folio_referenced()
917 if (!folio_raw_mapping(folio)) in folio_referenced()
920 if (!is_locked && (!folio_test_anon(folio) || folio_test_ksm(folio))) { in folio_referenced()
921 we_locked = folio_trylock(folio); in folio_referenced()
926 rmap_walk(folio, &rwc); in folio_referenced()
930 folio_unlock(folio); in folio_referenced()
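
folio_referenced() seeds its per-walk state (the .mapcount initialiser above) and lets rmap_walk() call folio_referenced_one() for every mapping VMA. A hedged sketch of how a reclaim-style caller consumes the result; the policy and the example_* name are assumptions, not a copy of shrink_folio_list():

static bool example_folio_recently_used(struct folio *folio,
                                        struct mem_cgroup *memcg)
{
        unsigned long vm_flags = 0;
        int refs;

        /* '1': the caller already holds the folio lock. */
        refs = folio_referenced(folio, 1, memcg, &vm_flags);

        if (vm_flags & VM_LOCKED)
                return true;    /* mapped into an mlocked VMA: keep it */
        return refs > 0;        /* a young PTE was found in some VMA */
}
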
997 static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma, in page_mkclean_one() argument
1000 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC); in page_mkclean_one()
1016 int folio_mkclean(struct folio *folio) in folio_mkclean() argument
1026 BUG_ON(!folio_test_locked(folio)); in folio_mkclean()
1028 if (!folio_mapped(folio)) in folio_mkclean()
1031 mapping = folio_mapping(folio); in folio_mkclean()
1035 rmap_walk(folio, &rwc); in folio_mkclean()
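
folio_mkclean() uses a PVMW_SYNC walk (page_mkclean_one() above) to write-protect and clean every PTE or PMD mapping a file folio; its return value says whether any mapping was writable or dirty. A hedged sketch of the usual pairing with the folio dirty flag, in the spirit of the writeback code; example_write_protect_folio() is illustrative:

static void example_write_protect_folio(struct folio *folio)
{
        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

        /*
         * If any PTE carried writable/dirty state, transfer it to the
         * folio so the data is not lost before writeback runs.
         */
        if (folio_mkclean(folio))
                folio_mark_dirty(folio);
}
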
1073 int folio_total_mapcount(struct folio *folio) in folio_total_mapcount() argument
1075 int mapcount = folio_entire_mapcount(folio); in folio_total_mapcount()
1080 if (folio_nr_pages_mapped(folio) == 0) in folio_total_mapcount()
1087 nr_pages = folio_nr_pages(folio); in folio_total_mapcount()
1089 mapcount += atomic_read(&folio_page(folio, i)->_mapcount); in folio_total_mapcount()
1109 struct folio *folio = page_folio(page); in page_move_anon_rmap() local
1111 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); in page_move_anon_rmap()
1120 WRITE_ONCE(folio->mapping, anon_vma); in page_move_anon_rmap()
1132 static void __page_set_anon_rmap(struct folio *folio, struct page *page, in __page_set_anon_rmap() argument
1139 if (folio_test_anon(folio)) in __page_set_anon_rmap()
1157 WRITE_ONCE(folio->mapping, (struct address_space *) anon_vma); in __page_set_anon_rmap()
1158 folio->index = linear_page_index(vma, address); in __page_set_anon_rmap()
1171 static void __page_check_anon_rmap(struct folio *folio, struct page *page, in __page_check_anon_rmap() argument
1185 VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root, in __page_check_anon_rmap()
1186 folio); in __page_check_anon_rmap()
1206 struct folio *folio = page_folio(page); in page_add_anon_rmap() local
1207 atomic_t *mapped = &folio->_nr_pages_mapped; in page_add_anon_rmap()
1216 if (first && folio_test_large(folio)) { in page_add_anon_rmap()
1220 } else if (folio_test_pmd_mappable(folio)) { in page_add_anon_rmap()
1223 first = atomic_inc_and_test(&folio->_entire_mapcount); in page_add_anon_rmap()
1227 nr_pmdmapped = folio_nr_pages(folio); in page_add_anon_rmap()
1243 __lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr_pmdmapped); in page_add_anon_rmap()
1245 __lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr); in page_add_anon_rmap()
1247 if (likely(!folio_test_ksm(folio))) { in page_add_anon_rmap()
1250 __page_set_anon_rmap(folio, page, vma, address, in page_add_anon_rmap()
1253 __page_check_anon_rmap(folio, page, vma, address); in page_add_anon_rmap()
1256 mlock_vma_folio(folio, vma, compound); in page_add_anon_rmap()
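
page_add_anon_rmap() is for folios that are being mapped anonymously but were not freshly allocated by this fault (swap-cache folios in do_swap_page(), migration-entry restore, and similar); the rmap_t flags tell it whether the caller can guarantee exclusivity and whether this is a compound (PMD) mapping. A hedged sketch of a swap-in style caller; the helper name and the simplified exclusivity decision are assumptions:

static void example_swapin_add_rmap(struct vm_fault *vmf, bool exclusive)
{
        rmap_t flags = exclusive ? RMAP_EXCLUSIVE : RMAP_NONE;

        /*
         * Bumps the mapcount and NR_ANON_MAPPED; on the first map of a
         * not-yet-anon folio it also fills in folio->mapping and ->index
         * (see __page_set_anon_rmap() above).
         */
        page_add_anon_rmap(vmf->page, vmf->vma, vmf->address, flags);
        /* ... then build the PTE and set_pte_at() under the PTE lock ... */
}
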
1272 void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma, in folio_add_new_anon_rmap() argument
1278 __folio_set_swapbacked(folio); in folio_add_new_anon_rmap()
1280 if (likely(!folio_test_pmd_mappable(folio))) { in folio_add_new_anon_rmap()
1282 atomic_set(&folio->_mapcount, 0); in folio_add_new_anon_rmap()
1286 atomic_set(&folio->_entire_mapcount, 0); in folio_add_new_anon_rmap()
1287 atomic_set(&folio->_nr_pages_mapped, COMPOUND_MAPPED); in folio_add_new_anon_rmap()
1288 nr = folio_nr_pages(folio); in folio_add_new_anon_rmap()
1289 __lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr); in folio_add_new_anon_rmap()
1292 __lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr); in folio_add_new_anon_rmap()
1293 __page_set_anon_rmap(folio, &folio->page, vma, address, 1); in folio_add_new_anon_rmap()
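
folio_add_new_anon_rmap() is the fast variant for a folio the caller has just allocated: no existing-anon checks, mapcounts initialised with atomic_set(), and the mapping installed as exclusive (the trailing '1' passed to __page_set_anon_rmap() above). A hedged sketch of the order of operations in a do_anonymous_page()-style fault tail; PTE lookup and locking are assumed to be done by the caller, and example_finish_anon_fault() is not a kernel function:

static void example_finish_anon_fault(struct vm_area_struct *vma,
                                      unsigned long addr, pte_t *pte,
                                      struct folio *folio)
{
        pte_t entry = mk_pte(&folio->page, vma->vm_page_prot);

        if (vma->vm_flags & VM_WRITE)
                entry = pte_mkwrite(pte_mkdirty(entry));

        folio_add_new_anon_rmap(folio, vma, addr);      /* rmap first ...   */
        folio_add_lru_vma(folio, vma);                  /* ... then LRU ... */
        set_pte_at(vma->vm_mm, addr, pte, entry);       /* ... then map it  */
        update_mmu_cache(vma, addr, pte);
}
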
1308 void folio_add_file_rmap_range(struct folio *folio, struct page *page, in folio_add_file_rmap_range() argument
1312 atomic_t *mapped = &folio->_nr_pages_mapped; in folio_add_file_rmap_range()
1316 VM_WARN_ON_FOLIO(compound && !folio_test_pmd_mappable(folio), folio); in folio_add_file_rmap_range()
1322 if (first && folio_test_large(folio)) { in folio_add_file_rmap_range()
1330 } else if (folio_test_pmd_mappable(folio)) { in folio_add_file_rmap_range()
1333 first = atomic_inc_and_test(&folio->_entire_mapcount); in folio_add_file_rmap_range()
1337 nr_pmdmapped = folio_nr_pages(folio); in folio_add_file_rmap_range()
1350 __lruvec_stat_mod_folio(folio, folio_test_swapbacked(folio) ? in folio_add_file_rmap_range()
1353 __lruvec_stat_mod_folio(folio, NR_FILE_MAPPED, nr); in folio_add_file_rmap_range()
1355 mlock_vma_folio(folio, vma, compound); in folio_add_file_rmap_range()
1369 struct folio *folio = page_folio(page); in page_add_file_rmap() local
1377 nr_pages = folio_nr_pages(folio); in page_add_file_rmap()
1379 folio_add_file_rmap_range(folio, page, nr_pages, vma, compound); in page_add_file_rmap()
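
On the file side the folio is already in the page cache, so the fault path only bumps mapcounts and NR_FILE_MAPPED; the _range variant lets one call cover several consecutive subpages of a large folio mapped by PTEs. A hedged sketch, with the caller assumed to hold the folio lock and the PTE lock; the example_* name is illustrative:

static void example_map_file_subpages(struct vm_area_struct *vma,
                                      struct folio *folio, struct page *page,
                                      unsigned int nr, unsigned long addr,
                                      pte_t *pte)
{
        /* One mapcount/stat update covering the whole PTE-mapped range. */
        folio_add_file_rmap_range(folio, page, nr, vma, false);

        for (; nr--; page++, addr += PAGE_SIZE, pte++)
                set_pte_at(vma->vm_mm, addr, pte,
                           mk_pte(page, vma->vm_page_prot));
}
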
1393 struct folio *folio = page_folio(page); in page_remove_rmap() local
1394 atomic_t *mapped = &folio->_nr_pages_mapped; in page_remove_rmap()
1402 if (unlikely(folio_test_hugetlb(folio))) { in page_remove_rmap()
1404 atomic_dec(&folio->_entire_mapcount); in page_remove_rmap()
1412 if (last && folio_test_large(folio)) { in page_remove_rmap()
1416 } else if (folio_test_pmd_mappable(folio)) { in page_remove_rmap()
1419 last = atomic_add_negative(-1, &folio->_entire_mapcount); in page_remove_rmap()
1423 nr_pmdmapped = folio_nr_pages(folio); in page_remove_rmap()
1436 if (folio_test_anon(folio)) in page_remove_rmap()
1438 else if (folio_test_swapbacked(folio)) in page_remove_rmap()
1442 __lruvec_stat_mod_folio(folio, idx, -nr_pmdmapped); in page_remove_rmap()
1445 idx = folio_test_anon(folio) ? NR_ANON_MAPPED : NR_FILE_MAPPED; in page_remove_rmap()
1446 __lruvec_stat_mod_folio(folio, idx, -nr); in page_remove_rmap()
1453 if (folio_test_pmd_mappable(folio) && folio_test_anon(folio)) in page_remove_rmap()
1455 deferred_split_folio(folio); in page_remove_rmap()
1466 munlock_vma_folio(folio, vma, compound); in page_remove_rmap()
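
page_remove_rmap() mirrors the add paths: the caller clears the PTE first, then drops the mapcount, and an anon THP that loses mappings may be queued for deferred split (deferred_split_folio() above). A hedged sketch of a zap-style user with TLB batching and the special cases elided; example_zap_one_pte() is illustrative:

static void example_zap_one_pte(struct vm_area_struct *vma,
                                unsigned long addr, pte_t *pte)
{
        pte_t ptent = ptep_get_and_clear(vma->vm_mm, addr, pte);
        struct page *page = vm_normal_page(vma, addr, ptent);

        if (!page)
                return;                         /* zero page, PFN mapping, ... */

        page_remove_rmap(page, vma, false);     /* a small/PTE-level mapping */
        folio_put(page_folio(page));            /* drop the mapping's reference */
}
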
1472 static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, in try_to_unmap_one() argument
1476 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); in try_to_unmap_one()
1495 split_huge_pmd_address(vma, address, false, folio); in try_to_unmap_one()
1508 if (folio_test_hugetlb(folio)) { in try_to_unmap_one()
1523 VM_BUG_ON_FOLIO(!pvmw.pte, folio); in try_to_unmap_one()
1531 mlock_vma_folio(folio, vma, false); in try_to_unmap_one()
1538 subpage = folio_page(folio, pfn - folio_pfn(folio)); in try_to_unmap_one()
1540 anon_exclusive = folio_test_anon(folio) && in try_to_unmap_one()
1543 if (folio_test_hugetlb(folio)) { in try_to_unmap_one()
1544 bool anon = folio_test_anon(folio); in try_to_unmap_one()
1626 folio_mark_dirty(folio); in try_to_unmap_one()
1633 if (folio_test_hugetlb(folio)) { in try_to_unmap_one()
1634 hugetlb_count_sub(folio_nr_pages(folio), mm); in try_to_unmap_one()
1638 dec_mm_counter(mm, mm_counter(&folio->page)); in try_to_unmap_one()
1653 dec_mm_counter(mm, mm_counter(&folio->page)); in try_to_unmap_one()
1654 } else if (folio_test_anon(folio)) { in try_to_unmap_one()
1661 if (unlikely(folio_test_swapbacked(folio) != in try_to_unmap_one()
1662 folio_test_swapcache(folio))) { in try_to_unmap_one()
1670 if (!folio_test_swapbacked(folio)) { in try_to_unmap_one()
1680 ref_count = folio_ref_count(folio); in try_to_unmap_one()
1681 map_count = folio_mapcount(folio); in try_to_unmap_one()
1694 !folio_test_dirty(folio)) { in try_to_unmap_one()
1704 folio_set_swapbacked(folio); in try_to_unmap_one()
1761 dec_mm_counter(mm, mm_counter_file(&folio->page)); in try_to_unmap_one()
1764 page_remove_rmap(subpage, vma, folio_test_hugetlb(folio)); in try_to_unmap_one()
1767 folio_put(folio); in try_to_unmap_one()
1780 static int folio_not_mapped(struct folio *folio) in folio_not_mapped() argument
1782 return !folio_mapped(folio); in folio_not_mapped()
1796 void try_to_unmap(struct folio *folio, enum ttu_flags flags) in try_to_unmap() argument
1806 rmap_walk_locked(folio, &rwc); in try_to_unmap()
1808 rmap_walk(folio, &rwc); in try_to_unmap()
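
try_to_unmap() has no return value of its own: it runs try_to_unmap_one() against every VMA and the caller judges success by whether any mapcount survived. A hedged sketch of the reclaim-style call/check pattern; the flag choice is an assumption:

static bool example_unmap_for_reclaim(struct folio *folio)
{
        enum ttu_flags flags = TTU_BATCH_FLUSH;

        if (folio_test_pmd_mappable(folio))
                flags |= TTU_SPLIT_HUGE_PMD;    /* remap by PTEs, then unmap */

        try_to_unmap(folio, flags);
        return !folio_mapped(folio);            /* fully unmapped? */
}
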
1817 static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, in try_to_migrate_one() argument
1821 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); in try_to_migrate_one()
1844 split_huge_pmd_address(vma, address, true, folio); in try_to_migrate_one()
1857 if (folio_test_hugetlb(folio)) { in try_to_migrate_one()
1874 subpage = folio_page(folio, in try_to_migrate_one()
1875 pmd_pfn(*pvmw.pmd) - folio_pfn(folio)); in try_to_migrate_one()
1876 VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) || in try_to_migrate_one()
1877 !folio_test_pmd_mappable(folio), folio); in try_to_migrate_one()
1889 VM_BUG_ON_FOLIO(!pvmw.pte, folio); in try_to_migrate_one()
1893 if (folio_is_zone_device(folio)) { in try_to_migrate_one()
1904 VM_BUG_ON_FOLIO(folio_nr_pages(folio) > 1, folio); in try_to_migrate_one()
1905 subpage = &folio->page; in try_to_migrate_one()
1907 subpage = folio_page(folio, pfn - folio_pfn(folio)); in try_to_migrate_one()
1910 anon_exclusive = folio_test_anon(folio) && in try_to_migrate_one()
1913 if (folio_test_hugetlb(folio)) { in try_to_migrate_one()
1914 bool anon = folio_test_anon(folio); in try_to_migrate_one()
1986 folio_mark_dirty(folio); in try_to_migrate_one()
1991 if (folio_is_device_private(folio)) { in try_to_migrate_one()
1992 unsigned long pfn = folio_pfn(folio); in try_to_migrate_one()
2023 compound_order(&folio->page)); in try_to_migrate_one()
2030 if (folio_test_hugetlb(folio)) { in try_to_migrate_one()
2031 hugetlb_count_sub(folio_nr_pages(folio), mm); in try_to_migrate_one()
2035 dec_mm_counter(mm, mm_counter(&folio->page)); in try_to_migrate_one()
2050 dec_mm_counter(mm, mm_counter(&folio->page)); in try_to_migrate_one()
2056 if (folio_test_hugetlb(folio)) in try_to_migrate_one()
2065 VM_BUG_ON_PAGE(pte_write(pteval) && folio_test_anon(folio) && in try_to_migrate_one()
2071 if (folio_test_hugetlb(folio)) in try_to_migrate_one()
2104 if (folio_test_hugetlb(folio)) in try_to_migrate_one()
2110 compound_order(&folio->page)); in try_to_migrate_one()
2117 page_remove_rmap(subpage, vma, folio_test_hugetlb(folio)); in try_to_migrate_one()
2120 folio_put(folio); in try_to_migrate_one()
2136 void try_to_migrate(struct folio *folio, enum ttu_flags flags) in try_to_migrate() argument
2153 if (folio_is_zone_device(folio) && in try_to_migrate()
2154 (!folio_is_device_private(folio) && !folio_is_device_coherent(folio))) in try_to_migrate()
2165 if (!folio_test_ksm(folio) && folio_test_anon(folio)) in try_to_migrate()
2169 rmap_walk_locked(folio, &rwc); in try_to_migrate()
2171 rmap_walk(folio, &rwc); in try_to_migrate()
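
try_to_migrate() differs from try_to_unmap() in that each PTE is replaced by a migration swap entry rather than dropped, so concurrent faults wait for the migration instead of refaulting the old folio. A heavily simplified, hedged sketch of the migration core's sequence (both folios assumed locked; the page-cache/xarray move and error unwinding are elided):

static int example_migrate_mapping(struct folio *src, struct folio *dst)
{
        try_to_migrate(src, 0);                 /* PTEs -> migration entries */
        if (folio_mapped(src))
                return -EAGAIN;                 /* a mapping we could not clear */

        folio_migrate_copy(dst, src);           /* copy contents and flags */
        remove_migration_ptes(src, dst, false); /* re-point waiters at dst */
        return 0;
}
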
2182 static bool page_make_device_exclusive_one(struct folio *folio, in page_make_device_exclusive_one() argument
2186 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); in page_make_device_exclusive_one()
2198 address + folio_size(folio)), in page_make_device_exclusive_one()
2204 VM_BUG_ON_FOLIO(!pvmw.pte, folio); in page_make_device_exclusive_one()
2213 subpage = folio_page(folio, in page_make_device_exclusive_one()
2214 pte_pfn(ptent) - folio_pfn(folio)); in page_make_device_exclusive_one()
2223 folio_mark_dirty(folio); in page_make_device_exclusive_one()
2279 static bool folio_make_device_exclusive(struct folio *folio, in folio_make_device_exclusive() argument
2299 if (!folio_test_anon(folio)) in folio_make_device_exclusive()
2302 rmap_walk(folio, &rwc); in folio_make_device_exclusive()
2304 return args.valid && !folio_mapcount(folio); in folio_make_device_exclusive()
2342 struct folio *folio = page_folio(pages[i]); in make_device_exclusive_range() local
2343 if (PageTail(pages[i]) || !folio_trylock(folio)) { in make_device_exclusive_range()
2344 folio_put(folio); in make_device_exclusive_range()
2349 if (!folio_make_device_exclusive(folio, mm, start, owner)) { in make_device_exclusive_range()
2350 folio_unlock(folio); in make_device_exclusive_range()
2351 folio_put(folio); in make_device_exclusive_range()
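
make_device_exclusive_range() is the driver-facing entry point: it pins the range with GUP, folio_trylock()s each page (the checks above) and runs the walk to replace CPU PTEs with device-exclusive entries; pages it could not convert are NULLed out, successful ones come back locked and referenced. A hedged single-page sketch of a driver-style caller, assumed to hold mmap_read_lock(mm); the example_* name and the device-programming step are placeholders:

static int example_grab_page_for_device(struct mm_struct *mm,
                                        unsigned long addr, void *owner)
{
        struct page *page = NULL;
        int npages;

        npages = make_device_exclusive_range(mm, addr, addr + PAGE_SIZE,
                                             &page, owner);
        if (npages != 1 || !page)
                return -EBUSY;

        /* ... program the device MMU with page_to_pfn(page) ... */

        folio_unlock(page_folio(page));         /* returned locked ...      */
        folio_put(page_folio(page));            /* ... and with a reference */
        return 0;
}
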
2370 static struct anon_vma *rmap_walk_anon_lock(struct folio *folio, in rmap_walk_anon_lock() argument
2376 return rwc->anon_lock(folio, rwc); in rmap_walk_anon_lock()
2384 anon_vma = folio_anon_vma(folio); in rmap_walk_anon_lock()
2412 static void rmap_walk_anon(struct folio *folio, in rmap_walk_anon() argument
2420 anon_vma = folio_anon_vma(folio); in rmap_walk_anon()
2422 VM_BUG_ON_FOLIO(!anon_vma, folio); in rmap_walk_anon()
2424 anon_vma = rmap_walk_anon_lock(folio, rwc); in rmap_walk_anon()
2429 pgoff_start = folio_pgoff(folio); in rmap_walk_anon()
2430 pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; in rmap_walk_anon()
2434 unsigned long address = vma_address(&folio->page, vma); in rmap_walk_anon()
2442 if (!rwc->rmap_one(folio, vma, address, rwc->arg)) in rmap_walk_anon()
2444 if (rwc->done && rwc->done(folio)) in rmap_walk_anon()
2461 static void rmap_walk_file(struct folio *folio, in rmap_walk_file() argument
2464 struct address_space *mapping = folio_mapping(folio); in rmap_walk_file()
2474 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); in rmap_walk_file()
2479 pgoff_start = folio_pgoff(folio); in rmap_walk_file()
2480 pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; in rmap_walk_file()
2495 unsigned long address = vma_address(&folio->page, vma); in rmap_walk_file()
2503 if (!rwc->rmap_one(folio, vma, address, rwc->arg)) in rmap_walk_file()
2505 if (rwc->done && rwc->done(folio)) in rmap_walk_file()
2514 void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc) in rmap_walk() argument
2516 if (unlikely(folio_test_ksm(folio))) in rmap_walk()
2517 rmap_walk_ksm(folio, rwc); in rmap_walk()
2518 else if (folio_test_anon(folio)) in rmap_walk()
2519 rmap_walk_anon(folio, rwc, false); in rmap_walk()
2521 rmap_walk_file(folio, rwc, false); in rmap_walk()
2525 void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc) in rmap_walk_locked() argument
2528 VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio); in rmap_walk_locked()
2529 if (folio_test_anon(folio)) in rmap_walk_locked()
2530 rmap_walk_anon(folio, rwc, true); in rmap_walk_locked()
2532 rmap_walk_file(folio, rwc, true); in rmap_walk_locked()
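
Every entry point above is a thin wrapper around the same dispatcher: fill in a struct rmap_walk_control and let rmap_walk() (or rmap_walk_locked(), when the caller already holds the anon_vma or i_mmap lock) fan out to the KSM, anon or file walker, which calls .rmap_one per mapping VMA plus the optional .done and .anon_lock hooks seen above. A hedged minimal sketch of a custom walker; both example_* functions are illustrative:

static bool example_count_one(struct folio *folio, struct vm_area_struct *vma,
                              unsigned long address, void *arg)
{
        (*(int *)arg)++;
        return true;            /* true == keep walking the remaining VMAs */
}

static int example_count_mapping_vmas(struct folio *folio)
{
        int count = 0;
        struct rmap_walk_control rwc = {
                .rmap_one       = example_count_one,
                .arg            = &count,
        };

        rmap_walk(folio, &rwc);
        return count;
}
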
2546 struct folio *folio = page_folio(page); in hugepage_add_anon_rmap() local
2550 BUG_ON(!folio_test_locked(folio)); in hugepage_add_anon_rmap()
2553 first = atomic_inc_and_test(&folio->_entire_mapcount); in hugepage_add_anon_rmap()
2557 __page_set_anon_rmap(folio, page, vma, address, in hugepage_add_anon_rmap()
2561 void hugepage_add_new_anon_rmap(struct folio *folio, in hugepage_add_new_anon_rmap() argument
2566 atomic_set(&folio->_entire_mapcount, 0); in hugepage_add_new_anon_rmap()
2567 folio_clear_hugetlb_restore_reserve(folio); in hugepage_add_new_anon_rmap()
2568 __page_set_anon_rmap(folio, &folio->page, vma, address, 1); in hugepage_add_new_anon_rmap()