/openbmc/linux/mm/

H A D | folio-compat.c
    15   return folio_mapping(page_folio(page));  in page_mapping()
    21   return folio_unlock(page_folio(page));  in unlock_page()
    27   return folio_end_writeback(page_folio(page));  in end_page_writeback()
    33   return folio_wait_writeback(page_folio(page));  in wait_on_page_writeback()
    39   return folio_wait_stable(page_folio(page));  in wait_for_stable_page()
    45   folio_mark_accessed(page_folio(page));  in mark_page_accessed()
    51   return folio_start_writeback(page_folio(page));  in set_page_writeback()
    57   return folio_mark_dirty(page_folio(page));  in set_page_dirty()
    63   return filemap_dirty_folio(page_mapping(page), page_folio(page));  in __set_page_dirty_nobuffers()
    69   return folio_clear_dirty_for_io(page_folio(page));  in clear_page_dirty_for_io()
    [all …]

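Every hit in folio-compat.c follows the same shim pattern: convert the incoming struct page to its containing folio and forward to the folio-native API. A minimal sketch of one shim, reconstructed from the unlock_page() hit at line 21 (any EXPORT_SYMBOL and kerneldoc that accompany it in the source are omitted):

    #include <linux/pagemap.h>

    /*
     * Legacy entry point kept for callers that still pass a struct page.
     * page_folio() resolves the folio containing the page, so the shim
     * collapses to a single call into the folio API.
     */
    void unlock_page(struct page *page)
    {
            return folio_unlock(page_folio(page));
    }

This is why the file reads as a flat list of one-line bodies: it exists only to keep old page-based entry points alive while callers migrate to folios.
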
H A D | migrate_device.c
    217  folio_mark_dirty(page_folio(page));  in migrate_vma_collect_pmd()
    395  folio = page_folio(page);  in migrate_device_unmap()
    421  folio = page_folio(page);  in migrate_device_unmap()
    599  if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL))  in migrate_vma_insert_page()
    734  folio = page_folio(page);  in __migrate_device_pages()
    757  r = migrate_folio_extra(mapping, page_folio(newpage),  in __migrate_device_pages()
    758          page_folio(page),  in __migrate_device_pages()
    761  r = migrate_folio(mapping, page_folio(newpage),  in __migrate_device_pages()
    762          page_folio(page), MIGRATE_SYNC_NO_COPY);  in __migrate_device_pages()
    837  src = page_folio(page);  in migrate_device_finalize()
    [all …]

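Hits 757–762 show the conversion happening at the call boundary rather than at function entry: __migrate_device_pages() works with struct pages, but migrate_folio() and migrate_folio_extra() are folio-native. A sketch of that call shape, wrapped in a hypothetical helper (migrate_one_device_page() is not in the source; the argument order and the MIGRATE_SYNC_NO_COPY mode are taken from the hits):

    #include <linux/migrate.h>

    /* Hypothetical wrapper illustrating the call shape at hits 757-762. */
    static int migrate_one_device_page(struct address_space *mapping,
                                       struct page *newpage, struct page *page)
    {
            /* MIGRATE_SYNC_NO_COPY: skip the content copy here, because
             * the device-migration path copies page contents separately. */
            return migrate_folio(mapping, page_folio(newpage),
                                 page_folio(page), MIGRATE_SYNC_NO_COPY);
    }
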
H A D | page_io.c
    181  struct folio *folio = page_folio(page);  in swap_writepage()
    283  count_swpout_vm_event(page_folio(sio->bvec[p].bv_page));  in sio_write_complete()
    334  struct folio *folio = page_folio(page);  in swap_writepage_bdev_sync()
    355  struct folio *folio = page_folio(page);  in swap_writepage_bdev_async()
    408  struct folio *folio = page_folio(sio->bvec[p].bv_page);  in sio_read_complete()
    416  struct folio *folio = page_folio(sio->bvec[p].bv_page);  in sio_read_complete()
    495  struct folio *folio = page_folio(page);  in swap_readpage()

H A D | memory-failure.c
    930   mem_cgroup_uncharge(page_folio(p));  in delete_from_lru_cache()
    947   struct folio *folio = page_folio(p);  in truncate_error_page()
    1174  struct folio *folio = page_folio(p);  in me_swapcache_clean()
    1196  struct folio *folio = page_folio(p);  in me_huge_page()
    1395  struct folio *folio = page_folio(page);  in __get_hwpoison_page()
    1402  if (folio == page_folio(page))  in __get_hwpoison_page()
    1406  folio = page_folio(page);  in __get_hwpoison_page()
    1419  if (folio == page_folio(page))  in __get_hwpoison_page()
    1492  struct folio *folio = page_folio(page);  in __get_unpoison_page()
    1499  if (folio == page_folio(page))  in __get_unpoison_page()
    [all …]

H A D | page_idle.c
    42  folio = page_folio(page);  in page_idle_get_folio()
    45  if (unlikely(page_folio(page) != folio || !folio_test_lru(folio))) {  in page_idle_get_folio()

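The two hits bracket a speculative-reference idiom: take a reference on the folio, then re-check that the page still belongs to it, since the folio can be freed or split in the window before the reference lands. A sketch of page_idle_get_folio() reconstructed from the hits (the folio_try_get() call between them is an assumption; only the page_folio() lines appear in the search):

    static struct folio *page_idle_get_folio(struct page *page)
    {
            struct folio *folio = page_folio(page);

            /* Speculative: the folio may be freed or split under us. */
            if (!folio_try_get(folio))
                    return NULL;

            /* Re-check after taking the reference: if the page now maps to
             * a different folio, or the folio left the LRU, bail out. */
            if (unlikely(page_folio(page) != folio || !folio_test_lru(folio))) {
                    folio_put(folio);
                    folio = NULL;
            }
            return folio;
    }

The same get-then-recheck shape shows up in try_get_folio() (mm/gup.c), __get_hwpoison_page() and __get_unpoison_page() (mm/memory-failure.c), and damon_get_folio() (mm/damon/ops-common.c).
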
H A D | gup.c
    53    struct folio *folio = page_folio(page);  in sanity_check_pinned_pages()
    76    folio = page_folio(page);  in try_get_folio()
    91    if (unlikely(page_folio(page) != folio)) {  in try_get_folio()
    187   gup_put_folio(page_folio(page), 1, FOLL_PIN);  in unpin_user_page()
    222   struct folio *folio = page_folio(next);  in gup_folio_range_next()
    236   struct folio *folio = page_folio(list[i]);  in gup_folio_next()
    240   if (page_folio(list[nr]) != folio)  in gup_folio_next()
    563   ret = try_grab_folio(page_folio(page), 1, flags);  in follow_page_pte()
    814   ret = try_grab_folio(page_folio(*page), 1, gup_flags);  in get_gate_page()
    1217  struct folio *folio = page_folio(page);  in __get_user_pages()
    [all …]

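Hits 236 and 240 come from GUP's batching idiom for releasing page arrays: scan forward while consecutive entries still land in the same folio, then drop all of those references with one folio operation. A sketch reconstructed from the hits (the loop bounds and the ntails out-parameter are assumptions filled in from context):

    static inline struct folio *gup_folio_next(struct page **list,
                    unsigned long npages, unsigned long i, unsigned int *ntails)
    {
            struct folio *folio = page_folio(list[i]);
            unsigned int nr;

            /* Count how many following pages share this folio so the
             * caller can release them with a single gup_put_folio(). */
            for (nr = i + 1; nr < npages; nr++) {
                    if (page_folio(list[nr]) != folio)
                            break;
            }

            *ntails = nr - i;
            return folio;
    }
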
/openbmc/linux/include/linux/

H A D | page_idle.h
    125  return folio_test_young(page_folio(page));  in page_is_young()
    130  folio_set_young(page_folio(page));  in set_page_young()
    135  return folio_test_clear_young(page_folio(page));  in test_and_clear_page_young()
    140  return folio_test_idle(page_folio(page));  in page_is_idle()
    145  folio_set_idle(page_folio(page));  in set_page_idle()

H A D | netfs.h
    94   folio_start_fscache(page_folio(page));  in set_page_fscache()
    99   folio_end_private_2(page_folio(page));  in end_page_fscache()
    104  folio_wait_private_2(page_folio(page));  in wait_on_page_fscache()
    109  return folio_wait_private_2_killable(page_folio(page));  in wait_on_page_fscache_killable()

H A D | pagemap.h
    459   return folio_file_mapping(page_folio(page));  in page_file_mapping()
    534   folio_attach_private(page_folio(page), data);  in attach_page_private()
    539   return folio_detach_private(page_folio(page));  in detach_page_private()
    1045  return folio_trylock(page_folio(page));  in trylock_page()
    1093  folio = page_folio(page);  in lock_page()
    1161  folio_wait_locked(page_folio(page));  in wait_on_page_locked()
    1175  __folio_mark_dirty(page_folio(page), mapping, warn);  in __set_page_dirty()
    1574  return i_blocks_per_folio(inode, page_folio(page));  in i_blocks_per_page()

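Hit 1093 sits inside lock_page(), where the legacy lock reduces to folio locking with a trylock fast path. A sketch assuming that structure (folio_trylock() itself appears at hit 1045 as the body of trylock_page()):

    static inline void lock_page(struct page *page)
    {
            struct folio *folio;

            might_sleep();
            folio = page_folio(page);
            /* Fast path: uncontended lock; slow path sleeps on the folio. */
            if (!folio_trylock(folio))
                    __folio_lock(folio);
    }
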
H A D | page-flags.h
    268   #define page_folio(p) (_Generic((p), \  (macro)
    563   return folio_test_swapcache(page_folio(page));  in PageSwapCache()
    677   return folio_test_anon(page_folio(page));  in PageAnon()
    707   return folio_test_ksm(page_folio(page));  in PageKsm()
    744   return folio_test_uptodate(page_folio(page));  in PageUptodate()
    1020  return folio_test_hugetlb(page_folio(page));  in PAGE_TYPE_OPS()

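Hit 268 is the definition site of page_folio() itself. In mainline it is a _Generic() macro so that a const page pointer yields a const folio pointer; roughly the following, where _compound_head() resolves a tail page to the head page that anchors the folio (this tree may differ in detail):

    #define page_folio(p)           (_Generic((p),                          \
            const struct page *:    (const struct folio *)_compound_head(p), \
            struct page *:          (struct folio *)_compound_head(p)))
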
/openbmc/linux/arch/sh/mm/

H A D | cache.c
    64   struct folio *folio = page_folio(page);  in copy_to_user_page()
    85   struct folio *folio = page_folio(page);  in copy_from_user_page()
    102  struct folio *src = page_folio(from);  in copy_user_highpage()
    150  struct folio *folio = page_folio(pfn_to_page(pfn));  in __update_cache()
    160  struct folio *folio = page_folio(page);  in __flush_anon_page()
    247  cacheop_on_each_cpu(local_flush_icache_folio, page_folio(page), 1);  in flush_icache_pages()

H A D | kmap.c
    30  struct folio *folio = page_folio(page);  in kmap_coherent()

/openbmc/linux/arch/csky/abiv1/

H A D | cacheflush.c
    40  flush_dcache_folio(page_folio(page));  in flush_dcache_page()
    58  folio = page_folio(pfn_to_page(pfn));  in update_mmu_cache_range()

/openbmc/linux/arch/arm64/mm/

H A D | flush.c
    54  struct folio *folio = page_folio(pte_page(pte));  in __sync_icache_dcache()
    79  flush_dcache_folio(page_folio(page));  in flush_dcache_page()

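Hit 79 shows that the arm64 page-based flush is now just a delegation; reconstructed from the hit (minus any EXPORT_SYMBOL):

    void flush_dcache_page(struct page *page)
    {
            flush_dcache_folio(page_folio(page));
    }

The same one-line delegation appears inline in the arch headers listed below for ia64, openrisc, riscv, and csky.
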
/openbmc/linux/arch/mips/mm/

H A D | cache.c
    104  struct folio *folio = page_folio(page);  in __flush_dcache_pages()
    130  struct folio *folio = page_folio(page);  in __flush_anon_page()
    157  folio = page_folio(pfn_to_page(pfn));  in __update_cache()

H A D | init.c
    91   BUG_ON(folio_test_dcache_dirty(page_folio(page)));  in __kmap_pgprot()
    172  struct folio *src = page_folio(from);  in copy_user_highpage()
    198  struct folio *folio = page_folio(page);  in copy_to_user_page()
    218  struct folio *folio = page_folio(page);  in copy_from_user_page()

/openbmc/linux/mm/damon/

H A D | ops-common.c
    30  folio = page_folio(page);  in damon_get_folio()
    33  if (unlikely(page_folio(page) != folio || !folio_test_lru(folio))) {  in damon_get_folio()

/openbmc/linux/arch/ia64/include/asm/

H A D | cacheflush.h
    24  flush_dcache_folio(page_folio(page));  in flush_dcache_page()

/openbmc/linux/arch/openrisc/include/asm/

H A D | cacheflush.h
    68  flush_dcache_folio(page_folio(page));  in flush_dcache_page()

/openbmc/linux/arch/openrisc/mm/

H A D | cache.c
    46  struct folio *folio = page_folio(pfn_to_page(pfn));  in update_cache()

/openbmc/linux/arch/arm/mm/

H A D | copypage-xscale.c
    87  struct folio *src = page_folio(from);  in xscale_mc_copy_user_highpage()

H A D | copypage-v4mc.c
    67  struct folio *src = page_folio(from);  in v4_mc_copy_user_highpage()

/openbmc/linux/arch/riscv/include/asm/

H A D | cacheflush.h
    34  flush_dcache_folio(page_folio(page));  in flush_dcache_page()

/openbmc/linux/arch/csky/abiv2/inc/abi/

H A D | cacheflush.h
    31  flush_dcache_folio(page_folio(page));  in flush_dcache_page()

/openbmc/linux/arch/nios2/mm/

H A D | cacheflush.c
    205  flush_dcache_folio(page_folio(page));  in flush_dcache_page()
    229  folio = page_folio(pfn_to_page(pfn));  in update_mmu_cache_range()