Lines matching refs: p (identifier cross-reference, mm/memory-failure.c hwpoison handling)

218 static int hwpoison_filter_dev(struct page *p)  in hwpoison_filter_dev()  argument
227 mapping = page_mapping(p); in hwpoison_filter_dev()
242 static int hwpoison_filter_flags(struct page *p) in hwpoison_filter_flags() argument
247 if ((stable_page_flags(p) & hwpoison_filter_flags_mask) == in hwpoison_filter_flags()
267 static int hwpoison_filter_task(struct page *p) in hwpoison_filter_task() argument
272 if (page_cgroup_ino(p) != hwpoison_filter_memcg) in hwpoison_filter_task()
278 static int hwpoison_filter_task(struct page *p) { return 0; } in hwpoison_filter_task() argument
281 int hwpoison_filter(struct page *p) in hwpoison_filter() argument
286 if (hwpoison_filter_dev(p)) in hwpoison_filter()
289 if (hwpoison_filter_flags(p)) in hwpoison_filter()
292 if (hwpoison_filter_task(p)) in hwpoison_filter()
298 int hwpoison_filter(struct page *p) in hwpoison_filter() argument
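The filter helpers above exist for the hwpoison test tooling: they let injection be restricted to a given backing device, a page-flags value/mask pair, or a memory cgroup. A minimal sketch of how hwpoison_filter() chains them, reconstructed from the references above (hwpoison_filter_enable is the module knob that gates the whole thing):

int hwpoison_filter(struct page *p)
{
	if (!hwpoison_filter_enable)
		return 0;

	/* Reject pages whose backing device does not match the filter. */
	if (hwpoison_filter_dev(p))
		return -EINVAL;

	/* Reject pages whose stable flags miss the value/mask pair. */
	if (hwpoison_filter_flags(p))
		return -EINVAL;

	/* Reject pages not owned by the filtered memory cgroup. */
	if (hwpoison_filter_task(p))
		return -EINVAL;

	return 0;
}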
373 void shake_page(struct page *p) in shake_page() argument
375 if (PageHuge(p)) in shake_page()
381 if (PageSlab(p)) in shake_page()
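shake_page() tries to flush the page out of the per-CPU LRU caches so that a later isolation or refcount grab can succeed; it deliberately gives up on hugetlb and slab pages, for which draining the LRU does nothing. A simplified sketch consistent with the lines above:

void shake_page(struct page *p)
{
	if (PageHuge(p))
		return;
	/*
	 * Slab pages are not on the LRU; a range-based slab shrinker
	 * would be needed to do anything useful here.
	 */
	if (PageSlab(p))
		return;

	/* Flush the per-CPU LRU pagevecs on all CPUs. */
	lru_add_drain_all();
}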
443 static void __add_to_kill(struct task_struct *tsk, struct page *p, in __add_to_kill() argument
455 tk->addr = ksm_addr ? ksm_addr : page_address_in_vma(p, vma); in __add_to_kill()
456 if (is_zone_device_page(p)) { in __add_to_kill()
461 tk->size_shift = page_shift(compound_head(p)); in __add_to_kill()
475 page_to_pfn(p), tsk->comm); in __add_to_kill()
486 static void add_to_kill_anon_file(struct task_struct *tsk, struct page *p, in add_to_kill_anon_file() argument
490 __add_to_kill(tsk, p, vma, to_kill, 0, FSDAX_INVALID_PGOFF); in add_to_kill_anon_file()
506 void add_to_kill_ksm(struct task_struct *tsk, struct page *p, in add_to_kill_ksm() argument
511 __add_to_kill(tsk, p, vma, to_kill, ksm_addr, FSDAX_INVALID_PGOFF); in add_to_kill_ksm()
681 static void add_to_kill_fsdax(struct task_struct *tsk, struct page *p, in add_to_kill_fsdax() argument
685 __add_to_kill(tsk, p, vma, to_kill, 0, pgoff); in add_to_kill_fsdax()
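__add_to_kill() is the common tail for the three add_to_kill_*() wrappers: the anon/file variant passes a zero KSM address and FSDAX_INVALID_PGOFF, the KSM variant supplies an explicit address (KSM pages can map at different addresses per task), and the fsdax variant supplies a real page offset. A condensed sketch with the zone-device address fixup trimmed to a comment:

static void __add_to_kill(struct task_struct *tsk, struct page *p,
			  struct vm_area_struct *vma, struct list_head *to_kill,
			  unsigned long ksm_addr, pgoff_t fsdax_pgoff)
{
	struct to_kill *tk;

	tk = kmalloc(sizeof(*tk), GFP_ATOMIC);
	if (!tk)
		return;	/* the real code logs an OOM message here */

	/* KSM callers supply the address; others derive it from the VMA. */
	tk->addr = ksm_addr ? ksm_addr : page_address_in_vma(p, vma);
	if (is_zone_device_page(p)) {
		/*
		 * fsdax callers pass a valid pgoff so the address can be
		 * recomputed from it; the mapping size comes from the
		 * device pagemap. (Details trimmed.)
		 */
		tk->size_shift = dev_pagemap_mapping_shift(vma, tk->addr);
	} else {
		tk->size_shift = page_shift(compound_head(p));
	}

	tk->tsk = tsk;
	get_task_struct(tsk);
	list_add_tail(&tk->nd, to_kill);
}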
857 static int kill_accessing_process(struct task_struct *p, unsigned long pfn, in kill_accessing_process() argument
864 priv.tk.tsk = p; in kill_accessing_process()
866 if (!p->mm) in kill_accessing_process()
869 mmap_read_lock(p->mm); in kill_accessing_process()
870 ret = walk_page_range(p->mm, 0, TASK_SIZE, &hwpoison_walk_ops, in kill_accessing_process()
876 mmap_read_unlock(p->mm); in kill_accessing_process()
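kill_accessing_process() handles the case where a task trips over an already-poisoned page: it walks that task's page tables looking for the known-bad pfn and, if found, delivers the SIGBUS directly. A sketch close to the lines above (the hwpoison_walk structure and ops are assumed from the references):

static int kill_accessing_process(struct task_struct *p, unsigned long pfn,
				  int flags)
{
	int ret;
	struct hwpoison_walk priv = {
		.pfn = pfn,
	};
	priv.tk.tsk = p;

	if (!p->mm)
		return -EFAULT;

	mmap_read_lock(p->mm);
	ret = walk_page_range(p->mm, 0, TASK_SIZE, &hwpoison_walk_ops,
			      (void *)&priv);
	/* ret == 1 means the walker found the pfn mapped in this mm. */
	if (ret == 1 && priv.tk.addr)
		kill_proc(&priv.tk, pfn, flags);
	else
		ret = 0;
	mmap_read_unlock(p->mm);
	return ret > 0 ? -EHWPOISON : -EFAULT;
}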
916 static int delete_from_lru_cache(struct page *p) in delete_from_lru_cache() argument
918 if (isolate_lru_page(p)) { in delete_from_lru_cache()
923 ClearPageActive(p); in delete_from_lru_cache()
924 ClearPageUnevictable(p); in delete_from_lru_cache()
930 mem_cgroup_uncharge(page_folio(p)); in delete_from_lru_cache()
935 put_page(p); in delete_from_lru_cache()
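delete_from_lru_cache() takes the page off the LRU so that freeing it later cannot race with LRU handling, and clears state that would otherwise trip the buddy allocator's sanity checks. Sketch per the lines above:

static int delete_from_lru_cache(struct page *p)
{
	if (isolate_lru_page(p)) {
		/*
		 * Clear sensible page state so the buddy system does not
		 * complain when the page is eventually freed.
		 */
		ClearPageActive(p);
		ClearPageUnevictable(p);

		/*
		 * A poisoned page may never drop to refcount zero, so
		 * uncharge it from its memcg manually now.
		 */
		mem_cgroup_uncharge(page_folio(p));

		/* Drop the extra reference taken by isolate_lru_page(). */
		put_page(p);
		return 0;
	}
	return -EIO;
}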
941 static int truncate_error_page(struct page *p, unsigned long pfn, in truncate_error_page() argument
947 struct folio *folio = page_folio(p); in truncate_error_page()
948 int err = mapping->a_ops->error_remove_page(mapping, p); in truncate_error_page()
961 if (invalidate_inode_page(p)) in truncate_error_page()
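truncate_error_page() prefers the filesystem's ->error_remove_page() hook; filesystems that lack one only get a best-effort invalidate_inode_page(), which works solely for clean, unmapped pages. A condensed sketch of that decision, assuming this kernel vintage (newer trees rename the hook to error_remove_folio):

static int truncate_error_page(struct page *p, unsigned long pfn,
			       struct address_space *mapping)
{
	int ret = MF_FAILED;

	if (mapping->a_ops->error_remove_page) {
		struct folio *folio = page_folio(p);
		int err = mapping->a_ops->error_remove_page(mapping, p);

		if (err != 0)
			pr_info("%#lx: Failed to punch page: %d\n", pfn, err);
		else if (!filemap_release_folio(folio, GFP_NOIO))
			pr_info("%#lx: failed to release buffers\n", pfn);
		else
			ret = MF_RECOVERED;
	} else {
		/* No truncation hook: best-effort invalidation only. */
		if (invalidate_inode_page(p))
			ret = MF_RECOVERED;
		else
			pr_info("%#lx: Failed to invalidate\n", pfn);
	}

	return ret;
}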
976 int (*action)(struct page_state *ps, struct page *p);
985 static bool has_extra_refcount(struct page_state *ps, struct page *p, in has_extra_refcount() argument
988 int count = page_count(p) - 1; in has_extra_refcount()
995 page_to_pfn(p), action_page_types[ps->type], count); in has_extra_refcount()
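Each struct page_state pairs a page-flags mask/result with an action callback; has_extra_refcount() is the shared post-check that downgrades a recovery to MF_FAILED when someone still holds an unexpected reference. Sketch:

struct page_state {
	unsigned long mask;	/* page flags to examine */
	unsigned long res;	/* expected value under the mask */
	enum mf_action_page_type type;
	/* Handler for this page state; returns an MF_* result. */
	int (*action)(struct page_state *ps, struct page *p);
};

static bool has_extra_refcount(struct page_state *ps, struct page *p,
			       bool extra_pins)
{
	int count = page_count(p) - 1;	/* minus our own reference */

	if (extra_pins)
		count -= 1;	/* e.g. a legitimate cache reference */

	if (count > 0) {
		pr_err("%#lx: %s still referenced by %d users\n",
		       page_to_pfn(p), action_page_types[ps->type], count);
		return true;
	}

	return false;
}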
1007 static int me_kernel(struct page_state *ps, struct page *p) in me_kernel() argument
1009 unlock_page(p); in me_kernel()
1016 static int me_unknown(struct page_state *ps, struct page *p) in me_unknown() argument
1018 pr_err("%#lx: Unknown page state\n", page_to_pfn(p)); in me_unknown()
1019 unlock_page(p); in me_unknown()
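The two trivial handlers: pages owned by the kernel are deliberately ignored (nothing safe can be done while the kernel may still hold references), and unknown states simply fail. Per the lines above:

static int me_kernel(struct page_state *ps, struct page *p)
{
	unlock_page(p);
	return MF_IGNORED;
}

static int me_unknown(struct page_state *ps, struct page *p)
{
	pr_err("%#lx: Unknown page state\n", page_to_pfn(p));
	unlock_page(p);
	return MF_FAILED;
}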
1026 static int me_pagecache_clean(struct page_state *ps, struct page *p) in me_pagecache_clean() argument
1032 delete_from_lru_cache(p); in me_pagecache_clean()
1038 if (PageAnon(p)) { in me_pagecache_clean()
1050 mapping = page_mapping(p); in me_pagecache_clean()
1070 ret = truncate_error_page(p, page_to_pfn(p), mapping); in me_pagecache_clean()
1071 if (has_extra_refcount(ps, p, extra_pins)) in me_pagecache_clean()
1075 unlock_page(p); in me_pagecache_clean()
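For a clean page-cache page the data still exists on disk, so the copy can simply be dropped; clean anon pages need no work at all beyond LRU removal, since the only remaining reference should be the one memory_failure() holds. A sketch of the flow, with the truncation-race comments condensed:

static int me_pagecache_clean(struct page_state *ps, struct page *p)
{
	int ret;
	struct address_space *mapping;
	bool extra_pins;

	delete_from_lru_cache(p);

	/* Anonymous clean page: nothing else references it. */
	if (PageAnon(p)) {
		ret = MF_RECOVERED;
		goto out;
	}

	mapping = page_mapping(p);
	if (!mapping) {
		/* Page was truncated or removed in the meantime. */
		ret = MF_FAILED;
		goto out;
	}

	/* The shmem page cache holds an extra, legitimate reference. */
	extra_pins = shmem_mapping(mapping);

	/* Punch the page out of the page cache via the filesystem. */
	ret = truncate_error_page(p, page_to_pfn(p), mapping);
	if (has_extra_refcount(ps, p, extra_pins))
		ret = MF_FAILED;

out:
	unlock_page(p);
	return ret;
}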
1085 static int me_pagecache_dirty(struct page_state *ps, struct page *p) in me_pagecache_dirty() argument
1087 struct address_space *mapping = page_mapping(p); in me_pagecache_dirty()
1089 SetPageError(p); in me_pagecache_dirty()
1129 return me_pagecache_clean(ps, p); in me_pagecache_dirty()
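A dirty page-cache page additionally carries unwritten data, so before falling through to the clean-page path the handler records the loss: PG_error on the page and an -EIO error on the mapping, so that a later fsync()/fdatasync() reports the failure instead of losing the write silently. Sketch:

static int me_pagecache_dirty(struct page_state *ps, struct page *p)
{
	struct address_space *mapping = page_mapping(p);

	SetPageError(p);
	/*
	 * Flag the mapping with an I/O error so that applications
	 * syncing this file later see -EIO.
	 */
	if (mapping)
		mapping_set_error(mapping, -EIO);

	return me_pagecache_clean(ps, p);
}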
1151 static int me_swapcache_dirty(struct page_state *ps, struct page *p) in me_swapcache_dirty() argument
1156 ClearPageDirty(p); in me_swapcache_dirty()
1158 ClearPageUptodate(p); in me_swapcache_dirty()
1160 ret = delete_from_lru_cache(p) ? MF_FAILED : MF_DELAYED; in me_swapcache_dirty()
1161 unlock_page(p); in me_swapcache_dirty()
1166 if (has_extra_refcount(ps, p, extra_pins)) in me_swapcache_dirty()
1172 static int me_swapcache_clean(struct page_state *ps, struct page *p) in me_swapcache_clean() argument
1174 struct folio *folio = page_folio(p); in me_swapcache_clean()
1179 ret = delete_from_lru_cache(p) ? MF_FAILED : MF_RECOVERED; in me_swapcache_clean()
1182 if (has_extra_refcount(ps, p, false)) in me_swapcache_clean()
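Dirty swap-cache pages cannot be recovered from disk (the only copy of the data was in memory), so the handler clears PG_uptodate to force -EIO on the next touch and leaves the entry in the swap cache as MF_DELAYED; clean swap-cache pages can just be deleted and re-read from swap. Sketch of both paths:

static int me_swapcache_dirty(struct page_state *ps, struct page *p)
{
	int ret;
	bool extra_pins = false;

	ClearPageDirty(p);
	/* Force the next access to this page to fail with -EIO. */
	ClearPageUptodate(p);

	ret = delete_from_lru_cache(p) ? MF_FAILED : MF_DELAYED;
	unlock_page(p);

	if (ret == MF_DELAYED)
		extra_pins = true;	/* swap cache still holds a ref */

	if (has_extra_refcount(ps, p, extra_pins))
		ret = MF_FAILED;

	return ret;
}

static int me_swapcache_clean(struct page_state *ps, struct page *p)
{
	struct folio *folio = page_folio(p);
	int ret;

	delete_from_swap_cache(folio);

	ret = delete_from_lru_cache(p) ? MF_FAILED : MF_RECOVERED;
	folio_unlock(folio);

	if (has_extra_refcount(ps, p, false))
		ret = MF_FAILED;

	return ret;
}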
1194 static int me_huge_page(struct page_state *ps, struct page *p) in me_huge_page() argument
1196 struct folio *folio = page_folio(p); in me_huge_page()
1203 res = truncate_error_page(&folio->page, page_to_pfn(p), mapping); in me_huge_page()
1215 if (__page_handle_poison(p) > 0) { in me_huge_page()
1216 page_ref_inc(p); in me_huge_page()
1223 if (has_extra_refcount(ps, p, extra_pins)) in me_huge_page()
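Hugetlb pages with a mapping can be truncated like ordinary page-cache pages; anonymous or unmapped huge pages are instead dissolved so that only the single raw 4K page stays poisoned and the rest of the folio can be reused. A condensed sketch, assuming __page_handle_poison() (referenced above) returns > 0 on a successful dissolve:

static int me_huge_page(struct page_state *ps, struct page *p)
{
	struct folio *folio = page_folio(p);
	struct address_space *mapping;
	bool extra_pins = false;
	int res;

	mapping = folio_mapping(folio);
	if (mapping) {
		res = truncate_error_page(&folio->page, page_to_pfn(p),
					  mapping);
		/* The hugetlb page cache pins the folio. */
		extra_pins = true;
		folio_unlock(folio);
	} else {
		folio_unlock(folio);
		/* Dissolve: only the raw subpage remains poisoned. */
		if (__page_handle_poison(p) > 0) {
			page_ref_inc(p);
			res = MF_RECOVERED;
		} else {
			res = MF_FAILED;
		}
	}

	if (has_extra_refcount(ps, p, extra_pins))
		res = MF_FAILED;

	return res;
}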
1346 static int page_action(struct page_state *ps, struct page *p, in page_action() argument
1352 result = ps->action(ps, p); in page_action()
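page_action() dispatches to the matched handler and records the outcome; anything other than MF_RECOVERED is reported to the caller as -EBUSY. Sketch:

static int page_action(struct page_state *ps, struct page *p,
		       unsigned long pfn)
{
	int result;

	/* The handler must return with the page already unlocked. */
	result = ps->action(ps, p);

	/* Log the outcome (printk and hwpoison tracepoint). */
	action_result(pfn, ps->type, result);

	return result == MF_RECOVERED ? 0 : -EBUSY;
}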
1429 static int get_any_page(struct page *p, unsigned long flags) in get_any_page() argument
1439 ret = __get_hwpoison_page(p, flags); in get_any_page()
1441 if (page_count(p)) { in get_any_page()
1446 } else if (!PageHuge(p) && !is_free_buddy_page(p)) { in get_any_page()
1459 shake_page(p); in get_any_page()
1467 if (PageHuge(p) || HWPoisonHandlable(p, flags)) { in get_any_page()
1475 put_page(p); in get_any_page()
1476 shake_page(p); in get_any_page()
1480 put_page(p); in get_any_page()
1485 pr_err("%#lx: unhandlable page.\n", page_to_pfn(p)); in get_any_page()
1543 static int get_hwpoison_page(struct page *p, unsigned long flags) in get_hwpoison_page() argument
1547 zone_pcp_disable(page_zone(p)); in get_hwpoison_page()
1549 ret = __get_unpoison_page(p); in get_hwpoison_page()
1551 ret = get_any_page(p, flags); in get_hwpoison_page()
1552 zone_pcp_enable(page_zone(p)); in get_hwpoison_page()
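get_hwpoison_page() wraps the reference grab in a per-CPU-pagelist disable, so a free page cannot slip through a pcp list while we try to pin it or take it off the buddy allocator; the MF_UNPOISON flag selects the unpoison variant. Sketch:

static int get_hwpoison_page(struct page *p, unsigned long flags)
{
	int ret;

	/* Keep free pages off the pcp lists while we examine p. */
	zone_pcp_disable(page_zone(p));
	if (flags & MF_UNPOISON)
		ret = __get_unpoison_page(p);
	else
		ret = get_any_page(p, flags);
	zone_pcp_enable(page_zone(p));

	return ret;
}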
1561 static bool hwpoison_user_mappings(struct page *p, unsigned long pfn, in hwpoison_user_mappings() argument
1576 if (PageReserved(p) || PageSlab(p) || PageTable(p) || PageOffline(p)) in hwpoison_user_mappings()
1578 if (!(PageLRU(hpage) || PageHuge(p))) in hwpoison_user_mappings()
1585 if (!page_mapped(p)) in hwpoison_user_mappings()
1588 if (PageSwapCache(p)) { in hwpoison_user_mappings()
1616 collect_procs(folio, p, &tokill, flags & MF_ACTION_REQUIRED); in hwpoison_user_mappings()
1636 unmap_success = !page_mapped(p); in hwpoison_user_mappings()
1639 pfn, page_mapcount(p)); in hwpoison_user_mappings()
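hwpoison_user_mappings() is the heart of the kill path: collect the affected tasks while the page's rmap is still intact, unmap the page from every user address space, then SIGBUS the collected tasks. A heavily condensed sketch of that ordering, with the special cases from the lines above kept as comments (THP and forcekill details trimmed):

static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
				   int flags, struct page *hpage)
{
	struct folio *folio = page_folio(hpage);
	enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC | TTU_HWPOISON;
	bool unmap_success, forcekill;
	LIST_HEAD(tokill);

	/* Kernel-internal pages have no user mappings to tear down. */
	if (PageReserved(p) || PageSlab(p) || PageTable(p) || PageOffline(p))
		return true;
	if (!(PageLRU(hpage) || PageHuge(p)))
		return true;
	if (!page_mapped(p))
		return true;

	/* Keep poisoned swap-cache pages: don't turn their PTEs into
	 * hwpoison swap entries. */
	if (PageSwapCache(p))
		ttu &= ~TTU_HWPOISON;

	/*
	 * Collect the victim tasks first, while the rmap information
	 * that collect_procs() depends on is still valid.
	 */
	collect_procs(folio, p, &tokill, flags & MF_ACTION_REQUIRED);

	try_to_unmap(folio, ttu);

	unmap_success = !page_mapped(p);
	if (!unmap_success)
		pr_err("%#lx: failed to unmap page (mapcount=%d)\n",
		       pfn, page_mapcount(p));

	/* SIGBUS the collected tasks; force on dirty data or failure. */
	forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL) ||
		    !unmap_success;
	kill_procs(&tokill, forcekill, !unmap_success, pfn, flags);

	return unmap_success;
}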
1665 static int identify_page_state(unsigned long pfn, struct page *p, in identify_page_state() argument
1676 if ((p->flags & ps->mask) == ps->res) in identify_page_state()
1679 page_flags |= (p->flags & (1UL << PG_dirty)); in identify_page_state()
1685 return page_action(ps, p, pfn); in identify_page_state()
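identify_page_state() matches the page against the error_states table twice: first with the live flags, then, if nothing matched, with the flags snapshotted before unmapping, carrying PG_dirty over because unmapping can launder the page. Sketch, close to the lines above:

static int identify_page_state(unsigned long pfn, struct page *p,
			       unsigned long page_flags)
{
	struct page_state *ps;

	/* First pass: the current, live page flags. */
	for (ps = error_states;; ps++)
		if ((p->flags & ps->mask) == ps->res)
			break;

	/* Unmapping may have cleaned the page; keep the dirty bit. */
	page_flags |= (p->flags & (1UL << PG_dirty));

	if (!ps->mask)
		/* Second pass: the flags saved before unmapping. */
		for (ps = error_states;; ps++)
			if ((page_flags & ps->mask) == ps->res)
				break;
	return page_action(ps, p, pfn);
}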
1852 struct raw_hwp_page *p; in is_raw_hwpoison_page_in_hugepage() local
1872 llist_for_each_entry(p, raw_hwp_head->first, node) { in is_raw_hwpoison_page_in_hugepage()
1873 if (page == p->page) { in is_raw_hwpoison_page_in_hugepage()
1887 struct raw_hwp_page *p, *next; in __folio_free_raw_hwp() local
1891 llist_for_each_entry_safe(p, next, head, node) { in __folio_free_raw_hwp()
1893 SetPageHWPoison(p->page); in __folio_free_raw_hwp()
1895 num_poisoned_pages_sub(page_to_pfn(p->page), 1); in __folio_free_raw_hwp()
1896 kfree(p); in __folio_free_raw_hwp()
1906 struct raw_hwp_page *p, *next; in folio_set_hugetlb_hwpoison() local
1917 llist_for_each_entry_safe(p, next, head->first, node) { in folio_set_hugetlb_hwpoison()
1918 if (p->page == page) in folio_set_hugetlb_hwpoison()
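Hugetlb folios track which raw subpages are poisoned in a per-folio llist of struct raw_hwp_page, since one PG_hwpoison bit on the head page cannot say which 4K page is bad. folio_set_hugetlb_hwpoison() walks that list to reject duplicates, and __folio_free_raw_hwp() drains it when the folio is dissolved or unpoisoned. Sketch of the drain helper:

static unsigned long __folio_free_raw_hwp(struct folio *folio, bool move_flag)
{
	struct llist_node *head;
	struct raw_hwp_page *p, *next;
	unsigned long count = 0;

	head = llist_del_all(raw_hwp_list_head(folio));
	llist_for_each_entry_safe(p, next, head, node) {
		if (move_flag)
			/* Folio is being dissolved: move the poison
			 * marker down to the raw page itself. */
			SetPageHWPoison(p->page);
		else
			num_poisoned_pages_sub(page_to_pfn(p->page), 1);
		kfree(p);
		count++;
	}
	return count;
}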
2040 struct page *p = pfn_to_page(pfn); in try_memory_failure_hugetlb() local
2054 folio = page_folio(p); in try_memory_failure_hugetlb()
2066 folio = page_folio(p); in try_memory_failure_hugetlb()
2069 if (hwpoison_filter(p)) { in try_memory_failure_hugetlb()
2085 if (__page_handle_poison(p) > 0) { in try_memory_failure_hugetlb()
2086 page_ref_inc(p); in try_memory_failure_hugetlb()
2096 if (!hwpoison_user_mappings(p, pfn, flags, &folio->page)) { in try_memory_failure_hugetlb()
2101 return identify_page_state(pfn, p, page_flags); in try_memory_failure_hugetlb()
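try_memory_failure_hugetlb() is the hugetlb front end of memory_failure(): set the hwpoison markers, run the injection filter, dissolve free huge pages outright, and otherwise fall through to the common unmap-and-identify path. A heavily condensed sketch (the marker setting, free-page dissolve branch, retry logic, and accounting are trimmed to comments):

static int try_memory_failure_hugetlb(unsigned long pfn, int flags,
				      int *hugetlb)
{
	struct page *p = pfn_to_page(pfn);
	struct folio *folio;
	unsigned long page_flags;

	/* (Hwpoison marking and the free/in-use/retry dance trimmed;
	 * free huge pages get dissolved via __page_handle_poison().) */

	folio = page_folio(p);
	folio_lock(folio);

	if (hwpoison_filter(p)) {
		/* Injection filtered out: undo the markers and bail. */
		folio_unlock(folio);
		folio_put(folio);
		return -EOPNOTSUPP;
	}

	page_flags = folio->flags;

	/* Tear down user mappings and SIGBUS, as for 4K pages. */
	if (!hwpoison_user_mappings(p, pfn, flags, &folio->page)) {
		folio_unlock(folio);
		return action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
	}

	return identify_page_state(pfn, p, page_flags);
}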
2184 struct page *p; in memory_failure() local
2200 p = pfn_to_online_page(pfn); in memory_failure()
2201 if (!p) { in memory_failure()
2225 if (TestSetPageHWPoison(p)) { in memory_failure()
2231 put_page(p); in memory_failure()
2247 res = get_hwpoison_page(p, flags); in memory_failure()
2249 if (is_free_buddy_page(p)) { in memory_failure()
2250 if (take_page_off_buddy(p)) { in memory_failure()
2251 page_ref_inc(p); in memory_failure()
2256 ClearPageHWPoison(p); in memory_failure()
2273 hpage = compound_head(p); in memory_failure()
2289 if (try_to_split_thp_page(p) < 0) { in memory_failure()
2293 VM_BUG_ON_PAGE(!page_count(p), p); in memory_failure()
2304 shake_page(p); in memory_failure()
2306 lock_page(p); in memory_failure()
2314 if (PageCompound(p)) { in memory_failure()
2316 ClearPageHWPoison(p); in memory_failure()
2317 unlock_page(p); in memory_failure()
2318 put_page(p); in memory_failure()
2334 page_flags = p->flags; in memory_failure()
2336 if (hwpoison_filter(p)) { in memory_failure()
2337 ClearPageHWPoison(p); in memory_failure()
2338 unlock_page(p); in memory_failure()
2339 put_page(p); in memory_failure()
2349 if (!PageLRU(p) && !PageWriteback(p)) in memory_failure()
2356 wait_on_page_writeback(p); in memory_failure()
2362 if (!hwpoison_user_mappings(p, pfn, flags, p)) { in memory_failure()
2370 if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) { in memory_failure()
2376 res = identify_page_state(pfn, p, page_flags); in memory_failure()
2380 unlock_page(p); in memory_failure()
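memory_failure() above is the entry point reached both from the machine-check handler and from the madvise() injector. To exercise the whole path end to end from userspace, a root-only program can poison one of its own pages with MADV_HWPOISON (requires CONFIG_MEMORY_FAILURE and CAP_SYS_ADMIN); touching the page afterwards delivers SIGBUS. A minimal runnable demo:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	char *buf = mmap(NULL, psz, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(buf, 0xaa, psz);	/* fault the page in, make it anon-dirty */

	/* Runs memory_failure() on the backing pfn; needs CAP_SYS_ADMIN. */
	if (madvise(buf, psz, MADV_HWPOISON) != 0) {
		perror("madvise(MADV_HWPOISON)");
		return 1;
	}

	printf("page poisoned; the next access should SIGBUS\n");
	buf[0] = 1;	/* expect SIGBUS (BUS_MCEERR_AR) here */
	return 0;
}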
2518 struct page *p; in unpoison_memory() local
2528 p = pfn_to_page(pfn); in unpoison_memory()
2529 folio = page_folio(p); in unpoison_memory()
2547 if (!PageHWPoison(p)) { in unpoison_memory()
2579 ghp = get_hwpoison_page(p, MF_UNPOISON); in unpoison_memory()
2581 if (PageHuge(p)) { in unpoison_memory()
2590 ret = put_page_back_buddy(p) ? 0 : -EBUSY; in unpoison_memory()
2597 if (PageHuge(p)) { in unpoison_memory()
2607 if (TestClearPageHWPoison(p)) { in unpoison_memory()
2619 page_to_pfn(p), &unpoison_rs); in unpoison_memory()
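unpoison_memory() is the testing-only inverse, reached through the hwpoison-inject debugfs interface: it only works on pages poisoned by software injection, never on pages with a real hardware error still mapped. A condensed sketch of the main branches (mf_mutex locking, the hugetlb raw_hwp draining, and the get_hwpoison_page() error cases are trimmed):

int unpoison_memory(unsigned long pfn)
{
	struct page *p = pfn_to_page(pfn);
	struct folio *folio = page_folio(p);
	int ret = -EBUSY, ghp;

	if (!PageHWPoison(p))
		return 0;	/* nothing to unpoison */

	ghp = get_hwpoison_page(p, MF_UNPOISON);
	if (!ghp) {
		/* Free page: clear the flag and return it to the buddy
		 * allocator (huge pages drain their raw_hwp list first). */
		if (PageHuge(p))
			ret = folio_test_clear_hwpoison(folio) ? 0 : -EBUSY;
		else
			ret = put_page_back_buddy(p) ? 0 : -EBUSY;
	} else if (ghp > 0) {
		/* In-use page holding only the expected references. */
		if (TestClearPageHWPoison(p)) {
			num_poisoned_pages_sub(pfn, 1);
			ret = 0;
		}
		folio_put(folio);
	}

	if (!ret)
		unpoison_pr_info("Unpoison: Software-unpoisoned page %#lx\n",
				 page_to_pfn(p), &unpoison_rs);
	return ret;
}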