/openbmc/linux/mm/
page_vma_mapped.c
    map_pte():
         18  pte_t ptent;  (local)
         40  ptent = ptep_get(pvmw->pte);
         43  if (!is_swap_pte(ptent))
         45  } else if (is_swap_pte(ptent)) {
         63  entry = pte_to_swp_entry(ptent);
         67  } else if (!pte_present(ptent)) {
    check_pte():
         99  pte_t ptent = ptep_get(pvmw->pte);  (local)
        103  if (!is_swap_pte(ptent))
        105  entry = pte_to_swp_entry(ptent);
        112  } else if (is_swap_pte(ptent)) {
    [all …]

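Both map_pte() and check_pte() above apply the same three-way test to a one-shot snapshot taken with ptep_get(). A minimal sketch of that classification pattern, assuming kernel context; the helper name is hypothetical, the predicates are the in-tree API:

#include <linux/mm.h>
#include <linux/swapops.h>

/* Hypothetical helper illustrating the map_pte()/check_pte() checks above. */
static void classify_ptent_sketch(pte_t ptent)
{
	if (pte_none(ptent)) {
		/* empty slot: nothing is mapped here */
	} else if (pte_present(ptent)) {
		/* a live hardware mapping; pte_pfn()/vm_normal_page() apply */
	} else {
		/* is_swap_pte(): neither empty nor present, so the bits
		 * encode a software entry (swap, migration, device, ...) */
		swp_entry_t entry = pte_to_swp_entry(ptent);

		(void)entry;	/* callers dispatch on the entry type */
	}
}
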
mapping_dirty_helpers.c
    wp_pte():
         38  pte_t ptent = ptep_get(pte);  (local)
         40  if (pte_write(ptent)) {
         43  ptent = pte_wrprotect(old_pte);
         44  ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent);
    clean_record_pte():
         94  pte_t ptent = ptep_get(pte);  (local)
         96  if (pte_dirty(ptent)) {
        101  ptent = pte_mkclean(old_pte);
        102  ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent);

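wp_pte() and clean_record_pte() both wrap the update in a ptep_modify_prot_start()/ptep_modify_prot_commit() pair so that accessed/dirty bits the hardware may set in between are not lost. A cut-down sketch of the write-protect side, assuming it runs as a pagewalk pte_entry callback under the PTE lock; the real helper also records the touched range and leaves TLB flushing to its caller:

#include <linux/pagewalk.h>
#include <linux/pgtable.h>

/* Hypothetical pte_entry callback modelled on wp_pte() above. */
static int wrprotect_pte_sketch(pte_t *pte, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	pte_t ptent = ptep_get(pte);		/* snapshot the live entry */

	if (pte_write(ptent)) {
		pte_t old_pte = ptep_modify_prot_start(walk->vma, addr, pte);

		ptent = pte_wrprotect(old_pte);
		ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent);
	}
	return 0;
}
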
madvise.c
    madvise_cold_or_pageout_pte_range():
        351  pte_t *start_pte, *pte, ptent;  (local)
        443  ptent = ptep_get(pte);
        445  if (pte_none(ptent))
        448  if (!pte_present(ptent))
        451  folio = vm_normal_folio(vma, addr, ptent);
        499  if (pte_young(ptent)) {
        500  ptent = ptep_get_and_clear_full(mm, addr, pte,
        502  ptent = pte_mkold(ptent);
        503  set_pte_at(mm, addr, pte, ptent);
    madvise_free_pte_range():
        633  pte_t *start_pte, *pte, ptent;  (local)
    [all …]

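The cold/pageout path above drops the accessed bit by clearing the PTE, ageing the snapshot and writing it back, so the hardware cannot re-set the bit halfway through. A simplified sketch of that step; the real function does this inside a tlb_gather batch and flushes the TLB afterwards, which is omitted here, and the helper name is hypothetical:

#include <linux/mm.h>
#include <linux/pgtable.h>

/* Hypothetical helper modelled on the pte_young() branch of
 * madvise_cold_or_pageout_pte_range(). */
static void pte_mkold_sketch(struct mm_struct *mm, unsigned long addr,
			     pte_t *pte)
{
	pte_t ptent = ptep_get(pte);

	if (!pte_present(ptent) || !pte_young(ptent))
		return;

	/* full == 0: this is not a full address-space teardown */
	ptent = ptep_get_and_clear_full(mm, addr, pte, 0);
	ptent = pte_mkold(ptent);
	set_pte_at(mm, addr, pte, ptent);
}
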
mprotect.c
    change_pte_range():
        110  pte_t ptent;  (local)
        165  ptent = pte_modify(oldpte, newprot);
        168  ptent = pte_mkuffd_wp(ptent);
        170  ptent = pte_clear_uffd_wp(ptent);
        186  !pte_write(ptent) &&
        187  can_change_pte_writable(vma, addr, ptent))
        188  ptent = pte_mkwrite(ptent, vma);
        190  ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
        191  if (pte_needs_flush(oldpte, ptent))

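change_pte_range() rebuilds the entry from the new protection with pte_modify() and only then decides whether it may also become writable. A reduced sketch of that flow, assuming the two-argument pte_mkwrite() of recent kernels; the real code additionally juggles the uffd-wp bits and gates the write upgrade on the mm-internal can_change_pte_writable(), which this sketch deliberately leaves out:

#include <linux/mm.h>
#include <linux/pgtable.h>

/* Hypothetical, simplified version of the present-PTE branch of
 * change_pte_range(). */
static void change_prot_pte_sketch(struct vm_area_struct *vma,
				   unsigned long addr, pte_t *pte,
				   pgprot_t newprot)
{
	pte_t oldpte = ptep_modify_prot_start(vma, addr, pte);
	pte_t ptent = pte_modify(oldpte, newprot);

	/* the kernel also requires can_change_pte_writable() here;
	 * skipping that check is unsafe outside this sketch */
	if ((vma->vm_flags & VM_WRITE) && !pte_write(ptent))
		ptent = pte_mkwrite(ptent, vma);

	ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
}
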
highmem.c
    flush_all_zero_pkmaps():
        194  pte_t ptent;  (local)
        207  ptent = ptep_get(&pkmap_page_table[i]);
        208  BUG_ON(pte_none(ptent));
        217  page = pte_page(ptent);

mlock.c
    mlock_pte_range():
        315  pte_t ptent;  (local)
        338  ptent = ptep_get(pte);
        339  if (!pte_present(ptent))
        341  folio = vm_normal_folio(vma, addr, ptent);

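mlock_pte_range() only translates present entries into folios, and vm_normal_folio() is what filters out special PFN mappings that have no struct page behind them. A tiny sketch of that lookup with a hypothetical helper name:

#include <linux/mm.h>

/* Hypothetical: resolve the folio a present user PTE maps, or NULL. */
static struct folio *folio_of_pte_sketch(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *pte)
{
	pte_t ptent = ptep_get(pte);

	if (!pte_present(ptent))
		return NULL;

	/* NULL for special mappings and the zero page */
	return vm_normal_folio(vma, addr, ptent);
}
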
khugepaged.c
    collapse_pte_mapped_thp():
       1540  pte_t ptent = ptep_get(pte);  (local)
       1543  if (pte_none(ptent))
       1547  if (!pte_present(ptent)) {
       1552  page = vm_normal_page(vma, addr, ptent);
       1592  pte_t ptent = ptep_get(pte);  (local)
       1594  if (pte_none(ptent))
       1602  if (!pte_present(ptent)) {
       1606  page = vm_normal_page(vma, addr, ptent);

memory.c
    copy_pte_range():
       1009  pte_t ptent;  (local)
       1055  ptent = ptep_get(src_pte);
       1056  if (pte_none(ptent)) {
       1060  if (unlikely(!pte_present(ptent))) {
    zap_pte_range():
       1416  pte_t ptent = ptep_get(pte);  (local)
       1419  if (pte_none(ptent))
       1425  if (pte_present(ptent)) {
       1428  page = vm_normal_page(vma, addr, ptent);
       1431  ptent = ptep_get_and_clear_full(mm, addr, pte,
       1433  arch_check_zapped_pte(vma, ptent);
    [all …]

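In zap_pte_range() the page has to be resolved from the snapshot while the entry is still present, and only then is the slot cleared atomically. A stripped-down sketch of that ordering; the real code also feeds the mmu_gather, adjusts RSS counters and handles swap and migration entries, and the helper name is hypothetical:

#include <linux/mm.h>
#include <linux/pgtable.h>

/* Hypothetical sketch of the present-PTE leg of zap_pte_range(). */
static struct page *zap_present_pte_sketch(struct vm_area_struct *vma,
					   struct mm_struct *mm,
					   unsigned long addr, pte_t *pte)
{
	pte_t ptent = ptep_get(pte);
	struct page *page;

	if (pte_none(ptent) || !pte_present(ptent))
		return NULL;

	page = vm_normal_page(vma, addr, ptent);	/* may be NULL */
	ptent = ptep_get_and_clear_full(mm, addr, pte, 0);

	/* from here on the mapping exists only in 'ptent' and 'page';
	 * the caller is responsible for the TLB flush and the page's fate */
	return page;
}
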
memcontrol.c
    mc_handle_present_pte():
       5698  unsigned long addr, pte_t ptent)  (argument)
       5700  struct page *page = vm_normal_page(vma, addr, ptent);
    mc_handle_swap_pte():
       5718  pte_t ptent, swp_entry_t *entry)  (argument)
       5721  swp_entry_t ent = pte_to_swp_entry(ptent);
       5751  pte_t ptent, swp_entry_t *entry)  (argument)
    mc_handle_file_pte():
       5758  unsigned long addr, pte_t ptent)  (argument)
    get_mctgt_type():
       5922  unsigned long addr, pte_t ptent, union mc_target *target)  (argument)
       5928  if (pte_present(ptent))
       5929  page = mc_handle_present_pte(vma, addr, ptent);
       5930  else if (pte_none_mostly(ptent))
    [all …]

rmap.c
    page_make_device_exclusive_one():
       2194  pte_t ptent;  (local)
       2206  ptent = ptep_get(pvmw.pte);
       2207  if (!pte_present(ptent)) {
       2214  pte_pfn(ptent) - folio_pfn(folio));
       2218  flush_cache_page(vma, address, pte_pfn(ptent));

ksm.c
    break_ksm_pmd_entry():
        438  pte_t ptent;  (local)
        444  ptent = ptep_get(pte);
        445  if (pte_present(ptent)) {
        446  page = vm_normal_page(walk->vma, addr, ptent);
        447  } else if (!pte_none(ptent)) {
        448  swp_entry_t entry = pte_to_swp_entry(ptent);

mempolicy.c
    queue_folios_pte_range():
        503  pte_t ptent;  (local)
        519  ptent = ptep_get(pte);
        520  if (pte_none(ptent))
        522  if (!pte_present(ptent)) {
        523  if (is_migration_entry(pte_to_swp_entry(ptent)))
        527  folio = vm_normal_folio(vma, addr, ptent);

memory-failure.c
    dev_pagemap_mapping_shift():
        397  pte_t ptent;  (local)
        419  ptent = ptep_get(pte);
        420  if (pte_present(ptent) && pte_devmap(ptent))

vmalloc.c
    vunmap_pte_range():
        328  pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);  (local)
        329  WARN_ON(!pte_none(ptent) && !pte_present(ptent));
    vmap_pfn_apply():
       2966  pte_t ptent;  (local)
       2971  ptent = pte_mkspecial(pfn_pte(pfn, data->prot));
       2972  set_pte_at(&init_mm, addr, pte, ptent);

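vmap_pfn_apply() is driven by apply_to_page_range(), which walks (and, for the vmalloc area of init_mm, allocates) the page tables and calls back once per PTE slot; the callback builds a special entry from a raw PFN and installs it. A sketch of such a callback with a hypothetical context struct:

#include <linux/mm.h>
#include <linux/pgtable.h>

struct pfn_map_ctx {			/* hypothetical: carries pfn + prot */
	unsigned long pfn;
	pgprot_t prot;
};

/* pte_fn_t callback for apply_to_page_range(), modelled on vmap_pfn_apply(). */
static int set_pfn_pte_sketch(pte_t *pte, unsigned long addr, void *data)
{
	struct pfn_map_ctx *ctx = data;
	pte_t ptent = pte_mkspecial(pfn_pte(ctx->pfn++, ctx->prot));

	set_pte_at(&init_mm, addr, pte, ptent);
	return 0;
}

/* usage: apply_to_page_range(&init_mm, vaddr, size, set_pfn_pte_sketch, &ctx); */
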
vmscan.c
    walk_pte_range():
       4017  pte_t ptent = ptep_get(pte + i);  (local)
       4022  pfn = get_pte_pfn(ptent, args->vma, addr);
       4026  if (!pte_young(ptent)) {
       4041  if (pte_dirty(ptent) && !folio_test_dirty(folio) &&
    lru_gen_look_around():
       4713  pte_t ptent = ptep_get(pte + i);  (local)
       4715  pfn = get_pte_pfn(ptent, vma, addr);
       4719  if (!pte_young(ptent))
       4731  if (pte_dirty(ptent) && !folio_test_dirty(folio) &&

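Both MGLRU walkers treat a set young bit on the snapshot as "this mapping was referenced since the last walk", and a dirty bit the folio does not yet carry as something that must be propagated before reclaim can treat the folio as clean. A condensed sketch of that test with a hypothetical helper; the real walkers apply extra anon/swapcache checks before marking the folio dirty:

#include <linux/mm.h>
#include <linux/pgtable.h>

/* Hypothetical helper condensing the per-PTE checks of walk_pte_range(). */
static bool pte_referenced_sketch(struct vm_area_struct *vma,
				  unsigned long addr, pte_t *pte,
				  struct folio *folio)
{
	pte_t ptent = ptep_get(pte);

	if (!pte_present(ptent) || !pte_young(ptent))
		return false;

	/* clear the accessed bit so the next walk sees fresh information */
	ptep_test_and_clear_young(vma, addr, pte);

	if (pte_dirty(ptent) && !folio_test_dirty(folio))
		folio_mark_dirty(folio);

	return true;
}
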
swapfile.c
    unuse_pte_range():
       1856  pte_t ptent;  (local)
       1864  ptent = ptep_get_lockless(pte);
       1866  if (!is_swap_pte(ptent))
       1869  entry = pte_to_swp_entry(ptent);

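unuse_pte_range() peeks at the entry with ptep_get_lockless() before deciding whether taking the PTE lock is worthwhile, so the value may be stale and has to be re-checked under the lock. A sketch of that cheap pre-filter; the helper name and parameter are hypothetical:

#include <linux/mm.h>
#include <linux/swapops.h>

/* Hypothetical: does this PTE look like a swap slot of the given type? */
static bool pte_is_swap_of_type_sketch(pte_t *pte, unsigned int type)
{
	pte_t ptent = ptep_get_lockless(pte);	/* unlocked, may be stale */
	swp_entry_t entry;

	if (!is_swap_pte(ptent))
		return false;

	entry = pte_to_swp_entry(ptent);
	return swp_type(entry) == type;
}
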
/openbmc/linux/fs/proc/
task_mmu.c
    smaps_pte_entry():
        530  pte_t ptent = ptep_get(pte);  (local)
        532  if (pte_present(ptent)) {
        533  page = vm_normal_page(vma, addr, ptent);
        534  young = pte_young(ptent);
        535  dirty = pte_dirty(ptent);
        536  } else if (is_swap_pte(ptent)) {
        537  swp_entry_t swpent = pte_to_swp_entry(ptent);
    smaps_hugetlb_range():
        727  pte_t ptent = ptep_get(pte);  (local)
        729  if (pte_present(ptent)) {
        730  page = vm_normal_page(vma, addr, ptent);
    [all …]

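smaps_pte_entry() splits every snapshot into the same two buckets the listing shows: present entries are resolved to a page and charged to the resident counters, swap entries to the swap counter. A reduced sketch with a hypothetical stats struct and helper name:

#include <linux/mm.h>
#include <linux/swapops.h>

struct pss_sketch {			/* hypothetical accumulator */
	unsigned long resident;
	unsigned long swapped;
};

/* Hypothetical helper modelled on smaps_pte_entry(). */
static void account_pte_sketch(struct vm_area_struct *vma, unsigned long addr,
			       pte_t *pte, struct pss_sketch *stats)
{
	pte_t ptent = ptep_get(pte);

	if (pte_present(ptent)) {
		if (vm_normal_page(vma, addr, ptent))
			stats->resident += PAGE_SIZE;
	} else if (is_swap_pte(ptent)) {
		swp_entry_t swpent = pte_to_swp_entry(ptent);

		if (!non_swap_entry(swpent))	/* a real swap slot */
			stats->swapped += PAGE_SIZE;
	}
}
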
/openbmc/linux/mm/kasan/
init.c
    kasan_remove_pte_table():
        356  pte_t ptent;  (local)
        363  ptent = ptep_get(pte);
        365  if (!pte_present(ptent))
        368  if (WARN_ON(!kasan_early_shadow_page_entry(ptent)))

/openbmc/linux/mm/damon/
vaddr.c
    damon_young_pmd_entry():
        443  pte_t ptent;  (local)
        486  ptent = ptep_get(pte);
        487  if (!pte_present(ptent))
        489  folio = damon_get_folio(pte_pfn(ptent));
        492  if (pte_young(ptent) || !folio_test_idle(folio) ||

/openbmc/linux/drivers/gpu/drm/i915/gem/selftests/
i915_gem_mman.c
    check_present_pte():
       1684  pte_t ptent = ptep_get(pte);  (local)
       1686  if (!pte_present(ptent) || pte_none(ptent)) {
    check_absent_pte():
       1697  pte_t ptent = ptep_get(pte);  (local)
       1699  if (pte_present(ptent) && !pte_none(ptent)) {

/openbmc/linux/fs/
userfaultfd.c
    userfaultfd_must_wait():
        335  pte_t ptent;  (local)
        375  ptent = ptep_get(pte);
        376  if (pte_none_mostly(ptent))
        378  if (!pte_write(ptent) && (reason & VM_UFFD_WP))