/openbmc/linux/arch/x86/include/asm/

  kfence.h
      33: if (level != PG_LEVEL_4K)  in arch_kfence_init_pool()
      46: if (WARN_ON(!pte || level != PG_LEVEL_4K))  in kfence_protect_page()

  pgtable_types.h
      552: PG_LEVEL_4K,  (enumerator)
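For reference, the enumerator flagged at pgtable_types.h:552 belongs to the x86 page-table level enum; a minimal sketch, assuming the upstream definition, in which PG_LEVEL_4K is the lowest (leaf) translation level:

/* Sketch of enum pg_level, assuming the upstream definition in
 * arch/x86/include/asm/pgtable_types.h; PG_LEVEL_4K is the lowest
 * (leaf) translation level on x86. */
enum pg_level {
	PG_LEVEL_NONE,
	PG_LEVEL_4K,
	PG_LEVEL_2M,
	PG_LEVEL_1G,
	PG_LEVEL_512G,
	PG_LEVEL_NUM
};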
/openbmc/linux/arch/x86/coco/tdx/

  tdx-shared.c
      24: case PG_LEVEL_4K:  in try_accept_one()
      64: accept_size = try_accept_one(start, len, PG_LEVEL_4K);  in tdx_accept_memory()
/openbmc/linux/arch/x86/kvm/mmu/

  spte.c
      176: if (level > PG_LEVEL_4K && (pte_access & ACC_EXEC_MASK) &&  in make_spte()
      189: if (level > PG_LEVEL_4K)  in make_spte()
      251: WARN_ON_ONCE(level > PG_LEVEL_4K);  in make_spte()
      302: if (role.level == PG_LEVEL_4K) {  in make_huge_page_split_spte()

  page_track.c
      67: index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K);  in update_gfn_write_track()
      96: if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K))  in __kvm_write_track_add_gfn()
      134: index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K);  in kvm_gfn_is_write_tracked()
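For context, the page_track.c matches index the write-track array via gfn_to_index(); a sketch assuming the upstream helper in arch/x86/include/asm/kvm_host.h, where the shift for PG_LEVEL_4K is zero, so tracking is kept per 4K page:

/* Sketch of gfn_to_index(), assuming the upstream definition: with
 * level == PG_LEVEL_4K the shift is 0 and the result is simply
 * gfn - base_gfn, i.e. one slot per 4K page in the memslot. */
static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
	/* KVM_HPAGE_GFN_SHIFT(PG_LEVEL_4K) must be 0. */
	return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
	       (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}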
  paging_tmpl.h
      74: #define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PG_LEVEL_4K)
      163: if (FNAME(is_rsvd_bits_set)(vcpu->arch.mmu, gpte, PG_LEVEL_4K))  in FNAME()
      295: gpte |= level - PG_LEVEL_4K - 1;  in FNAME()
      444: if (walker->level > PG_LEVEL_4K && is_cpuid_PSE36())  in FNAME()
      569: if (level == PG_LEVEL_4K) {  in FNAME()
      594: if (sp->role.level > PG_LEVEL_4K)  in FNAME()
      862: WARN_ON_ONCE(sp->role.level != PG_LEVEL_4K);  in FNAME()

  mmu.c
      833: if (sp->role.level > PG_LEVEL_4K)  in account_shadowed()
      838: if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K))  in account_shadowed()
      839: kvm_flush_remote_tlbs_gfn(kvm, gfn, PG_LEVEL_4K);  in account_shadowed()
      879: if (sp->role.level > PG_LEVEL_4K)  in unaccount_shadowed()
      1086: return &slot->arch.rmap[level - PG_LEVEL_4K][idx];  in gfn_to_rmap()
      1202: WARN_ON_ONCE(sp->role.level == PG_LEVEL_4K);  in drop_large_spte()
      1316: PG_LEVEL_4K, slot);  in kvm_mmu_write_protect_pt_masked()
      1349: PG_LEVEL_4K, slot);  in kvm_mmu_clear_dirty_pt_masked()
      1385: kvm_mmu_try_split_huge_pages(kvm, slot, start, end, PG_LEVEL_4K);  in kvm_arch_mmu_enable_log_dirty_pt_masked()
      1435: return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K);  in kvm_vcpu_write_protect_gfn()
      [all …]
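The rmap lookup at mmu.c:1086 uses PG_LEVEL_4K to rebase the per-level rmap arrays; a sketch assuming the upstream shape of the helper:

/* Sketch of gfn_to_rmap(), assuming the upstream shape of the function:
 * slot->arch.rmap[] holds one array per page-table level, with
 * PG_LEVEL_4K as index 0, so the level is rebased before indexing. */
static struct kvm_rmap_head *gfn_to_rmap(gfn_t gfn, int level,
					 const struct kvm_memory_slot *slot)
{
	unsigned long idx;

	idx = gfn_to_index(gfn, slot->base_gfn, level);
	return &slot->arch.rmap[level - PG_LEVEL_4K][idx];
}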
  mmu_internal.h
      297: .req_level = PG_LEVEL_4K,  in kvm_mmu_do_page_fault()
      298: .goal_level = PG_LEVEL_4K,  in kvm_mmu_do_page_fault()

  tdp_iter.h
      129: for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end)
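The tdp_iter.h match is presumably the body of the for_each_tdp_pte() convenience wrapper, as in upstream; passing PG_LEVEL_4K as the minimum level makes the iterator descend all the way to 4K leaf SPTEs:

/* Presumed wrapper around the match at tdp_iter.h:129 (as in upstream):
 * a minimum level of PG_LEVEL_4K walks every SPTE down to 4K leaves. */
#define for_each_tdp_pte(iter, root, start, end) \
	for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end)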
  tdp_mmu.c
      430: WARN_ON_ONCE(level < PG_LEVEL_4K);  in handle_changed_spte()
      802: for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end) {  in tdp_mmu_zap_leafs()
      1228: if (iter->level != PG_LEVEL_4K ||  in set_spte_gfn()
      1608: if (iter.level > PG_LEVEL_4K ||  in clear_dirty_pt_masked()

  spte.h
      320: return (level == PG_LEVEL_4K) || is_large_pte(pte);  in is_last_spte()
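Reconstructed around the single match above (spte.h:320): an SPTE is a leaf either because it sits at the 4K level or because it is a large-page mapping at a higher level.

/* Reconstruction of is_last_spte() around the match at spte.h:320;
 * is_large_pte() comes from the same header. */
static inline bool is_last_spte(u64 pte, int level)
{
	return (level == PG_LEVEL_4K) || is_large_pte(pte);
}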
/openbmc/linux/arch/x86/xen/

  p2m.c
      247: BUG_ON(!ptep || level != PG_LEVEL_4K);  in xen_build_mfn_list_list()
      445: BUG_ON(!ptep || level != PG_LEVEL_4K);  in get_phys_to_machine()
      539: BUG_ON(!ptep || level != PG_LEVEL_4K);  in xen_alloc_p2m_entry()
      664: BUG_ON(!ptep || level != PG_LEVEL_4K);  in __set_phys_to_machine()
/openbmc/linux/tools/testing/selftests/kvm/lib/x86_64/

  vmx.c
      429: for (int level = PG_LEVEL_512G; level >= PG_LEVEL_4K; level--) {  in __nested_pg_map()
      453: __nested_pg_map(vmx, vm, nested_paddr, paddr, PG_LEVEL_4K);  in nested_pg_map()
      493: __nested_map(vmx, vm, nested_paddr, paddr, size, PG_LEVEL_4K);  in nested_map()

  processor.c
      221: pte = virt_get_pte(vm, pde, vaddr, PG_LEVEL_4K);  in __virt_pg_map()
      229: __virt_pg_map(vm, vaddr, paddr, PG_LEVEL_4K);  in virt_arch_pg_map()
      296: return virt_get_pte(vm, pde, vaddr, PG_LEVEL_4K);  in __vm_get_page_table_entry()
      301: int level = PG_LEVEL_4K;  in vm_get_page_table_entry()
/openbmc/linux/arch/x86/mm/

  mem_encrypt_amd.c
      259: case PG_LEVEL_4K:  in pg_level_to_pfn()
      425: if (level == PG_LEVEL_4K) {  in early_set_memory_enc_dec()
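A simplified, hypothetical sketch of the level dispatch suggested by the pg_level_to_pfn() match at mem_encrypt_amd.c:259 (not the verbatim kernel function): the entry returned by lookup_address() is interpreted according to the reported level, with PG_LEVEL_4K meaning an ordinary pte.

/* Hypothetical, simplified sketch of the dispatch implied by the
 * pg_level_to_pfn() match above; not the verbatim kernel function. */
static unsigned long pg_level_to_pfn_sketch(int level, pte_t *kpte)
{
	switch (level) {
	case PG_LEVEL_4K:
		return pte_pfn(*kpte);
	case PG_LEVEL_2M:
		return pmd_pfn(*(pmd_t *)kpte);
	case PG_LEVEL_1G:
		return pud_pfn(*(pud_t *)kpte);
	default:
		WARN_ONCE(1, "Invalid level for kpte\n");
		return 0;
	}
}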
  pti.c
      288: if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))  in pti_setup_vsyscall()

  kmmio.c
      174: case PG_LEVEL_4K:  in clear_page_presence()

  init_64.c
      497: update_page_count(PG_LEVEL_4K, pages);  in phys_pte_init()
      1103: update_page_count(PG_LEVEL_4K, -pages);  in remove_pte_table()

  init_32.c
      371: update_page_count(PG_LEVEL_4K, pages_4k);  in kernel_physical_mapping_init()
/openbmc/linux/tools/testing/selftests/kvm/x86_64/

  nx_huge_pages_test.c
      148: virt_map_level(vm, HPAGE_GVA, HPAGE_GPA, nr_bytes, PG_LEVEL_4K);  in run_test()

  hyperv_tlb_flush.c
      624: __virt_pg_map(vm, gva + PAGE_SIZE * i, gpa & PAGE_MASK, PG_LEVEL_4K);  in main()
/openbmc/linux/arch/x86/kvm/vmx/

  capabilities.h
      330: return PG_LEVEL_4K;  in ept_caps_to_lpage_level()
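The match above is the fallback return; a sketch assuming the upstream shape of ept_caps_to_lpage_level(), where the largest EPT page size advertised by the CPU caps the huge-page level and PG_LEVEL_4K is the floor:

/* Sketch of ept_caps_to_lpage_level(), assuming the upstream shape:
 * the largest advertised EPT page size caps the huge-page level,
 * falling back to PG_LEVEL_4K when no large pages are supported. */
static inline int ept_caps_to_lpage_level(u32 ept_caps)
{
	if (ept_caps & VMX_EPT_1GB_PAGE_BIT)
		return PG_LEVEL_1G;
	if (ept_caps & VMX_EPT_2MB_PAGE_BIT)
		return PG_LEVEL_2M;
	return PG_LEVEL_4K;
}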
/openbmc/linux/arch/x86/mm/pat/

  cpa-test.c
      206: if (level != PG_LEVEL_4K) {  in pageattr_test()

  set_memory.c
      113: direct_pages_count[PG_LEVEL_4K] << 2);  in arch_report_meminfo()
      532: if (lookup_address(start, &level) && (level != PG_LEVEL_4K))  in protect_kernel_text_ro()
      718: *level = PG_LEVEL_4K;  in lookup_address_in_pgd_attr()
      1643: if (level == PG_LEVEL_4K) {  in __change_page_attr()
/openbmc/linux/tools/testing/selftests/kvm/include/x86_64/

  processor.h
      1204: PG_LEVEL_4K,  (enumerator)
      1214: #define PG_SIZE_4K PG_LEVEL_SIZE(PG_LEVEL_4K)
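For context, PG_SIZE_4K at processor.h:1214 expands via PG_LEVEL_SIZE(); assuming the selftest header defines the helpers as in upstream, each level adds 9 address bits on top of the 12-bit 4K page offset:

/* Assumed upstream definitions from the selftest processor.h: each
 * page-table level covers 9 more address bits on top of the 12-bit
 * 4K page offset, so PG_LEVEL_4K (== 1) yields a 4096-byte page. */
#define PG_LEVEL_SHIFT(_level)	((_level - 1) * 9 + 12)
#define PG_LEVEL_SIZE(_level)	(1ull << PG_LEVEL_SHIFT(_level))

#define PG_SIZE_4K	PG_LEVEL_SIZE(PG_LEVEL_4K)	/* 0x1000       */
#define PG_SIZE_2M	PG_LEVEL_SIZE(PG_LEVEL_2M)	/* 0x200000     */
#define PG_SIZE_1G	PG_LEVEL_SIZE(PG_LEVEL_1G)	/* 0x40000000   */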