Lines matching refs: role. Cross-reference hits for the 'role' identifier in the KVM x86 MMU (format: source line number, matching line, enclosing function; a trailing 'argument' or 'local' notes the kind of reference).
293 kvm_flush_remote_tlbs_gfn(kvm, gfn, sp->role.level); in kvm_flush_remote_tlbs_sptep()
569 int level = sptep_to_sp(sptep)->role.level; in mmu_spte_clear_track_bits()
723 if (sp->role.passthrough) in kvm_mmu_page_get_gfn()
726 if (!sp->role.direct) in kvm_mmu_page_get_gfn()
729 return sp->gfn + (index << ((sp->role.level - 1) * SPTE_LEVEL_BITS)); in kvm_mmu_page_get_gfn()
755 return sp->role.access; in kvm_mmu_page_get_access()
768 sp->role.passthrough ? "passthrough" : "direct", in kvm_mmu_page_set_translation()
773 sp->role.passthrough ? "passthrough" : "direct", in kvm_mmu_page_set_translation()
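The gfn computation at line 729 scales the SPTE index by the span of one entry at the next level down. A minimal userspace sketch of that arithmetic, assuming x86's 512 entries per table (SPTE_LEVEL_BITS == 9); struct sp_model is an invented stand-in, not the kernel's struct kvm_mmu_page:

    /* Model of the gfn calculation in kvm_mmu_page_get_gfn(): entry
     * 'index' maps the page's base gfn plus the index scaled by the
     * coverage of one entry at the level below. */
    #include <stdint.h>
    #include <stdio.h>

    #define SPTE_LEVEL_BITS 9
    typedef uint64_t gfn_t;

    struct sp_model {
        gfn_t gfn;   /* base gfn translated by this shadow page */
        int level;   /* role.level: 1 = tables of 4K leaves, 2 = 2M, ... */
    };

    static gfn_t sp_get_gfn(const struct sp_model *sp, int index)
    {
        return sp->gfn + ((gfn_t)index << ((sp->level - 1) * SPTE_LEVEL_BITS));
    }

    int main(void)
    {
        struct sp_model sp = { .gfn = 0x100000, .level = 2 };
        /* Entry 3 of a level-2 table starts 3 * 512 pages past the base. */
        printf("gfn = 0x%llx\n", (unsigned long long)sp_get_gfn(&sp, 3));
        return 0;
    }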
829 slots = kvm_memslots_for_spte_role(kvm, sp->role); in account_shadowed()
833 if (sp->role.level > PG_LEVEL_4K) in account_shadowed()
877 slots = kvm_memslots_for_spte_role(kvm, sp->role); in unaccount_shadowed()
879 if (sp->role.level > PG_LEVEL_4K) in unaccount_shadowed()
1105 slots = kvm_memslots_for_spte_role(kvm, sp->role); in rmap_remove()
1108 rmap_head = gfn_to_rmap(gfn, sp->role.level, slot); in rmap_remove()
1202 WARN_ON_ONCE(sp->role.level == PG_LEVEL_4K); in drop_large_spte()
1646 kvm_update_page_stats(kvm, sp->role.level, 1); in __rmap_add()
1648 rmap_head = gfn_to_rmap(gfn, sp->role.level, slot); in __rmap_add()
1655 kvm_flush_remote_tlbs_gfn(kvm, gfn, sp->role.level); in __rmap_add()
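The rmap paths above key both lookups off the role: kvm_memslots_for_spte_role() picks the address space (regular vs. SMM) from role.smm, and gfn_to_rmap() indexes a per-level array with role.level. The index math below is a sketch of what the kernel's gfn_to_index() helper does, with stand-in types and macro names; treat the layout as an assumption rather than a copy:

    /* Simplified model of how (gfn, level) indexes a memslot's per-level
     * rmap arrays; LEVEL_SHIFT is a stand-in for KVM_HPAGE_GFN_SHIFT. */
    #include <stdint.h>
    #include <stdio.h>

    #define PG_LEVEL_4K 1
    #define LEVEL_SHIFT(level) (((level) - 1) * 9)

    typedef uint64_t gfn_t;

    /* Index of the rmap bucket covering 'gfn' at 'level' within a slot. */
    static unsigned long gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
    {
        return (gfn >> LEVEL_SHIFT(level)) - (base_gfn >> LEVEL_SHIFT(level));
    }

    int main(void)
    {
        gfn_t base_gfn = 0x100000;
        /* At the 2M level (level 2), gfns 512 apart share one bucket. */
        printf("4K index: %lu\n", gfn_to_index(0x100203, base_gfn, PG_LEVEL_4K));
        printf("2M index: %lu\n", gfn_to_index(0x100203, base_gfn, 2));
        return 0;
    }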
1738 if (!sp->role.direct) in kvm_mmu_free_shadow_page()
1893 if (sp->role.direct) in sp_has_gptes()
1896 if (sp->role.passthrough) in sp_has_gptes()
1938 if (WARN_ON_ONCE(sp->role.direct || !vcpu->arch.mmu->sync_spte || in kvm_sync_page_check()
1939 (sp->role.word ^ root_role.word) & ~sync_role_ign.word)) in kvm_sync_page_check()
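The check at line 1939 is a word-wise role comparison: XOR the two packed words, mask off the fields a sync is allowed to ignore, and require zero. A self-contained sketch of that idiom; union role_model is an invented three-field layout, far smaller than the kernel's kvm_mmu_page_role:

    /* XOR-and-mask comparison of packed role words, as in
     * kvm_sync_page_check(). */
    #include <stdint.h>
    #include <stdio.h>

    union role_model {
        struct {
            uint32_t level   : 4;
            uint32_t access  : 3;
            uint32_t invalid : 1;
        };
        uint32_t word;
    };

    /* Roles match if they differ only in the masked-out bits. */
    static int roles_match(union role_model a, union role_model b,
                           union role_model ign)
    {
        return ((a.word ^ b.word) & ~ign.word) == 0;
    }

    int main(void)
    {
        union role_model root = { 0 }, child = { 0 }, ign = { 0 };

        root.level = 4;  root.access = 7;
        child.level = 1; child.access = 7;
        ign.level = 0xf;   /* level may differ between root and child */

        printf("match: %d\n", roles_match(root, child, ign));  /* prints 1 */
        return 0;
    }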
2007 if (sp->role.invalid) in is_obsolete_sp()
2034 int level = sp->role.level; in mmu_pages_next()
2058 level = sp->role.level; in mmu_pages_first()
2149 union kvm_mmu_page_role role) in kvm_mmu_find_shadow_page() argument
2162 if (sp->role.word != role.word) { in kvm_mmu_find_shadow_page()
2172 if (role.level > PG_LEVEL_4K && sp->unsync) in kvm_mmu_find_shadow_page()
2179 if (sp->role.direct) in kvm_mmu_find_shadow_page()
2234 union kvm_mmu_page_role role) in kvm_mmu_alloc_shadow_page() argument
2240 if (!role.direct) in kvm_mmu_alloc_shadow_page()
2257 sp->role = role; in kvm_mmu_alloc_shadow_page()
2270 union kvm_mmu_page_role role) in __kvm_mmu_get_shadow_page() argument
2278 sp = kvm_mmu_find_shadow_page(kvm, vcpu, gfn, sp_list, role); in __kvm_mmu_get_shadow_page()
2281 sp = kvm_mmu_alloc_shadow_page(kvm, caches, gfn, sp_list, role); in __kvm_mmu_get_shadow_page()
2290 union kvm_mmu_page_role role) in kvm_mmu_get_shadow_page() argument
2298 return __kvm_mmu_get_shadow_page(vcpu->kvm, vcpu, &caches, gfn, role); in kvm_mmu_get_shadow_page()
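kvm_mmu_find_shadow_page() walks a bucket of pages hashed by gfn and reuses one only on an exact role.word match (line 2162); kvm_mmu_alloc_shadow_page() covers the miss. The toy below models that find-or-create pattern, with a plain linked list standing in for the kernel's hash table and simplified stand-in types:

    /* Find-or-create keyed on (gfn, role.word): one shadow page exists
     * per (gfn, role) combination. */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct sp_model {
        uint64_t gfn;
        uint32_t role_word;      /* packed role, compared as one word */
        struct sp_model *next;
    };

    static struct sp_model *find_or_alloc(struct sp_model **bucket,
                                          uint64_t gfn, uint32_t role_word)
    {
        struct sp_model *sp;

        for (sp = *bucket; sp; sp = sp->next) {
            if (sp->gfn != gfn || sp->role_word != role_word)
                continue;   /* same gfn, different role: keep looking */
            return sp;      /* exact (gfn, role) match: reuse */
        }

        sp = calloc(1, sizeof(*sp));
        sp->gfn = gfn;
        sp->role_word = role_word;
        sp->next = *bucket;
        *bucket = sp;
        return sp;
    }

    int main(void)
    {
        struct sp_model *bucket = NULL;
        struct sp_model *a = find_or_alloc(&bucket, 0x1000, 0x11);
        struct sp_model *b = find_or_alloc(&bucket, 0x1000, 0x11);
        struct sp_model *c = find_or_alloc(&bucket, 0x1000, 0x12);

        printf("reused: %d, distinct roles: %d\n", a == b, a != c);  /* 1, 1 */
        return 0;
    }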
2305 union kvm_mmu_page_role role; in kvm_mmu_child_role() local
2307 role = parent_sp->role; in kvm_mmu_child_role()
2308 role.level--; in kvm_mmu_child_role()
2309 role.access = access; in kvm_mmu_child_role()
2310 role.direct = direct; in kvm_mmu_child_role()
2311 role.passthrough = 0; in kvm_mmu_child_role()
2339 if (role.has_4_byte_gpte) { in kvm_mmu_child_role()
2340 WARN_ON_ONCE(role.level != PG_LEVEL_4K); in kvm_mmu_child_role()
2341 role.quadrant = spte_index(sptep) & 1; in kvm_mmu_child_role()
2344 return role; in kvm_mmu_child_role()
2351 union kvm_mmu_page_role role; in kvm_mmu_get_child_sp() local
2356 role = kvm_mmu_child_role(sptep, direct, access); in kvm_mmu_get_child_sp()
2357 return kvm_mmu_get_shadow_page(vcpu, gfn, role); in kvm_mmu_get_child_sp()
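Lines 2307-2348 show how a child role is derived: copy the parent's role, step the level down, override access and direct, clear passthrough, and, for 4-byte guest PTEs, take the quadrant from the low bit of the parent SPTE index (a 4K guest table of 4-byte entries holds 1024 PTEs, so each 512-entry shadow page covers half of it). A compact model of that derivation; the bitfield layout is illustrative only:

    /* Child-role derivation in the spirit of kvm_mmu_child_role(). */
    #include <stdint.h>
    #include <stdio.h>

    #define PG_LEVEL_4K 1

    union role_model {
        struct {
            uint32_t level           : 4;
            uint32_t access          : 3;
            uint32_t direct          : 1;
            uint32_t passthrough     : 1;
            uint32_t has_4_byte_gpte : 1;
            uint32_t quadrant        : 2;
        };
        uint32_t word;
    };

    static union role_model child_role(union role_model parent,
                                       unsigned int spte_index,
                                       int direct, unsigned int access)
    {
        union role_model role = parent;

        role.level--;
        role.access = access;
        role.direct = direct;
        role.passthrough = 0;
        /* Low index bit selects which half of the guest table. */
        if (role.has_4_byte_gpte && role.level == PG_LEVEL_4K)
            role.quadrant = spte_index & 1;
        return role;
    }

    int main(void)
    {
        union role_model parent = { 0 }, c;

        parent.level = 2;
        parent.has_4_byte_gpte = 1;
        c = child_role(parent, /*spte_index=*/3, /*direct=*/0, /*access=*/7);

        printf("level=%u quadrant=%u\n", c.level, c.quadrant);  /* 1 and 1 */
        return 0;
    }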
2478 if (child->role.access == direct_access) in validate_direct_spte()
2495 if (is_last_spte(pte, sp->role.level)) { in mmu_page_zap_pte()
2507 child->role.guest_mode && !child->parent_ptes.val) in mmu_page_zap_pte()
2547 if (parent->role.level == PG_LEVEL_4K) in mmu_zap_unsync_children()
2580 if (!sp->role.invalid && sp_has_gptes(sp)) in __kvm_mmu_prepare_zap_page()
2594 if (sp->role.invalid) in __kvm_mmu_prepare_zap_page()
2617 sp->role.invalid = 1; in __kvm_mmu_prepare_zap_page()
2657 WARN_ON_ONCE(!sp->role.invalid || sp->root_count); in kvm_mmu_commit_zap_page()
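Zapping is two-phase: __kvm_mmu_prepare_zap_page() sets role.invalid (line 2617) even when the page is still pinned as a root, and kvm_mmu_commit_zap_page() warns unless the page is invalid and unreferenced (line 2657); line 3552 below shows the deferred free once the last root reference drops. A minimal model of that lifecycle, with stand-in fields and a simplified single-page flow:

    /* Two-phase teardown: mark invalid first, free only when the last
     * root reference is gone. */
    #include <assert.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct sp_model {
        int root_count;   /* vCPUs still using this page as a root */
        int invalid;      /* role.invalid: zapped, awaiting final free */
    };

    static int prepare_zap(struct sp_model *sp)
    {
        sp->invalid = 1;
        return sp->root_count == 0;   /* freeable right away? */
    }

    static void commit_zap(struct sp_model *sp)
    {
        /* Mirrors WARN_ON_ONCE(!sp->role.invalid || sp->root_count). */
        assert(sp->invalid && !sp->root_count);
        free(sp);
    }

    int main(void)
    {
        struct sp_model *sp = calloc(1, sizeof(*sp));

        sp->root_count = 1;            /* still someone's root */
        if (!prepare_zap(sp))
            printf("deferred: invalid=%d\n", sp->invalid);

        sp->root_count = 0;            /* last root reference dropped */
        commit_zap(sp);
        return 0;
    }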
2852 WARN_ON_ONCE(sp->role.level != PG_LEVEL_4K); in mmu_try_to_unsync_pages()
2905 int level = sp->role.level; in mmu_set_spte()
2977 unsigned int access = sp->role.access; in direct_pte_prefetch_many()
3005 WARN_ON_ONCE(!sp->role.direct); in __direct_pte_prefetch()
3038 if (sp->role.level > PG_LEVEL_4K) in direct_pte_prefetch()
3449 if (!is_last_spte(spte, sp->role.level)) in fast_page_fault()
3502 if (sp->role.level > PG_LEVEL_4K && in fast_page_fault()
3552 else if (!--sp->root_count && sp->role.invalid) in mmu_free_root_page()
3634 if (!sp || sp->role.guest_mode) in kvm_mmu_free_guest_mode_roots()
3645 union kvm_mmu_page_role role = vcpu->arch.mmu->root_role; in mmu_alloc_root() local
3648 role.level = level; in mmu_alloc_root()
3649 role.quadrant = quadrant; in mmu_alloc_root()
3651 WARN_ON_ONCE(quadrant && !role.has_4_byte_gpte); in mmu_alloc_root()
3652 WARN_ON_ONCE(role.direct && role.has_4_byte_gpte); in mmu_alloc_root()
3654 sp = kvm_mmu_get_shadow_page(vcpu, gfn, role); in mmu_alloc_root()
4523 union kvm_mmu_page_role role) in is_root_usable() argument
4530 if (!role.direct && pgd != root->pgd) in is_root_usable()
4537 return role.word == sp->role.word; in is_root_usable()
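is_root_usable() (lines 4523-4537) accepts a cached root only when the packed role words match exactly, and for non-direct roots additionally requires the same guest pgd. A compact sketch of that two-part check with stand-in types (the real function also validates the root hpa, omitted here):

    /* Cached-root check modelled on is_root_usable(). */
    #include <stdint.h>
    #include <stdio.h>

    struct root_model {
        uint64_t pgd;        /* guest page-table base this root shadows */
        uint32_t role_word;  /* packed role of the root shadow page */
    };

    static int root_usable(const struct root_model *root, uint64_t pgd,
                           uint32_t role_word, int direct)
    {
        if (!direct && pgd != root->pgd)
            return 0;    /* shadowing a different guest page table */
        return role_word == root->role_word;
    }

    int main(void)
    {
        struct root_model cached = { .pgd = 0xabc000, .role_word = 0x51 };

        printf("%d %d %d\n",
               root_usable(&cached, 0xabc000, 0x51, 0),   /* 1: exact match */
               root_usable(&cached, 0xdef000, 0x51, 0),   /* 0: wrong pgd */
               root_usable(&cached, 0xdef000, 0x51, 1));  /* 1: direct root */
        return 0;
    }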
5113 union kvm_cpu_role role = {0}; in kvm_calc_cpu_role() local
5115 role.base.access = ACC_ALL; in kvm_calc_cpu_role()
5116 role.base.smm = is_smm(vcpu); in kvm_calc_cpu_role()
5117 role.base.guest_mode = is_guest_mode(vcpu); in kvm_calc_cpu_role()
5118 role.ext.valid = 1; in kvm_calc_cpu_role()
5121 role.base.direct = 1; in kvm_calc_cpu_role()
5122 return role; in kvm_calc_cpu_role()
5125 role.base.efer_nx = ____is_efer_nx(regs); in kvm_calc_cpu_role()
5126 role.base.cr0_wp = ____is_cr0_wp(regs); in kvm_calc_cpu_role()
5127 role.base.smep_andnot_wp = ____is_cr4_smep(regs) && !____is_cr0_wp(regs); in kvm_calc_cpu_role()
5128 role.base.smap_andnot_wp = ____is_cr4_smap(regs) && !____is_cr0_wp(regs); in kvm_calc_cpu_role()
5129 role.base.has_4_byte_gpte = !____is_cr4_pae(regs); in kvm_calc_cpu_role()
5132 role.base.level = ____is_cr4_la57(regs) ? PT64_ROOT_5LEVEL in kvm_calc_cpu_role()
5135 role.base.level = PT32E_ROOT_LEVEL; in kvm_calc_cpu_role()
5137 role.base.level = PT32_ROOT_LEVEL; in kvm_calc_cpu_role()
5139 role.ext.cr4_smep = ____is_cr4_smep(regs); in kvm_calc_cpu_role()
5140 role.ext.cr4_smap = ____is_cr4_smap(regs); in kvm_calc_cpu_role()
5141 role.ext.cr4_pse = ____is_cr4_pse(regs); in kvm_calc_cpu_role()
5144 role.ext.cr4_pke = ____is_efer_lma(regs) && ____is_cr4_pke(regs); in kvm_calc_cpu_role()
5145 role.ext.cr4_la57 = ____is_efer_lma(regs) && ____is_cr4_la57(regs); in kvm_calc_cpu_role()
5146 role.ext.efer_lma = ____is_efer_lma(regs); in kvm_calc_cpu_role()
5147 return role; in kvm_calc_cpu_role()
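kvm_calc_cpu_role() folds guest register state into role bits: CR0.WP and EFER.NX are copied through, the SMEP/SMAP-with-!WP combinations are precomputed, 4-byte GPTEs follow from !CR4.PAE, and the paging level falls out of EFER.LMA/CR4.LA57. A simplified userspace sketch of those derivations; the flag and role structs are stand-ins:

    /* Role derivation from register state, after kvm_calc_cpu_role(). */
    #include <stdbool.h>
    #include <stdio.h>

    struct regs_model {
        bool cr0_wp, cr4_pae, cr4_smep, cr4_smap, cr4_la57, efer_lma, efer_nx;
    };

    struct cpu_role_model {
        bool cr0_wp, efer_nx, smep_andnot_wp, smap_andnot_wp, has_4_byte_gpte;
        int level;
    };

    static struct cpu_role_model calc_cpu_role(const struct regs_model *r)
    {
        struct cpu_role_model role = { 0 };

        role.efer_nx = r->efer_nx;
        role.cr0_wp = r->cr0_wp;
        /* Precompute the CR4.SMEP/SMAP with !CR0.WP combinations. */
        role.smep_andnot_wp = r->cr4_smep && !r->cr0_wp;
        role.smap_andnot_wp = r->cr4_smap && !r->cr0_wp;
        /* Non-PAE 32-bit paging is the only mode with 4-byte guest PTEs. */
        role.has_4_byte_gpte = !r->cr4_pae;

        if (r->efer_lma)
            role.level = r->cr4_la57 ? 5 : 4;  /* long mode: 4 or 5 levels */
        else if (r->cr4_pae)
            role.level = 3;                    /* PAE */
        else
            role.level = 2;                    /* legacy 32-bit */
        return role;
    }

    int main(void)
    {
        struct regs_model regs = { .cr4_pae = true, .efer_lma = true,
                                   .cr4_smep = true, .cr0_wp = false };
        struct cpu_role_model role = calc_cpu_role(&regs);

        printf("level=%d smep_andnot_wp=%d 4-byte gpte=%d\n",
               role.level, role.smep_andnot_wp, role.has_4_byte_gpte);
        return 0;
    }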
5182 union kvm_mmu_page_role role = {0}; in kvm_calc_tdp_mmu_root_page_role() local
5184 role.access = ACC_ALL; in kvm_calc_tdp_mmu_root_page_role()
5185 role.cr0_wp = true; in kvm_calc_tdp_mmu_root_page_role()
5186 role.efer_nx = true; in kvm_calc_tdp_mmu_root_page_role()
5187 role.smm = cpu_role.base.smm; in kvm_calc_tdp_mmu_root_page_role()
5188 role.guest_mode = cpu_role.base.guest_mode; in kvm_calc_tdp_mmu_root_page_role()
5189 role.ad_disabled = !kvm_ad_enabled(); in kvm_calc_tdp_mmu_root_page_role()
5190 role.level = kvm_mmu_get_tdp_level(vcpu); in kvm_calc_tdp_mmu_root_page_role()
5191 role.direct = true; in kvm_calc_tdp_mmu_root_page_role()
5192 role.has_4_byte_gpte = false; in kvm_calc_tdp_mmu_root_page_role()
5194 return role; in kvm_calc_tdp_mmu_root_page_role()
5303 union kvm_cpu_role role = {0}; in kvm_calc_shadow_ept_root_page_role() local
5310 role.base.level = level; in kvm_calc_shadow_ept_root_page_role()
5311 role.base.has_4_byte_gpte = false; in kvm_calc_shadow_ept_root_page_role()
5312 role.base.direct = false; in kvm_calc_shadow_ept_root_page_role()
5313 role.base.ad_disabled = !accessed_dirty; in kvm_calc_shadow_ept_root_page_role()
5314 role.base.guest_mode = true; in kvm_calc_shadow_ept_root_page_role()
5315 role.base.access = ACC_ALL; in kvm_calc_shadow_ept_root_page_role()
5317 role.ext.word = 0; in kvm_calc_shadow_ept_root_page_role()
5318 role.ext.execonly = execonly; in kvm_calc_shadow_ept_root_page_role()
5319 role.ext.valid = 1; in kvm_calc_shadow_ept_root_page_role()
5321 return role; in kvm_calc_shadow_ept_root_page_role()
5584 if (sp->role.level == PG_LEVEL_4K) in detect_write_flooding()
5601 pte_size = sp->role.has_4_byte_gpte ? 4 : 8; in detect_write_misaligned()
5623 level = sp->role.level; in get_written_sptes()
5625 if (sp->role.has_4_byte_gpte) { in get_written_sptes()
5639 if (quadrant != sp->role.quadrant) in get_written_sptes()
5685 if (gentry && sp->role.level != PG_LEVEL_4K) in kvm_mmu_track_write()
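detect_write_misaligned() derives the guest PTE size from role.has_4_byte_gpte (line 5601) and flags writes that do not cleanly cover one PTE. The sketch below is a reconstruction of that check; the straddle formula is not visible in the listing above, so treat it as an assumption about the function's body:

    /* Misaligned-write detection: a guest PTE write is suspect if it
     * does not fall entirely within one pte-sized slot. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool write_is_misaligned(bool has_4_byte_gpte,
                                    unsigned int offset, unsigned int bytes)
    {
        unsigned int pte_size = has_4_byte_gpte ? 4 : 8;
        unsigned int misaligned;

        /* Nonzero if the write's first and last byte land in different
         * pte-sized slots, i.e. it straddles a PTE boundary. */
        misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
        return misaligned || bytes < 4;
    }

    int main(void)
    {
        printf("%d\n", write_is_misaligned(false, 0x10, 8)); /* 0: one 8-byte pte */
        printf("%d\n", write_is_misaligned(false, 0x14, 8)); /* 1: straddles */
        printf("%d\n", write_is_misaligned(true,  0x14, 4)); /* 0: one 4-byte pte */
        return 0;
    }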
6070 if (WARN_ON_ONCE(sp->role.invalid)) in kvm_zap_obsolete_pages()
6345 union kvm_mmu_page_role role; in shadow_mmu_get_sp_for_split() local
6358 role = kvm_mmu_child_role(huge_sptep, /*direct=*/true, access); in shadow_mmu_get_sp_for_split()
6365 return __kvm_mmu_get_shadow_page(kvm, NULL, &caches, gfn, role); in shadow_mmu_get_sp_for_split()
6402 flush |= !is_last_spte(*sptep, sp->role.level); in shadow_mmu_split_huge_page()
6406 spte = make_huge_page_split_spte(kvm, huge_spte, sp->role, index); in shadow_mmu_split_huge_page()
6408 __rmap_add(kvm, cache, slot, sptep, gfn, sp->role.access); in shadow_mmu_split_huge_page()
6425 level = huge_sp->role.level; in shadow_mmu_try_split_huge_page()
6467 if (WARN_ON_ONCE(!sp->role.guest_mode)) in shadow_mmu_try_split_huge_pages()
6479 if (sp->role.invalid) in shadow_mmu_try_split_huge_pages()
6588 if (sp->role.direct && in kvm_mmu_zap_collapsible_spte()
6589 sp->role.level < kvm_mmu_max_mapping_level(kvm, slot, sp->gfn, in kvm_mmu_zap_collapsible_spte()
6671 if (WARN_ON_ONCE(sp->role.invalid)) in kvm_mmu_zap_all()
7048 WARN_ON_ONCE(!sp->role.direct); in kvm_recover_nx_huge_pages()
7074 slots = kvm_memslots_for_spte_role(kvm, sp->role); in kvm_recover_nx_huge_pages()