/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_MMU_INTERNAL_H
#define __KVM_X86_MMU_INTERNAL_H

#include <linux/types.h>

#include <asm/kvm_host.h>

struct kvm_mmu_page {
	struct list_head link;
	struct hlist_node hash_link;
	struct list_head lpage_disallowed_link;

	/* sptes may be out of sync with the guest's page table; see kvm_sync_page(). */
	bool unsync;
	/* Generation number; a mismatch with kvm->arch.mmu_valid_gen marks the page obsolete. */
	u8 mmu_valid_gen;
	/* The page contains at least one cached MMIO spte. */
	bool mmio_cached;
	bool lpage_disallowed; /* Can't be replaced by an equivalent large page */

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	union kvm_mmu_page_role role;
	gfn_t gfn;

	u64 *spt;
	/* Holds the gfn of each spte inside spt. */
	gfn_t *gfns;
	int root_count;          /* Currently serving as an active root */
	unsigned int unsync_children;
	struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
	DECLARE_BITMAP(unsync_child_bitmap, 512);

#ifdef CONFIG_X86_32
	/*
	 * Used outside of the mmu-lock to avoid reading spte values while an
	 * update is in progress; see the comments in __get_spte_lockless().
	 */
	int clear_spte_count;
#endif

	/* Number of writes since the last time a traversal visited this page. */
	atomic_t write_flooding_count;
};
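
/*
 * Illustrative sketch only (not part of this header's API): spt[] and gfns[]
 * are parallel arrays, so the gfn backing a given spte can be recovered from
 * the spte's index within its page.  A hypothetical helper might read:
 *
 *	static gfn_t sp_gfn_at(struct kvm_mmu_page *sp, int index)
 *	{
 *		return sp->gfns[index];	// valid only for indirect pages
 *	}
 *
 * The name sp_gfn_at() is made up for illustration; the real lookup in mmu.c
 * must also handle direct shadow pages, which have no gfns[] array and
 * compute the gfn from sp->gfn and the index instead.
 */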

/*
 * Retrieve the kvm_mmu_page that tracks the shadow page table at @shadow_page;
 * the metadata is stashed in the backing struct page's private field when the
 * shadow page is allocated.
 */
static inline struct kvm_mmu_page *to_shadow_page(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}
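
/*
 * Usage sketch (illustrative, assuming the kernel vintage where the active
 * MMU root is tracked as vcpu->arch.mmu->root_hpa): the metadata for the
 * current root can be recovered directly from its physical address:
 *
 *	struct kvm_mmu_page *root_sp = to_shadow_page(vcpu->arch.mmu->root_hpa);
 *
 * This only holds when root_hpa points at a shadow page allocated by KVM,
 * i.e. not when it is INVALID_PAGE.
 */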

/*
 * Map a pointer to an spte back to the shadow page containing it; all sptes
 * of a table live in one page, so the page offset dropped by __pa() +
 * to_shadow_page() is irrelevant.
 */
static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
{
	return to_shadow_page(__pa(sptep));
}
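
/*
 * Sketch of the common pattern (illustrative only): code holding a pointer
 * into a shadow page table can hop to the owning page's metadata, e.g. to
 * consult its role:
 *
 *	struct kvm_mmu_page *sp = sptep_to_sp(sptep);
 *
 *	if (sp->role.level == PG_LEVEL_4K)
 *		... the sptes in this page map 4KiB pages ...
 */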

void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
				    struct kvm_memory_slot *slot, u64 gfn);
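
/*
 * Illustrative pairing (the callers live in mmu.c, not this header): when a
 * shadow page forces a gfn to stay mapped at 4K, the gfn is accounted via
 * kvm_mmu_gfn_disallow_lpage() and must be released with the matching
 * kvm_mmu_gfn_allow_lpage() when the shadow page goes away:
 *
 *	kvm_mmu_gfn_disallow_lpage(slot, sp->gfn);
 *	...
 *	kvm_mmu_gfn_allow_lpage(slot, sp->gfn);
 *
 * The calls adjust the slot's per-gfn disallow_lpage counts, so they must
 * stay balanced.
 */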

#endif /* __KVM_X86_MMU_INTERNAL_H */