xref: /openbmc/linux/include/linux/kvm_types.h (revision f530b531)
/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef __KVM_TYPES_H__
#define __KVM_TYPES_H__

struct kvm;
struct kvm_async_pf;
struct kvm_device_ops;
struct kvm_interrupt;
struct kvm_irq_routing_table;
struct kvm_memory_slot;
struct kvm_one_reg;
struct kvm_run;
struct kvm_userspace_memory_region;
struct kvm_vcpu;
struct kvm_vcpu_init;
struct kvm_memslots;

enum kvm_mr_change;

#include <linux/bits.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/spinlock_types.h>

#include <asm/kvm_types.h>

/*
 * Address types:
 *
 *  gva - guest virtual address
 *  gpa - guest physical address
 *  gfn - guest frame number
 *  hva - host virtual address
 *  hpa - host physical address
 *  hfn - host frame number
 */

typedef unsigned long  gva_t;
typedef u64            gpa_t;
typedef u64            gfn_t;

#define INVALID_GPA	(~(gpa_t)0)

typedef unsigned long  hva_t;
typedef u64            hpa_t;
typedef u64            hfn_t;

typedef hfn_t kvm_pfn_t;
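
/*
 * Illustrative sketch, not part of this header: a gfn is just the
 * page-frame portion of a gpa, so converting between the two is a
 * PAGE_SHIFT shift.  <linux/kvm_host.h> provides helpers to this effect
 * (gpa_to_gfn()/gfn_to_gpa()); the lines below restate the idea and are
 * not the canonical definitions.
 *
 *	gfn_t gfn = (gfn_t)(gpa >> PAGE_SHIFT);			// strip the page offset
 *	gpa_t aligned = (gpa_t)((u64)gfn << PAGE_SHIFT);	// back to a page-aligned gpa
 */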

enum pfn_cache_usage {
	KVM_GUEST_USES_PFN = BIT(0),
	KVM_HOST_USES_PFN  = BIT(1),
	KVM_GUEST_AND_HOST_USE_PFN = KVM_GUEST_USES_PFN | KVM_HOST_USES_PFN,
};

struct gfn_to_hva_cache {
	u64 generation;
	gpa_t gpa;
	unsigned long hva;
	unsigned long len;
	struct kvm_memory_slot *memslot;
};
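
/*
 * Typical use, sketched for illustration (the real declarations live in
 * <linux/kvm_host.h>): initialize the cache once for a gpa/len pair,
 * then reuse it for repeated guest accesses.  @generation is compared
 * against the memslot generation on each access, so a stale gpa->hva
 * translation is redone rather than trusted.
 *
 *	struct gfn_to_hva_cache ghc;
 *	u64 val = 0;
 *
 *	if (!kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa, sizeof(val)))
 *		kvm_write_guest_cached(kvm, &ghc, &val, sizeof(val));
 */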

struct gfn_to_pfn_cache {
	u64 generation;
	gpa_t gpa;
	unsigned long uhva;
	struct kvm_memory_slot *memslot;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	struct list_head list;
	rwlock_t lock;
	struct mutex refresh_lock;
	void *khva;
	kvm_pfn_t pfn;
	enum pfn_cache_usage usage;
	bool active;
	bool valid;
};
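
/*
 * Rough usage sketch; the kvm_gpc_*() signatures have shifted across
 * kernel versions, so treat this as an outline rather than the exact
 * API.  A pfn cache is initialized once, activated for a gpa range,
 * and then checked (and refreshed if invalidated) under @lock before
 * each use of @khva/@pfn.
 *
 *	struct gfn_to_pfn_cache gpc;
 *
 *	kvm_gpc_init(&gpc, kvm, NULL, KVM_HOST_USES_PFN);
 *	if (!kvm_gpc_activate(&gpc, gpa, PAGE_SIZE)) {
 *		read_lock(&gpc.lock);
 *		if (kvm_gpc_check(&gpc, PAGE_SIZE))
 *			memcpy(gpc.khva, src, len);	// hypothetical payload copy
 *		read_unlock(&gpc.lock);
 *	}
 *	kvm_gpc_deactivate(&gpc);
 */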

#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
/*
 * Memory caches are used to preallocate memory ahead of various MMU flows,
 * e.g. page fault handlers.  Gracefully handling allocation failures deep in
 * MMU flows is problematic, as is triggering reclaim, I/O, etc... while
 * holding MMU locks.  Note, these caches act more like prefetch buffers than
 * classical caches, i.e. objects are not returned to the cache on being freed.
 *
 * The @capacity field and @objects array are lazily initialized when the cache
 * is topped up (__kvm_mmu_topup_memory_cache()).
 */
struct kvm_mmu_memory_cache {
	gfp_t gfp_zero;
	gfp_t gfp_custom;
	struct kmem_cache *kmem_cache;
	int capacity;
	int nobjs;
	void **objects;
};
#endif
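
/*
 * Illustrative flow, assuming the kvm_mmu_*_memory_cache() helpers
 * declared in <linux/kvm_host.h>: top up the cache while sleeping
 * allocations are still allowed, then allocate from it later in a
 * context where failure or reclaim would be unacceptable (e.g. with
 * the MMU lock held).
 *
 *	// mmu_pte_cache is a hypothetical arch field, for illustration
 *	struct kvm_mmu_memory_cache *mc = &vcpu->arch.mmu_pte_cache;
 *
 *	r = kvm_mmu_topup_memory_cache(mc, 4);	// may sleep, may fail
 *	if (r)
 *		return r;
 *	...
 *	pte = kvm_mmu_memory_cache_alloc(mc);	// succeeds once topped up
 */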

#define HALT_POLL_HIST_COUNT			32

struct kvm_vm_stat_generic {
	u64 remote_tlb_flush;
	u64 remote_tlb_flush_requests;
};

struct kvm_vcpu_stat_generic {
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
	u64 halt_poll_invalid;
	u64 halt_wakeup;
	u64 halt_poll_success_ns;
	u64 halt_poll_fail_ns;
	u64 halt_wait_ns;
	u64 halt_poll_success_hist[HALT_POLL_HIST_COUNT];
	u64 halt_poll_fail_hist[HALT_POLL_HIST_COUNT];
	u64 halt_wait_hist[HALT_POLL_HIST_COUNT];
	u64 blocking;
};
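
/*
 * The *_hist[] arrays above are log2 histograms of durations in ns:
 * each sample lands in bucket fls64(value), capped at the last bucket.
 * Generic code updates them with a helper along the lines of
 * KVM_STATS_LOG_HIST_UPDATE() in <linux/kvm_host.h>; a simplified
 * sketch:
 *
 *	size_t idx = min_t(size_t, fls64(ns), HALT_POLL_HIST_COUNT - 1);
 *	vcpu->stat.generic.halt_wait_hist[idx]++;
 */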

#define KVM_STATS_NAME_SIZE	48

#endif /* __KVM_TYPES_H__ */