/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * tools/testing/selftests/kvm/include/kvm_util_base.h
 *
 * Copyright (C) 2018, Google LLC.
 */
#ifndef SELFTEST_KVM_UTIL_BASE_H
#define SELFTEST_KVM_UTIL_BASE_H

#include "test_util.h"

#include "asm/kvm.h"
#include "linux/list.h"
#include "linux/kvm.h"
#include <sys/ioctl.h>

#include "sparsebit.h"

#define KVM_DEV_PATH "/dev/kvm"
#define KVM_MAX_VCPUS 512

#define NSEC_PER_SEC 1000000000L

/*
 * Callers of kvm_util only have an incomplete/opaque description of the
 * structure kvm_util is using to maintain the state of a VM.
 */
struct kvm_vm;

typedef uint64_t vm_paddr_t; /* Virtual Machine (Guest) physical address */
typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */

/* Minimum allocated guest virtual and physical addresses */
#define KVM_UTIL_MIN_VADDR		0x2000
#define KVM_GUEST_PAGE_TABLE_MIN_PADDR	0x180000

#define DEFAULT_GUEST_PHY_PAGES		512
#define DEFAULT_GUEST_STACK_VADDR_MIN	0xab6000
#define DEFAULT_STACK_PGS		5

enum vm_guest_mode {
	VM_MODE_P52V48_4K,
	VM_MODE_P52V48_64K,
	VM_MODE_P48V48_4K,
	VM_MODE_P48V48_16K,
	VM_MODE_P48V48_64K,
	VM_MODE_P40V48_4K,
	VM_MODE_P40V48_16K,
	VM_MODE_P40V48_64K,
	VM_MODE_PXXV48_4K,	/* 48-bit VA, any supported number of PA bits */
	VM_MODE_P47V64_4K,
	VM_MODE_P44V64_4K,
	VM_MODE_P36V48_4K,
	VM_MODE_P36V48_16K,
	VM_MODE_P36V48_64K,
	VM_MODE_P36V47_16K,
	NUM_VM_MODES,
};

#if defined(__aarch64__)

extern enum vm_guest_mode vm_mode_default;

#define VM_MODE_DEFAULT			vm_mode_default
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#elif defined(__x86_64__)

#define VM_MODE_DEFAULT			VM_MODE_PXXV48_4K
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#elif defined(__s390x__)

#define VM_MODE_DEFAULT			VM_MODE_P44V64_4K
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 16)

#elif defined(__riscv)

#if __riscv_xlen == 32
#error "RISC-V 32-bit kvm selftests not supported"
#endif

#define VM_MODE_DEFAULT			VM_MODE_P40V48_4K
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#endif

#define MIN_PAGE_SIZE		(1U << MIN_PAGE_SHIFT)
#define PTES_PER_MIN_PAGE	ptes_per_page(MIN_PAGE_SIZE)

struct vm_guest_mode_params {
	unsigned int pa_bits;
	unsigned int va_bits;
	unsigned int page_size;
	unsigned int page_shift;
};
extern const struct vm_guest_mode_params vm_guest_mode_params[];
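
/*
 * Illustrative sketch (not part of the library): look up a mode's page
 * geometry before sizing a memory region.  "region_size" is a hypothetical
 * caller-provided byte count.
 *
 *	const struct vm_guest_mode_params *p = &vm_guest_mode_params[VM_MODE_DEFAULT];
 *	uint64_t npages = (region_size + p->page_size - 1) / p->page_size;
 */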

int open_path_or_exit(const char *path, int flags);
int open_kvm_dev_path_or_exit(void);
int kvm_check_cap(long cap);
int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap);
int vcpu_enable_cap(struct kvm_vm *vm, uint32_t vcpu_id,
		    struct kvm_enable_cap *cap);
void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size);
const char *vm_guest_mode_string(uint32_t i);

struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm);
void kvm_vm_free(struct kvm_vm *vmp);
void kvm_vm_restart(struct kvm_vm *vmp, int perm);
void kvm_vm_release(struct kvm_vm *vmp);
void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log);
void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
			    uint64_t first_page, uint32_t num_pages);
uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm);
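
/*
 * Hedged sketch of the usual VM lifecycle: create a VM with the default
 * mode, run the test body, fetch the dirty bitmap for a slot, and tear
 * everything down.  "TEST_MEM_SLOT" and "bitmap" are caller-provided
 * placeholders; O_RDWR is the customary @perm value.
 *
 *	struct kvm_vm *vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
 *	...
 *	kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT, bitmap);
 *	kvm_vm_free(vm);
 */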

int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, const vm_vaddr_t gva,
		       size_t len);

void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename);

void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);

/*
 * VM VCPU Dump
 *
 * Input Args:
 *   stream - Output FILE stream
 *   vm     - Virtual Machine
 *   vcpuid - VCPU ID
 *   indent - Left margin indent amount
 *
 * Output Args: None
 *
 * Return: None
 *
 * Dumps the current state of the VCPU specified by @vcpuid, within the VM
 * given by @vm, to the FILE stream given by @stream.
 */
void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid,
	       uint8_t indent);
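
/*
 * Example (sketch): dump vCPU 0's state to stderr, with no indentation,
 * when a test hits an unexpected exit.
 *
 *	vcpu_dump(stderr, vm, 0, 0);
 */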

void vm_create_irqchip(struct kvm_vm *vm);

void vm_userspace_mem_region_add(struct kvm_vm *vm,
	enum vm_mem_backing_src_type src_type,
	uint64_t guest_paddr, uint32_t slot, uint64_t npages,
	uint32_t flags);
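
/*
 * Sketch of adding a second, anonymous-memory-backed memslot; the guest
 * physical address, slot number and page count below are arbitrary
 * example values.
 *
 *	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
 *				    0x10000000, 1, 512, 0);
 */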

void vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, unsigned long ioctl,
		void *arg);
int _vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, unsigned long ioctl,
		void *arg);
void vm_ioctl(struct kvm_vm *vm, unsigned long ioctl, void *arg);
int _vm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg);
void kvm_ioctl(struct kvm_vm *vm, unsigned long ioctl, void *arg);
int _kvm_ioctl(struct kvm_vm *vm, unsigned long ioctl, void *arg);
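
/*
 * The non-underscore wrappers above assert success; the underscore
 * variants return the raw ioctl result.  A minimal sketch, assuming the
 * caller wants the raw return value of KVM_GET_API_VERSION issued on the
 * /dev/kvm fd owned by @vm:
 *
 *	int ver = _kvm_ioctl(vm, KVM_GET_API_VERSION, NULL);
 */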
void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid);
vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm);

void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
	      unsigned int npages);
void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa);

/*
 * Address Guest Virtual to Guest Physical
 *
 * Input Args:
 *   vm - Virtual Machine
 *   gva - VM virtual address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent VM physical address
 *
 * Returns the VM physical address of the translated VM virtual
 * address given by @gva.
 */
vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);
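
/*
 * Sketch: walk a guest virtual address down to a host pointer, either via
 * the intermediate guest physical address or directly; the two-step form
 * below is equivalent to addr_gva2hva(vm, gva).
 *
 *	vm_paddr_t gpa = addr_gva2gpa(vm, gva);
 *	void *hva = addr_gpa2hva(vm, gpa);
 */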

struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid);
void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid);
int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid);
int vcpu_get_fd(struct kvm_vm *vm, uint32_t vcpuid);
void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid);
void vcpu_set_guest_debug(struct kvm_vm *vm, uint32_t vcpuid,
			  struct kvm_guest_debug *debug);
void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid,
		       struct kvm_mp_state *mp_state);
struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vm *vm, uint32_t vcpuid);
void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs);
void vcpu_regs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs);

/*
 * VM VCPU Args Set
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *   num - number of arguments
 *   ... - arguments, each of type uint64_t
 *
 * Output Args: None
 *
 * Return: None
 *
 * Sets the first @num function input registers of the VCPU with @vcpuid,
 * per the C calling convention of the architecture, to the values given
 * as variable args. Each of the variable args is expected to be of type
 * uint64_t. The maximum @num can be is specific to the architecture.
 */
void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...);
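
/*
 * Example sketch: hand two arguments to a guest entry point declared as
 * guest_code(uint64_t a, uint64_t b) before the first KVM_RUN.  The
 * values are illustrative only.
 *
 *	vcpu_args_set(vm, 0, 2, (uint64_t)42, (uint64_t)0x1000);
 */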

void vcpu_sregs_get(struct kvm_vm *vm, uint32_t vcpuid,
		    struct kvm_sregs *sregs);
void vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid,
		    struct kvm_sregs *sregs);
int _vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid,
		    struct kvm_sregs *sregs);
void vcpu_fpu_get(struct kvm_vm *vm, uint32_t vcpuid,
		  struct kvm_fpu *fpu);
void vcpu_fpu_set(struct kvm_vm *vm, uint32_t vcpuid,
		  struct kvm_fpu *fpu);
void vcpu_get_reg(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_one_reg *reg);
void vcpu_set_reg(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_one_reg *reg);
#ifdef __KVM_HAVE_VCPU_EVENTS
void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid,
		     struct kvm_vcpu_events *events);
void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid,
		     struct kvm_vcpu_events *events);
#endif
#ifdef __x86_64__
void vcpu_nested_state_get(struct kvm_vm *vm, uint32_t vcpuid,
			   struct kvm_nested_state *state);
int vcpu_nested_state_set(struct kvm_vm *vm, uint32_t vcpuid,
			  struct kvm_nested_state *state, bool ignore_error);
#endif
void *vcpu_map_dirty_ring(struct kvm_vm *vm, uint32_t vcpuid);

int _kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr);
int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr);
int _kvm_create_device(struct kvm_vm *vm, uint64_t type, bool test, int *fd);
int kvm_create_device(struct kvm_vm *vm, uint64_t type, bool test);
int _kvm_device_access(int dev_fd, uint32_t group, uint64_t attr,
		       void *val, bool write);
int kvm_device_access(int dev_fd, uint32_t group, uint64_t attr,
		      void *val, bool write);
void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
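
/*
 * Hedged sketch of the in-kernel device helpers: create a device and set
 * one of its attributes.  KVM_DEV_TYPE_VFIO is only an example type;
 * "group", "attr" and "val" are caller-provided placeholders.
 *
 *	int dev_fd = kvm_create_device(vm, KVM_DEV_TYPE_VFIO, false);
 *	kvm_device_access(dev_fd, group, attr, &val, true);
 */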

int _vcpu_has_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
			  uint64_t attr);
int vcpu_has_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
			 uint64_t attr);
int _vcpu_access_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
			  uint64_t attr, void *val, bool write);
int vcpu_access_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
			 uint64_t attr, void *val, bool write);

#define KVM_MAX_IRQ_ROUTES		4096

struct kvm_irq_routing *kvm_gsi_routing_create(void);
void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing,
		uint32_t gsi, uint32_t pin);
int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);

const char *exit_reason_str(unsigned int exit_reason);

void virt_pgd_alloc(struct kvm_vm *vm);

/*
 * VM Virtual Page Map
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vaddr - VM Virtual Address
 *   paddr - VM Physical Address
 *
 * Output Args: None
 *
 * Return: None
 *
 * Within @vm, creates a virtual translation for the page starting
 * at @vaddr to the page starting at @paddr.
 */
void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr);

vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
			     uint32_t memslot);
vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
			      vm_paddr_t paddr_min, uint32_t memslot);
vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);
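
/*
 * Sketch of a manual mapping: allocate one guest physical page from
 * memslot 0 and map it at an arbitrary guest virtual address.  The
 * virtual address and minimum physical address below are example values.
 *
 *	vm_paddr_t paddr = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
 *	virt_pg_map(vm, 0x400000, paddr);
 */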

/*
 * Create a VM with reasonable defaults
 *
 * Input Args:
 *   vcpuid - The id of the single VCPU to add to the VM.
 *   extra_mem_pages - The number of extra pages to add (this will
 *                     decide how much extra space we will need to
 *                     set up the page tables using memslot 0)
 *   guest_code - The vCPU's entry point
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to opaque structure that describes the created VM.
 */
struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
				 void *guest_code);
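
/*
 * Minimal single-vCPU setup built on the helper above, shown as a sketch;
 * "guest_main" stands in for any guest entry point.
 *
 *	struct kvm_vm *vm = vm_create_default(0, 0, guest_main);
 *	vcpu_run(vm, 0);
 *	kvm_vm_free(vm);
 */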

/* Same as vm_create_default, but can be used for more than one vcpu */
struct kvm_vm *vm_create_default_with_vcpus(uint32_t nr_vcpus, uint64_t extra_mem_pages,
					    uint32_t num_percpu_pages, void *guest_code,
					    uint32_t vcpuids[]);

/* Like vm_create_default_with_vcpus, but accepts mode and slot0 memory as parameters */
struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
				    uint64_t slot0_mem_pages, uint64_t extra_mem_pages,
				    uint32_t num_percpu_pages, void *guest_code,
				    uint32_t vcpuids[]);

/*
 * Adds a vCPU with reasonable defaults (e.g. a stack)
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - The id of the VCPU to add to the VM.
 *   guest_code - The vCPU's entry point
 */
void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code);
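
/*
 * Sketch: add a second vCPU to an existing VM, reusing the same entry
 * point as vCPU 0 ("guest_main" is a placeholder).
 *
 *	vm_vcpu_add_default(vm, 1, guest_main);
 */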

bool vm_is_unrestricted_guest(struct kvm_vm *vm);

unsigned int vm_get_page_size(struct kvm_vm *vm);
unsigned int vm_get_page_shift(struct kvm_vm *vm);
unsigned long vm_compute_max_gfn(struct kvm_vm *vm);
uint64_t vm_get_max_gfn(struct kvm_vm *vm);
int vm_get_fd(struct kvm_vm *vm);

unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size);
unsigned int vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages);
unsigned int vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages);
static inline unsigned int
vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
{
	unsigned int n;
	n = vm_num_guest_pages(mode, vm_num_host_pages(mode, num_guest_pages));
#ifdef __s390x__
	/* s390 requires 1M aligned guest sizes */
	n = (n + 255) & ~255;
#endif
	return n;
}
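
/*
 * Sketch: round a requested guest page count to something every
 * architecture can back with whole host pages (and, on s390x, a
 * 1M-aligned size).  "wanted_pages" is a placeholder.
 *
 *	unsigned int pages = vm_adjust_num_guest_pages(VM_MODE_DEFAULT, wanted_pages);
 */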

struct kvm_userspace_memory_region *
kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
				 uint64_t end);

struct kvm_dirty_log *
allocate_kvm_dirty_log(struct kvm_userspace_memory_region *region);

int vm_create_device(struct kvm_vm *vm, struct kvm_create_device *cd);

#define sync_global_to_guest(vm, g) ({				\
	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
	memcpy(_p, &(g), sizeof(g));				\
})

#define sync_global_from_guest(vm, g) ({			\
	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
	memcpy(&(g), _p, sizeof(g));				\
})
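
/*
 * Sketch: mirror a global variable between host and guest around a run of
 * the vCPU.  "iteration" stands in for any global visible to both host
 * and guest code.
 *
 *	sync_global_to_guest(vm, iteration);
 *	vcpu_run(vm, 0);
 *	sync_global_from_guest(vm, iteration);
 */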

void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid);

/* Common ucalls */
enum {
	UCALL_NONE,
	UCALL_SYNC,
	UCALL_ABORT,
	UCALL_DONE,
	UCALL_UNHANDLED,
};

#define UCALL_MAX_ARGS 6

struct ucall {
	uint64_t cmd;
	uint64_t args[UCALL_MAX_ARGS];
};

void ucall_init(struct kvm_vm *vm, void *arg);
void ucall_uninit(struct kvm_vm *vm);
void ucall(uint64_t cmd, int nargs, ...);
uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc);
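
/*
 * Typical host-side handling of guest ucalls, shown only as a sketch: run
 * vCPU 0 until the guest reports UCALL_DONE, treating UCALL_ABORT as a
 * test failure (args[1] carries the guest's __LINE__).
 *
 *	struct ucall uc;
 *
 *	for (;;) {
 *		vcpu_run(vm, 0);
 *		switch (get_ucall(vm, 0, &uc)) {
 *		case UCALL_SYNC:
 *			continue;
 *		case UCALL_ABORT:
 *			TEST_FAIL("guest assert at line %lu",
 *				  (unsigned long)uc.args[1]);
 *		case UCALL_DONE:
 *			return;
 *		}
 *	}
 */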

#define GUEST_SYNC_ARGS(stage, arg1, arg2, arg3, arg4)	\
				ucall(UCALL_SYNC, 6, "hello", stage, arg1, arg2, arg3, arg4)
#define GUEST_SYNC(stage)	ucall(UCALL_SYNC, 2, "hello", stage)
#define GUEST_DONE()		ucall(UCALL_DONE, 0)
#define __GUEST_ASSERT(_condition, _condstr, _nargs, _args...) do {    \
	if (!(_condition))                                              \
		ucall(UCALL_ABORT, 2 + _nargs,                          \
			"Failed guest assert: "                         \
			_condstr, __LINE__, _args);                     \
} while (0)

#define GUEST_ASSERT(_condition) \
	__GUEST_ASSERT(_condition, #_condition, 0, 0)

#define GUEST_ASSERT_1(_condition, arg1) \
	__GUEST_ASSERT(_condition, #_condition, 1, (arg1))

#define GUEST_ASSERT_2(_condition, arg1, arg2) \
	__GUEST_ASSERT(_condition, #_condition, 2, (arg1), (arg2))

#define GUEST_ASSERT_3(_condition, arg1, arg2, arg3) \
	__GUEST_ASSERT(_condition, #_condition, 3, (arg1), (arg2), (arg3))

#define GUEST_ASSERT_4(_condition, arg1, arg2, arg3, arg4) \
	__GUEST_ASSERT(_condition, #_condition, 4, (arg1), (arg2), (arg3), (arg4))

#define GUEST_ASSERT_EQ(a, b) __GUEST_ASSERT((a) == (b), #a " == " #b, 2, a, b)
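
/*
 * Guest-side counterpart, as a sketch: report progress with GUEST_SYNC(),
 * check invariants with the GUEST_ASSERT() family, and finish with
 * GUEST_DONE().  "some_value" and "expected" are placeholders.
 *
 *	static void guest_main(void)
 *	{
 *		GUEST_SYNC(1);
 *		GUEST_ASSERT_1(some_value == expected, some_value);
 *		GUEST_DONE();
 *	}
 */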

int vm_get_stats_fd(struct kvm_vm *vm);
int vcpu_get_stats_fd(struct kvm_vm *vm, uint32_t vcpuid);

uint32_t guest_get_vcpuid(void);

#endif /* SELFTEST_KVM_UTIL_BASE_H */