/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * tools/testing/selftests/kvm/include/kvm_util_base.h
 *
 * Copyright (C) 2018, Google LLC.
 */
#ifndef SELFTEST_KVM_UTIL_BASE_H
#define SELFTEST_KVM_UTIL_BASE_H

#include "test_util.h"

#include <linux/compiler.h>
#include "linux/hashtable.h"
#include "linux/list.h"
#include <linux/kernel.h>
#include <linux/kvm.h>
#include "linux/rbtree.h"
#include <linux/types.h>

#include <asm/atomic.h>

#include <sys/ioctl.h>

#include "sparsebit.h"

/*
 * Provide a version of static_assert() that is guaranteed to have an optional
 * message param.  If _ISOC11_SOURCE is defined, glibc (/usr/include/assert.h)
 * #undefs and #defines static_assert() as a direct alias to _Static_assert(),
 * i.e. effectively makes the message mandatory.  Many KVM selftests #define
 * _GNU_SOURCE for various reasons, and _GNU_SOURCE implies _ISOC11_SOURCE.  As
 * a result, static_assert() behavior is non-deterministic and may or may not
 * require a message depending on #include order.
 */
#define __kvm_static_assert(expr, msg, ...) _Static_assert(expr, msg)
#define kvm_static_assert(expr, ...) __kvm_static_assert(expr, ##__VA_ARGS__, #expr)
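
/*
 * Illustrative example (not part of the original header): both of the forms
 * below compile regardless of whether _ISOC11_SOURCE is in effect, which is
 * exactly what the wrapper guarantees.
 *
 *	kvm_static_assert(sizeof(uint64_t) == 8);
 *	kvm_static_assert(sizeof(uint64_t) == 8, "uint64_t must be 8 bytes");
 */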
370c326523SSean Christopherson 
387d9a662eSMichael Roth #define KVM_DEV_PATH "/dev/kvm"
397d9a662eSMichael Roth #define KVM_MAX_VCPUS 512
407d9a662eSMichael Roth 
417d9a662eSMichael Roth #define NSEC_PER_SEC 1000000000L
427d9a662eSMichael Roth 
437d9a662eSMichael Roth typedef uint64_t vm_paddr_t; /* Virtual Machine (Guest) physical address */
447d9a662eSMichael Roth typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */
457d9a662eSMichael Roth 
46b530eba1SSean Christopherson struct userspace_mem_region {
47b530eba1SSean Christopherson 	struct kvm_userspace_memory_region region;
48b530eba1SSean Christopherson 	struct sparsebit *unused_phy_pages;
49b530eba1SSean Christopherson 	int fd;
50b530eba1SSean Christopherson 	off_t offset;
51bd3ed7e1SRicardo Koller 	enum vm_mem_backing_src_type backing_src_type;
52b530eba1SSean Christopherson 	void *host_mem;
53b530eba1SSean Christopherson 	void *host_alias;
54b530eba1SSean Christopherson 	void *mmap_start;
55b530eba1SSean Christopherson 	void *mmap_alias;
56b530eba1SSean Christopherson 	size_t mmap_size;
57b530eba1SSean Christopherson 	struct rb_node gpa_node;
58b530eba1SSean Christopherson 	struct rb_node hva_node;
59b530eba1SSean Christopherson 	struct hlist_node slot_node;
60b530eba1SSean Christopherson };
61b530eba1SSean Christopherson 
620cc64b08SSean Christopherson struct kvm_vcpu {
63b530eba1SSean Christopherson 	struct list_head list;
64b530eba1SSean Christopherson 	uint32_t id;
65b530eba1SSean Christopherson 	int fd;
66c472df1aSSean Christopherson 	struct kvm_vm *vm;
671079c3d4SSean Christopherson 	struct kvm_run *run;
687fbc6038SSean Christopherson #ifdef __x86_64__
697fbc6038SSean Christopherson 	struct kvm_cpuid2 *cpuid;
707fbc6038SSean Christopherson #endif
71b530eba1SSean Christopherson 	struct kvm_dirty_gfn *dirty_gfns;
72b530eba1SSean Christopherson 	uint32_t fetch_index;
73b530eba1SSean Christopherson 	uint32_t dirty_gfns_count;
74b530eba1SSean Christopherson };
75b530eba1SSean Christopherson 
76b530eba1SSean Christopherson struct userspace_mem_regions {
77b530eba1SSean Christopherson 	struct rb_root gpa_tree;
78b530eba1SSean Christopherson 	struct rb_root hva_tree;
79b530eba1SSean Christopherson 	DECLARE_HASHTABLE(slot_hash, 9);
80b530eba1SSean Christopherson };
81b530eba1SSean Christopherson 
82290c5b54SRicardo Koller enum kvm_mem_region_type {
83290c5b54SRicardo Koller 	MEM_REGION_CODE,
84290c5b54SRicardo Koller 	MEM_REGION_DATA,
85290c5b54SRicardo Koller 	MEM_REGION_PT,
86290c5b54SRicardo Koller 	MEM_REGION_TEST_DATA,
87290c5b54SRicardo Koller 	NR_MEM_REGIONS,
88290c5b54SRicardo Koller };
89290c5b54SRicardo Koller 
90b530eba1SSean Christopherson struct kvm_vm {
91b530eba1SSean Christopherson 	int mode;
92b530eba1SSean Christopherson 	unsigned long type;
93b530eba1SSean Christopherson 	int kvm_fd;
94b530eba1SSean Christopherson 	int fd;
95b530eba1SSean Christopherson 	unsigned int pgtable_levels;
96b530eba1SSean Christopherson 	unsigned int page_size;
97b530eba1SSean Christopherson 	unsigned int page_shift;
98b530eba1SSean Christopherson 	unsigned int pa_bits;
99b530eba1SSean Christopherson 	unsigned int va_bits;
100b530eba1SSean Christopherson 	uint64_t max_gfn;
101b530eba1SSean Christopherson 	struct list_head vcpus;
102b530eba1SSean Christopherson 	struct userspace_mem_regions regions;
103b530eba1SSean Christopherson 	struct sparsebit *vpages_valid;
104b530eba1SSean Christopherson 	struct sparsebit *vpages_mapped;
105b530eba1SSean Christopherson 	bool has_irqchip;
106b530eba1SSean Christopherson 	bool pgd_created;
10703b47505SSean Christopherson 	vm_paddr_t ucall_mmio_addr;
108b530eba1SSean Christopherson 	vm_paddr_t pgd;
109b530eba1SSean Christopherson 	vm_vaddr_t gdt;
110b530eba1SSean Christopherson 	vm_vaddr_t tss;
111b530eba1SSean Christopherson 	vm_vaddr_t idt;
112b530eba1SSean Christopherson 	vm_vaddr_t handlers;
113b530eba1SSean Christopherson 	uint32_t dirty_ring_size;
11483f6e109SBen Gardon 
11583f6e109SBen Gardon 	/* Cache of information for binary stats interface */
11683f6e109SBen Gardon 	int stats_fd;
11783f6e109SBen Gardon 	struct kvm_stats_header stats_header;
11883f6e109SBen Gardon 	struct kvm_stats_desc *stats_desc;
119290c5b54SRicardo Koller 
120290c5b54SRicardo Koller 	/*
121290c5b54SRicardo Koller 	 * KVM region slots. These are the default memslots used by page
122290c5b54SRicardo Koller 	 * allocators, e.g., lib/elf uses the memslots[MEM_REGION_CODE]
123290c5b54SRicardo Koller 	 * memslot.
124290c5b54SRicardo Koller 	 */
125290c5b54SRicardo Koller 	uint32_t memslots[NR_MEM_REGIONS];
126b530eba1SSean Christopherson };
127b530eba1SSean Christopherson 
1289177b715SAndrew Jones struct vcpu_reg_sublist {
1299177b715SAndrew Jones 	const char *name;
1309177b715SAndrew Jones 	long capability;
1319177b715SAndrew Jones 	int feature;
1329177b715SAndrew Jones 	bool finalize;
1339177b715SAndrew Jones 	__u64 *regs;
1349177b715SAndrew Jones 	__u64 regs_n;
1359177b715SAndrew Jones 	__u64 *rejects_set;
1369177b715SAndrew Jones 	__u64 rejects_set_n;
1379177b715SAndrew Jones };
1389177b715SAndrew Jones 
1399177b715SAndrew Jones struct vcpu_reg_list {
1409177b715SAndrew Jones 	char *name;
1419177b715SAndrew Jones 	struct vcpu_reg_sublist sublists[];
1429177b715SAndrew Jones };
143b530eba1SSean Christopherson 
144*e8566033SHaibo Xu #define for_each_sublist(c, s)		\
145*e8566033SHaibo Xu 	for ((s) = &(c)->sublists[0]; (s)->regs; ++(s))
146*e8566033SHaibo Xu 
147b530eba1SSean Christopherson #define kvm_for_each_vcpu(vm, i, vcpu)			\
148b530eba1SSean Christopherson 	for ((i) = 0; (i) <= (vm)->last_vcpu_id; (i)++)	\
149b530eba1SSean Christopherson 		if (!((vcpu) = vm->vcpus[i]))		\
150b530eba1SSean Christopherson 			continue;			\
151b530eba1SSean Christopherson 		else
152b530eba1SSean Christopherson 
153b530eba1SSean Christopherson struct userspace_mem_region *
154b530eba1SSean Christopherson memslot2region(struct kvm_vm *vm, uint32_t memslot);
155b530eba1SSean Christopherson 
156290c5b54SRicardo Koller static inline struct userspace_mem_region *vm_get_mem_region(struct kvm_vm *vm,
157290c5b54SRicardo Koller 							     enum kvm_mem_region_type type)
158290c5b54SRicardo Koller {
159290c5b54SRicardo Koller 	assert(type < NR_MEM_REGIONS);
160290c5b54SRicardo Koller 	return memslot2region(vm, vm->memslots[type]);
161290c5b54SRicardo Koller }
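
/*
 * Illustrative example (not from the original header): a test that wants an
 * allocation placed in a specific default memslot can use the region-typed
 * allocators declared further below, e.g. to land a page in the dedicated
 * test-data memslot:
 *
 *	vm_vaddr_t gva = __vm_vaddr_alloc_page(vm, MEM_REGION_TEST_DATA);
 */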

/* Minimum allocated guest virtual and physical addresses */
#define KVM_UTIL_MIN_VADDR		0x2000
#define KVM_GUEST_PAGE_TABLE_MIN_PADDR	0x180000

#define DEFAULT_GUEST_STACK_VADDR_MIN	0xab6000
#define DEFAULT_STACK_PGS		5

enum vm_guest_mode {
	VM_MODE_P52V48_4K,
	VM_MODE_P52V48_64K,
	VM_MODE_P48V48_4K,
	VM_MODE_P48V48_16K,
	VM_MODE_P48V48_64K,
	VM_MODE_P40V48_4K,
	VM_MODE_P40V48_16K,
	VM_MODE_P40V48_64K,
	VM_MODE_PXXV48_4K,	/* For 48-bit VA but ANY number of PA bits */
	VM_MODE_P47V64_4K,
	VM_MODE_P44V64_4K,
	VM_MODE_P36V48_4K,
	VM_MODE_P36V48_16K,
	VM_MODE_P36V48_64K,
	VM_MODE_P36V47_16K,
	NUM_VM_MODES,
};

#if defined(__aarch64__)

extern enum vm_guest_mode vm_mode_default;

#define VM_MODE_DEFAULT			vm_mode_default
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#elif defined(__x86_64__)

#define VM_MODE_DEFAULT			VM_MODE_PXXV48_4K
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#elif defined(__s390x__)

#define VM_MODE_DEFAULT			VM_MODE_P44V64_4K
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 16)

#elif defined(__riscv)

#if __riscv_xlen == 32
#error "RISC-V 32-bit kvm selftests not supported"
#endif

#define VM_MODE_DEFAULT			VM_MODE_P40V48_4K
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#endif

#define MIN_PAGE_SIZE		(1U << MIN_PAGE_SHIFT)
#define PTES_PER_MIN_PAGE	ptes_per_page(MIN_PAGE_SIZE)

struct vm_guest_mode_params {
	unsigned int pa_bits;
	unsigned int va_bits;
	unsigned int page_size;
	unsigned int page_shift;
};
extern const struct vm_guest_mode_params vm_guest_mode_params[];

int open_path_or_exit(const char *path, int flags);
int open_kvm_dev_path_or_exit(void);

bool get_kvm_param_bool(const char *param);
bool get_kvm_intel_param_bool(const char *param);
bool get_kvm_amd_param_bool(const char *param);

unsigned int kvm_check_cap(long cap);

static inline bool kvm_has_cap(long cap)
{
	return kvm_check_cap(cap);
}

#define __KVM_SYSCALL_ERROR(_name, _ret) \
	"%s failed, rc: %i errno: %i (%s)", (_name), (_ret), errno, strerror(errno)

#define __KVM_IOCTL_ERROR(_name, _ret)	__KVM_SYSCALL_ERROR(_name, _ret)
#define KVM_IOCTL_ERROR(_ioctl, _ret) __KVM_IOCTL_ERROR(#_ioctl, _ret)

#define kvm_do_ioctl(fd, cmd, arg)						\
({										\
	kvm_static_assert(!_IOC_SIZE(cmd) || sizeof(*arg) == _IOC_SIZE(cmd));	\
	ioctl(fd, cmd, arg);							\
})

#define __kvm_ioctl(kvm_fd, cmd, arg)				\
	kvm_do_ioctl(kvm_fd, cmd, arg)

#define _kvm_ioctl(kvm_fd, cmd, name, arg)			\
({								\
	int ret = __kvm_ioctl(kvm_fd, cmd, arg);		\
								\
	TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(name, ret));	\
})

#define kvm_ioctl(kvm_fd, cmd, arg) \
	_kvm_ioctl(kvm_fd, cmd, #cmd, arg)

static __always_inline void static_assert_is_vm(struct kvm_vm *vm) { }

#define __vm_ioctl(vm, cmd, arg)				\
({								\
	static_assert_is_vm(vm);				\
	kvm_do_ioctl((vm)->fd, cmd, arg);			\
})

#define _vm_ioctl(vm, cmd, name, arg)				\
({								\
	int ret = __vm_ioctl(vm, cmd, arg);			\
								\
	TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(name, ret));	\
})

#define vm_ioctl(vm, cmd, arg)					\
	_vm_ioctl(vm, cmd, #cmd, arg)

static __always_inline void static_assert_is_vcpu(struct kvm_vcpu *vcpu) { }

#define __vcpu_ioctl(vcpu, cmd, arg)				\
({								\
	static_assert_is_vcpu(vcpu);				\
	kvm_do_ioctl((vcpu)->fd, cmd, arg);			\
})

#define _vcpu_ioctl(vcpu, cmd, name, arg)			\
({								\
	int ret = __vcpu_ioctl(vcpu, cmd, arg);			\
								\
	TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(name, ret));	\
})

#define vcpu_ioctl(vcpu, cmd, arg)				\
	_vcpu_ioctl(vcpu, cmd, #cmd, arg)

/*
 * Looks up and returns the value corresponding to the capability
 * (KVM_CAP_*) given by cap.
 */
static inline int vm_check_cap(struct kvm_vm *vm, long cap)
{
	int ret = __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)cap);

	TEST_ASSERT(ret >= 0, KVM_IOCTL_ERROR(KVM_CHECK_EXTENSION, ret));
	return ret;
}

static inline int __vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
{
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

	return __vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
}
static inline void vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
{
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

	vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
}
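
/*
 * Illustrative pattern (not from the original header): probe a capability
 * with kvm_has_cap()/vm_check_cap() before enabling it so a test can skip
 * gracefully on kernels that lack it.  The dirty ring capability is used
 * purely as an example; 'ring_size' is a hypothetical local variable and
 * vm_enable_dirty_ring() is declared just below.
 *
 *	if (kvm_has_cap(KVM_CAP_DIRTY_LOG_RING))
 *		vm_enable_dirty_ring(vm, ring_size);
 */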

void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size);
const char *vm_guest_mode_string(uint32_t i);

void kvm_vm_free(struct kvm_vm *vmp);
void kvm_vm_restart(struct kvm_vm *vmp);
void kvm_vm_release(struct kvm_vm *vmp);
int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, const vm_vaddr_t gva,
		       size_t len);
void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename);
int kvm_memfd_alloc(size_t size, bool hugepages);

void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);

static inline void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
{
	struct kvm_dirty_log args = { .dirty_bitmap = log, .slot = slot };

	vm_ioctl(vm, KVM_GET_DIRTY_LOG, &args);
}

static inline void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
					  uint64_t first_page, uint32_t num_pages)
{
	struct kvm_clear_dirty_log args = {
		.dirty_bitmap = log,
		.slot = slot,
		.first_page = first_page,
		.num_pages = num_pages
	};

	vm_ioctl(vm, KVM_CLEAR_DIRTY_LOG, &args);
}

static inline uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm)
{
	return __vm_ioctl(vm, KVM_RESET_DIRTY_RINGS, NULL);
}

static inline int vm_get_stats_fd(struct kvm_vm *vm)
{
	int fd = __vm_ioctl(vm, KVM_GET_STATS_FD, NULL);

	TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_GET_STATS_FD, fd));
	return fd;
}

static inline void read_stats_header(int stats_fd, struct kvm_stats_header *header)
{
	ssize_t ret;

	ret = pread(stats_fd, header, sizeof(*header), 0);
	TEST_ASSERT(ret == sizeof(*header),
		    "Failed to read '%lu' header bytes, ret = '%ld'",
		    sizeof(*header), ret);
}

struct kvm_stats_desc *read_stats_descriptors(int stats_fd,
					      struct kvm_stats_header *header);

static inline ssize_t get_stats_descriptor_size(struct kvm_stats_header *header)
{
	/*
	 * The base size of the descriptor is defined by KVM's ABI, but the
	 * size of the name field is variable, as far as KVM's ABI is
	 * concerned. For a given instance of KVM, the name field is the same
	 * size for all stats and is provided in the overall stats header.
	 */
	return sizeof(struct kvm_stats_desc) + header->name_size;
}

static inline struct kvm_stats_desc *get_stats_descriptor(struct kvm_stats_desc *stats,
							  int index,
							  struct kvm_stats_header *header)
{
	/*
	 * Note, size_desc includes the size of the name field, which is
	 * variable. i.e. this is NOT equivalent to &stats_desc[i].
	 */
	return (void *)stats + index * get_stats_descriptor_size(header);
}
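
/*
 * Illustrative example (not from the original header): walk every descriptor
 * returned by read_stats_descriptors().  'header', 'stats' and 'i' are
 * hypothetical locals of the caller; header->num_desc comes from KVM's
 * binary stats ABI.
 *
 *	for (i = 0; i < header->num_desc; i++) {
 *		struct kvm_stats_desc *desc = get_stats_descriptor(stats, i, header);
 *
 *		pr_info("stat: %s\n", desc->name);
 *	}
 */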

void read_stat_data(int stats_fd, struct kvm_stats_header *header,
		    struct kvm_stats_desc *desc, uint64_t *data,
		    size_t max_elements);

void __vm_get_stat(struct kvm_vm *vm, const char *stat_name, uint64_t *data,
		   size_t max_elements);

static inline uint64_t vm_get_stat(struct kvm_vm *vm, const char *stat_name)
{
	uint64_t data;

	__vm_get_stat(vm, stat_name, &data, 1);
	return data;
}

void vm_create_irqchip(struct kvm_vm *vm);

void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
			       uint64_t gpa, uint64_t size, void *hva);
int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
				uint64_t gpa, uint64_t size, void *hva);
void vm_userspace_mem_region_add(struct kvm_vm *vm,
	enum vm_mem_backing_src_type src_type,
	uint64_t guest_paddr, uint32_t slot, uint64_t npages,
	uint32_t flags);

void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
void vm_populate_vaddr_bitmap(struct kvm_vm *vm);
vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
			    enum kvm_mem_region_type type);
vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm,
				 enum kvm_mem_region_type type);
vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm);

void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
	      unsigned int npages);
void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa);

void vcpu_run(struct kvm_vcpu *vcpu);
int _vcpu_run(struct kvm_vcpu *vcpu);

static inline int __vcpu_run(struct kvm_vcpu *vcpu)
{
	return __vcpu_ioctl(vcpu, KVM_RUN, NULL);
}

void vcpu_run_complete_io(struct kvm_vcpu *vcpu);
struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu);

static inline void vcpu_enable_cap(struct kvm_vcpu *vcpu, uint32_t cap,
				   uint64_t arg0)
{
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

	vcpu_ioctl(vcpu, KVM_ENABLE_CAP, &enable_cap);
}

static inline void vcpu_guest_debug_set(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *debug)
{
	vcpu_ioctl(vcpu, KVM_SET_GUEST_DEBUG, debug);
}

static inline void vcpu_mp_state_get(struct kvm_vcpu *vcpu,
				     struct kvm_mp_state *mp_state)
{
	vcpu_ioctl(vcpu, KVM_GET_MP_STATE, mp_state);
}
static inline void vcpu_mp_state_set(struct kvm_vcpu *vcpu,
				     struct kvm_mp_state *mp_state)
{
	vcpu_ioctl(vcpu, KVM_SET_MP_STATE, mp_state);
}

static inline void vcpu_regs_get(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_ioctl(vcpu, KVM_GET_REGS, regs);
}

static inline void vcpu_regs_set(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_ioctl(vcpu, KVM_SET_REGS, regs);
}
static inline void vcpu_sregs_get(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	vcpu_ioctl(vcpu, KVM_GET_SREGS, sregs);
}
static inline void vcpu_sregs_set(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	vcpu_ioctl(vcpu, KVM_SET_SREGS, sregs);
}
static inline int _vcpu_sregs_set(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	return __vcpu_ioctl(vcpu, KVM_SET_SREGS, sregs);
}
static inline void vcpu_fpu_get(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_ioctl(vcpu, KVM_GET_FPU, fpu);
}
static inline void vcpu_fpu_set(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_ioctl(vcpu, KVM_SET_FPU, fpu);
}

static inline int __vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id, void *addr)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)addr };

	return __vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
}
static inline int __vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };

	return __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
}
static inline void vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id, void *addr)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)addr };

	vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
}
static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };

	vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
}

#ifdef __KVM_HAVE_VCPU_EVENTS
static inline void vcpu_events_get(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	vcpu_ioctl(vcpu, KVM_GET_VCPU_EVENTS, events);
}
static inline void vcpu_events_set(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	vcpu_ioctl(vcpu, KVM_SET_VCPU_EVENTS, events);
}
#endif
#ifdef __x86_64__
static inline void vcpu_nested_state_get(struct kvm_vcpu *vcpu,
					 struct kvm_nested_state *state)
{
	vcpu_ioctl(vcpu, KVM_GET_NESTED_STATE, state);
}
static inline int __vcpu_nested_state_set(struct kvm_vcpu *vcpu,
					  struct kvm_nested_state *state)
{
	return __vcpu_ioctl(vcpu, KVM_SET_NESTED_STATE, state);
}

static inline void vcpu_nested_state_set(struct kvm_vcpu *vcpu,
					 struct kvm_nested_state *state)
{
	vcpu_ioctl(vcpu, KVM_SET_NESTED_STATE, state);
}
#endif
static inline int vcpu_get_stats_fd(struct kvm_vcpu *vcpu)
{
	int fd = __vcpu_ioctl(vcpu, KVM_GET_STATS_FD, NULL);

	TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_GET_STATS_FD, fd));
	return fd;
}

int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr);

static inline void kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr)
{
	int ret = __kvm_has_device_attr(dev_fd, group, attr);

	TEST_ASSERT(!ret, "KVM_HAS_DEVICE_ATTR failed, rc: %i errno: %i", ret, errno);
}

int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val);

static inline void kvm_device_attr_get(int dev_fd, uint32_t group,
				       uint64_t attr, void *val)
{
	int ret = __kvm_device_attr_get(dev_fd, group, attr, val);

	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_GET_DEVICE_ATTR, ret));
}

int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val);

static inline void kvm_device_attr_set(int dev_fd, uint32_t group,
				       uint64_t attr, void *val)
{
	int ret = __kvm_device_attr_set(dev_fd, group, attr, val);

	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_DEVICE_ATTR, ret));
}

static inline int __vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
					 uint64_t attr)
{
	return __kvm_has_device_attr(vcpu->fd, group, attr);
}

static inline void vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
					uint64_t attr)
{
	kvm_has_device_attr(vcpu->fd, group, attr);
}

static inline int __vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
					 uint64_t attr, void *val)
{
	return __kvm_device_attr_get(vcpu->fd, group, attr, val);
}

static inline void vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
					uint64_t attr, void *val)
{
	kvm_device_attr_get(vcpu->fd, group, attr, val);
}

static inline int __vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
					 uint64_t attr, void *val)
{
	return __kvm_device_attr_set(vcpu->fd, group, attr, val);
}

static inline void vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
					uint64_t attr, void *val)
{
	kvm_device_attr_set(vcpu->fd, group, attr, val);
}

int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type);
int __kvm_create_device(struct kvm_vm *vm, uint64_t type);

static inline int kvm_create_device(struct kvm_vm *vm, uint64_t type)
{
	int fd = __kvm_create_device(vm, type);

	TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_DEVICE, fd));
	return fd;
}

void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu);

/*
 * VCPU Args Set
 *
 * Input Args:
 *   vcpu - The vCPU whose entry point arguments are being set
 *   num - number of arguments
 *   ... - arguments, each of type uint64_t
 *
 * Output Args: None
 *
 * Return: None
 *
 * Sets the first @num input parameters for the function at @vcpu's entry point,
 * per the C calling convention of the architecture, to the values given as
 * variable args. Each of the variable args is expected to be of type uint64_t.
 * The maximum supported @num is architecture specific.
 */
void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...);
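
/*
 * Illustrative example (guest_main and its arguments are hypothetical and not
 * part of the original header):
 *
 *	static void guest_main(uint64_t val, uint64_t token) { ... }
 *	...
 *	vcpu_args_set(vcpu, 2, 0xdeadbeef, 0xcafe);
 */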

void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);

#define KVM_MAX_IRQ_ROUTES		4096

struct kvm_irq_routing *kvm_gsi_routing_create(void);
void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing,
		uint32_t gsi, uint32_t pin);
int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);

const char *exit_reason_str(unsigned int exit_reason);

vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
			     uint32_t memslot);
vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
			      vm_paddr_t paddr_min, uint32_t memslot);
vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);

/*
 * ____vm_create() does KVM_CREATE_VM and little else.  __vm_create() also
 * loads the test binary into guest memory and creates an IRQ chip (x86 only).
 * __vm_create() does NOT create vCPUs, @nr_runnable_vcpus is used purely to
 * calculate the amount of memory needed for per-vCPU data, e.g. stacks.
 */
struct kvm_vm *____vm_create(enum vm_guest_mode mode);
struct kvm_vm *__vm_create(enum vm_guest_mode mode, uint32_t nr_runnable_vcpus,
			   uint64_t nr_extra_pages);

static inline struct kvm_vm *vm_create_barebones(void)
{
	return ____vm_create(VM_MODE_DEFAULT);
}

static inline struct kvm_vm *vm_create(uint32_t nr_runnable_vcpus)
{
	return __vm_create(VM_MODE_DEFAULT, nr_runnable_vcpus, 0);
}

struct kvm_vm *__vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
				      uint64_t extra_mem_pages,
				      void *guest_code, struct kvm_vcpu *vcpus[]);

static inline struct kvm_vm *vm_create_with_vcpus(uint32_t nr_vcpus,
						  void *guest_code,
						  struct kvm_vcpu *vcpus[])
{
	return __vm_create_with_vcpus(VM_MODE_DEFAULT, nr_vcpus, 0,
				      guest_code, vcpus);
}

/*
 * Create a VM with a single vCPU with reasonable defaults and @extra_mem_pages
 * additional pages of guest memory.  Returns the VM and vCPU (via out param).
 */
struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
					 uint64_t extra_mem_pages,
					 void *guest_code);

static inline struct kvm_vm *vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
						     void *guest_code)
{
	return __vm_create_with_one_vcpu(vcpu, 0, guest_code);
}
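
/*
 * Illustrative test skeleton (not from the original header); guest_code is a
 * hypothetical guest function:
 *
 *	struct kvm_vcpu *vcpu;
 *	struct kvm_vm *vm;
 *
 *	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
 *	vcpu_run(vcpu);
 *	kvm_vm_free(vm);
 */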

struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);

void kvm_pin_this_task_to_pcpu(uint32_t pcpu);
void kvm_print_vcpu_pinning_help(void);
void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
			    int nr_vcpus);

unsigned long vm_compute_max_gfn(struct kvm_vm *vm);
unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size);
unsigned int vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages);
unsigned int vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages);
static inline unsigned int
vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
{
	unsigned int n;
	n = vm_num_guest_pages(mode, vm_num_host_pages(mode, num_guest_pages));
#ifdef __s390x__
	/* s390 requires 1M aligned guest sizes, i.e. a multiple of 256 4k pages */
	n = (n + 255) & ~255;
#endif
	return n;
}

struct kvm_userspace_memory_region *
kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
				 uint64_t end);

#define sync_global_to_guest(vm, g) ({				\
	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
	memcpy(_p, &(g), sizeof(g));				\
})

#define sync_global_from_guest(vm, g) ({			\
	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
	memcpy(&(g), _p, sizeof(g));				\
})

/*
 * Write a global value, but only in the VM's (guest's) domain.  Primarily used
 * for "globals" that hold per-VM values (VMs always duplicate code and global
 * data into their own region of physical memory), but can be used anytime it's
 * undesirable to change the host's copy of the global.
 */
#define write_guest_global(vm, g, val) ({			\
	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
	typeof(g) _val = val;					\
								\
	memcpy(_p, &(_val), sizeof(g));				\
})
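
/*
 * Illustrative example ('iterations' is a hypothetical global shared between
 * host and guest code, not from the original header): push the host's value
 * into the guest's copy before running the vCPU, then pull the guest's
 * updates back out afterwards.
 *
 *	sync_global_to_guest(vm, iterations);
 *	vcpu_run(vcpu);
 *	sync_global_from_guest(vm, iterations);
 */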

void assert_on_unhandled_exception(struct kvm_vcpu *vcpu);

void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu,
		    uint8_t indent);

static inline void vcpu_dump(FILE *stream, struct kvm_vcpu *vcpu,
			     uint8_t indent)
{
	vcpu_arch_dump(stream, vcpu, indent);
}

/*
 * Adds a vCPU with reasonable defaults (e.g. a stack)
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpu_id - The id of the VCPU to add to the VM.
 *   guest_code - The vCPU's entry point
 */
struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
				  void *guest_code);

static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
					   void *guest_code)
{
	return vm_arch_vcpu_add(vm, vcpu_id, guest_code);
}

/* Re-create a vCPU after restarting a VM, e.g. for state save/restore tests. */
struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id);

static inline struct kvm_vcpu *vm_vcpu_recreate(struct kvm_vm *vm,
						uint32_t vcpu_id)
{
	return vm_arch_vcpu_recreate(vm, vcpu_id);
}

void vcpu_arch_free(struct kvm_vcpu *vcpu);

void virt_arch_pgd_alloc(struct kvm_vm *vm);

static inline void virt_pgd_alloc(struct kvm_vm *vm)
{
	virt_arch_pgd_alloc(vm);
}

/*
 * VM Virtual Page Map
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vaddr - VM Virtual Address
 *   paddr - VM Physical Address
 *
 * Output Args: None
 *
 * Return: None
 *
 * Within @vm, creates a virtual translation for the page starting
 * at @vaddr to the page starting at @paddr.
 */
void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr);

static inline void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
{
	virt_arch_pg_map(vm, vaddr, paddr);
}
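
/*
 * Illustrative example (not from the original header): identity-map a
 * hypothetical page so the guest sees it at the same address the host
 * allocated, 'gpa' being a hypothetical vm_paddr_t.
 *
 *	virt_pg_map(vm, gpa, gpa);
 */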

/*
 * Address Guest Virtual to Guest Physical
 *
 * Input Args:
 *   vm - Virtual Machine
 *   gva - VM virtual address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent VM physical address
 *
 * Returns the VM physical address of the translated VM virtual
 * address given by @gva.
 */
vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);

static inline vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
	return addr_arch_gva2gpa(vm, gva);
}

/*
 * Virtual Translation Tables Dump
 *
 * Input Args:
 *   stream - Output FILE stream
 *   vm     - Virtual Machine
 *   indent - Left margin indent amount
 *
 * Output Args: None
 *
 * Return: None
 *
 * Dumps to the FILE stream given by @stream, the contents of all the
 * virtual translation tables for the VM given by @vm.
 */
void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);

static inline void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	virt_arch_dump(stream, vm, indent);
}

static inline int __vm_disable_nx_huge_pages(struct kvm_vm *vm)
{
	return __vm_enable_cap(vm, KVM_CAP_VM_DISABLE_NX_HUGE_PAGES, 0);
}

/*
 * Arch hook that is invoked via a constructor, i.e. before executing main(),
 * to allow for arch-specific setup that is common to all tests, e.g. computing
 * the default guest "mode".
 */
void kvm_selftest_arch_init(void);

void kvm_arch_vm_post_create(struct kvm_vm *vm);

#endif /* SELFTEST_KVM_UTIL_BASE_H */