/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * tools/testing/selftests/kvm/include/kvm_util_base.h
 *
 * Copyright (C) 2018, Google LLC.
 */
#ifndef SELFTEST_KVM_UTIL_BASE_H
#define SELFTEST_KVM_UTIL_BASE_H

#include "test_util.h"

#include <linux/compiler.h>
#include "linux/hashtable.h"
#include "linux/list.h"
#include <linux/kernel.h>
#include <linux/kvm.h>
#include "linux/rbtree.h"
#include <linux/types.h>

#include <asm/atomic.h>

#include <sys/ioctl.h>

#include "sparsebit.h"

/*
 * Provide a version of static_assert() that is guaranteed to have an optional
 * message param.  If _ISOC11_SOURCE is defined, glibc (/usr/include/assert.h)
 * #undefs and #defines static_assert() as a direct alias to _Static_assert(),
 * i.e. effectively makes the message mandatory.  Many KVM selftests #define
 * _GNU_SOURCE for various reasons, and _GNU_SOURCE implies _ISOC11_SOURCE.  As
 * a result, static_assert() behavior is non-deterministic and may or may not
 * require a message depending on #include order.
 */
#define __kvm_static_assert(expr, msg, ...) _Static_assert(expr, msg)
#define kvm_static_assert(expr, ...) __kvm_static_assert(expr, ##__VA_ARGS__, #expr)
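
/*
 * Illustrative usage (not part of the API): with the wrapper, the message
 * argument is always optional regardless of #include order, e.g. both of the
 * following compile:
 *
 *	kvm_static_assert(sizeof(vm_paddr_t) == 8);
 *	kvm_static_assert(sizeof(vm_paddr_t) == 8, "GPAs must be 64 bits");
 */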

#define KVM_DEV_PATH "/dev/kvm"
#define KVM_MAX_VCPUS 512

#define NSEC_PER_SEC 1000000000L

typedef uint64_t vm_paddr_t; /* Virtual Machine (Guest) physical address */
typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */

struct userspace_mem_region {
	struct kvm_userspace_memory_region region;
	struct sparsebit *unused_phy_pages;
	int fd;
	off_t offset;
	enum vm_mem_backing_src_type backing_src_type;
	void *host_mem;
	void *host_alias;
	void *mmap_start;
	void *mmap_alias;
	size_t mmap_size;
	struct rb_node gpa_node;
	struct rb_node hva_node;
	struct hlist_node slot_node;
};

struct kvm_vcpu {
	struct list_head list;
	uint32_t id;
	int fd;
	struct kvm_vm *vm;
	struct kvm_run *run;
#ifdef __x86_64__
	struct kvm_cpuid2 *cpuid;
#endif
	struct kvm_dirty_gfn *dirty_gfns;
	uint32_t fetch_index;
	uint32_t dirty_gfns_count;
};

struct userspace_mem_regions {
	struct rb_root gpa_tree;
	struct rb_root hva_tree;
	DECLARE_HASHTABLE(slot_hash, 9);
};

enum kvm_mem_region_type {
	MEM_REGION_CODE,
	MEM_REGION_DATA,
	MEM_REGION_PT,
	MEM_REGION_TEST_DATA,
	NR_MEM_REGIONS,
};

struct kvm_vm {
	int mode;
	unsigned long type;
	int kvm_fd;
	int fd;
	unsigned int pgtable_levels;
	unsigned int page_size;
	unsigned int page_shift;
	unsigned int pa_bits;
	unsigned int va_bits;
	uint64_t max_gfn;
	struct list_head vcpus;
	struct userspace_mem_regions regions;
	struct sparsebit *vpages_valid;
	struct sparsebit *vpages_mapped;
	bool has_irqchip;
	bool pgd_created;
	vm_paddr_t ucall_mmio_addr;
	vm_paddr_t pgd;
	vm_vaddr_t gdt;
	vm_vaddr_t tss;
	vm_vaddr_t idt;
	vm_vaddr_t handlers;
	uint32_t dirty_ring_size;

	/* Cache of information for binary stats interface */
	int stats_fd;
	struct kvm_stats_header stats_header;
	struct kvm_stats_desc *stats_desc;

	/*
	 * KVM region slots. These are the default memslots used by page
	 * allocators, e.g., lib/elf uses the memslots[MEM_REGION_CODE]
	 * memslot.
	 */
	uint32_t memslots[NR_MEM_REGIONS];
};

struct vcpu_reg_sublist {
	const char *name;
	long capability;
	int feature;
	bool finalize;
	__u64 *regs;
	__u64 regs_n;
	__u64 *rejects_set;
	__u64 rejects_set_n;
	__u64 *skips_set;
	__u64 skips_set_n;
};

struct vcpu_reg_list {
	char *name;
	struct vcpu_reg_sublist sublists[];
};

#define for_each_sublist(c, s)		\
	for ((s) = &(c)->sublists[0]; (s)->regs; ++(s))

#define kvm_for_each_vcpu(vm, i, vcpu)			\
	for ((i) = 0; (i) <= (vm)->last_vcpu_id; (i)++)	\
		if (!((vcpu) = vm->vcpus[i]))		\
			continue;			\
		else

struct userspace_mem_region *
memslot2region(struct kvm_vm *vm, uint32_t memslot);

static inline struct userspace_mem_region *vm_get_mem_region(struct kvm_vm *vm,
							     enum kvm_mem_region_type type)
{
	assert(type < NR_MEM_REGIONS);
	return memslot2region(vm, vm->memslots[type]);
}
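
/*
 * Illustrative usage (not part of the API): look up the region backing one of
 * the default memslots described above, e.g. to find the host mapping of the
 * guest page table memory:
 *
 *	struct userspace_mem_region *region = vm_get_mem_region(vm, MEM_REGION_PT);
 *	void *hva = region->host_mem;
 */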

/* Minimum allocated guest virtual and physical addresses */
#define KVM_UTIL_MIN_VADDR		0x2000
#define KVM_GUEST_PAGE_TABLE_MIN_PADDR	0x180000

#define DEFAULT_GUEST_STACK_VADDR_MIN	0xab6000
#define DEFAULT_STACK_PGS		5

enum vm_guest_mode {
	VM_MODE_P52V48_4K,
	VM_MODE_P52V48_64K,
	VM_MODE_P48V48_4K,
	VM_MODE_P48V48_16K,
	VM_MODE_P48V48_64K,
	VM_MODE_P40V48_4K,
	VM_MODE_P40V48_16K,
	VM_MODE_P40V48_64K,
	VM_MODE_PXXV48_4K,	/* For 48bits VA but ANY bits PA */
	VM_MODE_P47V64_4K,
	VM_MODE_P44V64_4K,
	VM_MODE_P36V48_4K,
	VM_MODE_P36V48_16K,
	VM_MODE_P36V48_64K,
	VM_MODE_P36V47_16K,
	NUM_VM_MODES,
};

#if defined(__aarch64__)

extern enum vm_guest_mode vm_mode_default;

#define VM_MODE_DEFAULT			vm_mode_default
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#elif defined(__x86_64__)

#define VM_MODE_DEFAULT			VM_MODE_PXXV48_4K
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#elif defined(__s390x__)

#define VM_MODE_DEFAULT			VM_MODE_P44V64_4K
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 16)

#elif defined(__riscv)

#if __riscv_xlen == 32
#error "RISC-V 32-bit kvm selftests not supported"
#endif

#define VM_MODE_DEFAULT			VM_MODE_P40V48_4K
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#endif

#define MIN_PAGE_SIZE		(1U << MIN_PAGE_SHIFT)
#define PTES_PER_MIN_PAGE	ptes_per_page(MIN_PAGE_SIZE)

struct vm_guest_mode_params {
	unsigned int pa_bits;
	unsigned int va_bits;
	unsigned int page_size;
	unsigned int page_shift;
};
extern const struct vm_guest_mode_params vm_guest_mode_params[];

int open_path_or_exit(const char *path, int flags);
int open_kvm_dev_path_or_exit(void);

bool get_kvm_param_bool(const char *param);
bool get_kvm_intel_param_bool(const char *param);
bool get_kvm_amd_param_bool(const char *param);

unsigned int kvm_check_cap(long cap);

static inline bool kvm_has_cap(long cap)
{
	return kvm_check_cap(cap);
}

#define __KVM_SYSCALL_ERROR(_name, _ret) \
	"%s failed, rc: %i errno: %i (%s)", (_name), (_ret), errno, strerror(errno)

#define __KVM_IOCTL_ERROR(_name, _ret)	__KVM_SYSCALL_ERROR(_name, _ret)
#define KVM_IOCTL_ERROR(_ioctl, _ret) __KVM_IOCTL_ERROR(#_ioctl, _ret)

#define kvm_do_ioctl(fd, cmd, arg)						\
({										\
	kvm_static_assert(!_IOC_SIZE(cmd) || sizeof(*arg) == _IOC_SIZE(cmd));	\
	ioctl(fd, cmd, arg);							\
})

#define __kvm_ioctl(kvm_fd, cmd, arg)				\
	kvm_do_ioctl(kvm_fd, cmd, arg)

#define _kvm_ioctl(kvm_fd, cmd, name, arg)			\
({								\
	int ret = __kvm_ioctl(kvm_fd, cmd, arg);		\
								\
	TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(name, ret));	\
})

#define kvm_ioctl(kvm_fd, cmd, arg) \
	_kvm_ioctl(kvm_fd, cmd, #cmd, arg)

static __always_inline void static_assert_is_vm(struct kvm_vm *vm) { }

#define __vm_ioctl(vm, cmd, arg)				\
({								\
	static_assert_is_vm(vm);				\
	kvm_do_ioctl((vm)->fd, cmd, arg);			\
})

#define _vm_ioctl(vm, cmd, name, arg)				\
({								\
	int ret = __vm_ioctl(vm, cmd, arg);			\
								\
	TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(name, ret));	\
})

#define vm_ioctl(vm, cmd, arg)					\
	_vm_ioctl(vm, cmd, #cmd, arg)

static __always_inline void static_assert_is_vcpu(struct kvm_vcpu *vcpu) { }

#define __vcpu_ioctl(vcpu, cmd, arg)				\
({								\
	static_assert_is_vcpu(vcpu);				\
	kvm_do_ioctl((vcpu)->fd, cmd, arg);			\
})

#define _vcpu_ioctl(vcpu, cmd, name, arg)			\
({								\
	int ret = __vcpu_ioctl(vcpu, cmd, arg);			\
								\
	TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(name, ret));	\
})

#define vcpu_ioctl(vcpu, cmd, arg)				\
	_vcpu_ioctl(vcpu, cmd, #cmd, arg)
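
/*
 * Naming convention (as implemented above, noted here for readers): the
 * double-underscore wrappers return the raw ioctl() result so callers can
 * exercise error paths themselves, while the plain wrappers assert success,
 * e.g.
 *
 *	int ret = __vcpu_ioctl(vcpu, KVM_RUN, NULL);
 *
 * versus
 *
 *	vcpu_ioctl(vcpu, KVM_RUN, NULL);
 */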

/*
 * Looks up and returns the value corresponding to the capability
 * (KVM_CAP_*) given by cap.
 */
static inline int vm_check_cap(struct kvm_vm *vm, long cap)
{
	int ret = __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)cap);

	TEST_ASSERT(ret >= 0, KVM_IOCTL_ERROR(KVM_CHECK_EXTENSION, ret));
	return ret;
}

static inline int __vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
{
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

	return __vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
}
static inline void vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
{
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

	vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
}
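
/*
 * Illustrative sketch (hedged, not lifted from a specific test): skip a test
 * when a capability is missing, then opt the VM in to it:
 *
 *	TEST_REQUIRE(kvm_has_cap(KVM_CAP_HALT_POLL));
 *	vm_enable_cap(vm, KVM_CAP_HALT_POLL, 0);
 */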

void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size);
const char *vm_guest_mode_string(uint32_t i);

void kvm_vm_free(struct kvm_vm *vmp);
void kvm_vm_restart(struct kvm_vm *vmp);
void kvm_vm_release(struct kvm_vm *vmp);
int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, const vm_vaddr_t gva,
		       size_t len);
void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename);
int kvm_memfd_alloc(size_t size, bool hugepages);

void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);

static inline void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
{
	struct kvm_dirty_log args = { .dirty_bitmap = log, .slot = slot };

	vm_ioctl(vm, KVM_GET_DIRTY_LOG, &args);
}

static inline void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
					  uint64_t first_page, uint32_t num_pages)
{
	struct kvm_clear_dirty_log args = {
		.dirty_bitmap = log,
		.slot = slot,
		.first_page = first_page,
		.num_pages = num_pages
	};

	vm_ioctl(vm, KVM_CLEAR_DIRTY_LOG, &args);
}

static inline uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm)
{
	return __vm_ioctl(vm, KVM_RESET_DIRTY_RINGS, NULL);
}

static inline int vm_get_stats_fd(struct kvm_vm *vm)
{
	int fd = __vm_ioctl(vm, KVM_GET_STATS_FD, NULL);

	TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_GET_STATS_FD, fd));
	return fd;
}

static inline void read_stats_header(int stats_fd, struct kvm_stats_header *header)
{
	ssize_t ret;

	ret = pread(stats_fd, header, sizeof(*header), 0);
	TEST_ASSERT(ret == sizeof(*header),
		    "Failed to read '%lu' header bytes, ret = '%ld'",
		    sizeof(*header), ret);
}

struct kvm_stats_desc *read_stats_descriptors(int stats_fd,
					      struct kvm_stats_header *header);

static inline ssize_t get_stats_descriptor_size(struct kvm_stats_header *header)
{
	/*
	 * The base size of the descriptor is defined by KVM's ABI, but the
	 * size of the name field is variable, as far as KVM's ABI is
	 * concerned. For a given instance of KVM, the name field is the same
	 * size for all stats and is provided in the overall stats header.
	 */
	return sizeof(struct kvm_stats_desc) + header->name_size;
}

static inline struct kvm_stats_desc *get_stats_descriptor(struct kvm_stats_desc *stats,
							  int index,
							  struct kvm_stats_header *header)
{
	/*
	 * Note, size_desc includes the size of the name field, which is
	 * variable. i.e. this is NOT equivalent to &stats_desc[i].
	 */
	return (void *)stats + index * get_stats_descriptor_size(header);
}

void read_stat_data(int stats_fd, struct kvm_stats_header *header,
		    struct kvm_stats_desc *desc, uint64_t *data,
		    size_t max_elements);

void __vm_get_stat(struct kvm_vm *vm, const char *stat_name, uint64_t *data,
		   size_t max_elements);

static inline uint64_t vm_get_stat(struct kvm_vm *vm, const char *stat_name)
{
	uint64_t data;

	__vm_get_stat(vm, stat_name, &data, 1);
	return data;
}
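
/*
 * Illustrative sketch (not lifted from a specific test) of walking the binary
 * stats descriptors; get_stats_descriptor() must be used for indexing because
 * each descriptor's size depends on header.name_size:
 *
 *	int stats_fd = vm_get_stats_fd(vm);
 *	struct kvm_stats_header header;
 *	struct kvm_stats_desc *descs, *desc;
 *	uint64_t val;
 *	int i;
 *
 *	read_stats_header(stats_fd, &header);
 *	descs = read_stats_descriptors(stats_fd, &header);
 *	for (i = 0; i < header.num_desc; i++) {
 *		desc = get_stats_descriptor(descs, i, &header);
 *		read_stat_data(stats_fd, &header, desc, &val, 1);
 *	}
 *	free(descs);
 *
 * For a single named stat, vm_get_stat(vm, "...") is the shorthand.
 */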

void vm_create_irqchip(struct kvm_vm *vm);

void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
			       uint64_t gpa, uint64_t size, void *hva);
int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
				uint64_t gpa, uint64_t size, void *hva);
void vm_userspace_mem_region_add(struct kvm_vm *vm,
	enum vm_mem_backing_src_type src_type,
	uint64_t guest_paddr, uint32_t slot, uint64_t npages,
	uint32_t flags);

void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
void vm_populate_vaddr_bitmap(struct kvm_vm *vm);
vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
			    enum kvm_mem_region_type type);
vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm,
				 enum kvm_mem_region_type type);
vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm);

void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
	      unsigned int npages);
void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa);

void vcpu_run(struct kvm_vcpu *vcpu);
int _vcpu_run(struct kvm_vcpu *vcpu);

static inline int __vcpu_run(struct kvm_vcpu *vcpu)
{
	return __vcpu_ioctl(vcpu, KVM_RUN, NULL);
}

void vcpu_run_complete_io(struct kvm_vcpu *vcpu);
struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu);

static inline void vcpu_enable_cap(struct kvm_vcpu *vcpu, uint32_t cap,
				   uint64_t arg0)
{
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

	vcpu_ioctl(vcpu, KVM_ENABLE_CAP, &enable_cap);
}

static inline void vcpu_guest_debug_set(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *debug)
{
	vcpu_ioctl(vcpu, KVM_SET_GUEST_DEBUG, debug);
}

static inline void vcpu_mp_state_get(struct kvm_vcpu *vcpu,
				     struct kvm_mp_state *mp_state)
{
	vcpu_ioctl(vcpu, KVM_GET_MP_STATE, mp_state);
}
static inline void vcpu_mp_state_set(struct kvm_vcpu *vcpu,
				     struct kvm_mp_state *mp_state)
{
	vcpu_ioctl(vcpu, KVM_SET_MP_STATE, mp_state);
}

static inline void vcpu_regs_get(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_ioctl(vcpu, KVM_GET_REGS, regs);
}

static inline void vcpu_regs_set(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_ioctl(vcpu, KVM_SET_REGS, regs);
}
static inline void vcpu_sregs_get(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	vcpu_ioctl(vcpu, KVM_GET_SREGS, sregs);
}
static inline void vcpu_sregs_set(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	vcpu_ioctl(vcpu, KVM_SET_SREGS, sregs);
}
static inline int _vcpu_sregs_set(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	return __vcpu_ioctl(vcpu, KVM_SET_SREGS, sregs);
}
static inline void vcpu_fpu_get(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_ioctl(vcpu, KVM_GET_FPU, fpu);
}
static inline void vcpu_fpu_set(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_ioctl(vcpu, KVM_SET_FPU, fpu);
}

static inline int __vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id, void *addr)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)addr };

	return __vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
}
static inline int __vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };

	return __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
}
static inline void vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id, void *addr)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)addr };

	vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
}
static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };

	vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
}

#ifdef __KVM_HAVE_VCPU_EVENTS
static inline void vcpu_events_get(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	vcpu_ioctl(vcpu, KVM_GET_VCPU_EVENTS, events);
}
static inline void vcpu_events_set(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	vcpu_ioctl(vcpu, KVM_SET_VCPU_EVENTS, events);
}
#endif
#ifdef __x86_64__
static inline void vcpu_nested_state_get(struct kvm_vcpu *vcpu,
					 struct kvm_nested_state *state)
{
	vcpu_ioctl(vcpu, KVM_GET_NESTED_STATE, state);
}
static inline int __vcpu_nested_state_set(struct kvm_vcpu *vcpu,
					  struct kvm_nested_state *state)
{
	return __vcpu_ioctl(vcpu, KVM_SET_NESTED_STATE, state);
}

static inline void vcpu_nested_state_set(struct kvm_vcpu *vcpu,
					 struct kvm_nested_state *state)
{
	vcpu_ioctl(vcpu, KVM_SET_NESTED_STATE, state);
}
#endif
static inline int vcpu_get_stats_fd(struct kvm_vcpu *vcpu)
{
	int fd = __vcpu_ioctl(vcpu, KVM_GET_STATS_FD, NULL);

	TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_GET_STATS_FD, fd));
	return fd;
}

int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr);

static inline void kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr)
{
	int ret = __kvm_has_device_attr(dev_fd, group, attr);

	TEST_ASSERT(!ret, "KVM_HAS_DEVICE_ATTR failed, rc: %i errno: %i", ret, errno);
}

int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val);

static inline void kvm_device_attr_get(int dev_fd, uint32_t group,
				       uint64_t attr, void *val)
{
	int ret = __kvm_device_attr_get(dev_fd, group, attr, val);

	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_GET_DEVICE_ATTR, ret));
}

int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val);

static inline void kvm_device_attr_set(int dev_fd, uint32_t group,
				       uint64_t attr, void *val)
{
	int ret = __kvm_device_attr_set(dev_fd, group, attr, val);

	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_DEVICE_ATTR, ret));
}

static inline int __vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
					 uint64_t attr)
{
	return __kvm_has_device_attr(vcpu->fd, group, attr);
}

static inline void vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
					uint64_t attr)
{
	kvm_has_device_attr(vcpu->fd, group, attr);
}

static inline int __vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
					 uint64_t attr, void *val)
{
	return __kvm_device_attr_get(vcpu->fd, group, attr, val);
}

static inline void vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
					uint64_t attr, void *val)
{
	kvm_device_attr_get(vcpu->fd, group, attr, val);
}

static inline int __vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
					 uint64_t attr, void *val)
{
	return __kvm_device_attr_set(vcpu->fd, group, attr, val);
}

static inline void vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
					uint64_t attr, void *val)
{
	kvm_device_attr_set(vcpu->fd, group, attr, val);
}

int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type);
int __kvm_create_device(struct kvm_vm *vm, uint64_t type);

static inline int kvm_create_device(struct kvm_vm *vm, uint64_t type)
{
	int fd = __kvm_create_device(vm, type);

	TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_DEVICE, fd));
	return fd;
}

void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu);
/*
 * VM VCPU Args Set
 *
 * Input Args:
 *   vcpu - The vCPU whose guest entry point takes the arguments
 *   num - number of arguments
 *   ... - arguments, each of type uint64_t
 *
 * Output Args: None
 *
 * Return: None
 *
 * Sets the first @num input parameters for the function at @vcpu's entry point,
 * per the C calling convention of the architecture, to the values given as
 * variable args. Each of the variable args is expected to be of type uint64_t.
 * The maximum @num can be is specific to the architecture.
 */
void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...);
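
/*
 * Illustrative usage (hedged sketch): pass two uint64_t parameters to a
 * guest_code(uint64_t a, uint64_t b) entry point before running the vCPU:
 *
 *	vcpu_args_set(vcpu, 2, (uint64_t)gva, 42);
 *	vcpu_run(vcpu);
 */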

void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);

#define KVM_MAX_IRQ_ROUTES		4096

struct kvm_irq_routing *kvm_gsi_routing_create(void);
void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing,
		uint32_t gsi, uint32_t pin);
int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);

const char *exit_reason_str(unsigned int exit_reason);

vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
			     uint32_t memslot);
vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
			      vm_paddr_t paddr_min, uint32_t memslot);
vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);

/*
 * ____vm_create() does KVM_CREATE_VM and little else.  __vm_create() also
 * loads the test binary into guest memory and creates an IRQ chip (x86 only).
 * __vm_create() does NOT create vCPUs, @nr_runnable_vcpus is used purely to
 * calculate the amount of memory needed for per-vCPU data, e.g. stacks.
 */
struct kvm_vm *____vm_create(enum vm_guest_mode mode);
struct kvm_vm *__vm_create(enum vm_guest_mode mode, uint32_t nr_runnable_vcpus,
			   uint64_t nr_extra_pages);

static inline struct kvm_vm *vm_create_barebones(void)
{
	return ____vm_create(VM_MODE_DEFAULT);
}

static inline struct kvm_vm *vm_create(uint32_t nr_runnable_vcpus)
{
	return __vm_create(VM_MODE_DEFAULT, nr_runnable_vcpus, 0);
}
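
/*
 * Illustrative sketch (guest_code() is assumed to be defined by the test): a
 * test that only probes VM ioctls can get by with a barebones VM, whereas a
 * test that runs guest code sizes memory for its vCPUs up front and adds them
 * explicitly:
 *
 *	struct kvm_vm *bare = vm_create_barebones();
 *
 *	struct kvm_vm *vm = vm_create(1);
 *	struct kvm_vcpu *vcpu = vm_vcpu_add(vm, 0, guest_code);
 */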

struct kvm_vm *__vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
				      uint64_t extra_mem_pages,
				      void *guest_code, struct kvm_vcpu *vcpus[]);

static inline struct kvm_vm *vm_create_with_vcpus(uint32_t nr_vcpus,
						  void *guest_code,
						  struct kvm_vcpu *vcpus[])
{
	return __vm_create_with_vcpus(VM_MODE_DEFAULT, nr_vcpus, 0,
				      guest_code, vcpus);
}

/*
 * Create a VM with a single vCPU with reasonable defaults and @extra_mem_pages
 * additional pages of guest memory.  Returns the VM and vCPU (via out param).
 */
struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
					 uint64_t extra_mem_pages,
					 void *guest_code);

static inline struct kvm_vm *vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
						     void *guest_code)
{
	return __vm_create_with_one_vcpu(vcpu, 0, guest_code);
}
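
/*
 * Illustrative sketch of the common single-vCPU test setup (guest_code() is
 * assumed to be defined by the test):
 *
 *	struct kvm_vcpu *vcpu;
 *	struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, guest_code);
 *
 *	vcpu_run(vcpu);
 *	kvm_vm_free(vm);
 */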

struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);

void kvm_pin_this_task_to_pcpu(uint32_t pcpu);
void kvm_print_vcpu_pinning_help(void);
void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
			    int nr_vcpus);

unsigned long vm_compute_max_gfn(struct kvm_vm *vm);
unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size);
unsigned int vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages);
unsigned int vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages);
static inline unsigned int
vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
{
	unsigned int n;
	n = vm_num_guest_pages(mode, vm_num_host_pages(mode, num_guest_pages));
#ifdef __s390x__
	/* s390 requires 1M aligned guest sizes */
	n = (n + 255) & ~255;
#endif
	return n;
}

struct kvm_userspace_memory_region *
kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
				 uint64_t end);

#define sync_global_to_guest(vm, g) ({				\
	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
	memcpy(_p, &(g), sizeof(g));				\
})

#define sync_global_from_guest(vm, g) ({			\
	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
	memcpy(&(g), _p, sizeof(g));				\
})

/*
 * Write a global value, but only in the VM's (guest's) domain.  Primarily used
 * for "globals" that hold per-VM values (VMs always duplicate code and global
 * data into their own region of physical memory), but can be used anytime it's
 * undesirable to change the host's copy of the global.
 */
#define write_guest_global(vm, g, val) ({			\
	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
	typeof(g) _val = val;					\
								\
	memcpy(_p, &(_val), sizeof(g));				\
})
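
/*
 * Illustrative sketch (the global's name is hypothetical): push a value the
 * host computed into the guest's copy of a global, pull guest updates back,
 * or update only the guest's copy while leaving the host's value untouched:
 *
 *	static uint64_t test_iterations;
 *
 *	test_iterations = 100;
 *	sync_global_to_guest(vm, test_iterations);
 *	...
 *	sync_global_from_guest(vm, test_iterations);
 *	write_guest_global(vm, test_iterations, 200);
 */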

void assert_on_unhandled_exception(struct kvm_vcpu *vcpu);

void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu,
		    uint8_t indent);

static inline void vcpu_dump(FILE *stream, struct kvm_vcpu *vcpu,
			     uint8_t indent)
{
	vcpu_arch_dump(stream, vcpu, indent);
}

/*
 * Adds a vCPU with reasonable defaults (e.g. a stack)
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpu_id - The id of the VCPU to add to the VM.
 *   guest_code - The vCPU's entry point
 */
struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
				  void *guest_code);

static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
					   void *guest_code)
{
	return vm_arch_vcpu_add(vm, vcpu_id, guest_code);
}

/* Re-create a vCPU after restarting a VM, e.g. for state save/restore tests. */
struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id);

static inline struct kvm_vcpu *vm_vcpu_recreate(struct kvm_vm *vm,
						uint32_t vcpu_id)
{
	return vm_arch_vcpu_recreate(vm, vcpu_id);
}

void vcpu_arch_free(struct kvm_vcpu *vcpu);

void virt_arch_pgd_alloc(struct kvm_vm *vm);

static inline void virt_pgd_alloc(struct kvm_vm *vm)
{
	virt_arch_pgd_alloc(vm);
}
/*
 * VM Virtual Page Map
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vaddr - VM Virtual Address
 *   paddr - VM Physical Address
 *
 * Output Args: None
 *
 * Return: None
 *
 * Within @vm, creates a virtual translation for the page starting
 * at @vaddr to the page starting at @paddr.
 */
void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr);

static inline void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
{
	virt_arch_pg_map(vm, vaddr, paddr);
}

/*
 * Address Guest Virtual to Guest Physical
 *
 * Input Args:
 *   vm - Virtual Machine
 *   gva - VM virtual address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent VM physical address
 *
 * Returns the VM physical address of the translated VM virtual
 * address given by @gva.
 */
vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);

static inline vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
	return addr_arch_gva2gpa(vm, gva);
}

/*
 * Virtual Translation Tables Dump
 *
 * Input Args:
 *   stream - Output FILE stream
 *   vm     - Virtual Machine
 *   indent - Left margin indent amount
 *
 * Output Args: None
 *
 * Return: None
 *
 * Dumps to the FILE stream given by @stream, the contents of all the
 * virtual translation tables for the VM given by @vm.
 */
void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);

static inline void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	virt_arch_dump(stream, vm, indent);
}

static inline int __vm_disable_nx_huge_pages(struct kvm_vm *vm)
{
	return __vm_enable_cap(vm, KVM_CAP_VM_DISABLE_NX_HUGE_PAGES, 0);
}

/*
 * Arch hook that is invoked via a constructor, i.e. before executing main(),
 * to allow for arch-specific setup that is common to all tests, e.g. computing
 * the default guest "mode".
 */
void kvm_selftest_arch_init(void);

void kvm_arch_vm_post_create(struct kvm_vm *vm);

#endif /* SELFTEST_KVM_UTIL_BASE_H */