/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * tools/testing/selftests/kvm/include/kvm_util_base.h
 *
 * Copyright (C) 2018, Google LLC.
 */
#ifndef SELFTEST_KVM_UTIL_BASE_H
#define SELFTEST_KVM_UTIL_BASE_H

#include "test_util.h"

#include <linux/compiler.h>
#include "linux/hashtable.h"
#include "linux/list.h"
#include <linux/kernel.h>
#include <linux/kvm.h>
#include "linux/rbtree.h"

#include <sys/ioctl.h>

#include "sparsebit.h"

#define KVM_DEV_PATH "/dev/kvm"
#define KVM_MAX_VCPUS 512

#define NSEC_PER_SEC 1000000000L

typedef uint64_t vm_paddr_t; /* Virtual Machine (Guest) physical address */
typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */

struct userspace_mem_region {
	struct kvm_userspace_memory_region region;
	struct sparsebit *unused_phy_pages;
	int fd;
	off_t offset;
	void *host_mem;
	void *host_alias;
	void *mmap_start;
	void *mmap_alias;
	size_t mmap_size;
	struct rb_node gpa_node;
	struct rb_node hva_node;
	struct hlist_node slot_node;
};

struct kvm_vcpu {
	struct list_head list;
	uint32_t id;
	int fd;
	struct kvm_vm *vm;
	struct kvm_run *run;
#ifdef __x86_64__
	struct kvm_cpuid2 *cpuid;
#endif
	struct kvm_dirty_gfn *dirty_gfns;
	uint32_t fetch_index;
	uint32_t dirty_gfns_count;
};

struct userspace_mem_regions {
	struct rb_root gpa_tree;
	struct rb_root hva_tree;
	DECLARE_HASHTABLE(slot_hash, 9);
};

struct kvm_vm {
	int mode;
	unsigned long type;
	int kvm_fd;
	int fd;
	unsigned int pgtable_levels;
	unsigned int page_size;
	unsigned int page_shift;
	unsigned int pa_bits;
	unsigned int va_bits;
	uint64_t max_gfn;
	struct list_head vcpus;
	struct userspace_mem_regions regions;
	struct sparsebit *vpages_valid;
	struct sparsebit *vpages_mapped;
	bool has_irqchip;
	bool pgd_created;
	vm_paddr_t pgd;
	vm_vaddr_t gdt;
	vm_vaddr_t tss;
	vm_vaddr_t idt;
	vm_vaddr_t handlers;
	uint32_t dirty_ring_size;

	/* Cache of information for binary stats interface */
	int stats_fd;
	struct kvm_stats_header stats_header;
	struct kvm_stats_desc *stats_desc;
};
/*
 * Iterate over every vCPU in @vm.  vCPUs are tracked on vm->vcpus, a list
 * threaded through kvm_vcpu.list, so iteration needs no index.
 */
#define kvm_for_each_vcpu(vm, vcpu) \
	list_for_each_entry((vcpu), &(vm)->vcpus, list)
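
/*
 * Example (illustrative sketch): walk every vCPU in a VM, e.g. to run or
 * dump each one in turn.  Assumes the vCPUs were added via vm_vcpu_add()
 * or one of the vm_create_with_*() helpers declared below.
 *
 *	struct kvm_vcpu *vcpu;
 *
 *	kvm_for_each_vcpu(vm, vcpu)
 *		vcpu_run(vcpu);
 */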

struct userspace_mem_region *
memslot2region(struct kvm_vm *vm, uint32_t memslot);

/* Minimum allocated guest virtual and physical addresses */
#define KVM_UTIL_MIN_VADDR		0x2000
#define KVM_GUEST_PAGE_TABLE_MIN_PADDR	0x180000

#define DEFAULT_GUEST_STACK_VADDR_MIN	0xab6000
#define DEFAULT_STACK_PGS		5

enum vm_guest_mode {
	VM_MODE_P52V48_4K,
	VM_MODE_P52V48_64K,
	VM_MODE_P48V48_4K,
	VM_MODE_P48V48_16K,
	VM_MODE_P48V48_64K,
	VM_MODE_P40V48_4K,
	VM_MODE_P40V48_16K,
	VM_MODE_P40V48_64K,
	VM_MODE_PXXV48_4K,	/* 48-bit VA with any number of PA bits */
	VM_MODE_P47V64_4K,
	VM_MODE_P44V64_4K,
	VM_MODE_P36V48_4K,
	VM_MODE_P36V48_16K,
	VM_MODE_P36V48_64K,
	VM_MODE_P36V47_16K,
	NUM_VM_MODES,
};

#if defined(__aarch64__)

extern enum vm_guest_mode vm_mode_default;

#define VM_MODE_DEFAULT			vm_mode_default
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#elif defined(__x86_64__)

#define VM_MODE_DEFAULT			VM_MODE_PXXV48_4K
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#elif defined(__s390x__)

#define VM_MODE_DEFAULT			VM_MODE_P44V64_4K
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 16)

#elif defined(__riscv)

#if __riscv_xlen == 32
#error "RISC-V 32-bit kvm selftests not supported"
#endif

#define VM_MODE_DEFAULT			VM_MODE_P40V48_4K
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#endif

#define MIN_PAGE_SIZE		(1U << MIN_PAGE_SHIFT)
#define PTES_PER_MIN_PAGE	ptes_per_page(MIN_PAGE_SIZE)

struct vm_guest_mode_params {
	unsigned int pa_bits;
	unsigned int va_bits;
	unsigned int page_size;
	unsigned int page_shift;
};
extern const struct vm_guest_mode_params vm_guest_mode_params[];

int open_path_or_exit(const char *path, int flags);
int open_kvm_dev_path_or_exit(void);
unsigned int kvm_check_cap(long cap);

static inline bool kvm_has_cap(long cap)
{
	return kvm_check_cap(cap);
}

#define __KVM_SYSCALL_ERROR(_name, _ret) \
	"%s failed, rc: %i errno: %i (%s)", (_name), (_ret), errno, strerror(errno)

#define __KVM_IOCTL_ERROR(_name, _ret)	__KVM_SYSCALL_ERROR(_name, _ret)
#define KVM_IOCTL_ERROR(_ioctl, _ret) __KVM_IOCTL_ERROR(#_ioctl, _ret)
#define kvm_do_ioctl(fd, cmd, arg)						\
({										\
	static_assert(!_IOC_SIZE(cmd) || sizeof(*arg) == _IOC_SIZE(cmd), "");	\
	ioctl(fd, cmd, arg);							\
})

#define __kvm_ioctl(kvm_fd, cmd, arg)				\
	kvm_do_ioctl(kvm_fd, cmd, arg)

#define _kvm_ioctl(kvm_fd, cmd, name, arg)			\
({								\
	int ret = __kvm_ioctl(kvm_fd, cmd, arg);		\
								\
	TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(name, ret));	\
})

#define kvm_ioctl(kvm_fd, cmd, arg) \
	_kvm_ioctl(kvm_fd, cmd, #cmd, arg)

static __always_inline void static_assert_is_vm(struct kvm_vm *vm) { }

#define __vm_ioctl(vm, cmd, arg)				\
({								\
	static_assert_is_vm(vm);				\
	kvm_do_ioctl((vm)->fd, cmd, arg);			\
})

#define _vm_ioctl(vm, cmd, name, arg)				\
({								\
	int ret = __vm_ioctl(vm, cmd, arg);			\
								\
	TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(name, ret));	\
})

#define vm_ioctl(vm, cmd, arg)					\
	_vm_ioctl(vm, cmd, #cmd, arg)

static __always_inline void static_assert_is_vcpu(struct kvm_vcpu *vcpu) { }

#define __vcpu_ioctl(vcpu, cmd, arg)				\
({								\
	static_assert_is_vcpu(vcpu);				\
	kvm_do_ioctl((vcpu)->fd, cmd, arg);			\
})

#define _vcpu_ioctl(vcpu, cmd, name, arg)			\
({								\
	int ret = __vcpu_ioctl(vcpu, cmd, arg);			\
								\
	TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(name, ret));	\
})

#define vcpu_ioctl(vcpu, cmd, arg)				\
	_vcpu_ioctl(vcpu, cmd, #cmd, arg)
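
/*
 * The wrappers above come in three tiers: the double-underscore variants
 * return the raw ioctl() result for callers that expect and handle failure,
 * the single-underscore variants assert success with a caller-provided name,
 * and the bare variants assert success and stringify the command themselves.
 * Illustrative sketch (the struct passed depends on the command):
 *
 *	struct kvm_regs regs;
 *	int ret;
 *
 *	vcpu_ioctl(vcpu, KVM_GET_REGS, &regs);		// asserts on failure
 *
 *	ret = __vcpu_ioctl(vcpu, KVM_GET_REGS, &regs);	// raw result
 *	if (ret)
 *		pr_info("KVM_GET_REGS failed with errno %d\n", errno);
 */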

/*
 * Looks up and returns the value corresponding to the capability
 * (KVM_CAP_*) given by @cap.
 */
static inline int vm_check_cap(struct kvm_vm *vm, long cap)
{
	int ret = __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)cap);

	TEST_ASSERT(ret >= 0, KVM_IOCTL_ERROR(KVM_CHECK_EXTENSION, ret));
	return ret;
}

static inline int __vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
{
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

	return __vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
}

static inline void vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
{
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

	vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
}
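
/*
 * Example (illustrative sketch): probe for a capability before enabling it.
 * The capability named here is just for illustration; any KVM_CAP_* that
 * takes a single argument follows the same pattern.
 *
 *	if (vm_check_cap(vm, KVM_CAP_HALT_POLL))
 *		vm_enable_cap(vm, KVM_CAP_HALT_POLL, 0);
 */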

void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size);
const char *vm_guest_mode_string(uint32_t i);

void kvm_vm_free(struct kvm_vm *vmp);
void kvm_vm_restart(struct kvm_vm *vmp);
void kvm_vm_release(struct kvm_vm *vmp);
int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, const vm_vaddr_t gva,
		       size_t len);
void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename);
int kvm_memfd_alloc(size_t size, bool hugepages);

void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);

static inline void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
{
	struct kvm_dirty_log args = { .dirty_bitmap = log, .slot = slot };

	vm_ioctl(vm, KVM_GET_DIRTY_LOG, &args);
}

static inline void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
					  uint64_t first_page, uint32_t num_pages)
{
	struct kvm_clear_dirty_log args = {
		.dirty_bitmap = log,
		.slot = slot,
		.first_page = first_page,
		.num_pages = num_pages
	};

	vm_ioctl(vm, KVM_CLEAR_DIRTY_LOG, &args);
}
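
/*
 * Example (illustrative sketch): harvest dirty pages from a memslot created
 * with KVM_MEM_LOG_DIRTY_PAGES.  TEST_MEM_SLOT and host_num_pages stand in
 * for the test's own memslot number and slot size; the bitmap needs one bit
 * per page in the slot.
 *
 *	unsigned long *bmap = bitmap_zalloc(host_num_pages);
 *
 *	kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT, bmap);
 *	// With KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 enabled, re-arm the pages:
 *	kvm_vm_clear_dirty_log(vm, TEST_MEM_SLOT, bmap, 0, host_num_pages);
 */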

static inline uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm)
{
	return __vm_ioctl(vm, KVM_RESET_DIRTY_RINGS, NULL);
}

static inline int vm_get_stats_fd(struct kvm_vm *vm)
{
	int fd = __vm_ioctl(vm, KVM_GET_STATS_FD, NULL);

	TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_GET_STATS_FD, fd));
	return fd;
}

static inline void read_stats_header(int stats_fd, struct kvm_stats_header *header)
{
	ssize_t ret;

	ret = read(stats_fd, header, sizeof(*header));
	TEST_ASSERT(ret == sizeof(*header), "Failed to read stats header");
}

struct kvm_stats_desc *read_stats_descriptors(int stats_fd,
					      struct kvm_stats_header *header);

static inline ssize_t get_stats_descriptor_size(struct kvm_stats_header *header)
{
	/*
	 * The base size of the descriptor is defined by KVM's ABI, but the
	 * size of the name field is variable, as far as KVM's ABI is
	 * concerned. For a given instance of KVM, the name field is the same
	 * size for all stats and is provided in the overall stats header.
	 */
	return sizeof(struct kvm_stats_desc) + header->name_size;
}

static inline struct kvm_stats_desc *get_stats_descriptor(struct kvm_stats_desc *stats,
							  int index,
							  struct kvm_stats_header *header)
{
	/*
	 * Note, the descriptor size includes the size of the name field,
	 * which is variable, i.e. this is NOT equivalent to &stats[index].
	 */
	return (void *)stats + index * get_stats_descriptor_size(header);
}

void read_stat_data(int stats_fd, struct kvm_stats_header *header,
		    struct kvm_stats_desc *desc, uint64_t *data,
		    size_t max_elements);

void __vm_get_stat(struct kvm_vm *vm, const char *stat_name, uint64_t *data,
		   size_t max_elements);

static inline uint64_t vm_get_stat(struct kvm_vm *vm, const char *stat_name)
{
	uint64_t data;

	__vm_get_stat(vm, stat_name, &data, 1);
	return data;
}
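
/*
 * Example (illustrative sketch): read a single VM-scoped stat by name via
 * the wrapper above, which hides the header/descriptor plumbing.  The name
 * must match a stat the kernel actually exposes, e.g. "remote_tlb_flush":
 *
 *	uint64_t flushes = vm_get_stat(vm, "remote_tlb_flush");
 *
 * For multi-element stats, open the fd and walk the descriptors directly:
 *
 *	int stats_fd = vm_get_stats_fd(vm);
 *	struct kvm_stats_header header;
 *	struct kvm_stats_desc *descs;
 *
 *	read_stats_header(stats_fd, &header);
 *	descs = read_stats_descriptors(stats_fd, &header);
 */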

void vm_create_irqchip(struct kvm_vm *vm);

void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
			       uint64_t gpa, uint64_t size, void *hva);
int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
				uint64_t gpa, uint64_t size, void *hva);
void vm_userspace_mem_region_add(struct kvm_vm *vm,
	enum vm_mem_backing_src_type src_type,
	uint64_t guest_paddr, uint32_t slot, uint64_t npages,
	uint32_t flags);

void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm);

void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
	      unsigned int npages);
void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa);

void vcpu_run(struct kvm_vcpu *vcpu);
int _vcpu_run(struct kvm_vcpu *vcpu);

static inline int __vcpu_run(struct kvm_vcpu *vcpu)
{
	return __vcpu_ioctl(vcpu, KVM_RUN, NULL);
}
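
/*
 * Example (illustrative sketch): the canonical selftest run loop.  Most
 * tests synchronize with the guest via the ucall helpers rather than raw
 * exit reasons; the KVM_EXIT_IO check below is just a stand-in for whatever
 * exit the test expects.
 *
 *	for (;;) {
 *		vcpu_run(vcpu);
 *		if (vcpu->run->exit_reason == KVM_EXIT_IO)
 *			break;
 *	}
 */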

void vcpu_run_complete_io(struct kvm_vcpu *vcpu);
struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu);

static inline void vcpu_enable_cap(struct kvm_vcpu *vcpu, uint32_t cap,
				   uint64_t arg0)
{
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

	vcpu_ioctl(vcpu, KVM_ENABLE_CAP, &enable_cap);
}

static inline void vcpu_guest_debug_set(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *debug)
{
	vcpu_ioctl(vcpu, KVM_SET_GUEST_DEBUG, debug);
}

static inline void vcpu_mp_state_get(struct kvm_vcpu *vcpu,
				     struct kvm_mp_state *mp_state)
{
	vcpu_ioctl(vcpu, KVM_GET_MP_STATE, mp_state);
}

static inline void vcpu_mp_state_set(struct kvm_vcpu *vcpu,
				     struct kvm_mp_state *mp_state)
{
	vcpu_ioctl(vcpu, KVM_SET_MP_STATE, mp_state);
}

static inline void vcpu_regs_get(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_ioctl(vcpu, KVM_GET_REGS, regs);
}

static inline void vcpu_regs_set(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_ioctl(vcpu, KVM_SET_REGS, regs);
}

static inline void vcpu_sregs_get(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	vcpu_ioctl(vcpu, KVM_GET_SREGS, sregs);
}

static inline void vcpu_sregs_set(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	vcpu_ioctl(vcpu, KVM_SET_SREGS, sregs);
}

static inline int _vcpu_sregs_set(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	return __vcpu_ioctl(vcpu, KVM_SET_SREGS, sregs);
}

static inline void vcpu_fpu_get(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_ioctl(vcpu, KVM_GET_FPU, fpu);
}

static inline void vcpu_fpu_set(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_ioctl(vcpu, KVM_SET_FPU, fpu);
}

static inline int __vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id, void *addr)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)addr };

	return __vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
}

static inline int __vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };

	return __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
}

static inline void vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id, void *addr)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)addr };

	vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
}

static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };

	vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
}
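
/*
 * Example (illustrative sketch): the ONE_REG accessors address registers by
 * a 64-bit id and are used heavily on arm64 and RISC-V.  Here with arm64's
 * PC, via the ARM64_CORE_REG() helper from the arm64 processor.h:
 *
 *	uint64_t pc;
 *
 *	vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc), &pc);
 *	vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), pc + 4);
 */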

#ifdef __KVM_HAVE_VCPU_EVENTS
static inline void vcpu_events_get(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	vcpu_ioctl(vcpu, KVM_GET_VCPU_EVENTS, events);
}

static inline void vcpu_events_set(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	vcpu_ioctl(vcpu, KVM_SET_VCPU_EVENTS, events);
}
#endif

#ifdef __x86_64__
static inline void vcpu_nested_state_get(struct kvm_vcpu *vcpu,
					 struct kvm_nested_state *state)
{
	vcpu_ioctl(vcpu, KVM_GET_NESTED_STATE, state);
}

static inline int __vcpu_nested_state_set(struct kvm_vcpu *vcpu,
					  struct kvm_nested_state *state)
{
	return __vcpu_ioctl(vcpu, KVM_SET_NESTED_STATE, state);
}

static inline void vcpu_nested_state_set(struct kvm_vcpu *vcpu,
					 struct kvm_nested_state *state)
{
	vcpu_ioctl(vcpu, KVM_SET_NESTED_STATE, state);
}
#endif

static inline int vcpu_get_stats_fd(struct kvm_vcpu *vcpu)
{
	int fd = __vcpu_ioctl(vcpu, KVM_GET_STATS_FD, NULL);

	TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_GET_STATS_FD, fd));
	return fd;
}

int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr);

static inline void kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr)
{
	int ret = __kvm_has_device_attr(dev_fd, group, attr);

	TEST_ASSERT(!ret, "KVM_HAS_DEVICE_ATTR failed, rc: %i errno: %i", ret, errno);
}

int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val);

static inline void kvm_device_attr_get(int dev_fd, uint32_t group,
				       uint64_t attr, void *val)
{
	int ret = __kvm_device_attr_get(dev_fd, group, attr, val);

	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_GET_DEVICE_ATTR, ret));
}

int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val);

static inline void kvm_device_attr_set(int dev_fd, uint32_t group,
				       uint64_t attr, void *val)
{
	int ret = __kvm_device_attr_set(dev_fd, group, attr, val);

	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_DEVICE_ATTR, ret));
}

static inline int __vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
					 uint64_t attr)
{
	return __kvm_has_device_attr(vcpu->fd, group, attr);
}

static inline void vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
					uint64_t attr)
{
	kvm_has_device_attr(vcpu->fd, group, attr);
}

static inline int __vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
					 uint64_t attr, void *val)
{
	return __kvm_device_attr_get(vcpu->fd, group, attr, val);
}

static inline void vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
					uint64_t attr, void *val)
{
	kvm_device_attr_get(vcpu->fd, group, attr, val);
}

static inline int __vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
					 uint64_t attr, void *val)
{
	return __kvm_device_attr_set(vcpu->fd, group, attr, val);
}

static inline void vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
					uint64_t attr, void *val)
{
	kvm_device_attr_set(vcpu->fd, group, attr, val);
}

int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type);
int __kvm_create_device(struct kvm_vm *vm, uint64_t type);

static inline int kvm_create_device(struct kvm_vm *vm, uint64_t type)
{
	int fd = __kvm_create_device(vm, type);

	TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_DEVICE, fd));
	return fd;
}
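
/*
 * Example (illustrative sketch): create an in-kernel device and configure
 * it via device attributes.  The arm64 vGICv3 is used for illustration;
 * other device types follow the same create/has/get/set pattern.
 *
 *	int gic_fd = kvm_create_device(vm, KVM_DEV_TYPE_ARM_VGIC_V3);
 *
 *	kvm_device_attr_set(gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
 *			    KVM_DEV_ARM_VGIC_CTRL_INIT, NULL);
 */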

void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu);

/*
 * VM VCPU Args Set
 *
 * Input Args:
 *   vcpu - The vCPU whose entry-point arguments are being set
 *   num - number of arguments
 *   ... - arguments, each of type uint64_t
 *
 * Output Args: None
 *
 * Return: None
 *
 * Sets the first @num input parameters for the function at @vcpu's entry point,
 * per the C calling convention of the architecture, to the values given as
 * variable args. Each of the variable args is expected to be of type uint64_t.
 * The maximum @num can be is specific to the architecture.
 */
void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...);
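
/*
 * Example (illustrative sketch): pass two arguments to a guest entry point
 * declared as
 *
 *	static void guest_code(uint64_t base_gpa, uint64_t nr_pages);
 *
 * by calling, after the vCPU has been added:
 *
 *	vcpu_args_set(vcpu, 2, base_gpa, nr_pages);
 *
 * guest_code, base_gpa and nr_pages are hypothetical names for this sketch.
 */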

void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);

#define KVM_MAX_IRQ_ROUTES		4096

struct kvm_irq_routing *kvm_gsi_routing_create(void);
void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing,
		uint32_t gsi, uint32_t pin);
int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
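
/*
 * Example (illustrative sketch): build a GSI routing table and install it
 * with KVM_SET_GSI_ROUTING.  The gsi/pin values are placeholders; the
 * asserting kvm_gsi_routing_write() is shown, tests expecting failure use
 * _kvm_gsi_routing_write() instead.
 *
 *	struct kvm_irq_routing *routing = kvm_gsi_routing_create();
 *
 *	kvm_gsi_routing_irqchip_add(routing, gsi, pin);
 *	kvm_gsi_routing_write(vm, routing);
 */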

const char *exit_reason_str(unsigned int exit_reason);

vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
			     uint32_t memslot);
vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
			      vm_paddr_t paddr_min, uint32_t memslot);
vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);

/*
 * ____vm_create() does KVM_CREATE_VM and little else.  __vm_create() also
 * loads the test binary into guest memory and creates an IRQ chip (x86 only).
 * __vm_create() does NOT create vCPUs, @nr_runnable_vcpus is used purely to
 * calculate the amount of memory needed for per-vCPU data, e.g. stacks.
 */
struct kvm_vm *____vm_create(enum vm_guest_mode mode, uint64_t nr_pages);
struct kvm_vm *__vm_create(enum vm_guest_mode mode, uint32_t nr_runnable_vcpus,
			   uint64_t nr_extra_pages);

static inline struct kvm_vm *vm_create_barebones(void)
{
	return ____vm_create(VM_MODE_DEFAULT, 0);
}

static inline struct kvm_vm *vm_create(uint32_t nr_runnable_vcpus)
{
	return __vm_create(VM_MODE_DEFAULT, nr_runnable_vcpus, 0);
}

struct kvm_vm *__vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
				      uint64_t extra_mem_pages,
				      void *guest_code, struct kvm_vcpu *vcpus[]);

static inline struct kvm_vm *vm_create_with_vcpus(uint32_t nr_vcpus,
						  void *guest_code,
						  struct kvm_vcpu *vcpus[])
{
	return __vm_create_with_vcpus(VM_MODE_DEFAULT, nr_vcpus, 0,
				      guest_code, vcpus);
}

/*
 * Create a VM with a single vCPU with reasonable defaults and @extra_mem_pages
 * additional pages of guest memory.  Returns the VM and vCPU (via out param).
 */
struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
					 uint64_t extra_mem_pages,
					 void *guest_code);

static inline struct kvm_vm *vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
						     void *guest_code)
{
	return __vm_create_with_one_vcpu(vcpu, 0, guest_code);
}
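
/*
 * Example (illustrative sketch): the typical single-vCPU test skeleton built
 * on the helpers above, with guest_code standing in for the test's guest
 * entry point.
 *
 *	struct kvm_vcpu *vcpu;
 *	struct kvm_vm *vm;
 *
 *	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
 *	vcpu_run(vcpu);
 *	kvm_vm_free(vm);
 */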

struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);

unsigned long vm_compute_max_gfn(struct kvm_vm *vm);
unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size);
unsigned int vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages);
unsigned int vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages);

static inline unsigned int
vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
{
	unsigned int n;

	n = vm_num_guest_pages(mode, vm_num_host_pages(mode, num_guest_pages));
#ifdef __s390x__
	/* s390 requires 1M aligned guest sizes */
	n = (n + 255) & ~255;
#endif
	return n;
}

struct kvm_userspace_memory_region *
kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
				 uint64_t end);

#define sync_global_to_guest(vm, g) ({				\
	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
	memcpy(_p, &(g), sizeof(g));				\
})

#define sync_global_from_guest(vm, g) ({			\
	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
	memcpy(&(g), _p, sizeof(g));				\
})
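
/*
 * Example (illustrative sketch): share a global between host and guest.
 * The macros copy by value through the guest's page tables, so the global
 * must live in the test binary that was loaded into guest memory.
 *
 *	static uint64_t test_phase;		// visible to host and guest
 *
 *	test_phase = 1;
 *	sync_global_to_guest(vm, test_phase);	// guest now reads 1
 *	...
 *	sync_global_from_guest(vm, test_phase);	// pick up guest-side updates
 */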

void assert_on_unhandled_exception(struct kvm_vcpu *vcpu);

void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu,
		    uint8_t indent);

static inline void vcpu_dump(FILE *stream, struct kvm_vcpu *vcpu,
			     uint8_t indent)
{
	vcpu_arch_dump(stream, vcpu, indent);
}

/*
 * Adds a vCPU with reasonable defaults (e.g. a stack)
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpu_id - The id of the VCPU to add to the VM.
 *   guest_code - The vCPU's entry point
 */
struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
				  void *guest_code);

static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
					   void *guest_code)
{
	return vm_arch_vcpu_add(vm, vcpu_id, guest_code);
}

/* Re-create a vCPU after restarting a VM, e.g. for state save/restore tests. */
struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id);

static inline struct kvm_vcpu *vm_vcpu_recreate(struct kvm_vm *vm,
						uint32_t vcpu_id)
{
	return vm_arch_vcpu_recreate(vm, vcpu_id);
}

void vcpu_arch_free(struct kvm_vcpu *vcpu);

void virt_arch_pgd_alloc(struct kvm_vm *vm);

static inline void virt_pgd_alloc(struct kvm_vm *vm)
{
	virt_arch_pgd_alloc(vm);
}

/*
 * VM Virtual Page Map
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vaddr - VM Virtual Address
 *   paddr - VM Physical Address
 *
 * Output Args: None
 *
 * Return: None
 *
 * Within @vm, creates a virtual translation for the page starting
 * at @vaddr to the page starting at @paddr.
 */
void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr);

static inline void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
{
	virt_arch_pg_map(vm, vaddr, paddr);
}
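
/*
 * Example (illustrative sketch): hand-build a guest mapping when a test
 * needs an exact GVA->GPA layout instead of vm_vaddr_alloc().  TEST_GVA,
 * TEST_MIN_PADDR and TEST_MEM_SLOT are placeholders for test-chosen values.
 *
 *	vm_paddr_t gpa = vm_phy_page_alloc(vm, TEST_MIN_PADDR, TEST_MEM_SLOT);
 *
 *	virt_pg_map(vm, TEST_GVA, gpa);
 *	TEST_ASSERT(addr_gva2gpa(vm, TEST_GVA) == gpa, "Unexpected GPA");
 */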

/*
 * Address Guest Virtual to Guest Physical
 *
 * Input Args:
 *   vm - Virtual Machine
 *   gva - VM virtual address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent VM physical address
 *
 * Returns the VM physical address of the translated VM virtual
 * address given by @gva.
 */
vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);

static inline vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
	return addr_arch_gva2gpa(vm, gva);
}

/*
 * Virtual Translation Tables Dump
 *
 * Input Args:
 *   stream - Output FILE stream
 *   vm     - Virtual Machine
 *   indent - Left margin indent amount
 *
 * Output Args: None
 *
 * Return: None
 *
 * Dumps to the FILE stream given by @stream, the contents of all the
 * virtual translation tables for the VM given by @vm.
 */
void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);

static inline void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	virt_arch_dump(stream, vm, indent);
}

static inline int __vm_disable_nx_huge_pages(struct kvm_vm *vm)
{
	return __vm_enable_cap(vm, KVM_CAP_VM_DISABLE_NX_HUGE_PAGES, 0);
}

#endif /* SELFTEST_KVM_UTIL_BASE_H */