Lines matching refs:vm (KVM selftests library, tools/testing/selftests/kvm/lib/kvm_util.c)

128 void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size)  in vm_enable_dirty_ring()  argument
130 if (vm_check_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL)) in vm_enable_dirty_ring()
131 vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL, ring_size); in vm_enable_dirty_ring()
133 vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING, ring_size); in vm_enable_dirty_ring()
134 vm->dirty_ring_size = ring_size; in vm_enable_dirty_ring()
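
vm_enable_dirty_ring() prefers the acquire/release flavor of the capability when the host offers it and falls back to the base one; in both cases the capability argument is the per-vCPU ring size in bytes. A minimal usage sketch (the entry count is an arbitrary assumption):

    #include <linux/kvm.h>
    #include "kvm_util.h"

    /* Room for 65536 struct kvm_dirty_gfn entries per vCPU (assumed count;
     * the entry count must be a power of two). */
    vm_enable_dirty_ring(vm, 65536 * sizeof(struct kvm_dirty_gfn));
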
137 static void vm_open(struct kvm_vm *vm) in vm_open() argument
139 vm->kvm_fd = _open_kvm_dev_path_or_exit(O_RDWR); in vm_open()
143 vm->fd = __kvm_ioctl(vm->kvm_fd, KVM_CREATE_VM, (void *)vm->type); in vm_open()
144 TEST_ASSERT(vm->fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, vm->fd)); in vm_open()
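
vm_open() is the raw bring-up: grab a handle on /dev/kvm, then ask for a VM of the requested type. Outside the library, the same two steps look roughly like:

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int kvm_fd = open("/dev/kvm", O_RDWR);         /* system-wide KVM handle */
    int vm_fd  = ioctl(kvm_fd, KVM_CREATE_VM, 0);  /* 0 = default machine type */
    /* A negative vm_fd is an error; the library asserts where raw code must check. */
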
203 __weak void vm_vaddr_populate_bitmap(struct kvm_vm *vm) in vm_vaddr_populate_bitmap() argument
205 sparsebit_set_num(vm->vpages_valid, in vm_vaddr_populate_bitmap()
206 0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift); in vm_vaddr_populate_bitmap()
207 sparsebit_set_num(vm->vpages_valid, in vm_vaddr_populate_bitmap()
208 (~((1ULL << (vm->va_bits - 1)) - 1)) >> vm->page_shift, in vm_vaddr_populate_bitmap()
209 (1ULL << (vm->va_bits - 1)) >> vm->page_shift); in vm_vaddr_populate_bitmap()
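
vm_vaddr_populate_bitmap() marks both canonical halves of the guest virtual address space as valid. Worked through for va_bits = 48 and page_shift = 12: the first sparsebit_set_num() covers page indexes [0, 2^35), i.e. GVAs 0 through 2^47 - 1, and the second starts at page index ~((1ULL << 47) - 1) >> 12 = 0xffff800000000 (GVA 0xffff800000000000) and spans another 2^35 pages, the upper canonical half.
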
214 struct kvm_vm *vm; in ____vm_create() local
216 vm = calloc(1, sizeof(*vm)); in ____vm_create()
217 TEST_ASSERT(vm != NULL, "Insufficient Memory"); in ____vm_create()
219 INIT_LIST_HEAD(&vm->vcpus); in ____vm_create()
220 vm->regions.gpa_tree = RB_ROOT; in ____vm_create()
221 vm->regions.hva_tree = RB_ROOT; in ____vm_create()
222 hash_init(vm->regions.slot_hash); in ____vm_create()
224 vm->mode = mode; in ____vm_create()
225 vm->type = 0; in ____vm_create()
227 vm->pa_bits = vm_guest_mode_params[mode].pa_bits; in ____vm_create()
228 vm->va_bits = vm_guest_mode_params[mode].va_bits; in ____vm_create()
229 vm->page_size = vm_guest_mode_params[mode].page_size; in ____vm_create()
230 vm->page_shift = vm_guest_mode_params[mode].page_shift; in ____vm_create()
233 switch (vm->mode) { in ____vm_create()
235 vm->pgtable_levels = 4; in ____vm_create()
238 vm->pgtable_levels = 3; in ____vm_create()
241 vm->pgtable_levels = 4; in ____vm_create()
244 vm->pgtable_levels = 3; in ____vm_create()
248 vm->pgtable_levels = 4; in ____vm_create()
252 vm->pgtable_levels = 3; in ____vm_create()
257 vm->pgtable_levels = 4; in ____vm_create()
260 vm->pgtable_levels = 3; in ____vm_create()
264 kvm_get_cpu_address_width(&vm->pa_bits, &vm->va_bits); in ____vm_create()
270 TEST_ASSERT(vm->va_bits == 48 || vm->va_bits == 57, in ____vm_create()
272 vm->va_bits); in ____vm_create()
274 vm->pa_bits); in ____vm_create()
275 vm->pgtable_levels = 4; in ____vm_create()
276 vm->va_bits = 48; in ____vm_create()
282 vm->pgtable_levels = 5; in ____vm_create()
285 vm->pgtable_levels = 5; in ____vm_create()
292 if (vm->pa_bits != 40) in ____vm_create()
293 vm->type = KVM_VM_TYPE_ARM_IPA_SIZE(vm->pa_bits); in ____vm_create()
296 vm_open(vm); in ____vm_create()
299 vm->vpages_valid = sparsebit_alloc(); in ____vm_create()
300 vm_vaddr_populate_bitmap(vm); in ____vm_create()
303 vm->max_gfn = vm_compute_max_gfn(vm); in ____vm_create()
306 vm->vpages_mapped = sparsebit_alloc(); in ____vm_create()
308 return vm; in ____vm_create()
356 struct kvm_vm *vm; in __vm_create() local
362 vm = ____vm_create(mode); in __vm_create()
364 vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, 0, 0, nr_pages, 0); in __vm_create()
366 vm->memslots[i] = 0; in __vm_create()
368 kvm_vm_elf_load(vm, program_invocation_name); in __vm_create()
376 slot0 = memslot2region(vm, 0); in __vm_create()
377 ucall_init(vm, slot0->region.guest_phys_addr + slot0->region.memory_size); in __vm_create()
379 kvm_arch_vm_post_create(vm); in __vm_create()
381 return vm; in __vm_create()
407 struct kvm_vm *vm; in __vm_create_with_vcpus() local
412 vm = __vm_create(mode, nr_vcpus, extra_mem_pages); in __vm_create_with_vcpus()
415 vcpus[i] = vm_vcpu_add(vm, i, guest_code); in __vm_create_with_vcpus()
417 return vm; in __vm_create_with_vcpus()
425 struct kvm_vm *vm; in __vm_create_with_one_vcpu() local
427 vm = __vm_create_with_vcpus(VM_MODE_DEFAULT, 1, extra_mem_pages, in __vm_create_with_one_vcpu()
431 return vm; in __vm_create_with_one_vcpu()
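
A typical test built on these creation helpers follows the standard run/ucall loop; a minimal sketch (the guest_code body and the sync value are illustrative):

    #include "kvm_util.h"

    static void guest_code(void)
    {
            GUEST_SYNC(1);          /* report progress to the host side */
            GUEST_DONE();           /* terminate the run loop */
    }

    int main(void)
    {
            struct kvm_vcpu *vcpu;
            struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, guest_code);
            struct ucall uc;

            do {
                    vcpu_run(vcpu); /* enter the guest until it ucalls out */
            } while (get_ucall(vcpu, &uc) != UCALL_DONE);

            kvm_vm_free(vm);
            return 0;
    }
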
468 __weak struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, in vm_arch_vcpu_recreate() argument
471 return __vm_vcpu_add(vm, vcpu_id); in vm_arch_vcpu_recreate()
474 struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm) in vm_recreate_with_one_vcpu() argument
476 kvm_vm_restart(vm); in vm_recreate_with_one_vcpu()
478 return vm_vcpu_recreate(vm, 0); in vm_recreate_with_one_vcpu()
571 userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end) in userspace_mem_region_find() argument
575 for (node = vm->regions.gpa_tree.rb_node; node; ) { in userspace_mem_region_find()
610 kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start, in kvm_userspace_memory_region_find() argument
615 region = userspace_mem_region_find(vm, start, end); in kvm_userspace_memory_region_find()
639 static void vm_vcpu_rm(struct kvm_vm *vm, struct kvm_vcpu *vcpu) in vm_vcpu_rm() argument
644 ret = munmap(vcpu->dirty_gfns, vm->dirty_ring_size); in vm_vcpu_rm()
676 static void __vm_mem_region_delete(struct kvm_vm *vm, in __vm_mem_region_delete() argument
683 rb_erase(&region->gpa_node, &vm->regions.gpa_tree); in __vm_mem_region_delete()
684 rb_erase(&region->hva_node, &vm->regions.hva_tree); in __vm_mem_region_delete()
689 vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region->region); in __vm_mem_region_delete()
779 int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, vm_vaddr_t gva, size_t len) in kvm_memcmp_hva_gva() argument
794 uintptr_t ptr2 = (uintptr_t)addr_gva2hva(vm, gva + offset); in kvm_memcmp_hva_gva()
801 if ((ptr1 >> vm->page_shift) != ((ptr1 + amt) >> vm->page_shift)) in kvm_memcmp_hva_gva()
802 amt = vm->page_size - (ptr1 % vm->page_size); in kvm_memcmp_hva_gva()
803 if ((ptr2 >> vm->page_shift) != ((ptr2 + amt) >> vm->page_shift)) in kvm_memcmp_hva_gva()
804 amt = vm->page_size - (ptr2 % vm->page_size); in kvm_memcmp_hva_gva()
806 assert((ptr1 >> vm->page_shift) == ((ptr1 + amt - 1) >> vm->page_shift)); in kvm_memcmp_hva_gva()
807 assert((ptr2 >> vm->page_shift) == ((ptr2 + amt - 1) >> vm->page_shift)); in kvm_memcmp_hva_gva()
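
The two boundary checks clamp each comparison chunk so it never straddles a page in either the host or the guest mapping. For a 4 KiB page size, if ptr1 = 0x1ff0 and amt would be 64, then 0x1ff0 >> 12 != (0x1ff0 + 64) >> 12, so amt is clamped to 4096 - (0x1ff0 % 4096) = 16; the asserts then confirm both chunks stay page-local.
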
878 int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags, in __vm_set_user_memory_region() argument
889 return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region); in __vm_set_user_memory_region()
892 void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags, in vm_set_user_memory_region() argument
895 int ret = __vm_set_user_memory_region(vm, slot, flags, gpa, size, hva); in vm_set_user_memory_region()
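
__vm_set_user_memory_region() is a thin wrapper over the raw ioctl; filling the struct by hand looks like:

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    struct kvm_userspace_memory_region region = {
            .slot            = slot,
            .flags           = flags,            /* e.g. KVM_MEM_LOG_DIRTY_PAGES */
            .guest_phys_addr = gpa,
            .memory_size     = size,             /* a size of 0 deletes the slot */
            .userspace_addr  = (uintptr_t)hva,
    };
    int ret = ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
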
923 void vm_userspace_mem_region_add(struct kvm_vm *vm, in vm_userspace_mem_region_add() argument
933 TEST_ASSERT(vm_adjust_num_guest_pages(vm->mode, npages) == npages, in vm_userspace_mem_region_add()
935 "Try npages=%d", vm_adjust_num_guest_pages(vm->mode, npages)); in vm_userspace_mem_region_add()
937 TEST_ASSERT((guest_paddr % vm->page_size) == 0, "Guest physical " in vm_userspace_mem_region_add()
940 guest_paddr, vm->page_size); in vm_userspace_mem_region_add()
941 TEST_ASSERT((((guest_paddr >> vm->page_shift) + npages) - 1) in vm_userspace_mem_region_add()
942 <= vm->max_gfn, "Physical range beyond maximum " in vm_userspace_mem_region_add()
946 guest_paddr, npages, vm->max_gfn, vm->page_size); in vm_userspace_mem_region_add()
953 vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1); in vm_userspace_mem_region_add()
960 guest_paddr, npages, vm->page_size, in vm_userspace_mem_region_add()
965 hash_for_each_possible(vm->regions.slot_hash, region, slot_node, in vm_userspace_mem_region_add()
983 region->mmap_size = npages * vm->page_size; in vm_userspace_mem_region_add()
1030 ret = madvise(region->host_mem, npages * vm->page_size, in vm_userspace_mem_region_add()
1033 region->host_mem, npages * vm->page_size, in vm_userspace_mem_region_add()
1040 guest_paddr >> vm->page_shift, npages); in vm_userspace_mem_region_add()
1044 region->region.memory_size = npages * vm->page_size; in vm_userspace_mem_region_add()
1046 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region->region); in vm_userspace_mem_region_add()
1055 vm_userspace_mem_region_gpa_insert(&vm->regions.gpa_tree, region); in vm_userspace_mem_region_add()
1056 vm_userspace_mem_region_hva_insert(&vm->regions.hva_tree, region); in vm_userspace_mem_region_add()
1057 hash_add(vm->regions.slot_hash, &region->slot_node, slot); in vm_userspace_mem_region_add()
1089 memslot2region(struct kvm_vm *vm, uint32_t memslot) in memslot2region() argument
1093 hash_for_each_possible(vm->regions.slot_hash, region, slot_node, in memslot2region()
1101 vm_dump(stderr, vm, 2); in memslot2region()
1120 void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags) in vm_mem_region_set_flags() argument
1125 region = memslot2region(vm, slot); in vm_mem_region_set_flags()
1129 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region->region); in vm_mem_region_set_flags()
1150 void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa) in vm_mem_region_move() argument
1155 region = memslot2region(vm, slot); in vm_mem_region_move()
1159 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region->region); in vm_mem_region_move()
1179 void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot) in vm_mem_region_delete() argument
1181 __vm_mem_region_delete(vm, memslot2region(vm, slot), true); in vm_mem_region_delete()
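
Taken together, vm_userspace_mem_region_add(), vm_mem_region_move() and vm_mem_region_delete() cover the whole memslot lifecycle. A hedged sketch (slot number, GPAs and page count are arbitrary):

    /* Back 64 guest pages at GPA 0x10000000 with anonymous host memory. */
    vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
                                0x10000000, 10 /* slot */, 64 /* npages */, 0);
    vm_mem_region_move(vm, 10, 0x20000000);  /* retarget the slot's GPA */
    vm_mem_region_delete(vm, 10);            /* and drop it again */
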
1200 static bool vcpu_exists(struct kvm_vm *vm, uint32_t vcpu_id) in vcpu_exists() argument
1204 list_for_each_entry(vcpu, &vm->vcpus, list) { in vcpu_exists()
1216 struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) in __vm_vcpu_add() argument
1221 TEST_ASSERT(!vcpu_exists(vm, vcpu_id), "vCPU%d already exists\n", vcpu_id); in __vm_vcpu_add()
1227 vcpu->vm = vm; in __vm_vcpu_add()
1229 vcpu->fd = __vm_ioctl(vm, KVM_CREATE_VCPU, (void *)(unsigned long)vcpu_id); in __vm_vcpu_add()
1241 list_add(&vcpu->list, &vm->vcpus); in __vm_vcpu_add()
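
__vm_vcpu_add() rejects duplicate IDs, issues KVM_CREATE_VCPU, maps the vCPU's kvm_run area (not among the matches above) and links the result into vm->vcpus. Tests normally call the arch-level wrapper, which also initializes registers and points the vCPU at its entry function:

    struct kvm_vcpu *vcpu = vm_vcpu_add(vm, 0 /* vcpu_id */, guest_code);
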
1266 vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, in vm_vaddr_unused_gap() argument
1269 uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift; in vm_vaddr_unused_gap()
1272 uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift; in vm_vaddr_unused_gap()
1273 if ((pgidx_start * vm->page_size) < vaddr_min) in vm_vaddr_unused_gap()
1277 if (!sparsebit_is_set_num(vm->vpages_valid, in vm_vaddr_unused_gap()
1279 pgidx_start = sparsebit_next_set_num(vm->vpages_valid, in vm_vaddr_unused_gap()
1288 if (sparsebit_is_clear_num(vm->vpages_mapped, in vm_vaddr_unused_gap()
1291 pgidx_start = sparsebit_next_clear_num(vm->vpages_mapped, in vm_vaddr_unused_gap()
1300 if (!sparsebit_is_set_num(vm->vpages_valid, in vm_vaddr_unused_gap()
1303 vm->vpages_valid, pgidx_start, pages); in vm_vaddr_unused_gap()
1316 TEST_ASSERT(sparsebit_is_set_num(vm->vpages_valid, in vm_vaddr_unused_gap()
1322 TEST_ASSERT(sparsebit_is_clear_num(vm->vpages_mapped, in vm_vaddr_unused_gap()
1329 return pgidx_start * vm->page_size; in vm_vaddr_unused_gap()
1332 vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min, in __vm_vaddr_alloc() argument
1335 uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0); in __vm_vaddr_alloc()
1337 virt_pgd_alloc(vm); in __vm_vaddr_alloc()
1338 vm_paddr_t paddr = vm_phy_pages_alloc(vm, pages, in __vm_vaddr_alloc()
1339 KVM_UTIL_MIN_PFN * vm->page_size, in __vm_vaddr_alloc()
1340 vm->memslots[type]); in __vm_vaddr_alloc()
1346 vm_vaddr_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min); in __vm_vaddr_alloc()
1350 pages--, vaddr += vm->page_size, paddr += vm->page_size) { in __vm_vaddr_alloc()
1352 virt_pg_map(vm, vaddr, paddr); in __vm_vaddr_alloc()
1354 sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift); in __vm_vaddr_alloc()
1379 vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min) in vm_vaddr_alloc() argument
1381 return __vm_vaddr_alloc(vm, sz, vaddr_min, MEM_REGION_TEST_DATA); in vm_vaddr_alloc()
1398 vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages) in vm_vaddr_alloc_pages() argument
1400 return vm_vaddr_alloc(vm, nr_pages * getpagesize(), KVM_UTIL_MIN_VADDR); in vm_vaddr_alloc_pages()
1403 vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type) in __vm_vaddr_alloc_page() argument
1405 return __vm_vaddr_alloc(vm, getpagesize(), KVM_UTIL_MIN_VADDR, type); in __vm_vaddr_alloc_page()
1422 vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm) in vm_vaddr_alloc_page() argument
1424 return vm_vaddr_alloc_pages(vm, 1); in vm_vaddr_alloc_page()
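
The vaddr allocators combine the three steps visible above: physical page allocation, the unused-gap search, and per-page virt_pg_map(). Typical use is carving out a guest buffer the host can also touch:

    vm_vaddr_t gva = vm_vaddr_alloc_pages(vm, 2);            /* two mapped pages */
    memset(addr_gva2hva(vm, gva), 0xaa, 2 * getpagesize());  /* fill from the host */
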
1443 void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, in virt_map() argument
1446 size_t page_size = vm->page_size; in virt_map()
1453 virt_pg_map(vm, vaddr, paddr); in virt_map()
1454 sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift); in virt_map()
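
virt_map() only installs translations, so the caller provides the physical backing; pairing it with the physical allocator (GVA and page count are arbitrary):

    vm_paddr_t paddr = vm_phy_pages_alloc(vm, 4, KVM_UTIL_MIN_PFN * vm->page_size,
                                          0 /* memslot */);
    virt_map(vm, 0x40000000 /* GVA */, paddr, 4 /* npages */);
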
1478 void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa) in addr_gpa2hva() argument
1482 region = userspace_mem_region_find(vm, gpa, gpa); in addr_gpa2hva()
1509 vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva) in addr_hva2gpa() argument
1513 for (node = vm->regions.hva_tree.rb_node; node; ) { in addr_hva2gpa()
1552 void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa) in addr_gpa2alias() argument
1557 region = userspace_mem_region_find(vm, gpa, gpa); in addr_gpa2alias()
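
addr_gpa2hva() and addr_hva2gpa() are inverse lookups over the same region structures (the GPA rb-tree and the HVA rb-tree respectively), while addr_gpa2alias() returns the separate host alias mapping of the same guest memory. A round-trip sanity check:

    void *hva = addr_gpa2hva(vm, gpa);
    TEST_ASSERT(addr_hva2gpa(vm, hva) == gpa, "GPA <-> HVA round trip broken");
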
1569 void vm_create_irqchip(struct kvm_vm *vm) in vm_create_irqchip() argument
1571 vm_ioctl(vm, KVM_CREATE_IRQCHIP, NULL); in vm_create_irqchip()
1573 vm->has_irqchip = true; in vm_create_irqchip()
1635 uint32_t size = vcpu->vm->dirty_ring_size; in vcpu_map_dirty_ring()
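
vcpu_map_dirty_ring() mmaps the per-vCPU ring of struct kvm_dirty_gfn entries. A sketch of the documented harvest protocol follows; cur is a cursor the test tracks itself, collect() is a hypothetical per-page callback, and the ACQ_REL flavor additionally requires acquire/release ordering on the flags accesses:

    struct kvm_dirty_gfn *ring = vcpu_map_dirty_ring(vcpu);
    uint32_t nents = vcpu->vm->dirty_ring_size / sizeof(struct kvm_dirty_gfn);
    struct kvm_dirty_gfn *ent;

    for (;;) {
            ent = &ring[cur % nents];
            if (!(ent->flags & KVM_DIRTY_GFN_F_DIRTY))
                    break;
            collect(ent->slot, ent->offset);      /* (memslot, gfn offset) pair */
            ent->flags |= KVM_DIRTY_GFN_F_RESET;  /* hand the entry back to KVM */
            cur++;
    }
    kvm_vm_reset_dirty_ring(vcpu->vm);            /* wraps KVM_RESET_DIRTY_RINGS */
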
1676 int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type) in __kvm_test_create_device() argument
1683 return __vm_ioctl(vm, KVM_CREATE_DEVICE, &create_dev); in __kvm_test_create_device()
1686 int __kvm_create_device(struct kvm_vm *vm, uint64_t type) in __kvm_create_device() argument
1695 err = __vm_ioctl(vm, KVM_CREATE_DEVICE, &create_dev); in __kvm_create_device()
1728 int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level) in _kvm_irq_line() argument
1735 return __vm_ioctl(vm, KVM_IRQ_LINE, &irq_level); in _kvm_irq_line()
1738 void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level) in kvm_irq_line() argument
1740 int ret = _kvm_irq_line(vm, irq, level); in kvm_irq_line()
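
_kvm_irq_line() packs its arguments into struct kvm_irq_level for KVM_IRQ_LINE; kvm_irq_line() is the asserting flavor. The raw equivalent:

    struct kvm_irq_level irq_level = {
            .irq   = irq,    /* GSI / interrupt line number */
            .level = level,  /* 1 asserts the line, 0 deasserts it */
    };
    ioctl(vm_fd, KVM_IRQ_LINE, &irq_level);
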
1776 int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing) in _kvm_gsi_routing_write() argument
1781 ret = __vm_ioctl(vm, KVM_SET_GSI_ROUTING, routing); in _kvm_gsi_routing_write()
1787 void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing) in kvm_gsi_routing_write() argument
1791 ret = _kvm_gsi_routing_write(vm, routing); in kvm_gsi_routing_write()
1810 void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) in vm_dump() argument
1816 fprintf(stream, "%*smode: 0x%x\n", indent, "", vm->mode); in vm_dump()
1817 fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd); in vm_dump()
1818 fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size); in vm_dump()
1820 hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) { in vm_dump()
1830 sparsebit_dump(stream, vm->vpages_mapped, indent + 2); in vm_dump()
1832 vm->pgd_created); in vm_dump()
1833 if (vm->pgd_created) { in vm_dump()
1836 virt_dump(stream, vm, indent + 4); in vm_dump()
1840 list_for_each_entry(vcpu, &vm->vcpus, list) in vm_dump()
1940 vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, in vm_phy_pages_alloc() argument
1948 TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address " in vm_phy_pages_alloc()
1951 paddr_min, vm->page_size); in vm_phy_pages_alloc()
1953 region = memslot2region(vm, memslot); in vm_phy_pages_alloc()
1954 base = pg = paddr_min >> vm->page_shift; in vm_phy_pages_alloc()
1968 paddr_min, vm->page_size, memslot); in vm_phy_pages_alloc()
1970 vm_dump(stderr, vm, 2); in vm_phy_pages_alloc()
1977 return base * vm->page_size; in vm_phy_pages_alloc()
1980 vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min, in vm_phy_page_alloc() argument
1983 return vm_phy_pages_alloc(vm, 1, paddr_min, memslot); in vm_phy_page_alloc()
1986 vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm) in vm_alloc_page_table() argument
1988 return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, in vm_alloc_page_table()
1989 vm->memslots[MEM_REGION_PT]); in vm_alloc_page_table()
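
vm_phy_pages_alloc() scans the target memslot's free-page bookkeeping from paddr_min upward and asserts (with a vm_dump()) when the slot cannot satisfy the request; vm_alloc_page_table() is the page-table-specific wrapper drawing from the MEM_REGION_PT memslot. For a single page:

    /* One page from memslot 0, at or above an arbitrary 1 MiB floor. */
    vm_paddr_t pa = vm_phy_page_alloc(vm, 0x100000, 0);
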
2004 void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva) in addr_gva2hva() argument
2006 return addr_gpa2hva(vm, addr_gva2gpa(vm, gva)); in addr_gva2hva()
2009 unsigned long __weak vm_compute_max_gfn(struct kvm_vm *vm) in vm_compute_max_gfn() argument
2011 return ((1ULL << vm->pa_bits) >> vm->page_shift) - 1; in vm_compute_max_gfn()
2135 void __vm_get_stat(struct kvm_vm *vm, const char *stat_name, uint64_t *data, in __vm_get_stat() argument
2142 if (!vm->stats_fd) { in __vm_get_stat()
2143 vm->stats_fd = vm_get_stats_fd(vm); in __vm_get_stat()
2144 read_stats_header(vm->stats_fd, &vm->stats_header); in __vm_get_stat()
2145 vm->stats_desc = read_stats_descriptors(vm->stats_fd, in __vm_get_stat()
2146 &vm->stats_header); in __vm_get_stat()
2149 size_desc = get_stats_descriptor_size(&vm->stats_header); in __vm_get_stat()
2151 for (i = 0; i < vm->stats_header.num_desc; ++i) { in __vm_get_stat()
2152 desc = (void *)vm->stats_desc + (i * size_desc); in __vm_get_stat()
2157 read_stat_data(vm->stats_fd, &vm->stats_header, desc, in __vm_get_stat()
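
__vm_get_stat() opens the VM's binary-stats fd on first use, caches the header and descriptor table, then walks the descriptors by name and reads the matching values. Usage sketch (the stat name is an example; the available set varies by architecture and kernel version):

    uint64_t val;
    __vm_get_stat(vm, "remote_tlb_flush", &val, 1);  /* read one element */
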
2164 __weak void kvm_arch_vm_post_create(struct kvm_vm *vm) in kvm_arch_vm_post_create() argument