Lines matching references to "vm" in the s390x processor library of the KVM selftests, grouped below by the function they occur in; the number at the start of each line is its line number in the source file.
virt_arch_pgd_alloc():
  13  void virt_arch_pgd_alloc(struct kvm_vm *vm)
  17          TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
  18                      vm->page_size);
  20          if (vm->pgd_created)
  23          paddr = vm_phy_pages_alloc(vm, PAGES_PER_REGION,
  25                                     vm->memslots[MEM_REGION_PT]);
  26          memset(addr_gpa2hva(vm, paddr), 0xff, PAGES_PER_REGION * vm->page_size);
  28          vm->pgd = paddr;
  29          vm->pgd_created = true;
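The 0xff fill marks every entry of the freshly allocated top-level region table as invalid (the byte pattern has REGION_ENTRY_INVALID set), so the table starts out empty. Tests normally reach this path through the arch-neutral wrappers rather than calling it directly; a minimal sketch of such a caller, assuming the usual selftest helpers (vm_create_with_one_vcpu(), vm_vaddr_alloc_page(), kvm_vm_free()) and a hypothetical guest_code() entry point:

        #include "kvm_util.h"

        static void guest_code(void)
        {
                /* hypothetical guest entry point; never run in this sketch */
        }

        int main(void)
        {
                struct kvm_vcpu *vcpu;
                struct kvm_vm *vm;

                /* Creating the VM and vCPU allocates the top-level region
                 * table via virt_arch_pgd_alloc() on s390x. */
                vm = vm_create_with_one_vcpu(&vcpu, guest_code);

                /* Any guest-virtual allocation is mapped through
                 * virt_arch_pg_map(). */
                vm_vaddr_alloc_page(vm);

                kvm_vm_free(vm);
                return 0;
        }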
virt_alloc_region():
  37  static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri)
  41          taddr = vm_phy_pages_alloc(vm, ri < 4 ? PAGES_PER_REGION : 1,
  43          memset(addr_gpa2hva(vm, taddr), 0xff, PAGES_PER_REGION * vm->page_size);
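virt_alloc_region() allocates a lower-level translation table on demand: PAGES_PER_REGION pages for a region or segment table (ri < 4), a single page for a page table, again pre-filled with invalid entries. Its return value does not reference vm and therefore is not among the matches above; a sketch of how such a table entry is typically composed, assuming the REGION_ENTRY_* masks from the selftest s390x headers:

        /* table origin | entry type for this level | table length */
        return (taddr & REGION_ENTRY_ORIGIN)
               | (((4 - ri) << 2) & REGION_ENTRY_TYPE)
               | ((ri < 4 ? (PAGES_PER_REGION - 1) : 0) & REGION_ENTRY_LENGTH);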
virt_arch_pg_map():
  50  void virt_arch_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa)
  55          TEST_ASSERT((gva % vm->page_size) == 0,
  58                  gva, vm->page_size);
  59          TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
  60                  (gva >> vm->page_shift)),
  63          TEST_ASSERT((gpa % vm->page_size) == 0,
  66                  gpa, vm->page_size);
  67          TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,
  70                  gpa, vm->max_gfn, vm->page_size);
  73          entry = addr_gpa2hva(vm, vm->pgd);
  77                          entry[idx] = virt_alloc_region(vm, ri);
  78                  entry = addr_gpa2hva(vm, entry[idx] & REGION_ENTRY_ORIGIN);
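Lines 73-78 belong to the per-level walk that builds the translation on demand; the index computation itself does not mention vm, so it is absent from the matches. A sketch of the loop these lines sit in, assuming the conventional s390 layout used by the selftests (four levels of 2048-entry region/segment tables above a 256-entry page table, 11 index bits per upper level):

        entry = addr_gpa2hva(vm, vm->pgd);
        for (ri = 1; ri <= 4; ri++) {
                /* 11 address bits select the entry at each upper level */
                idx = (gva >> (64 - 11 * ri)) & 0x7ffu;
                if (entry[idx] & REGION_ENTRY_INVALID)
                        entry[idx] = virt_alloc_region(vm, ri);
                entry = addr_gpa2hva(vm, entry[idx] & REGION_ENTRY_ORIGIN);
        }

        /* last level: 8 bits select the page-table entry, which receives the gpa */
        idx = (gva >> vm->page_shift) & 0x0ffu;
        entry[idx] = gpa;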
addr_arch_gva2gpa():
  89  vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
  94          TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
  95                      vm->page_size);
  97          entry = addr_gpa2hva(vm, vm->pgd);
 103          entry = addr_gpa2hva(vm, entry[idx] & REGION_ENTRY_ORIGIN);
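This is the read-only counterpart of virt_arch_pg_map(): it walks the same four upper levels down from vm->pgd, asserting that each entry is valid, and then resolves the final address. Roughly, under the same layout assumptions as above:

        idx = (gva >> vm->page_shift) & 0x0ffu;
        TEST_ASSERT(!(entry[idx] & PAGE_INVALID),
                    "No page mapping for vm virtual address 0x%lx", gva);
        /* page-frame address from the PTE plus the offset within the page */
        return (entry[idx] & ~0xffful) + (gva & 0xffful);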
virt_dump_ptes():
 114  static void virt_dump_ptes(FILE *stream, struct kvm_vm *vm, uint8_t indent,
 120          pte = addr_gpa2hva(vm, ptea);
virt_dump_region():
 128  static void virt_dump_region(FILE *stream, struct kvm_vm *vm, uint8_t indent,
 134          entry = addr_gpa2hva(vm, addr);
 141                  virt_dump_region(stream, vm, indent + 2,
 144                  virt_dump_ptes(stream, vm, indent + 2,
virt_arch_dump():
 150  void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
 152          if (!vm->pgd_created)
 155          virt_dump_region(stream, vm, indent, vm->pgd);
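Together these three functions print the guest translation tables: virt_arch_dump() starts at vm->pgd (and bails out if no table hierarchy has been created yet), virt_dump_region() recurses through region and segment tables (line 141) until it reaches a page table, which it hands to virt_dump_ptes() (line 144) to print the valid entries.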
vm_arch_vcpu_add():
 158  struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
 168          TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
 169                      vm->page_size);
 171          stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
 175          vcpu = __vm_vcpu_add(vm, vcpu_id);
 184          sregs.crs[1] = vm->pgd | 0xf;   /* Primary region table */
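Line 184 installs the table built above as the primary address-space-control element: the low bits of control register 1 encode the designation type and table length, and 0xf selects a region-first table of maximum length. The rest of the vCPU setup does not touch vm; a sketch of what typically follows, assuming the stack and PSW conventions of the s390x selftest library (a 160-byte register save area below the initial stack pointer in gpr 15, a PSW mask enabling DAT and 64-bit addressing, and the PSW address pointing at the guest entry point):

        vcpu_regs_get(vcpu, &regs);
        regs.gprs[15] = stack_vaddr + stack_size - 160;
        vcpu_regs_set(vcpu, &regs);

        vcpu_sregs_get(vcpu, &sregs);
        sregs.crs[1] = vm->pgd | 0xf;                   /* primary ASCE */
        vcpu_sregs_set(vcpu, &sregs);

        vcpu->run->psw_mask = 0x0400000180000000ULL;    /* DAT on, 64-bit mode */
        vcpu->run->psw_addr = (uintptr_t)guest_code;

        return vcpu;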