// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM selftest s390x library code - CPU-related functions (page tables...)
 *
 * Copyright (C) 2019, Red Hat, Inc.
 */

#define _GNU_SOURCE /* for program_invocation_name */

#include "processor.h"
#include "kvm_util.h"
#include "../kvm_util_internal.h"

#define KVM_GUEST_PAGE_TABLE_MIN_PADDR	0x180000

#define PAGES_PER_REGION 4

void virt_pgd_alloc(struct kvm_vm *vm, uint32_t memslot)
{
	vm_paddr_t paddr;

	TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
		    vm->page_size);

	if (vm->pgd_created)
		return;

	paddr = vm_phy_pages_alloc(vm, PAGES_PER_REGION,
				   KVM_GUEST_PAGE_TABLE_MIN_PADDR, memslot);
	memset(addr_gpa2hva(vm, paddr), 0xff, PAGES_PER_REGION * vm->page_size);

	vm->pgd = paddr;
	vm->pgd_created = true;
}

/*
 * Allocate 4 pages for a region/segment table (ri < 4), or one page for
 * a page table (ri == 4). Returns a suitable region/segment table entry
 * which points to the freshly allocated pages.
 */
static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri, uint32_t memslot)
{
	uint64_t taddr;

	taddr = vm_phy_pages_alloc(vm, ri < 4 ? PAGES_PER_REGION : 1,
				   KVM_GUEST_PAGE_TABLE_MIN_PADDR, memslot);
	/* Only invalidate as many pages as were actually allocated */
	memset(addr_gpa2hva(vm, taddr), 0xff,
	       (ri < 4 ? PAGES_PER_REGION : 1) * vm->page_size);

	return (taddr & REGION_ENTRY_ORIGIN)
		| (((4 - ri) << 2) & REGION_ENTRY_TYPE)
		| ((ri < 4 ? (PAGES_PER_REGION - 1) : 0) & REGION_ENTRY_LENGTH);
}

void virt_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa,
		 uint32_t memslot)
{
	int ri, idx;
	uint64_t *entry;

	TEST_ASSERT((gva % vm->page_size) == 0,
		"Virtual address not on page boundary,\n"
		"  vaddr: 0x%lx vm->page_size: 0x%x",
		gva, vm->page_size);
	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
		(gva >> vm->page_shift)),
		"Invalid virtual address, vaddr: 0x%lx",
		gva);
	TEST_ASSERT((gpa % vm->page_size) == 0,
		"Physical address not on page boundary,\n"
		"  paddr: 0x%lx vm->page_size: 0x%x",
		gpa, vm->page_size);
	TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,
		"Physical address beyond maximum supported,\n"
		"  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		gpa, vm->max_gfn, vm->page_size);

	/* Walk through region and segment tables */
	entry = addr_gpa2hva(vm, vm->pgd);
	for (ri = 1; ri <= 4; ri++) {
		idx = (gva >> (64 - 11 * ri)) & 0x7ffu;
		if (entry[idx] & REGION_ENTRY_INVALID)
			entry[idx] = virt_alloc_region(vm, ri, memslot);
		entry = addr_gpa2hva(vm, entry[idx] & REGION_ENTRY_ORIGIN);
	}

	/* Fill in page table entry */
	idx = (gva >> 12) & 0x0ffu;		/* page index */
	if (!(entry[idx] & PAGE_INVALID))
		fprintf(stderr,
			"WARNING: PTE for gpa=0x%"PRIx64" already set!\n", gpa);
	entry[idx] = gpa;
}

vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
	int ri, idx;
	uint64_t *entry;

	TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
		    vm->page_size);

	entry = addr_gpa2hva(vm, vm->pgd);
	for (ri = 1; ri <= 4; ri++) {
		idx = (gva >> (64 - 11 * ri)) & 0x7ffu;
		TEST_ASSERT(!(entry[idx] & REGION_ENTRY_INVALID),
			    "No region mapping for vm virtual address 0x%lx",
			    gva);
		entry = addr_gpa2hva(vm, entry[idx] & REGION_ENTRY_ORIGIN);
	}

	idx = (gva >> 12) & 0x0ffu;		/* page index */

	TEST_ASSERT(!(entry[idx] & PAGE_INVALID),
		    "No page mapping for vm virtual address 0x%lx", gva);

	return (entry[idx] & ~0xffful) + (gva & 0xffful);
}
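/*
 * Illustrative sketch (compiled out, not used by the library): how the
 * table walks above slice a 64-bit guest virtual address. Each of the
 * four region/segment levels consumes 11 bits (2048 entries per table,
 * hence the 0x7ff mask), followed by an 8-bit page index (256 entries)
 * and a 12-bit byte offset. The helper name is hypothetical.
 */
#if 0
static void example_decompose_gva(uint64_t gva)
{
	int ri;

	for (ri = 1; ri <= 4; ri++)	/* region-first..third, then segment */
		printf("level %d index: 0x%03lx\n", ri,
		       (gva >> (64 - 11 * ri)) & 0x7ffu);
	printf("page index: 0x%02lx, byte offset: 0x%03lx\n",
	       (gva >> 12) & 0x0ffu, gva & 0xffful);
}
#endif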
static void virt_dump_ptes(FILE *stream, struct kvm_vm *vm, uint8_t indent,
			   uint64_t ptea_start)
{
	uint64_t *pte, ptea;

	for (ptea = ptea_start; ptea < ptea_start + 0x100 * 8; ptea += 8) {
		pte = addr_gpa2hva(vm, ptea);
		if (*pte & PAGE_INVALID)
			continue;
		fprintf(stream, "%*spte @ 0x%lx: 0x%016lx\n",
			indent, "", ptea, *pte);
	}
}

static void virt_dump_region(FILE *stream, struct kvm_vm *vm, uint8_t indent,
			     uint64_t reg_tab_addr)
{
	uint64_t addr, *entry;

	/* Region/segment tables span 4 pages, i.e. 2048 entries */
	for (addr = reg_tab_addr; addr < reg_tab_addr + 0x800 * 8; addr += 8) {
		entry = addr_gpa2hva(vm, addr);
		if (*entry & REGION_ENTRY_INVALID)
			continue;
		fprintf(stream, "%*srt%lde @ 0x%lx: 0x%016lx\n",
			indent, "", 4 - ((*entry & REGION_ENTRY_TYPE) >> 2),
			addr, *entry);
		if (*entry & REGION_ENTRY_TYPE) {
			virt_dump_region(stream, vm, indent + 2,
					 *entry & REGION_ENTRY_ORIGIN);
		} else {
			virt_dump_ptes(stream, vm, indent + 2,
				       *entry & REGION_ENTRY_ORIGIN);
		}
	}
}

void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	if (!vm->pgd_created)
		return;

	virt_dump_region(stream, vm, indent, vm->pgd);
}

struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
				 void *guest_code)
{
	/*
	 * The additional amount of pages required for the page tables is:
	 * 1 * n / 256 + 4 * (n / 256) / 2048 + 4 * (n / 256) / 2048^2 + ...
	 * which is definitely smaller than (n / 256) * 2.
	 */
	uint64_t extra_pg_pages = extra_mem_pages / 256 * 2;
	struct kvm_vm *vm;

	vm = vm_create(VM_MODE_DEFAULT,
		       DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR);

	kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
	vm_vcpu_add_default(vm, vcpuid, guest_code);

	return vm;
}

void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
{
	size_t stack_size = DEFAULT_STACK_PGS * getpagesize();
	uint64_t stack_vaddr;
	struct kvm_regs regs;
	struct kvm_sregs sregs;
	struct kvm_run *run;

	TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
		    vm->page_size);

	stack_vaddr = vm_vaddr_alloc(vm, stack_size,
				     DEFAULT_GUEST_STACK_VADDR_MIN, 0, 0);

	vm_vcpu_add(vm, vcpuid);

	/* Setup guest registers */
	vcpu_regs_get(vm, vcpuid, &regs);
	/* Reserve the 160-byte register save area required by the s390x ABI */
	regs.gprs[15] = stack_vaddr + stack_size - 160;
	vcpu_regs_set(vm, vcpuid, &regs);

	vcpu_sregs_get(vm, vcpuid, &sregs);
	sregs.crs[0] |= 0x00040000;	/* Enable floating point regs */
	sregs.crs[1] = vm->pgd | 0xf;	/* Primary region table */
	vcpu_sregs_set(vm, vcpuid, &sregs);

	run = vcpu_state(vm, vcpuid);
	run->psw_mask = 0x0400000180000000ULL;  /* DAT enabled + 64 bit mode */
	run->psw_addr = (uintptr_t)guest_code;
}
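/*
 * Sketch of how the CR1 value set in vm_vcpu_add_default() above is
 * composed (compiled out, for illustration only; the helper name is
 * hypothetical). The 0xf OR'ed into vm->pgd encodes a designation type
 * of 11b (region-first table) plus a table length of 3, i.e. 4 pages.
 */
#if 0
static uint64_t example_region1_designation(uint64_t table_origin)
{
	return table_origin	/* origin of the top-level region table */
	       | (3 << 2)	/* designation type: region-first table */
	       | 3;		/* table length: 4 pages */
}
#endif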
void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
{
	va_list ap;
	struct kvm_regs regs;
	int i;

	/* The s390x calling convention passes arguments in r2..r6 */
	TEST_ASSERT(num >= 1 && num <= 5, "Unsupported number of args,\n"
		    "  num: %u\n",
		    num);

	va_start(ap, num);
	vcpu_regs_get(vm, vcpuid, &regs);

	for (i = 0; i < num; i++)
		regs.gprs[i + 2] = va_arg(ap, uint64_t);

	vcpu_regs_set(vm, vcpuid, &regs);
	va_end(ap);
}

void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);

	if (!vcpu)
		return;

	fprintf(stream, "%*spstate: psw: 0x%.16llx:0x%.16llx\n",
		indent, "", vcpu->state->psw_mask, vcpu->state->psw_addr);
}
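/*
 * Usage sketch (hypothetical test, not part of this library, compiled
 * out): create a default VM, pass one argument to the guest in r2 via
 * vcpu_args_set(), and run it. guest_main and VCPU_ID are assumptions
 * made for this example.
 */
#if 0
#define VCPU_ID 0

static void guest_main(uint64_t arg)
{
	GUEST_ASSERT(arg == 42);
	GUEST_DONE();
}

int main(void)
{
	struct kvm_vm *vm = vm_create_default(VCPU_ID, 0, guest_main);

	vcpu_args_set(vm, VCPU_ID, 1, 42UL);
	vcpu_run(vm, VCPU_ID);
	kvm_vm_free(vm);
	return 0;
}
#endif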