// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <semaphore.h>
#include <sys/types.h>
#include <signal.h>
#include <errno.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/atomic.h>
#include <linux/sizes.h>

#include "kvm_util.h"
#include "test_util.h"
#include "guest_modes.h"
#include "processor.h"

static void guest_code(uint64_t start_gpa, uint64_t end_gpa, uint64_t stride)
{
	uint64_t gpa;

	for (gpa = start_gpa; gpa < end_gpa; gpa += stride)
		*((volatile uint64_t *)gpa) = gpa;

	GUEST_DONE();
}

struct vcpu_info {
	struct kvm_vcpu *vcpu;
	uint64_t start_gpa;
	uint64_t end_gpa;
};

static int nr_vcpus;
static atomic_t rendezvous;
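
/*
 * The boss and the vCPU workers synchronize via a single atomic, treated as
 * a sign/magnitude pair: |rendezvous| - 1 is the number of vCPUs that have
 * yet to check in, and the sign flips each round.  Every vCPU steps the
 * count one unit toward zero and then spins until the boss observes
 * |rendezvous| == 1 and swings the value to the opposite sign, releasing
 * all vCPUs into the next phase.
 */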
static void rendezvous_with_boss(void)
{
	int orig = atomic_read(&rendezvous);

	if (orig > 0) {
		atomic_dec_and_test(&rendezvous);
		while (atomic_read(&rendezvous) > 0)
			cpu_relax();
	} else {
		atomic_inc(&rendezvous);
		while (atomic_read(&rendezvous) < 0)
			cpu_relax();
	}
}

static void run_vcpu(struct kvm_vcpu *vcpu)
{
	vcpu_run(vcpu);
	ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE);
}

static void *vcpu_worker(void *data)
{
	struct vcpu_info *info = data;
	struct kvm_vcpu *vcpu = info->vcpu;
	struct kvm_vm *vm = vcpu->vm;
	struct kvm_sregs sregs;
	struct kvm_regs regs;

	vcpu_args_set(vcpu, 3, info->start_gpa, info->end_gpa, vm->page_size);

	/* Snapshot regs before the first run. */
	vcpu_regs_get(vcpu, &regs);
	rendezvous_with_boss();

	run_vcpu(vcpu);
	rendezvous_with_boss();

	/* Restore the snapshot so the vCPU re-runs guest_code from scratch. */
	vcpu_regs_set(vcpu, &regs);
	vcpu_sregs_get(vcpu, &sregs);
#ifdef __x86_64__
	/* Toggle CR0.WP to trigger a MMU context reset. */
	sregs.cr0 ^= X86_CR0_WP;
#endif
	vcpu_sregs_set(vcpu, &sregs);
	rendezvous_with_boss();

	run_vcpu(vcpu);
	rendezvous_with_boss();

	return NULL;
}

static pthread_t *spawn_workers(struct kvm_vm *vm, struct kvm_vcpu **vcpus,
				uint64_t start_gpa, uint64_t end_gpa)
{
	struct vcpu_info *info;
	uint64_t gpa, nr_bytes;
	pthread_t *threads;
	int i;

	threads = malloc(nr_vcpus * sizeof(*threads));
	TEST_ASSERT(threads, "Failed to allocate vCPU threads");

	info = malloc(nr_vcpus * sizeof(*info));
	TEST_ASSERT(info, "Failed to allocate vCPU gpa ranges");

	nr_bytes = ((end_gpa - start_gpa) / nr_vcpus) &
		   ~((uint64_t)vm->page_size - 1);
	TEST_ASSERT(nr_bytes, "C'mon, no way you have %d CPUs", nr_vcpus);

	for (i = 0, gpa = start_gpa; i < nr_vcpus; i++, gpa += nr_bytes) {
		info[i].vcpu = vcpus[i];
		info[i].start_gpa = gpa;
		info[i].end_gpa = gpa + nr_bytes;
		pthread_create(&threads[i], NULL, vcpu_worker, &info[i]);
	}
	return threads;
}

static void rendezvous_with_vcpus(struct timespec *time, const char *name)
{
	int i, rendezvoused;

	pr_info("Waiting for vCPUs to finish %s...\n", name);

	rendezvoused = atomic_read(&rendezvous);
	for (i = 0; abs(rendezvoused) != 1; i++) {
		usleep(100);
		if (!(i & 0x3f))
			pr_info("\r%d vCPUs haven't rendezvoused...",
				abs(rendezvoused) - 1);
		rendezvoused = atomic_read(&rendezvous);
	}

	clock_gettime(CLOCK_MONOTONIC, time);

	/* Release the vCPUs after getting the time of the previous action. */
	pr_info("\rAll vCPUs finished %s, releasing...\n", name);
	if (rendezvoused > 0)
		atomic_set(&rendezvous, -nr_vcpus - 1);
	else
		atomic_set(&rendezvous, nr_vcpus + 1);
}

static void calc_default_nr_vcpus(void)
{
	cpu_set_t possible_mask;
	int r;

	r = sched_getaffinity(0, sizeof(possible_mask), &possible_mask);
	TEST_ASSERT(!r, "sched_getaffinity failed, errno = %d (%s)",
		    errno, strerror(errno));

	nr_vcpus = CPU_COUNT(&possible_mask) * 3/4;
	TEST_ASSERT(nr_vcpus > 0, "Uh, no CPUs?");
}

int main(int argc, char *argv[])
{
	/*
	 * Skip the first 4gb and slot0.  slot0 maps <1gb and is used to back
	 * the guest's code, stack, and page tables.  Because selftests creates
	 * an IRQCHIP, a.k.a. a local APIC, KVM creates an internal memslot
	 * just below the 4gb boundary.  This test could create memory at
	 * 1gb-3gb, but it's simpler to skip straight to 4gb.
	 */
	const uint64_t start_gpa = SZ_4G;
	const int first_slot = 1;

	struct timespec time_start, time_run1, time_reset, time_run2;
	uint64_t max_gpa, gpa, slot_size, max_mem, i;
	int max_slots, slot, opt, fd;
	bool hugepages = false;
	struct kvm_vcpu **vcpus;
	pthread_t *threads;
	struct kvm_vm *vm;
	void *mem;

	/*
	 * Default to 2gb so that maxing out systems with MAXPHYADDR=46, which
	 * are quite common for x86, requires changing only max_mem (KVM allows
	 * 32k memslots, 32k * 2gb == ~64tb of guest memory).
	 */
	slot_size = SZ_2G;

	max_slots = kvm_check_cap(KVM_CAP_NR_MEMSLOTS);
	TEST_ASSERT(max_slots > first_slot, "KVM is broken");

	/* All KVM MMUs should be able to survive a 128gb guest. */
	max_mem = 128ull * SZ_1G;

	calc_default_nr_vcpus();

	while ((opt = getopt(argc, argv, "c:hm:s:H")) != -1) {
		switch (opt) {
		case 'c':
			nr_vcpus = atoi_positive("Number of vCPUs", optarg);
			break;
		case 'm':
			max_mem = 1ull * atoi_positive("Memory size", optarg) * SZ_1G;
			break;
		case 's':
			slot_size = 1ull * atoi_positive("Slot size", optarg) * SZ_1G;
			break;
		case 'H':
			hugepages = true;
			break;
		case 'h':
		default:
			printf("usage: %s [-c nr_vcpus] [-m max_mem_in_gb] [-s slot_size_in_gb] [-H]\n", argv[0]);
			exit(1);
		}
	}

	vcpus = malloc(nr_vcpus * sizeof(*vcpus));
	TEST_ASSERT(vcpus, "Failed to allocate vCPU array");

	vm = vm_create_with_vcpus(nr_vcpus, guest_code, vcpus);

	max_gpa = vm->max_gfn << vm->page_shift;
	TEST_ASSERT(max_gpa > (4 * slot_size), "MAXPHYADDR < 4gb");

	fd = kvm_memfd_alloc(slot_size, hugepages);
	mem = mmap(NULL, slot_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	TEST_ASSERT(mem != MAP_FAILED, "mmap() failed");

	TEST_ASSERT(!madvise(mem, slot_size, MADV_NOHUGEPAGE), "madvise() failed");

	/* Pre-fault the memory to avoid taking mmap_sem on guest page faults. */
	for (i = 0; i < slot_size; i += vm->page_size)
		((uint8_t *)mem)[i] = 0xaa;
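
	/*
	 * Carve the guest physical address space into slot_size-byte
	 * memslots, all backed by the same host mapping, i.e. every slot
	 * aliases the single slot_size-byte allocation.  Stop at whichever
	 * limit is hit first: the last slot that fits below max_gpa, max_mem
	 * worth of slots, or KVM's memslot capacity.
	 */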
	gpa = 0;
	for (slot = first_slot; slot < max_slots; slot++) {
		gpa = start_gpa + ((slot - first_slot) * slot_size);
		if (gpa + slot_size > max_gpa)
			break;

		if ((gpa - start_gpa) >= max_mem)
			break;

		vm_set_user_memory_region(vm, slot, 0, gpa, slot_size, mem);

#ifdef __x86_64__
		/* Identity map memory in the guest using 1gb pages. */
		for (i = 0; i < slot_size; i += SZ_1G)
			__virt_pg_map(vm, gpa + i, gpa + i, PG_LEVEL_1G);
#else
		for (i = 0; i < slot_size; i += vm->page_size)
			virt_pg_map(vm, gpa + i, gpa + i);
#endif
	}

	atomic_set(&rendezvous, nr_vcpus + 1);
	threads = spawn_workers(vm, vcpus, start_gpa, gpa);

	free(vcpus);
	vcpus = NULL;

	pr_info("Running with %lugb of guest memory and %u vCPUs\n",
		(gpa - start_gpa) / SZ_1G, nr_vcpus);

	rendezvous_with_vcpus(&time_start, "spawning");
	rendezvous_with_vcpus(&time_run1, "run 1");
	rendezvous_with_vcpus(&time_reset, "reset");
	rendezvous_with_vcpus(&time_run2, "run 2");

	time_run2 = timespec_sub(time_run2, time_reset);
	time_reset = timespec_sub(time_reset, time_run1);
	time_run1 = timespec_sub(time_run1, time_start);

	pr_info("run1 = %ld.%.9lds, reset = %ld.%.9lds, run2 = %ld.%.9lds\n",
		time_run1.tv_sec, time_run1.tv_nsec,
		time_reset.tv_sec, time_reset.tv_nsec,
		time_run2.tv_sec, time_run2.tv_nsec);

	/*
	 * Delete even numbered slots (arbitrary) and unmap the first half of
	 * the backing (also arbitrary) to verify KVM correctly drops all
	 * references to the removed regions.
	 */
	for (slot = (slot - 1) & ~1ull; slot >= first_slot; slot -= 2)
		vm_set_user_memory_region(vm, slot, 0, 0, 0, NULL);

	munmap(mem, slot_size / 2);

	/* Sanity check that the vCPUs actually ran. */
	for (i = 0; i < nr_vcpus; i++)
		pthread_join(threads[i], NULL);

	/*
	 * Deliberately exit without deleting the remaining memslots or closing
	 * kvm_fd to test cleanup via mmu_notifier.release.
	 */
}
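
/*
 * Example invocations, assuming the standard selftests build (which names
 * the binary after this source file):
 *
 *   ./max_guest_memory_test              # defaults: 128gb, 2gb slots
 *   ./max_guest_memory_test -c 16 -m 64  # 16 vCPUs, 64gb of guest memory
 *   ./max_guest_memory_test -s 1 -H      # 1gb slots backed by hugepages
 */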