// SPDX-License-Identifier: GPL-2.0
/*
 * KVM demand paging test
 * Adapted from dirty_log_test.c
 *
 * Copyright (C) 2018, Red Hat, Inc.
 * Copyright (C) 2019, Google, Inc.
 */

#define _GNU_SOURCE /* for pipe2 */

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <poll.h>
#include <pthread.h>
#include <linux/userfaultfd.h>
#include <sys/syscall.h>

#include "kvm_util.h"
#include "test_util.h"
#include "perf_test_util.h"
#include "guest_modes.h"

#ifdef __NR_userfaultfd

#ifdef PRINT_PER_PAGE_UPDATES
#define PER_PAGE_DEBUG(...) printf(__VA_ARGS__)
#else
#define PER_PAGE_DEBUG(...) _no_printf(__VA_ARGS__)
#endif

#ifdef PRINT_PER_VCPU_UPDATES
#define PER_VCPU_DEBUG(...) printf(__VA_ARGS__)
#else
#define PER_VCPU_DEBUG(...) _no_printf(__VA_ARGS__)
#endif

static int nr_vcpus = 1;
static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
static size_t demand_paging_size;
static char *guest_data_prototype;

static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
{
	int ret;
	int vcpu_id = vcpu_args->vcpu_id;
	struct kvm_vm *vm = perf_test_args.vm;
	struct kvm_run *run;
	struct timespec start;
	struct timespec ts_diff;

	run = vcpu_state(vm, vcpu_id);

	clock_gettime(CLOCK_MONOTONIC, &start);

	/* Let the guest access its memory */
	ret = _vcpu_run(vm, vcpu_id);
	TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
	if (get_ucall(vm, vcpu_id, NULL) != UCALL_SYNC) {
		TEST_ASSERT(false,
			    "Invalid guest sync status: exit_reason=%s\n",
			    exit_reason_str(run->exit_reason));
	}

	ts_diff = timespec_elapsed(start);
	PER_VCPU_DEBUG("vCPU %d execution time: %ld.%.9lds\n", vcpu_id,
		       ts_diff.tv_sec, ts_diff.tv_nsec);
}

static int handle_uffd_page_request(int uffd_mode, int uffd, uint64_t addr)
{
	pid_t tid = syscall(__NR_gettid);
	struct timespec start;
	struct timespec ts_diff;
	int r;

	clock_gettime(CLOCK_MONOTONIC, &start);

	if (uffd_mode == UFFDIO_REGISTER_MODE_MISSING) {
		struct uffdio_copy copy;

		copy.src = (uint64_t)guest_data_prototype;
		copy.dst = addr;
		copy.len = demand_paging_size;
		copy.mode = 0;

		r = ioctl(uffd, UFFDIO_COPY, &copy);
		if (r == -1) {
			pr_info("Failed UFFDIO_COPY in 0x%lx from thread %d with errno: %d\n",
				addr, tid, errno);
			return r;
		}
	} else if (uffd_mode == UFFDIO_REGISTER_MODE_MINOR) {
		struct uffdio_continue cont = {0};

		cont.range.start = addr;
		cont.range.len = demand_paging_size;

		r = ioctl(uffd, UFFDIO_CONTINUE, &cont);
		if (r == -1) {
			pr_info("Failed UFFDIO_CONTINUE in 0x%lx from thread %d with errno: %d\n",
				addr, tid, errno);
			return r;
		}
	} else {
		TEST_FAIL("Invalid uffd mode %d", uffd_mode);
	}

	ts_diff = timespec_elapsed(start);

	PER_PAGE_DEBUG("UFFD page-in %d \t%ld ns\n", tid,
		       timespec_to_ns(ts_diff));
	PER_PAGE_DEBUG("Paged in %ld bytes at 0x%lx from thread %d\n",
		       demand_paging_size, addr, tid);

	return 0;
}

bool quit_uffd_thread;

struct uffd_handler_args {
	int uffd_mode;
	int uffd;
	int pipefd;
	useconds_t delay;
};

static void *uffd_handler_thread_fn(void *arg)
{
	struct uffd_handler_args *uffd_args = (struct uffd_handler_args *)arg;
	int uffd = uffd_args->uffd;
	int pipefd = uffd_args->pipefd;
	useconds_t delay = uffd_args->delay;
	int64_t pages = 0;
	struct timespec start;
	struct timespec ts_diff;

	clock_gettime(CLOCK_MONOTONIC, &start);
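	/*
	 * Poll both the userfaultfd and the quit pipe: page fault events are
	 * resolved via handle_uffd_page_request(), and a write on the pipe
	 * from the main thread tells this handler to exit.
	 */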
	while (!quit_uffd_thread) {
		struct uffd_msg msg;
		struct pollfd pollfd[2];
		char tmp_chr;
		int r;
		uint64_t addr;

		pollfd[0].fd = uffd;
		pollfd[0].events = POLLIN;
		pollfd[1].fd = pipefd;
		pollfd[1].events = POLLIN;

		r = poll(pollfd, 2, -1);
		switch (r) {
		case -1:
			pr_info("poll err");
			continue;
		case 0:
			continue;
		case 1:
			break;
		default:
			pr_info("Polling uffd returned %d", r);
			return NULL;
		}

		if (pollfd[0].revents & POLLERR) {
			pr_info("uffd revents has POLLERR");
			return NULL;
		}

		if (pollfd[1].revents & POLLIN) {
			r = read(pollfd[1].fd, &tmp_chr, 1);
			TEST_ASSERT(r == 1,
				    "Error reading pipefd in UFFD thread\n");
			return NULL;
		}

		if (!(pollfd[0].revents & POLLIN))
			continue;

		r = read(uffd, &msg, sizeof(msg));
		if (r == -1) {
			if (errno == EAGAIN)
				continue;
			pr_info("Read of uffd got errno %d\n", errno);
			return NULL;
		}

		if (r != sizeof(msg)) {
			pr_info("Read on uffd returned unexpected size: %d bytes", r);
			return NULL;
		}

		if (!(msg.event & UFFD_EVENT_PAGEFAULT))
			continue;

		if (delay)
			usleep(delay);
		addr = msg.arg.pagefault.address;
		r = handle_uffd_page_request(uffd_args->uffd_mode, uffd, addr);
		if (r < 0)
			return NULL;
		pages++;
	}

	ts_diff = timespec_elapsed(start);
	PER_VCPU_DEBUG("userfaulted %ld pages over %ld.%.9lds. (%f/sec)\n",
		       pages, ts_diff.tv_sec, ts_diff.tv_nsec,
		       pages / ((double)ts_diff.tv_sec + (double)ts_diff.tv_nsec / 1000000000.0));

	return NULL;
}

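/*
 * Create a userfaultfd for the vCPU's HVA range, negotiate UFFD_API, register
 * the range in the requested mode (MISSING or MINOR), and spawn a handler
 * thread to service faults. For MINOR mode the range is prefaulted through
 * the backing source alias so that guest accesses take minor faults.
 */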
static void setup_demand_paging(struct kvm_vm *vm,
				pthread_t *uffd_handler_thread, int pipefd,
				int uffd_mode, useconds_t uffd_delay,
				struct uffd_handler_args *uffd_args,
				void *hva, void *alias, uint64_t len)
{
	bool is_minor = (uffd_mode == UFFDIO_REGISTER_MODE_MINOR);
	int uffd;
	struct uffdio_api uffdio_api;
	struct uffdio_register uffdio_register;
	uint64_t expected_ioctls = ((uint64_t) 1) << _UFFDIO_COPY;

	PER_PAGE_DEBUG("Userfaultfd %s mode, faults resolved with %s\n",
		       is_minor ? "MINOR" : "MISSING",
		       is_minor ? "UFFDIO_CONTINUE" : "UFFDIO_COPY");

	/* In order to get minor faults, prefault via the alias. */
	if (is_minor) {
		size_t p;

		expected_ioctls = ((uint64_t) 1) << _UFFDIO_CONTINUE;

		TEST_ASSERT(alias != NULL, "Alias required for minor faults");
		for (p = 0; p < (len / demand_paging_size); ++p) {
			memcpy(alias + (p * demand_paging_size),
			       guest_data_prototype, demand_paging_size);
		}
	}

	uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
	TEST_ASSERT(uffd >= 0, "uffd creation failed, errno: %d", errno);

	uffdio_api.api = UFFD_API;
	uffdio_api.features = 0;
	TEST_ASSERT(ioctl(uffd, UFFDIO_API, &uffdio_api) != -1,
		    "ioctl UFFDIO_API failed: %" PRIu64,
		    (uint64_t)uffdio_api.api);

	uffdio_register.range.start = (uint64_t)hva;
	uffdio_register.range.len = len;
	uffdio_register.mode = uffd_mode;
	TEST_ASSERT(ioctl(uffd, UFFDIO_REGISTER, &uffdio_register) != -1,
		    "ioctl UFFDIO_REGISTER failed");
	TEST_ASSERT((uffdio_register.ioctls & expected_ioctls) ==
		    expected_ioctls, "missing userfaultfd ioctls");

	uffd_args->uffd_mode = uffd_mode;
	uffd_args->uffd = uffd;
	uffd_args->pipefd = pipefd;
	uffd_args->delay = uffd_delay;
	pthread_create(uffd_handler_thread, NULL, uffd_handler_thread_fn,
		       uffd_args);

	PER_VCPU_DEBUG("Created uffd thread for HVA range [%p, %p)\n",
		       hva, hva + len);
}

struct test_params {
	int uffd_mode;
	useconds_t uffd_delay;
	enum vm_mem_backing_src_type src_type;
	bool partition_vcpu_memory_access;
};

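/*
 * Run the test for one guest mode: create the VM, fill the prototype page
 * with a data pattern, optionally attach one userfaultfd handler per vCPU,
 * then time how long the vCPUs take to fault in all of their memory and
 * report the overall demand paging rate.
 */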
static void run_test(enum vm_guest_mode mode, void *arg)
{
	struct test_params *p = arg;
	pthread_t *uffd_handler_threads = NULL;
	struct uffd_handler_args *uffd_args = NULL;
	struct timespec start;
	struct timespec ts_diff;
	int *pipefds = NULL;
	struct kvm_vm *vm;
	int vcpu_id;
	int r;

	vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size, 1,
				 p->src_type, p->partition_vcpu_memory_access);

	demand_paging_size = get_backing_src_pagesz(p->src_type);

	guest_data_prototype = malloc(demand_paging_size);
	TEST_ASSERT(guest_data_prototype,
		    "Failed to allocate buffer for guest data pattern");
	memset(guest_data_prototype, 0xAB, demand_paging_size);

	if (p->uffd_mode) {
		uffd_handler_threads =
			malloc(nr_vcpus * sizeof(*uffd_handler_threads));
		TEST_ASSERT(uffd_handler_threads, "Memory allocation failed");

		uffd_args = malloc(nr_vcpus * sizeof(*uffd_args));
		TEST_ASSERT(uffd_args, "Memory allocation failed");

		pipefds = malloc(sizeof(int) * nr_vcpus * 2);
		TEST_ASSERT(pipefds, "Unable to allocate memory for pipefd");

		for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
			struct perf_test_vcpu_args *vcpu_args;
			void *vcpu_hva;
			void *vcpu_alias;

			vcpu_args = &perf_test_args.vcpu_args[vcpu_id];

			/* Cache the host addresses of the region */
			vcpu_hva = addr_gpa2hva(vm, vcpu_args->gpa);
			vcpu_alias = addr_gpa2alias(vm, vcpu_args->gpa);

			/*
			 * Set up user fault fd to handle demand paging
			 * requests.
			 */
			r = pipe2(&pipefds[vcpu_id * 2],
				  O_CLOEXEC | O_NONBLOCK);
			TEST_ASSERT(!r, "Failed to set up pipefd");

			setup_demand_paging(vm, &uffd_handler_threads[vcpu_id],
					    pipefds[vcpu_id * 2], p->uffd_mode,
					    p->uffd_delay, &uffd_args[vcpu_id],
					    vcpu_hva, vcpu_alias,
					    vcpu_args->pages * perf_test_args.guest_page_size);
		}
	}

	pr_info("Finished creating vCPUs and starting uffd threads\n");

	clock_gettime(CLOCK_MONOTONIC, &start);
	perf_test_start_vcpu_threads(nr_vcpus, vcpu_worker);
	pr_info("Started all vCPUs\n");

	perf_test_join_vcpu_threads(nr_vcpus);
	ts_diff = timespec_elapsed(start);
	pr_info("All vCPU threads joined\n");

	if (p->uffd_mode) {
		char c;

		/* Tell the user fault fd handler threads to quit */
		for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
			r = write(pipefds[vcpu_id * 2 + 1], &c, 1);
			TEST_ASSERT(r == 1, "Unable to write to pipefd");

			pthread_join(uffd_handler_threads[vcpu_id], NULL);
		}
	}

	pr_info("Total guest execution time: %ld.%.9lds\n",
		ts_diff.tv_sec, ts_diff.tv_nsec);
	pr_info("Overall demand paging rate: %f pgs/sec\n",
		perf_test_args.vcpu_args[0].pages * nr_vcpus /
		((double)ts_diff.tv_sec + (double)ts_diff.tv_nsec / 1000000000.0));

	perf_test_destroy_vm(vm);

	free(guest_data_prototype);
	if (p->uffd_mode) {
		free(uffd_handler_threads);
		free(uffd_args);
		free(pipefds);
	}
}

static void help(char *name)
{
	puts("");
	printf("usage: %s [-h] [-m vm_mode] [-u uffd_mode] [-d uffd_delay_usec]\n"
	       "          [-b memory] [-s type] [-v vcpus] [-o]\n", name);
	guest_modes_help();
	printf(" -u: use userfaultfd to handle vCPU page faults. Mode is a\n"
	       "     UFFD registration mode: 'MISSING' or 'MINOR'.\n");
	printf(" -d: add a delay in usec to the User Fault\n"
	       "     FD handler to simulate demand paging\n"
	       "     overheads. Ignored without -u.\n");
	printf(" -b: specify the size of the memory region which should be\n"
	       "     demand paged by each vCPU. e.g. 10M or 3G.\n"
	       "     Default: 1G\n");
	backing_src_help("-s");
	printf(" -v: specify the number of vCPUs to run.\n");
	printf(" -o: Overlap guest memory accesses instead of partitioning\n"
	       "     them into a separate region of memory for each vCPU.\n");
	puts("");
	exit(0);
}

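/*
 * Parse command line options into test_params and run the test once for each
 * requested guest mode. userfaultfd MINOR mode needs a shared backing source,
 * so incompatible -s choices are rejected before any VM is created.
 */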
int main(int argc, char *argv[])
{
	int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
	struct test_params p = {
		.src_type = DEFAULT_VM_MEM_SRC,
		.partition_vcpu_memory_access = true,
	};
	int opt;

	guest_modes_append_default();

	while ((opt = getopt(argc, argv, "hm:u:d:b:s:v:o")) != -1) {
		switch (opt) {
		case 'm':
			guest_modes_cmdline(optarg);
			break;
		case 'u':
			if (!strcmp("MISSING", optarg))
				p.uffd_mode = UFFDIO_REGISTER_MODE_MISSING;
			else if (!strcmp("MINOR", optarg))
				p.uffd_mode = UFFDIO_REGISTER_MODE_MINOR;
			TEST_ASSERT(p.uffd_mode, "UFFD mode must be 'MISSING' or 'MINOR'.");
			break;
		case 'd':
			p.uffd_delay = strtoul(optarg, NULL, 0);
			TEST_ASSERT(p.uffd_delay >= 0, "A negative UFFD delay is not supported.");
			break;
		case 'b':
			guest_percpu_mem_size = parse_size(optarg);
			break;
		case 's':
			p.src_type = parse_backing_src_type(optarg);
			break;
		case 'v':
			nr_vcpus = atoi(optarg);
			TEST_ASSERT(nr_vcpus > 0 && nr_vcpus <= max_vcpus,
				    "Invalid number of vcpus, must be between 1 and %d", max_vcpus);
			break;
		case 'o':
			p.partition_vcpu_memory_access = false;
			break;
		case 'h':
		default:
			help(argv[0]);
			break;
		}
	}

	if (p.uffd_mode == UFFDIO_REGISTER_MODE_MINOR &&
	    !backing_src_is_shared(p.src_type)) {
		TEST_FAIL("userfaultfd MINOR mode requires shared memory; pick a different -s");
	}

	for_each_guest_mode(run_test, &p);

	return 0;
}

#else /* __NR_userfaultfd */

#warning "missing __NR_userfaultfd definition"

int main(void)
{
	print_skip("__NR_userfaultfd must be present for userfaultfd test");
	return KSFT_SKIP;
}

#endif /* __NR_userfaultfd */