/*
 * Postcopy migration for RAM
 *
 * Copyright 2013-2015 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Dave Gilbert  <dgilbert@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

/*
 * Postcopy is a migration technique where the execution flips from the
 * source to the destination before all the data has been copied.
 */

#include "qemu/osdep.h"
#include "qemu/madvise.h"
#include "exec/target_page.h"
#include "migration.h"
#include "qemu-file.h"
#include "savevm.h"
#include "postcopy-ram.h"
#include "ram.h"
#include "qapi/error.h"
#include "qemu/notify.h"
#include "qemu/rcu.h"
#include "sysemu/sysemu.h"
#include "qemu/error-report.h"
#include "trace.h"
#include "hw/boards.h"
#include "exec/ramblock.h"
#include "socket.h"
#include "yank_functions.h"
#include "tls.h"
#include "qemu/userfaultfd.h"

/* Arbitrary limit on size of each discard command,
 * keeps them around ~200 bytes
 */
#define MAX_DISCARDS_PER_COMMAND 12

struct PostcopyDiscardState {
    const char *ramblock_name;
    uint16_t cur_entry;
    /*
     * Start and length of a discard range (bytes)
     */
    uint64_t start_list[MAX_DISCARDS_PER_COMMAND];
    uint64_t length_list[MAX_DISCARDS_PER_COMMAND];
    unsigned int nsentwords;
    unsigned int nsentcmds;
};

static NotifierWithReturnList postcopy_notifier_list;

void postcopy_infrastructure_init(void)
{
    notifier_with_return_list_init(&postcopy_notifier_list);
}

void postcopy_add_notifier(NotifierWithReturn *nn)
{
    notifier_with_return_list_add(&postcopy_notifier_list, nn);
}

void postcopy_remove_notifier(NotifierWithReturn *n)
{
    notifier_with_return_remove(n);
}

int postcopy_notify(enum PostcopyNotifyReason reason, Error **errp)
{
    struct PostcopyNotifyData pnd;
    pnd.reason = reason;
    pnd.errp = errp;

    return notifier_with_return_list_notify(&postcopy_notifier_list,
                                            &pnd);
}

/*
 * NOTE: this routine is not thread safe, we can't call it concurrently. But it
 * should be good enough for migration's purposes.
 */
void postcopy_thread_create(MigrationIncomingState *mis,
                            QemuThread *thread, const char *name,
                            void *(*fn)(void *), int joinable)
{
    qemu_sem_init(&mis->thread_sync_sem, 0);
    qemu_thread_create(thread, name, fn, mis, joinable);
    qemu_sem_wait(&mis->thread_sync_sem);
    qemu_sem_destroy(&mis->thread_sync_sem);
}

/* Postcopy needs to detect accesses to pages that haven't yet been copied
 * across, and efficiently map new pages in, the techniques for doing this
 * are target OS specific.
 */
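
/*
 * Rough sketch (illustration only, not code used by this file): the Linux
 * userfaultfd lifecycle that the implementation below is built on.  Error
 * handling is omitted; the ioctls and structures come from
 * <linux/userfaultfd.h>.
 *
 *     int ufd = uffd_open(O_CLOEXEC);
 *     struct uffdio_api api = { .api = UFFD_API };
 *     ioctl(ufd, UFFDIO_API, &api);              // handshake, once per fd
 *
 *     struct uffdio_register reg = {
 *         .range = { .start = (uintptr_t)host_addr, .len = length },
 *         .mode  = UFFDIO_REGISTER_MODE_MISSING,
 *     };
 *     ioctl(ufd, UFFDIO_REGISTER, &reg);         // watch this area
 *
 *     // read() a struct uffd_msg when a thread faults, then resolve it:
 *     struct uffdio_copy copy = { .dst = dst, .src = tmp, .len = pagesize };
 *     ioctl(ufd, UFFDIO_COPY, &copy);            // place page, wake faulter
 */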

#if defined(__linux__)

#include <poll.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <asm/types.h> /* for __u64 */
#endif

#if defined(__linux__) && defined(__NR_userfaultfd) && defined(CONFIG_EVENTFD)
#include <sys/eventfd.h>
#include <linux/userfaultfd.h>

typedef struct PostcopyBlocktimeContext {
    /* time when page fault initiated per vCPU */
    uint32_t *page_fault_vcpu_time;
    /* page address per vCPU */
    uintptr_t *vcpu_addr;
    uint32_t total_blocktime;
    /* blocktime per vCPU */
    uint32_t *vcpu_blocktime;
    /* point in time when last page fault was initiated */
    uint32_t last_begin;
    /* number of vCPUs that are currently suspended */
    int smp_cpus_down;
    uint64_t start_time;

    /*
     * Handler for exit event, necessary for
     * releasing whole blocktime_ctx
     */
    Notifier exit_notifier;
} PostcopyBlocktimeContext;

static void destroy_blocktime_context(struct PostcopyBlocktimeContext *ctx)
{
    g_free(ctx->page_fault_vcpu_time);
    g_free(ctx->vcpu_addr);
    g_free(ctx->vcpu_blocktime);
    g_free(ctx);
}

static void migration_exit_cb(Notifier *n, void *data)
{
    PostcopyBlocktimeContext *ctx = container_of(n, PostcopyBlocktimeContext,
                                                 exit_notifier);
    destroy_blocktime_context(ctx);
}

static struct PostcopyBlocktimeContext *blocktime_context_new(void)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    unsigned int smp_cpus = ms->smp.cpus;
    PostcopyBlocktimeContext *ctx = g_new0(PostcopyBlocktimeContext, 1);
    ctx->page_fault_vcpu_time = g_new0(uint32_t, smp_cpus);
    ctx->vcpu_addr = g_new0(uintptr_t, smp_cpus);
    ctx->vcpu_blocktime = g_new0(uint32_t, smp_cpus);

    ctx->exit_notifier.notify = migration_exit_cb;
    ctx->start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    qemu_add_exit_notifier(&ctx->exit_notifier);
    return ctx;
}

static uint32List *get_vcpu_blocktime_list(PostcopyBlocktimeContext *ctx)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    uint32List *list = NULL;
    int i;

    for (i = ms->smp.cpus - 1; i >= 0; i--) {
        QAPI_LIST_PREPEND(list, ctx->vcpu_blocktime[i]);
    }

    return list;
}

/*
 * This function just populates MigrationInfo from postcopy's
 * blocktime context. It will only populate MigrationInfo if the
 * postcopy-blocktime capability was set.
 *
 * @info: pointer to MigrationInfo to populate
 */
void fill_destination_postcopy_migration_info(MigrationInfo *info)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *bc = mis->blocktime_ctx;

    if (!bc) {
        return;
    }

    info->has_postcopy_blocktime = true;
    info->postcopy_blocktime = bc->total_blocktime;
    info->has_postcopy_vcpu_blocktime = true;
    info->postcopy_vcpu_blocktime = get_vcpu_blocktime_list(bc);
}

static uint32_t get_postcopy_total_blocktime(void)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *bc = mis->blocktime_ctx;

    if (!bc) {
        return 0;
    }

    return bc->total_blocktime;
}
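
/*
 * Illustration (not part of the original source): with the
 * postcopy-blocktime capability enabled, the fields filled in above end up
 * in the destination's query-migrate / "info migrate" output, roughly as
 *
 *     postcopy-blocktime: 347                      <- total, milliseconds
 *     postcopy-vcpu-blocktime: [12, 0, 210, 125]   <- one entry per vCPU
 *
 * The numbers here are made up; only the field names come from
 * MigrationInfo.
 */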

/**
 * receive_ufd_features: check userfault fd features, to request only supported
 * features in the future.
 *
 * Returns: true on success
 *
 * __NR_userfaultfd availability should be checked before calling this.
 * @features: out parameter will contain uffdio_api.features provided by kernel
 *            in case of success
 */
static bool receive_ufd_features(uint64_t *features)
{
    struct uffdio_api api_struct = {0};
    int ufd;
    bool ret = true;

    ufd = uffd_open(O_CLOEXEC);
    if (ufd == -1) {
        error_report("%s: uffd_open() failed: %s", __func__, strerror(errno));
        return false;
    }

    /* ask features */
    api_struct.api = UFFD_API;
    api_struct.features = 0;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        error_report("%s: UFFDIO_API failed: %s", __func__,
                     strerror(errno));
        ret = false;
        goto release_ufd;
    }

    *features = api_struct.features;

release_ufd:
    close(ufd);
    return ret;
}

/**
 * request_ufd_features: this function should be called only once on a newly
 * opened ufd; subsequent calls will lead to an error.
 *
 * Returns: true on success
 *
 * @ufd: fd obtained from userfaultfd syscall
 * @features: bit mask, see UFFD_API_FEATURES
 */
static bool request_ufd_features(int ufd, uint64_t features)
{
    struct uffdio_api api_struct = {0};
    uint64_t ioctl_mask;

    api_struct.api = UFFD_API;
    api_struct.features = features;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        error_report("%s failed: UFFDIO_API failed: %s", __func__,
                     strerror(errno));
        return false;
    }

    ioctl_mask = (__u64)1 << _UFFDIO_REGISTER |
                 (__u64)1 << _UFFDIO_UNREGISTER;
    if ((api_struct.ioctls & ioctl_mask) != ioctl_mask) {
        error_report("Missing userfault features: %" PRIx64,
                     (uint64_t)(~api_struct.ioctls & ioctl_mask));
        return false;
    }

    return true;
}

static bool ufd_check_and_apply(int ufd, MigrationIncomingState *mis)
{
    uint64_t asked_features = 0;
    static uint64_t supported_features;

    /*
     * It's not possible to request UFFD_API twice per fd;
     * userfaultfd features are persistent.
     */
    if (!supported_features) {
        if (!receive_ufd_features(&supported_features)) {
            error_report("%s failed", __func__);
            return false;
        }
    }

#ifdef UFFD_FEATURE_THREAD_ID
    if (UFFD_FEATURE_THREAD_ID & supported_features) {
        asked_features |= UFFD_FEATURE_THREAD_ID;
        if (migrate_postcopy_blocktime()) {
            if (!mis->blocktime_ctx) {
                mis->blocktime_ctx = blocktime_context_new();
            }
        }
    }
#endif

    /*
     * Request features even if asked_features is 0, because the kernel
     * expects UFFD_API before UFFDIO_REGISTER on each userfault file
     * descriptor.
     */
    if (!request_ufd_features(ufd, asked_features)) {
        error_report("%s failed: features %" PRIu64, __func__,
                     asked_features);
        return false;
    }

    if (qemu_real_host_page_size() != ram_pagesize_summary()) {
        bool have_hp = false;
        /* We've got a huge page */
#ifdef UFFD_FEATURE_MISSING_HUGETLBFS
        have_hp = supported_features & UFFD_FEATURE_MISSING_HUGETLBFS;
#endif
        if (!have_hp) {
            error_report("Userfault on this host does not support huge pages");
            return false;
        }
    }
    return true;
}

/*
 * Callback from postcopy_ram_supported_by_host block iterator.
 */
static int test_ramblock_postcopiable(RAMBlock *rb, void *opaque)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    ram_addr_t length = qemu_ram_get_used_length(rb);
    size_t pagesize = qemu_ram_pagesize(rb);

    if (length % pagesize) {
        error_report("Postcopy requires RAM blocks to be a page size multiple,"
                     " block %s is 0x" RAM_ADDR_FMT " bytes with a "
                     "page size of 0x%zx", block_name, length, pagesize);
        return 1;
    }
    return 0;
}

/*
 * Note: This has the side effect of munlock'ing all of RAM, that's
 * normally fine since if the postcopy succeeds it gets turned back on at the
 * end.
 */
bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
{
    long pagesize = qemu_real_host_page_size();
    int ufd = -1;
    bool ret = false; /* Error unless we change it */
    void *testarea = NULL;
    struct uffdio_register reg_struct;
    struct uffdio_range range_struct;
    uint64_t feature_mask;
    Error *local_err = NULL;

    if (qemu_target_page_size() > pagesize) {
        error_report("Target page size bigger than host page size");
        goto out;
    }

    ufd = uffd_open(O_CLOEXEC);
    if (ufd == -1) {
        error_report("%s: userfaultfd not available: %s", __func__,
                     strerror(errno));
        goto out;
    }

    /* Give devices a chance to object */
    if (postcopy_notify(POSTCOPY_NOTIFY_PROBE, &local_err)) {
        error_report_err(local_err);
        goto out;
    }

    /* Version and features check */
    if (!ufd_check_and_apply(ufd, mis)) {
        goto out;
    }

    /* We don't support postcopy with shared RAM yet */
    if (foreach_not_ignored_block(test_ramblock_postcopiable, NULL)) {
        goto out;
    }

    /*
     * userfault and mlock don't go together; we'll put it back later if
     * it was enabled.
     */
    if (munlockall()) {
        error_report("%s: munlockall: %s", __func__, strerror(errno));
        goto out;
    }

    /*
     * We need to check that the ops we need are supported on anon memory
     * To do that we need to register a chunk and see the flags that
     * are returned.
     */
    testarea = mmap(NULL, pagesize, PROT_READ | PROT_WRITE, MAP_PRIVATE |
                                    MAP_ANONYMOUS, -1, 0);
    if (testarea == MAP_FAILED) {
        error_report("%s: Failed to map test area: %s", __func__,
                     strerror(errno));
        goto out;
    }
    g_assert(QEMU_PTR_IS_ALIGNED(testarea, pagesize));

    reg_struct.range.start = (uintptr_t)testarea;
    reg_struct.range.len = pagesize;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    if (ioctl(ufd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        goto out;
    }

    range_struct.start = (uintptr_t)testarea;
    range_struct.len = pagesize;
    if (ioctl(ufd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s userfault unregister: %s", __func__, strerror(errno));
        goto out;
    }

    feature_mask = (__u64)1 << _UFFDIO_WAKE |
                   (__u64)1 << _UFFDIO_COPY |
                   (__u64)1 << _UFFDIO_ZEROPAGE;
    if ((reg_struct.ioctls & feature_mask) != feature_mask) {
        error_report("Missing userfault map features: %" PRIx64,
                     (uint64_t)(~reg_struct.ioctls & feature_mask));
        goto out;
    }

    /* Success! */
    ret = true;
out:
    if (testarea) {
        munmap(testarea, pagesize);
    }
    if (ufd != -1) {
        close(ufd);
    }
    return ret;
}

/*
 * Setup an area of RAM so that it *can* be used for postcopy later; this
 * must be done right at the start prior to pre-copy.
 * opaque should be the MIS.
 */
static int init_range(RAMBlock *rb, void *opaque)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    void *host_addr = qemu_ram_get_host_addr(rb);
    ram_addr_t offset = qemu_ram_get_offset(rb);
    ram_addr_t length = qemu_ram_get_used_length(rb);
    trace_postcopy_init_range(block_name, host_addr, offset, length);

    /*
     * Save the used_length before running the guest. In case we have to
     * resize RAM blocks when syncing RAM block sizes from the source during
     * precopy, we'll update it manually via the ram block notifier.
     */
    rb->postcopy_length = length;

    /*
     * We need the whole of RAM to be truly empty for postcopy, so things
     * like ROMs and any data tables built during init must be zero'd
     * - we're going to get the copy from the source anyway.
     * (Precopy will just overwrite this data, so doesn't need the discard)
     */
    if (ram_discard_range(block_name, 0, length)) {
        return -1;
    }

    return 0;
}

/*
 * At the end of migration, undo the effects of init_range
 * opaque should be the MIS.
 */
static int cleanup_range(RAMBlock *rb, void *opaque)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    void *host_addr = qemu_ram_get_host_addr(rb);
    ram_addr_t offset = qemu_ram_get_offset(rb);
    ram_addr_t length = rb->postcopy_length;
    MigrationIncomingState *mis = opaque;
    struct uffdio_range range_struct;
    trace_postcopy_cleanup_range(block_name, host_addr, offset, length);

    /*
     * We turned off hugepage for the precopy stage with postcopy enabled;
     * we can turn it back on now.
     */
    qemu_madvise(host_addr, length, QEMU_MADV_HUGEPAGE);

    /*
     * We can also turn off userfault now since we should have all the
     * pages.  It can be useful to leave it on to debug postcopy
     * if you're not sure it's always getting every page.
     */
    range_struct.start = (uintptr_t)host_addr;
    range_struct.len = length;

    if (ioctl(mis->userfault_fd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s: userfault unregister %s", __func__, strerror(errno));

        return -1;
    }

    return 0;
}

/*
 * Initialise postcopy-ram, setting the RAM to a state where we can go into
 * postcopy later; must be called prior to any precopy.
 * Called from arch_init's similarly named ram_postcopy_incoming_init.
 */
int postcopy_ram_incoming_init(MigrationIncomingState *mis)
{
    if (foreach_not_ignored_block(init_range, NULL)) {
        return -1;
    }

    return 0;
}

static void postcopy_temp_pages_cleanup(MigrationIncomingState *mis)
{
    int i;

    if (mis->postcopy_tmp_pages) {
        for (i = 0; i < mis->postcopy_channels; i++) {
            if (mis->postcopy_tmp_pages[i].tmp_huge_page) {
                munmap(mis->postcopy_tmp_pages[i].tmp_huge_page,
                       mis->largest_page_size);
                mis->postcopy_tmp_pages[i].tmp_huge_page = NULL;
            }
        }
        g_free(mis->postcopy_tmp_pages);
        mis->postcopy_tmp_pages = NULL;
    }

    if (mis->postcopy_tmp_zero_page) {
        munmap(mis->postcopy_tmp_zero_page, mis->largest_page_size);
        mis->postcopy_tmp_zero_page = NULL;
    }
}

/*
 * At the end of a migration where postcopy_ram_incoming_init was called.
 */
int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    trace_postcopy_ram_incoming_cleanup_entry();

    if (mis->preempt_thread_status == PREEMPT_THREAD_CREATED) {
        /* Notify the fast load thread to quit */
        mis->preempt_thread_status = PREEMPT_THREAD_QUIT;
        if (mis->postcopy_qemufile_dst) {
            qemu_file_shutdown(mis->postcopy_qemufile_dst);
        }
        qemu_thread_join(&mis->postcopy_prio_thread);
        mis->preempt_thread_status = PREEMPT_THREAD_NONE;
    }

    if (mis->have_fault_thread) {
        Error *local_err = NULL;

        /* Let the fault thread quit */
        qatomic_set(&mis->fault_thread_quit, 1);
        postcopy_fault_thread_notify(mis);
        trace_postcopy_ram_incoming_cleanup_join();
        qemu_thread_join(&mis->fault_thread);

        if (postcopy_notify(POSTCOPY_NOTIFY_INBOUND_END, &local_err)) {
            error_report_err(local_err);
            return -1;
        }

        if (foreach_not_ignored_block(cleanup_range, mis)) {
            return -1;
        }

        trace_postcopy_ram_incoming_cleanup_closeuf();
        close(mis->userfault_fd);
        close(mis->userfault_event_fd);
        mis->have_fault_thread = false;
    }

    if (enable_mlock) {
        if (os_mlock() < 0) {
            error_report("mlock: %s", strerror(errno));
            /*
             * It doesn't feel right to fail at this point, we have a valid
             * VM state.
             */
        }
    }

    postcopy_temp_pages_cleanup(mis);

    trace_postcopy_ram_incoming_cleanup_blocktime(
        get_postcopy_total_blocktime());

    trace_postcopy_ram_incoming_cleanup_exit();
    return 0;
}
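
/*
 * Overview (illustration only): on the destination, incoming postcopy
 * moves through the PostcopyState values roughly as
 *
 *     ADVISE -> DISCARD -> LISTENING -> RUNNING -> END
 *
 * postcopy_ram_prepare_discard() below is what moves us into DISCARD,
 * before the source sends its discard bitmap; the fault thread and the
 * userfault registration are set up later, before the guest starts
 * running on the destination.
 */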

/*
 * Disable huge pages on an area
 */
static int nhp_range(RAMBlock *rb, void *opaque)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    void *host_addr = qemu_ram_get_host_addr(rb);
    ram_addr_t offset = qemu_ram_get_offset(rb);
    ram_addr_t length = rb->postcopy_length;
    trace_postcopy_nhp_range(block_name, host_addr, offset, length);

    /*
     * Before we do discards we need to ensure those discards really
     * do delete areas of the page, even if THP thinks a hugepage would
     * be a good idea, so force hugepages off.
     */
    qemu_madvise(host_addr, length, QEMU_MADV_NOHUGEPAGE);

    return 0;
}

/*
 * Userfault requires us to mark RAM as NOHUGEPAGE prior to discard;
 * however, leaving it until after precopy means that most of the precopy
 * data is still THPd.
 */
int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
    if (foreach_not_ignored_block(nhp_range, mis)) {
        return -1;
    }

    postcopy_state_set(POSTCOPY_INCOMING_DISCARD);

    return 0;
}

/*
 * Mark the given area of RAM as requiring notification on accesses to
 * unwritten areas.
 * Used as a callback on foreach_not_ignored_block.
 *   host_addr: Base of area to mark
 *   offset: Offset in the whole ram arena
 *   length: Length of the section
 *   opaque: MigrationIncomingState pointer
 * Returns 0 on success
 */
static int ram_block_enable_notify(RAMBlock *rb, void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffdio_register reg_struct;

    reg_struct.range.start = (uintptr_t)qemu_ram_get_host_addr(rb);
    reg_struct.range.len = rb->postcopy_length;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    /* Now tell our userfault_fd that it's responsible for this area */
    if (ioctl(mis->userfault_fd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        return -1;
    }
    if (!(reg_struct.ioctls & ((__u64)1 << _UFFDIO_COPY))) {
        error_report("%s userfault: Region doesn't support COPY", __func__);
        return -1;
    }
    if (reg_struct.ioctls & ((__u64)1 << _UFFDIO_ZEROPAGE)) {
        qemu_ram_set_uf_zeroable(rb);
    }

    return 0;
}

int postcopy_wake_shared(struct PostCopyFD *pcfd,
                         uint64_t client_addr,
                         RAMBlock *rb)
{
    size_t pagesize = qemu_ram_pagesize(rb);
    struct uffdio_range range;
    int ret;
    trace_postcopy_wake_shared(client_addr, qemu_ram_get_idstr(rb));
    range.start = ROUND_DOWN(client_addr, pagesize);
    range.len = pagesize;
    ret = ioctl(pcfd->fd, UFFDIO_WAKE, &range);
    if (ret) {
        error_report("%s: Failed to wake: %zx in %s (%s)",
                     __func__, (size_t)client_addr, qemu_ram_get_idstr(rb),
                     strerror(errno));
    }
    return ret;
}
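
/*
 * Note on shared/external userfaultfds (illustration, not new behaviour):
 * an external process that shares guest memory (e.g. a vhost-user backend)
 * can hand QEMU its own userfaultfd via postcopy_register_shared_ufd().
 * Faults reported on that fd are resolved by the registered
 * PostCopyFD->handler(), and once the page has been placed the stalled
 * thread is released with UFFDIO_WAKE, which is what postcopy_wake_shared()
 * above does.
 */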

static int postcopy_request_page(MigrationIncomingState *mis, RAMBlock *rb,
                                 ram_addr_t start, uint64_t haddr)
{
    void *aligned = (void *)(uintptr_t)ROUND_DOWN(haddr, qemu_ram_pagesize(rb));

    /*
     * Discarded pages (via RamDiscardManager) are never migrated. On unlikely
     * access, place a zeropage, which will also set the relevant bits in the
     * recv_bitmap accordingly, so we won't try placing a zeropage twice.
     *
     * Checking a single bit is sufficient to handle pagesize > TPS as either
     * all relevant bits are set or not.
     */
    assert(QEMU_IS_ALIGNED(start, qemu_ram_pagesize(rb)));
    if (ramblock_page_is_discarded(rb, start)) {
        bool received = ramblock_recv_bitmap_test_byte_offset(rb, start);

        return received ? 0 : postcopy_place_page_zero(mis, aligned, rb);
    }

    return migrate_send_rp_req_pages(mis, rb, start, haddr);
}

/*
 * Callback from shared fault handlers to ask for a page,
 * the page must be specified by a RAMBlock and an offset in that rb
 * Note: Only for use by shared fault handlers (in fault thread)
 */
int postcopy_request_shared_page(struct PostCopyFD *pcfd, RAMBlock *rb,
                                 uint64_t client_addr, uint64_t rb_offset)
{
    uint64_t aligned_rbo = ROUND_DOWN(rb_offset, qemu_ram_pagesize(rb));
    MigrationIncomingState *mis = migration_incoming_get_current();

    trace_postcopy_request_shared_page(pcfd->idstr, qemu_ram_get_idstr(rb),
                                       rb_offset);
    if (ramblock_recv_bitmap_test_byte_offset(rb, aligned_rbo)) {
        trace_postcopy_request_shared_page_present(pcfd->idstr,
                                                   qemu_ram_get_idstr(rb),
                                                   rb_offset);
        return postcopy_wake_shared(pcfd, client_addr, rb);
    }
    postcopy_request_page(mis, rb, aligned_rbo, client_addr);
    return 0;
}

static int get_mem_fault_cpu_index(uint32_t pid)
{
    CPUState *cpu_iter;

    CPU_FOREACH(cpu_iter) {
        if (cpu_iter->thread_id == pid) {
            trace_get_mem_fault_cpu_index(cpu_iter->cpu_index, pid);
            return cpu_iter->cpu_index;
        }
    }
    trace_get_mem_fault_cpu_index(-1, pid);
    return -1;
}

static uint32_t get_low_time_offset(PostcopyBlocktimeContext *dc)
{
    int64_t start_time_offset = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) -
                                dc->start_time;
    return start_time_offset < 1 ? 1 : start_time_offset & UINT32_MAX;
}

/*
 * This function is called when a page fault occurs. It tracks the
 * vCPU blocking time.
 *
 * @addr: faulted host virtual address
 * @ptid: faulted process thread id
 * @rb: ramblock appropriate to addr
 */
static void mark_postcopy_blocktime_begin(uintptr_t addr, uint32_t ptid,
                                          RAMBlock *rb)
{
    int cpu, already_received;
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *dc = mis->blocktime_ctx;
    uint32_t low_time_offset;

    if (!dc || ptid == 0) {
        return;
    }
    cpu = get_mem_fault_cpu_index(ptid);
    if (cpu < 0) {
        return;
    }

    low_time_offset = get_low_time_offset(dc);
    if (dc->vcpu_addr[cpu] == 0) {
        qatomic_inc(&dc->smp_cpus_down);
    }

    qatomic_xchg(&dc->last_begin, low_time_offset);
    qatomic_xchg(&dc->page_fault_vcpu_time[cpu], low_time_offset);
    qatomic_xchg(&dc->vcpu_addr[cpu], addr);

    /*
     * Check it here rather than at the beginning of the function,
     * because this check could happen earlier than bitmap_set in
     * qemu_ufd_copy_ioctl.
     */
    already_received = ramblock_recv_bitmap_test(rb, (void *)addr);
    if (already_received) {
        qatomic_xchg(&dc->vcpu_addr[cpu], 0);
        qatomic_xchg(&dc->page_fault_vcpu_time[cpu], 0);
        qatomic_dec(&dc->smp_cpus_down);
    }
    trace_mark_postcopy_blocktime_begin(addr, dc, dc->page_fault_vcpu_time[cpu],
                                        cpu, already_received);
}

/*
 * This function just provides the calculated blocktime per vCPU and
 * traces it. Total blocktime is calculated in mark_postcopy_blocktime_end.
 *
 *
 * Assume we have 3 CPUs
 *
 *      S1        E1           S1               E1
 * -----***********------------xxx***************------------------------> CPU1
 *
 *             S2                E2
 * ------------****************xxx---------------------------------------> CPU2
 *
 *                         S3            E3
 * ------------------------****xxx********-------------------------------> CPU3
 *
 * We have sequence S1,S2,E1,S3,S1,E2,E3,E1
 * S2,E1 - doesn't match condition due to sequence S1,S2,E1 doesn't include CPU3
 * S3,S1,E2 - sequence includes all CPUs, in this case overlap will be S1,E2 -
 *            it's a part of total blocktime.
 * S1 - here is last_begin
 * Legend of the picture is following:
 *              * - means blocktime per vCPU
 *              x - means overlapped blocktime (total blocktime)
 *
 * @addr: host virtual address
 */
static void mark_postcopy_blocktime_end(uintptr_t addr)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *dc = mis->blocktime_ctx;
    MachineState *ms = MACHINE(qdev_get_machine());
    unsigned int smp_cpus = ms->smp.cpus;
    int i, affected_cpu = 0;
    bool vcpu_total_blocktime = false;
    uint32_t read_vcpu_time, low_time_offset;

    if (!dc) {
        return;
    }

    low_time_offset = get_low_time_offset(dc);
    /*
     * Look up the CPU to clear it. This algorithm looks straightforward
     * but it's not optimal; a better algorithm would keep a tree or hash
     * where the key is an address and the value is a list of vCPUs.
     */
    for (i = 0; i < smp_cpus; i++) {
        uint32_t vcpu_blocktime = 0;

        read_vcpu_time = qatomic_fetch_add(&dc->page_fault_vcpu_time[i], 0);
        if (qatomic_fetch_add(&dc->vcpu_addr[i], 0) != addr ||
            read_vcpu_time == 0) {
            continue;
        }
        qatomic_xchg(&dc->vcpu_addr[i], 0);
        vcpu_blocktime = low_time_offset - read_vcpu_time;
        affected_cpu += 1;
        /*
         * We need to know whether mark_postcopy_blocktime_end() was called
         * for a faulted page; the other possible case is a prefetched page,
         * and in that case we shouldn't be here.
         */
        if (!vcpu_total_blocktime &&
            qatomic_fetch_add(&dc->smp_cpus_down, 0) == smp_cpus) {
            vcpu_total_blocktime = true;
        }
        /* continue cycle, due to one page could affect several vCPUs */
        dc->vcpu_blocktime[i] += vcpu_blocktime;
    }

    qatomic_sub(&dc->smp_cpus_down, affected_cpu);
    if (vcpu_total_blocktime) {
        dc->total_blocktime += low_time_offset - qatomic_fetch_add(
                &dc->last_begin, 0);
    }
    trace_mark_postcopy_blocktime_end(addr, dc, dc->total_blocktime,
                                      affected_cpu);
}

static void postcopy_pause_fault_thread(MigrationIncomingState *mis)
{
    trace_postcopy_pause_fault_thread();
    qemu_sem_wait(&mis->postcopy_pause_sem_fault);
    trace_postcopy_pause_fault_thread_continued();
}

/*
 * Handle faults detected by the USERFAULT markings
 */
static void *postcopy_ram_fault_thread(void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffd_msg msg;
    int ret;
    size_t index;
    RAMBlock *rb = NULL;

    trace_postcopy_ram_fault_thread_entry();
    rcu_register_thread();
    mis->last_rb = NULL; /* last RAMBlock we sent part of */
    qemu_sem_post(&mis->thread_sync_sem);

    struct pollfd *pfd;
    size_t pfd_len = 2 + mis->postcopy_remote_fds->len;

    pfd = g_new0(struct pollfd, pfd_len);

    pfd[0].fd = mis->userfault_fd;
    pfd[0].events = POLLIN;
    pfd[1].fd = mis->userfault_event_fd;
    pfd[1].events = POLLIN; /* Waiting for eventfd to go positive */
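
    /*
     * Poll set layout: slot 0 is the kernel userfault_fd, slot 1 is the
     * eventfd used to ask this thread to quit (or re-check state), and
     * slots 2..N cover any userfaultfds registered by external sharers
     * (e.g. vhost-user backends) via postcopy_register_shared_ufd().
     */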
    trace_postcopy_ram_fault_thread_fds_core(pfd[0].fd, pfd[1].fd);
    for (index = 0; index < mis->postcopy_remote_fds->len; index++) {
        struct PostCopyFD *pcfd = &g_array_index(mis->postcopy_remote_fds,
                                                 struct PostCopyFD, index);
        pfd[2 + index].fd = pcfd->fd;
        pfd[2 + index].events = POLLIN;
        trace_postcopy_ram_fault_thread_fds_extra(2 + index, pcfd->idstr,
                                                  pcfd->fd);
    }

    while (true) {
        ram_addr_t rb_offset;
        int poll_result;

        /*
         * We're mainly waiting for the kernel to give us a faulting HVA,
         * however we can be told to quit via userfault_event_fd which is
         * an eventfd
         */

        poll_result = poll(pfd, pfd_len, -1 /* Wait forever */);
        if (poll_result == -1) {
            error_report("%s: userfault poll: %s", __func__, strerror(errno));
            break;
        }

        if (!mis->to_src_file) {
            /*
             * Possibly someone tells us that the return path is
             * broken already using the event. We should hold until
             * the channel is rebuilt.
             */
            postcopy_pause_fault_thread(mis);
        }

        if (pfd[1].revents) {
            uint64_t tmp64 = 0;

            /* Consume the signal */
            if (read(mis->userfault_event_fd, &tmp64, 8) != 8) {
                /* Nothing obviously nicer than posting this error. */
                error_report("%s: read() failed", __func__);
            }

            if (qatomic_read(&mis->fault_thread_quit)) {
                trace_postcopy_ram_fault_thread_quit();
                break;
            }
        }

        if (pfd[0].revents) {
            poll_result--;
            ret = read(mis->userfault_fd, &msg, sizeof(msg));
            if (ret != sizeof(msg)) {
                if (errno == EAGAIN) {
                    /*
                     * if a wake up happens on the other thread just after
                     * the poll, there is nothing to read.
                     */
                    continue;
                }
                if (ret < 0) {
                    error_report("%s: Failed to read full userfault "
                                 "message: %s",
                                 __func__, strerror(errno));
                    break;
                } else {
                    error_report("%s: Read %d bytes from userfaultfd "
                                 "expected %zd",
                                 __func__, ret, sizeof(msg));
                    break; /* Lost alignment, don't know what we'd read next */
                }
            }
            if (msg.event != UFFD_EVENT_PAGEFAULT) {
                error_report("%s: Read unexpected event %ud from userfaultfd",
                             __func__, msg.event);
                continue; /* It's not a page fault, shouldn't happen */
            }

            rb = qemu_ram_block_from_host(
                     (void *)(uintptr_t)msg.arg.pagefault.address,
                     true, &rb_offset);
            if (!rb) {
                error_report("postcopy_ram_fault_thread: Fault outside guest: %"
                             PRIx64, (uint64_t)msg.arg.pagefault.address);
                break;
            }

            rb_offset = ROUND_DOWN(rb_offset, qemu_ram_pagesize(rb));
            trace_postcopy_ram_fault_thread_request(msg.arg.pagefault.address,
                                                    qemu_ram_get_idstr(rb),
                                                    rb_offset,
                                                    msg.arg.pagefault.feat.ptid);
            mark_postcopy_blocktime_begin(
                (uintptr_t)(msg.arg.pagefault.address),
                msg.arg.pagefault.feat.ptid, rb);

retry:
            /*
             * Send the request to the source - we want to request one
             * of our host page sizes (which is >= TPS)
             */
            ret = postcopy_request_page(mis, rb, rb_offset,
                                        msg.arg.pagefault.address);
            if (ret) {
                /* May be network failure, try to wait for recovery */
                postcopy_pause_fault_thread(mis);
                goto retry;
            }
        }

        /* Now handle any requests from external processes on shared memory */
        /* TODO: May need to handle devices deregistering during postcopy */
        for (index = 2; index < pfd_len && poll_result; index++) {
            if (pfd[index].revents) {
                struct PostCopyFD *pcfd =
                    &g_array_index(mis->postcopy_remote_fds,
                                   struct PostCopyFD, index - 2);

                poll_result--;
                if (pfd[index].revents & POLLERR) {
                    error_report("%s: POLLERR on poll %zd fd=%d",
                                 __func__, index, pcfd->fd);
                    pfd[index].events = 0;
                    continue;
                }

                ret = read(pcfd->fd, &msg, sizeof(msg));
                if (ret != sizeof(msg)) {
                    if (errno == EAGAIN) {
                        /*
                         * if a wake up happens on the other thread just after
                         * the poll, there is nothing to read.
                         */
                        continue;
                    }
                    if (ret < 0) {
                        error_report("%s: Failed to read full userfault "
                                     "message: %s (shared) revents=%d",
                                     __func__, strerror(errno),
                                     pfd[index].revents);
                        /* TODO: Could just disable this sharer */
                        break;
                    } else {
                        error_report("%s: Read %d bytes from userfaultfd "
                                     "expected %zd (shared)",
                                     __func__, ret, sizeof(msg));
                        /* TODO: Could just disable this sharer */
                        break; /* Lost alignment, don't know what we'd read next */
                    }
                }
                if (msg.event != UFFD_EVENT_PAGEFAULT) {
                    error_report("%s: Read unexpected event %ud "
                                 "from userfaultfd (shared)",
                                 __func__, msg.event);
                    continue; /* It's not a page fault, shouldn't happen */
                }
                /* Call the device handler registered with us */
                ret = pcfd->handler(pcfd, &msg);
                if (ret) {
                    error_report("%s: Failed to resolve shared fault on %zd/%s",
                                 __func__, index, pcfd->idstr);
                    /* TODO: Fail? Disable this sharer? */
                }
            }
        }
    }
    rcu_unregister_thread();
    trace_postcopy_ram_fault_thread_exit();
    g_free(pfd);
    return NULL;
}

static int postcopy_temp_pages_setup(MigrationIncomingState *mis)
{
    PostcopyTmpPage *tmp_page;
    int err, i, channels;
    void *temp_page;

    if (migrate_postcopy_preempt()) {
        /* If preemption enabled, need extra channel for urgent requests */
        mis->postcopy_channels = RAM_CHANNEL_MAX;
    } else {
        /* Both precopy/postcopy on the same channel */
        mis->postcopy_channels = 1;
    }

    channels = mis->postcopy_channels;
    mis->postcopy_tmp_pages = g_malloc0_n(sizeof(PostcopyTmpPage), channels);

    for (i = 0; i < channels; i++) {
        tmp_page = &mis->postcopy_tmp_pages[i];
        temp_page = mmap(NULL, mis->largest_page_size, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (temp_page == MAP_FAILED) {
            err = errno;
            error_report("%s: Failed to map postcopy_tmp_pages[%d]: %s",
                         __func__, i, strerror(err));
            /* Clean up will be done later */
            return -err;
        }
        tmp_page->tmp_huge_page = temp_page;
        /* Initialize default states for each tmp page */
        postcopy_temp_page_reset(tmp_page);
    }

    /*
     * Map large zero page when kernel can't use UFFDIO_ZEROPAGE for hugepages
     */
    mis->postcopy_tmp_zero_page = mmap(NULL, mis->largest_page_size,
                                       PROT_READ | PROT_WRITE,
                                       MAP_PRIVATE | MAP_ANONYMOUS,
                                       -1, 0);
    if (mis->postcopy_tmp_zero_page == MAP_FAILED) {
        err = errno;
        mis->postcopy_tmp_zero_page = NULL;
        error_report("%s: Failed to map large zero page %s",
                     __func__, strerror(err));
        return -err;
    }

    memset(mis->postcopy_tmp_zero_page, '\0', mis->largest_page_size);

    return 0;
}
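
/*
 * Background (illustration only): each incoming channel gets one staging
 * buffer of largest_page_size.  When a RAM block uses huge pages, the load
 * path (ram_load_postcopy(), outside this file) accumulates the incoming
 * target-sized pages in tmp_huge_page until a whole host page is complete,
 * and only then installs it atomically with UFFDIO_COPY via
 * postcopy_place_page().  The separate postcopy_tmp_zero_page exists
 * because UFFDIO_ZEROPAGE may not be usable on e.g. hugetlbfs-backed
 * memory, in which case a pre-zeroed buffer is copied in instead.
 */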

int postcopy_ram_incoming_setup(MigrationIncomingState *mis)
{
    /* Open the fd for the kernel to give us userfaults */
    mis->userfault_fd = uffd_open(O_CLOEXEC | O_NONBLOCK);
    if (mis->userfault_fd == -1) {
        error_report("%s: Failed to open userfault fd: %s", __func__,
                     strerror(errno));
        return -1;
    }

    /*
     * Although the host check already tested the API, we need to
     * do the check again as an ABI handshake on the new fd.
     */
    if (!ufd_check_and_apply(mis->userfault_fd, mis)) {
        return -1;
    }

    /* Now an eventfd we use to tell the fault-thread to quit */
    mis->userfault_event_fd = eventfd(0, EFD_CLOEXEC);
    if (mis->userfault_event_fd == -1) {
        error_report("%s: Opening userfault_event_fd: %s", __func__,
                     strerror(errno));
        close(mis->userfault_fd);
        return -1;
    }

    postcopy_thread_create(mis, &mis->fault_thread, "fault-default",
                           postcopy_ram_fault_thread, QEMU_THREAD_JOINABLE);
    mis->have_fault_thread = true;

    /* Mark so that we get notified of accesses to unwritten areas */
    if (foreach_not_ignored_block(ram_block_enable_notify, mis)) {
        error_report("ram_block_enable_notify failed");
        return -1;
    }

    if (postcopy_temp_pages_setup(mis)) {
        /* Error dumped in the sub-function */
        return -1;
    }

    if (migrate_postcopy_preempt()) {
        /*
         * This thread needs to be created after the temp pages because
         * it'll fetch RAM_CHANNEL_POSTCOPY PostcopyTmpPage immediately.
         */
        postcopy_thread_create(mis, &mis->postcopy_prio_thread, "fault-fast",
                               postcopy_preempt_thread, QEMU_THREAD_JOINABLE);
        mis->preempt_thread_status = PREEMPT_THREAD_CREATED;
    }

    trace_postcopy_ram_enable_notify();

    return 0;
}

static int qemu_ufd_copy_ioctl(MigrationIncomingState *mis, void *host_addr,
                               void *from_addr, uint64_t pagesize, RAMBlock *rb)
{
    int userfault_fd = mis->userfault_fd;
    int ret;

    if (from_addr) {
        struct uffdio_copy copy_struct;
        copy_struct.dst = (uint64_t)(uintptr_t)host_addr;
        copy_struct.src = (uint64_t)(uintptr_t)from_addr;
        copy_struct.len = pagesize;
        copy_struct.mode = 0;
        ret = ioctl(userfault_fd, UFFDIO_COPY, &copy_struct);
    } else {
        struct uffdio_zeropage zero_struct;
        zero_struct.range.start = (uint64_t)(uintptr_t)host_addr;
        zero_struct.range.len = pagesize;
        zero_struct.mode = 0;
        ret = ioctl(userfault_fd, UFFDIO_ZEROPAGE, &zero_struct);
    }
    if (!ret) {
        qemu_mutex_lock(&mis->page_request_mutex);
        ramblock_recv_bitmap_set_range(rb, host_addr,
                                       pagesize / qemu_target_page_size());
        /*
         * If this page resolves a page fault for a previous recorded faulted
         * address, take a special note to maintain the requested page list.
         */
        if (g_tree_lookup(mis->page_requested, host_addr)) {
            g_tree_remove(mis->page_requested, host_addr);
            mis->page_requested_count--;
            trace_postcopy_page_req_del(host_addr, mis->page_requested_count);
        }
        qemu_mutex_unlock(&mis->page_request_mutex);
        mark_postcopy_blocktime_end((uintptr_t)host_addr);
    }
    return ret;
}

int postcopy_notify_shared_wake(RAMBlock *rb, uint64_t offset)
{
    int i;
    MigrationIncomingState *mis = migration_incoming_get_current();
    GArray *pcrfds = mis->postcopy_remote_fds;

    for (i = 0; i < pcrfds->len; i++) {
        struct PostCopyFD *cur = &g_array_index(pcrfds, struct PostCopyFD, i);
        int ret = cur->waker(cur, rb, offset);
        if (ret) {
            return ret;
        }
    }
    return 0;
}

/*
 * Place a host page (from) at (host) atomically
 * returns 0 on success
 */
int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
                        RAMBlock *rb)
{
    size_t pagesize = qemu_ram_pagesize(rb);

    /* copy also acks to the kernel waking the stalled thread up
     * TODO: We can inhibit that ack and only do it if it was requested
     * which would be slightly cheaper, but we'd have to be careful
     * of the order of updating our page state.
     */
    if (qemu_ufd_copy_ioctl(mis, host, from, pagesize, rb)) {
        int e = errno;
        error_report("%s: %s copy host: %p from: %p (size: %zd)",
                     __func__, strerror(e), host, from, pagesize);

        return -e;
    }

    trace_postcopy_place_page(host);
    return postcopy_notify_shared_wake(rb,
                                       qemu_ram_block_host_offset(rb, host));
}

/*
 * Place a zero page at (host) atomically
 * returns 0 on success
 */
int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
                             RAMBlock *rb)
{
    size_t pagesize = qemu_ram_pagesize(rb);
    trace_postcopy_place_page_zero(host);

    /* Normal RAMBlocks can zero a page using UFFDIO_ZEROPAGE
     * but it's not available for everything (e.g. hugetlbpages)
     */
    if (qemu_ram_is_uf_zeroable(rb)) {
        if (qemu_ufd_copy_ioctl(mis, host, NULL, pagesize, rb)) {
            int e = errno;
            error_report("%s: %s zero host: %p",
                         __func__, strerror(e), host);

            return -e;
        }
        return postcopy_notify_shared_wake(rb,
                                           qemu_ram_block_host_offset(rb,
                                                                      host));
    } else {
        return postcopy_place_page(mis, host, mis->postcopy_tmp_zero_page, rb);
    }
}

#else
/* No target OS support, stubs just fail */
void fill_destination_postcopy_migration_info(MigrationInfo *info)
{
}

bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
{
    error_report("%s: No OS support", __func__);
    return false;
}

int postcopy_ram_incoming_init(MigrationIncomingState *mis)
{
    error_report("postcopy_ram_incoming_init: No OS support");
    return -1;
}

int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_request_shared_page(struct PostCopyFD *pcfd, RAMBlock *rb,
                                 uint64_t client_addr, uint64_t rb_offset)
{
    assert(0);
    return -1;
}

int postcopy_ram_incoming_setup(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
                        RAMBlock *rb)
{
    assert(0);
    return -1;
}

int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
                             RAMBlock *rb)
{
    assert(0);
    return -1;
}

int postcopy_wake_shared(struct PostCopyFD *pcfd,
                         uint64_t client_addr,
                         RAMBlock *rb)
{
    assert(0);
    return -1;
}
#endif

/* ------------------------------------------------------------------------- */
void postcopy_temp_page_reset(PostcopyTmpPage *tmp_page)
{
    tmp_page->target_pages = 0;
    tmp_page->host_addr = NULL;
    /*
     * This is set to true when reset, and cleared once we have received
     * any non-zero small page within this huge page.
     */
    tmp_page->all_zero = true;
}

void postcopy_fault_thread_notify(MigrationIncomingState *mis)
{
    uint64_t tmp64 = 1;

    /*
     * Wakeup the fault_thread. It's an eventfd that should currently
     * be at 0, we're going to increment it to 1
     */
    if (write(mis->userfault_event_fd, &tmp64, 8) != 8) {
        /* Not much we can do here, but may as well report it */
        error_report("%s: incrementing failed: %s", __func__,
                     strerror(errno));
    }
}

/**
 * postcopy_discard_send_init: Called at the start of each RAMBlock before
 * asking to discard individual ranges.
 *
 * @ms: The current migration state.
 * @name: RAMBlock that discards will operate on.
 */
static PostcopyDiscardState pds = {0};
void postcopy_discard_send_init(MigrationState *ms, const char *name)
{
    pds.ramblock_name = name;
    pds.cur_entry = 0;
    pds.nsentwords = 0;
    pds.nsentcmds = 0;
}
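
/*
 * Typical use by the source-side bitmap code (sketch only, callers live
 * outside this file):
 *
 *     postcopy_discard_send_init(ms, block->idstr);
 *     // for each run of dirty-bitmap pages that must be re-fetched:
 *     postcopy_discard_send_range(ms, start_page, nr_pages);
 *     ...
 *     postcopy_discard_send_finish(ms);
 *
 * Ranges are batched MAX_DISCARDS_PER_COMMAND (12) at a time so that each
 * RAM discard command stays around ~200 bytes on the wire.
 */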

/**
 * postcopy_discard_send_range: Called by the bitmap code for each chunk to
 * discard. May send a discard message, may just leave it queued to
 * be sent later.
 *
 * @ms: Current migration state.
 * @start,@length: a range of pages in the migration bitmap in the
 *   RAM block passed to postcopy_discard_send_init() (length=1 is one page)
 */
void postcopy_discard_send_range(MigrationState *ms, unsigned long start,
                                 unsigned long length)
{
    size_t tp_size = qemu_target_page_size();
    /* Convert to byte offsets within the RAM block */
    pds.start_list[pds.cur_entry] = start * tp_size;
    pds.length_list[pds.cur_entry] = length * tp_size;
    trace_postcopy_discard_send_range(pds.ramblock_name, start, length);
    pds.cur_entry++;
    pds.nsentwords++;

    if (pds.cur_entry == MAX_DISCARDS_PER_COMMAND) {
        /* Full set, ship it! */
        qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
                                              pds.ramblock_name,
                                              pds.cur_entry,
                                              pds.start_list,
                                              pds.length_list);
        pds.nsentcmds++;
        pds.cur_entry = 0;
    }
}

/**
 * postcopy_discard_send_finish: Called at the end of each RAMBlock by the
 * bitmap code. Sends any outstanding discard messages.
 *
 * @ms: Current migration state.
 */
void postcopy_discard_send_finish(MigrationState *ms)
{
    /* Anything unsent? */
    if (pds.cur_entry) {
        qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
                                              pds.ramblock_name,
                                              pds.cur_entry,
                                              pds.start_list,
                                              pds.length_list);
        pds.nsentcmds++;
    }

    trace_postcopy_discard_send_finish(pds.ramblock_name, pds.nsentwords,
                                       pds.nsentcmds);
}

/*
 * Current state of incoming postcopy; note this is not part of
 * MigrationIncomingState since its state is used during cleanup
 * at the end as MIS is being freed.
 */
static PostcopyState incoming_postcopy_state;

PostcopyState postcopy_state_get(void)
{
    return qatomic_mb_read(&incoming_postcopy_state);
}

/* Set the state and return the old state */
PostcopyState postcopy_state_set(PostcopyState new_state)
{
    return qatomic_xchg(&incoming_postcopy_state, new_state);
}

/* Register a handler for external shared memory postcopy
 * called on the destination.
 */
void postcopy_register_shared_ufd(struct PostCopyFD *pcfd)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    mis->postcopy_remote_fds = g_array_append_val(mis->postcopy_remote_fds,
                                                  *pcfd);
}

/* Unregister a handler for external shared memory postcopy
 */
void postcopy_unregister_shared_ufd(struct PostCopyFD *pcfd)
{
    guint i;
    MigrationIncomingState *mis = migration_incoming_get_current();
    GArray *pcrfds = mis->postcopy_remote_fds;

    if (!pcrfds) {
        /* migration has already finished and freed the array */
        return;
    }
    for (i = 0; i < pcrfds->len; i++) {
        struct PostCopyFD *cur = &g_array_index(pcrfds, struct PostCopyFD, i);
        if (cur->fd == pcfd->fd) {
            mis->postcopy_remote_fds = g_array_remove_index(pcrfds, i);
            return;
        }
    }
}
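
/*
 * Overview of the preempt channel (illustration only): with the
 * postcopy-preempt capability, urgent pages, i.e. pages the destination is
 * actually faulting on, travel over a dedicated migration channel so they
 * do not queue behind the background page stream.  On the destination that
 * channel is drained by the "fault-fast" thread created in
 * postcopy_ram_incoming_setup() (postcopy_preempt_thread(), using
 * RAM_CHANNEL_POSTCOPY); the functions below establish the sending side of
 * the channel and hand the incoming side over to that thread.
 */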

void postcopy_preempt_new_channel(MigrationIncomingState *mis, QEMUFile *file)
{
    /*
     * The new loading channel has its own threads, so it needs to be
     * blocked too.  It's by default true, just be explicit.
     */
    qemu_file_set_blocking(file, true);
    mis->postcopy_qemufile_dst = file;
    qemu_sem_post(&mis->postcopy_qemufile_dst_done);
    trace_postcopy_preempt_new_channel();
}

/*
 * Setup the postcopy preempt channel with the IOC. If ERROR is specified,
 * setup the error instead. This helper will free the ERROR if specified.
 */
static void
postcopy_preempt_send_channel_done(MigrationState *s,
                                   QIOChannel *ioc, Error *local_err)
{
    if (local_err) {
        migrate_set_error(s, local_err);
        error_free(local_err);
    } else {
        migration_ioc_register_yank(ioc);
        s->postcopy_qemufile_src = qemu_file_new_output(ioc);
        trace_postcopy_preempt_new_channel();
    }

    /*
     * Kick the waiter in all cases. The waiter should check upon
     * postcopy_qemufile_src to know whether it failed or not.
     */
    qemu_sem_post(&s->postcopy_qemufile_src_sem);
}

static void
postcopy_preempt_tls_handshake(QIOTask *task, gpointer opaque)
{
    g_autoptr(QIOChannel) ioc = QIO_CHANNEL(qio_task_get_source(task));
    MigrationState *s = opaque;
    Error *local_err = NULL;

    qio_task_propagate_error(task, &local_err);
    postcopy_preempt_send_channel_done(s, ioc, local_err);
}

static void
postcopy_preempt_send_channel_new(QIOTask *task, gpointer opaque)
{
    g_autoptr(QIOChannel) ioc = QIO_CHANNEL(qio_task_get_source(task));
    MigrationState *s = opaque;
    QIOChannelTLS *tioc;
    Error *local_err = NULL;

    if (qio_task_propagate_error(task, &local_err)) {
        goto out;
    }

    if (migrate_channel_requires_tls_upgrade(ioc)) {
        tioc = migration_tls_client_create(s, ioc, s->hostname, &local_err);
        if (!tioc) {
            goto out;
        }
        trace_postcopy_preempt_tls_handshake();
        qio_channel_set_name(QIO_CHANNEL(tioc), "migration-tls-preempt");
        qio_channel_tls_handshake(tioc, postcopy_preempt_tls_handshake,
                                  s, NULL, NULL);
        /* Setup the channel until TLS handshake finished */
        return;
    }

out:
    /* This handles both good and error cases */
    postcopy_preempt_send_channel_done(s, ioc, local_err);
}

/*
 * This function will kick off an async task to establish the preempt
 * channel, and wait until the connection setup completed. Returns 0 if
 * channel established, -1 for error.
 */
int postcopy_preempt_establish_channel(MigrationState *s)
{
    /* If preempt not enabled, no need to wait */
    if (!migrate_postcopy_preempt()) {
        return 0;
    }

    /*
     * Kick off async task to establish preempt channel. Only do so with
     * 8.0+ machines, because 7.1/7.2 require the channel to be created in
     * setup phase of migration (even if racy in an unreliable network).
     */
    if (!s->preempt_pre_7_2) {
        postcopy_preempt_setup(s);
    }

    /*
     * We need the postcopy preempt channel to be established before
     * starting doing anything.
     */
    qemu_sem_wait(&s->postcopy_qemufile_src_sem);

    return s->postcopy_qemufile_src ? 0 : -1;
}

void postcopy_preempt_setup(MigrationState *s)
{
    /* Kick an async task to connect */
    socket_send_channel_create(postcopy_preempt_send_channel_new, s);
}

static void postcopy_pause_ram_fast_load(MigrationIncomingState *mis)
{
    trace_postcopy_pause_fast_load();
    qemu_mutex_unlock(&mis->postcopy_prio_thread_mutex);
    qemu_sem_wait(&mis->postcopy_pause_sem_fast_load);
    qemu_mutex_lock(&mis->postcopy_prio_thread_mutex);
    trace_postcopy_pause_fast_load_continued();
}

static bool preempt_thread_should_run(MigrationIncomingState *mis)
{
    return mis->preempt_thread_status != PREEMPT_THREAD_QUIT;
}

void *postcopy_preempt_thread(void *opaque)
{
    MigrationIncomingState *mis = opaque;
    int ret;

    trace_postcopy_preempt_thread_entry();

    rcu_register_thread();

    qemu_sem_post(&mis->thread_sync_sem);

    /*
     * The preempt channel is established in asynchronous way.  Wait
     * for its completion.
     */
    qemu_sem_wait(&mis->postcopy_qemufile_dst_done);

    /* Sending RAM_SAVE_FLAG_EOS to terminate this thread */
    qemu_mutex_lock(&mis->postcopy_prio_thread_mutex);
    while (preempt_thread_should_run(mis)) {
        ret = ram_load_postcopy(mis->postcopy_qemufile_dst,
                                RAM_CHANNEL_POSTCOPY);
        /* If error happened, go into recovery routine */
        if (ret && preempt_thread_should_run(mis)) {
            postcopy_pause_ram_fast_load(mis);
        } else {
            /* We're done */
            break;
        }
    }
    qemu_mutex_unlock(&mis->postcopy_prio_thread_mutex);

    rcu_unregister_thread();

    trace_postcopy_preempt_thread_exit();

    return NULL;
}