/*
 * Postcopy migration for RAM
 *
 * Copyright 2013-2015 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Dave Gilbert  <dgilbert@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

/*
 * Postcopy is a migration technique where the execution flips from the
 * source to the destination before all the data has been copied.
 */

#include "qemu/osdep.h"
#include "exec/target_page.h"
#include "migration.h"
#include "qemu-file.h"
#include "savevm.h"
#include "postcopy-ram.h"
#include "ram.h"
#include "qapi/error.h"
#include "qemu/notify.h"
#include "qemu/rcu.h"
#include "sysemu/sysemu.h"
#include "qemu/error-report.h"
#include "trace.h"
#include "hw/boards.h"

/* Arbitrary limit on size of each discard command,
 * keeps them around ~200 bytes
 */
#define MAX_DISCARDS_PER_COMMAND 12

struct PostcopyDiscardState {
    const char *ramblock_name;
    uint16_t cur_entry;
    /*
     * Start and length of a discard range (bytes)
     */
    uint64_t start_list[MAX_DISCARDS_PER_COMMAND];
    uint64_t length_list[MAX_DISCARDS_PER_COMMAND];
    unsigned int nsentwords;
    unsigned int nsentcmds;
};

static NotifierWithReturnList postcopy_notifier_list;

void postcopy_infrastructure_init(void)
{
    notifier_with_return_list_init(&postcopy_notifier_list);
}

void postcopy_add_notifier(NotifierWithReturn *nn)
{
    notifier_with_return_list_add(&postcopy_notifier_list, nn);
}

void postcopy_remove_notifier(NotifierWithReturn *n)
{
    notifier_with_return_remove(n);
}

int postcopy_notify(enum PostcopyNotifyReason reason, Error **errp)
{
    struct PostcopyNotifyData pnd;
    pnd.reason = reason;
    pnd.errp = errp;

    return notifier_with_return_list_notify(&postcopy_notifier_list,
                                            &pnd);
}
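/*
 * Illustrative only (a hypothetical device, not code from this file):
 * a subsystem that cannot work across postcopy can register a notifier
 * and object when probed, which makes postcopy_notify() above fail and
 * aborts the postcopy attempt:
 *
 *   static int mydev_postcopy_notify(NotifierWithReturn *n, void *opaque)
 *   {
 *       struct PostcopyNotifyData *pnd = opaque;
 *       if (pnd->reason == POSTCOPY_NOTIFY_PROBE) {
 *           error_setg(pnd->errp, "mydev does not support postcopy");
 *           return -ENOENT;
 *       }
 *       return 0;
 *   }
 *   ...
 *   mydev_notifier.notify = mydev_postcopy_notify;
 *   postcopy_add_notifier(&mydev_notifier);
 */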
81 */ 82 #if defined(__linux__) 83 84 #include <poll.h> 85 #include <sys/ioctl.h> 86 #include <sys/syscall.h> 87 #include <asm/types.h> /* for __u64 */ 88 #endif 89 90 #if defined(__linux__) && defined(__NR_userfaultfd) && defined(CONFIG_EVENTFD) 91 #include <sys/eventfd.h> 92 #include <linux/userfaultfd.h> 93 94 typedef struct PostcopyBlocktimeContext { 95 /* time when page fault initiated per vCPU */ 96 uint32_t *page_fault_vcpu_time; 97 /* page address per vCPU */ 98 uintptr_t *vcpu_addr; 99 uint32_t total_blocktime; 100 /* blocktime per vCPU */ 101 uint32_t *vcpu_blocktime; 102 /* point in time when last page fault was initiated */ 103 uint32_t last_begin; 104 /* number of vCPU are suspended */ 105 int smp_cpus_down; 106 uint64_t start_time; 107 108 /* 109 * Handler for exit event, necessary for 110 * releasing whole blocktime_ctx 111 */ 112 Notifier exit_notifier; 113 } PostcopyBlocktimeContext; 114 115 static void destroy_blocktime_context(struct PostcopyBlocktimeContext *ctx) 116 { 117 g_free(ctx->page_fault_vcpu_time); 118 g_free(ctx->vcpu_addr); 119 g_free(ctx->vcpu_blocktime); 120 g_free(ctx); 121 } 122 123 static void migration_exit_cb(Notifier *n, void *data) 124 { 125 PostcopyBlocktimeContext *ctx = container_of(n, PostcopyBlocktimeContext, 126 exit_notifier); 127 destroy_blocktime_context(ctx); 128 } 129 130 static struct PostcopyBlocktimeContext *blocktime_context_new(void) 131 { 132 MachineState *ms = MACHINE(qdev_get_machine()); 133 unsigned int smp_cpus = ms->smp.cpus; 134 PostcopyBlocktimeContext *ctx = g_new0(PostcopyBlocktimeContext, 1); 135 ctx->page_fault_vcpu_time = g_new0(uint32_t, smp_cpus); 136 ctx->vcpu_addr = g_new0(uintptr_t, smp_cpus); 137 ctx->vcpu_blocktime = g_new0(uint32_t, smp_cpus); 138 139 ctx->exit_notifier.notify = migration_exit_cb; 140 ctx->start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 141 qemu_add_exit_notifier(&ctx->exit_notifier); 142 return ctx; 143 } 144 145 static uint32List *get_vcpu_blocktime_list(PostcopyBlocktimeContext *ctx) 146 { 147 MachineState *ms = MACHINE(qdev_get_machine()); 148 uint32List *list = NULL, *entry = NULL; 149 int i; 150 151 for (i = ms->smp.cpus - 1; i >= 0; i--) { 152 entry = g_new0(uint32List, 1); 153 entry->value = ctx->vcpu_blocktime[i]; 154 entry->next = list; 155 list = entry; 156 } 157 158 return list; 159 } 160 161 /* 162 * This function just populates MigrationInfo from postcopy's 163 * blocktime context. It will not populate MigrationInfo, 164 * unless postcopy-blocktime capability was set. 165 * 166 * @info: pointer to MigrationInfo to populate 167 */ 168 void fill_destination_postcopy_migration_info(MigrationInfo *info) 169 { 170 MigrationIncomingState *mis = migration_incoming_get_current(); 171 PostcopyBlocktimeContext *bc = mis->blocktime_ctx; 172 173 if (!bc) { 174 return; 175 } 176 177 info->has_postcopy_blocktime = true; 178 info->postcopy_blocktime = bc->total_blocktime; 179 info->has_postcopy_vcpu_blocktime = true; 180 info->postcopy_vcpu_blocktime = get_vcpu_blocktime_list(bc); 181 } 182 183 static uint32_t get_postcopy_total_blocktime(void) 184 { 185 MigrationIncomingState *mis = migration_incoming_get_current(); 186 PostcopyBlocktimeContext *bc = mis->blocktime_ctx; 187 188 if (!bc) { 189 return 0; 190 } 191 192 return bc->total_blocktime; 193 } 194 195 /** 196 * receive_ufd_features: check userfault fd features, to request only supported 197 * features in the future. 
198 * 199 * Returns: true on success 200 * 201 * __NR_userfaultfd - should be checked before 202 * @features: out parameter will contain uffdio_api.features provided by kernel 203 * in case of success 204 */ 205 static bool receive_ufd_features(uint64_t *features) 206 { 207 struct uffdio_api api_struct = {0}; 208 int ufd; 209 bool ret = true; 210 211 /* if we are here __NR_userfaultfd should exists */ 212 ufd = syscall(__NR_userfaultfd, O_CLOEXEC); 213 if (ufd == -1) { 214 error_report("%s: syscall __NR_userfaultfd failed: %s", __func__, 215 strerror(errno)); 216 return false; 217 } 218 219 /* ask features */ 220 api_struct.api = UFFD_API; 221 api_struct.features = 0; 222 if (ioctl(ufd, UFFDIO_API, &api_struct)) { 223 error_report("%s: UFFDIO_API failed: %s", __func__, 224 strerror(errno)); 225 ret = false; 226 goto release_ufd; 227 } 228 229 *features = api_struct.features; 230 231 release_ufd: 232 close(ufd); 233 return ret; 234 } 235 236 /** 237 * request_ufd_features: this function should be called only once on a newly 238 * opened ufd, subsequent calls will lead to error. 239 * 240 * Returns: true on success 241 * 242 * @ufd: fd obtained from userfaultfd syscall 243 * @features: bit mask see UFFD_API_FEATURES 244 */ 245 static bool request_ufd_features(int ufd, uint64_t features) 246 { 247 struct uffdio_api api_struct = {0}; 248 uint64_t ioctl_mask; 249 250 api_struct.api = UFFD_API; 251 api_struct.features = features; 252 if (ioctl(ufd, UFFDIO_API, &api_struct)) { 253 error_report("%s failed: UFFDIO_API failed: %s", __func__, 254 strerror(errno)); 255 return false; 256 } 257 258 ioctl_mask = (__u64)1 << _UFFDIO_REGISTER | 259 (__u64)1 << _UFFDIO_UNREGISTER; 260 if ((api_struct.ioctls & ioctl_mask) != ioctl_mask) { 261 error_report("Missing userfault features: %" PRIx64, 262 (uint64_t)(~api_struct.ioctls & ioctl_mask)); 263 return false; 264 } 265 266 return true; 267 } 268 269 static bool ufd_check_and_apply(int ufd, MigrationIncomingState *mis) 270 { 271 uint64_t asked_features = 0; 272 static uint64_t supported_features; 273 274 /* 275 * it's not possible to 276 * request UFFD_API twice per one fd 277 * userfault fd features is persistent 278 */ 279 if (!supported_features) { 280 if (!receive_ufd_features(&supported_features)) { 281 error_report("%s failed", __func__); 282 return false; 283 } 284 } 285 286 #ifdef UFFD_FEATURE_THREAD_ID 287 if (migrate_postcopy_blocktime() && mis && 288 UFFD_FEATURE_THREAD_ID & supported_features) { 289 /* kernel supports that feature */ 290 /* don't create blocktime_context if it exists */ 291 if (!mis->blocktime_ctx) { 292 mis->blocktime_ctx = blocktime_context_new(); 293 } 294 295 asked_features |= UFFD_FEATURE_THREAD_ID; 296 } 297 #endif 298 299 /* 300 * request features, even if asked_features is 0, due to 301 * kernel expects UFFD_API before UFFDIO_REGISTER, per 302 * userfault file descriptor 303 */ 304 if (!request_ufd_features(ufd, asked_features)) { 305 error_report("%s failed: features %" PRIu64, __func__, 306 asked_features); 307 return false; 308 } 309 310 if (qemu_real_host_page_size != ram_pagesize_summary()) { 311 bool have_hp = false; 312 /* We've got a huge page */ 313 #ifdef UFFD_FEATURE_MISSING_HUGETLBFS 314 have_hp = supported_features & UFFD_FEATURE_MISSING_HUGETLBFS; 315 #endif 316 if (!have_hp) { 317 error_report("Userfault on this host does not support huge pages"); 318 return false; 319 } 320 } 321 return true; 322 } 323 324 /* Callback from postcopy_ram_supported_by_host block iterator. 
325 */ 326 static int test_ramblock_postcopiable(RAMBlock *rb, void *opaque) 327 { 328 const char *block_name = qemu_ram_get_idstr(rb); 329 ram_addr_t length = qemu_ram_get_used_length(rb); 330 size_t pagesize = qemu_ram_pagesize(rb); 331 332 if (length % pagesize) { 333 error_report("Postcopy requires RAM blocks to be a page size multiple," 334 " block %s is 0x" RAM_ADDR_FMT " bytes with a " 335 "page size of 0x%zx", block_name, length, pagesize); 336 return 1; 337 } 338 return 0; 339 } 340 341 /* 342 * Note: This has the side effect of munlock'ing all of RAM, that's 343 * normally fine since if the postcopy succeeds it gets turned back on at the 344 * end. 345 */ 346 bool postcopy_ram_supported_by_host(MigrationIncomingState *mis) 347 { 348 long pagesize = qemu_real_host_page_size; 349 int ufd = -1; 350 bool ret = false; /* Error unless we change it */ 351 void *testarea = NULL; 352 struct uffdio_register reg_struct; 353 struct uffdio_range range_struct; 354 uint64_t feature_mask; 355 Error *local_err = NULL; 356 357 if (qemu_target_page_size() > pagesize) { 358 error_report("Target page size bigger than host page size"); 359 goto out; 360 } 361 362 ufd = syscall(__NR_userfaultfd, O_CLOEXEC); 363 if (ufd == -1) { 364 error_report("%s: userfaultfd not available: %s", __func__, 365 strerror(errno)); 366 goto out; 367 } 368 369 /* Give devices a chance to object */ 370 if (postcopy_notify(POSTCOPY_NOTIFY_PROBE, &local_err)) { 371 error_report_err(local_err); 372 goto out; 373 } 374 375 /* Version and features check */ 376 if (!ufd_check_and_apply(ufd, mis)) { 377 goto out; 378 } 379 380 /* We don't support postcopy with shared RAM yet */ 381 if (foreach_not_ignored_block(test_ramblock_postcopiable, NULL)) { 382 goto out; 383 } 384 385 /* 386 * userfault and mlock don't go together; we'll put it back later if 387 * it was enabled. 388 */ 389 if (munlockall()) { 390 error_report("%s: munlockall: %s", __func__, strerror(errno)); 391 goto out; 392 } 393 394 /* 395 * We need to check that the ops we need are supported on anon memory 396 * To do that we need to register a chunk and see the flags that 397 * are returned. 398 */ 399 testarea = mmap(NULL, pagesize, PROT_READ | PROT_WRITE, MAP_PRIVATE | 400 MAP_ANONYMOUS, -1, 0); 401 if (testarea == MAP_FAILED) { 402 error_report("%s: Failed to map test area: %s", __func__, 403 strerror(errno)); 404 goto out; 405 } 406 g_assert(((size_t)testarea & (pagesize-1)) == 0); 407 408 reg_struct.range.start = (uintptr_t)testarea; 409 reg_struct.range.len = pagesize; 410 reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING; 411 412 if (ioctl(ufd, UFFDIO_REGISTER, ®_struct)) { 413 error_report("%s userfault register: %s", __func__, strerror(errno)); 414 goto out; 415 } 416 417 range_struct.start = (uintptr_t)testarea; 418 range_struct.len = pagesize; 419 if (ioctl(ufd, UFFDIO_UNREGISTER, &range_struct)) { 420 error_report("%s userfault unregister: %s", __func__, strerror(errno)); 421 goto out; 422 } 423 424 feature_mask = (__u64)1 << _UFFDIO_WAKE | 425 (__u64)1 << _UFFDIO_COPY | 426 (__u64)1 << _UFFDIO_ZEROPAGE; 427 if ((reg_struct.ioctls & feature_mask) != feature_mask) { 428 error_report("Missing userfault map features: %" PRIx64, 429 (uint64_t)(~reg_struct.ioctls & feature_mask)); 430 goto out; 431 } 432 433 /* Success! 
    ret = true;
out:
    if (testarea) {
        munmap(testarea, pagesize);
    }
    if (ufd != -1) {
        close(ufd);
    }
    return ret;
}

/*
 * Setup an area of RAM so that it *can* be used for postcopy later; this
 * must be done right at the start prior to pre-copy.
 * opaque should be the MIS.
 */
static int init_range(RAMBlock *rb, void *opaque)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    void *host_addr = qemu_ram_get_host_addr(rb);
    ram_addr_t offset = qemu_ram_get_offset(rb);
    ram_addr_t length = qemu_ram_get_used_length(rb);
    trace_postcopy_init_range(block_name, host_addr, offset, length);

    /*
     * We need the whole of RAM to be truly empty for postcopy, so things
     * like ROMs and any data tables built during init must be zero'd
     * - we're going to get the copy from the source anyway.
     * (Precopy will just overwrite this data, so doesn't need the discard)
     */
    if (ram_discard_range(block_name, 0, length)) {
        return -1;
    }

    return 0;
}

/*
 * At the end of migration, undo the effects of init_range.
 * opaque should be the MIS.
 */
static int cleanup_range(RAMBlock *rb, void *opaque)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    void *host_addr = qemu_ram_get_host_addr(rb);
    ram_addr_t offset = qemu_ram_get_offset(rb);
    ram_addr_t length = qemu_ram_get_used_length(rb);
    MigrationIncomingState *mis = opaque;
    struct uffdio_range range_struct;
    trace_postcopy_cleanup_range(block_name, host_addr, offset, length);

    /*
     * We turned off hugepages for the precopy stage when postcopy was
     * enabled; we can turn them back on now.
     */
    qemu_madvise(host_addr, length, QEMU_MADV_HUGEPAGE);

    /*
     * We can also turn off userfault now since we should have all the
     * pages. It can be useful to leave it on to debug postcopy
     * if you're not sure it's always getting every page.
     */
    range_struct.start = (uintptr_t)host_addr;
    range_struct.len = length;

    if (ioctl(mis->userfault_fd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s: userfault unregister %s", __func__, strerror(errno));

        return -1;
    }

    return 0;
}

/*
 * Initialise postcopy-ram, setting the RAM to a state where we can go into
 * postcopy later; must be called prior to any precopy.
 * called from arch_init's similarly named ram_postcopy_incoming_init
 */
int postcopy_ram_incoming_init(MigrationIncomingState *mis)
{
    if (foreach_not_ignored_block(init_range, NULL)) {
        return -1;
    }

    return 0;
}
524 */ 525 int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis) 526 { 527 trace_postcopy_ram_incoming_cleanup_entry(); 528 529 if (mis->have_fault_thread) { 530 Error *local_err = NULL; 531 532 /* Let the fault thread quit */ 533 qatomic_set(&mis->fault_thread_quit, 1); 534 postcopy_fault_thread_notify(mis); 535 trace_postcopy_ram_incoming_cleanup_join(); 536 qemu_thread_join(&mis->fault_thread); 537 538 if (postcopy_notify(POSTCOPY_NOTIFY_INBOUND_END, &local_err)) { 539 error_report_err(local_err); 540 return -1; 541 } 542 543 if (foreach_not_ignored_block(cleanup_range, mis)) { 544 return -1; 545 } 546 547 trace_postcopy_ram_incoming_cleanup_closeuf(); 548 close(mis->userfault_fd); 549 close(mis->userfault_event_fd); 550 mis->have_fault_thread = false; 551 } 552 553 if (enable_mlock) { 554 if (os_mlock() < 0) { 555 error_report("mlock: %s", strerror(errno)); 556 /* 557 * It doesn't feel right to fail at this point, we have a valid 558 * VM state. 559 */ 560 } 561 } 562 563 if (mis->postcopy_tmp_page) { 564 munmap(mis->postcopy_tmp_page, mis->largest_page_size); 565 mis->postcopy_tmp_page = NULL; 566 } 567 if (mis->postcopy_tmp_zero_page) { 568 munmap(mis->postcopy_tmp_zero_page, mis->largest_page_size); 569 mis->postcopy_tmp_zero_page = NULL; 570 } 571 trace_postcopy_ram_incoming_cleanup_blocktime( 572 get_postcopy_total_blocktime()); 573 574 trace_postcopy_ram_incoming_cleanup_exit(); 575 return 0; 576 } 577 578 /* 579 * Disable huge pages on an area 580 */ 581 static int nhp_range(RAMBlock *rb, void *opaque) 582 { 583 const char *block_name = qemu_ram_get_idstr(rb); 584 void *host_addr = qemu_ram_get_host_addr(rb); 585 ram_addr_t offset = qemu_ram_get_offset(rb); 586 ram_addr_t length = qemu_ram_get_used_length(rb); 587 trace_postcopy_nhp_range(block_name, host_addr, offset, length); 588 589 /* 590 * Before we do discards we need to ensure those discards really 591 * do delete areas of the page, even if THP thinks a hugepage would 592 * be a good idea, so force hugepages off. 593 */ 594 qemu_madvise(host_addr, length, QEMU_MADV_NOHUGEPAGE); 595 596 return 0; 597 } 598 599 /* 600 * Userfault requires us to mark RAM as NOHUGEPAGE prior to discard 601 * however leaving it until after precopy means that most of the precopy 602 * data is still THPd 603 */ 604 int postcopy_ram_prepare_discard(MigrationIncomingState *mis) 605 { 606 if (foreach_not_ignored_block(nhp_range, mis)) { 607 return -1; 608 } 609 610 postcopy_state_set(POSTCOPY_INCOMING_DISCARD); 611 612 return 0; 613 } 614 615 /* 616 * Mark the given area of RAM as requiring notification to unwritten areas 617 * Used as a callback on foreach_not_ignored_block. 
/*
 * Mark the given area of RAM as requiring notification to unwritten areas.
 * Used as a callback on foreach_not_ignored_block.
 *   rb: the RAMBlock to register
 *   opaque: MigrationIncomingState pointer
 * Returns 0 on success
 */
static int ram_block_enable_notify(RAMBlock *rb, void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffdio_register reg_struct;

    reg_struct.range.start = (uintptr_t)qemu_ram_get_host_addr(rb);
    reg_struct.range.len = qemu_ram_get_used_length(rb);
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    /* Now tell our userfault_fd that it's responsible for this area */
    if (ioctl(mis->userfault_fd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        return -1;
    }
    if (!(reg_struct.ioctls & ((__u64)1 << _UFFDIO_COPY))) {
        error_report("%s userfault: Region doesn't support COPY", __func__);
        return -1;
    }
    if (reg_struct.ioctls & ((__u64)1 << _UFFDIO_ZEROPAGE)) {
        qemu_ram_set_uf_zeroable(rb);
    }

    return 0;
}

int postcopy_wake_shared(struct PostCopyFD *pcfd,
                         uint64_t client_addr,
                         RAMBlock *rb)
{
    size_t pagesize = qemu_ram_pagesize(rb);
    struct uffdio_range range;
    int ret;
    trace_postcopy_wake_shared(client_addr, qemu_ram_get_idstr(rb));
    range.start = client_addr & ~(pagesize - 1);
    range.len = pagesize;
    ret = ioctl(pcfd->fd, UFFDIO_WAKE, &range);
    if (ret) {
        error_report("%s: Failed to wake: %zx in %s (%s)",
                     __func__, (size_t)client_addr, qemu_ram_get_idstr(rb),
                     strerror(errno));
    }
    return ret;
}

/*
 * Callback from shared fault handlers to ask for a page;
 * the page must be specified by a RAMBlock and an offset in that rb.
 * Note: Only for use by shared fault handlers (in the fault thread)
 */
int postcopy_request_shared_page(struct PostCopyFD *pcfd, RAMBlock *rb,
                                 uint64_t client_addr, uint64_t rb_offset)
{
    size_t pagesize = qemu_ram_pagesize(rb);
    uint64_t aligned_rbo = rb_offset & ~(pagesize - 1);
    MigrationIncomingState *mis = migration_incoming_get_current();

    trace_postcopy_request_shared_page(pcfd->idstr, qemu_ram_get_idstr(rb),
                                       rb_offset);
    if (ramblock_recv_bitmap_test_byte_offset(rb, aligned_rbo)) {
        trace_postcopy_request_shared_page_present(pcfd->idstr,
                                        qemu_ram_get_idstr(rb), rb_offset);
        return postcopy_wake_shared(pcfd, client_addr, rb);
    }
    migrate_send_rp_req_pages(mis, rb, aligned_rbo);
    return 0;
}

static int get_mem_fault_cpu_index(uint32_t pid)
{
    CPUState *cpu_iter;

    CPU_FOREACH(cpu_iter) {
        if (cpu_iter->thread_id == pid) {
            trace_get_mem_fault_cpu_index(cpu_iter->cpu_index, pid);
            return cpu_iter->cpu_index;
        }
    }
    trace_get_mem_fault_cpu_index(-1, pid);
    return -1;
}

static uint32_t get_low_time_offset(PostcopyBlocktimeContext *dc)
{
    int64_t start_time_offset = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) -
                                    dc->start_time;
    return start_time_offset < 1 ? 1 : start_time_offset & UINT32_MAX;
}
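/*
 * Note on the blocktime machinery below: it relies on the
 * UFFD_FEATURE_THREAD_ID feature negotiated in ufd_check_and_apply().
 * With it, the kernel reports the faulting thread id in
 * msg.arg.pagefault.feat.ptid, and get_mem_fault_cpu_index() above maps
 * that back to a vCPU index so the fault can be attributed to one vCPU.
 */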
715 * 716 * @addr: faulted host virtual address 717 * @ptid: faulted process thread id 718 * @rb: ramblock appropriate to addr 719 */ 720 static void mark_postcopy_blocktime_begin(uintptr_t addr, uint32_t ptid, 721 RAMBlock *rb) 722 { 723 int cpu, already_received; 724 MigrationIncomingState *mis = migration_incoming_get_current(); 725 PostcopyBlocktimeContext *dc = mis->blocktime_ctx; 726 uint32_t low_time_offset; 727 728 if (!dc || ptid == 0) { 729 return; 730 } 731 cpu = get_mem_fault_cpu_index(ptid); 732 if (cpu < 0) { 733 return; 734 } 735 736 low_time_offset = get_low_time_offset(dc); 737 if (dc->vcpu_addr[cpu] == 0) { 738 qatomic_inc(&dc->smp_cpus_down); 739 } 740 741 qatomic_xchg(&dc->last_begin, low_time_offset); 742 qatomic_xchg(&dc->page_fault_vcpu_time[cpu], low_time_offset); 743 qatomic_xchg(&dc->vcpu_addr[cpu], addr); 744 745 /* 746 * check it here, not at the beginning of the function, 747 * due to, check could occur early than bitmap_set in 748 * qemu_ufd_copy_ioctl 749 */ 750 already_received = ramblock_recv_bitmap_test(rb, (void *)addr); 751 if (already_received) { 752 qatomic_xchg(&dc->vcpu_addr[cpu], 0); 753 qatomic_xchg(&dc->page_fault_vcpu_time[cpu], 0); 754 qatomic_dec(&dc->smp_cpus_down); 755 } 756 trace_mark_postcopy_blocktime_begin(addr, dc, dc->page_fault_vcpu_time[cpu], 757 cpu, already_received); 758 } 759 760 /* 761 * This function just provide calculated blocktime per cpu and trace it. 762 * Total blocktime is calculated in mark_postcopy_blocktime_end. 763 * 764 * 765 * Assume we have 3 CPU 766 * 767 * S1 E1 S1 E1 768 * -----***********------------xxx***************------------------------> CPU1 769 * 770 * S2 E2 771 * ------------****************xxx---------------------------------------> CPU2 772 * 773 * S3 E3 774 * ------------------------****xxx********-------------------------------> CPU3 775 * 776 * We have sequence S1,S2,E1,S3,S1,E2,E3,E1 777 * S2,E1 - doesn't match condition due to sequence S1,S2,E1 doesn't include CPU3 778 * S3,S1,E2 - sequence includes all CPUs, in this case overlap will be S1,E2 - 779 * it's a part of total blocktime. 
/*
 * This function calculates and traces the blocktime per vCPU; the total
 * (overlapped) blocktime is also accumulated here.
 *
 * Assume we have 3 vCPUs:
 *
 *      S1        E1           S1               E1
 * -----***********------------xxx***************------------------------> CPU1
 *
 *             S2                E2
 * ------------****************xxx---------------------------------------> CPU2
 *
 *                         S3            E3
 * ------------------------****xxx********-------------------------------> CPU3
 *
 * We have the sequence S1,S2,E1,S3,S1,E2,E3,E1
 * S2,E1 - doesn't contribute to the total blocktime, because the interval
 * S1,S2,E1 doesn't cover CPU3
 * S3,S1,E2 - covers all CPUs, so the overlap S1,E2 is part of the total
 * blocktime
 * S1 - here is last_begin
 * Legend of the picture is following:
 *              * - means blocktime per vCPU
 *              x - means overlapped blocktime (total blocktime)
 *
 * @addr: host virtual address
 */
static void mark_postcopy_blocktime_end(uintptr_t addr)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *dc = mis->blocktime_ctx;
    MachineState *ms = MACHINE(qdev_get_machine());
    unsigned int smp_cpus = ms->smp.cpus;
    int i, affected_cpu = 0;
    bool vcpu_total_blocktime = false;
    uint32_t read_vcpu_time, low_time_offset;

    if (!dc) {
        return;
    }

    low_time_offset = get_low_time_offset(dc);
    /*
     * Look up the faulting address in order to clear the vCPUs blocked
     * on it. This algorithm is straightforward but not optimal; a better
     * one would keep a tree or hash keyed by address, whose value is a
     * list of blocked vCPUs.
     */
    for (i = 0; i < smp_cpus; i++) {
        uint32_t vcpu_blocktime = 0;

        /* qatomic_fetch_add(x, 0) is used here as an atomic read */
        read_vcpu_time = qatomic_fetch_add(&dc->page_fault_vcpu_time[i], 0);
        if (qatomic_fetch_add(&dc->vcpu_addr[i], 0) != addr ||
            read_vcpu_time == 0) {
            continue;
        }
        qatomic_xchg(&dc->vcpu_addr[i], 0);
        vcpu_blocktime = low_time_offset - read_vcpu_time;
        affected_cpu += 1;
        /*
         * We need to know whether this call was due to a faulted page;
         * the other possibility is a prefetched page, in which case the
         * total blocktime shouldn't be counted here.
         */
        if (!vcpu_total_blocktime &&
            qatomic_fetch_add(&dc->smp_cpus_down, 0) == smp_cpus) {
            vcpu_total_blocktime = true;
        }
        /* Continue the loop, since one page could affect several vCPUs */
        dc->vcpu_blocktime[i] += vcpu_blocktime;
    }

    qatomic_sub(&dc->smp_cpus_down, affected_cpu);
    if (vcpu_total_blocktime) {
        dc->total_blocktime += low_time_offset - qatomic_fetch_add(
                &dc->last_begin, 0);
    }
    trace_mark_postcopy_blocktime_end(addr, dc, dc->total_blocktime,
                                      affected_cpu);
}
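/*
 * Worked example for the two functions above (illustrative numbers
 * only): take 2 vCPUs that both fault on the same missing page. vCPU0
 * faults at t=10ms, vCPU1 at t=15ms (so last_begin = 15), and the page
 * arrives at t=25ms. mark_postcopy_blocktime_end() then adds
 * 25-10=15ms to vcpu_blocktime[0] and 25-15=10ms to vcpu_blocktime[1];
 * since all vCPUs were down, total_blocktime grows by
 * 25 - last_begin = 10ms, the interval during which the whole guest
 * was blocked.
 */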
static bool postcopy_pause_fault_thread(MigrationIncomingState *mis)
{
    trace_postcopy_pause_fault_thread();

    qemu_sem_wait(&mis->postcopy_pause_sem_fault);

    trace_postcopy_pause_fault_thread_continued();

    return true;
}

/*
 * Handle faults detected by the USERFAULT markings
 */
static void *postcopy_ram_fault_thread(void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffd_msg msg;
    int ret;
    size_t index;
    RAMBlock *rb = NULL;

    trace_postcopy_ram_fault_thread_entry();
    rcu_register_thread();
    mis->last_rb = NULL; /* last RAMBlock we sent part of */
    qemu_sem_post(&mis->fault_thread_sem);

    struct pollfd *pfd;
    size_t pfd_len = 2 + mis->postcopy_remote_fds->len;

    pfd = g_new0(struct pollfd, pfd_len);

    pfd[0].fd = mis->userfault_fd;
    pfd[0].events = POLLIN;
    pfd[1].fd = mis->userfault_event_fd;
    pfd[1].events = POLLIN; /* Waiting for eventfd to go positive */
    trace_postcopy_ram_fault_thread_fds_core(pfd[0].fd, pfd[1].fd);
    for (index = 0; index < mis->postcopy_remote_fds->len; index++) {
        struct PostCopyFD *pcfd = &g_array_index(mis->postcopy_remote_fds,
                                                 struct PostCopyFD, index);
        pfd[2 + index].fd = pcfd->fd;
        pfd[2 + index].events = POLLIN;
        trace_postcopy_ram_fault_thread_fds_extra(2 + index, pcfd->idstr,
                                                  pcfd->fd);
    }

    while (true) {
        ram_addr_t rb_offset;
        int poll_result;

        /*
         * We're mainly waiting for the kernel to give us a faulting HVA;
         * however, we can be told to quit via userfault_event_fd, which
         * is an eventfd.
         */

        poll_result = poll(pfd, pfd_len, -1 /* Wait forever */);
        if (poll_result == -1) {
            error_report("%s: userfault poll: %s", __func__, strerror(errno));
            break;
        }

        if (!mis->to_src_file) {
            /*
             * Possibly someone has told us through the event that the
             * return path is already broken; we should hold until the
             * channel is rebuilt.
             */
            if (postcopy_pause_fault_thread(mis)) {
                mis->last_rb = NULL;
                /* Continue to read the userfaultfd */
            } else {
                error_report("%s: paused but not allowed to continue",
                             __func__);
                break;
            }
        }

        if (pfd[1].revents) {
            uint64_t tmp64 = 0;

            /* Consume the signal */
            if (read(mis->userfault_event_fd, &tmp64, 8) != 8) {
                /* Nothing obviously nicer than posting this error. */
                error_report("%s: read() failed", __func__);
            }

            if (qatomic_read(&mis->fault_thread_quit)) {
                trace_postcopy_ram_fault_thread_quit();
                break;
            }
        }

        if (pfd[0].revents) {
            poll_result--;
            ret = read(mis->userfault_fd, &msg, sizeof(msg));
            if (ret != sizeof(msg)) {
                if (errno == EAGAIN) {
                    /*
                     * If a wake up happens on the other thread just after
                     * the poll, there is nothing to read.
                     */
                    continue;
                }
                if (ret < 0) {
                    error_report("%s: Failed to read full userfault "
                                 "message: %s",
                                 __func__, strerror(errno));
                    break;
                } else {
                    error_report("%s: Read %d bytes from userfaultfd "
                                 "expected %zd",
                                 __func__, ret, sizeof(msg));
                    break; /* Lost alignment, don't know what we'd read next */
                }
            }
            if (msg.event != UFFD_EVENT_PAGEFAULT) {
                error_report("%s: Read unexpected event %u from userfaultfd",
                             __func__, msg.event);
                continue; /* It's not a page fault, shouldn't happen */
            }

            rb = qemu_ram_block_from_host(
                     (void *)(uintptr_t)msg.arg.pagefault.address,
                     true, &rb_offset);
            if (!rb) {
                error_report("postcopy_ram_fault_thread: Fault outside guest: %"
                             PRIx64, (uint64_t)msg.arg.pagefault.address);
                break;
            }

            rb_offset &= ~(qemu_ram_pagesize(rb) - 1);
            trace_postcopy_ram_fault_thread_request(msg.arg.pagefault.address,
                                                    qemu_ram_get_idstr(rb),
                                                    rb_offset,
                                                    msg.arg.pagefault.feat.ptid);
            mark_postcopy_blocktime_begin(
                    (uintptr_t)(msg.arg.pagefault.address),
                    msg.arg.pagefault.feat.ptid, rb);

retry:
            /*
             * Send the request to the source - we want to request one
             * of our host page sizes (which is >= TPS)
             */
            ret = migrate_send_rp_req_pages(mis, rb, rb_offset);
            if (ret) {
                /* May be network failure, try to wait for recovery */
                if (ret == -EIO && postcopy_pause_fault_thread(mis)) {
                    /* We got reconnected somehow, try to continue */
                    mis->last_rb = NULL;
                    goto retry;
                } else {
                    /* This is an unavoidable fault */
                    error_report("%s: migrate_send_rp_req_pages() failed: %d",
                                 __func__, ret);
                    break;
                }
            }
        }

        /* Now handle any requests from external processes on shared memory */
        /* TODO: May need to handle devices deregistering during postcopy */
        for (index = 2; index < pfd_len && poll_result; index++) {
            if (pfd[index].revents) {
                struct PostCopyFD *pcfd =
                    &g_array_index(mis->postcopy_remote_fds,
                                   struct PostCopyFD, index - 2);

                poll_result--;
                if (pfd[index].revents & POLLERR) {
                    error_report("%s: POLLERR on poll %zd fd=%d",
                                 __func__, index, pcfd->fd);
                    pfd[index].events = 0;
                    continue;
                }

                ret = read(pcfd->fd, &msg, sizeof(msg));
                if (ret != sizeof(msg)) {
                    if (errno == EAGAIN) {
                        /*
                         * If a wake up happens on the other thread just after
                         * the poll, there is nothing to read.
                         */
                        continue;
                    }
                    if (ret < 0) {
                        error_report("%s: Failed to read full userfault "
                                     "message: %s (shared) revents=%d",
                                     __func__, strerror(errno),
                                     pfd[index].revents);
                        /* TODO: Could just disable this sharer */
                        break;
                    } else {
                        error_report("%s: Read %d bytes from userfaultfd "
                                     "expected %zd (shared)",
                                     __func__, ret, sizeof(msg));
                        /* TODO: Could just disable this sharer */
                        break; /* Lost alignment, don't know what we'd read next */
                    }
                }
                if (msg.event != UFFD_EVENT_PAGEFAULT) {
                    error_report("%s: Read unexpected event %u "
                                 "from userfaultfd (shared)",
                                 __func__, msg.event);
                    continue; /* It's not a page fault, shouldn't happen */
                }
                /* Call the device handler registered with us */
                ret = pcfd->handler(pcfd, &msg);
                if (ret) {
                    error_report("%s: Failed to resolve shared fault on %zd/%s",
                                 __func__, index, pcfd->idstr);
                    /* TODO: Fail? Disable this sharer? */
                }
            }
        }
    }
    rcu_unregister_thread();
    trace_postcopy_ram_fault_thread_exit();
    g_free(pfd);
    return NULL;
}

int postcopy_ram_incoming_setup(MigrationIncomingState *mis)
{
    /* Open the fd for the kernel to give us userfaults */
    mis->userfault_fd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    if (mis->userfault_fd == -1) {
        error_report("%s: Failed to open userfault fd: %s", __func__,
                     strerror(errno));
        return -1;
    }

    /*
     * Although the host check already tested the API, we need to
     * do the check again as an ABI handshake on the new fd.
     */
1073 */ 1074 if (!ufd_check_and_apply(mis->userfault_fd, mis)) { 1075 return -1; 1076 } 1077 1078 /* Now an eventfd we use to tell the fault-thread to quit */ 1079 mis->userfault_event_fd = eventfd(0, EFD_CLOEXEC); 1080 if (mis->userfault_event_fd == -1) { 1081 error_report("%s: Opening userfault_event_fd: %s", __func__, 1082 strerror(errno)); 1083 close(mis->userfault_fd); 1084 return -1; 1085 } 1086 1087 qemu_sem_init(&mis->fault_thread_sem, 0); 1088 qemu_thread_create(&mis->fault_thread, "postcopy/fault", 1089 postcopy_ram_fault_thread, mis, QEMU_THREAD_JOINABLE); 1090 qemu_sem_wait(&mis->fault_thread_sem); 1091 qemu_sem_destroy(&mis->fault_thread_sem); 1092 mis->have_fault_thread = true; 1093 1094 /* Mark so that we get notified of accesses to unwritten areas */ 1095 if (foreach_not_ignored_block(ram_block_enable_notify, mis)) { 1096 error_report("ram_block_enable_notify failed"); 1097 return -1; 1098 } 1099 1100 mis->postcopy_tmp_page = mmap(NULL, mis->largest_page_size, 1101 PROT_READ | PROT_WRITE, MAP_PRIVATE | 1102 MAP_ANONYMOUS, -1, 0); 1103 if (mis->postcopy_tmp_page == MAP_FAILED) { 1104 mis->postcopy_tmp_page = NULL; 1105 error_report("%s: Failed to map postcopy_tmp_page %s", 1106 __func__, strerror(errno)); 1107 return -1; 1108 } 1109 1110 /* 1111 * Map large zero page when kernel can't use UFFDIO_ZEROPAGE for hugepages 1112 */ 1113 mis->postcopy_tmp_zero_page = mmap(NULL, mis->largest_page_size, 1114 PROT_READ | PROT_WRITE, 1115 MAP_PRIVATE | MAP_ANONYMOUS, 1116 -1, 0); 1117 if (mis->postcopy_tmp_zero_page == MAP_FAILED) { 1118 int e = errno; 1119 mis->postcopy_tmp_zero_page = NULL; 1120 error_report("%s: Failed to map large zero page %s", 1121 __func__, strerror(e)); 1122 return -e; 1123 } 1124 memset(mis->postcopy_tmp_zero_page, '\0', mis->largest_page_size); 1125 1126 trace_postcopy_ram_enable_notify(); 1127 1128 return 0; 1129 } 1130 1131 static int qemu_ufd_copy_ioctl(int userfault_fd, void *host_addr, 1132 void *from_addr, uint64_t pagesize, RAMBlock *rb) 1133 { 1134 int ret; 1135 if (from_addr) { 1136 struct uffdio_copy copy_struct; 1137 copy_struct.dst = (uint64_t)(uintptr_t)host_addr; 1138 copy_struct.src = (uint64_t)(uintptr_t)from_addr; 1139 copy_struct.len = pagesize; 1140 copy_struct.mode = 0; 1141 ret = ioctl(userfault_fd, UFFDIO_COPY, ©_struct); 1142 } else { 1143 struct uffdio_zeropage zero_struct; 1144 zero_struct.range.start = (uint64_t)(uintptr_t)host_addr; 1145 zero_struct.range.len = pagesize; 1146 zero_struct.mode = 0; 1147 ret = ioctl(userfault_fd, UFFDIO_ZEROPAGE, &zero_struct); 1148 } 1149 if (!ret) { 1150 ramblock_recv_bitmap_set_range(rb, host_addr, 1151 pagesize / qemu_target_page_size()); 1152 mark_postcopy_blocktime_end((uintptr_t)host_addr); 1153 1154 } 1155 return ret; 1156 } 1157 1158 int postcopy_notify_shared_wake(RAMBlock *rb, uint64_t offset) 1159 { 1160 int i; 1161 MigrationIncomingState *mis = migration_incoming_get_current(); 1162 GArray *pcrfds = mis->postcopy_remote_fds; 1163 1164 for (i = 0; i < pcrfds->len; i++) { 1165 struct PostCopyFD *cur = &g_array_index(pcrfds, struct PostCopyFD, i); 1166 int ret = cur->waker(cur, rb, offset); 1167 if (ret) { 1168 return ret; 1169 } 1170 } 1171 return 0; 1172 } 1173 1174 /* 1175 * Place a host page (from) at (host) atomically 1176 * returns 0 on success 1177 */ 1178 int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from, 1179 RAMBlock *rb) 1180 { 1181 size_t pagesize = qemu_ram_pagesize(rb); 1182 1183 /* copy also acks to the kernel waking the stalled thread up 1184 * TODO: 
int postcopy_notify_shared_wake(RAMBlock *rb, uint64_t offset)
{
    int i;
    MigrationIncomingState *mis = migration_incoming_get_current();
    GArray *pcrfds = mis->postcopy_remote_fds;

    for (i = 0; i < pcrfds->len; i++) {
        struct PostCopyFD *cur = &g_array_index(pcrfds, struct PostCopyFD, i);
        int ret = cur->waker(cur, rb, offset);
        if (ret) {
            return ret;
        }
    }
    return 0;
}

/*
 * Place a host page (from) at (host) atomically
 * returns 0 on success
 */
int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
                        RAMBlock *rb)
{
    size_t pagesize = qemu_ram_pagesize(rb);

    /*
     * The copy also acts as an ack to the kernel, waking the stalled
     * thread up.
     * TODO: We could inhibit that ack and only do it if it was requested,
     * which would be slightly cheaper, but we'd have to be careful
     * of the order of updating our page state.
     */
    if (qemu_ufd_copy_ioctl(mis->userfault_fd, host, from, pagesize, rb)) {
        int e = errno;
        error_report("%s: %s copy host: %p from: %p (size: %zd)",
                     __func__, strerror(e), host, from, pagesize);

        return -e;
    }

    trace_postcopy_place_page(host);
    return postcopy_notify_shared_wake(rb,
                                       qemu_ram_block_host_offset(rb, host));
}

/*
 * Place a zero page at (host) atomically
 * returns 0 on success
 */
int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
                             RAMBlock *rb)
{
    size_t pagesize = qemu_ram_pagesize(rb);
    trace_postcopy_place_page_zero(host);

    /*
     * Normal RAMBlocks can zero a page using UFFDIO_ZEROPAGE,
     * but it's not available for everything (e.g. hugetlb pages)
     */
    if (qemu_ram_is_uf_zeroable(rb)) {
        if (qemu_ufd_copy_ioctl(mis->userfault_fd, host, NULL, pagesize, rb)) {
            int e = errno;
            error_report("%s: %s zero host: %p",
                         __func__, strerror(e), host);

            return -e;
        }
        return postcopy_notify_shared_wake(rb,
                                           qemu_ram_block_host_offset(rb,
                                                                      host));
    } else {
        return postcopy_place_page(mis, host, mis->postcopy_tmp_zero_page, rb);
    }
}

#else
/* No target OS support, stubs just fail */
void fill_destination_postcopy_migration_info(MigrationInfo *info)
{
}

bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
{
    error_report("%s: No OS support", __func__);
    return false;
}

int postcopy_ram_incoming_init(MigrationIncomingState *mis)
{
    error_report("postcopy_ram_incoming_init: No OS support");
    return -1;
}

int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_request_shared_page(struct PostCopyFD *pcfd, RAMBlock *rb,
                                 uint64_t client_addr, uint64_t rb_offset)
{
    assert(0);
    return -1;
}

int postcopy_ram_incoming_setup(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
                        RAMBlock *rb)
{
    assert(0);
    return -1;
}

int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
                             RAMBlock *rb)
{
    assert(0);
    return -1;
}

int postcopy_wake_shared(struct PostCopyFD *pcfd,
                         uint64_t client_addr,
                         RAMBlock *rb)
{
    assert(0);
    return -1;
}
#endif

/* ------------------------------------------------------------------------- */
void postcopy_fault_thread_notify(MigrationIncomingState *mis)
{
    uint64_t tmp64 = 1;

    /*
     * Wake up the fault_thread. It's an eventfd that should currently
     * be at 0; we're going to increment it to 1.
     */
    if (write(mis->userfault_event_fd, &tmp64, 8) != 8) {
        /* Not much we can do here, but may as well report it */
        error_report("%s: incrementing failed: %s", __func__,
                     strerror(errno));
    }
}

static PostcopyDiscardState pds = {0};

/**
 * postcopy_discard_send_init: Called at the start of each RAMBlock before
 *   asking to discard individual ranges.
 *
 * @ms: The current migration state.
 * @name: RAMBlock that discards will operate on.
 */
void postcopy_discard_send_init(MigrationState *ms, const char *name)
{
    pds.ramblock_name = name;
    pds.cur_entry = 0;
    pds.nsentwords = 0;
    pds.nsentcmds = 0;
}

/**
 * postcopy_discard_send_range: Called by the bitmap code for each chunk to
 *   discard. May send a discard message, may just leave it queued to
 *   be sent later.
 *
 * @ms: Current migration state.
 * @start,@length: a range of pages in the migration bitmap in the
 *   RAM block passed to postcopy_discard_send_init() (length=1 is one page)
 */
void postcopy_discard_send_range(MigrationState *ms, unsigned long start,
                                 unsigned long length)
{
    size_t tp_size = qemu_target_page_size();
    /* Convert to byte offsets within the RAM block */
    pds.start_list[pds.cur_entry] = start * tp_size;
    pds.length_list[pds.cur_entry] = length * tp_size;
    trace_postcopy_discard_send_range(pds.ramblock_name, start, length);
    pds.cur_entry++;
    pds.nsentwords++;

    if (pds.cur_entry == MAX_DISCARDS_PER_COMMAND) {
        /* Full set, ship it! */
        qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
                                              pds.ramblock_name,
                                              pds.cur_entry,
                                              pds.start_list,
                                              pds.length_list);
        pds.nsentcmds++;
        pds.cur_entry = 0;
    }
}

/**
 * postcopy_discard_send_finish: Called at the end of each RAMBlock by the
 *   bitmap code. Sends any outstanding discard messages.
 *
 * @ms: Current migration state.
 */
void postcopy_discard_send_finish(MigrationState *ms)
{
    /* Anything unsent? */
    if (pds.cur_entry) {
        qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
                                              pds.ramblock_name,
                                              pds.cur_entry,
                                              pds.start_list,
                                              pds.length_list);
        pds.nsentcmds++;
    }

    trace_postcopy_discard_send_finish(pds.ramblock_name, pds.nsentwords,
                                       pds.nsentcmds);
}
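/*
 * Worked example of the batching above (illustrative numbers, assuming
 * a 4KiB target page size): postcopy_discard_send_range(ms, 256, 16)
 * queues the byte range start=0x100000 length=0x10000 for the current
 * RAMBlock; once 12 (MAX_DISCARDS_PER_COMMAND) ranges are queued, one
 * MIG_CMD_POSTCOPY_RAM_DISCARD command carrying all of them is shipped,
 * and postcopy_discard_send_finish() flushes any remainder.
 */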
1404 */ 1405 void postcopy_register_shared_ufd(struct PostCopyFD *pcfd) 1406 { 1407 MigrationIncomingState *mis = migration_incoming_get_current(); 1408 1409 mis->postcopy_remote_fds = g_array_append_val(mis->postcopy_remote_fds, 1410 *pcfd); 1411 } 1412 1413 /* Unregister a handler for external shared memory postcopy 1414 */ 1415 void postcopy_unregister_shared_ufd(struct PostCopyFD *pcfd) 1416 { 1417 guint i; 1418 MigrationIncomingState *mis = migration_incoming_get_current(); 1419 GArray *pcrfds = mis->postcopy_remote_fds; 1420 1421 for (i = 0; i < pcrfds->len; i++) { 1422 struct PostCopyFD *cur = &g_array_index(pcrfds, struct PostCopyFD, i); 1423 if (cur->fd == pcfd->fd) { 1424 mis->postcopy_remote_fds = g_array_remove_index(pcrfds, i); 1425 return; 1426 } 1427 } 1428 } 1429