/*
 * Postcopy migration for RAM
 *
 * Copyright 2013-2015 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Dave Gilbert <dgilbert@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

/*
 * Postcopy is a migration technique where the execution flips from the
 * source to the destination before all the data has been copied.
 */

#include "qemu/osdep.h"
#include "exec/target_page.h"
#include "migration.h"
#include "qemu-file.h"
#include "savevm.h"
#include "postcopy-ram.h"
#include "ram.h"
#include "qapi/error.h"
#include "qemu/notify.h"
#include "sysemu/sysemu.h"
#include "sysemu/balloon.h"
#include "qemu/error-report.h"
#include "trace.h"

/* Arbitrary limit on size of each discard command,
 * keeps them around ~200 bytes
 */
#define MAX_DISCARDS_PER_COMMAND 12

struct PostcopyDiscardState {
    const char *ramblock_name;
    uint16_t cur_entry;
    /*
     * Start and length of a discard range (bytes)
     */
    uint64_t start_list[MAX_DISCARDS_PER_COMMAND];
    uint64_t length_list[MAX_DISCARDS_PER_COMMAND];
    unsigned int nsentwords;
    unsigned int nsentcmds;
};

static NotifierWithReturnList postcopy_notifier_list;

void postcopy_infrastructure_init(void)
{
    notifier_with_return_list_init(&postcopy_notifier_list);
}

void postcopy_add_notifier(NotifierWithReturn *nn)
{
    notifier_with_return_list_add(&postcopy_notifier_list, nn);
}

void postcopy_remove_notifier(NotifierWithReturn *n)
{
    notifier_with_return_remove(n);
}

int postcopy_notify(enum PostcopyNotifyReason reason, Error **errp)
{
    struct PostcopyNotifyData pnd;
    pnd.reason = reason;
    pnd.errp = errp;

    return notifier_with_return_list_notify(&postcopy_notifier_list,
                                            &pnd);
}

/*
 * Postcopy needs to detect accesses to pages that haven't yet been copied
 * across, and to map new pages in efficiently; the techniques for doing
 * this are target-OS specific.
 */
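/*
 * Illustrative sketch (not part of the build) of the Linux userfaultfd
 * flow that the rest of this file is built around; the ioctl names and
 * constants are the kernel's, the structure literals are just an example:
 *
 *   int ufd = syscall(__NR_userfaultfd, O_CLOEXEC);
 *   struct uffdio_api api = { .api = UFFD_API };
 *   ioctl(ufd, UFFDIO_API, &api);           // mandatory handshake
 *   struct uffdio_register reg = {
 *       .range = { .start = (uintptr_t)host_addr, .len = len },
 *       .mode = UFFDIO_REGISTER_MODE_MISSING,
 *   };
 *   ioctl(ufd, UFFDIO_REGISTER, &reg);      // get faults for this range
 *   // read(ufd, ...) then delivers struct uffd_msg page-fault events,
 *   // which are resolved with UFFDIO_COPY / UFFDIO_ZEROPAGE.
 */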
#if defined(__linux__)

#include <poll.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <asm/types.h> /* for __u64 */
#endif

#if defined(__linux__) && defined(__NR_userfaultfd) && defined(CONFIG_EVENTFD)
#include <sys/eventfd.h>
#include <linux/userfaultfd.h>

typedef struct PostcopyBlocktimeContext {
    /* time when page fault initiated per vCPU */
    uint32_t *page_fault_vcpu_time;
    /* page address per vCPU */
    uintptr_t *vcpu_addr;
    uint32_t total_blocktime;
    /* blocktime per vCPU */
    uint32_t *vcpu_blocktime;
    /* point in time when last page fault was initiated */
    uint32_t last_begin;
    /* number of vCPUs currently suspended */
    int smp_cpus_down;
    uint64_t start_time;

    /*
     * Handler for exit event, necessary for
     * releasing whole blocktime_ctx
     */
    Notifier exit_notifier;
} PostcopyBlocktimeContext;

static void destroy_blocktime_context(struct PostcopyBlocktimeContext *ctx)
{
    g_free(ctx->page_fault_vcpu_time);
    g_free(ctx->vcpu_addr);
    g_free(ctx->vcpu_blocktime);
    g_free(ctx);
}

static void migration_exit_cb(Notifier *n, void *data)
{
    PostcopyBlocktimeContext *ctx = container_of(n, PostcopyBlocktimeContext,
                                                 exit_notifier);
    destroy_blocktime_context(ctx);
}

static struct PostcopyBlocktimeContext *blocktime_context_new(void)
{
    PostcopyBlocktimeContext *ctx = g_new0(PostcopyBlocktimeContext, 1);
    ctx->page_fault_vcpu_time = g_new0(uint32_t, smp_cpus);
    ctx->vcpu_addr = g_new0(uintptr_t, smp_cpus);
    ctx->vcpu_blocktime = g_new0(uint32_t, smp_cpus);

    ctx->exit_notifier.notify = migration_exit_cb;
    ctx->start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    qemu_add_exit_notifier(&ctx->exit_notifier);
    return ctx;
}

static uint32List *get_vcpu_blocktime_list(PostcopyBlocktimeContext *ctx)
{
    uint32List *list = NULL, *entry = NULL;
    int i;

    for (i = smp_cpus - 1; i >= 0; i--) {
        entry = g_new0(uint32List, 1);
        entry->value = ctx->vcpu_blocktime[i];
        entry->next = list;
        list = entry;
    }

    return list;
}

/*
 * This function populates MigrationInfo from postcopy's blocktime
 * context.  It does nothing unless the postcopy-blocktime capability
 * was set.
 *
 * @info: pointer to MigrationInfo to populate
 */
void fill_destination_postcopy_migration_info(MigrationInfo *info)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *bc = mis->blocktime_ctx;

    if (!bc) {
        return;
    }

    info->has_postcopy_blocktime = true;
    info->postcopy_blocktime = bc->total_blocktime;
    info->has_postcopy_vcpu_blocktime = true;
    info->postcopy_vcpu_blocktime = get_vcpu_blocktime_list(bc);
}

static uint32_t get_postcopy_total_blocktime(void)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *bc = mis->blocktime_ctx;

    if (!bc) {
        return 0;
    }

    return bc->total_blocktime;
}
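/*
 * For illustration (an assumed QMP exchange, values invented): with the
 * postcopy-blocktime capability enabled, the fields filled in above show
 * up on the destination side as:
 *
 *   -> { "execute": "query-migrate" }
 *   <- { "return": { "postcopy-blocktime": 8,
 *                    "postcopy-vcpu-blocktime": [ 10, 13 ], ... } }
 */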
/**
 * receive_ufd_features: check userfault fd features, to request only supported
 * features in the future.
 *
 * Returns: true on success
 *
 * __NR_userfaultfd - must already be known to exist
 * @features: out parameter that will contain uffdio_api.features provided by
 *            the kernel on success
 */
static bool receive_ufd_features(uint64_t *features)
{
    struct uffdio_api api_struct = {0};
    int ufd;
    bool ret = true;

    /* if we are here, __NR_userfaultfd should exist */
    ufd = syscall(__NR_userfaultfd, O_CLOEXEC);
    if (ufd == -1) {
        error_report("%s: syscall __NR_userfaultfd failed: %s", __func__,
                     strerror(errno));
        return false;
    }

    /* ask for the features */
    api_struct.api = UFFD_API;
    api_struct.features = 0;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        error_report("%s: UFFDIO_API failed: %s", __func__,
                     strerror(errno));
        ret = false;
        goto release_ufd;
    }

    *features = api_struct.features;

release_ufd:
    close(ufd);
    return ret;
}

/**
 * request_ufd_features: this function should be called only once on a newly
 * opened ufd; subsequent calls will fail.
 *
 * Returns: true on success
 *
 * @ufd: fd obtained from the userfaultfd syscall
 * @features: bit mask, see UFFD_API_FEATURES
 */
static bool request_ufd_features(int ufd, uint64_t features)
{
    struct uffdio_api api_struct = {0};
    uint64_t ioctl_mask;

    api_struct.api = UFFD_API;
    api_struct.features = features;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        error_report("%s failed: UFFDIO_API failed: %s", __func__,
                     strerror(errno));
        return false;
    }

    ioctl_mask = (__u64)1 << _UFFDIO_REGISTER |
                 (__u64)1 << _UFFDIO_UNREGISTER;
    if ((api_struct.ioctls & ioctl_mask) != ioctl_mask) {
        error_report("Missing userfault features: %" PRIx64,
                     (uint64_t)(~api_struct.ioctls & ioctl_mask));
        return false;
    }

    return true;
}

static bool ufd_check_and_apply(int ufd, MigrationIncomingState *mis)
{
    uint64_t asked_features = 0;
    static uint64_t supported_features;

    /*
     * It's not possible to request UFFD_API twice on the same fd, and
     * the feature set a userfault fd supports is persistent, so only
     * probe it once.
     */
    if (!supported_features) {
        if (!receive_ufd_features(&supported_features)) {
            error_report("%s failed", __func__);
            return false;
        }
    }

#ifdef UFFD_FEATURE_THREAD_ID
    if (migrate_postcopy_blocktime() && mis &&
        UFFD_FEATURE_THREAD_ID & supported_features) {
        /* kernel supports the feature; don't recreate an existing context */
        if (!mis->blocktime_ctx) {
            mis->blocktime_ctx = blocktime_context_new();
        }

        asked_features |= UFFD_FEATURE_THREAD_ID;
    }
#endif

    /*
     * Request the features even if asked_features is 0: the kernel expects
     * UFFD_API before UFFDIO_REGISTER on every userfault file descriptor.
     */
    if (!request_ufd_features(ufd, asked_features)) {
        error_report("%s failed: features %" PRIu64, __func__,
                     asked_features);
        return false;
    }

    if (getpagesize() != ram_pagesize_summary()) {
        bool have_hp = false;
        /* We've got a huge page */
#ifdef UFFD_FEATURE_MISSING_HUGETLBFS
        have_hp = supported_features & UFFD_FEATURE_MISSING_HUGETLBFS;
#endif
        if (!have_hp) {
            error_report("Userfault on this host does not support huge pages");
            return false;
        }
    }
    return true;
}
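/*
 * Note on the page size check above: ram_pagesize_summary() ORs together
 * the page sizes of all migratable RAMBlocks, so any mismatch with
 * getpagesize() means at least one block (typically hugetlbfs-backed)
 * uses a non-host page size, and the hugepage userfault feature is then
 * required.
 */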
/*
 * Callback from the postcopy_ram_supported_by_host block iterator.
 */
static int test_ramblock_postcopiable(const char *block_name, void *host_addr,
                                      ram_addr_t offset, ram_addr_t length,
                                      void *opaque)
{
    RAMBlock *rb = qemu_ram_block_by_name(block_name);
    size_t pagesize = qemu_ram_pagesize(rb);

    if (length % pagesize) {
        error_report("Postcopy requires RAM blocks to be a page size multiple,"
                     " block %s is 0x" RAM_ADDR_FMT " bytes with a "
                     "page size of 0x%zx", block_name, length, pagesize);
        return 1;
    }
    return 0;
}

/*
 * Note: This has the side effect of munlock'ing all of RAM; that's
 * normally fine since, if the postcopy succeeds, mlock is turned back on
 * at the end.
 */
bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
{
    long pagesize = getpagesize();
    int ufd = -1;
    bool ret = false; /* Error unless we change it */
    void *testarea = NULL;
    struct uffdio_register reg_struct;
    struct uffdio_range range_struct;
    uint64_t feature_mask;
    Error *local_err = NULL;

    if (qemu_target_page_size() > pagesize) {
        error_report("Target page size bigger than host page size");
        goto out;
    }

    ufd = syscall(__NR_userfaultfd, O_CLOEXEC);
    if (ufd == -1) {
        error_report("%s: userfaultfd not available: %s", __func__,
                     strerror(errno));
        goto out;
    }

    /* Give devices a chance to object */
    if (postcopy_notify(POSTCOPY_NOTIFY_PROBE, &local_err)) {
        error_report_err(local_err);
        goto out;
    }

    /* Version and features check */
    if (!ufd_check_and_apply(ufd, mis)) {
        goto out;
    }

    /* We don't support postcopy with shared RAM yet */
    if (qemu_ram_foreach_migratable_block(test_ramblock_postcopiable, NULL)) {
        goto out;
    }

    /*
     * userfault and mlock don't go together; we'll put it back later if
     * it was enabled.
     */
    if (munlockall()) {
        error_report("%s: munlockall: %s", __func__, strerror(errno));
        goto out;
    }

    /*
     * We need to check that the ops we need are supported on anon memory.
     * To do that we need to register a chunk and see the flags that
     * are returned.
     */
    testarea = mmap(NULL, pagesize, PROT_READ | PROT_WRITE, MAP_PRIVATE |
                                    MAP_ANONYMOUS, -1, 0);
    if (testarea == MAP_FAILED) {
        error_report("%s: Failed to map test area: %s", __func__,
                     strerror(errno));
        goto out;
    }
    g_assert(((size_t)testarea & (pagesize - 1)) == 0);

    reg_struct.range.start = (uintptr_t)testarea;
    reg_struct.range.len = pagesize;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    if (ioctl(ufd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        goto out;
    }

    range_struct.start = (uintptr_t)testarea;
    range_struct.len = pagesize;
    if (ioctl(ufd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s userfault unregister: %s", __func__, strerror(errno));
        goto out;
    }

    feature_mask = (__u64)1 << _UFFDIO_WAKE |
                   (__u64)1 << _UFFDIO_COPY |
                   (__u64)1 << _UFFDIO_ZEROPAGE;
    if ((reg_struct.ioctls & feature_mask) != feature_mask) {
        error_report("Missing userfault map features: %" PRIx64,
                     (uint64_t)(~reg_struct.ioctls & feature_mask));
        goto out;
    }

    /* Success! */
    ret = true;
out:
    if (testarea) {
        munmap(testarea, pagesize);
    }
    if (ufd != -1) {
        close(ufd);
    }
    return ret;
}
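/*
 * The register/unregister round-trip above is purely a probe:
 * UFFDIO_REGISTER fills reg_struct.ioctls with the operations the kernel
 * supports for that memory type, which lets us learn whether _UFFDIO_COPY
 * and friends will work before we rely on them.
 */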
/*
 * Setup an area of RAM so that it *can* be used for postcopy later; this
 * must be done right at the start prior to pre-copy.
 */
static int init_range(const char *block_name, void *host_addr,
                      ram_addr_t offset, ram_addr_t length, void *opaque)
{
    trace_postcopy_init_range(block_name, host_addr, offset, length);

    /*
     * We need the whole of RAM to be truly empty for postcopy, so things
     * like ROMs and any data tables built during init must be zero'd
     * - we're going to get the copy from the source anyway.
     * (Precopy will just overwrite this data, so doesn't need the discard)
     */
    if (ram_discard_range(block_name, 0, length)) {
        return -1;
    }

    return 0;
}

/*
 * At the end of migration, undo the effects of init_range.
 * opaque should be the MIS.
 */
static int cleanup_range(const char *block_name, void *host_addr,
                         ram_addr_t offset, ram_addr_t length, void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffdio_range range_struct;
    trace_postcopy_cleanup_range(block_name, host_addr, offset, length);

    /*
     * We turned off hugepages for the precopy stage with postcopy enabled;
     * we can turn them back on now.
     */
    qemu_madvise(host_addr, length, QEMU_MADV_HUGEPAGE);

    /*
     * We can also turn off userfault now since we should have all the
     * pages.  It can be useful to leave it on to debug postcopy
     * if you're not sure it's always getting every page.
     */
    range_struct.start = (uintptr_t)host_addr;
    range_struct.len = length;

    if (ioctl(mis->userfault_fd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s: userfault unregister %s", __func__, strerror(errno));

        return -1;
    }

    return 0;
}

/*
 * Initialise postcopy-ram, setting the RAM to a state where we can go into
 * postcopy later; must be called prior to any precopy.
 * called from arch_init's similarly named ram_postcopy_incoming_init
 */
int postcopy_ram_incoming_init(MigrationIncomingState *mis)
{
    if (qemu_ram_foreach_migratable_block(init_range, NULL)) {
        return -1;
    }

    return 0;
}
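/*
 * Note (implementation detail that lives elsewhere): ram_discard_range()
 * is expected to actually free the pages (madvise(MADV_DONTNEED), or hole
 * punching for file-backed blocks), so a later access genuinely faults
 * rather than reading stale local data.
 */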
/*
 * Manage a single vote to the QEMU balloon inhibitor for all postcopy usage;
 * last caller wins.
 */
static void postcopy_balloon_inhibit(bool state)
{
    static bool cur_state = false;

    if (state != cur_state) {
        qemu_balloon_inhibit(state);
        cur_state = state;
    }
}

/*
 * At the end of a migration where postcopy_ram_incoming_init was called.
 */
int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    trace_postcopy_ram_incoming_cleanup_entry();

    if (mis->have_fault_thread) {
        Error *local_err = NULL;

        /* Let the fault thread quit */
        atomic_set(&mis->fault_thread_quit, 1);
        postcopy_fault_thread_notify(mis);
        trace_postcopy_ram_incoming_cleanup_join();
        qemu_thread_join(&mis->fault_thread);

        if (postcopy_notify(POSTCOPY_NOTIFY_INBOUND_END, &local_err)) {
            error_report_err(local_err);
            return -1;
        }

        if (qemu_ram_foreach_migratable_block(cleanup_range, mis)) {
            return -1;
        }

        trace_postcopy_ram_incoming_cleanup_closeuf();
        close(mis->userfault_fd);
        close(mis->userfault_event_fd);
        mis->have_fault_thread = false;
    }

    postcopy_balloon_inhibit(false);

    if (enable_mlock) {
        if (os_mlock() < 0) {
            error_report("mlock: %s", strerror(errno));
            /*
             * It doesn't feel right to fail at this point, we have a valid
             * VM state.
             */
        }
    }

    postcopy_state_set(POSTCOPY_INCOMING_END);

    if (mis->postcopy_tmp_page) {
        munmap(mis->postcopy_tmp_page, mis->largest_page_size);
        mis->postcopy_tmp_page = NULL;
    }
    if (mis->postcopy_tmp_zero_page) {
        munmap(mis->postcopy_tmp_zero_page, mis->largest_page_size);
        mis->postcopy_tmp_zero_page = NULL;
    }
    trace_postcopy_ram_incoming_cleanup_blocktime(
            get_postcopy_total_blocktime());

    trace_postcopy_ram_incoming_cleanup_exit();
    return 0;
}

/*
 * Disable huge pages on an area
 */
static int nhp_range(const char *block_name, void *host_addr,
                     ram_addr_t offset, ram_addr_t length, void *opaque)
{
    trace_postcopy_nhp_range(block_name, host_addr, offset, length);

    /*
     * Before we do discards we need to ensure those discards really
     * do delete areas of the page, even if THP thinks a hugepage would
     * be a good idea, so force hugepages off.
     */
    qemu_madvise(host_addr, length, QEMU_MADV_NOHUGEPAGE);

    return 0;
}

/*
 * Userfault requires us to mark RAM as NOHUGEPAGE prior to discard;
 * however, leaving it until after precopy means that most of the precopy
 * data still gets the benefit of THP.
 */
int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
    if (qemu_ram_foreach_migratable_block(nhp_range, mis)) {
        return -1;
    }

    postcopy_state_set(POSTCOPY_INCOMING_DISCARD);

    return 0;
}
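/*
 * Rough call order of the incoming side implemented in this file, as a
 * reading aid (a summary of the comments above, not a new API contract):
 *
 *   postcopy_ram_supported_by_host()  - probe, during the advise stage
 *   postcopy_ram_incoming_init()      - discard all RAM before precopy
 *   postcopy_ram_prepare_discard()    - disable THP before the discards
 *   postcopy_ram_enable_notify()      - register with uffd, start thread
 *   postcopy_place_page{,_zero}()     - resolve faults as pages arrive
 *   postcopy_ram_incoming_cleanup()   - unregister, stop thread, tidy up
 */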
/*
 * Mark the given area of RAM as requiring notification of accesses to
 * unwritten areas.  Used as a callback on qemu_ram_foreach_migratable_block.
 *   host_addr: Base of area to mark
 *   offset: Offset in the whole ram arena
 *   length: Length of the section
 *   opaque: MigrationIncomingState pointer
 * Returns 0 on success
 */
static int ram_block_enable_notify(const char *block_name, void *host_addr,
                                   ram_addr_t offset, ram_addr_t length,
                                   void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffdio_register reg_struct;

    reg_struct.range.start = (uintptr_t)host_addr;
    reg_struct.range.len = length;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    /* Now tell our userfault_fd that it's responsible for this area */
    if (ioctl(mis->userfault_fd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        return -1;
    }
    if (!(reg_struct.ioctls & ((__u64)1 << _UFFDIO_COPY))) {
        error_report("%s userfault: Region doesn't support COPY", __func__);
        return -1;
    }
    if (reg_struct.ioctls & ((__u64)1 << _UFFDIO_ZEROPAGE)) {
        RAMBlock *rb = qemu_ram_block_by_name(block_name);
        qemu_ram_set_uf_zeroable(rb);
    }

    return 0;
}

int postcopy_wake_shared(struct PostCopyFD *pcfd,
                         uint64_t client_addr,
                         RAMBlock *rb)
{
    size_t pagesize = qemu_ram_pagesize(rb);
    struct uffdio_range range;
    int ret;
    trace_postcopy_wake_shared(client_addr, qemu_ram_get_idstr(rb));
    range.start = client_addr & ~(pagesize - 1);
    range.len = pagesize;
    ret = ioctl(pcfd->fd, UFFDIO_WAKE, &range);
    if (ret) {
        error_report("%s: Failed to wake: %zx in %s (%s)",
                     __func__, (size_t)client_addr, qemu_ram_get_idstr(rb),
                     strerror(errno));
    }
    return ret;
}

/*
 * Callback from shared fault handlers to ask for a page;
 * the page must be specified by a RAMBlock and an offset in that rb.
 * Note: Only for use by shared fault handlers (in the fault thread)
 */
int postcopy_request_shared_page(struct PostCopyFD *pcfd, RAMBlock *rb,
                                 uint64_t client_addr, uint64_t rb_offset)
{
    size_t pagesize = qemu_ram_pagesize(rb);
    uint64_t aligned_rbo = rb_offset & ~(pagesize - 1);
    MigrationIncomingState *mis = migration_incoming_get_current();

    trace_postcopy_request_shared_page(pcfd->idstr, qemu_ram_get_idstr(rb),
                                       rb_offset);
    if (ramblock_recv_bitmap_test_byte_offset(rb, aligned_rbo)) {
        trace_postcopy_request_shared_page_present(pcfd->idstr,
                                        qemu_ram_get_idstr(rb), rb_offset);
        return postcopy_wake_shared(pcfd, client_addr, rb);
    }
    if (rb != mis->last_rb) {
        mis->last_rb = rb;
        migrate_send_rp_req_pages(mis, qemu_ram_get_idstr(rb),
                                  aligned_rbo, pagesize);
    } else {
        /* Save some space */
        migrate_send_rp_req_pages(mis, NULL, aligned_rbo, pagesize);
    }
    return 0;
}

static int get_mem_fault_cpu_index(uint32_t pid)
{
    CPUState *cpu_iter;

    CPU_FOREACH(cpu_iter) {
        if (cpu_iter->thread_id == pid) {
            trace_get_mem_fault_cpu_index(cpu_iter->cpu_index, pid);
            return cpu_iter->cpu_index;
        }
    }
    trace_get_mem_fault_cpu_index(-1, pid);
    return -1;
}

static uint32_t get_low_time_offset(PostcopyBlocktimeContext *dc)
{
    int64_t start_time_offset = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) -
                                    dc->start_time;
    return start_time_offset < 1 ? 1 : start_time_offset & UINT32_MAX;
}
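/*
 * The result above is clamped to >= 1 deliberately: a stored time of 0 is
 * used by the blocktime code below as the "no fault pending" marker for a
 * vCPU, so a real timestamp must never be 0.
 */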
/*
 * This function is called when a page fault occurs.  It tracks the
 * per-vCPU blocking time.
 *
 * @addr: faulted host virtual address
 * @ptid: faulted process thread id
 * @rb: ramblock appropriate to addr
 */
static void mark_postcopy_blocktime_begin(uintptr_t addr, uint32_t ptid,
                                          RAMBlock *rb)
{
    int cpu, already_received;
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *dc = mis->blocktime_ctx;
    uint32_t low_time_offset;

    if (!dc || ptid == 0) {
        return;
    }
    cpu = get_mem_fault_cpu_index(ptid);
    if (cpu < 0) {
        return;
    }

    low_time_offset = get_low_time_offset(dc);
    if (dc->vcpu_addr[cpu] == 0) {
        atomic_inc(&dc->smp_cpus_down);
    }

    atomic_xchg(&dc->last_begin, low_time_offset);
    atomic_xchg(&dc->page_fault_vcpu_time[cpu], low_time_offset);
    atomic_xchg(&dc->vcpu_addr[cpu], addr);

    /*
     * Check it here, not at the beginning of the function: the check
     * could occur earlier than bitmap_set in qemu_ufd_copy_ioctl.
     */
    already_received = ramblock_recv_bitmap_test(rb, (void *)addr);
    if (already_received) {
        atomic_xchg(&dc->vcpu_addr[cpu], 0);
        atomic_xchg(&dc->page_fault_vcpu_time[cpu], 0);
        atomic_dec(&dc->smp_cpus_down);
    }
    trace_mark_postcopy_blocktime_begin(addr, dc, dc->page_fault_vcpu_time[cpu],
                                        cpu, already_received);
}
/*
 * This function calculates the blocktime per vCPU and traces it.  It also
 * accumulates the total blocktime, i.e. the time during which all vCPUs
 * were blocked simultaneously (the overlap marked 'x' below).
 *
 * Assume we have 3 CPUs:
 *
 *      S1        E1           S1               E1
 * -----***********------------xxx***************------------------------> CPU1
 *
 *             S2                E2
 * ------------****************xxx---------------------------------------> CPU2
 *
 *                         S3            E3
 * ------------------------****xxx********-------------------------------> CPU3
 *
 * We have the sequence S1,S2,E1,S3,S1,E2,E3,E1
 * S2,E1 - doesn't match the condition, since the sequence S1,S2,E1 doesn't
 *         include CPU3
 * S3,S1,E2 - the sequence includes all CPUs; in this case the overlap is
 *            S1,E2 - it's a part of total blocktime.
 * S1 - here is last_begin
 * Legend of the picture is following:
 *              * - means blocktime per vCPU
 *              x - means overlapped blocktime (total blocktime)
 *
 * @addr: host virtual address
 */
static void mark_postcopy_blocktime_end(uintptr_t addr)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *dc = mis->blocktime_ctx;
    int i, affected_cpu = 0;
    bool vcpu_total_blocktime = false;
    uint32_t read_vcpu_time, low_time_offset;

    if (!dc) {
        return;
    }

    low_time_offset = get_low_time_offset(dc);
    /*
     * Look up the vCPU in order to clear its entry.  This algorithm is
     * straightforward but not optimal; a better one would keep a tree or
     * hash keyed by address, whose value is a list of vCPUs.
     */
    for (i = 0; i < smp_cpus; i++) {
        uint32_t vcpu_blocktime = 0;

        read_vcpu_time = atomic_fetch_add(&dc->page_fault_vcpu_time[i], 0);
        if (atomic_fetch_add(&dc->vcpu_addr[i], 0) != addr ||
            read_vcpu_time == 0) {
            continue;
        }
        atomic_xchg(&dc->vcpu_addr[i], 0);
        vcpu_blocktime = low_time_offset - read_vcpu_time;
        affected_cpu += 1;
        /*
         * We need to know whether mark_postcopy_blocktime_end was called
         * for a faulted page; the other possibility is a prefetched page,
         * in which case no vCPU was blocked on it.
         */
        if (!vcpu_total_blocktime &&
            atomic_fetch_add(&dc->smp_cpus_down, 0) == smp_cpus) {
            vcpu_total_blocktime = true;
        }
        /* continue the cycle, since one page could affect several vCPUs */
        dc->vcpu_blocktime[i] += vcpu_blocktime;
    }

    atomic_sub(&dc->smp_cpus_down, affected_cpu);
    if (vcpu_total_blocktime) {
        dc->total_blocktime += low_time_offset - atomic_fetch_add(
                &dc->last_begin, 0);
    }
    trace_mark_postcopy_blocktime_end(addr, dc, dc->total_blocktime,
                                      affected_cpu);
}
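/*
 * Worked example with invented numbers, 2 vCPUs: vCPU0 faults at t=10ms,
 * vCPU1 at t=12ms (both now down, last_begin = 12).  vCPU0's page is
 * copied at t=20ms: vcpu_blocktime[0] += 10, and since all vCPUs were
 * down, total_blocktime += 20 - 12 = 8.  vCPU1's page arrives at t=25ms:
 * vcpu_blocktime[1] += 13, with no further total_blocktime contribution
 * because only one vCPU was still down by then.
 */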
static bool postcopy_pause_fault_thread(MigrationIncomingState *mis)
{
    trace_postcopy_pause_fault_thread();

    qemu_sem_wait(&mis->postcopy_pause_sem_fault);

    trace_postcopy_pause_fault_thread_continued();

    return true;
}

/*
 * Handle faults detected by the USERFAULT markings
 */
static void *postcopy_ram_fault_thread(void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffd_msg msg;
    int ret;
    size_t index;
    RAMBlock *rb = NULL;

    trace_postcopy_ram_fault_thread_entry();
    rcu_register_thread();
    mis->last_rb = NULL; /* last RAMBlock we sent part of */
    qemu_sem_post(&mis->fault_thread_sem);

    struct pollfd *pfd;
    size_t pfd_len = 2 + mis->postcopy_remote_fds->len;

    pfd = g_new0(struct pollfd, pfd_len);

    pfd[0].fd = mis->userfault_fd;
    pfd[0].events = POLLIN;
    pfd[1].fd = mis->userfault_event_fd;
    pfd[1].events = POLLIN; /* Waiting for eventfd to go positive */
    trace_postcopy_ram_fault_thread_fds_core(pfd[0].fd, pfd[1].fd);
    for (index = 0; index < mis->postcopy_remote_fds->len; index++) {
        struct PostCopyFD *pcfd = &g_array_index(mis->postcopy_remote_fds,
                                                 struct PostCopyFD, index);
        pfd[2 + index].fd = pcfd->fd;
        pfd[2 + index].events = POLLIN;
        trace_postcopy_ram_fault_thread_fds_extra(2 + index, pcfd->idstr,
                                                  pcfd->fd);
    }

    while (true) {
        ram_addr_t rb_offset;
        int poll_result;

        /*
         * We're mainly waiting for the kernel to give us a faulting HVA;
         * however we can be told to quit via userfault_event_fd, which is
         * an eventfd.
         */

        poll_result = poll(pfd, pfd_len, -1 /* Wait forever */);
        if (poll_result == -1) {
            error_report("%s: userfault poll: %s", __func__, strerror(errno));
            break;
        }

        if (!mis->to_src_file) {
            /*
             * Someone may have told us, via the event, that the return
             * path is already broken.  We should hold until the channel
             * is rebuilt.
             */
            if (postcopy_pause_fault_thread(mis)) {
                mis->last_rb = NULL;
                /* Continue to read the userfaultfd */
            } else {
                error_report("%s: paused but not allowed to continue",
                             __func__);
                break;
            }
        }

        if (pfd[1].revents) {
            uint64_t tmp64 = 0;

            /* Consume the signal */
            if (read(mis->userfault_event_fd, &tmp64, 8) != 8) {
                /* Nothing obviously nicer than posting this error. */
                error_report("%s: read() failed", __func__);
            }

            if (atomic_read(&mis->fault_thread_quit)) {
                trace_postcopy_ram_fault_thread_quit();
                break;
            }
        }

        if (pfd[0].revents) {
            poll_result--;
            ret = read(mis->userfault_fd, &msg, sizeof(msg));
            if (ret != sizeof(msg)) {
                if (errno == EAGAIN) {
                    /*
                     * if a wake up happens on the other thread just after
                     * the poll, there is nothing to read.
                     */
                    continue;
                }
                if (ret < 0) {
                    error_report("%s: Failed to read full userfault "
                                 "message: %s",
                                 __func__, strerror(errno));
                    break;
                } else {
                    error_report("%s: Read %d bytes from userfaultfd "
                                 "expected %zu",
                                 __func__, ret, sizeof(msg));
                    break; /* Lost alignment, don't know what we'd read next */
                }
            }
            if (msg.event != UFFD_EVENT_PAGEFAULT) {
                error_report("%s: Read unexpected event %u from userfaultfd",
                             __func__, msg.event);
                continue; /* It's not a page fault, shouldn't happen */
            }

            rb = qemu_ram_block_from_host(
                     (void *)(uintptr_t)msg.arg.pagefault.address,
                     true, &rb_offset);
            if (!rb) {
                error_report("postcopy_ram_fault_thread: Fault outside guest: %"
                             PRIx64, (uint64_t)msg.arg.pagefault.address);
                break;
            }

            rb_offset &= ~(qemu_ram_pagesize(rb) - 1);
            trace_postcopy_ram_fault_thread_request(msg.arg.pagefault.address,
                                                qemu_ram_get_idstr(rb),
                                                rb_offset,
                                                msg.arg.pagefault.feat.ptid);
            mark_postcopy_blocktime_begin(
                    (uintptr_t)(msg.arg.pagefault.address),
                    msg.arg.pagefault.feat.ptid, rb);

retry:
            /*
             * Send the request to the source - we want to request one
             * of our host page sizes (which is >= TPS)
             */
            if (rb != mis->last_rb) {
                mis->last_rb = rb;
                ret = migrate_send_rp_req_pages(mis,
                                                qemu_ram_get_idstr(rb),
                                                rb_offset,
                                                qemu_ram_pagesize(rb));
            } else {
                /* Save some space */
                ret = migrate_send_rp_req_pages(mis,
                                                NULL,
                                                rb_offset,
                                                qemu_ram_pagesize(rb));
            }

            if (ret) {
                /* May be a network failure; try to wait for recovery */
                if (ret == -EIO && postcopy_pause_fault_thread(mis)) {
                    /* We got reconnected somehow, try to continue */
                    mis->last_rb = NULL;
                    goto retry;
                } else {
                    /* This is an unavoidable fault */
                    error_report("%s: migrate_send_rp_req_pages() get %d",
                                 __func__, ret);
                    break;
                }
            }
        }

        /* Now handle any requests from external processes on shared memory */
        /* TODO: May need to handle devices deregistering during postcopy */
        for (index = 2; index < pfd_len && poll_result; index++) {
            if (pfd[index].revents) {
                struct PostCopyFD *pcfd =
                    &g_array_index(mis->postcopy_remote_fds,
                                   struct PostCopyFD, index - 2);

                poll_result--;
                if (pfd[index].revents & POLLERR) {
                    error_report("%s: POLLERR on poll %zu fd=%d",
                                 __func__, index, pcfd->fd);
                    pfd[index].events = 0;
                    continue;
                }

                ret = read(pcfd->fd, &msg, sizeof(msg));
                if (ret != sizeof(msg)) {
                    if (errno == EAGAIN) {
                        /*
                         * if a wake up happens on the other thread just after
                         * the poll, there is nothing to read.
                         */
                        continue;
                    }
                    if (ret < 0) {
                        error_report("%s: Failed to read full userfault "
                                     "message: %s (shared) revents=%d",
                                     __func__, strerror(errno),
                                     pfd[index].revents);
                        /* TODO: Could just disable this sharer */
                        break;
                    } else {
                        error_report("%s: Read %d bytes from userfaultfd "
                                     "expected %zu (shared)",
                                     __func__, ret, sizeof(msg));
                        /* TODO: Could just disable this sharer */
                        break; /* Lost alignment, don't know what we'd read */
                    }
                }
                if (msg.event != UFFD_EVENT_PAGEFAULT) {
                    error_report("%s: Read unexpected event %u "
                                 "from userfaultfd (shared)",
                                 __func__, msg.event);
                    continue; /* It's not a page fault, shouldn't happen */
                }
                /* Call the device handler registered with us */
                ret = pcfd->handler(pcfd, &msg);
                if (ret) {
                    error_report("%s: Failed to resolve shared fault on %zu/%s",
                                 __func__, index, pcfd->idstr);
                    /* TODO: Fail? Disable this sharer? */
                }
            }
        }
    }
    rcu_unregister_thread();
    trace_postcopy_ram_fault_thread_exit();
    g_free(pfd);
    return NULL;
}
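/*
 * A note on the "Save some space" branches above: a page request with a
 * NULL block name tells the source "same RAMBlock as the previous
 * request", so mis->last_rb must be reset whenever the channel may have
 * been rebuilt (as done on every pause/resume path here).
 */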
int postcopy_ram_enable_notify(MigrationIncomingState *mis)
{
    /* Open the fd for the kernel to give us userfaults */
    mis->userfault_fd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    if (mis->userfault_fd == -1) {
        error_report("%s: Failed to open userfault fd: %s", __func__,
                     strerror(errno));
        return -1;
    }

    /*
     * Although the host check already tested the API, we need to
     * do the check again as an ABI handshake on the new fd.
     */
    if (!ufd_check_and_apply(mis->userfault_fd, mis)) {
        return -1;
    }

    /* Now an eventfd we use to tell the fault-thread to quit */
    mis->userfault_event_fd = eventfd(0, EFD_CLOEXEC);
    if (mis->userfault_event_fd == -1) {
        error_report("%s: Opening userfault_event_fd: %s", __func__,
                     strerror(errno));
        close(mis->userfault_fd);
        return -1;
    }

    qemu_sem_init(&mis->fault_thread_sem, 0);
    qemu_thread_create(&mis->fault_thread, "postcopy/fault",
                       postcopy_ram_fault_thread, mis, QEMU_THREAD_JOINABLE);
    qemu_sem_wait(&mis->fault_thread_sem);
    qemu_sem_destroy(&mis->fault_thread_sem);
    mis->have_fault_thread = true;

    /* Mark so that we get notified of accesses to unwritten areas */
    if (qemu_ram_foreach_migratable_block(ram_block_enable_notify, mis)) {
        error_report("ram_block_enable_notify failed");
        return -1;
    }

    /*
     * Ballooning can mark pages as absent while we're postcopying;
     * that would cause false userfaults.
     */
    postcopy_balloon_inhibit(true);

    trace_postcopy_ram_enable_notify();

    return 0;
}
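/*
 * The userfault fd is opened O_NONBLOCK above on purpose: if a wakeup
 * races with poll() in the fault thread, the subsequent read() returns
 * EAGAIN instead of blocking, which the thread treats as "nothing to do".
 */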
static int qemu_ufd_copy_ioctl(int userfault_fd, void *host_addr,
                               void *from_addr, uint64_t pagesize, RAMBlock *rb)
{
    int ret;
    if (from_addr) {
        struct uffdio_copy copy_struct;
        copy_struct.dst = (uint64_t)(uintptr_t)host_addr;
        copy_struct.src = (uint64_t)(uintptr_t)from_addr;
        copy_struct.len = pagesize;
        copy_struct.mode = 0;
        ret = ioctl(userfault_fd, UFFDIO_COPY, &copy_struct);
    } else {
        struct uffdio_zeropage zero_struct;
        zero_struct.range.start = (uint64_t)(uintptr_t)host_addr;
        zero_struct.range.len = pagesize;
        zero_struct.mode = 0;
        ret = ioctl(userfault_fd, UFFDIO_ZEROPAGE, &zero_struct);
    }
    if (!ret) {
        ramblock_recv_bitmap_set_range(rb, host_addr,
                                       pagesize / qemu_target_page_size());
        mark_postcopy_blocktime_end((uintptr_t)host_addr);
    }
    return ret;
}
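/*
 * Granularity note: the receive bitmap is kept in target pages, so one
 * successful ioctl above marks pagesize / qemu_target_page_size() entries.
 * For example (typical x86 numbers), placing one 2 MiB hugetlb page with
 * 4 KiB target pages sets 512 consecutive bits.
 */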
int postcopy_notify_shared_wake(RAMBlock *rb, uint64_t offset)
{
    int i;
    MigrationIncomingState *mis = migration_incoming_get_current();
    GArray *pcrfds = mis->postcopy_remote_fds;

    for (i = 0; i < pcrfds->len; i++) {
        struct PostCopyFD *cur = &g_array_index(pcrfds, struct PostCopyFD, i);
        int ret = cur->waker(cur, rb, offset);
        if (ret) {
            return ret;
        }
    }
    return 0;
}

/*
 * Place a host page (from) at (host) atomically
 * returns 0 on success
 */
int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
                        RAMBlock *rb)
{
    size_t pagesize = qemu_ram_pagesize(rb);

    /* copy also acks to the kernel waking the stalled thread up
     * TODO: We can inhibit that ack and only do it if it was requested
     * which would be slightly cheaper, but we'd have to be careful
     * of the order of updating our page state.
     */
    if (qemu_ufd_copy_ioctl(mis->userfault_fd, host, from, pagesize, rb)) {
        int e = errno;
        error_report("%s: %s copy host: %p from: %p (size: %zu)",
                     __func__, strerror(e), host, from, pagesize);

        return -e;
    }

    trace_postcopy_place_page(host);
    return postcopy_notify_shared_wake(rb,
                                       qemu_ram_block_host_offset(rb, host));
}

/*
 * Place a zero page at (host) atomically
 * returns 0 on success
 */
int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
                             RAMBlock *rb)
{
    size_t pagesize = qemu_ram_pagesize(rb);
    trace_postcopy_place_page_zero(host);

    /* Normal RAMBlocks can zero a page using UFFDIO_ZEROPAGE
     * but it's not available for everything (e.g. hugetlb pages)
     */
    if (qemu_ram_is_uf_zeroable(rb)) {
        if (qemu_ufd_copy_ioctl(mis->userfault_fd, host, NULL, pagesize, rb)) {
            int e = errno;
            error_report("%s: %s zero host: %p",
                         __func__, strerror(e), host);

            return -e;
        }
        return postcopy_notify_shared_wake(rb,
                                           qemu_ram_block_host_offset(rb,
                                                                      host));
    } else {
        /* The kernel can't use UFFDIO_ZEROPAGE for hugepages */
        if (!mis->postcopy_tmp_zero_page) {
            mis->postcopy_tmp_zero_page = mmap(NULL, mis->largest_page_size,
                                               PROT_READ | PROT_WRITE,
                                               MAP_PRIVATE | MAP_ANONYMOUS,
                                               -1, 0);
            if (mis->postcopy_tmp_zero_page == MAP_FAILED) {
                int e = errno;
                mis->postcopy_tmp_zero_page = NULL;
                error_report("%s: %s mapping large zero page",
                             __func__, strerror(e));
                return -e;
            }
            memset(mis->postcopy_tmp_zero_page, '\0', mis->largest_page_size);
        }
        return postcopy_place_page(mis, host, mis->postcopy_tmp_zero_page,
                                   rb);
    }
}

/*
 * Returns a page of memory (of mis->largest_page_size) that can be mapped
 * at a later point in time using postcopy_place_page.
 * The same address is used repeatedly; postcopy_place_page just takes the
 * backing page away.
 * Returns: Pointer to allocated page
 */
void *postcopy_get_tmp_page(MigrationIncomingState *mis)
{
    if (!mis->postcopy_tmp_page) {
        mis->postcopy_tmp_page = mmap(NULL, mis->largest_page_size,
                                      PROT_READ | PROT_WRITE, MAP_PRIVATE |
                                      MAP_ANONYMOUS, -1, 0);
        if (mis->postcopy_tmp_page == MAP_FAILED) {
            mis->postcopy_tmp_page = NULL;
            error_report("%s: %s", __func__, strerror(errno));
            return NULL;
        }
    }

    return mis->postcopy_tmp_page;
}

#else
/* No target OS support, stubs just fail */
void fill_destination_postcopy_migration_info(MigrationInfo *info)
{
}

bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
{
    error_report("%s: No OS support", __func__);
    return false;
}

int postcopy_ram_incoming_init(MigrationIncomingState *mis)
{
    error_report("postcopy_ram_incoming_init: No OS support");
    return -1;
}

int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_request_shared_page(struct PostCopyFD *pcfd, RAMBlock *rb,
                                 uint64_t client_addr, uint64_t rb_offset)
{
    assert(0);
    return -1;
}

int postcopy_ram_enable_notify(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
                        RAMBlock *rb)
{
    assert(0);
    return -1;
}

int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
                             RAMBlock *rb)
{
    assert(0);
    return -1;
}

void *postcopy_get_tmp_page(MigrationIncomingState *mis)
{
    assert(0);
    return NULL;
}

int postcopy_wake_shared(struct PostCopyFD *pcfd,
                         uint64_t client_addr,
                         RAMBlock *rb)
{
    assert(0);
    return -1;
}
#endif

/* ------------------------------------------------------------------------- */
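/*
 * postcopy_fault_thread_notify() below pairs with the eventfd read in the
 * fault thread, using standard eventfd semantics: write() adds to the
 * counter, poll() then reports POLLIN, and the thread's 8-byte read()
 * drains the counter back to zero before checking fault_thread_quit.
 */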
void postcopy_fault_thread_notify(MigrationIncomingState *mis)
{
    uint64_t tmp64 = 1;

    /*
     * Wakeup the fault_thread.  It's an eventfd that should currently
     * be at 0, we're going to increment it to 1
     */
    if (write(mis->userfault_event_fd, &tmp64, 8) != 8) {
        /* Not much we can do here, but may as well report it */
        error_report("%s: incrementing failed: %s", __func__,
                     strerror(errno));
    }
}

/**
 * postcopy_discard_send_init: Called at the start of each RAMBlock before
 * asking to discard individual ranges.
 *
 * @ms: The current migration state.
 * @name: RAMBlock that discards will operate on.
 *
 * returns: a new PDS.
 */
PostcopyDiscardState *postcopy_discard_send_init(MigrationState *ms,
                                                 const char *name)
{
    PostcopyDiscardState *res = g_malloc0(sizeof(PostcopyDiscardState));

    res->ramblock_name = name;

    return res;
}

/**
 * postcopy_discard_send_range: Called by the bitmap code for each chunk to
 * discard.  May send a discard message, may just leave it queued to
 * be sent later.
 *
 * @ms: Current migration state.
 * @pds: Structure initialised by postcopy_discard_send_init().
 * @start,@length: a range of pages in the migration bitmap in the
 *  RAM block passed to postcopy_discard_send_init() (length=1 is one page)
 */
void postcopy_discard_send_range(MigrationState *ms, PostcopyDiscardState *pds,
                                 unsigned long start, unsigned long length)
{
    size_t tp_size = qemu_target_page_size();
    /* Convert to byte offsets within the RAM block */
    pds->start_list[pds->cur_entry] = start * tp_size;
    pds->length_list[pds->cur_entry] = length * tp_size;
    trace_postcopy_discard_send_range(pds->ramblock_name, start, length);
    pds->cur_entry++;
    pds->nsentwords++;

    if (pds->cur_entry == MAX_DISCARDS_PER_COMMAND) {
        /* Full set, ship it! */
        qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
                                              pds->ramblock_name,
                                              pds->cur_entry,
                                              pds->start_list,
                                              pds->length_list);
        pds->nsentcmds++;
        pds->cur_entry = 0;
    }
}

/**
 * postcopy_discard_send_finish: Called at the end of each RAMBlock by the
 * bitmap code.  Sends any outstanding discard messages, frees the PDS.
 *
 * @ms: Current migration state.
 * @pds: Structure initialised by postcopy_discard_send_init().
 */
void postcopy_discard_send_finish(MigrationState *ms, PostcopyDiscardState *pds)
{
    /* Anything unsent? */
    if (pds->cur_entry) {
        qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
                                              pds->ramblock_name,
                                              pds->cur_entry,
                                              pds->start_list,
                                              pds->length_list);
        pds->nsentcmds++;
    }

    trace_postcopy_discard_send_finish(pds->ramblock_name, pds->nsentwords,
                                       pds->nsentcmds);

    g_free(pds);
}
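/*
 * Worked example for the functions above (invented numbers): with 4 KiB
 * target pages, postcopy_discard_send_range(ms, pds, 256, 16) queues the
 * byte range [0x100000, 0x110000) of the RAMBlock; entries accumulate
 * until MAX_DISCARDS_PER_COMMAND (12) are queued, at which point a single
 * MIG_CMD_POSTCOPY_RAM_DISCARD command is sent carrying all of them.
 */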
/*
 * Current state of incoming postcopy; note this is not part of
 * MigrationIncomingState since its state is used during cleanup
 * at the end as MIS is being freed.
 */
static PostcopyState incoming_postcopy_state;

PostcopyState postcopy_state_get(void)
{
    return atomic_mb_read(&incoming_postcopy_state);
}

/* Set the state and return the old state */
PostcopyState postcopy_state_set(PostcopyState new_state)
{
    return atomic_xchg(&incoming_postcopy_state, new_state);
}

/*
 * Register a handler for external shared memory postcopy;
 * called on the destination side.
 */
void postcopy_register_shared_ufd(struct PostCopyFD *pcfd)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    mis->postcopy_remote_fds = g_array_append_val(mis->postcopy_remote_fds,
                                                  *pcfd);
}

/*
 * Unregister a handler for external shared memory postcopy.
 */
void postcopy_unregister_shared_ufd(struct PostCopyFD *pcfd)
{
    guint i;
    MigrationIncomingState *mis = migration_incoming_get_current();
    GArray *pcrfds = mis->postcopy_remote_fds;

    for (i = 0; i < pcrfds->len; i++) {
        struct PostCopyFD *cur = &g_array_index(pcrfds, struct PostCopyFD, i);
        if (cur->fd == pcfd->fd) {
            mis->postcopy_remote_fds = g_array_remove_index(pcrfds, i);
            return;
        }
    }
}