/*
 * Postcopy migration for RAM
 *
 * Copyright 2013-2015 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Dave Gilbert  <dgilbert@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

/*
 * Postcopy is a migration technique where the execution flips from the
 * source to the destination before all the data has been copied.
 */

#include "qemu/osdep.h"

#include "qemu-common.h"
#include "migration/migration.h"
#include "migration/postcopy-ram.h"
#include "sysemu/sysemu.h"
#include "sysemu/balloon.h"
#include "qemu/error-report.h"
#include "trace.h"

/* Arbitrary limit on size of each discard command,
 * keeps them around ~200 bytes: 12 entries of two 64-bit words each is
 * 192 bytes of payload.
 */
#define MAX_DISCARDS_PER_COMMAND 12

struct PostcopyDiscardState {
    const char *ramblock_name;
    uint64_t offset; /* Bitmap entry for the 1st bit of this RAMBlock */
    uint16_t cur_entry;
    /*
     * Start and length of a discard range (bytes)
     */
    uint64_t start_list[MAX_DISCARDS_PER_COMMAND];
    uint64_t length_list[MAX_DISCARDS_PER_COMMAND];
    unsigned int nsentwords;
    unsigned int nsentcmds;
};

/* Postcopy needs to detect accesses to pages that haven't yet been copied
 * across, and to efficiently map new pages in; the techniques for doing
 * this are target-OS specific.
 */
#if defined(__linux__)

#include <poll.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <asm/types.h> /* for __u64 */
#endif

#if defined(__linux__) && defined(__NR_userfaultfd) && defined(CONFIG_EVENTFD)
#include <sys/eventfd.h>
#include <linux/userfaultfd.h>

static bool ufd_version_check(int ufd)
{
    struct uffdio_api api_struct;
    uint64_t ioctl_mask;

    api_struct.api = UFFD_API;
    api_struct.features = 0;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        error_report("postcopy_ram_supported_by_host: UFFDIO_API failed: %s",
                     strerror(errno));
        return false;
    }

    ioctl_mask = (__u64)1 << _UFFDIO_REGISTER |
                 (__u64)1 << _UFFDIO_UNREGISTER;
    if ((api_struct.ioctls & ioctl_mask) != ioctl_mask) {
        error_report("Missing userfault features: %" PRIx64,
                     (uint64_t)(~api_struct.ioctls & ioctl_mask));
        return false;
    }

    if (getpagesize() != ram_pagesize_summary()) {
        bool have_hp = false;
        /* We've got a huge page */
#ifdef UFFD_FEATURE_MISSING_HUGETLBFS
        have_hp = api_struct.features & UFFD_FEATURE_MISSING_HUGETLBFS;
#endif
        if (!have_hp) {
            error_report("Userfault on this host does not support huge pages");
            return false;
        }
    }
    return true;
}

/* Callback from postcopy_ram_supported_by_host block iterator.
 */
static int test_range_shared(const char *block_name, void *host_addr,
                             ram_addr_t offset, ram_addr_t length, void *opaque)
{
    if (qemu_ram_is_shared(qemu_ram_block_by_name(block_name))) {
        error_report("Postcopy on shared RAM (%s) is not yet supported",
                     block_name);
        return 1;
    }
    return 0;
}
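/*
 * Rough destination-side call order, as driven by the incoming migration
 * code (summarising the functions in this file):
 *
 *   postcopy_ram_supported_by_host()  - one-off probe of the host
 *   postcopy_ram_incoming_init()      - discard all RAM before precopy
 *   postcopy_ram_prepare_discard()    - turn THP off before the discards
 *   postcopy_ram_enable_notify()      - register RAM with userfaultfd and
 *                                       start the fault thread
 *   postcopy_place_page{,_zero}()     - resolve faults as pages arrive
 *   postcopy_ram_incoming_cleanup()   - unregister and join the thread
 */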
/*
 * Note: this has the side effect of munlock'ing all of RAM; that's normally
 * fine, since if postcopy succeeds, mlock is turned back on at the end.
 */
bool postcopy_ram_supported_by_host(void)
{
    long pagesize = getpagesize();
    int ufd = -1;
    bool ret = false; /* Error unless we change it */
    void *testarea = NULL;
    struct uffdio_register reg_struct;
    struct uffdio_range range_struct;
    uint64_t feature_mask;

    if (qemu_target_page_size() > pagesize) {
        error_report("Target page size bigger than host page size");
        goto out;
    }

    ufd = syscall(__NR_userfaultfd, O_CLOEXEC);
    if (ufd == -1) {
        error_report("%s: userfaultfd not available: %s", __func__,
                     strerror(errno));
        goto out;
    }

    /* Version and features check */
    if (!ufd_version_check(ufd)) {
        goto out;
    }

    /* We don't support postcopy with shared RAM yet */
    if (qemu_ram_foreach_block(test_range_shared, NULL)) {
        goto out;
    }

    /*
     * userfault and mlock don't go together; we'll put it back later if
     * it was enabled.
     */
    if (munlockall()) {
        error_report("%s: munlockall: %s", __func__, strerror(errno));
        goto out;
    }

    /*
     * We need to check that the ops we need are supported on anon memory.
     * To do that we need to register a chunk and see the flags that
     * are returned.
     */
    testarea = mmap(NULL, pagesize, PROT_READ | PROT_WRITE, MAP_PRIVATE |
                                    MAP_ANONYMOUS, -1, 0);
    if (testarea == MAP_FAILED) {
        error_report("%s: Failed to map test area: %s", __func__,
                     strerror(errno));
        goto out;
    }
    g_assert(((size_t)testarea & (pagesize - 1)) == 0);

    reg_struct.range.start = (uintptr_t)testarea;
    reg_struct.range.len = pagesize;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    if (ioctl(ufd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        goto out;
    }

    range_struct.start = (uintptr_t)testarea;
    range_struct.len = pagesize;
    if (ioctl(ufd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s userfault unregister: %s", __func__, strerror(errno));
        goto out;
    }

    feature_mask = (__u64)1 << _UFFDIO_WAKE |
                   (__u64)1 << _UFFDIO_COPY |
                   (__u64)1 << _UFFDIO_ZEROPAGE;
    if ((reg_struct.ioctls & feature_mask) != feature_mask) {
        error_report("Missing userfault map features: %" PRIx64,
                     (uint64_t)(~reg_struct.ioctls & feature_mask));
        goto out;
    }

    /* Success! */
    ret = true;
out:
    if (testarea) {
        munmap(testarea, pagesize);
    }
    if (ufd != -1) {
        close(ufd);
    }
    return ret;
}

/*
 * Setup an area of RAM so that it *can* be used for postcopy later; this
 * must be done right at the start prior to pre-copy.
 * opaque is unused.
 */
static int init_range(const char *block_name, void *host_addr,
                      ram_addr_t offset, ram_addr_t length, void *opaque)
{
    trace_postcopy_init_range(block_name, host_addr, offset, length);

    /*
     * We need the whole of RAM to be truly empty for postcopy, so things
     * like ROMs and any data tables built during init must be zeroed
     * - we're going to get the copy from the source anyway.
     * (Precopy will just overwrite this data, so doesn't need the discard)
     */
    if (ram_discard_range(block_name, 0, length)) {
        return -1;
    }

    return 0;
}
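/*
 * For reference: for anonymous memory on Linux, a discard like the one
 * init_range() requests boils down to madvise(MADV_DONTNEED), which drops
 * the backing pages and makes the next access either zero-fill or (once
 * registered) raise a userfault.  Minimal sketch of the idea - illustrative
 * only, the real ram_discard_range() also copes with other backends:
 *
 *     if (madvise((char *)host_addr + offset, length, MADV_DONTNEED)) {
 *         error_report("madvise(MADV_DONTNEED): %s", strerror(errno));
 *     }
 */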
/*
 * At the end of migration, undo the effects of init_range.
 * opaque should be the MIS.
 */
static int cleanup_range(const char *block_name, void *host_addr,
                         ram_addr_t offset, ram_addr_t length, void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffdio_range range_struct;
    trace_postcopy_cleanup_range(block_name, host_addr, offset, length);

    /*
     * We turned off hugepages for the precopy stage with postcopy enabled;
     * we can turn them back on now.
     */
    qemu_madvise(host_addr, length, QEMU_MADV_HUGEPAGE);

    /*
     * We can also turn off userfault now since we should have all the
     * pages.  It can be useful to leave it on to debug postcopy
     * if you're not sure it's always getting every page.
     */
    range_struct.start = (uintptr_t)host_addr;
    range_struct.len = length;

    if (ioctl(mis->userfault_fd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s: userfault unregister %s", __func__, strerror(errno));

        return -1;
    }

    return 0;
}

/*
 * Initialise postcopy-ram, setting the RAM to a state where we can go into
 * postcopy later; must be called prior to any precopy.
 * called from arch_init's similarly named ram_postcopy_incoming_init
 */
int postcopy_ram_incoming_init(MigrationIncomingState *mis, size_t ram_pages)
{
    if (qemu_ram_foreach_block(init_range, NULL)) {
        return -1;
    }

    return 0;
}

/*
 * At the end of a migration where postcopy_ram_incoming_init was called.
 */
int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    trace_postcopy_ram_incoming_cleanup_entry();

    if (mis->have_fault_thread) {
        uint64_t tmp64;

        if (qemu_ram_foreach_block(cleanup_range, mis)) {
            return -1;
        }
        /*
         * Tell the fault_thread to exit; it's an eventfd that should
         * currently be at 0, and we're going to increment it to 1.
         */
        tmp64 = 1;
        if (write(mis->userfault_quit_fd, &tmp64, sizeof(tmp64)) ==
            sizeof(tmp64)) {
            trace_postcopy_ram_incoming_cleanup_join();
            qemu_thread_join(&mis->fault_thread);
        } else {
            /* Not much we can do here, but may as well report it */
            error_report("%s: incrementing userfault_quit_fd: %s", __func__,
                         strerror(errno));
        }
        trace_postcopy_ram_incoming_cleanup_closeuf();
        close(mis->userfault_fd);
        close(mis->userfault_quit_fd);
        mis->have_fault_thread = false;
    }

    qemu_balloon_inhibit(false);

    if (enable_mlock) {
        if (os_mlock() < 0) {
            error_report("mlock: %s", strerror(errno));
            /*
             * It doesn't feel right to fail at this point, we have a valid
             * VM state.
             */
        }
    }

    postcopy_state_set(POSTCOPY_INCOMING_END);
    migrate_send_rp_shut(mis, qemu_file_get_error(mis->from_src_file) != 0);

    if (mis->postcopy_tmp_page) {
        munmap(mis->postcopy_tmp_page, mis->largest_page_size);
        mis->postcopy_tmp_page = NULL;
    }
    if (mis->postcopy_tmp_zero_page) {
        munmap(mis->postcopy_tmp_zero_page, mis->largest_page_size);
        mis->postcopy_tmp_zero_page = NULL;
    }
    trace_postcopy_ram_incoming_cleanup_exit();
    return 0;
}
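/*
 * For reference, the quit mechanism above is plain eventfd semantics: the
 * counter starts at 0, write() adds the 8-byte value given to it, and a
 * non-zero counter makes the fd poll readable.  Sketch of the two ends
 * (illustrative, not QEMU code):
 *
 *     uint64_t one = 1;
 *     write(quit_fd, &one, sizeof(one));   // cleanup: ask thread to exit
 *
 *     struct pollfd pfd = { .fd = quit_fd, .events = POLLIN };
 *     poll(&pfd, 1, -1);                   // fault thread: wakes and quits
 */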
/*
 * Disable huge pages on an area
 */
static int nhp_range(const char *block_name, void *host_addr,
                     ram_addr_t offset, ram_addr_t length, void *opaque)
{
    trace_postcopy_nhp_range(block_name, host_addr, offset, length);

    /*
     * Before we do discards we need to ensure those discards really
     * do delete areas of the page, even if THP thinks a hugepage would
     * be a good idea, so force hugepages off.
     */
    qemu_madvise(host_addr, length, QEMU_MADV_NOHUGEPAGE);

    return 0;
}

/*
 * Userfault requires us to mark RAM as NOHUGEPAGE prior to discarding it;
 * however, leaving that until after precopy means that most of the precopy
 * data still gets the benefit of THP.
 */
int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
    if (qemu_ram_foreach_block(nhp_range, mis)) {
        return -1;
    }

    postcopy_state_set(POSTCOPY_INCOMING_DISCARD);

    return 0;
}

/*
 * Mark the given area of RAM as requiring notification on access to pages
 * that haven't yet been written.
 * Used as a callback on qemu_ram_foreach_block.
 *   host_addr: Base of area to mark
 *   offset: Offset in the whole ram arena
 *   length: Length of the section
 *   opaque: MigrationIncomingState pointer
 * Returns 0 on success
 */
static int ram_block_enable_notify(const char *block_name, void *host_addr,
                                   ram_addr_t offset, ram_addr_t length,
                                   void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffdio_register reg_struct;

    reg_struct.range.start = (uintptr_t)host_addr;
    reg_struct.range.len = length;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    /* Now tell our userfault_fd that it's responsible for this area */
    if (ioctl(mis->userfault_fd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        return -1;
    }
    if (!(reg_struct.ioctls & ((__u64)1 << _UFFDIO_COPY))) {
        error_report("%s userfault: Region doesn't support COPY", __func__);
        return -1;
    }

    return 0;
}
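/*
 * What the fault thread below reads from the userfaultfd is a fixed-size
 * struct uffd_msg (linux/userfaultfd.h).  For a missing page the fields we
 * care about are:
 *
 *     msg.event                  == UFFD_EVENT_PAGEFAULT
 *     msg.arg.pagefault.address   - faulting host virtual address
 *
 * The reported address isn't necessarily aligned to the RAMBlock's page
 * size (which may be a hugepage), so we round it down before asking the
 * source for that page.
 */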
/*
 * Handle faults detected by the USERFAULT markings
 */
static void *postcopy_ram_fault_thread(void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffd_msg msg;
    int ret;
    RAMBlock *rb = NULL;
    RAMBlock *last_rb = NULL; /* last RAMBlock we sent part of */

    trace_postcopy_ram_fault_thread_entry();
    qemu_sem_post(&mis->fault_thread_sem);

    while (true) {
        ram_addr_t rb_offset;
        struct pollfd pfd[2];

        /*
         * We're mainly waiting for the kernel to give us a faulting HVA;
         * however we can be told to quit via userfault_quit_fd, which is
         * an eventfd.
         */
        pfd[0].fd = mis->userfault_fd;
        pfd[0].events = POLLIN;
        pfd[0].revents = 0;
        pfd[1].fd = mis->userfault_quit_fd;
        pfd[1].events = POLLIN; /* Waiting for eventfd to go positive */
        pfd[1].revents = 0;

        if (poll(pfd, 2, -1 /* Wait forever */) == -1) {
            error_report("%s: userfault poll: %s", __func__, strerror(errno));
            break;
        }

        if (pfd[1].revents) {
            trace_postcopy_ram_fault_thread_quit();
            break;
        }

        ret = read(mis->userfault_fd, &msg, sizeof(msg));
        if (ret != sizeof(msg)) {
            if (errno == EAGAIN) {
                /*
                 * if a wake up happens on the other thread just after
                 * the poll, there is nothing to read.
                 */
                continue;
            }
            if (ret < 0) {
                error_report("%s: Failed to read full userfault message: %s",
                             __func__, strerror(errno));
                break;
            } else {
                error_report("%s: Read %d bytes from userfaultfd expected %zu",
                             __func__, ret, sizeof(msg));
                break; /* Lost alignment, don't know what we'd read next */
            }
        }
        if (msg.event != UFFD_EVENT_PAGEFAULT) {
            error_report("%s: Read unexpected event %u from userfaultfd",
                         __func__, msg.event);
            continue; /* It's not a page fault, shouldn't happen */
        }

        rb = qemu_ram_block_from_host(
                 (void *)(uintptr_t)msg.arg.pagefault.address,
                 true, &rb_offset);
        if (!rb) {
            error_report("postcopy_ram_fault_thread: Fault outside guest: %"
                         PRIx64, (uint64_t)msg.arg.pagefault.address);
            break;
        }

        rb_offset &= ~(qemu_ram_pagesize(rb) - 1);
        trace_postcopy_ram_fault_thread_request(msg.arg.pagefault.address,
                                                qemu_ram_get_idstr(rb),
                                                rb_offset);

        /*
         * Send the request to the source - we want to request one
         * of our host page sizes (which is >= TPS)
         */
        if (rb != last_rb) {
            last_rb = rb;
            migrate_send_rp_req_pages(mis, qemu_ram_get_idstr(rb),
                                      rb_offset, qemu_ram_pagesize(rb));
        } else {
            /* Save some space */
            migrate_send_rp_req_pages(mis, NULL,
                                      rb_offset, qemu_ram_pagesize(rb));
        }
    }
    trace_postcopy_ram_fault_thread_exit();
    return NULL;
}

int postcopy_ram_enable_notify(MigrationIncomingState *mis)
{
    /* Open the fd for the kernel to give us userfaults */
    mis->userfault_fd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    if (mis->userfault_fd == -1) {
        error_report("%s: Failed to open userfault fd: %s", __func__,
                     strerror(errno));
        return -1;
    }

    /*
     * Although the host check already tested the API, we need to
     * do the check again as an ABI handshake on the new fd.
     */
    if (!ufd_version_check(mis->userfault_fd)) {
        close(mis->userfault_fd);
        return -1;
    }

    /* Now an eventfd we use to tell the fault-thread to quit */
    mis->userfault_quit_fd = eventfd(0, EFD_CLOEXEC);
    if (mis->userfault_quit_fd == -1) {
        error_report("%s: Opening userfault_quit_fd: %s", __func__,
                     strerror(errno));
        close(mis->userfault_fd);
        return -1;
    }

    qemu_sem_init(&mis->fault_thread_sem, 0);
    qemu_thread_create(&mis->fault_thread, "postcopy/fault",
                       postcopy_ram_fault_thread, mis, QEMU_THREAD_JOINABLE);
    qemu_sem_wait(&mis->fault_thread_sem);
    qemu_sem_destroy(&mis->fault_thread_sem);
    mis->have_fault_thread = true;

    /* Mark so that we get notified of accesses to unwritten areas */
    if (qemu_ram_foreach_block(ram_block_enable_notify, mis)) {
        return -1;
    }

    /*
     * Ballooning can mark pages as absent while we're postcopying;
     * that would cause false userfaults.
     */
    qemu_balloon_inhibit(true);

    trace_postcopy_ram_enable_notify();

    return 0;
}
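/*
 * Request granularity note: the fault thread above always asks the source
 * for a whole host page of the RAMBlock, which is >= the target page size
 * (TPS).  With illustrative numbers, a 2MB hugetlbfs block on a host using
 * 4KB target pages means a single fault pulls 2MB / 4KB = 512 target pages
 * across, placed below by one atomic UFFDIO_COPY.
 */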
/*
 * Place a host page (from) at (host) atomically
 * returns 0 on success
 */
int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
                        size_t pagesize)
{
    struct uffdio_copy copy_struct;

    copy_struct.dst = (uint64_t)(uintptr_t)host;
    copy_struct.src = (uint64_t)(uintptr_t)from;
    copy_struct.len = pagesize;
    copy_struct.mode = 0;

    /* The copy also acks to the kernel, waking up any thread stalled on
     * the page.
     * TODO: We can inhibit that ack and only do it if it was requested,
     * which would be slightly cheaper, but we'd have to be careful
     * of the order of updating our page state.
     */
    if (ioctl(mis->userfault_fd, UFFDIO_COPY, &copy_struct)) {
        int e = errno;
        error_report("%s: %s copy host: %p from: %p (size: %zu)",
                     __func__, strerror(e), host, from, pagesize);

        return -e;
    }

    trace_postcopy_place_page(host);
    return 0;
}

/*
 * Place a zero page at (host) atomically
 * returns 0 on success
 */
int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
                             size_t pagesize)
{
    trace_postcopy_place_page_zero(host);

    if (pagesize == getpagesize()) {
        struct uffdio_zeropage zero_struct;
        zero_struct.range.start = (uint64_t)(uintptr_t)host;
        zero_struct.range.len = getpagesize();
        zero_struct.mode = 0;

        if (ioctl(mis->userfault_fd, UFFDIO_ZEROPAGE, &zero_struct)) {
            int e = errno;
            error_report("%s: %s zero host: %p",
                         __func__, strerror(e), host);

            return -e;
        }
    } else {
        /* The kernel can't use UFFDIO_ZEROPAGE for hugepages */
        if (!mis->postcopy_tmp_zero_page) {
            mis->postcopy_tmp_zero_page = mmap(NULL, mis->largest_page_size,
                                               PROT_READ | PROT_WRITE,
                                               MAP_PRIVATE | MAP_ANONYMOUS,
                                               -1, 0);
            if (mis->postcopy_tmp_zero_page == MAP_FAILED) {
                int e = errno;
                mis->postcopy_tmp_zero_page = NULL;
                error_report("%s: %s mapping large zero page",
                             __func__, strerror(e));
                return -e;
            }
            memset(mis->postcopy_tmp_zero_page, '\0', mis->largest_page_size);
        }
        return postcopy_place_page(mis, host, mis->postcopy_tmp_zero_page,
                                   pagesize);
    }

    return 0;
}
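/*
 * Design note on the hugepage path above: the kernel only implements
 * UFFDIO_ZEROPAGE for the base page size, so for hugepages we fall back
 * to a UFFDIO_COPY from postcopy_tmp_zero_page, a buffer we zero once and
 * then reuse for every zeroed hugepage.  That costs an extra copy per page
 * but keeps the placement a single atomic operation.
 */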
/*
 * Returns a target page of memory that can be mapped at a later point in time
 * using postcopy_place_page
 * The same address is used repeatedly, postcopy_place_page just takes the
 * backing page away.
 * Returns: Pointer to allocated page
 */
void *postcopy_get_tmp_page(MigrationIncomingState *mis)
{
    if (!mis->postcopy_tmp_page) {
        mis->postcopy_tmp_page = mmap(NULL, mis->largest_page_size,
                                      PROT_READ | PROT_WRITE, MAP_PRIVATE |
                                      MAP_ANONYMOUS, -1, 0);
        if (mis->postcopy_tmp_page == MAP_FAILED) {
            mis->postcopy_tmp_page = NULL;
            error_report("%s: %s", __func__, strerror(errno));
            return NULL;
        }
    }

    return mis->postcopy_tmp_page;
}

#else
/* No target OS support, stubs just fail */
bool postcopy_ram_supported_by_host(void)
{
    error_report("%s: No OS support", __func__);
    return false;
}

int postcopy_ram_incoming_init(MigrationIncomingState *mis, size_t ram_pages)
{
    error_report("postcopy_ram_incoming_init: No OS support");
    return -1;
}

int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_ram_enable_notify(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
                        size_t pagesize)
{
    assert(0);
    return -1;
}

int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
                             size_t pagesize)
{
    assert(0);
    return -1;
}

void *postcopy_get_tmp_page(MigrationIncomingState *mis)
{
    assert(0);
    return NULL;
}

#endif

/* ------------------------------------------------------------------------- */

/**
 * postcopy_discard_send_init: Called at the start of each RAMBlock before
 *   asking to discard individual ranges.
 *
 * @ms: The current migration state.
 * @offset: the bitmap offset of the named RAMBlock in the migration
 *   bitmap.
 * @name: RAMBlock that discards will operate on.
 *
 * returns: a new PDS.
 */
PostcopyDiscardState *postcopy_discard_send_init(MigrationState *ms,
                                                 unsigned long offset,
                                                 const char *name)
{
    PostcopyDiscardState *res = g_malloc0(sizeof(PostcopyDiscardState));

    /* g_malloc0() aborts rather than return NULL, so res is always valid */
    res->ramblock_name = name;
    res->offset = offset;

    return res;
}
/**
 * postcopy_discard_send_range: Called by the bitmap code for each chunk to
 *   discard. May send a discard message, may just leave it queued to
 *   be sent later.
 *
 * @ms: Current migration state.
 * @pds: Structure initialised by postcopy_discard_send_init().
 * @start,@length: a range of pages in the migration bitmap in the
 *   RAM block passed to postcopy_discard_send_init() (length=1 is one page)
 */
void postcopy_discard_send_range(MigrationState *ms, PostcopyDiscardState *pds,
                                 unsigned long start, unsigned long length)
{
    size_t tp_size = qemu_target_page_size();
    /* Convert to byte offsets within the RAM block */
    pds->start_list[pds->cur_entry] = (start - pds->offset) * tp_size;
    pds->length_list[pds->cur_entry] = length * tp_size;
    trace_postcopy_discard_send_range(pds->ramblock_name, start, length);
    pds->cur_entry++;
    pds->nsentwords++;

    if (pds->cur_entry == MAX_DISCARDS_PER_COMMAND) {
        /* Full set, ship it! */
        qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
                                              pds->ramblock_name,
                                              pds->cur_entry,
                                              pds->start_list,
                                              pds->length_list);
        pds->nsentcmds++;
        pds->cur_entry = 0;
    }
}

/**
 * postcopy_discard_send_finish: Called at the end of each RAMBlock by the
 *   bitmap code. Sends any outstanding discard messages, frees the PDS.
 *
 * @ms: Current migration state.
 * @pds: Structure initialised by postcopy_discard_send_init().
 */
void postcopy_discard_send_finish(MigrationState *ms, PostcopyDiscardState *pds)
{
    /* Anything unsent? */
    if (pds->cur_entry) {
        qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
                                              pds->ramblock_name,
                                              pds->cur_entry,
                                              pds->start_list,
                                              pds->length_list);
        pds->nsentcmds++;
    }

    trace_postcopy_discard_send_finish(pds->ramblock_name, pds->nsentwords,
                                       pds->nsentcmds);

    g_free(pds);
}
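/*
 * Typical source-side usage of the discard API, per RAMBlock (a sketch;
 * the real caller is the dirty-bitmap walk, and next_discard_run() here is
 * a hypothetical helper used purely for illustration):
 *
 *     PostcopyDiscardState *pds =
 *         postcopy_discard_send_init(ms, block_bitmap_offset, block_name);
 *
 *     while (next_discard_run(&start, &length)) {
 *         postcopy_discard_send_range(ms, pds, start, length);
 *     }
 *
 *     postcopy_discard_send_finish(ms, pds);   // flushes and frees pds
 */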