/*
 * Postcopy migration for RAM
 *
 * Copyright 2013-2015 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Dave Gilbert <dgilbert@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

/*
 * Postcopy is a migration technique where the execution flips from the
 * source to the destination before all the data has been copied.
 */

#include <glib.h>
#include <stdio.h>
#include <unistd.h>

#include "qemu-common.h"
#include "migration/migration.h"
#include "migration/postcopy-ram.h"
#include "sysemu/sysemu.h"
#include "sysemu/balloon.h"
#include "qemu/error-report.h"
#include "trace.h"

/* Arbitrary limit on size of each discard command,
 * keeps them around ~200 bytes
 */
#define MAX_DISCARDS_PER_COMMAND 12

struct PostcopyDiscardState {
    const char *ramblock_name;
    uint64_t offset; /* Bitmap entry for the 1st bit of this RAMBlock */
    uint16_t cur_entry;
    /*
     * Start and length of a discard range (bytes)
     */
    uint64_t start_list[MAX_DISCARDS_PER_COMMAND];
    uint64_t length_list[MAX_DISCARDS_PER_COMMAND];
    unsigned int nsentwords;
    unsigned int nsentcmds;
};

/* Postcopy needs to detect accesses to pages that haven't yet been copied
 * across, and efficiently map new pages in; the techniques for doing this
 * are target OS specific.
 */
#if defined(__linux__)

#include <poll.h>
#include <sys/eventfd.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <asm/types.h> /* for __u64 */
#endif

#if defined(__linux__) && defined(__NR_userfaultfd)
#include <linux/userfaultfd.h>

static bool ufd_version_check(int ufd)
{
    struct uffdio_api api_struct;
    uint64_t ioctl_mask;

    api_struct.api = UFFD_API;
    api_struct.features = 0;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        error_report("postcopy_ram_supported_by_host: UFFDIO_API failed: %s",
                     strerror(errno));
        return false;
    }

    ioctl_mask = (__u64)1 << _UFFDIO_REGISTER |
                 (__u64)1 << _UFFDIO_UNREGISTER;
    if ((api_struct.ioctls & ioctl_mask) != ioctl_mask) {
        error_report("Missing userfault features: %" PRIx64,
                     (uint64_t)(~api_struct.ioctls & ioctl_mask));
        return false;
    }

    return true;
}

/*
 * Note: This has the side effect of munlock'ing all of RAM; that's
 * normally fine since if the postcopy succeeds it gets turned back on at the
 * end.
 */
bool postcopy_ram_supported_by_host(void)
{
    long pagesize = getpagesize();
    int ufd = -1;
    bool ret = false; /* Error unless we change it */
    void *testarea = NULL;
    struct uffdio_register reg_struct;
    struct uffdio_range range_struct;
    uint64_t feature_mask;

    if ((1ul << qemu_target_page_bits()) > pagesize) {
        error_report("Target page size bigger than host page size");
        goto out;
    }

    ufd = syscall(__NR_userfaultfd, O_CLOEXEC);
    if (ufd == -1) {
        error_report("%s: userfaultfd not available: %s", __func__,
                     strerror(errno));
        goto out;
    }

    /* Version and features check */
    if (!ufd_version_check(ufd)) {
        goto out;
    }

    /*
     * userfault and mlock don't go together; we'll put it back later if
     * it was enabled.
     */
    if (munlockall()) {
        error_report("%s: munlockall: %s", __func__, strerror(errno));
        goto out;
    }

    /*
     * We need to check that the ops we need are supported on anon memory.
     * To do that we need to register a chunk and see the flags that
     * are returned.
     */
    testarea = mmap(NULL, pagesize, PROT_READ | PROT_WRITE, MAP_PRIVATE |
                                    MAP_ANONYMOUS, -1, 0);
    if (testarea == MAP_FAILED) {
        error_report("%s: Failed to map test area: %s", __func__,
                     strerror(errno));
        goto out;
    }
    g_assert(((size_t)testarea & (pagesize - 1)) == 0);

    reg_struct.range.start = (uintptr_t)testarea;
    reg_struct.range.len = pagesize;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    if (ioctl(ufd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        goto out;
    }

    range_struct.start = (uintptr_t)testarea;
    range_struct.len = pagesize;
    if (ioctl(ufd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s userfault unregister: %s", __func__, strerror(errno));
        goto out;
    }

    feature_mask = (__u64)1 << _UFFDIO_WAKE |
                   (__u64)1 << _UFFDIO_COPY |
                   (__u64)1 << _UFFDIO_ZEROPAGE;
    if ((reg_struct.ioctls & feature_mask) != feature_mask) {
        error_report("Missing userfault map features: %" PRIx64,
                     (uint64_t)(~reg_struct.ioctls & feature_mask));
        goto out;
    }

    /* Success! */
    ret = true;
 out:
    if (testarea) {
        munmap(testarea, pagesize);
    }
    if (ufd != -1) {
        close(ufd);
    }
    return ret;
}

/**
 * postcopy_ram_discard_range: Discard a range of memory.
 * We can assume that postcopy_ram_supported_by_host() returned true if
 * we've got this far.
 *
 * @mis: Current incoming migration state.
 * @start, @length: range of memory to discard.
 *
 * returns: 0 on success.
 */
int postcopy_ram_discard_range(MigrationIncomingState *mis, uint8_t *start,
                               size_t length)
{
    trace_postcopy_ram_discard_range(start, length);
    if (madvise(start, length, MADV_DONTNEED)) {
        error_report("%s MADV_DONTNEED: %s", __func__, strerror(errno));
        return -1;
    }

    return 0;
}

/*
 * Setup an area of RAM so that it *can* be used for postcopy later; this
 * must be done right at the start prior to pre-copy.
 * opaque should be the MIS.
 */
static int init_range(const char *block_name, void *host_addr,
                      ram_addr_t offset, ram_addr_t length, void *opaque)
{
    MigrationIncomingState *mis = opaque;

    trace_postcopy_init_range(block_name, host_addr, offset, length);

    /*
     * We need the whole of RAM to be truly empty for postcopy, so things
     * like ROMs and any data tables built during init must be zero'd
     * - we're going to get the copy from the source anyway.
     * (Precopy will just overwrite this data, so doesn't need the discard)
     */
    if (postcopy_ram_discard_range(mis, host_addr, length)) {
        return -1;
    }

    return 0;
}

/*
 * At the end of migration, undo the effects of init_range
 * opaque should be the MIS.
 */
static int cleanup_range(const char *block_name, void *host_addr,
                         ram_addr_t offset, ram_addr_t length, void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffdio_range range_struct;
    trace_postcopy_cleanup_range(block_name, host_addr, offset, length);

    /*
     * We turned off hugepage for the precopy stage with postcopy enabled;
     * we can turn it back on now.
     */
    if (qemu_madvise(host_addr, length, QEMU_MADV_HUGEPAGE)) {
        error_report("%s HUGEPAGE: %s", __func__, strerror(errno));
        return -1;
    }

    /*
     * We can also turn off userfault now since we should have all the
     * pages.  It can be useful to leave it on to debug postcopy
     * if you're not sure it's always getting every page.
     */
    range_struct.start = (uintptr_t)host_addr;
    range_struct.len = length;

    if (ioctl(mis->userfault_fd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s: userfault unregister %s", __func__, strerror(errno));

        return -1;
    }

    return 0;
}

/*
 * Initialise postcopy-ram, setting the RAM to a state where we can go into
 * postcopy later; must be called prior to any precopy.
 * called from arch_init's similarly named ram_postcopy_incoming_init
 */
int postcopy_ram_incoming_init(MigrationIncomingState *mis, size_t ram_pages)
{
    if (qemu_ram_foreach_block(init_range, mis)) {
        return -1;
    }

    return 0;
}

/*
 * At the end of a migration where postcopy_ram_incoming_init was called.
 */
int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    trace_postcopy_ram_incoming_cleanup_entry();

    if (mis->have_fault_thread) {
        uint64_t tmp64;

        if (qemu_ram_foreach_block(cleanup_range, mis)) {
            return -1;
        }
        /*
         * Tell the fault_thread to exit, it's an eventfd that should
         * currently be at 0, we're going to increment it to 1
         */
        tmp64 = 1;
        if (write(mis->userfault_quit_fd, &tmp64, 8) == 8) {
            trace_postcopy_ram_incoming_cleanup_join();
            qemu_thread_join(&mis->fault_thread);
        } else {
            /* Not much we can do here, but may as well report it */
            error_report("%s: incrementing userfault_quit_fd: %s", __func__,
                         strerror(errno));
        }
        trace_postcopy_ram_incoming_cleanup_closeuf();
        close(mis->userfault_fd);
        close(mis->userfault_quit_fd);
        mis->have_fault_thread = false;
    }

    qemu_balloon_inhibit(false);

    if (enable_mlock) {
        if (os_mlock() < 0) {
            error_report("mlock: %s", strerror(errno));
            /*
             * It doesn't feel right to fail at this point, we have a valid
             * VM state.
             */
        }
    }

    postcopy_state_set(POSTCOPY_INCOMING_END);
    migrate_send_rp_shut(mis, qemu_file_get_error(mis->from_src_file) != 0);

    if (mis->postcopy_tmp_page) {
        munmap(mis->postcopy_tmp_page, getpagesize());
        mis->postcopy_tmp_page = NULL;
    }
    trace_postcopy_ram_incoming_cleanup_exit();
    return 0;
}

/*
 * Disable huge pages on an area
 */
static int nhp_range(const char *block_name, void *host_addr,
                     ram_addr_t offset, ram_addr_t length, void *opaque)
{
    trace_postcopy_nhp_range(block_name, host_addr, offset, length);

    /*
     * Before we do discards we need to ensure those discards really
     * do delete areas of the page, even if THP thinks a hugepage would
     * be a good idea, so force hugepages off.
     */
    if (qemu_madvise(host_addr, length, QEMU_MADV_NOHUGEPAGE)) {
        error_report("%s: NOHUGEPAGE: %s", __func__, strerror(errno));
        return -1;
    }

    return 0;
}

/*
 * Userfault requires us to mark RAM as NOHUGEPAGE prior to discard;
 * however, leaving it until after precopy means that most of the precopy
 * data is still THPd
 */
int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
    if (qemu_ram_foreach_block(nhp_range, mis)) {
        return -1;
    }

    postcopy_state_set(POSTCOPY_INCOMING_DISCARD);

    return 0;
}

/*
 * Mark the given area of RAM as requiring notification to unwritten areas
 * Used as a callback on qemu_ram_foreach_block.
 *   host_addr: Base of area to mark
 *   offset: Offset in the whole ram arena
 *   length: Length of the section
 *   opaque: MigrationIncomingState pointer
 * Returns 0 on success
 */
static int ram_block_enable_notify(const char *block_name, void *host_addr,
                                   ram_addr_t offset, ram_addr_t length,
                                   void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffdio_register reg_struct;

    reg_struct.range.start = (uintptr_t)host_addr;
    reg_struct.range.len = length;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    /* Now tell our userfault_fd that it's responsible for this area */
    if (ioctl(mis->userfault_fd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        return -1;
    }

    return 0;
}

/*
 * Handle faults detected by the USERFAULT markings
 */
static void *postcopy_ram_fault_thread(void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffd_msg msg;
    int ret;
    size_t hostpagesize = getpagesize();
    RAMBlock *rb = NULL;
    RAMBlock *last_rb = NULL; /* last RAMBlock we sent part of */

    trace_postcopy_ram_fault_thread_entry();
    qemu_sem_post(&mis->fault_thread_sem);

    while (true) {
        ram_addr_t rb_offset;
        ram_addr_t in_raspace;
        struct pollfd pfd[2];

        /*
         * We're mainly waiting for the kernel to give us a faulting HVA,
         * however we can be told to quit via userfault_quit_fd which is
         * an eventfd
         */
        pfd[0].fd = mis->userfault_fd;
        pfd[0].events = POLLIN;
        pfd[0].revents = 0;
        pfd[1].fd = mis->userfault_quit_fd;
        pfd[1].events = POLLIN; /* Waiting for eventfd to go positive */
        pfd[1].revents = 0;

        if (poll(pfd, 2, -1 /* Wait forever */) == -1) {
            error_report("%s: userfault poll: %s", __func__, strerror(errno));
            break;
        }

        if (pfd[1].revents) {
            trace_postcopy_ram_fault_thread_quit();
            break;
        }

        ret = read(mis->userfault_fd, &msg, sizeof(msg));
        if (ret != sizeof(msg)) {
            if (errno == EAGAIN) {
                /*
                 * if a wake up happens on the other thread just after
                 * the poll, there is nothing to read.
                 */
                continue;
            }
            if (ret < 0) {
                error_report("%s: Failed to read full userfault message: %s",
                             __func__, strerror(errno));
                break;
            } else {
                error_report("%s: Read %d bytes from userfaultfd expected %zd",
                             __func__, ret, sizeof(msg));
                break; /* Lost alignment, don't know what we'd read next */
            }
        }
        if (msg.event != UFFD_EVENT_PAGEFAULT) {
            error_report("%s: Read unexpected event %u from userfaultfd",
                         __func__, msg.event);
            continue; /* It's not a page fault, shouldn't happen */
        }

        rb = qemu_ram_block_from_host(
                 (void *)(uintptr_t)msg.arg.pagefault.address,
                 true, &in_raspace, &rb_offset);
        if (!rb) {
            error_report("postcopy_ram_fault_thread: Fault outside guest: %"
                         PRIx64, (uint64_t)msg.arg.pagefault.address);
            break;
        }

        rb_offset &= ~(hostpagesize - 1);
        trace_postcopy_ram_fault_thread_request(msg.arg.pagefault.address,
                                                qemu_ram_get_idstr(rb),
                                                rb_offset);

        /*
         * Send the request to the source - we want to request one
         * of our host page sizes (which is >= TPS)
         */
        if (rb != last_rb) {
            last_rb = rb;
            migrate_send_rp_req_pages(mis, qemu_ram_get_idstr(rb),
                                      rb_offset, hostpagesize);
        } else {
            /* Save some space */
            migrate_send_rp_req_pages(mis, NULL,
                                      rb_offset, hostpagesize);
        }
    }
    trace_postcopy_ram_fault_thread_exit();
    return NULL;
}

int postcopy_ram_enable_notify(MigrationIncomingState *mis)
{
    /* Open the fd for the kernel to give us userfaults */
    mis->userfault_fd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    if (mis->userfault_fd == -1) {
        error_report("%s: Failed to open userfault fd: %s", __func__,
                     strerror(errno));
        return -1;
    }

    /*
     * Although the host check already tested the API, we need to
     * do the check again as an ABI handshake on the new fd.
     */
    if (!ufd_version_check(mis->userfault_fd)) {
        return -1;
    }

    /* Now an eventfd we use to tell the fault-thread to quit */
    mis->userfault_quit_fd = eventfd(0, EFD_CLOEXEC);
    if (mis->userfault_quit_fd == -1) {
        error_report("%s: Opening userfault_quit_fd: %s", __func__,
                     strerror(errno));
        close(mis->userfault_fd);
        return -1;
    }

    qemu_sem_init(&mis->fault_thread_sem, 0);
    qemu_thread_create(&mis->fault_thread, "postcopy/fault",
                       postcopy_ram_fault_thread, mis, QEMU_THREAD_JOINABLE);
    qemu_sem_wait(&mis->fault_thread_sem);
    qemu_sem_destroy(&mis->fault_thread_sem);
    mis->have_fault_thread = true;

    /* Mark so that we get notified of accesses to unwritten areas */
    if (qemu_ram_foreach_block(ram_block_enable_notify, mis)) {
        return -1;
    }

    /*
     * Ballooning can mark pages as absent while we're postcopying;
     * that would cause false userfaults.
     */
    qemu_balloon_inhibit(true);

    trace_postcopy_ram_enable_notify();

    return 0;
}

/*
 * Place a host page (from) at (host) atomically
 * returns 0 on success
 */
int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from)
{
    struct uffdio_copy copy_struct;

    copy_struct.dst = (uint64_t)(uintptr_t)host;
    copy_struct.src = (uint64_t)(uintptr_t)from;
    copy_struct.len = getpagesize();
    copy_struct.mode = 0;

    /* copy also acks to the kernel waking the stalled thread up
     * TODO: We can inhibit that ack and only do it if it was requested
     * which would be slightly cheaper, but we'd have to be careful
     * of the order of updating our page state.
     */
    if (ioctl(mis->userfault_fd, UFFDIO_COPY, &copy_struct)) {
        int e = errno;
        error_report("%s: %s copy host: %p from: %p",
                     __func__, strerror(e), host, from);

        return -e;
    }

    trace_postcopy_place_page(host);
    return 0;
}

/*
 * Place a zero page at (host) atomically
 * returns 0 on success
 */
int postcopy_place_page_zero(MigrationIncomingState *mis, void *host)
{
    struct uffdio_zeropage zero_struct;

    zero_struct.range.start = (uint64_t)(uintptr_t)host;
    zero_struct.range.len = getpagesize();
    zero_struct.mode = 0;

    if (ioctl(mis->userfault_fd, UFFDIO_ZEROPAGE, &zero_struct)) {
        int e = errno;
        error_report("%s: %s zero host: %p",
                     __func__, strerror(e), host);

        return -e;
    }

    trace_postcopy_place_page_zero(host);
    return 0;
}

/*
 * Returns a target page of memory that can be mapped at a later point in time
 * using postcopy_place_page
 * The same address is used repeatedly, postcopy_place_page just takes the
 * backing page away.
 * Returns: Pointer to allocated page
 *
 */
void *postcopy_get_tmp_page(MigrationIncomingState *mis)
{
    if (!mis->postcopy_tmp_page) {
        mis->postcopy_tmp_page = mmap(NULL, getpagesize(),
                                      PROT_READ | PROT_WRITE, MAP_PRIVATE |
                                      MAP_ANONYMOUS, -1, 0);
        if (mis->postcopy_tmp_page == MAP_FAILED) {
            mis->postcopy_tmp_page = NULL;
            error_report("%s: %s", __func__, strerror(errno));
            return NULL;
        }
    }

    return mis->postcopy_tmp_page;
}

#else
/* No target OS support, stubs just fail */
bool postcopy_ram_supported_by_host(void)
{
    error_report("%s: No OS support", __func__);
    return false;
}

int postcopy_ram_incoming_init(MigrationIncomingState *mis, size_t ram_pages)
{
    error_report("postcopy_ram_incoming_init: No OS support");
    return -1;
}

int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_ram_discard_range(MigrationIncomingState *mis, uint8_t *start,
                               size_t length)
{
    assert(0);
    return -1;
}

int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_ram_enable_notify(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from)
{
    assert(0);
    return -1;
}

int postcopy_place_page_zero(MigrationIncomingState *mis, void *host)
{
    assert(0);
    return -1;
}

void *postcopy_get_tmp_page(MigrationIncomingState *mis)
{
    assert(0);
    return NULL;
}

#endif

/* ------------------------------------------------------------------------- */

/**
 * postcopy_discard_send_init: Called at the start of each RAMBlock before
 *   asking to discard individual ranges.
 *
 * @ms: The current migration state.
 * @offset: the bitmap offset of the named RAMBlock in the migration
 *   bitmap.
 * @name: RAMBlock that discards will operate on.
 *
 * returns: a new PDS.
 */
PostcopyDiscardState *postcopy_discard_send_init(MigrationState *ms,
                                                 unsigned long offset,
                                                 const char *name)
{
    PostcopyDiscardState *res = g_malloc0(sizeof(PostcopyDiscardState));

    if (res) {
        res->ramblock_name = name;
        res->offset = offset;
    }

    return res;
}

/**
 * postcopy_discard_send_range: Called by the bitmap code for each chunk to
 *   discard. May send a discard message, may just leave it queued to
 *   be sent later.
 *
 * @ms: Current migration state.
 * @pds: Structure initialised by postcopy_discard_send_init().
 * @start,@length: a range of pages in the migration bitmap in the
 *   RAM block passed to postcopy_discard_send_init() (length=1 is one page)
 */
void postcopy_discard_send_range(MigrationState *ms, PostcopyDiscardState *pds,
                                 unsigned long start, unsigned long length)
{
    size_t tp_bits = qemu_target_page_bits();
    /* Convert to byte offsets within the RAM block */
    pds->start_list[pds->cur_entry] = (start - pds->offset) << tp_bits;
    pds->length_list[pds->cur_entry] = length << tp_bits;
    trace_postcopy_discard_send_range(pds->ramblock_name, start, length);
    pds->cur_entry++;
    pds->nsentwords++;

    if (pds->cur_entry == MAX_DISCARDS_PER_COMMAND) {
        /* Full set, ship it! */
        qemu_savevm_send_postcopy_ram_discard(ms->file, pds->ramblock_name,
                                              pds->cur_entry,
                                              pds->start_list,
                                              pds->length_list);
        pds->nsentcmds++;
        pds->cur_entry = 0;
    }
}

/**
 * postcopy_discard_send_finish: Called at the end of each RAMBlock by the
 *   bitmap code. Sends any outstanding discard messages, frees the PDS
 *
 * @ms: Current migration state.
 * @pds: Structure initialised by postcopy_discard_send_init().
 */
void postcopy_discard_send_finish(MigrationState *ms, PostcopyDiscardState *pds)
{
    /* Anything unsent? */
    if (pds->cur_entry) {
        qemu_savevm_send_postcopy_ram_discard(ms->file, pds->ramblock_name,
                                              pds->cur_entry,
                                              pds->start_list,
                                              pds->length_list);
        pds->nsentcmds++;
    }

    trace_postcopy_discard_send_finish(pds->ramblock_name, pds->nsentwords,
                                       pds->nsentcmds);

    g_free(pds);
}