/*
 * Postcopy migration for RAM
 *
 * Copyright 2013-2015 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Dave Gilbert <dgilbert@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

/*
 * Postcopy is a migration technique where the execution flips from the
 * source to the destination before all the data has been copied.
 */

#include "qemu/osdep.h"

#include "qemu-common.h"
#include "migration/migration.h"
#include "migration/postcopy-ram.h"
#include "sysemu/sysemu.h"
#include "sysemu/balloon.h"
#include "qemu/error-report.h"
#include "trace.h"

/* Arbitrary limit on size of each discard command,
 * keeps them around ~200 bytes
 */
#define MAX_DISCARDS_PER_COMMAND 12

struct PostcopyDiscardState {
    const char *ramblock_name;
    uint64_t offset; /* Bitmap entry for the 1st bit of this RAMBlock */
    uint16_t cur_entry;
    /*
     * Start and length of a discard range (bytes)
     */
    uint64_t start_list[MAX_DISCARDS_PER_COMMAND];
    uint64_t length_list[MAX_DISCARDS_PER_COMMAND];
    unsigned int nsentwords;
    unsigned int nsentcmds;
};
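/*
 * A rough size check on MAX_DISCARDS_PER_COMMAND: each command carries at
 * most 12 (start, length) pairs of 64-bit values, i.e. 12 * 16 = 192 bytes
 * of range data plus the RAMBlock name, which is what keeps a command at
 * roughly the ~200 bytes mentioned above.
 */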
/* Postcopy needs to detect accesses to pages that haven't yet been copied
 * across, and to map new pages in efficiently; the techniques for doing
 * this are target OS specific.
 */
#if defined(__linux__)

#include <poll.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <asm/types.h> /* for __u64 */
#endif

#if defined(__linux__) && defined(__NR_userfaultfd) && defined(CONFIG_EVENTFD)
#include <sys/eventfd.h>
#include <linux/userfaultfd.h>

static bool ufd_version_check(int ufd)
{
    struct uffdio_api api_struct;
    uint64_t ioctl_mask;

    api_struct.api = UFFD_API;
    api_struct.features = 0;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        error_report("postcopy_ram_supported_by_host: UFFDIO_API failed: %s",
                     strerror(errno));
        return false;
    }

    ioctl_mask = (__u64)1 << _UFFDIO_REGISTER |
                 (__u64)1 << _UFFDIO_UNREGISTER;
    if ((api_struct.ioctls & ioctl_mask) != ioctl_mask) {
        error_report("Missing userfault features: %" PRIx64,
                     (uint64_t)(~api_struct.ioctls & ioctl_mask));
        return false;
    }

    return true;
}

/*
 * Note: This has the side effect of munlock'ing all of RAM; that's
 * normally fine since if the postcopy succeeds it gets turned back on at
 * the end.
 */
bool postcopy_ram_supported_by_host(void)
{
    long pagesize = getpagesize();
    int ufd = -1;
    bool ret = false; /* Error unless we change it */
    void *testarea = NULL;
    struct uffdio_register reg_struct;
    struct uffdio_range range_struct;
    uint64_t feature_mask;

    if ((1ul << qemu_target_page_bits()) > pagesize) {
        error_report("Target page size bigger than host page size");
        goto out;
    }

    ufd = syscall(__NR_userfaultfd, O_CLOEXEC);
    if (ufd == -1) {
        error_report("%s: userfaultfd not available: %s", __func__,
                     strerror(errno));
        goto out;
    }

    /* Version and features check */
    if (!ufd_version_check(ufd)) {
        goto out;
    }

    /*
     * userfault and mlock don't go together; we'll put it back later if
     * it was enabled.
     */
    if (munlockall()) {
        error_report("%s: munlockall: %s", __func__, strerror(errno));
        goto out;
    }

    /*
     * We need to check that the ops we need are supported on anon memory.
     * To do that we need to register a chunk and see the flags that
     * are returned.
     */
    testarea = mmap(NULL, pagesize, PROT_READ | PROT_WRITE, MAP_PRIVATE |
                                    MAP_ANONYMOUS, -1, 0);
    if (testarea == MAP_FAILED) {
        error_report("%s: Failed to map test area: %s", __func__,
                     strerror(errno));
        testarea = NULL;
        goto out;
    }
    g_assert(((size_t)testarea & (pagesize - 1)) == 0);

    reg_struct.range.start = (uintptr_t)testarea;
    reg_struct.range.len = pagesize;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    if (ioctl(ufd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        goto out;
    }

    range_struct.start = (uintptr_t)testarea;
    range_struct.len = pagesize;
    if (ioctl(ufd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s userfault unregister: %s", __func__, strerror(errno));
        goto out;
    }

    feature_mask = (__u64)1 << _UFFDIO_WAKE |
                   (__u64)1 << _UFFDIO_COPY |
                   (__u64)1 << _UFFDIO_ZEROPAGE;
    if ((reg_struct.ioctls & feature_mask) != feature_mask) {
        error_report("Missing userfault map features: %" PRIx64,
                     (uint64_t)(~reg_struct.ioctls & feature_mask));
        goto out;
    }

    /* Success! */
    ret = true;
out:
    if (testarea) {
        munmap(testarea, pagesize);
    }
    if (ufd != -1) {
        close(ufd);
    }
    return ret;
}

/**
 * postcopy_ram_discard_range: Discard a range of memory.
 * We can assume that if we've been called then
 * postcopy_ram_supported_by_host() returned true.
 *
 * @mis: Current incoming migration state.
 * @start, @length: range of memory to discard.
 *
 * returns: 0 on success.
 */
int postcopy_ram_discard_range(MigrationIncomingState *mis, uint8_t *start,
                               size_t length)
{
    trace_postcopy_ram_discard_range(start, length);
    if (madvise(start, length, MADV_DONTNEED)) {
        error_report("%s MADV_DONTNEED: %s", __func__, strerror(errno));
        return -1;
    }

    return 0;
}
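/*
 * A note on what the discard achieves: for anonymous memory MADV_DONTNEED
 * drops the backing page, so the next access either gets a fresh zero page
 * or, once the range has been registered with userfaultfd in MISSING mode,
 * raises a userfault instead.  That is what lets the fault thread below
 * detect accesses to pages we haven't received yet.
 */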
/*
 * Setup an area of RAM so that it *can* be used for postcopy later; this
 * must be done right at the start prior to pre-copy.
 * opaque should be the MIS.
 */
static int init_range(const char *block_name, void *host_addr,
                      ram_addr_t offset, ram_addr_t length, void *opaque)
{
    MigrationIncomingState *mis = opaque;

    trace_postcopy_init_range(block_name, host_addr, offset, length);

    /*
     * We need the whole of RAM to be truly empty for postcopy, so things
     * like ROMs and any data tables built during init must be zero'd
     * - we're going to get the copy from the source anyway.
     * (Precopy will just overwrite this data, so doesn't need the discard)
     */
    if (postcopy_ram_discard_range(mis, host_addr, length)) {
        return -1;
    }

    return 0;
}

/*
 * At the end of migration, undo the effects of init_range
 * opaque should be the MIS.
 */
static int cleanup_range(const char *block_name, void *host_addr,
                         ram_addr_t offset, ram_addr_t length, void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffdio_range range_struct;
    trace_postcopy_cleanup_range(block_name, host_addr, offset, length);

    /*
     * We turned off hugepage for the precopy stage with postcopy enabled;
     * we can turn it back on now.
     */
    qemu_madvise(host_addr, length, QEMU_MADV_HUGEPAGE);

    /*
     * We can also turn off userfault now since we should have all the
     * pages.  It can be useful to leave it on to debug postcopy
     * if you're not sure it's always getting every page.
     */
    range_struct.start = (uintptr_t)host_addr;
    range_struct.len = length;

    if (ioctl(mis->userfault_fd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s: userfault unregister %s", __func__, strerror(errno));

        return -1;
    }

    return 0;
}

/*
 * Initialise postcopy-ram, setting the RAM to a state where we can go into
 * postcopy later; must be called prior to any precopy.
 * called from arch_init's similarly named ram_postcopy_incoming_init
 */
int postcopy_ram_incoming_init(MigrationIncomingState *mis, size_t ram_pages)
{
    if (qemu_ram_foreach_block(init_range, mis)) {
        return -1;
    }

    return 0;
}

/*
 * At the end of a migration where postcopy_ram_incoming_init was called.
 */
int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    trace_postcopy_ram_incoming_cleanup_entry();

    if (mis->have_fault_thread) {
        uint64_t tmp64;

        if (qemu_ram_foreach_block(cleanup_range, mis)) {
            return -1;
        }
        /*
         * Tell the fault_thread to exit, it's an eventfd that should
         * currently be at 0, we're going to increment it to 1
         */
        tmp64 = 1;
        if (write(mis->userfault_quit_fd, &tmp64, 8) == 8) {
            trace_postcopy_ram_incoming_cleanup_join();
            qemu_thread_join(&mis->fault_thread);
        } else {
            /* Not much we can do here, but may as well report it */
            error_report("%s: incrementing userfault_quit_fd: %s", __func__,
                         strerror(errno));
        }
        trace_postcopy_ram_incoming_cleanup_closeuf();
        close(mis->userfault_fd);
        close(mis->userfault_quit_fd);
        mis->have_fault_thread = false;
    }

    qemu_balloon_inhibit(false);

    if (enable_mlock) {
        if (os_mlock() < 0) {
            error_report("mlock: %s", strerror(errno));
            /*
             * It doesn't feel right to fail at this point, we have a valid
             * VM state.
             */
        }
    }

    postcopy_state_set(POSTCOPY_INCOMING_END);
    migrate_send_rp_shut(mis, qemu_file_get_error(mis->from_src_file) != 0);

    if (mis->postcopy_tmp_page) {
        munmap(mis->postcopy_tmp_page, getpagesize());
        mis->postcopy_tmp_page = NULL;
    }
    trace_postcopy_ram_incoming_cleanup_exit();
    return 0;
}
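/*
 * The shutdown handshake used above, for reference: userfault_quit_fd is an
 * eventfd, so writing the 64-bit value 1 to it makes the fault thread's
 * poll() return with that fd readable; the thread sees pfd[1].revents set,
 * breaks out of its loop, and can then be joined safely.
 */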
/*
 * Disable huge pages on an area
 */
static int nhp_range(const char *block_name, void *host_addr,
                     ram_addr_t offset, ram_addr_t length, void *opaque)
{
    trace_postcopy_nhp_range(block_name, host_addr, offset, length);

    /*
     * Before we do discards we need to ensure those discards really
     * do delete areas of the page, even if THP thinks a hugepage would
     * be a good idea, so force hugepages off.
     */
    qemu_madvise(host_addr, length, QEMU_MADV_NOHUGEPAGE);

    return 0;
}

/*
 * Userfault requires us to mark RAM as NOHUGEPAGE prior to discard;
 * however, leaving it until after precopy means that most of the precopy
 * data is still THPd.
 */
int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
    if (qemu_ram_foreach_block(nhp_range, mis)) {
        return -1;
    }

    postcopy_state_set(POSTCOPY_INCOMING_DISCARD);

    return 0;
}

/*
 * Mark the given area of RAM as requiring notification to unwritten areas
 * Used as a callback on qemu_ram_foreach_block.
 *   host_addr: Base of area to mark
 *   offset: Offset in the whole ram arena
 *   length: Length of the section
 *   opaque: MigrationIncomingState pointer
 * Returns 0 on success
 */
static int ram_block_enable_notify(const char *block_name, void *host_addr,
                                   ram_addr_t offset, ram_addr_t length,
                                   void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffdio_register reg_struct;

    reg_struct.range.start = (uintptr_t)host_addr;
    reg_struct.range.len = length;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    /* Now tell our userfault_fd that it's responsible for this area */
    if (ioctl(mis->userfault_fd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        return -1;
    }

    return 0;
}

/*
 * Handle faults detected by the USERFAULT markings
 */
static void *postcopy_ram_fault_thread(void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffd_msg msg;
    int ret;
    size_t hostpagesize = getpagesize();
    RAMBlock *rb = NULL;
    RAMBlock *last_rb = NULL; /* last RAMBlock we sent part of */

    trace_postcopy_ram_fault_thread_entry();
    qemu_sem_post(&mis->fault_thread_sem);

    while (true) {
        ram_addr_t rb_offset;
        struct pollfd pfd[2];

        /*
         * We're mainly waiting for the kernel to give us a faulting HVA,
         * however we can be told to quit via userfault_quit_fd which is
         * an eventfd
         */
        pfd[0].fd = mis->userfault_fd;
        pfd[0].events = POLLIN;
        pfd[0].revents = 0;
        pfd[1].fd = mis->userfault_quit_fd;
        pfd[1].events = POLLIN; /* Waiting for eventfd to go positive */
        pfd[1].revents = 0;

        if (poll(pfd, 2, -1 /* Wait forever */) == -1) {
            error_report("%s: userfault poll: %s", __func__, strerror(errno));
            break;
        }

        if (pfd[1].revents) {
            trace_postcopy_ram_fault_thread_quit();
            break;
        }

        ret = read(mis->userfault_fd, &msg, sizeof(msg));
        if (ret != sizeof(msg)) {
            if (errno == EAGAIN) {
                /*
                 * if a wake up happens on the other thread just after
                 * the poll, there is nothing to read.
                 */
                continue;
            }
            if (ret < 0) {
                error_report("%s: Failed to read full userfault message: %s",
                             __func__, strerror(errno));
                break;
            } else {
                error_report("%s: Read %d bytes from userfaultfd expected %zu",
                             __func__, ret, sizeof(msg));
                break; /* Lost alignment, don't know what we'd read next */
            }
        }
        if (msg.event != UFFD_EVENT_PAGEFAULT) {
            error_report("%s: Read unexpected event %u from userfaultfd",
                         __func__, msg.event);
            continue; /* It's not a page fault, shouldn't happen */
        }

        rb = qemu_ram_block_from_host(
                 (void *)(uintptr_t)msg.arg.pagefault.address,
                 true, &rb_offset);
        if (!rb) {
            error_report("postcopy_ram_fault_thread: Fault outside guest: %"
                         PRIx64, (uint64_t)msg.arg.pagefault.address);
            break;
        }

        rb_offset &= ~(hostpagesize - 1);
        trace_postcopy_ram_fault_thread_request(msg.arg.pagefault.address,
                                                qemu_ram_get_idstr(rb),
                                                rb_offset);

        /*
         * Send the request to the source - we want to request one
         * of our host page sizes (which is >= TPS)
         */
        if (rb != last_rb) {
            last_rb = rb;
            migrate_send_rp_req_pages(mis, qemu_ram_get_idstr(rb),
                                      rb_offset, hostpagesize);
        } else {
            /* Save some space */
            migrate_send_rp_req_pages(mis, NULL,
                                      rb_offset, hostpagesize);
        }
    }
    trace_postcopy_ram_fault_thread_exit();
    return NULL;
}
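/*
 * How a single missed page is serviced (a summary of the pieces in this
 * file; the source side's handling of the request lives elsewhere):
 *
 *   1. A vCPU or other thread touches a page we haven't received yet.
 *   2. The kernel queues a UFFD_EVENT_PAGEFAULT message on userfault_fd
 *      and blocks the faulting thread.
 *   3. postcopy_ram_fault_thread() reads the message and asks the source
 *      for the containing host page via migrate_send_rp_req_pages().
 *   4. When the page data arrives, the incoming side places it with
 *      postcopy_place_page()/postcopy_place_page_zero(); the UFFDIO_COPY or
 *      UFFDIO_ZEROPAGE ioctl atomically maps the page and wakes the blocked
 *      thread.
 */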
int postcopy_ram_enable_notify(MigrationIncomingState *mis)
{
    /* Open the fd for the kernel to give us userfaults */
    mis->userfault_fd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    if (mis->userfault_fd == -1) {
        error_report("%s: Failed to open userfault fd: %s", __func__,
                     strerror(errno));
        return -1;
    }

    /*
     * Although the host check already tested the API, we need to
     * do the check again as an ABI handshake on the new fd.
     */
    if (!ufd_version_check(mis->userfault_fd)) {
        return -1;
    }

    /* Now an eventfd we use to tell the fault-thread to quit */
    mis->userfault_quit_fd = eventfd(0, EFD_CLOEXEC);
    if (mis->userfault_quit_fd == -1) {
        error_report("%s: Opening userfault_quit_fd: %s", __func__,
                     strerror(errno));
        close(mis->userfault_fd);
        return -1;
    }

    qemu_sem_init(&mis->fault_thread_sem, 0);
    qemu_thread_create(&mis->fault_thread, "postcopy/fault",
                       postcopy_ram_fault_thread, mis, QEMU_THREAD_JOINABLE);
    qemu_sem_wait(&mis->fault_thread_sem);
    qemu_sem_destroy(&mis->fault_thread_sem);
    mis->have_fault_thread = true;

    /* Mark so that we get notified of accesses to unwritten areas */
    if (qemu_ram_foreach_block(ram_block_enable_notify, mis)) {
        return -1;
    }

    /*
     * Ballooning can mark pages as absent while we're postcopying;
     * that would cause false userfaults.
     */
    qemu_balloon_inhibit(true);

    trace_postcopy_ram_enable_notify();

    return 0;
}
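/*
 * For orientation, the incoming-side call order implied by the comments on
 * the functions above (the actual callers live in the migration core, so
 * treat this as a sketch):
 *
 *   postcopy_ram_incoming_init()    - before precopy starts: discard all RAM
 *   postcopy_ram_prepare_discard()  - turn THP off ahead of the discards
 *   postcopy_ram_discard_range()    - apply the discard ranges requested by
 *                                     the source
 *   postcopy_ram_enable_notify()    - register RAM with userfaultfd and
 *                                     start the fault thread
 *   postcopy_ram_incoming_cleanup() - at the end: unregister, stop the
 *                                     thread, re-enable THP and mlock
 */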
/*
 * Place a host page (from) at (host) atomically
 * returns 0 on success
 */
int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from)
{
    struct uffdio_copy copy_struct;

    copy_struct.dst = (uint64_t)(uintptr_t)host;
    copy_struct.src = (uint64_t)(uintptr_t)from;
    copy_struct.len = getpagesize();
    copy_struct.mode = 0;

    /* copy also acks to the kernel waking the stalled thread up
     * TODO: We can inhibit that ack and only do it if it was requested
     * which would be slightly cheaper, but we'd have to be careful
     * of the order of updating our page state.
     */
    if (ioctl(mis->userfault_fd, UFFDIO_COPY, &copy_struct)) {
        int e = errno;
        error_report("%s: %s copy host: %p from: %p",
                     __func__, strerror(e), host, from);

        return -e;
    }

    trace_postcopy_place_page(host);
    return 0;
}

/*
 * Place a zero page at (host) atomically
 * returns 0 on success
 */
int postcopy_place_page_zero(MigrationIncomingState *mis, void *host)
{
    struct uffdio_zeropage zero_struct;

    zero_struct.range.start = (uint64_t)(uintptr_t)host;
    zero_struct.range.len = getpagesize();
    zero_struct.mode = 0;

    if (ioctl(mis->userfault_fd, UFFDIO_ZEROPAGE, &zero_struct)) {
        int e = errno;
        error_report("%s: %s zero host: %p",
                     __func__, strerror(e), host);

        return -e;
    }

    trace_postcopy_place_page_zero(host);
    return 0;
}

/*
 * Returns a target page of memory that can be mapped at a later point in time
 * using postcopy_place_page
 * The same address is used repeatedly, postcopy_place_page just takes the
 * backing page away.
 * Returns: Pointer to allocated page
 *
 */
void *postcopy_get_tmp_page(MigrationIncomingState *mis)
{
    if (!mis->postcopy_tmp_page) {
        mis->postcopy_tmp_page = mmap(NULL, getpagesize(),
                                      PROT_READ | PROT_WRITE, MAP_PRIVATE |
                                      MAP_ANONYMOUS, -1, 0);
        if (mis->postcopy_tmp_page == MAP_FAILED) {
            mis->postcopy_tmp_page = NULL;
            error_report("%s: %s", __func__, strerror(errno));
            return NULL;
        }
    }

    return mis->postcopy_tmp_page;
}
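/*
 * Typical use of postcopy_get_tmp_page() and postcopy_place_page() on the
 * incoming side (an illustrative sketch; the real caller lives in the RAM
 * load path, and 'buf' stands in for wherever the page data from the source
 * ended up):
 *
 *   void *tmp = postcopy_get_tmp_page(mis);
 *   memcpy(tmp, buf, getpagesize());
 *   postcopy_place_page(mis, host_page_addr, tmp);
 *
 * The same temporary address is reused for every page, as described in the
 * comment above postcopy_get_tmp_page().
 */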
#else
/* No target OS support, stubs just fail */
bool postcopy_ram_supported_by_host(void)
{
    error_report("%s: No OS support", __func__);
    return false;
}

int postcopy_ram_incoming_init(MigrationIncomingState *mis, size_t ram_pages)
{
    error_report("postcopy_ram_incoming_init: No OS support");
    return -1;
}

int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_ram_discard_range(MigrationIncomingState *mis, uint8_t *start,
                               size_t length)
{
    assert(0);
    return -1;
}

int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_ram_enable_notify(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from)
{
    assert(0);
    return -1;
}

int postcopy_place_page_zero(MigrationIncomingState *mis, void *host)
{
    assert(0);
    return -1;
}

void *postcopy_get_tmp_page(MigrationIncomingState *mis)
{
    assert(0);
    return NULL;
}

#endif

/* ------------------------------------------------------------------------- */

/**
 * postcopy_discard_send_init: Called at the start of each RAMBlock before
 *  asking to discard individual ranges.
 *
 * @ms: The current migration state.
 * @offset: the bitmap offset of the named RAMBlock in the migration
 *   bitmap.
 * @name: RAMBlock that discards will operate on.
 *
 * returns: a new PDS.
 */
PostcopyDiscardState *postcopy_discard_send_init(MigrationState *ms,
                                                 unsigned long offset,
                                                 const char *name)
{
    PostcopyDiscardState *res = g_malloc0(sizeof(PostcopyDiscardState));

    if (res) {
        res->ramblock_name = name;
        res->offset = offset;
    }

    return res;
}
/**
 * postcopy_discard_send_range: Called by the bitmap code for each chunk to
 *  discard. May send a discard message, may just leave it queued to
 *  be sent later.
 *
 * @ms: Current migration state.
 * @pds: Structure initialised by postcopy_discard_send_init().
 * @start,@length: a range of pages in the migration bitmap in the
 *   RAM block passed to postcopy_discard_send_init() (length=1 is one page)
 */
void postcopy_discard_send_range(MigrationState *ms, PostcopyDiscardState *pds,
                                 unsigned long start, unsigned long length)
{
    size_t tp_bits = qemu_target_page_bits();
    /* Convert to byte offsets within the RAM block */
    pds->start_list[pds->cur_entry] = (start - pds->offset) << tp_bits;
    pds->length_list[pds->cur_entry] = length << tp_bits;
    trace_postcopy_discard_send_range(pds->ramblock_name, start, length);
    pds->cur_entry++;
    pds->nsentwords++;

    if (pds->cur_entry == MAX_DISCARDS_PER_COMMAND) {
        /* Full set, ship it! */
        qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
                                              pds->ramblock_name,
                                              pds->cur_entry,
                                              pds->start_list,
                                              pds->length_list);
        pds->nsentcmds++;
        pds->cur_entry = 0;
    }
}

/**
 * postcopy_discard_send_finish: Called at the end of each RAMBlock by the
 *  bitmap code. Sends any outstanding discard messages, frees the PDS
 *
 * @ms: Current migration state.
 * @pds: Structure initialised by postcopy_discard_send_init().
 */
void postcopy_discard_send_finish(MigrationState *ms, PostcopyDiscardState *pds)
{
    /* Anything unsent? */
    if (pds->cur_entry) {
        qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
                                              pds->ramblock_name,
                                              pds->cur_entry,
                                              pds->start_list,
                                              pds->length_list);
        pds->nsentcmds++;
    }

    trace_postcopy_discard_send_finish(pds->ramblock_name, pds->nsentwords,
                                       pds->nsentcmds);

    g_free(pds);
}
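/*
 * Illustrative calling sequence for the discard API above (a sketch only;
 * the real caller is the source-side dirty bitmap walk, and the loop below
 * is pseudocode for it):
 *
 *   PostcopyDiscardState *pds =
 *       postcopy_discard_send_init(ms, block_bitmap_offset, block_name);
 *   while (there is another run of pages to discard in this RAMBlock) {
 *       postcopy_discard_send_range(ms, pds, run_start, run_length);
 *   }
 *   postcopy_discard_send_finish(ms, pds);
 *
 * Ranges are batched MAX_DISCARDS_PER_COMMAND at a time; send_finish()
 * flushes any partial batch and frees the state.
 */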