xref: /openbmc/qemu/migration/ram.c (revision afb81fe8)
1 /*
2  * QEMU System Emulator
3  *
4  * Copyright (c) 2003-2008 Fabrice Bellard
5  * Copyright (c) 2011-2015 Red Hat Inc
6  *
7  * Authors:
8  *  Juan Quintela <quintela@redhat.com>
9  *
10  * Permission is hereby granted, free of charge, to any person obtaining a copy
11  * of this software and associated documentation files (the "Software"), to deal
12  * in the Software without restriction, including without limitation the rights
13  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
14  * copies of the Software, and to permit persons to whom the Software is
15  * furnished to do so, subject to the following conditions:
16  *
17  * The above copyright notice and this permission notice shall be included in
18  * all copies or substantial portions of the Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
24  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
26  * THE SOFTWARE.
27  */
28 
29 #include "qemu/osdep.h"
30 #include "qemu/cutils.h"
31 #include "qemu/bitops.h"
32 #include "qemu/bitmap.h"
33 #include "qemu/madvise.h"
34 #include "qemu/main-loop.h"
35 #include "xbzrle.h"
36 #include "ram-compress.h"
37 #include "ram.h"
38 #include "migration.h"
39 #include "migration-stats.h"
40 #include "migration/register.h"
41 #include "migration/misc.h"
42 #include "qemu-file.h"
43 #include "postcopy-ram.h"
44 #include "page_cache.h"
45 #include "qemu/error-report.h"
46 #include "qapi/error.h"
47 #include "qapi/qapi-types-migration.h"
48 #include "qapi/qapi-events-migration.h"
49 #include "qapi/qapi-commands-migration.h"
50 #include "qapi/qmp/qerror.h"
51 #include "trace.h"
52 #include "exec/ram_addr.h"
53 #include "exec/target_page.h"
54 #include "qemu/rcu_queue.h"
55 #include "migration/colo.h"
56 #include "block.h"
57 #include "sysemu/cpu-throttle.h"
58 #include "savevm.h"
59 #include "qemu/iov.h"
60 #include "multifd.h"
61 #include "sysemu/runstate.h"
62 #include "options.h"
63 #include "sysemu/dirtylimit.h"
64 #include "sysemu/kvm.h"
65 
66 #include "hw/boards.h" /* for machine_dump_guest_core() */
67 
68 #if defined(__linux__)
69 #include "qemu/userfaultfd.h"
70 #endif /* defined(__linux__) */
71 
72 /***********************************************************/
73 /* ram save/restore */
74 
75 /*
76  * RAM_SAVE_FLAG_ZERO used to be named RAM_SAVE_FLAG_COMPRESS; it
77  * applied to pages filled with the same character.  We switched it to
78  * only detect zero-filled pages, and renamed it to avoid confusion
79  * with RAM_SAVE_FLAG_COMPRESS_PAGE.
80  */
81 /*
82  * RAM_SAVE_FLAG_FULL was obsoleted in 2009; it can be reused now
83  */
84 #define RAM_SAVE_FLAG_FULL     0x01
85 #define RAM_SAVE_FLAG_ZERO     0x02
86 #define RAM_SAVE_FLAG_MEM_SIZE 0x04
87 #define RAM_SAVE_FLAG_PAGE     0x08
88 #define RAM_SAVE_FLAG_EOS      0x10
89 #define RAM_SAVE_FLAG_CONTINUE 0x20
90 #define RAM_SAVE_FLAG_XBZRLE   0x40
91 /* 0x80 is reserved in qemu-file.h for RAM_SAVE_FLAG_HOOK */
92 #define RAM_SAVE_FLAG_COMPRESS_PAGE    0x100
93 #define RAM_SAVE_FLAG_MULTIFD_FLUSH    0x200
94 /* We can't use any flag that is bigger than 0x200 */
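
/*
 * Illustrative note (derived from the code below): these flag bits are
 * OR'ed into the low bits of the target-page-aligned page offset that
 * save_page_header() writes to the stream, e.g. "offset | RAM_SAVE_FLAG_ZERO"
 * for a zero page or "offset | RAM_SAVE_FLAG_XBZRLE" for an XBZRLE page.
 */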
95 
96 XBZRLECacheStats xbzrle_counters;
97 
98 /* used by the search for pages to send */
99 struct PageSearchStatus {
100     /* The migration channel used for a specific host page */
101     QEMUFile    *pss_channel;
102     /* Last block from where we have sent data */
103     RAMBlock *last_sent_block;
104     /* Current block being searched */
105     RAMBlock    *block;
106     /* Current page to search from */
107     unsigned long page;
108     /* Set once we wrap around */
109     bool         complete_round;
110     /* Whether we're sending a host page */
111     bool          host_page_sending;
112     /* The start/end of current host page.  Invalid if host_page_sending==false */
113     unsigned long host_page_start;
114     unsigned long host_page_end;
115 };
116 typedef struct PageSearchStatus PageSearchStatus;
117 
118 /* struct containing the XBZRLE cache and the static buffers
119    used by the encoding */
120 static struct {
121     /* buffer used for XBZRLE encoding */
122     uint8_t *encoded_buf;
123     /* buffer for storing page content */
124     uint8_t *current_buf;
125     /* Cache for XBZRLE, protected by lock. */
126     PageCache *cache;
127     QemuMutex lock;
128     /* it will store a page full of zeros */
129     uint8_t *zero_target_page;
130     /* buffer used for XBZRLE decoding */
131     uint8_t *decoded_buf;
132 } XBZRLE;
133 
134 static void XBZRLE_cache_lock(void)
135 {
136     if (migrate_xbzrle()) {
137         qemu_mutex_lock(&XBZRLE.lock);
138     }
139 }
140 
141 static void XBZRLE_cache_unlock(void)
142 {
143     if (migrate_xbzrle()) {
144         qemu_mutex_unlock(&XBZRLE.lock);
145     }
146 }
147 
148 /**
149  * xbzrle_cache_resize: resize the xbzrle cache
150  *
151  * This function is called from migrate_params_apply in the main
152  * thread, possibly while a migration is in progress.  A running
153  * migration may be using the cache and might finish during this call,
154  * hence changes to the cache are protected by the XBZRLE.lock mutex.
155  *
156  * Returns 0 for success or -1 for error
157  *
158  * @new_size: new cache size
159  * @errp: set to the reason for failure if the check fails
160  */
161 int xbzrle_cache_resize(uint64_t new_size, Error **errp)
162 {
163     PageCache *new_cache;
164     int64_t ret = 0;
165 
166     /* Check for truncation */
167     if (new_size != (size_t)new_size) {
168         error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
169                    "exceeding address space");
170         return -1;
171     }
172 
173     if (new_size == migrate_xbzrle_cache_size()) {
174         /* nothing to do */
175         return 0;
176     }
177 
178     XBZRLE_cache_lock();
179 
180     if (XBZRLE.cache != NULL) {
181         new_cache = cache_init(new_size, TARGET_PAGE_SIZE, errp);
182         if (!new_cache) {
183             ret = -1;
184             goto out;
185         }
186 
187         cache_fini(XBZRLE.cache);
188         XBZRLE.cache = new_cache;
189     }
190 out:
191     XBZRLE_cache_unlock();
192     return ret;
193 }
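
/*
 * Illustrative example of how this is typically reached (assumed QMP usage,
 * not taken from this file): resizing the cache at runtime with
 *
 *   { "execute": "migrate-set-parameters",
 *     "arguments": { "xbzrle-cache-size": 536870912 } }
 *
 * which goes through migrate_params_apply() and ends up here.
 */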
194 
195 static bool postcopy_preempt_active(void)
196 {
197     return migrate_postcopy_preempt() && migration_in_postcopy();
198 }
199 
200 bool migrate_ram_is_ignored(RAMBlock *block)
201 {
202     return !qemu_ram_is_migratable(block) ||
203            (migrate_ignore_shared() && qemu_ram_is_shared(block)
204                                     && qemu_ram_is_named_file(block));
205 }
206 
207 #undef RAMBLOCK_FOREACH
208 
209 int foreach_not_ignored_block(RAMBlockIterFunc func, void *opaque)
210 {
211     RAMBlock *block;
212     int ret = 0;
213 
214     RCU_READ_LOCK_GUARD();
215 
216     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
217         ret = func(block, opaque);
218         if (ret) {
219             break;
220         }
221     }
222     return ret;
223 }
224 
225 static void ramblock_recv_map_init(void)
226 {
227     RAMBlock *rb;
228 
229     RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
230         assert(!rb->receivedmap);
231         rb->receivedmap = bitmap_new(rb->max_length >> qemu_target_page_bits());
232     }
233 }
234 
235 int ramblock_recv_bitmap_test(RAMBlock *rb, void *host_addr)
236 {
237     return test_bit(ramblock_recv_bitmap_offset(host_addr, rb),
238                     rb->receivedmap);
239 }
240 
241 bool ramblock_recv_bitmap_test_byte_offset(RAMBlock *rb, uint64_t byte_offset)
242 {
243     return test_bit(byte_offset >> TARGET_PAGE_BITS, rb->receivedmap);
244 }
245 
246 void ramblock_recv_bitmap_set(RAMBlock *rb, void *host_addr)
247 {
248     set_bit_atomic(ramblock_recv_bitmap_offset(host_addr, rb), rb->receivedmap);
249 }
250 
251 void ramblock_recv_bitmap_set_range(RAMBlock *rb, void *host_addr,
252                                     size_t nr)
253 {
254     bitmap_set_atomic(rb->receivedmap,
255                       ramblock_recv_bitmap_offset(host_addr, rb),
256                       nr);
257 }
258 
259 #define  RAMBLOCK_RECV_BITMAP_ENDING  (0x0123456789abcdefULL)
260 
261 /*
262  * Format: bitmap_size (8 bytes) + whole_bitmap (N bytes).
263  *
264  * Returns the number of bytes sent (>0) on success, or <0 on error.
265  */
266 int64_t ramblock_recv_bitmap_send(QEMUFile *file,
267                                   const char *block_name)
268 {
269     RAMBlock *block = qemu_ram_block_by_name(block_name);
270     unsigned long *le_bitmap, nbits;
271     uint64_t size;
272 
273     if (!block) {
274         error_report("%s: invalid block name: %s", __func__, block_name);
275         return -1;
276     }
277 
278     nbits = block->postcopy_length >> TARGET_PAGE_BITS;
279 
280     /*
281      * Make sure the tmp bitmap buffer is big enough, e.g., on 32bit
282      * machines we may need 4 more bytes for padding (see the comment
283      * below).  So extend it a bit beforehand.
284      */
285     le_bitmap = bitmap_new(nbits + BITS_PER_LONG);
286 
287     /*
288      * Always use little endian when sending the bitmap. This is
289      * required so that the bitmap is interpreted correctly even when the
290      * source and destination VMs differ in endianness. (Big endian won't work.)
291      */
292     bitmap_to_le(le_bitmap, block->receivedmap, nbits);
293 
294     /* Size of the bitmap, in bytes */
295     size = DIV_ROUND_UP(nbits, 8);
296 
297     /*
298      * size is always aligned to 8 bytes on 64bit machines, but that
299      * may not be the case on 32bit machines. We need this padding to
300      * make sure the migration can survive even between 32bit and
301      * 64bit machines.
302      */
303     size = ROUND_UP(size, 8);
304 
305     qemu_put_be64(file, size);
306     qemu_put_buffer(file, (const uint8_t *)le_bitmap, size);
307     /*
308      * Mark the end, in case the middle part gets corrupted due to
309      * some "mysterious" reason.
310      */
311     qemu_put_be64(file, RAMBLOCK_RECV_BITMAP_ENDING);
312     qemu_fflush(file);
313 
314     g_free(le_bitmap);
315 
316     if (qemu_file_get_error(file)) {
317         return qemu_file_get_error(file);
318     }
319 
320     return size + sizeof(size);
321 }
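
/*
 * Sketch of the layout produced by ramblock_recv_bitmap_send() above,
 * as read from the code (for reference only):
 *
 *   be64:     bitmap size in bytes (rounded up to a multiple of 8)
 *   N bytes:  the receive bitmap, in little-endian bit order
 *   be64:     the RAMBLOCK_RECV_BITMAP_ENDING marker
 */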
322 
323 /*
324  * An outstanding page request, on the source, having been received
325  * and queued
326  */
327 struct RAMSrcPageRequest {
328     RAMBlock *rb;
329     hwaddr    offset;
330     hwaddr    len;
331 
332     QSIMPLEQ_ENTRY(RAMSrcPageRequest) next_req;
333 };
334 
335 /* State of RAM for migration */
336 struct RAMState {
337     /*
338      * PageSearchStatus structures for the channels when sending pages.
339      * Protected by the bitmap_mutex.
340      */
341     PageSearchStatus pss[RAM_CHANNEL_MAX];
342     /* UFFD file descriptor, used in 'write-tracking' migration */
343     int uffdio_fd;
344     /* total ram size in bytes */
345     uint64_t ram_bytes_total;
346     /* Last block that we have visited searching for dirty pages */
347     RAMBlock *last_seen_block;
348     /* Last dirty target page we have sent */
349     ram_addr_t last_page;
350     /* last ram version we have seen */
351     uint32_t last_version;
352     /* How many times we have dirty too many pages */
353     int dirty_rate_high_cnt;
354     /* these variables are used for bitmap sync */
355     /* last time we did a full bitmap_sync */
356     int64_t time_last_bitmap_sync;
357     /* bytes transferred at start_time */
358     uint64_t bytes_xfer_prev;
359     /* number of dirty pages since start_time */
360     uint64_t num_dirty_pages_period;
361     /* xbzrle misses since the beginning of the period */
362     uint64_t xbzrle_cache_miss_prev;
363     /* Amount of xbzrle pages since the beginning of the period */
364     uint64_t xbzrle_pages_prev;
365     /* Amount of xbzrle encoded bytes since the beginning of the period */
366     uint64_t xbzrle_bytes_prev;
367     /* Are we really using XBZRLE (e.g., after the first round). */
368     bool xbzrle_started;
369     /* Are we on the last stage of migration */
370     bool last_stage;
371     /* compression statistics since the beginning of the period */
372     /* number of times there was no free thread to compress data */
373     uint64_t compress_thread_busy_prev;
374     /* number of bytes after compression */
375     uint64_t compressed_size_prev;
376     /* number of compressed pages */
377     uint64_t compress_pages_prev;
378 
379     /* total handled target pages at the beginning of period */
380     uint64_t target_page_count_prev;
381     /* total handled target pages since start */
382     uint64_t target_page_count;
383     /* number of dirty bits in the bitmap */
384     uint64_t migration_dirty_pages;
385     /*
386      * Protects:
387      * - dirty/clear bitmap
388      * - migration_dirty_pages
389      * - pss structures
390      */
391     QemuMutex bitmap_mutex;
392     /* The RAMBlock used in the last src_page_requests */
393     RAMBlock *last_req_rb;
394     /* Queue of outstanding page requests from the destination */
395     QemuMutex src_page_req_mutex;
396     QSIMPLEQ_HEAD(, RAMSrcPageRequest) src_page_requests;
397 };
398 typedef struct RAMState RAMState;
399 
400 static RAMState *ram_state;
401 
402 static NotifierWithReturnList precopy_notifier_list;
403 
404 /* Whether postcopy has queued requests */
405 static bool postcopy_has_request(RAMState *rs)
406 {
407     return !QSIMPLEQ_EMPTY_ATOMIC(&rs->src_page_requests);
408 }
409 
410 void precopy_infrastructure_init(void)
411 {
412     notifier_with_return_list_init(&precopy_notifier_list);
413 }
414 
415 void precopy_add_notifier(NotifierWithReturn *n)
416 {
417     notifier_with_return_list_add(&precopy_notifier_list, n);
418 }
419 
420 void precopy_remove_notifier(NotifierWithReturn *n)
421 {
422     notifier_with_return_remove(n);
423 }
424 
425 int precopy_notify(PrecopyNotifyReason reason, Error **errp)
426 {
427     PrecopyNotifyData pnd;
428     pnd.reason = reason;
429     pnd.errp = errp;
430 
431     return notifier_with_return_list_notify(&precopy_notifier_list, &pnd);
432 }
433 
434 uint64_t ram_bytes_remaining(void)
435 {
436     return ram_state ? (ram_state->migration_dirty_pages * TARGET_PAGE_SIZE) :
437                        0;
438 }
439 
440 void ram_transferred_add(uint64_t bytes)
441 {
442     if (runstate_is_running()) {
443         stat64_add(&mig_stats.precopy_bytes, bytes);
444     } else if (migration_in_postcopy()) {
445         stat64_add(&mig_stats.postcopy_bytes, bytes);
446     } else {
447         stat64_add(&mig_stats.downtime_bytes, bytes);
448     }
449     stat64_add(&mig_stats.transferred, bytes);
450 }
451 
452 struct MigrationOps {
453     int (*ram_save_target_page)(RAMState *rs, PageSearchStatus *pss);
454 };
455 typedef struct MigrationOps MigrationOps;
456 
457 MigrationOps *migration_ops;
458 
459 static int ram_save_host_page_urgent(PageSearchStatus *pss);
460 
461 /* NOTE: page is the PFN, not the real ram_addr_t. */
462 static void pss_init(PageSearchStatus *pss, RAMBlock *rb, ram_addr_t page)
463 {
464     pss->block = rb;
465     pss->page = page;
466     pss->complete_round = false;
467 }
468 
469 /*
470  * Check whether two PSSs are actively sending the same page.  Return true
471  * if it is, false otherwise.
472  */
473 static bool pss_overlap(PageSearchStatus *pss1, PageSearchStatus *pss2)
474 {
475     return pss1->host_page_sending && pss2->host_page_sending &&
476         (pss1->host_page_start == pss2->host_page_start);
477 }
478 
479 /**
480  * save_page_header: write page header to wire
481  *
482  * If the block changed from the last one sent, it also writes the block identification
483  *
484  * Returns the number of bytes written
485  *
486  * @pss: current PSS channel status
487  * @block: block that contains the page we want to send
488  * @offset: offset inside the block for the page
489  *          in the lower bits, it contains flags
490  */
491 static size_t save_page_header(PageSearchStatus *pss, QEMUFile *f,
492                                RAMBlock *block, ram_addr_t offset)
493 {
494     size_t size, len;
495     bool same_block = (block == pss->last_sent_block);
496 
497     if (same_block) {
498         offset |= RAM_SAVE_FLAG_CONTINUE;
499     }
500     qemu_put_be64(f, offset);
501     size = 8;
502 
503     if (!same_block) {
504         len = strlen(block->idstr);
505         qemu_put_byte(f, len);
506         qemu_put_buffer(f, (uint8_t *)block->idstr, len);
507         size += 1 + len;
508         pss->last_sent_block = block;
509     }
510     return size;
511 }
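
/*
 * Sketch of the header written by save_page_header() above (for reference
 * only):
 *
 *   be64:      page offset within the block, OR'ed with RAM_SAVE_FLAG_* bits
 *              (RAM_SAVE_FLAG_CONTINUE set when the block is unchanged)
 *   if the block changed:
 *     u8:      length of the block idstr
 *     N bytes: the block idstr itself
 */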
512 
513 /**
514  * mig_throttle_guest_down: throttle down the guest
515  *
516  * Reduce the amount of guest CPU execution to hopefully slow down memory
517  * writes. If guest dirty memory rate is reduced below the rate at
518  * which we can transfer pages to the destination then we should be
519  * able to complete migration. Some workloads dirty memory way too
520  * fast and will not effectively converge, even with auto-converge.
521  */
522 static void mig_throttle_guest_down(uint64_t bytes_dirty_period,
523                                     uint64_t bytes_dirty_threshold)
524 {
525     uint64_t pct_initial = migrate_cpu_throttle_initial();
526     uint64_t pct_increment = migrate_cpu_throttle_increment();
527     bool pct_tailslow = migrate_cpu_throttle_tailslow();
528     int pct_max = migrate_max_cpu_throttle();
529 
530     uint64_t throttle_now = cpu_throttle_get_percentage();
531     uint64_t cpu_now, cpu_ideal, throttle_inc;
532 
533     /* We have not started throttling yet. Let's start it. */
534     if (!cpu_throttle_active()) {
535         cpu_throttle_set(pct_initial);
536     } else {
537         /* Throttling already on, just increase the rate */
538         if (!pct_tailslow) {
539             throttle_inc = pct_increment;
540         } else {
541             /* Compute the ideal CPU percentage used by the guest, which may
542              * make the dirty rate match the dirty rate threshold. */
543             cpu_now = 100 - throttle_now;
544             cpu_ideal = cpu_now * (bytes_dirty_threshold * 1.0 /
545                         bytes_dirty_period);
546             throttle_inc = MIN(cpu_now - cpu_ideal, pct_increment);
547         }
548         cpu_throttle_set(MIN(throttle_now + throttle_inc, pct_max));
549     }
550 }
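
/*
 * Worked example for the tailslow path above (numbers purely illustrative):
 * with throttle_now = 20%, cpu_now = 80%.  If the guest dirtied twice as
 * many bytes as the threshold, cpu_ideal = 80 * (1/2) = 40%, so
 * throttle_inc = MIN(80 - 40, pct_increment) and the new throttle value
 * becomes MIN(20 + throttle_inc, pct_max).
 */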
551 
552 void mig_throttle_counter_reset(void)
553 {
554     RAMState *rs = ram_state;
555 
556     rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
557     rs->num_dirty_pages_period = 0;
558     rs->bytes_xfer_prev = stat64_get(&mig_stats.transferred);
559 }
560 
561 /**
562  * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache
563  *
564  * @rs: current RAM state
565  * @current_addr: address for the zero page
566  *
567  * Update the xbzrle cache to reflect a page that's been sent as all 0.
568  * The important thing is that a stale (not-yet-0'd) page be replaced
569  * by the new data.
570  * As a bonus, if the page wasn't in the cache it gets added so that
571  * when a small write is made into the 0'd page it gets XBZRLE sent.
572  */
573 static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
574 {
575     /* We don't care if this fails to allocate a new cache page
576      * as long as it updates an old one */
577     cache_insert(XBZRLE.cache, current_addr, XBZRLE.zero_target_page,
578                  stat64_get(&mig_stats.dirty_sync_count));
579 }
580 
581 #define ENCODING_FLAG_XBZRLE 0x1
582 
583 /**
584  * save_xbzrle_page: compress and send current page
585  *
586  * Returns: 1 means that we wrote the page
587  *          0 means that page is identical to the one already sent
588  *          -1 means that xbzrle would be longer than normal
589  *
590  * @rs: current RAM state
591  * @pss: current PSS channel
592  * @current_data: pointer to the address of the page contents
593  * @current_addr: addr of the page
594  * @block: block that contains the page we want to send
595  * @offset: offset inside the block for the page
596  */
597 static int save_xbzrle_page(RAMState *rs, PageSearchStatus *pss,
598                             uint8_t **current_data, ram_addr_t current_addr,
599                             RAMBlock *block, ram_addr_t offset)
600 {
601     int encoded_len = 0, bytes_xbzrle;
602     uint8_t *prev_cached_page;
603     QEMUFile *file = pss->pss_channel;
604     uint64_t generation = stat64_get(&mig_stats.dirty_sync_count);
605 
606     if (!cache_is_cached(XBZRLE.cache, current_addr, generation)) {
607         xbzrle_counters.cache_miss++;
608         if (!rs->last_stage) {
609             if (cache_insert(XBZRLE.cache, current_addr, *current_data,
610                              generation) == -1) {
611                 return -1;
612             } else {
613                 /* update *current_data when the page has been
614                    inserted into cache */
615                 *current_data = get_cached_data(XBZRLE.cache, current_addr);
616             }
617         }
618         return -1;
619     }
620 
621     /*
622      * Reaching here means the page has hit the xbzrle cache, no matter what
623      * encoding result it is (normal encoding, overflow or skipping the page),
624      * count the page as encoded. This is used to calculate the encoding rate.
625      *
626      * Example: 2 pages (8KB) being encoded, first page encoding generates 2KB,
627      * 2nd page turns out to be skipped (i.e. no new bytes written to the
628      * page), the overall encoding rate will be 8KB / 2KB = 4, which has the
629      * skipped page included. In this way, the encoding rate can tell if the
630      * guest page is good for xbzrle encoding.
631      */
632     xbzrle_counters.pages++;
633     prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);
634 
635     /* save current buffer into memory */
636     memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);
637 
638     /* XBZRLE encoding (if there is no overflow) */
639     encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
640                                        TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
641                                        TARGET_PAGE_SIZE);
642 
643     /*
644      * Update the cache contents, so that it corresponds to the data
645      * sent, in all cases except where we skip the page.
646      */
647     if (!rs->last_stage && encoded_len != 0) {
648         memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
649         /*
650          * In the case where we couldn't compress, ensure that the caller
651          * sends the data from the cache, since the guest might have
652          * changed the RAM since we copied it.
653          */
654         *current_data = prev_cached_page;
655     }
656 
657     if (encoded_len == 0) {
658         trace_save_xbzrle_page_skipping();
659         return 0;
660     } else if (encoded_len == -1) {
661         trace_save_xbzrle_page_overflow();
662         xbzrle_counters.overflow++;
663         xbzrle_counters.bytes += TARGET_PAGE_SIZE;
664         return -1;
665     }
666 
667     /* Send XBZRLE based compressed page */
668     bytes_xbzrle = save_page_header(pss, pss->pss_channel, block,
669                                     offset | RAM_SAVE_FLAG_XBZRLE);
670     qemu_put_byte(file, ENCODING_FLAG_XBZRLE);
671     qemu_put_be16(file, encoded_len);
672     qemu_put_buffer(file, XBZRLE.encoded_buf, encoded_len);
673     bytes_xbzrle += encoded_len + 1 + 2;
674     /*
675      * Like compressed_size (please see update_compress_thread_counts),
676      * the xbzrle encoded bytes don't count the 8 byte header with
677      * RAM_SAVE_FLAG_CONTINUE.
678      */
679     xbzrle_counters.bytes += bytes_xbzrle - 8;
680     ram_transferred_add(bytes_xbzrle);
681 
682     return 1;
683 }
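
/*
 * Sketch of what an XBZRLE page looks like on the wire, as sent by
 * save_xbzrle_page() above (for reference only):
 *
 *   save_page_header() with RAM_SAVE_FLAG_XBZRLE set in the offset
 *   u8:       ENCODING_FLAG_XBZRLE
 *   be16:     encoded_len
 *   N bytes:  the encoded delta against the cached copy of the page
 */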
684 
685 /**
686  * pss_find_next_dirty: find the next dirty page of current ramblock
687  *
688  * This function updates pss->page to point to the next dirty page index
689  * within the ramblock to migrate, or the end of ramblock when nothing
690  * found.  Note that when pss->host_page_sending==true it means we're
691  * in the middle of sending a host page, so we won't look for dirty
692  * pages outside the host page boundary.
693  *
694  * @pss: the current page search status
695  */
696 static void pss_find_next_dirty(PageSearchStatus *pss)
697 {
698     RAMBlock *rb = pss->block;
699     unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
700     unsigned long *bitmap = rb->bmap;
701 
702     if (migrate_ram_is_ignored(rb)) {
703         /* Points directly to the end, so we know no dirty page */
704         pss->page = size;
705         return;
706     }
707 
708     /*
709      * If we are in the middle of sending a host page, only look for dirty
710      * pages within the current host page being sent.
711      */
712     if (pss->host_page_sending) {
713         assert(pss->host_page_end);
714         size = MIN(size, pss->host_page_end);
715     }
716 
717     pss->page = find_next_bit(bitmap, size, pss->page);
718 }
719 
720 static void migration_clear_memory_region_dirty_bitmap(RAMBlock *rb,
721                                                        unsigned long page)
722 {
723     uint8_t shift;
724     hwaddr size, start;
725 
726     if (!rb->clear_bmap || !clear_bmap_test_and_clear(rb, page)) {
727         return;
728     }
729 
730     shift = rb->clear_bmap_shift;
731     /*
732      * CLEAR_BITMAP_SHIFT_MIN should always guarantee this.  It also
733      * makes things easier sometimes, since the start address of a
734      * small chunk will always be aligned to 64 pages, so the bitmap
735      * will always be aligned to an unsigned long.  We should even be
736      * able to remove this restriction, but it is simply kept here
737      * for now.
738      */
739     assert(shift >= 6);
740 
741     size = 1ULL << (TARGET_PAGE_BITS + shift);
742     start = QEMU_ALIGN_DOWN((ram_addr_t)page << TARGET_PAGE_BITS, size);
743     trace_migration_bitmap_clear_dirty(rb->idstr, start, size, page);
744     memory_region_clear_dirty_bitmap(rb->mr, start, size);
745 }
746 
747 static void
748 migration_clear_memory_region_dirty_bitmap_range(RAMBlock *rb,
749                                                  unsigned long start,
750                                                  unsigned long npages)
751 {
752     unsigned long i, chunk_pages = 1UL << rb->clear_bmap_shift;
753     unsigned long chunk_start = QEMU_ALIGN_DOWN(start, chunk_pages);
754     unsigned long chunk_end = QEMU_ALIGN_UP(start + npages, chunk_pages);
755 
756     /*
757      * Clear pages from start to start + npages - 1, so the end boundary is
758      * exclusive.
759      */
760     for (i = chunk_start; i < chunk_end; i += chunk_pages) {
761         migration_clear_memory_region_dirty_bitmap(rb, i);
762     }
763 }
764 
765 /*
766  * colo_bitmap_find_dirty: find contiguous dirty pages from start
767  *
768  * Returns the page offset within the memory region of the start of the
769  * contiguous dirty pages
770  *
771  * @rs: current RAM state
772  * @rb: RAMBlock where to search for dirty pages
773  * @start: page where we start the search
774  * @num: the number of contiguous dirty pages
775  */
776 static inline
777 unsigned long colo_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
778                                      unsigned long start, unsigned long *num)
779 {
780     unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
781     unsigned long *bitmap = rb->bmap;
782     unsigned long first, next;
783 
784     *num = 0;
785 
786     if (migrate_ram_is_ignored(rb)) {
787         return size;
788     }
789 
790     first = find_next_bit(bitmap, size, start);
791     if (first >= size) {
792         return first;
793     }
794     next = find_next_zero_bit(bitmap, size, first + 1);
795     assert(next >= first);
796     *num = next - first;
797     return first;
798 }
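
/*
 * Illustrative example: if only pages 12..14 are dirty, a call with
 * start = 10 returns 12 and sets *num = 3, while a call with start = 15
 * returns an offset >= size and leaves *num = 0.
 */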
799 
800 static inline bool migration_bitmap_clear_dirty(RAMState *rs,
801                                                 RAMBlock *rb,
802                                                 unsigned long page)
803 {
804     bool ret;
805 
806     /*
807      * Clear the dirty bitmap if needed.  This _must_ be called before we
808      * send any page in the chunk, because we need to make sure we can
809      * capture further page content changes when we sync the dirty log
810      * the next time.  So as long as we are going to send any page in the
811      * chunk, we clear the remote dirty bitmap for the whole chunk.
812      * Clearing it earlier won't be a problem, but clearing it too late will.
813      */
814     migration_clear_memory_region_dirty_bitmap(rb, page);
815 
816     ret = test_and_clear_bit(page, rb->bmap);
817     if (ret) {
818         rs->migration_dirty_pages--;
819     }
820 
821     return ret;
822 }
823 
824 static void dirty_bitmap_clear_section(MemoryRegionSection *section,
825                                        void *opaque)
826 {
827     const hwaddr offset = section->offset_within_region;
828     const hwaddr size = int128_get64(section->size);
829     const unsigned long start = offset >> TARGET_PAGE_BITS;
830     const unsigned long npages = size >> TARGET_PAGE_BITS;
831     RAMBlock *rb = section->mr->ram_block;
832     uint64_t *cleared_bits = opaque;
833 
834     /*
835      * We don't grab ram_state->bitmap_mutex because we expect to run
836      * only when starting migration or during postcopy recovery where
837      * we don't have concurrent access.
838      */
839     if (!migration_in_postcopy() && !migrate_background_snapshot()) {
840         migration_clear_memory_region_dirty_bitmap_range(rb, start, npages);
841     }
842     *cleared_bits += bitmap_count_one_with_offset(rb->bmap, start, npages);
843     bitmap_clear(rb->bmap, start, npages);
844 }
845 
846 /*
847  * Exclude all dirty pages from migration that fall into a discarded range as
848  * managed by a RamDiscardManager responsible for the mapped memory region of
849  * the RAMBlock. Clear the corresponding bits in the dirty bitmaps.
850  *
851  * Discarded pages ("logically unplugged") have undefined content and must
852  * not get migrated, because even reading these pages for migration might
853  * result in undesired behavior.
854  *
855  * Returns the number of cleared bits in the RAMBlock dirty bitmap.
856  *
857  * Note: The result is only stable while migrating (precopy/postcopy).
858  */
859 static uint64_t ramblock_dirty_bitmap_clear_discarded_pages(RAMBlock *rb)
860 {
861     uint64_t cleared_bits = 0;
862 
863     if (rb->mr && rb->bmap && memory_region_has_ram_discard_manager(rb->mr)) {
864         RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr);
865         MemoryRegionSection section = {
866             .mr = rb->mr,
867             .offset_within_region = 0,
868             .size = int128_make64(qemu_ram_get_used_length(rb)),
869         };
870 
871         ram_discard_manager_replay_discarded(rdm, &section,
872                                              dirty_bitmap_clear_section,
873                                              &cleared_bits);
874     }
875     return cleared_bits;
876 }
877 
878 /*
879  * Check if a host-page aligned page falls into a discarded range as managed by
880  * a RamDiscardManager responsible for the mapped memory region of the RAMBlock.
881  *
882  * Note: The result is only stable while migrating (precopy/postcopy).
883  */
884 bool ramblock_page_is_discarded(RAMBlock *rb, ram_addr_t start)
885 {
886     if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) {
887         RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr);
888         MemoryRegionSection section = {
889             .mr = rb->mr,
890             .offset_within_region = start,
891             .size = int128_make64(qemu_ram_pagesize(rb)),
892         };
893 
894         return !ram_discard_manager_is_populated(rdm, &section);
895     }
896     return false;
897 }
898 
899 /* Called with RCU critical section */
900 static void ramblock_sync_dirty_bitmap(RAMState *rs, RAMBlock *rb)
901 {
902     uint64_t new_dirty_pages =
903         cpu_physical_memory_sync_dirty_bitmap(rb, 0, rb->used_length);
904 
905     rs->migration_dirty_pages += new_dirty_pages;
906     rs->num_dirty_pages_period += new_dirty_pages;
907 }
908 
909 /**
910  * ram_pagesize_summary: calculate all the pagesizes of a VM
911  *
912  * Returns a summary bitmap of the page sizes of all RAMBlocks
913  *
914  * For VMs with just normal pages this is equivalent to the host page
915  * size. If it has some huge pages then it's the OR of all the
916  * different page sizes.
917  */
918 uint64_t ram_pagesize_summary(void)
919 {
920     RAMBlock *block;
921     uint64_t summary = 0;
922 
923     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
924         summary |= block->page_size;
925     }
926 
927     return summary;
928 }
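
/*
 * Illustrative example: a VM whose RAMBlocks use 4 KiB pages plus one
 * 2 MiB hugepage-backed block yields a summary of 0x1000 | 0x200000,
 * i.e. 0x201000.
 */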
929 
930 uint64_t ram_get_total_transferred_pages(void)
931 {
932     return stat64_get(&mig_stats.normal_pages) +
933         stat64_get(&mig_stats.zero_pages) +
934         compression_counters.pages + xbzrle_counters.pages;
935 }
936 
937 static void migration_update_rates(RAMState *rs, int64_t end_time)
938 {
939     uint64_t page_count = rs->target_page_count - rs->target_page_count_prev;
940     double compressed_size;
941 
942     /* calculate period counters */
943     stat64_set(&mig_stats.dirty_pages_rate,
944                rs->num_dirty_pages_period * 1000 /
945                (end_time - rs->time_last_bitmap_sync));
946 
947     if (!page_count) {
948         return;
949     }
950 
951     if (migrate_xbzrle()) {
952         double encoded_size, unencoded_size;
953 
954         xbzrle_counters.cache_miss_rate = (double)(xbzrle_counters.cache_miss -
955             rs->xbzrle_cache_miss_prev) / page_count;
956         rs->xbzrle_cache_miss_prev = xbzrle_counters.cache_miss;
957         unencoded_size = (xbzrle_counters.pages - rs->xbzrle_pages_prev) *
958                          TARGET_PAGE_SIZE;
959         encoded_size = xbzrle_counters.bytes - rs->xbzrle_bytes_prev;
960         if (xbzrle_counters.pages == rs->xbzrle_pages_prev || !encoded_size) {
961             xbzrle_counters.encoding_rate = 0;
962         } else {
963             xbzrle_counters.encoding_rate = unencoded_size / encoded_size;
964         }
965         rs->xbzrle_pages_prev = xbzrle_counters.pages;
966         rs->xbzrle_bytes_prev = xbzrle_counters.bytes;
967     }
968 
969     if (migrate_compress()) {
970         compression_counters.busy_rate = (double)(compression_counters.busy -
971             rs->compress_thread_busy_prev) / page_count;
972         rs->compress_thread_busy_prev = compression_counters.busy;
973 
974         compressed_size = compression_counters.compressed_size -
975                           rs->compressed_size_prev;
976         if (compressed_size) {
977             double uncompressed_size = (compression_counters.pages -
978                                     rs->compress_pages_prev) * TARGET_PAGE_SIZE;
979 
980             /* Compression-Ratio = Uncompressed-size / Compressed-size */
981             compression_counters.compression_rate =
982                                         uncompressed_size / compressed_size;
983 
984             rs->compress_pages_prev = compression_counters.pages;
985             rs->compressed_size_prev = compression_counters.compressed_size;
986         }
987     }
988 }
989 
990 /*
991  * Enable dirty-limit to throttle down the guest
992  */
993 static void migration_dirty_limit_guest(void)
994 {
995     /*
996      * dirty page rate quota for all vCPUs fetched from
997      * migration parameter 'vcpu_dirty_limit'
998      */
999     static int64_t quota_dirtyrate;
1000     MigrationState *s = migrate_get_current();
1001 
1002     /*
1003     * If the dirty limit is already enabled and the migration parameter
1004     * vcpu-dirty-limit is untouched, there is nothing to do.
1005      */
1006     if (dirtylimit_in_service() &&
1007         quota_dirtyrate == s->parameters.vcpu_dirty_limit) {
1008         return;
1009     }
1010 
1011     quota_dirtyrate = s->parameters.vcpu_dirty_limit;
1012 
1013     /*
1014     * Set a quota dirty rate for all vCPUs.  Note that the second
1015     * parameter will be ignored when setting all vCPUs of the VM.
1016      */
1017     qmp_set_vcpu_dirty_limit(false, -1, quota_dirtyrate, NULL);
1018     trace_migration_dirty_limit_guest(quota_dirtyrate);
1019 }
1020 
1021 static void migration_trigger_throttle(RAMState *rs)
1022 {
1023     uint64_t threshold = migrate_throttle_trigger_threshold();
1024     uint64_t bytes_xfer_period =
1025         stat64_get(&mig_stats.transferred) - rs->bytes_xfer_prev;
1026     uint64_t bytes_dirty_period = rs->num_dirty_pages_period * TARGET_PAGE_SIZE;
1027     uint64_t bytes_dirty_threshold = bytes_xfer_period * threshold / 100;
1028 
1029     /* During block migration the auto-converge logic incorrectly detects
1030      * that ram migration makes no progress. Avoid this by disabling the
1031      * throttling logic during the bulk phase of block migration. */
1032     if (blk_mig_bulk_active()) {
1033         return;
1034     }
1035 
1036     /*
1037      * The following detection logic can be refined later. For now:
1038      * Check to see if the ratio between dirtied bytes and the approx.
1039      * amount of bytes that just got transferred since the last time
1040      * we were in this routine reaches the threshold. If that happens
1041      * twice, start or increase throttling.
1042      */
1043     if ((bytes_dirty_period > bytes_dirty_threshold) &&
1044         (++rs->dirty_rate_high_cnt >= 2)) {
1045         rs->dirty_rate_high_cnt = 0;
1046         if (migrate_auto_converge()) {
1047             trace_migration_throttle();
1048             mig_throttle_guest_down(bytes_dirty_period,
1049                                     bytes_dirty_threshold);
1050         } else if (migrate_dirty_limit()) {
1051             migration_dirty_limit_guest();
1052         }
1053     }
1054 }
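
/*
 * Illustrative example: with a trigger threshold of 50%, once the guest
 * has dirtied more than half as many bytes as were transferred during the
 * sync period, and this has happened twice, auto-converge throttling (or
 * the dirty limit) kicks in.
 */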
1055 
1056 static void migration_bitmap_sync(RAMState *rs, bool last_stage)
1057 {
1058     RAMBlock *block;
1059     int64_t end_time;
1060 
1061     stat64_add(&mig_stats.dirty_sync_count, 1);
1062 
1063     if (!rs->time_last_bitmap_sync) {
1064         rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1065     }
1066 
1067     trace_migration_bitmap_sync_start();
1068     memory_global_dirty_log_sync(last_stage);
1069 
1070     qemu_mutex_lock(&rs->bitmap_mutex);
1071     WITH_RCU_READ_LOCK_GUARD() {
1072         RAMBLOCK_FOREACH_NOT_IGNORED(block) {
1073             ramblock_sync_dirty_bitmap(rs, block);
1074         }
1075         stat64_set(&mig_stats.dirty_bytes_last_sync, ram_bytes_remaining());
1076     }
1077     qemu_mutex_unlock(&rs->bitmap_mutex);
1078 
1079     memory_global_after_dirty_log_sync();
1080     trace_migration_bitmap_sync_end(rs->num_dirty_pages_period);
1081 
1082     end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1083 
1084     /* more than 1 second = 1000 milliseconds */
1085     if (end_time > rs->time_last_bitmap_sync + 1000) {
1086         migration_trigger_throttle(rs);
1087 
1088         migration_update_rates(rs, end_time);
1089 
1090         rs->target_page_count_prev = rs->target_page_count;
1091 
1092         /* reset period counters */
1093         rs->time_last_bitmap_sync = end_time;
1094         rs->num_dirty_pages_period = 0;
1095         rs->bytes_xfer_prev = stat64_get(&mig_stats.transferred);
1096     }
1097     if (migrate_events()) {
1098         uint64_t generation = stat64_get(&mig_stats.dirty_sync_count);
1099         qapi_event_send_migration_pass(generation);
1100     }
1101 }
1102 
1103 static void migration_bitmap_sync_precopy(RAMState *rs, bool last_stage)
1104 {
1105     Error *local_err = NULL;
1106 
1107     /*
1108     * The current notifier usage is just an optimization for migration, so we
1109      * don't stop the normal migration process in the error case.
1110      */
1111     if (precopy_notify(PRECOPY_NOTIFY_BEFORE_BITMAP_SYNC, &local_err)) {
1112         error_report_err(local_err);
1113         local_err = NULL;
1114     }
1115 
1116     migration_bitmap_sync(rs, last_stage);
1117 
1118     if (precopy_notify(PRECOPY_NOTIFY_AFTER_BITMAP_SYNC, &local_err)) {
1119         error_report_err(local_err);
1120     }
1121 }
1122 
1123 void ram_release_page(const char *rbname, uint64_t offset)
1124 {
1125     if (!migrate_release_ram() || !migration_in_postcopy()) {
1126         return;
1127     }
1128 
1129     ram_discard_range(rbname, offset, TARGET_PAGE_SIZE);
1130 }
1131 
1132 /**
1133  * save_zero_page_to_file: send the zero page to the file
1134  *
1135  * Returns the size of the data written to the file, or 0 if the page is
1136  * not a zero page
1137  *
1138  * @pss: current PSS channel
1139  * @block: block that contains the page we want to send
1140  * @offset: offset inside the block for the page
1141  */
1142 static int save_zero_page_to_file(PageSearchStatus *pss, QEMUFile *file,
1143                                   RAMBlock *block, ram_addr_t offset)
1144 {
1145     uint8_t *p = block->host + offset;
1146     int len = 0;
1147 
1148     if (buffer_is_zero(p, TARGET_PAGE_SIZE)) {
1149         len += save_page_header(pss, file, block, offset | RAM_SAVE_FLAG_ZERO);
1150         qemu_put_byte(file, 0);
1151         len += 1;
1152         ram_release_page(block->idstr, offset);
1153     }
1154     return len;
1155 }
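
/*
 * For reference: on the wire a zero page is just the page header with
 * RAM_SAVE_FLAG_ZERO set, followed by a single byte holding the fill
 * value (always 0 here).
 */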
1156 
1157 /**
1158  * save_zero_page: send the zero page to the stream
1159  *
1160  * Returns the number of pages written.
1161  *
1162  * @pss: current PSS channel
1163  * @block: block that contains the page we want to send
1164  * @offset: offset inside the block for the page
1165  */
1166 static int save_zero_page(PageSearchStatus *pss, QEMUFile *f, RAMBlock *block,
1167                           ram_addr_t offset)
1168 {
1169     int len = save_zero_page_to_file(pss, f, block, offset);
1170 
1171     if (len) {
1172         stat64_add(&mig_stats.zero_pages, 1);
1173         ram_transferred_add(len);
1174         return 1;
1175     }
1176     return -1;
1177 }
1178 
1179 /*
1180  * @pages: the number of pages written by the control path,
1181  *        < 0 - error
1182  *        > 0 - number of pages written
1183  *
1184  * Return true if the page has been saved, otherwise false is returned.
1185  */
1186 static bool control_save_page(PageSearchStatus *pss, RAMBlock *block,
1187                               ram_addr_t offset, int *pages)
1188 {
1189     int ret;
1190 
1191     ret = ram_control_save_page(pss->pss_channel, block->offset, offset,
1192                                 TARGET_PAGE_SIZE);
1193     if (ret == RAM_SAVE_CONTROL_NOT_SUPP) {
1194         return false;
1195     }
1196 
1197     if (ret == RAM_SAVE_CONTROL_DELAYED) {
1198         *pages = 1;
1199         return true;
1200     }
1201     *pages = ret;
1202     return true;
1203 }
1204 
1205 /*
1206  * directly send the page to the stream
1207  *
1208  * Returns the number of pages written.
1209  *
1210  * @pss: current PSS channel
1211  * @block: block that contains the page we want to send
1212  * @offset: offset inside the block for the page
1213  * @buf: the page to be sent
1214  * @async: send to page asyncly
1215  */
1216 static int save_normal_page(PageSearchStatus *pss, RAMBlock *block,
1217                             ram_addr_t offset, uint8_t *buf, bool async)
1218 {
1219     QEMUFile *file = pss->pss_channel;
1220 
1221     ram_transferred_add(save_page_header(pss, pss->pss_channel, block,
1222                                          offset | RAM_SAVE_FLAG_PAGE));
1223     if (async) {
1224         qemu_put_buffer_async(file, buf, TARGET_PAGE_SIZE,
1225                               migrate_release_ram() &&
1226                               migration_in_postcopy());
1227     } else {
1228         qemu_put_buffer(file, buf, TARGET_PAGE_SIZE);
1229     }
1230     ram_transferred_add(TARGET_PAGE_SIZE);
1231     stat64_add(&mig_stats.normal_pages, 1);
1232     return 1;
1233 }
1234 
1235 /**
1236  * ram_save_page: send the given page to the stream
1237  *
1238  * Returns the number of pages written.
1239  *          < 0 - error
1240  *          >=0 - Number of pages written - this might legally be 0
1241  *                if xbzrle noticed the page was the same.
1242  *
1243  * @rs: current RAM state
1244  * @block: block that contains the page we want to send
1245  * @offset: offset inside the block for the page
1246  */
1247 static int ram_save_page(RAMState *rs, PageSearchStatus *pss)
1248 {
1249     int pages = -1;
1250     uint8_t *p;
1251     bool send_async = true;
1252     RAMBlock *block = pss->block;
1253     ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
1254     ram_addr_t current_addr = block->offset + offset;
1255 
1256     p = block->host + offset;
1257     trace_ram_save_page(block->idstr, (uint64_t)offset, p);
1258 
1259     XBZRLE_cache_lock();
1260     if (rs->xbzrle_started && !migration_in_postcopy()) {
1261         pages = save_xbzrle_page(rs, pss, &p, current_addr,
1262                                  block, offset);
1263         if (!rs->last_stage) {
1264             /* Can't send this cached data async, since the cache page
1265              * might get updated before it gets to the wire
1266              */
1267             send_async = false;
1268         }
1269     }
1270 
1271     /* XBZRLE overflow or normal page */
1272     if (pages == -1) {
1273         pages = save_normal_page(pss, block, offset, p, send_async);
1274     }
1275 
1276     XBZRLE_cache_unlock();
1277 
1278     return pages;
1279 }
1280 
1281 static int ram_save_multifd_page(QEMUFile *file, RAMBlock *block,
1282                                  ram_addr_t offset)
1283 {
1284     if (multifd_queue_page(file, block, offset) < 0) {
1285         return -1;
1286     }
1287     stat64_add(&mig_stats.normal_pages, 1);
1288 
1289     return 1;
1290 }
1291 
1292 static void
1293 update_compress_thread_counts(const CompressParam *param, int bytes_xmit)
1294 {
1295     ram_transferred_add(bytes_xmit);
1296 
1297     if (param->result == RES_ZEROPAGE) {
1298         stat64_add(&mig_stats.zero_pages, 1);
1299         return;
1300     }
1301 
1302     /* 8 means a header with RAM_SAVE_FLAG_CONTINUE. */
1303     compression_counters.compressed_size += bytes_xmit - 8;
1304     compression_counters.pages++;
1305 }
1306 
1307 static bool save_page_use_compression(RAMState *rs);
1308 
1309 static int send_queued_data(CompressParam *param)
1310 {
1311     PageSearchStatus *pss = &ram_state->pss[RAM_CHANNEL_PRECOPY];
1312     MigrationState *ms = migrate_get_current();
1313     QEMUFile *file = ms->to_dst_file;
1314     int len = 0;
1315 
1316     RAMBlock *block = param->block;
1317     ram_addr_t offset = param->offset;
1318 
1319     if (param->result == RES_NONE) {
1320         return 0;
1321     }
1322 
1323     assert(block == pss->last_sent_block);
1324 
1325     if (param->result == RES_ZEROPAGE) {
1326         assert(qemu_file_buffer_empty(param->file));
1327         len += save_page_header(pss, file, block, offset | RAM_SAVE_FLAG_ZERO);
1328         qemu_put_byte(file, 0);
1329         len += 1;
1330         ram_release_page(block->idstr, offset);
1331     } else if (param->result == RES_COMPRESS) {
1332         assert(!qemu_file_buffer_empty(param->file));
1333         len += save_page_header(pss, file, block,
1334                                 offset | RAM_SAVE_FLAG_COMPRESS_PAGE);
1335         len += qemu_put_qemu_file(file, param->file);
1336     } else {
1337         abort();
1338     }
1339 
1340     update_compress_thread_counts(param, len);
1341 
1342     return len;
1343 }
1344 
1345 static void ram_flush_compressed_data(RAMState *rs)
1346 {
1347     if (!save_page_use_compression(rs)) {
1348         return;
1349     }
1350 
1351     flush_compressed_data(send_queued_data);
1352 }
1353 
1354 #define PAGE_ALL_CLEAN 0
1355 #define PAGE_TRY_AGAIN 1
1356 #define PAGE_DIRTY_FOUND 2
1357 /**
1358  * find_dirty_block: find the next dirty page and update any state
1359  * associated with the search process.
1360  *
1361  * Returns:
1362  *         <0: An error happened
1363  *         PAGE_ALL_CLEAN: no dirty page found, give up
1364  *         PAGE_TRY_AGAIN: no dirty page found, retry for next block
1365  *         PAGE_DIRTY_FOUND: dirty page found
1366  *
1367  * @rs: current RAM state
1368  * @pss: data about the state of the current dirty page scan
1370  */
1371 static int find_dirty_block(RAMState *rs, PageSearchStatus *pss)
1372 {
1373     /* Update pss->page for the next dirty bit in ramblock */
1374     pss_find_next_dirty(pss);
1375 
1376     if (pss->complete_round && pss->block == rs->last_seen_block &&
1377         pss->page >= rs->last_page) {
1378         /*
1379          * We've been once around the RAM and haven't found anything.
1380          * Give up.
1381          */
1382         return PAGE_ALL_CLEAN;
1383     }
1384     if (!offset_in_ramblock(pss->block,
1385                             ((ram_addr_t)pss->page) << TARGET_PAGE_BITS)) {
1386         /* Didn't find anything in this RAM Block */
1387         pss->page = 0;
1388         pss->block = QLIST_NEXT_RCU(pss->block, next);
1389         if (!pss->block) {
1390             if (!migrate_multifd_flush_after_each_section()) {
1391                 QEMUFile *f = rs->pss[RAM_CHANNEL_PRECOPY].pss_channel;
1392                 int ret = multifd_send_sync_main(f);
1393                 if (ret < 0) {
1394                     return ret;
1395                 }
1396                 qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
1397                 qemu_fflush(f);
1398             }
1399             /*
1400              * If memory migration starts over, we will meet a dirtied page
1401              * which may still exist in the compression threads' ring, so we
1402              * should flush the compressed data to make sure the new page
1403              * is not overwritten by the old one on the destination.
1404              *
1405              * Also, if xbzrle is on, stop using data compression at this
1406              * point. In theory, xbzrle can do better than compression.
1407              */
1408             ram_flush_compressed_data(rs);
1409 
1410             /* Hit the end of the list */
1411             pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
1412             /* Flag that we've looped */
1413             pss->complete_round = true;
1414             /* After the first round, enable XBZRLE. */
1415             if (migrate_xbzrle()) {
1416                 rs->xbzrle_started = true;
1417             }
1418         }
1419         /* Didn't find anything this time, but try again on the new block */
1420         return PAGE_TRY_AGAIN;
1421     } else {
1422         /* We've found something */
1423         return PAGE_DIRTY_FOUND;
1424     }
1425 }
1426 
1427 /**
1428  * unqueue_page: gets a page off the queue
1429  *
1430  * Helper for 'get_queued_page' - gets a page off the queue
1431  *
1432  * Returns the block of the page (or NULL if none available)
1433  *
1434  * @rs: current RAM state
1435  * @offset: used to return the offset within the RAMBlock
1436  */
1437 static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
1438 {
1439     struct RAMSrcPageRequest *entry;
1440     RAMBlock *block = NULL;
1441 
1442     if (!postcopy_has_request(rs)) {
1443         return NULL;
1444     }
1445 
1446     QEMU_LOCK_GUARD(&rs->src_page_req_mutex);
1447 
1448     /*
1449      * This should _never_ change even after we take the lock, because no one
1450      * should be taking anything off the request list other than us.
1451      */
1452     assert(postcopy_has_request(rs));
1453 
1454     entry = QSIMPLEQ_FIRST(&rs->src_page_requests);
1455     block = entry->rb;
1456     *offset = entry->offset;
1457 
1458     if (entry->len > TARGET_PAGE_SIZE) {
1459         entry->len -= TARGET_PAGE_SIZE;
1460         entry->offset += TARGET_PAGE_SIZE;
1461     } else {
1462         memory_region_unref(block->mr);
1463         QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
1464         g_free(entry);
1465         migration_consume_urgent_request();
1466     }
1467 
1468     return block;
1469 }
1470 
1471 #if defined(__linux__)
1472 /**
1473  * poll_fault_page: try to get next UFFD write fault page and, if pending fault
1474  *   is found, return RAM block pointer and page offset
1475  *
1476  * Returns pointer to the RAMBlock containing faulting page,
1477  *   NULL if no write faults are pending
1478  *
1479  * @rs: current RAM state
1480  * @offset: page offset from the beginning of the block
1481  */
1482 static RAMBlock *poll_fault_page(RAMState *rs, ram_addr_t *offset)
1483 {
1484     struct uffd_msg uffd_msg;
1485     void *page_address;
1486     RAMBlock *block;
1487     int res;
1488 
1489     if (!migrate_background_snapshot()) {
1490         return NULL;
1491     }
1492 
1493     res = uffd_read_events(rs->uffdio_fd, &uffd_msg, 1);
1494     if (res <= 0) {
1495         return NULL;
1496     }
1497 
1498     page_address = (void *)(uintptr_t) uffd_msg.arg.pagefault.address;
1499     block = qemu_ram_block_from_host(page_address, false, offset);
1500     assert(block && (block->flags & RAM_UF_WRITEPROTECT) != 0);
1501     return block;
1502 }
1503 
1504 /**
1505  * ram_save_release_protection: release UFFD write protection after
1506  *   a range of pages has been saved
1507  *
1508  * @rs: current RAM state
1509  * @pss: page-search-status structure
1510  * @start_page: index of the first page in the range relative to pss->block
1511  *
1512  * Returns 0 on success, negative value in case of an error
1513  */
1514 static int ram_save_release_protection(RAMState *rs, PageSearchStatus *pss,
1515         unsigned long start_page)
1516 {
1517     int res = 0;
1518 
1519     /* Check if page is from UFFD-managed region. */
1520     if (pss->block->flags & RAM_UF_WRITEPROTECT) {
1521         void *page_address = pss->block->host + (start_page << TARGET_PAGE_BITS);
1522         uint64_t run_length = (pss->page - start_page) << TARGET_PAGE_BITS;
1523 
1524         /* Flush async buffers before un-protect. */
1525         qemu_fflush(pss->pss_channel);
1526         /* Un-protect memory range. */
1527         res = uffd_change_protection(rs->uffdio_fd, page_address, run_length,
1528                 false, false);
1529     }
1530 
1531     return res;
1532 }
1533 
1534 /* ram_write_tracking_available: check if kernel supports required UFFD features
1535  *
1536  * Returns true if supported, false otherwise
1537  */
1538 bool ram_write_tracking_available(void)
1539 {
1540     uint64_t uffd_features;
1541     int res;
1542 
1543     res = uffd_query_features(&uffd_features);
1544     return (res == 0 &&
1545             (uffd_features & UFFD_FEATURE_PAGEFAULT_FLAG_WP) != 0);
1546 }
1547 
1548 /* ram_write_tracking_compatible: check if guest configuration is
1549  *   compatible with 'write-tracking'
1550  *
1551  * Returns true if compatible, false otherwise
1552  */
1553 bool ram_write_tracking_compatible(void)
1554 {
1555     const uint64_t uffd_ioctls_mask = BIT(_UFFDIO_WRITEPROTECT);
1556     int uffd_fd;
1557     RAMBlock *block;
1558     bool ret = false;
1559 
1560     /* Open UFFD file descriptor */
1561     uffd_fd = uffd_create_fd(UFFD_FEATURE_PAGEFAULT_FLAG_WP, false);
1562     if (uffd_fd < 0) {
1563         return false;
1564     }
1565 
1566     RCU_READ_LOCK_GUARD();
1567 
1568     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
1569         uint64_t uffd_ioctls;
1570 
1571         /* Nothing to do with read-only and MMIO-writable regions */
1572         if (block->mr->readonly || block->mr->rom_device) {
1573             continue;
1574         }
1575         /* Try to register block memory via UFFD-IO to track writes */
1576         if (uffd_register_memory(uffd_fd, block->host, block->max_length,
1577                 UFFDIO_REGISTER_MODE_WP, &uffd_ioctls)) {
1578             goto out;
1579         }
1580         if ((uffd_ioctls & uffd_ioctls_mask) != uffd_ioctls_mask) {
1581             goto out;
1582         }
1583     }
1584     ret = true;
1585 
1586 out:
1587     uffd_close_fd(uffd_fd);
1588     return ret;
1589 }
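/*
 * Editor's sketch (not part of the original file): the per-block check above
 * is essentially a UFFDIO_REGISTER in write-protect mode followed by a test
 * of the ioctls the kernel reports back for that memory type.  The helper
 * name is hypothetical and it assumes <sys/ioctl.h> and <linux/userfaultfd.h>.
 */
#if 0
static bool example_uffd_register_wp(int uffd_fd, void *host, uint64_t size)
{
    struct uffdio_register reg = {
        .range = { .start = (uint64_t)(uintptr_t)host, .len = size },
        .mode = UFFDIO_REGISTER_MODE_WP,
    };

    if (ioctl(uffd_fd, UFFDIO_REGISTER, &reg)) {
        return false;
    }
    /* The kernel tells us which ioctls actually work on this memory */
    return (reg.ioctls & (1ULL << _UFFDIO_WRITEPROTECT)) != 0;
}
#endif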
1590 
1591 static inline void populate_read_range(RAMBlock *block, ram_addr_t offset,
1592                                        ram_addr_t size)
1593 {
1594     const ram_addr_t end = offset + size;
1595 
1596     /*
1597      * We read one byte of each page; this will preallocate page tables if
1598      * required and populate the shared zeropage on MAP_PRIVATE anonymous memory
1599      * where no page was populated yet. This might require adaptation when
1600      * supporting other mappings, like shmem.
1601      */
1602     for (; offset < end; offset += block->page_size) {
1603         char tmp = *((char *)block->host + offset);
1604 
1605         /* Don't let the compiler optimize the read away */
1606         asm volatile("" : "+r" (tmp));
1607     }
1608 }
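/*
 * Editor's note (assumption, not part of the original file): on hosts with
 * Linux 5.14+ the same prefault could be done with MADV_POPULATE_READ, as
 * sketched below (needs <sys/mman.h>); the byte-read loop above is the
 * portable variant actually used here.
 */
#if 0
static void example_populate_with_madvise(RAMBlock *block, ram_addr_t offset,
                                           ram_addr_t size)
{
    /* Prefaults page tables; anon private memory maps the shared zeropage */
    madvise((char *)block->host + offset, size, MADV_POPULATE_READ);
}
#endif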
1609 
1610 static inline int populate_read_section(MemoryRegionSection *section,
1611                                         void *opaque)
1612 {
1613     const hwaddr size = int128_get64(section->size);
1614     hwaddr offset = section->offset_within_region;
1615     RAMBlock *block = section->mr->ram_block;
1616 
1617     populate_read_range(block, offset, size);
1618     return 0;
1619 }
1620 
1621 /*
1622  * ram_block_populate_read: preallocate page tables and populate pages in the
1623  *   RAM block by reading a byte of each page.
1624  *
1625  * Since it's solely used for the userfault_fd WP feature, we just
1626  *   hardcode the page size to qemu_real_host_page_size.
1627  *
1628  * @rb: RAM block to populate
1629  */
1630 static void ram_block_populate_read(RAMBlock *rb)
1631 {
1632     /*
1633      * Skip populating all pages that fall into a discarded range as managed by
1634      * a RamDiscardManager responsible for the mapped memory region of the
1635      * RAMBlock. Such discarded ("logically unplugged") parts of a RAMBlock
1636      * must not get populated automatically. We don't have to track
1637      * modifications via userfaultfd WP reliably, because these pages will
1638      * not be part of the migration stream either way -- see
1639      * ramblock_dirty_bitmap_exclude_discarded_pages().
1640      *
1641      * Note: The result is only stable while migrating (precopy/postcopy).
1642      */
1643     if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) {
1644         RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr);
1645         MemoryRegionSection section = {
1646             .mr = rb->mr,
1647             .offset_within_region = 0,
1648             .size = rb->mr->size,
1649         };
1650 
1651         ram_discard_manager_replay_populated(rdm, &section,
1652                                              populate_read_section, NULL);
1653     } else {
1654         populate_read_range(rb, 0, rb->used_length);
1655     }
1656 }
1657 
1658 /*
1659  * ram_write_tracking_prepare: prepare for UFFD-WP memory tracking
1660  */
1661 void ram_write_tracking_prepare(void)
1662 {
1663     RAMBlock *block;
1664 
1665     RCU_READ_LOCK_GUARD();
1666 
1667     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
1668         /* Skip read-only and MMIO-writable (ROM device) regions */
1669         if (block->mr->readonly || block->mr->rom_device) {
1670             continue;
1671         }
1672 
1673         /*
1674          * Populate pages of the RAM block before enabling userfault_fd
1675          * write protection.
1676          *
1677          * This stage is required since ioctl(UFFDIO_WRITEPROTECT) with
1678          * UFFDIO_WRITEPROTECT_MODE_WP mode setting would silently skip
1679          * pages with pte_none() entries in page table.
1680          */
1681         ram_block_populate_read(block);
1682     }
1683 }
1684 
1685 static inline int uffd_protect_section(MemoryRegionSection *section,
1686                                        void *opaque)
1687 {
1688     const hwaddr size = int128_get64(section->size);
1689     const hwaddr offset = section->offset_within_region;
1690     RAMBlock *rb = section->mr->ram_block;
1691     int uffd_fd = (uintptr_t)opaque;
1692 
1693     return uffd_change_protection(uffd_fd, rb->host + offset, size, true,
1694                                   false);
1695 }
1696 
1697 static int ram_block_uffd_protect(RAMBlock *rb, int uffd_fd)
1698 {
1699     assert(rb->flags & RAM_UF_WRITEPROTECT);
1700 
1701     /* See ram_block_populate_read() */
1702     if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) {
1703         RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr);
1704         MemoryRegionSection section = {
1705             .mr = rb->mr,
1706             .offset_within_region = 0,
1707             .size = rb->mr->size,
1708         };
1709 
1710         return ram_discard_manager_replay_populated(rdm, &section,
1711                                                     uffd_protect_section,
1712                                                     (void *)(uintptr_t)uffd_fd);
1713     }
1714     return uffd_change_protection(uffd_fd, rb->host,
1715                                   rb->used_length, true, false);
1716 }
1717 
1718 /*
1719  * ram_write_tracking_start: start UFFD-WP memory tracking
1720  *
1721  * Returns 0 for success or negative value in case of error
1722  */
1723 int ram_write_tracking_start(void)
1724 {
1725     int uffd_fd;
1726     RAMState *rs = ram_state;
1727     RAMBlock *block;
1728 
1729     /* Open UFFD file descriptor */
1730     uffd_fd = uffd_create_fd(UFFD_FEATURE_PAGEFAULT_FLAG_WP, true);
1731     if (uffd_fd < 0) {
1732         return uffd_fd;
1733     }
1734     rs->uffdio_fd = uffd_fd;
1735 
1736     RCU_READ_LOCK_GUARD();
1737 
1738     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
1739         /* Skip read-only and MMIO-writable (ROM device) regions */
1740         if (block->mr->readonly || block->mr->rom_device) {
1741             continue;
1742         }
1743 
1744         /* Register block memory with UFFD to track writes */
1745         if (uffd_register_memory(rs->uffdio_fd, block->host,
1746                 block->max_length, UFFDIO_REGISTER_MODE_WP, NULL)) {
1747             goto fail;
1748         }
1749         block->flags |= RAM_UF_WRITEPROTECT;
1750         memory_region_ref(block->mr);
1751 
1752         /* Apply UFFD write protection to the block memory range */
1753         if (ram_block_uffd_protect(block, uffd_fd)) {
1754             goto fail;
1755         }
1756 
1757         trace_ram_write_tracking_ramblock_start(block->idstr, block->page_size,
1758                 block->host, block->max_length);
1759     }
1760 
1761     return 0;
1762 
1763 fail:
1764     error_report("ram_write_tracking_start() failed: restoring initial memory state");
1765 
1766     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
1767         if ((block->flags & RAM_UF_WRITEPROTECT) == 0) {
1768             continue;
1769         }
1770         uffd_unregister_memory(rs->uffdio_fd, block->host, block->max_length);
1771         /* Cleanup flags and remove reference */
1772         block->flags &= ~RAM_UF_WRITEPROTECT;
1773         memory_region_unref(block->mr);
1774     }
1775 
1776     uffd_close_fd(uffd_fd);
1777     rs->uffdio_fd = -1;
1778     return -1;
1779 }
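/*
 * Editor's summary of the write-tracking lifecycle implemented above:
 *   1. ram_write_tracking_prepare()   - touch every page so PTEs exist
 *   2. ram_write_tracking_start()     - register RAM with UFFD and write-protect it
 *   3. while saving: poll_fault_page() picks up vCPU write faults, the faulting
 *      range is saved and ram_save_release_protection() removes the protection
 *   4. ram_write_tracking_stop()      - unregister everything and close the uffd
 */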
1780 
1781 /**
1782  * ram_write_tracking_stop: stop UFFD-WP memory tracking and remove protection
1783  */
1784 void ram_write_tracking_stop(void)
1785 {
1786     RAMState *rs = ram_state;
1787     RAMBlock *block;
1788 
1789     RCU_READ_LOCK_GUARD();
1790 
1791     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
1792         if ((block->flags & RAM_UF_WRITEPROTECT) == 0) {
1793             continue;
1794         }
1795         uffd_unregister_memory(rs->uffdio_fd, block->host, block->max_length);
1796 
1797         trace_ram_write_tracking_ramblock_stop(block->idstr, block->page_size,
1798                 block->host, block->max_length);
1799 
1800         /* Cleanup flags and remove reference */
1801         block->flags &= ~RAM_UF_WRITEPROTECT;
1802         memory_region_unref(block->mr);
1803     }
1804 
1805     /* Finally close UFFD file descriptor */
1806     uffd_close_fd(rs->uffdio_fd);
1807     rs->uffdio_fd = -1;
1808 }
1809 
1810 #else
1811 /* No target OS support, stubs just fail or ignore */
1812 
1813 static RAMBlock *poll_fault_page(RAMState *rs, ram_addr_t *offset)
1814 {
1815     (void) rs;
1816     (void) offset;
1817 
1818     return NULL;
1819 }
1820 
1821 static int ram_save_release_protection(RAMState *rs, PageSearchStatus *pss,
1822         unsigned long start_page)
1823 {
1824     (void) rs;
1825     (void) pss;
1826     (void) start_page;
1827 
1828     return 0;
1829 }
1830 
1831 bool ram_write_tracking_available(void)
1832 {
1833     return false;
1834 }
1835 
1836 bool ram_write_tracking_compatible(void)
1837 {
1838     assert(0);
1839     return false;
1840 }
1841 
1842 int ram_write_tracking_start(void)
1843 {
1844     assert(0);
1845     return -1;
1846 }
1847 
1848 void ram_write_tracking_stop(void)
1849 {
1850     assert(0);
1851 }
1852 #endif /* defined(__linux__) */
1853 
1854 /**
1855  * get_queued_page: unqueue a page from the postcopy requests
1856  *
1857  * Skips pages that are already sent (!dirty)
1858  *
1859  * Returns true if a queued page is found
1860  *
1861  * @rs: current RAM state
1862  * @pss: data about the state of the current dirty page scan
1863  */
1864 static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
1865 {
1866     RAMBlock  *block;
1867     ram_addr_t offset;
1868     bool dirty;
1869 
1870     do {
1871         block = unqueue_page(rs, &offset);
1872         /*
1873          * We're sending this page, and since it's postcopy nothing else
1874          * will dirty it, and we must make sure it doesn't get sent again
1875          * even if this queue request was received after the background
1876          * search already sent it.
1877          */
1878         if (block) {
1879             unsigned long page;
1880 
1881             page = offset >> TARGET_PAGE_BITS;
1882             dirty = test_bit(page, block->bmap);
1883             if (!dirty) {
1884                 trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset,
1885                                                 page);
1886             } else {
1887                 trace_get_queued_page(block->idstr, (uint64_t)offset, page);
1888             }
1889         }
1890 
1891     } while (block && !dirty);
1892 
1893     if (!block) {
1894         /*
1895          * Also poll for write faults if background snapshot is enabled; in
1896          * that mode vCPUs may be blocked on write-protected pages.
1897          */
1898         block = poll_fault_page(rs, &offset);
1899     }
1900 
1901     if (block) {
1902         /*
1903          * We want the background search to continue from the queued page
1904          * since the guest is likely to want other pages near to the page
1905          * it just requested.
1906          */
1907         pss->block = block;
1908         pss->page = offset >> TARGET_PAGE_BITS;
1909 
1910         /*
1911          * This unqueued page would break the "one round" check, even if
1912          * it is really rare.
1913          */
1914         pss->complete_round = false;
1915     }
1916 
1917     return !!block;
1918 }
1919 
1920 /**
1921  * migration_page_queue_free: drop any remaining pages in the ram
1922  * request queue
1923  *
1924  * It should be empty at the end anyway, but in error cases there may
1925  * be some pages left; in that case we drop them.
1926  *
1927  */
1928 static void migration_page_queue_free(RAMState *rs)
1929 {
1930     struct RAMSrcPageRequest *mspr, *next_mspr;
1931     /* This queue should generally be empty - but in the case of a failed
1932      * migration it might have some leftovers.
1933      */
1934     RCU_READ_LOCK_GUARD();
1935     QSIMPLEQ_FOREACH_SAFE(mspr, &rs->src_page_requests, next_req, next_mspr) {
1936         memory_region_unref(mspr->rb->mr);
1937         QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
1938         g_free(mspr);
1939     }
1940 }
1941 
1942 /**
1943  * ram_save_queue_pages: queue the page for transmission
1944  *
1945  * A request from postcopy destination for example.
1946  *
1947  * Returns zero on success or negative on error
1948  *
1949  * @rbname: Name of the RAMBlock of the request. NULL means the
1950  *          same as the last one.
1951  * @start: starting address from the start of the RAMBlock
1952  * @len: length (in bytes) to send
1953  */
1954 int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len)
1955 {
1956     RAMBlock *ramblock;
1957     RAMState *rs = ram_state;
1958 
1959     stat64_add(&mig_stats.postcopy_requests, 1);
1960     RCU_READ_LOCK_GUARD();
1961 
1962     if (!rbname) {
1963         /* Reuse last RAMBlock */
1964         ramblock = rs->last_req_rb;
1965 
1966         if (!ramblock) {
1967             /*
1968              * Shouldn't happen, we can't reuse the last RAMBlock if
1969              * it's the 1st request.
1970              */
1971             error_report("ram_save_queue_pages no previous block");
1972             return -1;
1973         }
1974     } else {
1975         ramblock = qemu_ram_block_by_name(rbname);
1976 
1977         if (!ramblock) {
1978             /* We shouldn't be asked for a non-existent RAMBlock */
1979             error_report("ram_save_queue_pages no block '%s'", rbname);
1980             return -1;
1981         }
1982         rs->last_req_rb = ramblock;
1983     }
1984     trace_ram_save_queue_pages(ramblock->idstr, start, len);
1985     if (!offset_in_ramblock(ramblock, start + len - 1)) {
1986         error_report("%s request overrun start=" RAM_ADDR_FMT " len="
1987                      RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT,
1988                      __func__, start, len, ramblock->used_length);
1989         return -1;
1990     }
1991 
1992     /*
1993      * When with postcopy preempt, we send back the page directly in the
1994      * rp-return thread.
1995      */
1996     if (postcopy_preempt_active()) {
1997         ram_addr_t page_start = start >> TARGET_PAGE_BITS;
1998         size_t page_size = qemu_ram_pagesize(ramblock);
1999         PageSearchStatus *pss = &ram_state->pss[RAM_CHANNEL_POSTCOPY];
2000         int ret = 0;
2001 
2002         qemu_mutex_lock(&rs->bitmap_mutex);
2003 
2004         pss_init(pss, ramblock, page_start);
2005         /*
2006          * Always use the preempt channel, and make sure it's there.  It's
2007          * safe to access it without a lock, because while the rp-return thread
2008          * is running we're the only one operating on this QEMUFile.
2009          */
2010         pss->pss_channel = migrate_get_current()->postcopy_qemufile_src;
2011         assert(pss->pss_channel);
2012 
2013         /*
2014          * The length must be a multiple of the host page size.  Just
2015          * assert; if something is wrong we're mostly split-brain anyway.
2016          */
2017         assert(len % page_size == 0);
2018         while (len) {
2019             if (ram_save_host_page_urgent(pss)) {
2020                 error_report("%s: ram_save_host_page_urgent() failed: "
2021                              "ramblock=%s, start_addr=0x"RAM_ADDR_FMT,
2022                              __func__, ramblock->idstr, start);
2023                 ret = -1;
2024                 break;
2025             }
2026             /*
2027              * NOTE: after ram_save_host_page_urgent() succeeded, pss->page
2028              * will automatically be moved and point to the next host page
2029              * we're going to send, so no need to update here.
2030              *
2031              * Normally QEMU never sends more than one host page per request,
2032              * so logically we don't even need the loop as it should only run
2033              * once; keep it anyway for consistency.
2034              */
2035             len -= page_size;
2036         }
2037         qemu_mutex_unlock(&rs->bitmap_mutex);
2038 
2039         return ret;
2040     }
2041 
2042     struct RAMSrcPageRequest *new_entry =
2043         g_new0(struct RAMSrcPageRequest, 1);
2044     new_entry->rb = ramblock;
2045     new_entry->offset = start;
2046     new_entry->len = len;
2047 
2048     memory_region_ref(ramblock->mr);
2049     qemu_mutex_lock(&rs->src_page_req_mutex);
2050     QSIMPLEQ_INSERT_TAIL(&rs->src_page_requests, new_entry, next_req);
2051     migration_make_urgent_request();
2052     qemu_mutex_unlock(&rs->src_page_req_mutex);
2053 
2054     return 0;
2055 }
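/*
 * Editor's note: the function above has two paths.  With postcopy preemption
 * active, the requested page is sent immediately on the dedicated preempt
 * channel from the return-path thread; otherwise the request is only queued
 * here and the migration thread later picks it up via get_queued_page().
 */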
2056 
2057 static bool save_page_use_compression(RAMState *rs)
2058 {
2059     if (!migrate_compress()) {
2060         return false;
2061     }
2062 
2063     /*
2064      * If xbzrle is enabled (e.g., after first round of migration), stop
2065      * using the data compression. In theory, xbzrle can do better than
2066      * compression.
2067      */
2068     if (rs->xbzrle_started) {
2069         return false;
2070     }
2071 
2072     return true;
2073 }
2074 
2075 /*
2076  * Try to compress the page before sending it out.  Returns true if the page
2077  * has been properly handled by compression, otherwise it needs other
2078  * paths to handle it.
2079  */
2080 static bool save_compress_page(RAMState *rs, PageSearchStatus *pss,
2081                                RAMBlock *block, ram_addr_t offset)
2082 {
2083     if (!save_page_use_compression(rs)) {
2084         return false;
2085     }
2086 
2087     /*
2088      * When starting the process of a new block, the first page of
2089      * the block should be sent out before other pages in the same
2090      * block, and all the pages in the last block should have been sent
2091      * out.  Keeping this order is important, because the 'cont' flag
2092      * is used to avoid resending the block name.
2093      *
2094      * We post the first page as a normal page since compression will take
2095      * a lot of CPU resource.
2096      */
2097     if (block != pss->last_sent_block) {
2098         ram_flush_compressed_data(rs);
2099         return false;
2100     }
2101 
2102     if (compress_page_with_multi_thread(block, offset, send_queued_data) > 0) {
2103         return true;
2104     }
2105 
2106     compression_counters.busy++;
2107     return false;
2108 }
2109 
2110 /**
2111  * ram_save_target_page_legacy: save one target page
2112  *
2113  * Returns the number of pages written
2114  *
2115  * @rs: current RAM state
2116  * @pss: data about the page we want to send
2117  */
2118 static int ram_save_target_page_legacy(RAMState *rs, PageSearchStatus *pss)
2119 {
2120     RAMBlock *block = pss->block;
2121     ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
2122     int res;
2123 
2124     if (control_save_page(pss, block, offset, &res)) {
2125         return res;
2126     }
2127 
2128     if (save_compress_page(rs, pss, block, offset)) {
2129         return 1;
2130     }
2131 
2132     res = save_zero_page(pss, pss->pss_channel, block, offset);
2133     if (res > 0) {
2134         /* Must let xbzrle know, otherwise a previous (now 0'd) cached
2135          * page would be stale
2136          */
2137         if (rs->xbzrle_started) {
2138             XBZRLE_cache_lock();
2139             xbzrle_cache_zero_page(rs, block->offset + offset);
2140             XBZRLE_cache_unlock();
2141         }
2142         return res;
2143     }
2144 
2145     /*
2146      * Do not use multifd in postcopy, as one whole host page should be
2147      * placed at a time.  Postcopy requires atomic updates of pages, so even
2148      * if host page size == guest page size the running destination guest may
2149      * still see partially copied pages, which is data corruption.
2150      */
2151     if (migrate_multifd() && !migration_in_postcopy()) {
2152         return ram_save_multifd_page(pss->pss_channel, block, offset);
2153     }
2154 
2155     return ram_save_page(rs, pss);
2156 }
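/*
 * Editor's note: the method priority in ram_save_target_page_legacy() above
 * is fixed: the control hook (e.g. RDMA) first, then multi-threaded
 * compression, then zero-page detection, then multifd (precopy only), and
 * finally the plain or XBZRLE path in ram_save_page().
 */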
2157 
2158 /* Should be called before sending a host page */
2159 static void pss_host_page_prepare(PageSearchStatus *pss)
2160 {
2161     /* How many guest pages are there in one host page? */
2162     size_t guest_pfns = qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
2163 
2164     pss->host_page_sending = true;
2165     if (guest_pfns <= 1) {
2166         /*
2167          * This covers both when guest psize == host psize, or when guest
2168          * has larger psize than the host (guest_pfns==0).
2169          *
2170          * For the latter, we always send one whole guest page per
2171          * iteration of the host page (example: an Alpha VM on x86 host
2172          * will have guest psize 8K while host psize 4K).
2173          */
2174         pss->host_page_start = pss->page;
2175         pss->host_page_end = pss->page + 1;
2176     } else {
2177         /*
2178          * The host page spans over multiple guest pages, we send them
2179          * within the same host page iteration.
2180          */
2181         pss->host_page_start = ROUND_DOWN(pss->page, guest_pfns);
2182         pss->host_page_end = ROUND_UP(pss->page + 1, guest_pfns);
2183     }
2184 }
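/*
 * Editor's example: with 2 MiB hugepages backing the block and 4 KiB target
 * pages, guest_pfns == 512; for pss->page == 1000 the prepared window is
 * host_page_start == 512 and host_page_end == 1024, i.e. the whole hugepage
 * containing target page 1000.
 */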
2185 
2186 /*
2187  * Whether the page pointed by PSS is within the host page being sent.
2188  * Must be called after a previous pss_host_page_prepare().
2189  */
2190 static bool pss_within_range(PageSearchStatus *pss)
2191 {
2192     ram_addr_t ram_addr;
2193 
2194     assert(pss->host_page_sending);
2195 
2196     /* Over host-page boundary? */
2197     if (pss->page >= pss->host_page_end) {
2198         return false;
2199     }
2200 
2201     ram_addr = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
2202 
2203     return offset_in_ramblock(pss->block, ram_addr);
2204 }
2205 
2206 static void pss_host_page_finish(PageSearchStatus *pss)
2207 {
2208     pss->host_page_sending = false;
2209     /* This is not needed, but just to reset it */
2210     pss->host_page_start = pss->host_page_end = 0;
2211 }
2212 
2213 /*
2214  * Send an urgent host page specified by `pss'.  Must be called with
2215  * bitmap_mutex held.
2216  *
2217  * Returns 0 if saving the host page succeeded, a negative value otherwise.
2218  */
2219 static int ram_save_host_page_urgent(PageSearchStatus *pss)
2220 {
2221     bool page_dirty, sent = false;
2222     RAMState *rs = ram_state;
2223     int ret = 0;
2224 
2225     trace_postcopy_preempt_send_host_page(pss->block->idstr, pss->page);
2226     pss_host_page_prepare(pss);
2227 
2228     /*
2229      * If precopy is sending the same page, let it be done in precopy, or
2230      * we could send the same page in two channels and none of them will
2231      * receive the whole page.
2232      */
2233     if (pss_overlap(pss, &ram_state->pss[RAM_CHANNEL_PRECOPY])) {
2234         trace_postcopy_preempt_hit(pss->block->idstr,
2235                                    pss->page << TARGET_PAGE_BITS);
2236         return 0;
2237     }
2238 
2239     do {
2240         page_dirty = migration_bitmap_clear_dirty(rs, pss->block, pss->page);
2241 
2242         if (page_dirty) {
2243             /* Be strict about the return code; it must be exactly 1 (one page saved) */
2244             if (migration_ops->ram_save_target_page(rs, pss) != 1) {
2245                 error_report_once("%s: ram_save_target_page failed", __func__);
2246                 ret = -1;
2247                 goto out;
2248             }
2249             sent = true;
2250         }
2251         pss_find_next_dirty(pss);
2252     } while (pss_within_range(pss));
2253 out:
2254     pss_host_page_finish(pss);
2255     /* For urgent requests, flush immediately if sent */
2256     if (sent) {
2257         qemu_fflush(pss->pss_channel);
2258     }
2259     return ret;
2260 }
2261 
2262 /**
2263  * ram_save_host_page: save a whole host page
2264  *
2265  * Starting at the current page of @pss, send pages up to the end of the
2266  * current host page. It's valid for the starting page to be in the middle
2267  * of a host page, in which case the remainder of the host page is sent.
2268  * Only dirty target pages are sent. Note that the host page size may
2269  * be a huge page for this block.
2270  *
2271  * The saving stops at the boundary of the used_length of the block
2272  * if the RAMBlock isn't a multiple of the host page size.
2273  *
2274  * The caller must hold ram_state.bitmap_mutex when calling this
2275  * function.  Note that this function can temporarily release the lock, but
2276  * it makes sure the lock is held again before it returns.
2277  *
2278  * Returns the number of pages written or negative on error
2279  *
2280  * @rs: current RAM state
2281  * @pss: data about the page we want to send
2282  */
2283 static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss)
2284 {
2285     bool page_dirty, preempt_active = postcopy_preempt_active();
2286     int tmppages, pages = 0;
2287     size_t pagesize_bits =
2288         qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
2289     unsigned long start_page = pss->page;
2290     int res;
2291 
2292     if (migrate_ram_is_ignored(pss->block)) {
2293         error_report("block %s should not be migrated!", pss->block->idstr);
2294         return 0;
2295     }
2296 
2297     /* Update host page boundary information */
2298     pss_host_page_prepare(pss);
2299 
2300     do {
2301         page_dirty = migration_bitmap_clear_dirty(rs, pss->block, pss->page);
2302 
2303         /* Check whether the page is dirty and, if it is, send it */
2304         if (page_dirty) {
2305             /*
2306              * Properly yield the lock only in postcopy preempt mode
2307              * because both migration thread and rp-return thread can
2308              * operate on the bitmaps.
2309              */
2310             if (preempt_active) {
2311                 qemu_mutex_unlock(&rs->bitmap_mutex);
2312             }
2313             tmppages = migration_ops->ram_save_target_page(rs, pss);
2314             if (tmppages >= 0) {
2315                 pages += tmppages;
2316                 /*
2317                  * Allow rate limiting to happen in the middle of huge pages if
2318                  * something is sent in the current iteration.
2319                  */
2320                 if (pagesize_bits > 1 && tmppages > 0) {
2321                     migration_rate_limit();
2322                 }
2323             }
2324             if (preempt_active) {
2325                 qemu_mutex_lock(&rs->bitmap_mutex);
2326             }
2327         } else {
2328             tmppages = 0;
2329         }
2330 
2331         if (tmppages < 0) {
2332             pss_host_page_finish(pss);
2333             return tmppages;
2334         }
2335 
2336         pss_find_next_dirty(pss);
2337     } while (pss_within_range(pss));
2338 
2339     pss_host_page_finish(pss);
2340 
2341     res = ram_save_release_protection(rs, pss, start_page);
2342     return (res < 0 ? res : pages);
2343 }
2344 
2345 /**
2346  * ram_find_and_save_block: finds a dirty page and sends it to f
2347  *
2348  * Called within an RCU critical section.
2349  *
2350  * Returns the number of pages written where zero means no dirty pages,
2351  * or negative on error
2352  *
2353  * @rs: current RAM state
2354  *
2355  * On systems where host-page-size > target-page-size it will send all the
2356  * pages in a host page that are dirty.
2357  */
2358 static int ram_find_and_save_block(RAMState *rs)
2359 {
2360     PageSearchStatus *pss = &rs->pss[RAM_CHANNEL_PRECOPY];
2361     int pages = 0;
2362 
2363     /* No dirty page as there is zero RAM */
2364     if (!rs->ram_bytes_total) {
2365         return pages;
2366     }
2367 
2368     /*
2369      * Always keep last_seen_block/last_page valid during this procedure,
2370      * because find_dirty_block() relies on these values (e.g., we compare
2371      * last_seen_block with pss.block to see whether we searched all the
2372      * ramblocks) to detect the completion of migration.  A NULL
2373      * last_seen_block can, in some conditions, cause the loop below to run forever.
2374      */
2375     if (!rs->last_seen_block) {
2376         rs->last_seen_block = QLIST_FIRST_RCU(&ram_list.blocks);
2377         rs->last_page = 0;
2378     }
2379 
2380     pss_init(pss, rs->last_seen_block, rs->last_page);
2381 
2382     while (true) {
2383         if (!get_queued_page(rs, pss)) {
2384             /* priority queue empty, so just search for something dirty */
2385             int res = find_dirty_block(rs, pss);
2386             if (res != PAGE_DIRTY_FOUND) {
2387                 if (res == PAGE_ALL_CLEAN) {
2388                     break;
2389                 } else if (res == PAGE_TRY_AGAIN) {
2390                     continue;
2391                 } else if (res < 0) {
2392                     pages = res;
2393                     break;
2394                 }
2395             }
2396         }
2397         pages = ram_save_host_page(rs, pss);
2398         if (pages) {
2399             break;
2400         }
2401     }
2402 
2403     rs->last_seen_block = pss->block;
2404     rs->last_page = pss->page;
2405 
2406     return pages;
2407 }
2408 
2409 static uint64_t ram_bytes_total_with_ignored(void)
2410 {
2411     RAMBlock *block;
2412     uint64_t total = 0;
2413 
2414     RCU_READ_LOCK_GUARD();
2415 
2416     RAMBLOCK_FOREACH_MIGRATABLE(block) {
2417         total += block->used_length;
2418     }
2419     return total;
2420 }
2421 
2422 uint64_t ram_bytes_total(void)
2423 {
2424     RAMBlock *block;
2425     uint64_t total = 0;
2426 
2427     RCU_READ_LOCK_GUARD();
2428 
2429     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
2430         total += block->used_length;
2431     }
2432     return total;
2433 }
2434 
2435 static void xbzrle_load_setup(void)
2436 {
2437     XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
2438 }
2439 
2440 static void xbzrle_load_cleanup(void)
2441 {
2442     g_free(XBZRLE.decoded_buf);
2443     XBZRLE.decoded_buf = NULL;
2444 }
2445 
2446 static void ram_state_cleanup(RAMState **rsp)
2447 {
2448     if (*rsp) {
2449         migration_page_queue_free(*rsp);
2450         qemu_mutex_destroy(&(*rsp)->bitmap_mutex);
2451         qemu_mutex_destroy(&(*rsp)->src_page_req_mutex);
2452         g_free(*rsp);
2453         *rsp = NULL;
2454     }
2455 }
2456 
2457 static void xbzrle_cleanup(void)
2458 {
2459     XBZRLE_cache_lock();
2460     if (XBZRLE.cache) {
2461         cache_fini(XBZRLE.cache);
2462         g_free(XBZRLE.encoded_buf);
2463         g_free(XBZRLE.current_buf);
2464         g_free(XBZRLE.zero_target_page);
2465         XBZRLE.cache = NULL;
2466         XBZRLE.encoded_buf = NULL;
2467         XBZRLE.current_buf = NULL;
2468         XBZRLE.zero_target_page = NULL;
2469     }
2470     XBZRLE_cache_unlock();
2471 }
2472 
2473 static void ram_save_cleanup(void *opaque)
2474 {
2475     RAMState **rsp = opaque;
2476     RAMBlock *block;
2477 
2478     /* We don't use dirty log with background snapshots */
2479     if (!migrate_background_snapshot()) {
2480         /* The caller holds the iothread lock or is in a BH, so there is
2481          * no race writing to the migration bitmap
2482          */
2483         if (global_dirty_tracking & GLOBAL_DIRTY_MIGRATION) {
2484             /*
2485              * do not stop the dirty log without having started it, since
2486              * memory_global_dirty_log_stop will assert that
2487              * memory_global_dirty_log_start/stop are used in pairs
2488              */
2489             memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION);
2490         }
2491     }
2492 
2493     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
2494         g_free(block->clear_bmap);
2495         block->clear_bmap = NULL;
2496         g_free(block->bmap);
2497         block->bmap = NULL;
2498     }
2499 
2500     xbzrle_cleanup();
2501     compress_threads_save_cleanup();
2502     ram_state_cleanup(rsp);
2503     g_free(migration_ops);
2504     migration_ops = NULL;
2505 }
2506 
2507 static void ram_state_reset(RAMState *rs)
2508 {
2509     int i;
2510 
2511     for (i = 0; i < RAM_CHANNEL_MAX; i++) {
2512         rs->pss[i].last_sent_block = NULL;
2513     }
2514 
2515     rs->last_seen_block = NULL;
2516     rs->last_page = 0;
2517     rs->last_version = ram_list.version;
2518     rs->xbzrle_started = false;
2519 }
2520 
2521 #define MAX_WAIT 50 /* ms, half buffered_file limit */
2522 
2523 /* **** functions for postcopy ***** */
2524 
2525 void ram_postcopy_migrated_memory_release(MigrationState *ms)
2526 {
2527     struct RAMBlock *block;
2528 
2529     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
2530         unsigned long *bitmap = block->bmap;
2531         unsigned long range = block->used_length >> TARGET_PAGE_BITS;
2532         unsigned long run_start = find_next_zero_bit(bitmap, range, 0);
2533 
2534         while (run_start < range) {
2535             unsigned long run_end = find_next_bit(bitmap, range, run_start + 1);
2536             ram_discard_range(block->idstr,
2537                               ((ram_addr_t)run_start) << TARGET_PAGE_BITS,
2538                               ((ram_addr_t)(run_end - run_start))
2539                                 << TARGET_PAGE_BITS);
2540             run_start = find_next_zero_bit(bitmap, range, run_end + 1);
2541         }
2542     }
2543 }
2544 
2545 /**
2546  * postcopy_send_discard_bm_ram: discard a RAMBlock
2547  *
2548  * Callback from postcopy_each_ram_send_discard for each RAMBlock
2549  *
2550  * @ms: current migration state
2551  * @block: RAMBlock to discard
2552  */
2553 static void postcopy_send_discard_bm_ram(MigrationState *ms, RAMBlock *block)
2554 {
2555     unsigned long end = block->used_length >> TARGET_PAGE_BITS;
2556     unsigned long current;
2557     unsigned long *bitmap = block->bmap;
2558 
2559     for (current = 0; current < end; ) {
2560         unsigned long one = find_next_bit(bitmap, end, current);
2561         unsigned long zero, discard_length;
2562 
2563         if (one >= end) {
2564             break;
2565         }
2566 
2567         zero = find_next_zero_bit(bitmap, end, one + 1);
2568 
2569         if (zero >= end) {
2570             discard_length = end - one;
2571         } else {
2572             discard_length = zero - one;
2573         }
2574         postcopy_discard_send_range(ms, one, discard_length);
2575         current = one + discard_length;
2576     }
2577 }
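/*
 * Editor's example: for a dirty bitmap of ...00111100 (target pages 2-5
 * dirty) the loop above finds one == 2 and zero == 6, so it emits a single
 * postcopy_discard_send_range(ms, 2, 4) covering the whole run.
 */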
2578 
2579 static void postcopy_chunk_hostpages_pass(MigrationState *ms, RAMBlock *block);
2580 
2581 /**
2582  * postcopy_each_ram_send_discard: discard all RAMBlocks
2583  *
2584  * Utility for the outgoing postcopy code.
2585  *   Calls postcopy_send_discard_bm_ram for each RAMBlock
2586  *   passing it bitmap indexes and name.
2587  * (qemu_ram_foreach_block ends up passing unscaled lengths
2588  *  which would mean postcopy code would have to deal with target page)
2589  *
2590  * @ms: current migration state
2591  */
2592 static void postcopy_each_ram_send_discard(MigrationState *ms)
2593 {
2594     struct RAMBlock *block;
2595 
2596     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
2597         postcopy_discard_send_init(ms, block->idstr);
2598 
2599         /*
2600          * Deal with TPS != HPS and huge pages.  This discards any partially
2601          * sent host-page-sized chunks and marks any partially dirty
2602          * host-page-sized chunks as all dirty.  In this case the host page is
2603          * the host page for the particular RAMBlock, i.e. it might be a huge page.
2604          */
2605         postcopy_chunk_hostpages_pass(ms, block);
2606 
2607         /*
2608          * Postcopy sends chunks of the bitmap over the wire, but it
2609          * just needs indexes at this point; this avoids it having
2610          * target-page-specific code.
2611          */
2612         postcopy_send_discard_bm_ram(ms, block);
2613         postcopy_discard_send_finish(ms);
2614     }
2615 }
2616 
2617 /**
2618  * postcopy_chunk_hostpages_pass: canonicalize bitmap in hostpages
2619  *
2620  * Helper for postcopy_chunk_hostpages; it's called twice to
2621  * canonicalize the two bitmaps, which are similar, but one is
2622  * inverted.
2623  *
2624  * Postcopy requires that all target pages in a hostpage are dirty or
2625  * clean, not a mix.  This function canonicalizes the bitmaps.
2626  *
2627  * @ms: current migration state
2628  * @block: block that contains the page we want to canonicalize
2629  */
2630 static void postcopy_chunk_hostpages_pass(MigrationState *ms, RAMBlock *block)
2631 {
2632     RAMState *rs = ram_state;
2633     unsigned long *bitmap = block->bmap;
2634     unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE;
2635     unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
2636     unsigned long run_start;
2637 
2638     if (block->page_size == TARGET_PAGE_SIZE) {
2639         /* Easy case - TPS==HPS for a non-huge page RAMBlock */
2640         return;
2641     }
2642 
2643     /* Find a dirty page */
2644     run_start = find_next_bit(bitmap, pages, 0);
2645 
2646     while (run_start < pages) {
2647 
2648         /*
2649          * If the start of this run of pages is in the middle of a host
2650          * page, then we need to fixup this host page.
2651          */
2652         if (QEMU_IS_ALIGNED(run_start, host_ratio)) {
2653             /* Find the end of this run */
2654             run_start = find_next_zero_bit(bitmap, pages, run_start + 1);
2655             /*
2656              * If the end isn't at the start of a host page, then the
2657              * run doesn't finish at the end of a host page
2658              * and we need to discard.
2659              */
2660         }
2661 
2662         if (!QEMU_IS_ALIGNED(run_start, host_ratio)) {
2663             unsigned long page;
2664             unsigned long fixup_start_addr = QEMU_ALIGN_DOWN(run_start,
2665                                                              host_ratio);
2666             run_start = QEMU_ALIGN_UP(run_start, host_ratio);
2667 
2668             /* Clean up the bitmap */
2669             for (page = fixup_start_addr;
2670                  page < fixup_start_addr + host_ratio; page++) {
2671                 /*
2672                  * Remark them as dirty, updating the count for any pages
2673                  * that weren't previously dirty.
2674                  */
2675                 rs->migration_dirty_pages += !test_and_set_bit(page, bitmap);
2676             }
2677         }
2678 
2679         /* Find the next dirty page for the next iteration */
2680         run_start = find_next_bit(bitmap, pages, run_start);
2681     }
2682 }
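/*
 * Editor's example: with a 2 MiB hugepage RAMBlock and 4 KiB target pages
 * (host_ratio == 512), a dirty run covering target pages 1000-1030 is not
 * hugepage aligned, so the pass above re-marks pages 512-1023 and 1024-1535
 * as dirty; the destination then deals only in whole hugepages.
 */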
2683 
2684 /**
2685  * ram_postcopy_send_discard_bitmap: transmit the discard bitmap
2686  *
2687  * Transmit the set of pages to be discarded after precopy to the target;
2688  * these are pages that:
2689  *     a) Have been previously transmitted but are now dirty again
2690  *     b) Have never been transmitted; this ensures that any pages on the
2691  *        destination that have been mapped by background tasks get
2692  *        discarded (transparent huge pages are the specific concern)
2693  * Hopefully this is pretty sparse
2694  *
2695  * @ms: current migration state
2696  */
2697 void ram_postcopy_send_discard_bitmap(MigrationState *ms)
2698 {
2699     RAMState *rs = ram_state;
2700 
2701     RCU_READ_LOCK_GUARD();
2702 
2703     /* This should be our last sync, the src is now paused */
2704     migration_bitmap_sync(rs, false);
2705 
2706     /* Easiest way to make sure we don't resume in the middle of a host-page */
2707     rs->pss[RAM_CHANNEL_PRECOPY].last_sent_block = NULL;
2708     rs->last_seen_block = NULL;
2709     rs->last_page = 0;
2710 
2711     postcopy_each_ram_send_discard(ms);
2712 
2713     trace_ram_postcopy_send_discard_bitmap();
2714 }
2715 
2716 /**
2717  * ram_discard_range: discard dirtied pages at the beginning of postcopy
2718  *
2719  * Returns zero on success
2720  *
2721  * @rbname: name of the RAMBlock of the request. NULL means the
2722  *          same as the last one.
2723  * @start: starting byte offset within the RAMBlock
2724  * @length: length in bytes to discard
2725  */
2726 int ram_discard_range(const char *rbname, uint64_t start, size_t length)
2727 {
2728     trace_ram_discard_range(rbname, start, length);
2729 
2730     RCU_READ_LOCK_GUARD();
2731     RAMBlock *rb = qemu_ram_block_by_name(rbname);
2732 
2733     if (!rb) {
2734         error_report("ram_discard_range: Failed to find block '%s'", rbname);
2735         return -1;
2736     }
2737 
2738     /*
2739      * On source VM, we don't need to update the received bitmap since
2740      * we don't even have one.
2741      */
2742     if (rb->receivedmap) {
2743         bitmap_clear(rb->receivedmap, start >> qemu_target_page_bits(),
2744                      length >> qemu_target_page_bits());
2745     }
2746 
2747     return ram_block_discard_range(rb, start, length);
2748 }
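/*
 * Editor's note (assumption about the helper's behaviour): ram_block_discard_range()
 * ultimately drops the pages on the host, e.g. via madvise(MADV_DONTNEED) for
 * anonymous memory or fallocate(FALLOC_FL_PUNCH_HOLE) for file-backed memory,
 * so in postcopy the destination re-faults them fresh from the migration stream.
 */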
2749 
2750 /*
2751  * For every allocation, we will try not to crash the VM if the
2752  * allocation fails.
2753  */
2754 static int xbzrle_init(void)
2755 {
2756     Error *local_err = NULL;
2757 
2758     if (!migrate_xbzrle()) {
2759         return 0;
2760     }
2761 
2762     XBZRLE_cache_lock();
2763 
2764     XBZRLE.zero_target_page = g_try_malloc0(TARGET_PAGE_SIZE);
2765     if (!XBZRLE.zero_target_page) {
2766         error_report("%s: Error allocating zero page", __func__);
2767         goto err_out;
2768     }
2769 
2770     XBZRLE.cache = cache_init(migrate_xbzrle_cache_size(),
2771                               TARGET_PAGE_SIZE, &local_err);
2772     if (!XBZRLE.cache) {
2773         error_report_err(local_err);
2774         goto free_zero_page;
2775     }
2776 
2777     XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
2778     if (!XBZRLE.encoded_buf) {
2779         error_report("%s: Error allocating encoded_buf", __func__);
2780         goto free_cache;
2781     }
2782 
2783     XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
2784     if (!XBZRLE.current_buf) {
2785         error_report("%s: Error allocating current_buf", __func__);
2786         goto free_encoded_buf;
2787     }
2788 
2789     /* We are all good */
2790     XBZRLE_cache_unlock();
2791     return 0;
2792 
2793 free_encoded_buf:
2794     g_free(XBZRLE.encoded_buf);
2795     XBZRLE.encoded_buf = NULL;
2796 free_cache:
2797     cache_fini(XBZRLE.cache);
2798     XBZRLE.cache = NULL;
2799 free_zero_page:
2800     g_free(XBZRLE.zero_target_page);
2801     XBZRLE.zero_target_page = NULL;
2802 err_out:
2803     XBZRLE_cache_unlock();
2804     return -ENOMEM;
2805 }
2806 
2807 static int ram_state_init(RAMState **rsp)
2808 {
2809     *rsp = g_try_new0(RAMState, 1);
2810 
2811     if (!*rsp) {
2812         error_report("%s: Init ramstate fail", __func__);
2813         return -1;
2814     }
2815 
2816     qemu_mutex_init(&(*rsp)->bitmap_mutex);
2817     qemu_mutex_init(&(*rsp)->src_page_req_mutex);
2818     QSIMPLEQ_INIT(&(*rsp)->src_page_requests);
2819     (*rsp)->ram_bytes_total = ram_bytes_total();
2820 
2821     /*
2822      * Count the total number of pages used by ram blocks not including any
2823      * gaps due to alignment or unplugs.
2824      * This must match the initial values of the dirty bitmap.
2825      */
2826     (*rsp)->migration_dirty_pages = (*rsp)->ram_bytes_total >> TARGET_PAGE_BITS;
2827     ram_state_reset(*rsp);
2828 
2829     return 0;
2830 }
2831 
2832 static void ram_list_init_bitmaps(void)
2833 {
2834     MigrationState *ms = migrate_get_current();
2835     RAMBlock *block;
2836     unsigned long pages;
2837     uint8_t shift;
2838 
2839     /* Skip setting bitmap if there is no RAM */
2840     if (ram_bytes_total()) {
2841         shift = ms->clear_bitmap_shift;
2842         if (shift > CLEAR_BITMAP_SHIFT_MAX) {
2843             error_report("clear_bitmap_shift (%u) too big, using "
2844                          "max value (%u)", shift, CLEAR_BITMAP_SHIFT_MAX);
2845             shift = CLEAR_BITMAP_SHIFT_MAX;
2846         } else if (shift < CLEAR_BITMAP_SHIFT_MIN) {
2847             error_report("clear_bitmap_shift (%u) too small, using "
2848                          "min value (%u)", shift, CLEAR_BITMAP_SHIFT_MIN);
2849             shift = CLEAR_BITMAP_SHIFT_MIN;
2850         }
2851 
2852         RAMBLOCK_FOREACH_NOT_IGNORED(block) {
2853             pages = block->max_length >> TARGET_PAGE_BITS;
2854             /*
2855              * The initial dirty bitmap for migration must be set with all
2856              * ones to make sure we'll migrate every guest RAM page to the
2857              * destination.
2858              * Here we set RAMBlock.bmap to all ones because when restarting
2859              * a migration after a failed one, ram_list.
2860              * dirty_memory[DIRTY_MEMORY_MIGRATION] doesn't cover the whole
2861              * guest memory.
2862              */
2863             block->bmap = bitmap_new(pages);
2864             bitmap_set(block->bmap, 0, pages);
2865             block->clear_bmap_shift = shift;
2866             block->clear_bmap = bitmap_new(clear_bmap_size(pages, shift));
2867         }
2868     }
2869 }
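/*
 * Editor's example (assuming the default clear_bitmap_shift of 18): with
 * 4 KiB target pages each clear_bmap bit then covers 2^18 pages, i.e. 1 GiB
 * of guest RAM, so the expensive dirty-log clearing in the kernel is batched
 * at 1 GiB granularity instead of per page.
 */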
2870 
2871 static void migration_bitmap_clear_discarded_pages(RAMState *rs)
2872 {
2873     unsigned long pages;
2874     RAMBlock *rb;
2875 
2876     RCU_READ_LOCK_GUARD();
2877 
2878     RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
2879         pages = ramblock_dirty_bitmap_clear_discarded_pages(rb);
2880         rs->migration_dirty_pages -= pages;
2881     }
2882 }
2883 
2884 static void ram_init_bitmaps(RAMState *rs)
2885 {
2886     /* For memory_global_dirty_log_start below.  */
2887     qemu_mutex_lock_iothread();
2888     qemu_mutex_lock_ramlist();
2889 
2890     WITH_RCU_READ_LOCK_GUARD() {
2891         ram_list_init_bitmaps();
2892         /* We don't use dirty log with background snapshots */
2893         if (!migrate_background_snapshot()) {
2894             memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION);
2895             migration_bitmap_sync_precopy(rs, false);
2896         }
2897     }
2898     qemu_mutex_unlock_ramlist();
2899     qemu_mutex_unlock_iothread();
2900 
2901     /*
2902      * After a possible first bitmap sync, fix up the initial bitmap
2903      * containing all 1s to exclude any discarded pages from migration.
2904      */
2905     migration_bitmap_clear_discarded_pages(rs);
2906 }
2907 
2908 static int ram_init_all(RAMState **rsp)
2909 {
2910     if (ram_state_init(rsp)) {
2911         return -1;
2912     }
2913 
2914     if (xbzrle_init()) {
2915         ram_state_cleanup(rsp);
2916         return -1;
2917     }
2918 
2919     ram_init_bitmaps(*rsp);
2920 
2921     return 0;
2922 }
2923 
2924 static void ram_state_resume_prepare(RAMState *rs, QEMUFile *out)
2925 {
2926     RAMBlock *block;
2927     uint64_t pages = 0;
2928 
2929     /*
2930      * Postcopy does not use xbzrle/compression, so no need for that.
2931      * Also, since the source is already halted, we don't need to care
2932      * about dirty page logging either.
2933      */
2934 
2935     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
2936         pages += bitmap_count_one(block->bmap,
2937                                   block->used_length >> TARGET_PAGE_BITS);
2938     }
2939 
2940     /* This may not be aligned with current bitmaps. Recalculate. */
2941     rs->migration_dirty_pages = pages;
2942 
2943     ram_state_reset(rs);
2944 
2945     /* Update RAMState cache of output QEMUFile */
2946     rs->pss[RAM_CHANNEL_PRECOPY].pss_channel = out;
2947 
2948     trace_ram_state_resume_prepare(pages);
2949 }
2950 
2951 /*
2952  * This function clears bits of the free pages reported by the caller from the
2953  * migration dirty bitmap. @addr is the host address corresponding to the
2954  * start of the contiguous guest free pages, and @len is the total bytes of
2955  * those pages.
2956  */
2957 void qemu_guest_free_page_hint(void *addr, size_t len)
2958 {
2959     RAMBlock *block;
2960     ram_addr_t offset;
2961     size_t used_len, start, npages;
2962     MigrationState *s = migrate_get_current();
2963 
2964     /* This function is currently expected to be used during live migration */
2965     if (!migration_is_setup_or_active(s->state)) {
2966         return;
2967     }
2968 
2969     for (; len > 0; len -= used_len, addr += used_len) {
2970         block = qemu_ram_block_from_host(addr, false, &offset);
2971         if (unlikely(!block || offset >= block->used_length)) {
2972             /*
2973              * The implementation might not support RAMBlock resize during
2974              * live migration, but it could happen in theory with future
2975              * updates. So we add a check here to capture that case.
2976              */
2977             error_report_once("%s unexpected error", __func__);
2978             return;
2979         }
2980 
2981         if (len <= block->used_length - offset) {
2982             used_len = len;
2983         } else {
2984             used_len = block->used_length - offset;
2985         }
2986 
2987         start = offset >> TARGET_PAGE_BITS;
2988         npages = used_len >> TARGET_PAGE_BITS;
2989 
2990         qemu_mutex_lock(&ram_state->bitmap_mutex);
2991         /*
2992          * The skipped free pages are equivalent to having been sent from
2993          * clear_bmap's perspective, so clear the bits from the memory region
2994          * bitmap which are initially set. Otherwise those skipped pages will
2995          * be sent in the next round after syncing from the memory region bitmap.
2996          */
2997         migration_clear_memory_region_dirty_bitmap_range(block, start, npages);
2998         ram_state->migration_dirty_pages -=
2999                       bitmap_count_one_with_offset(block->bmap, start, npages);
3000         bitmap_clear(block->bmap, start, npages);
3001         qemu_mutex_unlock(&ram_state->bitmap_mutex);
3002     }
3003 }
3004 
3005 /*
3006  * Each of ram_save_setup, ram_save_iterate and ram_save_complete has a
3007  * long-running RCU critical section.  When RCU reclaims in the code
3008  * start to become numerous it will be necessary to reduce the
3009  * granularity of these critical sections.
3010  */
3011 
3012 /**
3013  * ram_save_setup: Setup RAM for migration
3014  *
3015  * Returns zero to indicate success and negative for error
3016  *
3017  * @f: QEMUFile where to send the data
3018  * @opaque: RAMState pointer
3019  */
3020 static int ram_save_setup(QEMUFile *f, void *opaque)
3021 {
3022     RAMState **rsp = opaque;
3023     RAMBlock *block;
3024     int ret;
3025 
3026     if (compress_threads_save_setup()) {
3027         return -1;
3028     }
3029 
3030     /* migration has already set up the bitmap, reuse it. */
3031     if (!migration_in_colo_state()) {
3032         if (ram_init_all(rsp) != 0) {
3033             compress_threads_save_cleanup();
3034             return -1;
3035         }
3036     }
3037     (*rsp)->pss[RAM_CHANNEL_PRECOPY].pss_channel = f;
3038 
3039     WITH_RCU_READ_LOCK_GUARD() {
3040         qemu_put_be64(f, ram_bytes_total_with_ignored()
3041                          | RAM_SAVE_FLAG_MEM_SIZE);
3042 
3043         RAMBLOCK_FOREACH_MIGRATABLE(block) {
3044             qemu_put_byte(f, strlen(block->idstr));
3045             qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
3046             qemu_put_be64(f, block->used_length);
3047             if (migrate_postcopy_ram() && block->page_size !=
3048                                           qemu_host_page_size) {
3049                 qemu_put_be64(f, block->page_size);
3050             }
3051             if (migrate_ignore_shared()) {
3052                 qemu_put_be64(f, block->mr->addr);
3053             }
3054         }
3055     }
3056 
3057     ram_control_before_iterate(f, RAM_CONTROL_SETUP);
3058     ram_control_after_iterate(f, RAM_CONTROL_SETUP);
3059 
3060     migration_ops = g_malloc0(sizeof(MigrationOps));
3061     migration_ops->ram_save_target_page = ram_save_target_page_legacy;
3062     ret = multifd_send_sync_main(f);
3063     if (ret < 0) {
3064         return ret;
3065     }
3066 
3067     if (!migrate_multifd_flush_after_each_section()) {
3068         qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
3069     }
3070 
3071     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
3072     qemu_fflush(f);
3073 
3074     return 0;
3075 }
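/*
 * Editor's sketch of the stream emitted by ram_save_setup() above
 * (all integers big-endian):
 *   be64  total_ram_size | RAM_SAVE_FLAG_MEM_SIZE
 *   for each migratable RAMBlock:
 *     u8    strlen(idstr)
 *     bytes idstr
 *     be64  used_length
 *     be64  page_size   (only with postcopy-ram and non-host page size)
 *     be64  mr->addr    (only with ignore-shared)
 *   be64  RAM_SAVE_FLAG_MULTIFD_FLUSH   (unless flush-after-each-section)
 *   be64  RAM_SAVE_FLAG_EOS
 */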
3076 
3077 /**
3078  * ram_save_iterate: iterative stage for migration
3079  *
3080  * Returns zero to indicate success and negative for error
3081  *
3082  * @f: QEMUFile where to send the data
3083  * @opaque: RAMState pointer
3084  */
3085 static int ram_save_iterate(QEMUFile *f, void *opaque)
3086 {
3087     RAMState **temp = opaque;
3088     RAMState *rs = *temp;
3089     int ret = 0;
3090     int i;
3091     int64_t t0;
3092     int done = 0;
3093 
3094     if (blk_mig_bulk_active()) {
3095         /* Avoid transferring RAM during the bulk phase of block migration, as
3096          * the bulk phase will usually take a long time and transferring
3097          * RAM updates during that time is pointless. */
3098         goto out;
3099     }
3100 
3101     /*
3102      * We'll hold this lock a little bit long, but it's okay for two reasons.
3103      * Firstly, the only other thread that may take it is the one calling
3104      * qemu_guest_free_page_hint(), which should be rare; secondly, see
3105      * MAX_WAIT (if curious, further see commit 4508bd9ed8053ce) below, which
3106      * guarantees that we'll release it on a regular basis.
3107      */
3108     qemu_mutex_lock(&rs->bitmap_mutex);
3109     WITH_RCU_READ_LOCK_GUARD() {
3110         if (ram_list.version != rs->last_version) {
3111             ram_state_reset(rs);
3112         }
3113 
3114         /* Read version before ram_list.blocks */
3115         smp_rmb();
3116 
3117         ram_control_before_iterate(f, RAM_CONTROL_ROUND);
3118 
3119         t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
3120         i = 0;
3121         while ((ret = migration_rate_exceeded(f)) == 0 ||
3122                postcopy_has_request(rs)) {
3123             int pages;
3124 
3125             if (qemu_file_get_error(f)) {
3126                 break;
3127             }
3128 
3129             pages = ram_find_and_save_block(rs);
3130             /* no more pages to send */
3131             if (pages == 0) {
3132                 done = 1;
3133                 break;
3134             }
3135 
3136             if (pages < 0) {
3137                 qemu_file_set_error(f, pages);
3138                 break;
3139             }
3140 
3141             rs->target_page_count += pages;
3142 
3143             /*
3144              * During postcopy, it is necessary to make sure one whole host
3145              * page is sent in one chunk.
3146              */
3147             if (migrate_postcopy_ram()) {
3148                 ram_flush_compressed_data(rs);
3149             }
3150 
3151             /*
3152              * We want to check on the 1st loop iteration, just in case it was
3153              * the 1st time and we had to sync the dirty bitmap.
3154              * qemu_clock_get_ns() is a bit expensive, so we only check every
3155              * few iterations.
3156              */
3157             if ((i & 63) == 0) {
3158                 uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) /
3159                               1000000;
3160                 if (t1 > MAX_WAIT) {
3161                     trace_ram_save_iterate_big_wait(t1, i);
3162                     break;
3163                 }
3164             }
3165             i++;
3166         }
3167     }
3168     qemu_mutex_unlock(&rs->bitmap_mutex);
3169 
3170     /*
3171      * Must occur before EOS (or any QEMUFile operation)
3172      * because of RDMA protocol.
3173      */
3174     ram_control_after_iterate(f, RAM_CONTROL_ROUND);
3175 
3176 out:
3177     if (ret >= 0
3178         && migration_is_setup_or_active(migrate_get_current()->state)) {
3179         if (migrate_multifd_flush_after_each_section()) {
3180             ret = multifd_send_sync_main(rs->pss[RAM_CHANNEL_PRECOPY].pss_channel);
3181             if (ret < 0) {
3182                 return ret;
3183             }
3184         }
3185 
3186         qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
3187         qemu_fflush(f);
3188         ram_transferred_add(8);
3189 
3190         ret = qemu_file_get_error(f);
3191     }
3192     if (ret < 0) {
3193         return ret;
3194     }
3195 
3196     return done;
3197 }
3198 
3199 /**
3200  * ram_save_complete: function called to send the remaining amount of ram
3201  *
3202  * Returns zero to indicate success or negative on error
3203  *
3204  * Called with iothread lock
3205  *
3206  * @f: QEMUFile where to send the data
3207  * @opaque: RAMState pointer
3208  */
3209 static int ram_save_complete(QEMUFile *f, void *opaque)
3210 {
3211     RAMState **temp = opaque;
3212     RAMState *rs = *temp;
3213     int ret = 0;
3214 
3215     rs->last_stage = !migration_in_colo_state();
3216 
3217     WITH_RCU_READ_LOCK_GUARD() {
3218         if (!migration_in_postcopy()) {
3219             migration_bitmap_sync_precopy(rs, true);
3220         }
3221 
3222         ram_control_before_iterate(f, RAM_CONTROL_FINISH);
3223 
3224         /* try transferring iterative blocks of memory */
3225 
3226         /* flush all remaining blocks regardless of rate limiting */
3227         qemu_mutex_lock(&rs->bitmap_mutex);
3228         while (true) {
3229             int pages;
3230 
3231             pages = ram_find_and_save_block(rs);
3232             /* no more blocks to send */
3233             if (pages == 0) {
3234                 break;
3235             }
3236             if (pages < 0) {
3237                 ret = pages;
3238                 break;
3239             }
3240         }
3241         qemu_mutex_unlock(&rs->bitmap_mutex);
3242 
3243         ram_flush_compressed_data(rs);
3244         ram_control_after_iterate(f, RAM_CONTROL_FINISH);
3245     }
3246 
3247     if (ret < 0) {
3248         return ret;
3249     }
3250 
3251     ret = multifd_send_sync_main(rs->pss[RAM_CHANNEL_PRECOPY].pss_channel);
3252     if (ret < 0) {
3253         return ret;
3254     }
3255 
3256     if (!migrate_multifd_flush_after_each_section()) {
3257         qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
3258     }
3259     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
3260     qemu_fflush(f);
3261 
3262     return 0;
3263 }
3264 
3265 static void ram_state_pending_estimate(void *opaque, uint64_t *must_precopy,
3266                                        uint64_t *can_postcopy)
3267 {
3268     RAMState **temp = opaque;
3269     RAMState *rs = *temp;
3270 
3271     uint64_t remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
3272 
3273     if (migrate_postcopy_ram()) {
3274         /* We can do postcopy, and all the data is postcopiable */
3275         *can_postcopy += remaining_size;
3276     } else {
3277         *must_precopy += remaining_size;
3278     }
3279 }
3280 
3281 static void ram_state_pending_exact(void *opaque, uint64_t *must_precopy,
3282                                     uint64_t *can_postcopy)
3283 {
3284     MigrationState *s = migrate_get_current();
3285     RAMState **temp = opaque;
3286     RAMState *rs = *temp;
3287 
3288     uint64_t remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
3289 
3290     if (!migration_in_postcopy() && remaining_size < s->threshold_size) {
3291         qemu_mutex_lock_iothread();
3292         WITH_RCU_READ_LOCK_GUARD() {
3293             migration_bitmap_sync_precopy(rs, false);
3294         }
3295         qemu_mutex_unlock_iothread();
3296         remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
3297     }
3298 
3299     if (migrate_postcopy_ram()) {
3300         /* We can do postcopy, and all the data is postcopiable */
3301         *can_postcopy += remaining_size;
3302     } else {
3303         *must_precopy += remaining_size;
3304     }
3305 }
3306 
3307 static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
3308 {
3309     unsigned int xh_len;
3310     int xh_flags;
3311     uint8_t *loaded_data;
3312 
3313     /* extract RLE header */
3314     xh_flags = qemu_get_byte(f);
3315     xh_len = qemu_get_be16(f);
3316 
3317     if (xh_flags != ENCODING_FLAG_XBZRLE) {
3318         error_report("Failed to load XBZRLE page - wrong compression!");
3319         return -1;
3320     }
3321 
3322     if (xh_len > TARGET_PAGE_SIZE) {
3323         error_report("Failed to load XBZRLE page - len overflow!");
3324         return -1;
3325     }
3326     loaded_data = XBZRLE.decoded_buf;
3327     /* load data and decode */
3328     /* it can change loaded_data to point to an internal buffer */
3329     qemu_get_buffer_in_place(f, &loaded_data, xh_len);
3330 
3331     /* decode RLE */
3332     if (xbzrle_decode_buffer(loaded_data, xh_len, host,
3333                              TARGET_PAGE_SIZE) == -1) {
3334         error_report("Failed to load XBZRLE page - decode error!");
3335         return -1;
3336     }
3337 
3338     return 0;
3339 }
3340 
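/*
 * Illustrative sketch (not part of the original file): the XBZRLE record
 * parsed by load_xbzrle() above consists of a one-byte encoding flag
 * (ENCODING_FLAG_XBZRLE), a big-endian 16-bit encoded length, and the encoded
 * bytes themselves.  A hypothetical sender-side emitter mirroring that layout
 * could look like this; the real encoder lives in the save path of this file.
 */
static void G_GNUC_UNUSED xbzrle_record_emit_example(QEMUFile *f,
                                                     const uint8_t *encoded,
                                                     uint16_t len)
{
    qemu_put_byte(f, ENCODING_FLAG_XBZRLE);
    qemu_put_be16(f, len);
    qemu_put_buffer(f, encoded, len);
}
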
3341 /**
3342  * ram_block_from_stream: read a RAMBlock id from the migration stream
3343  *
3344  * Must be called from within a rcu critical section.
3345  *
3346  * Returns a pointer from within the RCU-protected ram_list.
3347  *
3348  * @mis: the migration incoming state pointer
3349  * @f: QEMUFile where to read the data from
3350  * @flags: Page flags (mostly to see if it's a continuation of previous block)
3351  * @channel: the channel we're using
3352  */
3353 static inline RAMBlock *ram_block_from_stream(MigrationIncomingState *mis,
3354                                               QEMUFile *f, int flags,
3355                                               int channel)
3356 {
3357     RAMBlock *block = mis->last_recv_block[channel];
3358     char id[256];
3359     uint8_t len;
3360 
3361     if (flags & RAM_SAVE_FLAG_CONTINUE) {
3362         if (!block) {
3363             error_report("Ack, bad migration stream!");
3364             return NULL;
3365         }
3366         return block;
3367     }
3368 
3369     len = qemu_get_byte(f);
3370     qemu_get_buffer(f, (uint8_t *)id, len);
3371     id[len] = 0;
3372 
3373     block = qemu_ram_block_by_name(id);
3374     if (!block) {
3375         error_report("Can't find block %s", id);
3376         return NULL;
3377     }
3378 
3379     if (migrate_ram_is_ignored(block)) {
3380         error_report("block %s should not be migrated !", id);
3381         return NULL;
3382     }
3383 
3384     mis->last_recv_block[channel] = block;
3385 
3386     return block;
3387 }
3388 
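/*
 * Illustrative sketch (not part of the original file): the block id parsed by
 * ram_block_from_stream() above is framed as a one-byte length followed by
 * the (unterminated) id string, and is omitted entirely when
 * RAM_SAVE_FLAG_CONTINUE is set because the previous block is reused.  A
 * hypothetical sender-side counterpart mirroring that framing:
 */
static void G_GNUC_UNUSED ram_block_id_emit_example(QEMUFile *f,
                                                    const RAMBlock *block,
                                                    int flags)
{
    if (!(flags & RAM_SAVE_FLAG_CONTINUE)) {
        size_t len = strlen(block->idstr);

        qemu_put_byte(f, len);
        qemu_put_buffer(f, (const uint8_t *)block->idstr, len);
    }
}
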
3389 static inline void *host_from_ram_block_offset(RAMBlock *block,
3390                                                ram_addr_t offset)
3391 {
3392     if (!offset_in_ramblock(block, offset)) {
3393         return NULL;
3394     }
3395 
3396     return block->host + offset;
3397 }
3398 
3399 static void *host_page_from_ram_block_offset(RAMBlock *block,
3400                                              ram_addr_t offset)
3401 {
3402     /* Note: Explicitly no check against offset_in_ramblock(). */
3403     return (void *)QEMU_ALIGN_DOWN((uintptr_t)(block->host + offset),
3404                                    block->page_size);
3405 }
3406 
3407 static ram_addr_t host_page_offset_from_ram_block_offset(RAMBlock *block,
3408                                                          ram_addr_t offset)
3409 {
3410     return ((uintptr_t)block->host + offset) & (block->page_size - 1);
3411 }
3412 
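/*
 * Illustrative sketch (not part of the original file): for a hugetlbfs-backed
 * block with, say, a 2 MiB block->page_size, the two helpers above split a
 * RAM block offset into the start of the containing host page and the offset
 * within it.  Assuming block->host is aligned to block->page_size (as it is
 * for hugetlbfs mappings), the two parts recombine to the original address:
 */
static void G_GNUC_UNUSED host_page_split_example(RAMBlock *block,
                                                  ram_addr_t offset)
{
    void *host_page = host_page_from_ram_block_offset(block, offset);
    ram_addr_t in_page = host_page_offset_from_ram_block_offset(block, offset);

    assert((uint8_t *)host_page + in_page == block->host + offset);
}
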
3413 void colo_record_bitmap(RAMBlock *block, ram_addr_t *normal, uint32_t pages)
3414 {
3415     qemu_mutex_lock(&ram_state->bitmap_mutex);
3416     for (int i = 0; i < pages; i++) {
3417         ram_addr_t offset = normal[i];
3418         ram_state->migration_dirty_pages += !test_and_set_bit(
3419                                                 offset >> TARGET_PAGE_BITS,
3420                                                 block->bmap);
3421     }
3422     qemu_mutex_unlock(&ram_state->bitmap_mutex);
3423 }
3424 
3425 static inline void *colo_cache_from_block_offset(RAMBlock *block,
3426                              ram_addr_t offset, bool record_bitmap)
3427 {
3428     if (!offset_in_ramblock(block, offset)) {
3429         return NULL;
3430     }
3431     if (!block->colo_cache) {
3432         error_report("%s: colo_cache is NULL in block :%s",
3433                      __func__, block->idstr);
3434         return NULL;
3435     }
3436 
3437     /*
3438     * During a COLO checkpoint, we need a bitmap of these migrated pages.
3439     * It helps us to decide which pages in the ram cache should be flushed
3440     * into the VM's RAM later.
3441     */
3442     if (record_bitmap) {
3443         colo_record_bitmap(block, &offset, 1);
3444     }
3445     return block->colo_cache + offset;
3446 }
3447 
3448 /**
3449  * ram_handle_compressed: handle the zero page case
3450  *
3451  * If a page (or a whole RDMA chunk) has been
3452  * determined to be zero, then zap it.
3453  *
3454  * @host: host address for the zero page
3455  * @ch: the byte the page is filled with.  We only support zero
3456  * @size: size of the zero page
3457  */
3458 void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
3459 {
3460     if (ch != 0 || !buffer_is_zero(host, size)) {
3461         memset(host, ch, size);
3462     }
3463 }
3464 
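/*
 * Illustrative sketch (not part of the original file): the helper above only
 * touches memory when needed.  In the hypothetical sequence below the second
 * call is a no-op, because the fill byte is zero and the page already reads
 * back as zero:
 */
static void G_GNUC_UNUSED ram_handle_compressed_example(void *host)
{
    memset(host, 0x5a, TARGET_PAGE_SIZE);              /* dirty the page   */
    ram_handle_compressed(host, 0, TARGET_PAGE_SIZE);  /* memsets it to 0  */
    ram_handle_compressed(host, 0, TARGET_PAGE_SIZE);  /* skips the memset */
}
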
3465 static void colo_init_ram_state(void)
3466 {
3467     ram_state_init(&ram_state);
3468 }
3469 
3470 /*
3471  * colo cache: this is for the secondary VM.  We cache the whole
3472  * memory of the secondary VM; the global lock needs to be held
3473  * to call this helper.
3474  */
3475 int colo_init_ram_cache(void)
3476 {
3477     RAMBlock *block;
3478 
3479     WITH_RCU_READ_LOCK_GUARD() {
3480         RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3481             block->colo_cache = qemu_anon_ram_alloc(block->used_length,
3482                                                     NULL, false, false);
3483             if (!block->colo_cache) {
3484                 error_report("%s: Can't alloc memory for COLO cache of block %s, "
3485                              "size 0x" RAM_ADDR_FMT, __func__, block->idstr,
3486                              block->used_length);
3487                 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3488                     if (block->colo_cache) {
3489                         qemu_anon_ram_free(block->colo_cache, block->used_length);
3490                         block->colo_cache = NULL;
3491                     }
3492                 }
3493                 return -errno;
3494             }
3495             if (!machine_dump_guest_core(current_machine)) {
3496                 qemu_madvise(block->colo_cache, block->used_length,
3497                              QEMU_MADV_DONTDUMP);
3498             }
3499         }
3500     }
3501 
3502     /*
3503     * Record the dirty pages that were sent by the PVM; we use this dirty bitmap
3504     * to decide which pages in the cache should be flushed into the SVM's RAM.
3505     * Here we use the same name 'ram_bitmap' as for migration.
3506     */
3507     if (ram_bytes_total()) {
3508         RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3509             unsigned long pages = block->max_length >> TARGET_PAGE_BITS;
3510             block->bmap = bitmap_new(pages);
3511         }
3512     }
3513 
3514     colo_init_ram_state();
3515     return 0;
3516 }
3517 
3518 /* TODO: duplicated with ram_init_bitmaps */
3519 void colo_incoming_start_dirty_log(void)
3520 {
3521     RAMBlock *block = NULL;
3522     /* For memory_global_dirty_log_start below. */
3523     qemu_mutex_lock_iothread();
3524     qemu_mutex_lock_ramlist();
3525 
3526     memory_global_dirty_log_sync(false);
3527     WITH_RCU_READ_LOCK_GUARD() {
3528         RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3529             ramblock_sync_dirty_bitmap(ram_state, block);
3530             /* Discard this dirty bitmap record */
3531             bitmap_zero(block->bmap, block->max_length >> TARGET_PAGE_BITS);
3532         }
3533         memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION);
3534     }
3535     ram_state->migration_dirty_pages = 0;
3536     qemu_mutex_unlock_ramlist();
3537     qemu_mutex_unlock_iothread();
3538 }
3539 
3540 /* The global lock needs to be held to call this helper */
3541 void colo_release_ram_cache(void)
3542 {
3543     RAMBlock *block;
3544 
3545     memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION);
3546     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3547         g_free(block->bmap);
3548         block->bmap = NULL;
3549     }
3550 
3551     WITH_RCU_READ_LOCK_GUARD() {
3552         RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3553             if (block->colo_cache) {
3554                 qemu_anon_ram_free(block->colo_cache, block->used_length);
3555                 block->colo_cache = NULL;
3556             }
3557         }
3558     }
3559     ram_state_cleanup(&ram_state);
3560 }
3561 
3562 /**
3563  * ram_load_setup: Setup RAM for migration incoming side
3564  *
3565  * Returns zero to indicate success and negative for error
3566  *
3567  * @f: QEMUFile where to receive the data
3568  * @opaque: RAMState pointer
3569  */
3570 static int ram_load_setup(QEMUFile *f, void *opaque)
3571 {
3572     xbzrle_load_setup();
3573     ramblock_recv_map_init();
3574 
3575     return 0;
3576 }
3577 
3578 static int ram_load_cleanup(void *opaque)
3579 {
3580     RAMBlock *rb;
3581 
3582     RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
3583         qemu_ram_block_writeback(rb);
3584     }
3585 
3586     xbzrle_load_cleanup();
3587 
3588     RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
3589         g_free(rb->receivedmap);
3590         rb->receivedmap = NULL;
3591     }
3592 
3593     return 0;
3594 }
3595 
3596 /**
3597  * ram_postcopy_incoming_init: allocate postcopy data structures
3598  *
3599  * Returns 0 for success and negative if there was an error
3600  *
3601  * @mis: current migration incoming state
3602  *
3603  * Allocate data structures etc. needed by incoming migration with
3604  * postcopy-ram. postcopy-ram's similarly named
3605  * postcopy_ram_incoming_init does the work.
3606  */
3607 int ram_postcopy_incoming_init(MigrationIncomingState *mis)
3608 {
3609     return postcopy_ram_incoming_init(mis);
3610 }
3611 
3612 /**
3613  * ram_load_postcopy: load a page in postcopy case
3614  *
3615  * Returns 0 for success or -errno in case of error
3616  *
3617  * Called in postcopy mode by ram_load().
3618  * rcu_read_lock is taken prior to this being called.
3619  *
3620  * @f: QEMUFile to read the data from
3621  * @channel: the channel to use for loading
3622  */
3623 int ram_load_postcopy(QEMUFile *f, int channel)
3624 {
3625     int flags = 0, ret = 0;
3626     bool place_needed = false;
3627     bool matches_target_page_size = false;
3628     MigrationIncomingState *mis = migration_incoming_get_current();
3629     PostcopyTmpPage *tmp_page = &mis->postcopy_tmp_pages[channel];
3630 
3631     while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
3632         ram_addr_t addr;
3633         void *page_buffer = NULL;
3634         void *place_source = NULL;
3635         RAMBlock *block = NULL;
3636         uint8_t ch;
3637         int len;
3638 
3639         addr = qemu_get_be64(f);
3640 
3641         /*
3642          * If there is a qemu file error, we should stop here; "addr"
3643          * may then be invalid.
3644          */
3645         ret = qemu_file_get_error(f);
3646         if (ret) {
3647             break;
3648         }
3649 
3650         flags = addr & ~TARGET_PAGE_MASK;
3651         addr &= TARGET_PAGE_MASK;
3652 
3653         trace_ram_load_postcopy_loop(channel, (uint64_t)addr, flags);
3654         if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
3655                      RAM_SAVE_FLAG_COMPRESS_PAGE)) {
3656             block = ram_block_from_stream(mis, f, flags, channel);
3657             if (!block) {
3658                 ret = -EINVAL;
3659                 break;
3660             }
3661 
3662             /*
3663              * Relying on used_length is racy and can result in false positives.
3664              * We might place pages beyond used_length in case RAM was shrunk
3665              * while in postcopy, which is fine - trying to place via
3666              * UFFDIO_COPY/UFFDIO_ZEROPAGE will never segfault.
3667              */
3668             if (!block->host || addr >= block->postcopy_length) {
3669                 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
3670                 ret = -EINVAL;
3671                 break;
3672             }
3673             tmp_page->target_pages++;
3674             matches_target_page_size = block->page_size == TARGET_PAGE_SIZE;
3675             /*
3676              * Postcopy requires that we place whole host pages atomically;
3677              * these may be huge pages for RAMBlocks that are backed by
3678              * hugetlbfs.
3679              * To make it atomic, the data is read into a temporary page
3680              * that's moved into place later.
3681              * The migration protocol uses (possibly smaller) target pages;
3682              * however, the source ensures it always sends all the components
3683              * of a host page in one chunk.
3684              */
3685             page_buffer = tmp_page->tmp_huge_page +
3686                           host_page_offset_from_ram_block_offset(block, addr);
3687             /* If all target pages are zero then we can optimise the place */
3688             if (tmp_page->target_pages == 1) {
3689                 tmp_page->host_addr =
3690                     host_page_from_ram_block_offset(block, addr);
3691             } else if (tmp_page->host_addr !=
3692                        host_page_from_ram_block_offset(block, addr)) {
3693                 /* not the 1st target page within the host page */
3694                 error_report("Non-same host page detected on channel %d: "
3695                              "Target host page %p, received host page %p "
3696                              "(rb %s offset 0x"RAM_ADDR_FMT" target_pages %d)",
3697                              channel, tmp_page->host_addr,
3698                              host_page_from_ram_block_offset(block, addr),
3699                              block->idstr, addr, tmp_page->target_pages);
3700                 ret = -EINVAL;
3701                 break;
3702             }
3703 
3704             /*
3705              * If it's the last part of a host page then we place the host
3706              * page
3707              */
3708             if (tmp_page->target_pages ==
3709                 (block->page_size / TARGET_PAGE_SIZE)) {
3710                 place_needed = true;
3711             }
3712             place_source = tmp_page->tmp_huge_page;
3713         }
3714 
3715         switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
3716         case RAM_SAVE_FLAG_ZERO:
3717             ch = qemu_get_byte(f);
3718             /*
3719              * We can skip setting page_buffer when this is a zero page
3720              * and (block->page_size == TARGET_PAGE_SIZE).
3721              */
3722             if (ch || !matches_target_page_size) {
3723                 memset(page_buffer, ch, TARGET_PAGE_SIZE);
3724             }
3725             if (ch) {
3726                 tmp_page->all_zero = false;
3727             }
3728             break;
3729 
3730         case RAM_SAVE_FLAG_PAGE:
3731             tmp_page->all_zero = false;
3732             if (!matches_target_page_size) {
3733                 /* For huge pages, we always use the temporary buffer */
3734                 qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE);
3735             } else {
3736                 /*
3737                  * For small pages that match the target page size, we
3738                  * avoid the qemu_file copy.  Instead we directly use
3739                  * the buffer of QEMUFile to place the page.  Note: we
3740                  * cannot do any QEMUFile operation before using that
3741                  * buffer to make sure the buffer is valid when
3742                  * placing the page.
3743                  */
3744                 qemu_get_buffer_in_place(f, (uint8_t **)&place_source,
3745                                          TARGET_PAGE_SIZE);
3746             }
3747             break;
3748         case RAM_SAVE_FLAG_COMPRESS_PAGE:
3749             tmp_page->all_zero = false;
3750             len = qemu_get_be32(f);
3751             if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
3752                 error_report("Invalid compressed data length: %d", len);
3753                 ret = -EINVAL;
3754                 break;
3755             }
3756             decompress_data_with_multi_threads(f, page_buffer, len);
3757             break;
3758         case RAM_SAVE_FLAG_MULTIFD_FLUSH:
3759             multifd_recv_sync_main();
3760             break;
3761         case RAM_SAVE_FLAG_EOS:
3762             /* normal exit */
3763             if (migrate_multifd_flush_after_each_section()) {
3764                 multifd_recv_sync_main();
3765             }
3766             break;
3767         default:
3768             error_report("Unknown combination of migration flags: 0x%x"
3769                          " (postcopy mode)", flags);
3770             ret = -EINVAL;
3771             break;
3772         }
3773 
3774         /* Got the whole host page, wait for decompress before placing. */
3775         if (place_needed) {
3776             ret |= wait_for_decompress_done();
3777         }
3778 
3779         /* Detect any possible file errors */
3780         if (!ret && qemu_file_get_error(f)) {
3781             ret = qemu_file_get_error(f);
3782         }
3783 
3784         if (!ret && place_needed) {
3785             if (tmp_page->all_zero) {
3786                 ret = postcopy_place_page_zero(mis, tmp_page->host_addr, block);
3787             } else {
3788                 ret = postcopy_place_page(mis, tmp_page->host_addr,
3789                                           place_source, block);
3790             }
3791             place_needed = false;
3792             postcopy_temp_page_reset(tmp_page);
3793         }
3794     }
3795 
3796     return ret;
3797 }
3798 
3799 static bool postcopy_is_running(void)
3800 {
3801     PostcopyState ps = postcopy_state_get();
3802     return ps >= POSTCOPY_INCOMING_LISTENING && ps < POSTCOPY_INCOMING_END;
3803 }
3804 
3805 /*
3806  * Flush content of RAM cache into SVM's memory.
3807  * Only flush the pages that have been dirtied by the PVM, the SVM, or both.
3808  */
3809 void colo_flush_ram_cache(void)
3810 {
3811     RAMBlock *block = NULL;
3812     void *dst_host;
3813     void *src_host;
3814     unsigned long offset = 0;
3815 
3816     memory_global_dirty_log_sync(false);
3817     qemu_mutex_lock(&ram_state->bitmap_mutex);
3818     WITH_RCU_READ_LOCK_GUARD() {
3819         RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3820             ramblock_sync_dirty_bitmap(ram_state, block);
3821         }
3822     }
3823 
3824     trace_colo_flush_ram_cache_begin(ram_state->migration_dirty_pages);
3825     WITH_RCU_READ_LOCK_GUARD() {
3826         block = QLIST_FIRST_RCU(&ram_list.blocks);
3827 
3828         while (block) {
3829             unsigned long num = 0;
3830 
3831             offset = colo_bitmap_find_dirty(ram_state, block, offset, &num);
3832             if (!offset_in_ramblock(block,
3833                                     ((ram_addr_t)offset) << TARGET_PAGE_BITS)) {
3834                 offset = 0;
3835                 num = 0;
3836                 block = QLIST_NEXT_RCU(block, next);
3837             } else {
3838                 unsigned long i = 0;
3839 
3840                 for (i = 0; i < num; i++) {
3841                     migration_bitmap_clear_dirty(ram_state, block, offset + i);
3842                 }
3843                 dst_host = block->host
3844                          + (((ram_addr_t)offset) << TARGET_PAGE_BITS);
3845                 src_host = block->colo_cache
3846                          + (((ram_addr_t)offset) << TARGET_PAGE_BITS);
3847                 memcpy(dst_host, src_host, TARGET_PAGE_SIZE * num);
3848                 offset += num;
3849             }
3850         }
3851     }
3852     qemu_mutex_unlock(&ram_state->bitmap_mutex);
3853     trace_colo_flush_ram_cache_end();
3854 }
3855 
3856 /**
3857  * ram_load_precopy: load pages in precopy case
3858  *
3859  * Returns 0 for success or -errno in case of error
3860  *
3861  * Called in precopy mode by ram_load().
3862  * rcu_read_lock is taken prior to this being called.
3863  *
3864  * @f: QEMUFile to read the data from
3865  */
3866 static int ram_load_precopy(QEMUFile *f)
3867 {
3868     MigrationIncomingState *mis = migration_incoming_get_current();
3869     int flags = 0, ret = 0, invalid_flags = 0, len = 0, i = 0;
3870     /* ADVISE is earlier, it shows the source has the postcopy capability on */
3871     bool postcopy_advised = migration_incoming_postcopy_advised();
3872     if (!migrate_compress()) {
3873         invalid_flags |= RAM_SAVE_FLAG_COMPRESS_PAGE;
3874     }
3875 
3876     while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
3877         ram_addr_t addr, total_ram_bytes;
3878         void *host = NULL, *host_bak = NULL;
3879         uint8_t ch;
3880 
3881         /*
3882          * Yield periodically to let the main loop run, but an iteration of
3883          * the main loop is expensive, so only do it once every few iterations
3884          */
3885         if ((i & 32767) == 0 && qemu_in_coroutine()) {
3886             aio_co_schedule(qemu_get_current_aio_context(),
3887                             qemu_coroutine_self());
3888             qemu_coroutine_yield();
3889         }
3890         i++;
3891 
3892         addr = qemu_get_be64(f);
3893         flags = addr & ~TARGET_PAGE_MASK;
3894         addr &= TARGET_PAGE_MASK;
3895 
3896         if (flags & invalid_flags) {
3897             if (flags & invalid_flags & RAM_SAVE_FLAG_COMPRESS_PAGE) {
3898                 error_report("Received an unexpected compressed page");
3899             }
3900 
3901             ret = -EINVAL;
3902             break;
3903         }
3904 
3905         if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
3906                      RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
3907             RAMBlock *block = ram_block_from_stream(mis, f, flags,
3908                                                     RAM_CHANNEL_PRECOPY);
3909 
3910             host = host_from_ram_block_offset(block, addr);
3911             /*
3912              * After going into the COLO stage, we should not load pages
3913              * into the SVM's memory directly; we put them into colo_cache first.
3914              * NOTE: We need to keep a copy of the SVM's RAM in colo_cache.
3915              * Previously, we copied all this memory in the COLO preparation
3916              * stage, during which the VM had to be stopped, which is time-consuming.
3917              * Here we optimize it with a trick: back up every page during the
3918              * migration process while COLO is enabled.  Although this affects
3919              * migration speed, it clearly reduces the downtime of backing up
3920              * all of the SVM's memory in the COLO preparation stage.
3921              */
3922             if (migration_incoming_colo_enabled()) {
3923                 if (migration_incoming_in_colo_state()) {
3924                     /* In COLO stage, put all pages into cache temporarily */
3925                     host = colo_cache_from_block_offset(block, addr, true);
3926                 } else {
3927                    /*
3928                     * In the migration stage but before the COLO stage,
3929                     * put all pages into both the cache and the SVM's memory.
3930                     */
3931                     host_bak = colo_cache_from_block_offset(block, addr, false);
3932                 }
3933             }
3934             if (!host) {
3935                 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
3936                 ret = -EINVAL;
3937                 break;
3938             }
3939             if (!migration_incoming_in_colo_state()) {
3940                 ramblock_recv_bitmap_set(block, host);
3941             }
3942 
3943             trace_ram_load_loop(block->idstr, (uint64_t)addr, flags, host);
3944         }
3945 
3946         switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
3947         case RAM_SAVE_FLAG_MEM_SIZE:
3948             /* Synchronize RAM block list */
3949             total_ram_bytes = addr;
3950             while (!ret && total_ram_bytes) {
3951                 RAMBlock *block;
3952                 char id[256];
3953                 ram_addr_t length;
3954 
3955                 len = qemu_get_byte(f);
3956                 qemu_get_buffer(f, (uint8_t *)id, len);
3957                 id[len] = 0;
3958                 length = qemu_get_be64(f);
3959 
3960                 block = qemu_ram_block_by_name(id);
3961                 if (block && !qemu_ram_is_migratable(block)) {
3962                     error_report("block %s should not be migrated !", id);
3963                     ret = -EINVAL;
3964                 } else if (block) {
3965                     if (length != block->used_length) {
3966                         Error *local_err = NULL;
3967 
3968                         ret = qemu_ram_resize(block, length,
3969                                               &local_err);
3970                         if (local_err) {
3971                             error_report_err(local_err);
3972                         }
3973                     }
3974                     /* For postcopy we need to check hugepage sizes match */
3975                     if (postcopy_advised && migrate_postcopy_ram() &&
3976                         block->page_size != qemu_host_page_size) {
3977                         uint64_t remote_page_size = qemu_get_be64(f);
3978                         if (remote_page_size != block->page_size) {
3979                             error_report("Mismatched RAM page size %s "
3980                                          "(local) %zd != %" PRId64,
3981                                          id, block->page_size,
3982                                          remote_page_size);
3983                             ret = -EINVAL;
3984                         }
3985                     }
3986                     if (migrate_ignore_shared()) {
3987                         hwaddr addr2 = qemu_get_be64(f);
3988                         if (migrate_ram_is_ignored(block) &&
3989                             block->mr->addr != addr2) {
3990                             error_report("Mismatched GPAs for block %s "
3991                                          "%" PRId64 " != %" PRId64,
3992                                          id, (uint64_t)addr2,
3993                                          (uint64_t)block->mr->addr);
3994                             ret = -EINVAL;
3995                         }
3996                     }
3997                     ram_control_load_hook(f, RAM_CONTROL_BLOCK_REG,
3998                                           block->idstr);
3999                 } else {
4000                     error_report("Unknown ramblock \"%s\", cannot "
4001                                  "accept migration", id);
4002                     ret = -EINVAL;
4003                 }
4004 
4005                 total_ram_bytes -= length;
4006             }
4007             break;
4008 
4009         case RAM_SAVE_FLAG_ZERO:
4010             ch = qemu_get_byte(f);
4011             ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
4012             break;
4013 
4014         case RAM_SAVE_FLAG_PAGE:
4015             qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
4016             break;
4017 
4018         case RAM_SAVE_FLAG_COMPRESS_PAGE:
4019             len = qemu_get_be32(f);
4020             if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
4021                 error_report("Invalid compressed data length: %d", len);
4022                 ret = -EINVAL;
4023                 break;
4024             }
4025             decompress_data_with_multi_threads(f, host, len);
4026             break;
4027 
4028         case RAM_SAVE_FLAG_XBZRLE:
4029             if (load_xbzrle(f, addr, host) < 0) {
4030                 error_report("Failed to decompress XBZRLE page at "
4031                              RAM_ADDR_FMT, addr);
4032                 ret = -EINVAL;
4033                 break;
4034             }
4035             break;
4036         case RAM_SAVE_FLAG_MULTIFD_FLUSH:
4037             multifd_recv_sync_main();
4038             break;
4039         case RAM_SAVE_FLAG_EOS:
4040             /* normal exit */
4041             if (migrate_multifd_flush_after_each_section()) {
4042                 multifd_recv_sync_main();
4043             }
4044             break;
4045         case RAM_SAVE_FLAG_HOOK:
4046             ram_control_load_hook(f, RAM_CONTROL_HOOK, NULL);
4047             break;
4048         default:
4049             error_report("Unknown combination of migration flags: 0x%x", flags);
4050             ret = -EINVAL;
4051         }
4052         if (!ret) {
4053             ret = qemu_file_get_error(f);
4054         }
4055         if (!ret && host_bak) {
4056             memcpy(host_bak, host, TARGET_PAGE_SIZE);
4057         }
4058     }
4059 
4060     ret |= wait_for_decompress_done();
4061     return ret;
4062 }
4063 
4064 static int ram_load(QEMUFile *f, void *opaque, int version_id)
4065 {
4066     int ret = 0;
4067     static uint64_t seq_iter;
4068     /*
4069      * If the system is running in postcopy mode, page inserts to host
4070      * memory must be atomic.
4071      */
4072     bool postcopy_running = postcopy_is_running();
4073 
4074     seq_iter++;
4075 
4076     if (version_id != 4) {
4077         return -EINVAL;
4078     }
4079 
4080     /*
4081      * This RCU critical section can be very long running.
4082      * When RCU reclaim operations in the code become numerous,
4083      * it will be necessary to reduce the granularity of this
4084      * critical section.
4085      */
4086     WITH_RCU_READ_LOCK_GUARD() {
4087         if (postcopy_running) {
4088             /*
4089              * Note!  Here RAM_CHANNEL_PRECOPY is the precopy channel of
4090              * postcopy migration, we have another RAM_CHANNEL_POSTCOPY to
4091              * service fast page faults.
4092              */
4093             ret = ram_load_postcopy(f, RAM_CHANNEL_PRECOPY);
4094         } else {
4095             ret = ram_load_precopy(f);
4096         }
4097     }
4098     trace_ram_load_complete(ret, seq_iter);
4099 
4100     return ret;
4101 }
4102 
4103 static bool ram_has_postcopy(void *opaque)
4104 {
4105     RAMBlock *rb;
4106     RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
4107         if (ramblock_is_pmem(rb)) {
4108             info_report("Block: %s, host: %p is an nvdimm memory, postcopy "
4109                          "is not supported now!", rb->idstr, rb->host);
4110             return false;
4111         }
4112     }
4113 
4114     return migrate_postcopy_ram();
4115 }
4116 
4117 /* Sync all the dirty bitmap with destination VM.  */
4118 static int ram_dirty_bitmap_sync_all(MigrationState *s, RAMState *rs)
4119 {
4120     RAMBlock *block;
4121     QEMUFile *file = s->to_dst_file;
4122     int ramblock_count = 0;
4123 
4124     trace_ram_dirty_bitmap_sync_start();
4125 
4126     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
4127         qemu_savevm_send_recv_bitmap(file, block->idstr);
4128         trace_ram_dirty_bitmap_request(block->idstr);
4129         ramblock_count++;
4130     }
4131 
4132     trace_ram_dirty_bitmap_sync_wait();
4133 
4134     /* Wait until all the ramblocks' dirty bitmaps are synced */
4135     while (ramblock_count--) {
4136         qemu_sem_wait(&s->rp_state.rp_sem);
4137     }
4138 
4139     trace_ram_dirty_bitmap_sync_complete();
4140 
4141     return 0;
4142 }
4143 
4144 static void ram_dirty_bitmap_reload_notify(MigrationState *s)
4145 {
4146     qemu_sem_post(&s->rp_state.rp_sem);
4147 }
4148 
4149 /*
4150  * Read the received bitmap and invert it to use as the initial dirty bitmap.
4151  * This is only used when postcopy migration is paused and wants
4152  * to resume from a middle point.
4153  */
4154 int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *block)
4155 {
4156     int ret = -EINVAL;
4157     /* from_dst_file is always valid because we're within rp_thread */
4158     QEMUFile *file = s->rp_state.from_dst_file;
4159     unsigned long *le_bitmap, nbits = block->used_length >> TARGET_PAGE_BITS;
4160     uint64_t local_size = DIV_ROUND_UP(nbits, 8);
4161     uint64_t size, end_mark;
4162 
4163     trace_ram_dirty_bitmap_reload_begin(block->idstr);
4164 
4165     if (s->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
4166         error_report("%s: incorrect state %s", __func__,
4167                      MigrationStatus_str(s->state));
4168         return -EINVAL;
4169     }
4170 
4171     /*
4172      * Note: see comments in ramblock_recv_bitmap_send() on why we
4173      * need the endianness conversion, and the paddings.
4174  * need the endianness conversion, and the padding.
4175     local_size = ROUND_UP(local_size, 8);
4176 
4177     /* Add padding */
4178     le_bitmap = bitmap_new(nbits + BITS_PER_LONG);
4179 
4180     size = qemu_get_be64(file);
4181 
4182     /* The size of the bitmap should match our ramblock */
4183     if (size != local_size) {
4184         error_report("%s: ramblock '%s' bitmap size mismatch "
4185                      "(0x%"PRIx64" != 0x%"PRIx64")", __func__,
4186                      block->idstr, size, local_size);
4187         ret = -EINVAL;
4188         goto out;
4189     }
4190 
4191     size = qemu_get_buffer(file, (uint8_t *)le_bitmap, local_size);
4192     end_mark = qemu_get_be64(file);
4193 
4194     ret = qemu_file_get_error(file);
4195     if (ret || size != local_size) {
4196         error_report("%s: read bitmap failed for ramblock '%s': %d"
4197                      " (size 0x%"PRIx64", got: 0x%"PRIx64")",
4198                      __func__, block->idstr, ret, local_size, size);
4199         ret = -EIO;
4200         goto out;
4201     }
4202 
4203     if (end_mark != RAMBLOCK_RECV_BITMAP_ENDING) {
4204         error_report("%s: ramblock '%s' end mark incorrect: 0x%"PRIx64,
4205                      __func__, block->idstr, end_mark);
4206         ret = -EINVAL;
4207         goto out;
4208     }
4209 
4210     /*
4211      * Endianness conversion. We are during postcopy (though paused).
4212      * The dirty bitmap won't change. We can directly modify it.
4213      */
4214     bitmap_from_le(block->bmap, le_bitmap, nbits);
4215 
4216     /*
4217      * What we received is the "received bitmap".  Invert it to form the
4218      * initial dirty bitmap for this ramblock.
4219      */
4220     bitmap_complement(block->bmap, block->bmap, nbits);
4221 
4222     /* Clear dirty bits of discarded ranges that we don't want to migrate. */
4223     ramblock_dirty_bitmap_clear_discarded_pages(block);
4224 
4225     /* We'll recalculate migration_dirty_pages in ram_state_resume_prepare(). */
4226     trace_ram_dirty_bitmap_reload_complete(block->idstr);
4227 
4228     /*
4229      * We succeeded in syncing the bitmap for the current ramblock.  If this
4230      * is the last one to sync, we need to notify the main send thread.
4231      */
4232     ram_dirty_bitmap_reload_notify(s);
4233 
4234     ret = 0;
4235 out:
4236     g_free(le_bitmap);
4237     return ret;
4238 }
4239 
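/*
 * Illustrative sketch (not part of the original file): the stream parsed by
 * ram_dirty_bitmap_reload() above carries a big-endian 64-bit size, the
 * little-endian bitmap itself padded up to a multiple of 8 bytes, and a
 * big-endian 64-bit end mark (RAMBLOCK_RECV_BITMAP_ENDING).  A hypothetical
 * emitter mirroring that layout (the real one is ramblock_recv_bitmap_send()
 * on the destination side) might look like:
 */
static void G_GNUC_UNUSED recv_bitmap_emit_example(QEMUFile *file,
                                                   unsigned long *le_bitmap,
                                                   uint64_t nbits)
{
    uint64_t size = ROUND_UP(DIV_ROUND_UP(nbits, 8), 8);

    qemu_put_be64(file, size);
    qemu_put_buffer(file, (uint8_t *)le_bitmap, size);
    qemu_put_be64(file, RAMBLOCK_RECV_BITMAP_ENDING);
}
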
4240 static int ram_resume_prepare(MigrationState *s, void *opaque)
4241 {
4242     RAMState *rs = *(RAMState **)opaque;
4243     int ret;
4244 
4245     ret = ram_dirty_bitmap_sync_all(s, rs);
4246     if (ret) {
4247         return ret;
4248     }
4249 
4250     ram_state_resume_prepare(rs, s->to_dst_file);
4251 
4252     return 0;
4253 }
4254 
4255 void postcopy_preempt_shutdown_file(MigrationState *s)
4256 {
4257     qemu_put_be64(s->postcopy_qemufile_src, RAM_SAVE_FLAG_EOS);
4258     qemu_fflush(s->postcopy_qemufile_src);
4259 }
4260 
4261 static SaveVMHandlers savevm_ram_handlers = {
4262     .save_setup = ram_save_setup,
4263     .save_live_iterate = ram_save_iterate,
4264     .save_live_complete_postcopy = ram_save_complete,
4265     .save_live_complete_precopy = ram_save_complete,
4266     .has_postcopy = ram_has_postcopy,
4267     .state_pending_exact = ram_state_pending_exact,
4268     .state_pending_estimate = ram_state_pending_estimate,
4269     .load_state = ram_load,
4270     .save_cleanup = ram_save_cleanup,
4271     .load_setup = ram_load_setup,
4272     .load_cleanup = ram_load_cleanup,
4273     .resume_prepare = ram_resume_prepare,
4274 };
4275 
4276 static void ram_mig_ram_block_resized(RAMBlockNotifier *n, void *host,
4277                                       size_t old_size, size_t new_size)
4278 {
4279     PostcopyState ps = postcopy_state_get();
4280     ram_addr_t offset;
4281     RAMBlock *rb = qemu_ram_block_from_host(host, false, &offset);
4282     Error *err = NULL;
4283 
4284     if (migrate_ram_is_ignored(rb)) {
4285         return;
4286     }
4287 
4288     if (!migration_is_idle()) {
4289         /*
4290          * Precopy code on the source cannot deal with the size of RAM blocks
4291          * changing at random points in time - especially after sending the
4292          * RAM block sizes in the migration stream, they must no longer change.
4293          * Abort and indicate a proper reason.
4294          */
4295         error_setg(&err, "RAM block '%s' resized during precopy.", rb->idstr);
4296         migration_cancel(err);
4297         error_free(err);
4298     }
4299 
4300     switch (ps) {
4301     case POSTCOPY_INCOMING_ADVISE:
4302         /*
4303          * Update what ram_postcopy_incoming_init()->init_range() does at the
4304          * time postcopy was advised. Syncing RAM blocks with the source will
4305          * result in RAM resizes.
4306          */
4307         if (old_size < new_size) {
4308             if (ram_discard_range(rb->idstr, old_size, new_size - old_size)) {
4309                 error_report("RAM block '%s' discard of resized RAM failed",
4310                              rb->idstr);
4311             }
4312         }
4313         rb->postcopy_length = new_size;
4314         break;
4315     case POSTCOPY_INCOMING_NONE:
4316     case POSTCOPY_INCOMING_RUNNING:
4317     case POSTCOPY_INCOMING_END:
4318         /*
4319          * Once our guest is running, postcopy no longer cares about
4320          * resizes.  When growing, the new memory was not available on the
4321          * source, so no handling is needed.
4322          */
4323         break;
4324     default:
4325         error_report("RAM block '%s' resized during postcopy state: %d",
4326                      rb->idstr, ps);
4327         exit(-1);
4328     }
4329 }
4330 
4331 static RAMBlockNotifier ram_mig_ram_notifier = {
4332     .ram_block_resized = ram_mig_ram_block_resized,
4333 };
4334 
4335 void ram_mig_init(void)
4336 {
4337     qemu_mutex_init(&XBZRLE.lock);
4338     register_savevm_live("ram", 0, 4, &savevm_ram_handlers, &ram_state);
4339     ram_block_notifier_add(&ram_mig_ram_notifier);
4340 }
4341