xref: /openbmc/qemu/migration/ram.c (revision f1f7e4bf)
1 /*
2  * QEMU System Emulator
3  *
4  * Copyright (c) 2003-2008 Fabrice Bellard
5  * Copyright (c) 2011-2015 Red Hat Inc
6  *
7  * Authors:
8  *  Juan Quintela <quintela@redhat.com>
9  *
10  * Permission is hereby granted, free of charge, to any person obtaining a copy
11  * of this software and associated documentation files (the "Software"), to deal
12  * in the Software without restriction, including without limitation the rights
13  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
14  * copies of the Software, and to permit persons to whom the Software is
15  * furnished to do so, subject to the following conditions:
16  *
17  * The above copyright notice and this permission notice shall be included in
18  * all copies or substantial portions of the Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
24  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
26  * THE SOFTWARE.
27  */
28 #include <stdint.h>
29 #include <zlib.h>
30 #include "qemu/bitops.h"
31 #include "qemu/bitmap.h"
32 #include "qemu/timer.h"
33 #include "qemu/main-loop.h"
34 #include "migration/migration.h"
35 #include "migration/postcopy-ram.h"
36 #include "exec/address-spaces.h"
37 #include "migration/page_cache.h"
38 #include "qemu/error-report.h"
39 #include "trace.h"
40 #include "exec/ram_addr.h"
41 #include "qemu/rcu_queue.h"
42 
43 #ifdef DEBUG_MIGRATION_RAM
44 #define DPRINTF(fmt, ...) \
45     do { fprintf(stdout, "migration_ram: " fmt, ## __VA_ARGS__); } while (0)
46 #else
47 #define DPRINTF(fmt, ...) \
48     do { } while (0)
49 #endif
50 
51 static int dirty_rate_high_cnt;
52 
53 static uint64_t bitmap_sync_count;
54 
55 /***********************************************************/
56 /* ram save/restore */
57 
58 #define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
59 #define RAM_SAVE_FLAG_COMPRESS 0x02
60 #define RAM_SAVE_FLAG_MEM_SIZE 0x04
61 #define RAM_SAVE_FLAG_PAGE     0x08
62 #define RAM_SAVE_FLAG_EOS      0x10
63 #define RAM_SAVE_FLAG_CONTINUE 0x20
64 #define RAM_SAVE_FLAG_XBZRLE   0x40
65 /* 0x80 is reserved in migration.h; start with 0x100 for the next flag */
66 #define RAM_SAVE_FLAG_COMPRESS_PAGE    0x100
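/*
 * For illustration (sketch only, derived from save_page_header() and
 * save_zero_page() below): these flags travel in the low bits of the page
 * offset written to the wire.  Offsets are target-page aligned, so the low
 * TARGET_PAGE_BITS bits are free for flags, roughly:
 *
 *     qemu_put_be64(f, offset | RAM_SAVE_FLAG_PAGE);     // raw page follows
 *     qemu_put_be64(f, offset | RAM_SAVE_FLAG_COMPRESS); // fill-byte (zero) page
 */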
67 
68 static const uint8_t ZERO_TARGET_PAGE[TARGET_PAGE_SIZE];
69 
70 static inline bool is_zero_range(uint8_t *p, uint64_t size)
71 {
72     return buffer_find_nonzero_offset(p, size) == size;
73 }
74 
75 /* This struct contains the XBZRLE cache and a static page
76    used by the compression */
77 static struct {
78     /* buffer used for XBZRLE encoding */
79     uint8_t *encoded_buf;
80     /* buffer for storing page content */
81     uint8_t *current_buf;
82     /* Cache for XBZRLE, Protected by lock. */
83     PageCache *cache;
84     QemuMutex lock;
85 } XBZRLE;
86 
87 /* buffer used for XBZRLE decoding */
88 static uint8_t *xbzrle_decoded_buf;
89 
90 static void XBZRLE_cache_lock(void)
91 {
92     if (migrate_use_xbzrle())
93         qemu_mutex_lock(&XBZRLE.lock);
94 }
95 
96 static void XBZRLE_cache_unlock(void)
97 {
98     if (migrate_use_xbzrle())
99         qemu_mutex_unlock(&XBZRLE.lock);
100 }
101 
102 /*
103  * called from qmp_migrate_set_cache_size in main thread, possibly while
104  * a migration is in progress.
105  * A running migration may be using the cache and might finish during this
106  * call, hence changes to the cache are protected by the XBZRLE.lock mutex.
107  */
108 int64_t xbzrle_cache_resize(int64_t new_size)
109 {
110     PageCache *new_cache;
111     int64_t ret;
112 
113     if (new_size < TARGET_PAGE_SIZE) {
114         return -1;
115     }
116 
117     XBZRLE_cache_lock();
118 
119     if (XBZRLE.cache != NULL) {
120         if (pow2floor(new_size) == migrate_xbzrle_cache_size()) {
121             goto out_new_size;
122         }
123         new_cache = cache_init(new_size / TARGET_PAGE_SIZE,
124                                         TARGET_PAGE_SIZE);
125         if (!new_cache) {
126             error_report("Error creating cache");
127             ret = -1;
128             goto out;
129         }
130 
131         cache_fini(XBZRLE.cache);
132         XBZRLE.cache = new_cache;
133     }
134 
135 out_new_size:
136     ret = pow2floor(new_size);
137 out:
138     XBZRLE_cache_unlock();
139     return ret;
140 }
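/*
 * Usage sketch (illustrative values only): the reported cache size is
 * rounded down to a power of two, e.g.
 *
 *     int64_t ret = xbzrle_cache_resize(300 * 1024 * 1024);
 *     // ret == 256 MiB, i.e. pow2floor() of the request
 *
 * while any request smaller than TARGET_PAGE_SIZE is rejected with -1.
 */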
141 
142 /* accounting for migration statistics */
143 typedef struct AccountingInfo {
144     uint64_t dup_pages;
145     uint64_t skipped_pages;
146     uint64_t norm_pages;
147     uint64_t iterations;
148     uint64_t xbzrle_bytes;
149     uint64_t xbzrle_pages;
150     uint64_t xbzrle_cache_miss;
151     double xbzrle_cache_miss_rate;
152     uint64_t xbzrle_overflows;
153 } AccountingInfo;
154 
155 static AccountingInfo acct_info;
156 
157 static void acct_clear(void)
158 {
159     memset(&acct_info, 0, sizeof(acct_info));
160 }
161 
162 uint64_t dup_mig_bytes_transferred(void)
163 {
164     return acct_info.dup_pages * TARGET_PAGE_SIZE;
165 }
166 
167 uint64_t dup_mig_pages_transferred(void)
168 {
169     return acct_info.dup_pages;
170 }
171 
172 uint64_t skipped_mig_bytes_transferred(void)
173 {
174     return acct_info.skipped_pages * TARGET_PAGE_SIZE;
175 }
176 
177 uint64_t skipped_mig_pages_transferred(void)
178 {
179     return acct_info.skipped_pages;
180 }
181 
182 uint64_t norm_mig_bytes_transferred(void)
183 {
184     return acct_info.norm_pages * TARGET_PAGE_SIZE;
185 }
186 
187 uint64_t norm_mig_pages_transferred(void)
188 {
189     return acct_info.norm_pages;
190 }
191 
192 uint64_t xbzrle_mig_bytes_transferred(void)
193 {
194     return acct_info.xbzrle_bytes;
195 }
196 
197 uint64_t xbzrle_mig_pages_transferred(void)
198 {
199     return acct_info.xbzrle_pages;
200 }
201 
202 uint64_t xbzrle_mig_pages_cache_miss(void)
203 {
204     return acct_info.xbzrle_cache_miss;
205 }
206 
207 double xbzrle_mig_cache_miss_rate(void)
208 {
209     return acct_info.xbzrle_cache_miss_rate;
210 }
211 
212 uint64_t xbzrle_mig_pages_overflow(void)
213 {
214     return acct_info.xbzrle_overflows;
215 }
216 
217 /* This is the last block that we have visited searching for dirty pages
218  */
219 static RAMBlock *last_seen_block;
220 /* This is the last block from where we have sent data */
221 static RAMBlock *last_sent_block;
222 static ram_addr_t last_offset;
223 static QemuMutex migration_bitmap_mutex;
224 static uint64_t migration_dirty_pages;
225 static uint32_t last_version;
226 static bool ram_bulk_stage;
227 
228 /* used by the search for pages to send */
229 struct PageSearchStatus {
230     /* Current block being searched */
231     RAMBlock    *block;
232     /* Current offset to search from */
233     ram_addr_t   offset;
234     /* Set once we wrap around */
235     bool         complete_round;
236 };
237 typedef struct PageSearchStatus PageSearchStatus;
238 
239 static struct BitmapRcu {
240     struct rcu_head rcu;
241     /* Main migration bitmap */
242     unsigned long *bmap;
243     /* bitmap of pages that haven't been sent even once
244      * only maintained and used in postcopy at the moment
245      * where it's used to send the dirtymap at the start
246      * of the postcopy phase
247      */
248     unsigned long *unsentmap;
249 } *migration_bitmap_rcu;
250 
251 struct CompressParam {
252     bool start;
253     bool done;
254     QEMUFile *file;
255     QemuMutex mutex;
256     QemuCond cond;
257     RAMBlock *block;
258     ram_addr_t offset;
259 };
260 typedef struct CompressParam CompressParam;
261 
262 struct DecompressParam {
263     bool start;
264     QemuMutex mutex;
265     QemuCond cond;
266     void *des;
267     uint8_t *compbuf;
268     int len;
269 };
270 typedef struct DecompressParam DecompressParam;
271 
272 static CompressParam *comp_param;
273 static QemuThread *compress_threads;
274 /* comp_done_cond is used to wake up the migration thread when
275  * one of the compression threads has finished the compression.
276  * comp_done_lock is used in conjunction with comp_done_cond.
277  */
278 static QemuMutex *comp_done_lock;
279 static QemuCond *comp_done_cond;
280 /* The empty QEMUFileOps will be used by file in CompressParam */
281 static const QEMUFileOps empty_ops = { };
282 
283 static bool compression_switch;
284 static bool quit_comp_thread;
285 static bool quit_decomp_thread;
286 static DecompressParam *decomp_param;
287 static QemuThread *decompress_threads;
288 static uint8_t *compressed_data_buf;
289 
290 static int do_compress_ram_page(CompressParam *param);
291 
292 static void *do_data_compress(void *opaque)
293 {
294     CompressParam *param = opaque;
295 
296     while (!quit_comp_thread) {
297         qemu_mutex_lock(&param->mutex);
298         /* Re-check quit_comp_thread in case
299          * terminate_compression_threads() was called just before
300          * qemu_mutex_lock(&param->mutex) and after
301          * while (!quit_comp_thread); re-checking it here makes
302          * sure the compression thread terminates as expected.
303          */
304         while (!param->start && !quit_comp_thread) {
305             qemu_cond_wait(&param->cond, &param->mutex);
306         }
307         if (!quit_comp_thread) {
308             do_compress_ram_page(param);
309         }
310         param->start = false;
311         qemu_mutex_unlock(&param->mutex);
312 
313         qemu_mutex_lock(comp_done_lock);
314         param->done = true;
315         qemu_cond_signal(comp_done_cond);
316         qemu_mutex_unlock(comp_done_lock);
317     }
318 
319     return NULL;
320 }
321 
322 static inline void terminate_compression_threads(void)
323 {
324     int idx, thread_count;
325 
326     thread_count = migrate_compress_threads();
327     quit_comp_thread = true;
328     for (idx = 0; idx < thread_count; idx++) {
329         qemu_mutex_lock(&comp_param[idx].mutex);
330         qemu_cond_signal(&comp_param[idx].cond);
331         qemu_mutex_unlock(&comp_param[idx].mutex);
332     }
333 }
334 
335 void migrate_compress_threads_join(void)
336 {
337     int i, thread_count;
338 
339     if (!migrate_use_compression()) {
340         return;
341     }
342     terminate_compression_threads();
343     thread_count = migrate_compress_threads();
344     for (i = 0; i < thread_count; i++) {
345         qemu_thread_join(compress_threads + i);
346         qemu_fclose(comp_param[i].file);
347         qemu_mutex_destroy(&comp_param[i].mutex);
348         qemu_cond_destroy(&comp_param[i].cond);
349     }
350     qemu_mutex_destroy(comp_done_lock);
351     qemu_cond_destroy(comp_done_cond);
352     g_free(compress_threads);
353     g_free(comp_param);
354     g_free(comp_done_cond);
355     g_free(comp_done_lock);
356     compress_threads = NULL;
357     comp_param = NULL;
358     comp_done_cond = NULL;
359     comp_done_lock = NULL;
360 }
361 
362 void migrate_compress_threads_create(void)
363 {
364     int i, thread_count;
365 
366     if (!migrate_use_compression()) {
367         return;
368     }
369     quit_comp_thread = false;
370     compression_switch = true;
371     thread_count = migrate_compress_threads();
372     compress_threads = g_new0(QemuThread, thread_count);
373     comp_param = g_new0(CompressParam, thread_count);
374     comp_done_cond = g_new0(QemuCond, 1);
375     comp_done_lock = g_new0(QemuMutex, 1);
376     qemu_cond_init(comp_done_cond);
377     qemu_mutex_init(comp_done_lock);
378     for (i = 0; i < thread_count; i++) {
379         /* comp_param[i].file is just used as a dummy buffer to save data;
380          * set its ops to empty.
381          */
382         comp_param[i].file = qemu_fopen_ops(NULL, &empty_ops);
383         comp_param[i].done = true;
384         qemu_mutex_init(&comp_param[i].mutex);
385         qemu_cond_init(&comp_param[i].cond);
386         qemu_thread_create(compress_threads + i, "compress",
387                            do_data_compress, comp_param + i,
388                            QEMU_THREAD_JOINABLE);
389     }
390 }
391 
392 /**
393  * save_page_header: Write page header to wire
394  *
395  * If this is the 1st block, it also writes the block identification
396  *
397  * Returns: Number of bytes written
398  *
399  * @f: QEMUFile where to send the data
400  * @block: block that contains the page we want to send
401  * @offset: offset inside the block for the page
402  *          in the lower bits, it contains flags
403  */
404 static size_t save_page_header(QEMUFile *f, RAMBlock *block, ram_addr_t offset)
405 {
406     size_t size, len;
407 
408     qemu_put_be64(f, offset);
409     size = 8;
410 
411     if (!(offset & RAM_SAVE_FLAG_CONTINUE)) {
412         len = strlen(block->idstr);
413         qemu_put_byte(f, len);
414         qemu_put_buffer(f, (uint8_t *)block->idstr, len);
415         size += 1 + len;
416     }
417     return size;
418 }
419 
420 /* Reduce the amount of guest CPU execution to hopefully slow down memory writes.
421  * If guest dirty memory rate is reduced below the rate at which we can
422  * transfer pages to the destination then we should be able to complete
423  * migration. Some workloads dirty memory way too fast and will not effectively
424  * converge, even with auto-converge.
425  */
426 static void mig_throttle_guest_down(void)
427 {
428     MigrationState *s = migrate_get_current();
429     uint64_t pct_initial =
430             s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INITIAL];
431     uint64_t pct_increment =
432             s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INCREMENT];
433 
434     /* We have not started throttling yet. Let's start it. */
435     if (!cpu_throttle_active()) {
436         cpu_throttle_set(pct_initial);
437     } else {
438         /* Throttling already on, just increase the rate */
439         cpu_throttle_set(cpu_throttle_get_percentage() + pct_increment);
440     }
441 }
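/*
 * Worked example (the percentages are illustrative; the real values come
 * from the MIGRATION_PARAMETER_X_CPU_THROTTLE_INITIAL/INCREMENT migration
 * parameters): with an initial value of 20 and an increment of 10,
 * successive calls throttle the vCPUs at 20%, 30%, 40%, ... until the
 * dirty rate drops below the transfer rate or migration completes.
 */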
442 
443 /* Update the xbzrle cache to reflect a page that's been sent as all 0.
444  * The important thing is that a stale (not-yet-0'd) page be replaced
445  * by the new data.
446  * As a bonus, if the page wasn't in the cache it gets added so that
447  * when a small write is made into the 0'd page it gets XBZRLE sent
448  */
449 static void xbzrle_cache_zero_page(ram_addr_t current_addr)
450 {
451     if (ram_bulk_stage || !migrate_use_xbzrle()) {
452         return;
453     }
454 
455     /* We don't care if this fails to allocate a new cache page
456      * as long as it updates an old one */
457     cache_insert(XBZRLE.cache, current_addr, ZERO_TARGET_PAGE,
458                  bitmap_sync_count);
459 }
460 
461 #define ENCODING_FLAG_XBZRLE 0x1
462 
463 /**
464  * save_xbzrle_page: compress and send current page
465  *
466  * Returns: 1 means that we wrote the page
467  *          0 means that page is identical to the one already sent
468  *          -1 means that xbzrle would be longer than normal
469  *
470  * @f: QEMUFile where to send the data
471  * @current_data: pointer to the page data (may be updated to point at the cached copy)
472  * @current_addr: address of the page in the ram_addr_t address space
473  * @block: block that contains the page we want to send
474  * @offset: offset inside the block for the page
475  * @last_stage: if we are at the completion stage
476  * @bytes_transferred: increase it with the number of transferred bytes
477  */
478 static int save_xbzrle_page(QEMUFile *f, uint8_t **current_data,
479                             ram_addr_t current_addr, RAMBlock *block,
480                             ram_addr_t offset, bool last_stage,
481                             uint64_t *bytes_transferred)
482 {
483     int encoded_len = 0, bytes_xbzrle;
484     uint8_t *prev_cached_page;
485 
486     if (!cache_is_cached(XBZRLE.cache, current_addr, bitmap_sync_count)) {
487         acct_info.xbzrle_cache_miss++;
488         if (!last_stage) {
489             if (cache_insert(XBZRLE.cache, current_addr, *current_data,
490                              bitmap_sync_count) == -1) {
491                 return -1;
492             } else {
493                 /* update *current_data when the page has been
494                    inserted into cache */
495                 *current_data = get_cached_data(XBZRLE.cache, current_addr);
496             }
497         }
498         return -1;
499     }
500 
501     prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);
502 
503     /* save current buffer into memory */
504     memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);
505 
506     /* XBZRLE encoding (if there is no overflow) */
507     encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
508                                        TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
509                                        TARGET_PAGE_SIZE);
510     if (encoded_len == 0) {
511         DPRINTF("Skipping unmodified page\n");
512         return 0;
513     } else if (encoded_len == -1) {
514         DPRINTF("Overflow\n");
515         acct_info.xbzrle_overflows++;
516         /* update data in the cache */
517         if (!last_stage) {
518             memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE);
519             *current_data = prev_cached_page;
520         }
521         return -1;
522     }
523 
524     /* Update the cache so that it matches the data the destination now holds */
525     if (!last_stage) {
526         memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
527     }
528 
529     /* Send XBZRLE based compressed page */
530     bytes_xbzrle = save_page_header(f, block, offset | RAM_SAVE_FLAG_XBZRLE);
531     qemu_put_byte(f, ENCODING_FLAG_XBZRLE);
532     qemu_put_be16(f, encoded_len);
533     qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len);
534     bytes_xbzrle += encoded_len + 1 + 2;
535     acct_info.xbzrle_pages++;
536     acct_info.xbzrle_bytes += bytes_xbzrle;
537     *bytes_transferred += bytes_xbzrle;
538 
539     return 1;
540 }
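/*
 * Wire format of an XBZRLE page (sketch, derived from the code above):
 *
 *     save_page_header(offset | RAM_SAVE_FLAG_XBZRLE)
 *     [ u8:   ENCODING_FLAG_XBZRLE ]
 *     [ be16: encoded_len          ]
 *     [ encoded_len bytes of delta ]
 *
 * which is why bytes_xbzrle is the header size plus encoded_len + 1 + 2.
 */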
541 
542 /* Called with rcu_read_lock() to protect migration_bitmap
543  * rb: The RAMBlock  to search for dirty pages in
544  * start: Start address (typically so we can continue from previous page)
545  * ram_addr_abs: Pointer into which to store the address of the dirty page
546  *               within the global ram_addr space
547  *
548  * Returns: byte offset within memory region of the start of a dirty page
549  */
550 static inline
551 ram_addr_t migration_bitmap_find_dirty(RAMBlock *rb,
552                                        ram_addr_t start,
553                                        ram_addr_t *ram_addr_abs)
554 {
555     unsigned long base = rb->offset >> TARGET_PAGE_BITS;
556     unsigned long nr = base + (start >> TARGET_PAGE_BITS);
557     uint64_t rb_size = rb->used_length;
558     unsigned long size = base + (rb_size >> TARGET_PAGE_BITS);
559     unsigned long *bitmap;
560 
561     unsigned long next;
562 
563     bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
564     if (ram_bulk_stage && nr > base) {
565         next = nr + 1;
566     } else {
567         next = find_next_bit(bitmap, size, nr);
568     }
569 
570     *ram_addr_abs = next << TARGET_PAGE_BITS;
571     return (next - base) << TARGET_PAGE_BITS;
572 }
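/*
 * Index arithmetic sketch (numbers illustrative, assuming 4KiB target pages,
 * i.e. TARGET_PAGE_BITS == 12): for a block at offset 0x100000 the base page
 * index is 0x100; if the next dirty bit is found at index 0x105, the function
 * stores 0x105000 in *ram_addr_abs and returns the block-relative byte offset
 * (0x105 - 0x100) << TARGET_PAGE_BITS == 0x5000.
 */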
573 
574 static inline bool migration_bitmap_clear_dirty(ram_addr_t addr)
575 {
576     bool ret;
577     int nr = addr >> TARGET_PAGE_BITS;
578     unsigned long *bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
579 
580     ret = test_and_clear_bit(nr, bitmap);
581 
582     if (ret) {
583         migration_dirty_pages--;
584     }
585     return ret;
586 }
587 
588 static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
589 {
590     unsigned long *bitmap;
591     bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
592     migration_dirty_pages +=
593         cpu_physical_memory_sync_dirty_bitmap(bitmap, start, length);
594 }
595 
596 /* Fix me: there are too many global variables used in the migration process. */
597 static int64_t start_time;
598 static int64_t bytes_xfer_prev;
599 static int64_t num_dirty_pages_period;
600 static uint64_t xbzrle_cache_miss_prev;
601 static uint64_t iterations_prev;
602 
603 static void migration_bitmap_sync_init(void)
604 {
605     start_time = 0;
606     bytes_xfer_prev = 0;
607     num_dirty_pages_period = 0;
608     xbzrle_cache_miss_prev = 0;
609     iterations_prev = 0;
610 }
611 
612 /* Called with iothread lock held, to protect ram_list.dirty_memory[] */
613 static void migration_bitmap_sync(void)
614 {
615     RAMBlock *block;
616     uint64_t num_dirty_pages_init = migration_dirty_pages;
617     MigrationState *s = migrate_get_current();
618     int64_t end_time;
619     int64_t bytes_xfer_now;
620 
621     bitmap_sync_count++;
622 
623     if (!bytes_xfer_prev) {
624         bytes_xfer_prev = ram_bytes_transferred();
625     }
626 
627     if (!start_time) {
628         start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
629     }
630 
631     trace_migration_bitmap_sync_start();
632     address_space_sync_dirty_bitmap(&address_space_memory);
633 
634     qemu_mutex_lock(&migration_bitmap_mutex);
635     rcu_read_lock();
636     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
637         migration_bitmap_sync_range(block->offset, block->used_length);
638     }
639     rcu_read_unlock();
640     qemu_mutex_unlock(&migration_bitmap_mutex);
641 
642     trace_migration_bitmap_sync_end(migration_dirty_pages
643                                     - num_dirty_pages_init);
644     num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init;
645     end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
646 
647     /* more than 1 second = 1000 milliseconds */
648     if (end_time > start_time + 1000) {
649         if (migrate_auto_converge()) {
650             /* The following detection logic can be refined later. For now:
651                Check whether the bytes dirtied in this period exceed half of
652                the approximate amount of bytes that just got transferred since
653                the last time we were in this routine. If that happens twice, start or increase
654                throttling */
655             bytes_xfer_now = ram_bytes_transferred();
656 
657             if (s->dirty_pages_rate &&
658                (num_dirty_pages_period * TARGET_PAGE_SIZE >
659                    (bytes_xfer_now - bytes_xfer_prev)/2) &&
660                (dirty_rate_high_cnt++ >= 2)) {
661                     trace_migration_throttle();
662                     dirty_rate_high_cnt = 0;
663                     mig_throttle_guest_down();
664              }
665              bytes_xfer_prev = bytes_xfer_now;
666         }
667 
668         if (migrate_use_xbzrle()) {
669             if (iterations_prev != acct_info.iterations) {
670                 acct_info.xbzrle_cache_miss_rate =
671                    (double)(acct_info.xbzrle_cache_miss -
672                             xbzrle_cache_miss_prev) /
673                    (acct_info.iterations - iterations_prev);
674             }
675             iterations_prev = acct_info.iterations;
676             xbzrle_cache_miss_prev = acct_info.xbzrle_cache_miss;
677         }
678         s->dirty_pages_rate = num_dirty_pages_period * 1000
679             / (end_time - start_time);
680         s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE;
681         start_time = end_time;
682         num_dirty_pages_period = 0;
683     }
684     s->dirty_sync_count = bitmap_sync_count;
685 }
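/*
 * Throttling example (illustrative numbers): if roughly 600 MB were dirtied
 * during the last sync period while about 1 GB was transferred, the check
 * 600 MB > (1 GB) / 2 fires; once that has happened a couple of times
 * (tracked in dirty_rate_high_cnt), the guest is throttled via
 * mig_throttle_guest_down().
 */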
686 
687 /**
688  * save_zero_page: Send the zero page to the stream
689  *
690  * Returns: Number of pages written, or -1 if the page was not all zeroes.
691  *
692  * @f: QEMUFile where to send the data
693  * @block: block that contains the page we want to send
694  * @offset: offset inside the block for the page
695  * @p: pointer to the page
696  * @bytes_transferred: increase it with the number of transferred bytes
697  */
698 static int save_zero_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
699                           uint8_t *p, uint64_t *bytes_transferred)
700 {
701     int pages = -1;
702 
703     if (is_zero_range(p, TARGET_PAGE_SIZE)) {
704         acct_info.dup_pages++;
705         *bytes_transferred += save_page_header(f, block,
706                                                offset | RAM_SAVE_FLAG_COMPRESS);
707         qemu_put_byte(f, 0);
708         *bytes_transferred += 1;
709         pages = 1;
710     }
711 
712     return pages;
713 }
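/*
 * Cost sketch: a page of zeroes is transmitted as save_page_header() plus a
 * single fill byte (0), i.e. roughly 9 bytes (plus the block name the first
 * time) instead of TARGET_PAGE_SIZE.  A return of -1 means "not a zero page"
 * and lets the caller fall through to the normal/XBZRLE/compressed paths.
 */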
714 
715 /**
716  * ram_save_page: Send the given page to the stream
717  *
718  * Returns: Number of pages written.
719  *          < 0 - error
720  *          >=0 - Number of pages written - this might legally be 0
721  *                if xbzrle noticed the page was the same.
722  *
723  * @f: QEMUFile where to send the data
724  * @block: block that contains the page we want to send
725  * @offset: offset inside the block for the page
726  * @last_stage: if we are at the completion stage
727  * @bytes_transferred: increase it with the number of transferred bytes
728  */
729 static int ram_save_page(QEMUFile *f, RAMBlock* block, ram_addr_t offset,
730                          bool last_stage, uint64_t *bytes_transferred)
731 {
732     int pages = -1;
733     uint64_t bytes_xmit;
734     ram_addr_t current_addr;
735     uint8_t *p;
736     int ret;
737     bool send_async = true;
738 
739     p = block->host + offset;
740 
741     /* When in doubt, send the page as normal */
742     bytes_xmit = 0;
743     ret = ram_control_save_page(f, block->offset,
744                            offset, TARGET_PAGE_SIZE, &bytes_xmit);
745     if (bytes_xmit) {
746         *bytes_transferred += bytes_xmit;
747         pages = 1;
748     }
749 
750     XBZRLE_cache_lock();
751 
752     current_addr = block->offset + offset;
753 
754     if (block == last_sent_block) {
755         offset |= RAM_SAVE_FLAG_CONTINUE;
756     }
757     if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
758         if (ret != RAM_SAVE_CONTROL_DELAYED) {
759             if (bytes_xmit > 0) {
760                 acct_info.norm_pages++;
761             } else if (bytes_xmit == 0) {
762                 acct_info.dup_pages++;
763             }
764         }
765     } else {
766         pages = save_zero_page(f, block, offset, p, bytes_transferred);
767         if (pages > 0) {
768             /* Must let xbzrle know, otherwise a previous (now 0'd) cached
769              * page would be stale
770              */
771             xbzrle_cache_zero_page(current_addr);
772         } else if (!ram_bulk_stage && migrate_use_xbzrle()) {
773             pages = save_xbzrle_page(f, &p, current_addr, block,
774                                      offset, last_stage, bytes_transferred);
775             if (!last_stage) {
776                 /* Can't send this cached data async, since the cache page
777                  * might get updated before it gets to the wire
778                  */
779                 send_async = false;
780             }
781         }
782     }
783 
784     /* XBZRLE overflow or normal page */
785     if (pages == -1) {
786         *bytes_transferred += save_page_header(f, block,
787                                                offset | RAM_SAVE_FLAG_PAGE);
788         if (send_async) {
789             qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE);
790         } else {
791             qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
792         }
793         *bytes_transferred += TARGET_PAGE_SIZE;
794         pages = 1;
795         acct_info.norm_pages++;
796     }
797 
798     XBZRLE_cache_unlock();
799 
800     return pages;
801 }
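/*
 * Decision order sketch for ram_save_page(): first give the transport hook
 * (ram_control_save_page, used e.g. by RDMA) a chance; otherwise try the
 * zero-page shortcut, then XBZRLE (only when enabled and past the bulk
 * stage), and finally fall back to sending the raw page with
 * RAM_SAVE_FLAG_PAGE.
 */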
802 
803 static int do_compress_ram_page(CompressParam *param)
804 {
805     int bytes_sent, blen;
806     uint8_t *p;
807     RAMBlock *block = param->block;
808     ram_addr_t offset = param->offset;
809 
810     p = block->host + (offset & TARGET_PAGE_MASK);
811 
812     bytes_sent = save_page_header(param->file, block, offset |
813                                   RAM_SAVE_FLAG_COMPRESS_PAGE);
814     blen = qemu_put_compression_data(param->file, p, TARGET_PAGE_SIZE,
815                                      migrate_compress_level());
816     bytes_sent += blen;
817 
818     return bytes_sent;
819 }
820 
821 static inline void start_compression(CompressParam *param)
822 {
823     param->done = false;
824     qemu_mutex_lock(&param->mutex);
825     param->start = true;
826     qemu_cond_signal(&param->cond);
827     qemu_mutex_unlock(&param->mutex);
828 }
829 
830 static inline void start_decompression(DecompressParam *param)
831 {
832     qemu_mutex_lock(&param->mutex);
833     param->start = true;
834     qemu_cond_signal(&param->cond);
835     qemu_mutex_unlock(&param->mutex);
836 }
837 
838 static uint64_t bytes_transferred;
839 
840 static void flush_compressed_data(QEMUFile *f)
841 {
842     int idx, len, thread_count;
843 
844     if (!migrate_use_compression()) {
845         return;
846     }
847     thread_count = migrate_compress_threads();
848     for (idx = 0; idx < thread_count; idx++) {
849         if (!comp_param[idx].done) {
850             qemu_mutex_lock(comp_done_lock);
851             while (!comp_param[idx].done && !quit_comp_thread) {
852                 qemu_cond_wait(comp_done_cond, comp_done_lock);
853             }
854             qemu_mutex_unlock(comp_done_lock);
855         }
856         if (!quit_comp_thread) {
857             len = qemu_put_qemu_file(f, comp_param[idx].file);
858             bytes_transferred += len;
859         }
860     }
861 }
862 
863 static inline void set_compress_params(CompressParam *param, RAMBlock *block,
864                                        ram_addr_t offset)
865 {
866     param->block = block;
867     param->offset = offset;
868 }
869 
870 static int compress_page_with_multi_thread(QEMUFile *f, RAMBlock *block,
871                                            ram_addr_t offset,
872                                            uint64_t *bytes_transferred)
873 {
874     int idx, thread_count, bytes_xmit = -1, pages = -1;
875 
876     thread_count = migrate_compress_threads();
877     qemu_mutex_lock(comp_done_lock);
878     while (true) {
879         for (idx = 0; idx < thread_count; idx++) {
880             if (comp_param[idx].done) {
881                 bytes_xmit = qemu_put_qemu_file(f, comp_param[idx].file);
882                 set_compress_params(&comp_param[idx], block, offset);
883                 start_compression(&comp_param[idx]);
884                 pages = 1;
885                 acct_info.norm_pages++;
886                 *bytes_transferred += bytes_xmit;
887                 break;
888             }
889         }
890         if (pages > 0) {
891             break;
892         } else {
893             qemu_cond_wait(comp_done_cond, comp_done_lock);
894         }
895     }
896     qemu_mutex_unlock(comp_done_lock);
897 
898     return pages;
899 }
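/*
 * Hand-off sketch: the migration thread looks for a worker whose 'done'
 * flag is set, drains that worker's buffered QEMUFile into the real stream,
 * hands it the next (block, offset) and signals 'start'; if every worker is
 * busy it sleeps on comp_done_cond until one of them finishes.
 */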
900 
901 /**
902  * ram_save_compressed_page: compress the given page and send it to the stream
903  *
904  * Returns: Number of pages written.
905  *
906  * @f: QEMUFile where to send the data
907  * @block: block that contains the page we want to send
908  * @offset: offset inside the block for the page
909  * @last_stage: if we are at the completion stage
910  * @bytes_transferred: increase it with the number of transferred bytes
911  */
912 static int ram_save_compressed_page(QEMUFile *f, RAMBlock *block,
913                                     ram_addr_t offset, bool last_stage,
914                                     uint64_t *bytes_transferred)
915 {
916     int pages = -1;
917     uint64_t bytes_xmit;
918     uint8_t *p;
919     int ret;
920 
921     p = block->host + offset;
922 
923     bytes_xmit = 0;
924     ret = ram_control_save_page(f, block->offset,
925                                 offset, TARGET_PAGE_SIZE, &bytes_xmit);
926     if (bytes_xmit) {
927         *bytes_transferred += bytes_xmit;
928         pages = 1;
929     }
930     if (block == last_sent_block) {
931         offset |= RAM_SAVE_FLAG_CONTINUE;
932     }
933     if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
934         if (ret != RAM_SAVE_CONTROL_DELAYED) {
935             if (bytes_xmit > 0) {
936                 acct_info.norm_pages++;
937             } else if (bytes_xmit == 0) {
938                 acct_info.dup_pages++;
939             }
940         }
941     } else {
942         /* When starting a new block, the first page of the block should be
943          * sent out before other pages in the same block, and all the pages
944          * in the previous block should have been sent out already.  Keeping
945          * this order is important, because the 'cont' flag is used to avoid
946          * resending the block name.
947          */
948         if (block != last_sent_block) {
949             flush_compressed_data(f);
950             pages = save_zero_page(f, block, offset, p, bytes_transferred);
951             if (pages == -1) {
952                 set_compress_params(&comp_param[0], block, offset);
953                 /* Use the qemu thread to compress the data to make sure the
954                  * first page is sent out before other pages
955                  */
956                 bytes_xmit = do_compress_ram_page(&comp_param[0]);
957                 acct_info.norm_pages++;
958                 qemu_put_qemu_file(f, comp_param[0].file);
959                 *bytes_transferred += bytes_xmit;
960                 pages = 1;
961             }
962         } else {
963             pages = save_zero_page(f, block, offset, p, bytes_transferred);
964             if (pages == -1) {
965                 pages = compress_page_with_multi_thread(f, block, offset,
966                                                         bytes_transferred);
967             }
968         }
969     }
970 
971     return pages;
972 }
973 
974 /*
975  * Find the next dirty page and update any state associated with
976  * the search process.
977  *
978  * Returns: True if a page is found
979  *
980  * @f: Current migration stream.
981  * @pss: Data about the state of the current dirty page scan.
982  * @*again: Set to false if the search has scanned the whole of RAM
983  * *ram_addr_abs: Pointer into which to store the address of the dirty page
984  *               within the global ram_addr space
985  */
986 static bool find_dirty_block(QEMUFile *f, PageSearchStatus *pss,
987                              bool *again, ram_addr_t *ram_addr_abs)
988 {
989     pss->offset = migration_bitmap_find_dirty(pss->block, pss->offset,
990                                               ram_addr_abs);
991     if (pss->complete_round && pss->block == last_seen_block &&
992         pss->offset >= last_offset) {
993         /*
994          * We've been once around the RAM and haven't found anything.
995          * Give up.
996          */
997         *again = false;
998         return false;
999     }
1000     if (pss->offset >= pss->block->used_length) {
1001         /* Didn't find anything in this RAM Block */
1002         pss->offset = 0;
1003         pss->block = QLIST_NEXT_RCU(pss->block, next);
1004         if (!pss->block) {
1005             /* Hit the end of the list */
1006             pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
1007             /* Flag that we've looped */
1008             pss->complete_round = true;
1009             ram_bulk_stage = false;
1010             if (migrate_use_xbzrle()) {
1011                 /* If xbzrle is on, stop using the data compression at this
1012                  * point. In theory, xbzrle can do better than compression.
1013                  */
1014                 flush_compressed_data(f);
1015                 compression_switch = false;
1016             }
1017         }
1018         /* Didn't find anything this time, but try again on the new block */
1019         *again = true;
1020         return false;
1021     } else {
1022         /* Can go around again, but... */
1023         *again = true;
1024         /* We've found something so probably don't need to */
1025         return true;
1026     }
1027 }
1028 
1029 /*
1030  * Helper for 'get_queued_page' - gets a page off the queue
1031  *      ms:      MigrationState in
1032  * *offset:      Used to return the offset within the RAMBlock
1033  * ram_addr_abs: global offset in the dirty/sent bitmaps
1034  *
1035  * Returns:      block (or NULL if none available)
1036  */
1037 static RAMBlock *unqueue_page(MigrationState *ms, ram_addr_t *offset,
1038                               ram_addr_t *ram_addr_abs)
1039 {
1040     RAMBlock *block = NULL;
1041 
1042     qemu_mutex_lock(&ms->src_page_req_mutex);
1043     if (!QSIMPLEQ_EMPTY(&ms->src_page_requests)) {
1044         struct MigrationSrcPageRequest *entry =
1045                                 QSIMPLEQ_FIRST(&ms->src_page_requests);
1046         block = entry->rb;
1047         *offset = entry->offset;
1048         *ram_addr_abs = (entry->offset + entry->rb->offset) &
1049                         TARGET_PAGE_MASK;
1050 
1051         if (entry->len > TARGET_PAGE_SIZE) {
1052             entry->len -= TARGET_PAGE_SIZE;
1053             entry->offset += TARGET_PAGE_SIZE;
1054         } else {
1055             memory_region_unref(block->mr);
1056             QSIMPLEQ_REMOVE_HEAD(&ms->src_page_requests, next_req);
1057             g_free(entry);
1058         }
1059     }
1060     qemu_mutex_unlock(&ms->src_page_req_mutex);
1061 
1062     return block;
1063 }
1064 
1065 /*
1066  * Unqueue a page from the queue fed by postcopy page requests; skips pages
1067  * that are already sent (!dirty)
1068  *
1069  *      ms:      MigrationState in
1070  *     pss:      PageSearchStatus structure updated with found block/offset
1071  * ram_addr_abs: global offset in the dirty/sent bitmaps
1072  *
1073  * Returns:      true if a queued page is found
1074  */
1075 static bool get_queued_page(MigrationState *ms, PageSearchStatus *pss,
1076                             ram_addr_t *ram_addr_abs)
1077 {
1078     RAMBlock  *block;
1079     ram_addr_t offset;
1080     bool dirty;
1081 
1082     do {
1083         block = unqueue_page(ms, &offset, ram_addr_abs);
1084         /*
1085          * We're sending this page, and since it's postcopy nothing else
1086          * will dirty it, and we must make sure it doesn't get sent again
1087          * even if this queue request was received after the background
1088          * search already sent it.
1089          */
1090         if (block) {
1091             unsigned long *bitmap;
1092             bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
1093             dirty = test_bit(*ram_addr_abs >> TARGET_PAGE_BITS, bitmap);
1094             if (!dirty) {
1095                 trace_get_queued_page_not_dirty(
1096                     block->idstr, (uint64_t)offset,
1097                     (uint64_t)*ram_addr_abs,
1098                     test_bit(*ram_addr_abs >> TARGET_PAGE_BITS,
1099                          atomic_rcu_read(&migration_bitmap_rcu)->unsentmap));
1100             } else {
1101                 trace_get_queued_page(block->idstr,
1102                                       (uint64_t)offset,
1103                                       (uint64_t)*ram_addr_abs);
1104             }
1105         }
1106 
1107     } while (block && !dirty);
1108 
1109     if (block) {
1110         /*
1111          * As soon as we start servicing pages out of order, then we have
1112          * to kill the bulk stage, since the bulk stage assumes
1113          * in migration_bitmap_find_dirty() that every page is
1114          * dirty, which is no longer true.
1115          */
1116         ram_bulk_stage = false;
1117 
1118         /*
1119          * We want the background search to continue from the queued page
1120          * since the guest is likely to want other pages near to the page
1121          * it just requested.
1122          */
1123         pss->block = block;
1124         pss->offset = offset;
1125     }
1126 
1127     return !!block;
1128 }
1129 
1130 /**
1131  * flush_page_queue: Flush any remaining pages in the ram request queue.
1132  *    It should be empty at the end anyway, but in error cases there may be
1133  *    some left.
1134  *
1135  * ms: MigrationState
1136  */
1137 void flush_page_queue(MigrationState *ms)
1138 {
1139     struct MigrationSrcPageRequest *mspr, *next_mspr;
1140     /* This queue should generally be empty - but in the case of a failed
1141      * migration it might have some entries left over.
1142      */
1143     rcu_read_lock();
1144     QSIMPLEQ_FOREACH_SAFE(mspr, &ms->src_page_requests, next_req, next_mspr) {
1145         memory_region_unref(mspr->rb->mr);
1146         QSIMPLEQ_REMOVE_HEAD(&ms->src_page_requests, next_req);
1147         g_free(mspr);
1148     }
1149     rcu_read_unlock();
1150 }
1151 
1152 /**
1153  * Queue the pages for transmission, e.g. a request from postcopy destination
1154  *   ms: MigrationStatus in which the queue is held
1155  *   rbname: The RAMBlock the request is for - may be NULL (to mean reuse last)
1156  *   start: Offset from the start of the RAMBlock
1157  *   len: Length (in bytes) to send
1158  *   Return: 0 on success
1159  */
1160 int ram_save_queue_pages(MigrationState *ms, const char *rbname,
1161                          ram_addr_t start, ram_addr_t len)
1162 {
1163     RAMBlock *ramblock;
1164 
1165     rcu_read_lock();
1166     if (!rbname) {
1167         /* Reuse last RAMBlock */
1168         ramblock = ms->last_req_rb;
1169 
1170         if (!ramblock) {
1171             /*
1172              * Shouldn't happen, we can't reuse the last RAMBlock if
1173              * it's the 1st request.
1174              */
1175             error_report("ram_save_queue_pages no previous block");
1176             goto err;
1177         }
1178     } else {
1179         ramblock = qemu_ram_block_by_name(rbname);
1180 
1181         if (!ramblock) {
1182             /* We shouldn't be asked for a non-existent RAMBlock */
1183             error_report("ram_save_queue_pages no block '%s'", rbname);
1184             goto err;
1185         }
1186         ms->last_req_rb = ramblock;
1187     }
1188     trace_ram_save_queue_pages(ramblock->idstr, start, len);
1189     if (start+len > ramblock->used_length) {
1190         error_report("%s request overrun start=" RAM_ADDR_FMT " len="
1191                      RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT,
1192                      __func__, start, len, ramblock->used_length);
1193         goto err;
1194     }
1195 
1196     struct MigrationSrcPageRequest *new_entry =
1197         g_malloc0(sizeof(struct MigrationSrcPageRequest));
1198     new_entry->rb = ramblock;
1199     new_entry->offset = start;
1200     new_entry->len = len;
1201 
1202     memory_region_ref(ramblock->mr);
1203     qemu_mutex_lock(&ms->src_page_req_mutex);
1204     QSIMPLEQ_INSERT_TAIL(&ms->src_page_requests, new_entry, next_req);
1205     qemu_mutex_unlock(&ms->src_page_req_mutex);
1206     rcu_read_unlock();
1207 
1208     return 0;
1209 
1210 err:
1211     rcu_read_unlock();
1212     return -1;
1213 }
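/*
 * Usage sketch (the block name and numbers are only illustrative): a
 * postcopy page fault on the destination typically turns into a request
 * such as
 *
 *     ram_save_queue_pages(ms, "pc.ram", 0x7000, TARGET_PAGE_SIZE);
 *
 * which queues a MigrationSrcPageRequest that get_queued_page() later
 * services ahead of the background dirty-page scan.
 */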
1214 
1215 /**
1216  * ram_save_target_page: Save one target page
1217  *
1218  *
1219  * @f: QEMUFile where to send the data
1220  * @block: pointer to block that contains the page we want to send
1221  * @offset: offset inside the block for the page;
1222  * @last_stage: if we are at the completion stage
1223  * @bytes_transferred: increase it with the number of transferred bytes
1224  * @dirty_ram_abs: Address of the start of the dirty page in ram_addr_t space
1225  *
1226  * Returns: Number of pages written.
1227  */
1228 static int ram_save_target_page(MigrationState *ms, QEMUFile *f,
1229                                 RAMBlock *block, ram_addr_t offset,
1230                                 bool last_stage,
1231                                 uint64_t *bytes_transferred,
1232                                 ram_addr_t dirty_ram_abs)
1233 {
1234     int res = 0;
1235 
1236     /* Check if the page is dirty and, if it is, send it */
1237     if (migration_bitmap_clear_dirty(dirty_ram_abs)) {
1238         unsigned long *unsentmap;
1239         if (compression_switch && migrate_use_compression()) {
1240             res = ram_save_compressed_page(f, block, offset,
1241                                            last_stage,
1242                                            bytes_transferred);
1243         } else {
1244             res = ram_save_page(f, block, offset, last_stage,
1245                                 bytes_transferred);
1246         }
1247 
1248         if (res < 0) {
1249             return res;
1250         }
1251         unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
1252         if (unsentmap) {
1253             clear_bit(dirty_ram_abs >> TARGET_PAGE_BITS, unsentmap);
1254         }
1255         /* Only update last_sent_block if a block was actually sent; xbzrle
1256          * might have decided the page was identical so didn't bother writing
1257          * to the stream.
1258          */
1259         if (res > 0) {
1260             last_sent_block = block;
1261         }
1262     }
1263 
1264     return res;
1265 }
1266 
1267 /**
1268  * ram_save_host_page: Starting at *offset, send pages up to the end
1269  *                     of the current host page.  It's valid for the initial
1270  *                     offset to point into the middle of a host page,
1271  *                     in which case the remainder of the host page is sent.
1272  *                     Only dirty target pages are sent.
1273  *
1274  * Returns: Number of pages written.
1275  *
1276  * @f: QEMUFile where to send the data
1277  * @block: pointer to block that contains the page we want to send
1278  * @offset: offset inside the block for the page; updated to last target page
1279  *          sent
1280  * @last_stage: if we are at the completion stage
1281  * @bytes_transferred: increase it with the number of transferred bytes
1282  * @dirty_ram_abs: Address of the start of the dirty page in ram_addr_t space
1283  */
1284 static int ram_save_host_page(MigrationState *ms, QEMUFile *f, RAMBlock *block,
1285                               ram_addr_t *offset, bool last_stage,
1286                               uint64_t *bytes_transferred,
1287                               ram_addr_t dirty_ram_abs)
1288 {
1289     int tmppages, pages = 0;
1290     do {
1291         tmppages = ram_save_target_page(ms, f, block, *offset, last_stage,
1292                                         bytes_transferred, dirty_ram_abs);
1293         if (tmppages < 0) {
1294             return tmppages;
1295         }
1296 
1297         pages += tmppages;
1298         *offset += TARGET_PAGE_SIZE;
1299         dirty_ram_abs += TARGET_PAGE_SIZE;
1300     } while (*offset & (qemu_host_page_size - 1));
1301 
1302     /* The offset we leave with is the last one we looked at */
1303     *offset -= TARGET_PAGE_SIZE;
1304     return pages;
1305 }
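/*
 * Worked example (sizes illustrative): with 64KiB host pages and 4KiB target
 * pages the loop above covers up to 16 target pages per call; the
 * (*offset & (qemu_host_page_size - 1)) test stops it exactly at the host
 * page boundary, and *offset is stepped back one target page so the caller
 * records the last page actually examined.
 */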
1306 
1307 /**
1308  * ram_find_and_save_block: Finds a dirty page and sends it to f
1309  *
1310  * Called within an RCU critical section.
1311  *
1312  * Returns:  The number of pages written
1313  *           0 means no dirty pages
1314  *
1315  * @f: QEMUFile where to send the data
1316  * @last_stage: if we are at the completion stage
1317  * @bytes_transferred: increase it with the number of transferred bytes
1318  *
1319  * On systems where host-page-size > target-page-size it will send all the
1320  * pages in a host page that are dirty.
1321  */
1322 
1323 static int ram_find_and_save_block(QEMUFile *f, bool last_stage,
1324                                    uint64_t *bytes_transferred)
1325 {
1326     PageSearchStatus pss;
1327     MigrationState *ms = migrate_get_current();
1328     int pages = 0;
1329     bool again, found;
1330     ram_addr_t dirty_ram_abs; /* Address of the start of the dirty page in
1331                                  ram_addr_t space */
1332 
1333     pss.block = last_seen_block;
1334     pss.offset = last_offset;
1335     pss.complete_round = false;
1336 
1337     if (!pss.block) {
1338         pss.block = QLIST_FIRST_RCU(&ram_list.blocks);
1339     }
1340 
1341     do {
1342         again = true;
1343         found = get_queued_page(ms, &pss, &dirty_ram_abs);
1344 
1345         if (!found) {
1346             /* priority queue empty, so just search for something dirty */
1347             found = find_dirty_block(f, &pss, &again, &dirty_ram_abs);
1348         }
1349 
1350         if (found) {
1351             pages = ram_save_host_page(ms, f, pss.block, &pss.offset,
1352                                        last_stage, bytes_transferred,
1353                                        dirty_ram_abs);
1354         }
1355     } while (!pages && again);
1356 
1357     last_seen_block = pss.block;
1358     last_offset = pss.offset;
1359 
1360     return pages;
1361 }
1362 
1363 void acct_update_position(QEMUFile *f, size_t size, bool zero)
1364 {
1365     uint64_t pages = size / TARGET_PAGE_SIZE;
1366     if (zero) {
1367         acct_info.dup_pages += pages;
1368     } else {
1369         acct_info.norm_pages += pages;
1370         bytes_transferred += size;
1371         qemu_update_position(f, size);
1372     }
1373 }
1374 
1375 static ram_addr_t ram_save_remaining(void)
1376 {
1377     return migration_dirty_pages;
1378 }
1379 
1380 uint64_t ram_bytes_remaining(void)
1381 {
1382     return ram_save_remaining() * TARGET_PAGE_SIZE;
1383 }
1384 
1385 uint64_t ram_bytes_transferred(void)
1386 {
1387     return bytes_transferred;
1388 }
1389 
1390 uint64_t ram_bytes_total(void)
1391 {
1392     RAMBlock *block;
1393     uint64_t total = 0;
1394 
1395     rcu_read_lock();
1396     QLIST_FOREACH_RCU(block, &ram_list.blocks, next)
1397         total += block->used_length;
1398     rcu_read_unlock();
1399     return total;
1400 }
1401 
1402 void free_xbzrle_decoded_buf(void)
1403 {
1404     g_free(xbzrle_decoded_buf);
1405     xbzrle_decoded_buf = NULL;
1406 }
1407 
1408 static void migration_bitmap_free(struct BitmapRcu *bmap)
1409 {
1410     g_free(bmap->bmap);
1411     g_free(bmap->unsentmap);
1412     g_free(bmap);
1413 }
1414 
1415 static void ram_migration_cleanup(void *opaque)
1416 {
1417     /* The caller has the iothread lock held or is in a bottom half, so there is
1418      * no write race against this migration_bitmap
1419      */
1420     struct BitmapRcu *bitmap = migration_bitmap_rcu;
1421     atomic_rcu_set(&migration_bitmap_rcu, NULL);
1422     if (bitmap) {
1423         memory_global_dirty_log_stop();
1424         call_rcu(bitmap, migration_bitmap_free, rcu);
1425     }
1426 
1427     XBZRLE_cache_lock();
1428     if (XBZRLE.cache) {
1429         cache_fini(XBZRLE.cache);
1430         g_free(XBZRLE.encoded_buf);
1431         g_free(XBZRLE.current_buf);
1432         XBZRLE.cache = NULL;
1433         XBZRLE.encoded_buf = NULL;
1434         XBZRLE.current_buf = NULL;
1435     }
1436     XBZRLE_cache_unlock();
1437 }
1438 
1439 static void reset_ram_globals(void)
1440 {
1441     last_seen_block = NULL;
1442     last_sent_block = NULL;
1443     last_offset = 0;
1444     last_version = ram_list.version;
1445     ram_bulk_stage = true;
1446 }
1447 
1448 #define MAX_WAIT 50 /* ms, half buffered_file limit */
1449 
1450 void migration_bitmap_extend(ram_addr_t old, ram_addr_t new)
1451 {
1452     /* called in the qemu main thread, so there is
1453      * no write race against this migration_bitmap
1454      */
1455     if (migration_bitmap_rcu) {
1456         struct BitmapRcu *old_bitmap = migration_bitmap_rcu, *bitmap;
1457         bitmap = g_new(struct BitmapRcu, 1);
1458         bitmap->bmap = bitmap_new(new);
1459 
1460         /* Prevent bits in the migration_bitmap from being set
1461          * by migration_bitmap_sync_range() at the same time.
1462          * It is safe for migration if a migration_bitmap bit is cleared
1463          * at the same time.
1464          */
1465         qemu_mutex_lock(&migration_bitmap_mutex);
1466         bitmap_copy(bitmap->bmap, old_bitmap->bmap, old);
1467         bitmap_set(bitmap->bmap, old, new - old);
1468 
1469         /* We don't have a way to safely extend the unsentmap
1470          * with RCU; so mark it as missing, and entry to postcopy
1471          * will fail.
1472          */
1473         bitmap->unsentmap = NULL;
1474 
1475         atomic_rcu_set(&migration_bitmap_rcu, bitmap);
1476         qemu_mutex_unlock(&migration_bitmap_mutex);
1477         migration_dirty_pages += new - old;
1478         call_rcu(old_bitmap, migration_bitmap_free, rcu);
1479     }
1480 }
1481 
1482 /*
1483  * 'expected' is the value you expect the bitmap mostly to be full
1484  * of; it won't bother printing lines that are all this value.
1485  * If 'todump' is null the migration bitmap is dumped.
1486  */
1487 void ram_debug_dump_bitmap(unsigned long *todump, bool expected)
1488 {
1489     int64_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS;
1490 
1491     int64_t cur;
1492     int64_t linelen = 128;
1493     char linebuf[129];
1494 
1495     if (!todump) {
1496         todump = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
1497     }
1498 
1499     for (cur = 0; cur < ram_pages; cur += linelen) {
1500         int64_t curb;
1501         bool found = false;
1502         /*
1503          * Last line; catch the case where the line length
1504          * is longer than remaining ram
1505          */
1506         if (cur + linelen > ram_pages) {
1507             linelen = ram_pages - cur;
1508         }
1509         for (curb = 0; curb < linelen; curb++) {
1510             bool thisbit = test_bit(cur + curb, todump);
1511             linebuf[curb] = thisbit ? '1' : '.';
1512             found = found || (thisbit != expected);
1513         }
1514         if (found) {
1515             linebuf[curb] = '\0';
1516             fprintf(stderr,  "0x%08" PRIx64 " : %s\n", cur, linebuf);
1517         }
1518     }
1519 }
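/*
 * Example output line (illustrative): a line such as
 *
 *     0x00000080 : ...1...111111...1................
 *
 * covers 128 pages starting at page index 0x80; '1' marks a set bit and '.'
 * a clear bit, and a line is only printed if at least one bit differs from
 * 'expected'.
 */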
1520 
1521 /* **** functions for postcopy ***** */
1522 
1523 /*
1524  * Callback from postcopy_each_ram_send_discard for each RAMBlock
1525  * Note: At this point the 'unsentmap' is the processed bitmap combined
1526  *       with the dirtymap; so a '1' means it's either dirty or unsent.
1527  * start,length: Indexes into the bitmap for the first bit
1528  *            representing the named block and length in target-pages
1529  */
1530 static int postcopy_send_discard_bm_ram(MigrationState *ms,
1531                                         PostcopyDiscardState *pds,
1532                                         unsigned long start,
1533                                         unsigned long length)
1534 {
1535     unsigned long end = start + length; /* one after the end */
1536     unsigned long current;
1537     unsigned long *unsentmap;
1538 
1539     unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
1540     for (current = start; current < end; ) {
1541         unsigned long one = find_next_bit(unsentmap, end, current);
1542 
1543         if (one < end) {
1544             unsigned long zero = find_next_zero_bit(unsentmap, end, one + 1);
1545             unsigned long discard_length;
1546 
1547             if (zero >= end) {
1548                 discard_length = end - one;
1549             } else {
1550                 discard_length = zero - one;
1551             }
1552             postcopy_discard_send_range(ms, pds, one, discard_length);
1553             current = one + discard_length;
1554         } else {
1555             current = one;
1556         }
1557     }
1558 
1559     return 0;
1560 }
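/*
 * Worked example (bit pattern illustrative): if the unsentmap for this block
 * looks like 0 0 1 1 1 0 0 starting at 'start', find_next_bit() returns
 * one = start + 2, find_next_zero_bit() returns zero = start + 5, so a single
 * discard of length 3 is sent for pages [start + 2, start + 5) and the scan
 * resumes at start + 5.
 */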
1561 
1562 /*
1563  * Utility for the outgoing postcopy code.
1564  *   Calls postcopy_send_discard_bm_ram for each RAMBlock
1565  *   passing it bitmap indexes and name.
1566  * Returns: 0 on success
1567  * (qemu_ram_foreach_block ends up passing unscaled lengths
1568  *  which would mean postcopy code would have to deal with target page)
1569  */
1570 static int postcopy_each_ram_send_discard(MigrationState *ms)
1571 {
1572     struct RAMBlock *block;
1573     int ret;
1574 
1575     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1576         unsigned long first = block->offset >> TARGET_PAGE_BITS;
1577         PostcopyDiscardState *pds = postcopy_discard_send_init(ms,
1578                                                                first,
1579                                                                block->idstr);
1580 
1581         /*
1582          * Postcopy sends chunks of bitmap over the wire, but it
1583          * just needs indexes at this point, avoids it having
1584          * target page specific code.
1585          */
1586         ret = postcopy_send_discard_bm_ram(ms, pds, first,
1587                                     block->used_length >> TARGET_PAGE_BITS);
1588         postcopy_discard_send_finish(ms, pds);
1589         if (ret) {
1590             return ret;
1591         }
1592     }
1593 
1594     return 0;
1595 }
1596 
1597 /*
1598  * Helper for postcopy_chunk_hostpages; it's called twice to clean up
1599  *   the two bitmaps, which are similar but one is inverted.
1600  *
1601  * We search for runs of target-pages that don't start or end on a
1602  * host page boundary;
1603  * unsent_pass=true: Cleans up partially unsent host pages by searching
1604  *                 the unsentmap
1605  * unsent_pass=false: Cleans up partially dirty host pages by searching
1606  *                 the main migration bitmap
1607  *
1608  */
1609 static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
1610                                           RAMBlock *block,
1611                                           PostcopyDiscardState *pds)
1612 {
1613     unsigned long *bitmap;
1614     unsigned long *unsentmap;
1615     unsigned int host_ratio = qemu_host_page_size / TARGET_PAGE_SIZE;
1616     unsigned long first = block->offset >> TARGET_PAGE_BITS;
1617     unsigned long len = block->used_length >> TARGET_PAGE_BITS;
1618     unsigned long last = first + (len - 1);
1619     unsigned long run_start;
1620 
1621     bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
1622     unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
1623 
1624     if (unsent_pass) {
1625         /* Find a sent page */
1626         run_start = find_next_zero_bit(unsentmap, last + 1, first);
1627     } else {
1628         /* Find a dirty page */
1629         run_start = find_next_bit(bitmap, last + 1, first);
1630     }
1631 
1632     while (run_start <= last) {
1633         bool do_fixup = false;
1634         unsigned long fixup_start_addr;
1635         unsigned long host_offset;
1636 
1637         /*
1638          * If the start of this run of pages is in the middle of a host
1639          * page, then we need to fixup this host page.
1640          */
1641         host_offset = run_start % host_ratio;
1642         if (host_offset) {
1643             do_fixup = true;
1644             run_start -= host_offset;
1645             fixup_start_addr = run_start;
1646             /* For the next pass */
1647             run_start = run_start + host_ratio;
1648         } else {
1649             /* Find the end of this run */
1650             unsigned long run_end;
1651             if (unsent_pass) {
1652                 run_end = find_next_bit(unsentmap, last + 1, run_start + 1);
1653             } else {
1654                 run_end = find_next_zero_bit(bitmap, last + 1, run_start + 1);
1655             }
1656             /*
1657              * If the end isn't at the start of a host page, then the
1658              * run doesn't finish at the end of a host page
1659              * and we need to discard.
1660              */
1661             host_offset = run_end % host_ratio;
1662             if (host_offset) {
1663                 do_fixup = true;
1664                 fixup_start_addr = run_end - host_offset;
1665                 /*
1666                  * This host page has gone, the next loop iteration starts
1667                  * from after the fixup
1668                  */
1669                 run_start = fixup_start_addr + host_ratio;
1670             } else {
1671                 /*
1672                  * No discards on this iteration, next loop starts from
1673                  * next sent/dirty page
1674                  */
1675                 run_start = run_end + 1;
1676             }
1677         }
1678 
1679         if (do_fixup) {
1680             unsigned long page;
1681 
1682             /* Tell the destination to discard this page */
1683             if (unsent_pass || !test_bit(fixup_start_addr, unsentmap)) {
1684                 /* For the unsent_pass we:
1685                  *     discard partially sent pages
1686                  * For the !unsent_pass (dirty) we:
1687                  *     discard partially dirty pages that were sent
1688                  *     (any partially sent pages were already discarded
1689                  *     by the previous unsent_pass)
1690                  */
1691                 postcopy_discard_send_range(ms, pds, fixup_start_addr,
1692                                             host_ratio);
1693             }
1694 
1695             /* Clean up the bitmap */
1696             for (page = fixup_start_addr;
1697                  page < fixup_start_addr + host_ratio; page++) {
1698                 /* All pages in this host page are now not sent */
1699                 set_bit(page, unsentmap);
1700 
1701                 /*
1702                  * Remark them as dirty, updating the count for any pages
1703                  * that weren't previously dirty.
1704                  */
1705                 migration_dirty_pages += !test_and_set_bit(page, bitmap);
1706             }
1707         }
1708 
1709         if (unsent_pass) {
1710             /* Find the next sent page for the next iteration */
1711             run_start = find_next_zero_bit(unsentmap, last + 1,
1712                                            run_start);
1713         } else {
1714             /* Find the next dirty page for the next iteration */
1715             run_start = find_next_bit(bitmap, last + 1, run_start);
1716         }
1717     }
1718 }
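
/*
 * Worked example (a sketch, not built): with 16KB host pages and 4KB target
 * pages, host_ratio == 4.  A dirty run covering target pages 5..9 neither
 * starts nor ends on a host-page boundary, so the pass above rounds it out
 * to target pages 4..11 and discards/re-dirties those whole host pages.
 * The helper below only shows the rounding arithmetic; its name and
 * signature are invented for this sketch.
 */
#if 0
static void example_round_run_to_host_pages(unsigned long run_start,
                                            unsigned long run_end, /* exclusive */
                                            unsigned int host_ratio,
                                            unsigned long *fixup_start,
                                            unsigned long *fixup_end)
{
    /* Round the start down and the end up to host-page boundaries */
    *fixup_start = run_start - (run_start % host_ratio);
    *fixup_end = run_end + ((host_ratio - (run_end % host_ratio)) % host_ratio);
    /* run [5, 10) with host_ratio 4 becomes [4, 12), i.e. target pages 4..11 */
}
#endif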
1719 
1720 /*
1721  * Utility for the outgoing postcopy code.
1722  *
1723  * Discard any partially sent host-page size chunks, mark any partially
1724  * dirty host-page size chunks as all dirty.
1725  *
1726  * Returns: 0 on success
1727  */
1728 static int postcopy_chunk_hostpages(MigrationState *ms)
1729 {
1730     struct RAMBlock *block;
1731 
1732     if (qemu_host_page_size == TARGET_PAGE_SIZE) {
1733         /* Easy case - TPS==HPS - nothing to be done */
1734         return 0;
1735     }
1736 
1737     /* Easiest way to make sure we don't resume in the middle of a host-page */
1738     last_seen_block = NULL;
1739     last_sent_block = NULL;
1740     last_offset     = 0;
1741 
1742     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1743         unsigned long first = block->offset >> TARGET_PAGE_BITS;
1744 
1745         PostcopyDiscardState *pds =
1746                          postcopy_discard_send_init(ms, first, block->idstr);
1747 
1748         /* First pass: Discard all partially sent host pages */
1749         postcopy_chunk_hostpages_pass(ms, true, block, pds);
1750         /*
1751          * Second pass: Ensure that all partially dirty host pages are made
1752          * fully dirty.
1753          */
1754         postcopy_chunk_hostpages_pass(ms, false, block, pds);
1755 
1756         postcopy_discard_send_finish(ms, pds);
1757     } /* ram_list loop */
1758 
1759     return 0;
1760 }
1761 
1762 /*
1763  * Transmit the set of pages to be discarded after precopy to the target;
1764  * these are pages that:
1765  *     a) have been previously transmitted but are now dirty again
1766  *     b) have never been transmitted; this ensures that any pages on the
1767  *        destination that have been mapped by background tasks get
1768  *        discarded (transparent huge pages are the specific concern)
1769  * Hopefully this set is fairly sparse
1770  */
1771 int ram_postcopy_send_discard_bitmap(MigrationState *ms)
1772 {
1773     int ret;
1774     unsigned long *bitmap, *unsentmap;
1775 
1776     rcu_read_lock();
1777 
1778     /* This should be our last sync, the src is now paused */
1779     migration_bitmap_sync();
1780 
1781     unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
1782     if (!unsentmap) {
1783         /* We don't have a safe way to resize the unsentmap, so
1784          * if the RAM (and hence the bitmap) was resized it will be
1785          * NULL at this point.
1786          */
1787         error_report("migration ram resized during precopy phase");
1788         rcu_read_unlock();
1789         return -EINVAL;
1790     }
1791 
1792     /* Deal with TPS != HPS */
1793     ret = postcopy_chunk_hostpages(ms);
1794     if (ret) {
1795         rcu_read_unlock();
1796         return ret;
1797     }
1798 
1799     /*
1800      * Update the unsentmap to be unsentmap = unsentmap | dirty
1801      */
1802     bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
1803     bitmap_or(unsentmap, unsentmap, bitmap,
1804                last_ram_offset() >> TARGET_PAGE_BITS);
1805 
1806 
1807     trace_ram_postcopy_send_discard_bitmap();
1808 #ifdef DEBUG_POSTCOPY
1809     ram_debug_dump_bitmap(unsentmap, true);
1810 #endif
1811 
1812     ret = postcopy_each_ram_send_discard(ms);
1813     rcu_read_unlock();
1814 
1815     return ret;
1816 }
1817 
1818 /*
1819  * At the start of the postcopy phase of migration, any now-dirty
1820  * precopied pages are discarded.
1821  *
1822  * start, length describe a byte address range within the RAMBlock
1823  *
1824  * Returns 0 on success.
1825  */
1826 int ram_discard_range(MigrationIncomingState *mis,
1827                       const char *block_name,
1828                       uint64_t start, size_t length)
1829 {
1830     int ret = -1;
1831 
1832     rcu_read_lock();
1833     RAMBlock *rb = qemu_ram_block_by_name(block_name);
1834 
1835     if (!rb) {
1836         error_report("ram_discard_range: Failed to find block '%s'",
1837                      block_name);
1838         goto err;
1839     }
1840 
1841     uint8_t *host_startaddr = rb->host + start;
1842 
1843     if ((uintptr_t)host_startaddr & (qemu_host_page_size - 1)) {
1844         error_report("ram_discard_range: Unaligned start address: %p",
1845                      host_startaddr);
1846         goto err;
1847     }
1848 
1849     if ((start + length) <= rb->used_length) {
1850         uint8_t *host_endaddr = host_startaddr + length;
1851         if ((uintptr_t)host_endaddr & (qemu_host_page_size - 1)) {
1852             error_report("ram_discard_range: Unaligned end address: %p",
1853                          host_endaddr);
1854             goto err;
1855         }
1856         ret = postcopy_ram_discard_range(mis, host_startaddr, length);
1857     } else {
1858         error_report("ram_discard_range: Overrun block '%s' (%" PRIu64
1859                      "/%zx/" RAM_ADDR_FMT")",
1860                      block_name, start, length, rb->used_length);
1861     }
1862 
1863 err:
1864     rcu_read_unlock();
1865 
1866     return ret;
1867 }
1868 
1869 
1870 /* Each of ram_save_setup, ram_save_iterate and ram_save_complete has
1871  * a long-running RCU critical section.  When RCU reclaims in the code
1872  * start to become numerous, it will be necessary to reduce the
1873  * granularity of these critical sections.
1874  */
1875 
1876 static int ram_save_setup(QEMUFile *f, void *opaque)
1877 {
1878     RAMBlock *block;
1879     int64_t ram_bitmap_pages; /* Size of bitmap in pages, including gaps */
1880 
1881     dirty_rate_high_cnt = 0;
1882     bitmap_sync_count = 0;
1883     migration_bitmap_sync_init();
1884     qemu_mutex_init(&migration_bitmap_mutex);
1885 
1886     if (migrate_use_xbzrle()) {
1887         XBZRLE_cache_lock();
1888         XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
1889                                   TARGET_PAGE_SIZE,
1890                                   TARGET_PAGE_SIZE);
1891         if (!XBZRLE.cache) {
1892             XBZRLE_cache_unlock();
1893             error_report("Error creating cache");
1894             return -1;
1895         }
1896         XBZRLE_cache_unlock();
1897 
1898         /* We prefer not to abort if there is no memory */
1899         XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
1900         if (!XBZRLE.encoded_buf) {
1901             error_report("Error allocating encoded_buf");
1902             return -1;
1903         }
1904 
1905         XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
1906         if (!XBZRLE.current_buf) {
1907             error_report("Error allocating current_buf");
1908             g_free(XBZRLE.encoded_buf);
1909             XBZRLE.encoded_buf = NULL;
1910             return -1;
1911         }
1912 
1913         acct_clear();
1914     }
1915 
1916     /* iothread lock needed for ram_list.dirty_memory[] */
1917     qemu_mutex_lock_iothread();
1918     qemu_mutex_lock_ramlist();
1919     rcu_read_lock();
1920     bytes_transferred = 0;
1921     reset_ram_globals();
1922 
1923     ram_bitmap_pages = last_ram_offset() >> TARGET_PAGE_BITS;
1924     migration_bitmap_rcu = g_new0(struct BitmapRcu, 1);
1925     migration_bitmap_rcu->bmap = bitmap_new(ram_bitmap_pages);
1926     bitmap_set(migration_bitmap_rcu->bmap, 0, ram_bitmap_pages);
1927 
1928     if (migrate_postcopy_ram()) {
1929         migration_bitmap_rcu->unsentmap = bitmap_new(ram_bitmap_pages);
1930         bitmap_set(migration_bitmap_rcu->unsentmap, 0, ram_bitmap_pages);
1931     }
1932 
1933     /*
1934      * Count the total number of pages used by RAM blocks, not including
1935      * any gaps due to alignment or unplugs.
1936      */
1937     migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
1938 
1939     memory_global_dirty_log_start();
1940     migration_bitmap_sync();
1941     qemu_mutex_unlock_ramlist();
1942     qemu_mutex_unlock_iothread();
1943 
1944     qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
1945 
1946     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1947         qemu_put_byte(f, strlen(block->idstr));
1948         qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
1949         qemu_put_be64(f, block->used_length);
1950     }
1951 
1952     rcu_read_unlock();
1953 
1954     ram_control_before_iterate(f, RAM_CONTROL_SETUP);
1955     ram_control_after_iterate(f, RAM_CONTROL_SETUP);
1956 
1957     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
1958 
1959     return 0;
1960 }
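
/*
 * For reference, the qemu_put_* calls above give the setup stage this layout
 * on the wire (a sketch derived from ram_save_setup(); RAM control hooks may
 * add their own data):
 *
 *   be64: ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE
 *   for each RAMBlock:
 *       byte : strlen(idstr)
 *       bytes: idstr (not NUL terminated)
 *       be64 : used_length
 *   be64: RAM_SAVE_FLAG_EOS
 */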
1961 
1962 static int ram_save_iterate(QEMUFile *f, void *opaque)
1963 {
1964     int ret;
1965     int i;
1966     int64_t t0;
1967     int pages_sent = 0;
1968 
1969     rcu_read_lock();
1970     if (ram_list.version != last_version) {
1971         reset_ram_globals();
1972     }
1973 
1974     /* Read version before ram_list.blocks */
1975     smp_rmb();
1976 
1977     ram_control_before_iterate(f, RAM_CONTROL_ROUND);
1978 
1979     t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
1980     i = 0;
1981     while ((ret = qemu_file_rate_limit(f)) == 0) {
1982         int pages;
1983 
1984         pages = ram_find_and_save_block(f, false, &bytes_transferred);
1985         /* no more pages to send */
1986         if (pages == 0) {
1987             break;
1988         }
1989         pages_sent += pages;
1990         acct_info.iterations++;
1991 
1992         /* We want to check in the 1st loop, just in case it was the 1st time
1993            and we had to sync the dirty bitmap.
1994            qemu_clock_get_ns() is a bit expensive, so we only check every
1995            few iterations
1996         */
1997         if ((i & 63) == 0) {
1998             uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
1999             if (t1 > MAX_WAIT) {
2000                 DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n",
2001                         t1, i);
2002                 break;
2003             }
2004         }
2005         i++;
2006     }
2007     flush_compressed_data(f);
2008     rcu_read_unlock();
2009 
2010     /*
2011      * Must occur before EOS (or any QEMUFile operation)
2012      * because of RDMA protocol.
2013      */
2014     ram_control_after_iterate(f, RAM_CONTROL_ROUND);
2015 
2016     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
2017     bytes_transferred += 8;
2018 
2019     ret = qemu_file_get_error(f);
2020     if (ret < 0) {
2021         return ret;
2022     }
2023 
2024     return pages_sent;
2025 }
2026 
2027 /* Called with the iothread lock held */
2028 static int ram_save_complete(QEMUFile *f, void *opaque)
2029 {
2030     rcu_read_lock();
2031 
2032     if (!migration_in_postcopy(migrate_get_current())) {
2033         migration_bitmap_sync();
2034     }
2035 
2036     ram_control_before_iterate(f, RAM_CONTROL_FINISH);
2037 
2038     /* try transferring iterative blocks of memory */
2039 
2040     /* flush all remaining blocks regardless of rate limiting */
2041     while (true) {
2042         int pages;
2043 
2044         pages = ram_find_and_save_block(f, true, &bytes_transferred);
2045         /* no more blocks to send */
2046         if (pages == 0) {
2047             break;
2048         }
2049     }
2050 
2051     flush_compressed_data(f);
2052     ram_control_after_iterate(f, RAM_CONTROL_FINISH);
2053 
2054     rcu_read_unlock();
2055 
2056     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
2057 
2058     return 0;
2059 }
2060 
2061 static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
2062                              uint64_t *non_postcopiable_pending,
2063                              uint64_t *postcopiable_pending)
2064 {
2065     uint64_t remaining_size;
2066 
2067     remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
2068 
2069     if (!migration_in_postcopy(migrate_get_current()) &&
2070         remaining_size < max_size) {
2071         qemu_mutex_lock_iothread();
2072         rcu_read_lock();
2073         migration_bitmap_sync();
2074         rcu_read_unlock();
2075         qemu_mutex_unlock_iothread();
2076         remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
2077     }
2078 
2079     /* We can do postcopy, and all the data is postcopiable */
2080     *postcopiable_pending += remaining_size;
2081 }
2082 
2083 static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
2084 {
2085     unsigned int xh_len;
2086     int xh_flags;
2087 
2088     if (!xbzrle_decoded_buf) {
2089         xbzrle_decoded_buf = g_malloc(TARGET_PAGE_SIZE);
2090     }
2091 
2092     /* extract RLE header */
2093     xh_flags = qemu_get_byte(f);
2094     xh_len = qemu_get_be16(f);
2095 
2096     if (xh_flags != ENCODING_FLAG_XBZRLE) {
2097         error_report("Failed to load XBZRLE page - wrong compression!");
2098         return -1;
2099     }
2100 
2101     if (xh_len > TARGET_PAGE_SIZE) {
2102         error_report("Failed to load XBZRLE page - len overflow!");
2103         return -1;
2104     }
2105     /* load data and decode */
2106     qemu_get_buffer(f, xbzrle_decoded_buf, xh_len);
2107 
2108     /* decode RLE */
2109     if (xbzrle_decode_buffer(xbzrle_decoded_buf, xh_len, host,
2110                              TARGET_PAGE_SIZE) == -1) {
2111         error_report("Failed to load XBZRLE page - decode error!");
2112         return -1;
2113     }
2114 
2115     return 0;
2116 }
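
/*
 * For reference, the qemu_get_* calls above expect an XBZRLE page to be
 * encoded on the wire as (a sketch derived from load_xbzrle()):
 *
 *   byte : ENCODING_FLAG_XBZRLE
 *   be16 : encoded length (<= TARGET_PAGE_SIZE)
 *   bytes: XBZRLE-encoded data of that length
 */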
2117 
2118 /*
2119  * Must be called from within an RCU critical section.
2120  * Returns a pointer from within the RCU-protected ram_list.
2121  *
2122  * Read a RAMBlock ID from the stream f, find the host address of the
2123  * start of that block and add on 'offset'
2124  *
2125  * f: Stream to read from
2126  * offset: Offset within the block
2127  * flags: Page flags (mostly to see if it's a continuation of previous block)
2128  */
2129 static inline void *host_from_stream_offset(QEMUFile *f,
2130                                             ram_addr_t offset,
2131                                             int flags)
2132 {
2133     static RAMBlock *block = NULL;
2134     char id[256];
2135     uint8_t len;
2136 
2137     if (flags & RAM_SAVE_FLAG_CONTINUE) {
2138         if (!block || block->max_length <= offset) {
2139             error_report("Ack, bad migration stream!");
2140             return NULL;
2141         }
2142 
2143         return block->host + offset;
2144     }
2145 
2146     len = qemu_get_byte(f);
2147     qemu_get_buffer(f, (uint8_t *)id, len);
2148     id[len] = 0;
2149 
2150     block = qemu_ram_block_by_name(id);
2151     if (block && block->max_length > offset) {
2152         return block->host + offset;
2153     }
2154 
2155     error_report("Can't find block %s", id);
2156     return NULL;
2157 }
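
/*
 * For reference, the page headers decoded above are laid out as (a sketch
 * derived from host_from_stream_offset() and its callers):
 *
 *   be64 : page offset OR'd with the RAM_SAVE_FLAG_* bits
 *   if RAM_SAVE_FLAG_CONTINUE is not set:
 *       byte : strlen(idstr) of the RAMBlock
 *       bytes: idstr (not NUL terminated)
 */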
2158 
2159 /*
2160  * If a page (or a whole RDMA chunk) has been
2161  * determined to be zero, then zap it.
2162  */
2163 void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
2164 {
2165     if (ch != 0 || !is_zero_range(host, size)) {
2166         memset(host, ch, size);
2167     }
2168 }
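
/*
 * Note: the is_zero_range() check above lets a zero fill be skipped when the
 * destination page is already zero, so pages the destination has never
 * touched are not allocated/dirtied just to write zeroes into them.
 */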
2169 
2170 static void *do_data_decompress(void *opaque)
2171 {
2172     DecompressParam *param = opaque;
2173     unsigned long pagesize;
2174 
2175     while (!quit_decomp_thread) {
2176         qemu_mutex_lock(&param->mutex);
2177         while (!param->start && !quit_decomp_thread) {
2178             qemu_cond_wait(&param->cond, &param->mutex);
2179             pagesize = TARGET_PAGE_SIZE;
2180             if (!quit_decomp_thread) {
2181                 /* uncompress() can fail in some cases, especially when the
2182                  * page is dirtied while it is being compressed.  That's not
2183                  * a problem because the dirty page will be retransmitted
2184                  * and uncompress() won't corrupt the data in other pages.
2185                  */
2186                 uncompress((Bytef *)param->des, &pagesize,
2187                            (const Bytef *)param->compbuf, param->len);
2188             }
2189             param->start = false;
2190         }
2191         qemu_mutex_unlock(&param->mutex);
2192     }
2193 
2194     return NULL;
2195 }
2196 
2197 void migrate_decompress_threads_create(void)
2198 {
2199     int i, thread_count;
2200 
2201     thread_count = migrate_decompress_threads();
2202     decompress_threads = g_new0(QemuThread, thread_count);
2203     decomp_param = g_new0(DecompressParam, thread_count);
2204     compressed_data_buf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
2205     quit_decomp_thread = false;
2206     for (i = 0; i < thread_count; i++) {
2207         qemu_mutex_init(&decomp_param[i].mutex);
2208         qemu_cond_init(&decomp_param[i].cond);
2209         decomp_param[i].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
2210         qemu_thread_create(decompress_threads + i, "decompress",
2211                            do_data_decompress, decomp_param + i,
2212                            QEMU_THREAD_JOINABLE);
2213     }
2214 }
2215 
2216 void migrate_decompress_threads_join(void)
2217 {
2218     int i, thread_count;
2219 
2220     quit_decomp_thread = true;
2221     thread_count = migrate_decompress_threads();
2222     for (i = 0; i < thread_count; i++) {
2223         qemu_mutex_lock(&decomp_param[i].mutex);
2224         qemu_cond_signal(&decomp_param[i].cond);
2225         qemu_mutex_unlock(&decomp_param[i].mutex);
2226     }
2227     for (i = 0; i < thread_count; i++) {
2228         qemu_thread_join(decompress_threads + i);
2229         qemu_mutex_destroy(&decomp_param[i].mutex);
2230         qemu_cond_destroy(&decomp_param[i].cond);
2231         g_free(decomp_param[i].compbuf);
2232     }
2233     g_free(decompress_threads);
2234     g_free(decomp_param);
2235     g_free(compressed_data_buf);
2236     decompress_threads = NULL;
2237     decomp_param = NULL;
2238     compressed_data_buf = NULL;
2239 }
2240 
2241 static void decompress_data_with_multi_threads(uint8_t *compbuf,
2242                                                void *host, int len)
2243 {
2244     int idx, thread_count;
2245 
2246     thread_count = migrate_decompress_threads();
2247     while (true) {
2248         for (idx = 0; idx < thread_count; idx++) {
2249             if (!decomp_param[idx].start) {
2250                 memcpy(decomp_param[idx].compbuf, compbuf, len);
2251                 decomp_param[idx].des = host;
2252                 decomp_param[idx].len = len;
2253                 start_decompression(&decomp_param[idx]);
2254                 break;
2255             }
2256         }
2257         if (idx < thread_count) {
2258             break;
2259         }
2260     }
2261 }
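
/*
 * Note: the loop above busy-waits until one of the decompression threads is
 * idle (decomp_param[idx].start == false); start_decompression() then hands
 * the copied buffer to that thread, which picks it up in do_data_decompress().
 */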
2262 
2263 /*
2264  * Allocate data structures etc. needed by incoming migration with postcopy-ram;
2265  * postcopy-ram's similarly named postcopy_ram_incoming_init does the work
2266  */
2267 int ram_postcopy_incoming_init(MigrationIncomingState *mis)
2268 {
2269     size_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS;
2270 
2271     return postcopy_ram_incoming_init(mis, ram_pages);
2272 }
2273 
2274 /*
2275  * Called in postcopy mode by ram_load().
2276  * rcu_read_lock is taken prior to this being called.
2277  */
2278 static int ram_load_postcopy(QEMUFile *f)
2279 {
2280     int flags = 0, ret = 0;
2281     bool place_needed = false;
2282     bool matching_page_sizes = qemu_host_page_size == TARGET_PAGE_SIZE;
2283     MigrationIncomingState *mis = migration_incoming_get_current();
2284     /* Temporary page that is later 'placed' */
2285     void *postcopy_host_page = postcopy_get_tmp_page(mis);
2286     void *last_host = NULL;
2287     bool all_zero = false;
2288 
2289     while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
2290         ram_addr_t addr;
2291         void *host = NULL;
2292         void *page_buffer = NULL;
2293         void *place_source = NULL;
2294         uint8_t ch;
2295 
2296         addr = qemu_get_be64(f);
2297         flags = addr & ~TARGET_PAGE_MASK;
2298         addr &= TARGET_PAGE_MASK;
2299 
2300         trace_ram_load_postcopy_loop((uint64_t)addr, flags);
2301         place_needed = false;
2302         if (flags & (RAM_SAVE_FLAG_COMPRESS | RAM_SAVE_FLAG_PAGE)) {
2303             host = host_from_stream_offset(f, addr, flags);
2304             if (!host) {
2305                 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
2306                 ret = -EINVAL;
2307                 break;
2308             }
2309             page_buffer = host;
2310             /*
2311              * Postcopy requires that we place whole host pages atomically.
2312              * To make it atomic, the data is read into a temporary page
2313              * that's moved into place later.
2314              * The migration protocol uses possibly smaller target pages;
2315              * however, the source ensures it always sends all the components
2316              * of a host page in order.
2317              */
2318             page_buffer = postcopy_host_page +
2319                           ((uintptr_t)host & ~qemu_host_page_mask);
2320             /* 1st TP of the HP: if all TPs turn out zero we can optimise the place */
2321             if (!((uintptr_t)host & ~qemu_host_page_mask)) {
2322                 all_zero = true;
2323             } else {
2324                 /* not the 1st TP within the HP */
2325                 if (host != (last_host + TARGET_PAGE_SIZE)) {
2326                     error_report("Non-sequential target page %p/%p\n",
2327                     error_report("Non-sequential target page %p/%p",
2328                     ret = -EINVAL;
2329                     break;
2330                 }
2331             }
2332 
2333 
2334             /*
2335              * If it's the last part of a host page then we place the host
2336              * page
2337              */
2338             place_needed = (((uintptr_t)host + TARGET_PAGE_SIZE) &
2339                                      ~qemu_host_page_mask) == 0;
2340             place_source = postcopy_host_page;
2341         }
2342         last_host = host;
2343 
2344         switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
2345         case RAM_SAVE_FLAG_COMPRESS:
2346             ch = qemu_get_byte(f);
2347             memset(page_buffer, ch, TARGET_PAGE_SIZE);
2348             if (ch) {
2349                 all_zero = false;
2350             }
2351             break;
2352 
2353         case RAM_SAVE_FLAG_PAGE:
2354             all_zero = false;
2355             if (!place_needed || !matching_page_sizes) {
2356                 qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE);
2357             } else {
2358                 /* Avoid an extra copy out of the qemu_file buffer; postcopy
2359                  * is going to copy the page into place later anyway.  We can
2360                  * only do this when the read is done in one go (matching page sizes)
2361                  */
2362                 qemu_get_buffer_in_place(f, (uint8_t **)&place_source,
2363                                          TARGET_PAGE_SIZE);
2364             }
2365             break;
2366         case RAM_SAVE_FLAG_EOS:
2367             /* normal exit */
2368             break;
2369         default:
2370             error_report("Unknown combination of migration flags: %#x"
2371                          " (postcopy mode)", flags);
2372             ret = -EINVAL;
2373         }
2374 
2375         if (place_needed) {
2376             /* This gets called at the last target page in the host page */
2377             if (all_zero) {
2378                 ret = postcopy_place_page_zero(mis,
2379                                                host + TARGET_PAGE_SIZE -
2380                                                qemu_host_page_size);
2381             } else {
2382                 ret = postcopy_place_page(mis, host + TARGET_PAGE_SIZE -
2383                                                qemu_host_page_size,
2384                                                place_source);
2385             }
2386         }
2387         if (!ret) {
2388             ret = qemu_file_get_error(f);
2389         }
2390     }
2391 
2392     return ret;
2393 }
2394 
2395 static int ram_load(QEMUFile *f, void *opaque, int version_id)
2396 {
2397     int flags = 0, ret = 0;
2398     static uint64_t seq_iter;
2399     int len = 0;
2400     /*
2401      * If the system is running in postcopy mode, page inserts into host memory
2402      * must be atomic
2403      */
2404     bool postcopy_running = postcopy_state_get() >= POSTCOPY_INCOMING_LISTENING;
2405 
2406     seq_iter++;
2407 
2408     if (version_id != 4) {
2409         ret = -EINVAL;
2410     }
2411 
2412     /* This RCU critical section can be very long running.
2413      * When RCU reclaims in the code start to become numerous,
2414      * it will be necessary to reduce the granularity of this
2415      * critical section.
2416      */
2417     rcu_read_lock();
2418 
2419     if (postcopy_running) {
2420         ret = ram_load_postcopy(f);
2421     }
2422 
2423     while (!postcopy_running && !ret && !(flags & RAM_SAVE_FLAG_EOS)) {
2424         ram_addr_t addr, total_ram_bytes;
2425         void *host = NULL;
2426         uint8_t ch;
2427 
2428         addr = qemu_get_be64(f);
2429         flags = addr & ~TARGET_PAGE_MASK;
2430         addr &= TARGET_PAGE_MASK;
2431 
2432         if (flags & (RAM_SAVE_FLAG_COMPRESS | RAM_SAVE_FLAG_PAGE |
2433                      RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
2434             host = host_from_stream_offset(f, addr, flags);
2435             if (!host) {
2436                 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
2437                 ret = -EINVAL;
2438                 break;
2439             }
2440         }
2441 
2442         switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
2443         case RAM_SAVE_FLAG_MEM_SIZE:
2444             /* Synchronize RAM block list */
2445             total_ram_bytes = addr;
2446             while (!ret && total_ram_bytes) {
2447                 RAMBlock *block;
2448                 char id[256];
2449                 ram_addr_t length;
2450 
2451                 len = qemu_get_byte(f);
2452                 qemu_get_buffer(f, (uint8_t *)id, len);
2453                 id[len] = 0;
2454                 length = qemu_get_be64(f);
2455 
2456                 block = qemu_ram_block_by_name(id);
2457                 if (block) {
2458                     if (length != block->used_length) {
2459                         Error *local_err = NULL;
2460 
2461                         ret = qemu_ram_resize(block->offset, length,
2462                                               &local_err);
2463                         if (local_err) {
2464                             error_report_err(local_err);
2465                         }
2466                     }
2467                     ram_control_load_hook(f, RAM_CONTROL_BLOCK_REG,
2468                                           block->idstr);
2469                 } else {
2470                     error_report("Unknown ramblock \"%s\", cannot "
2471                                  "accept migration", id);
2472                     ret = -EINVAL;
2473                 }
2474 
2475                 total_ram_bytes -= length;
2476             }
2477             break;
2478 
2479         case RAM_SAVE_FLAG_COMPRESS:
2480             ch = qemu_get_byte(f);
2481             ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
2482             break;
2483 
2484         case RAM_SAVE_FLAG_PAGE:
2485             qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
2486             break;
2487 
2488         case RAM_SAVE_FLAG_COMPRESS_PAGE:
2489             len = qemu_get_be32(f);
2490             if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
2491                 error_report("Invalid compressed data length: %d", len);
2492                 ret = -EINVAL;
2493                 break;
2494             }
2495             qemu_get_buffer(f, compressed_data_buf, len);
2496             decompress_data_with_multi_threads(compressed_data_buf, host, len);
2497             break;
2498 
2499         case RAM_SAVE_FLAG_XBZRLE:
2500             if (load_xbzrle(f, addr, host) < 0) {
2501                 error_report("Failed to decompress XBZRLE page at "
2502                              RAM_ADDR_FMT, addr);
2503                 ret = -EINVAL;
2504                 break;
2505             }
2506             break;
2507         case RAM_SAVE_FLAG_EOS:
2508             /* normal exit */
2509             break;
2510         default:
2511             if (flags & RAM_SAVE_FLAG_HOOK) {
2512                 ram_control_load_hook(f, RAM_CONTROL_HOOK, NULL);
2513             } else {
2514                 error_report("Unknown combination of migration flags: %#x",
2515                              flags);
2516                 ret = -EINVAL;
2517             }
2518         }
2519         if (!ret) {
2520             ret = qemu_file_get_error(f);
2521         }
2522     }
2523 
2524     rcu_read_unlock();
2525     DPRINTF("Completed load of VM with exit code %d seq iteration "
2526             "%" PRIu64 "\n", ret, seq_iter);
2527     return ret;
2528 }
2529 
2530 static SaveVMHandlers savevm_ram_handlers = {
2531     .save_live_setup = ram_save_setup,
2532     .save_live_iterate = ram_save_iterate,
2533     .save_live_complete_postcopy = ram_save_complete,
2534     .save_live_complete_precopy = ram_save_complete,
2535     .save_live_pending = ram_save_pending,
2536     .load_state = ram_load,
2537     .cleanup = ram_migration_cleanup,
2538 };
2539 
2540 void ram_mig_init(void)
2541 {
2542     qemu_mutex_init(&XBZRLE.lock);
2543     register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, NULL);
2544 }
2545