xref: /openbmc/qemu/migration/ram.c (revision 0a9516c2)
1 /*
2  * QEMU System Emulator
3  *
4  * Copyright (c) 2003-2008 Fabrice Bellard
5  * Copyright (c) 2011-2015 Red Hat Inc
6  *
7  * Authors:
8  *  Juan Quintela <quintela@redhat.com>
9  *
10  * Permission is hereby granted, free of charge, to any person obtaining a copy
11  * of this software and associated documentation files (the "Software"), to deal
12  * in the Software without restriction, including without limitation the rights
13  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
14  * copies of the Software, and to permit persons to whom the Software is
15  * furnished to do so, subject to the following conditions:
16  *
17  * The above copyright notice and this permission notice shall be included in
18  * all copies or substantial portions of the Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
24  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
26  * THE SOFTWARE.
27  */
28 #include <stdint.h>
29 #include <zlib.h>
30 #include "qemu/bitops.h"
31 #include "qemu/bitmap.h"
32 #include "qemu/timer.h"
33 #include "qemu/main-loop.h"
34 #include "migration/migration.h"
35 #include "migration/postcopy-ram.h"
36 #include "exec/address-spaces.h"
37 #include "migration/page_cache.h"
38 #include "qemu/error-report.h"
39 #include "trace.h"
40 #include "exec/ram_addr.h"
41 #include "qemu/rcu_queue.h"
42 
43 #ifdef DEBUG_MIGRATION_RAM
44 #define DPRINTF(fmt, ...) \
45     do { fprintf(stdout, "migration_ram: " fmt, ## __VA_ARGS__); } while (0)
46 #else
47 #define DPRINTF(fmt, ...) \
48     do { } while (0)
49 #endif
50 
51 static int dirty_rate_high_cnt;
52 
53 static uint64_t bitmap_sync_count;
54 
55 /***********************************************************/
56 /* ram save/restore */
57 
58 #define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
59 #define RAM_SAVE_FLAG_COMPRESS 0x02
60 #define RAM_SAVE_FLAG_MEM_SIZE 0x04
61 #define RAM_SAVE_FLAG_PAGE     0x08
62 #define RAM_SAVE_FLAG_EOS      0x10
63 #define RAM_SAVE_FLAG_CONTINUE 0x20
64 #define RAM_SAVE_FLAG_XBZRLE   0x40
65 /* 0x80 is reserved in migration.h; start with 0x100 for the next flag */
66 #define RAM_SAVE_FLAG_COMPRESS_PAGE    0x100
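
/*
 * These flags travel in the low bits of the page offset word written by
 * save_page_header(); the offset itself is always target-page aligned, so
 * the two never collide.  As a purely illustrative example, assuming a
 * 4 KiB target page, a normal page at block offset 0x3000 is announced on
 * the wire as the be64 value 0x3000 | RAM_SAVE_FLAG_PAGE == 0x3008.
 */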
67 
68 static const uint8_t ZERO_TARGET_PAGE[TARGET_PAGE_SIZE];
69 
70 static inline bool is_zero_range(uint8_t *p, uint64_t size)
71 {
72     return buffer_find_nonzero_offset(p, size) == size;
73 }
74 
75 /* This struct contains the XBZRLE cache and a static page
76    used by the compression */
77 static struct {
78     /* buffer used for XBZRLE encoding */
79     uint8_t *encoded_buf;
80     /* buffer for storing page content */
81     uint8_t *current_buf;
82     /* Cache for XBZRLE, Protected by lock. */
83     PageCache *cache;
84     QemuMutex lock;
85 } XBZRLE;
86 
87 /* buffer used for XBZRLE decoding */
88 static uint8_t *xbzrle_decoded_buf;
89 
90 static void XBZRLE_cache_lock(void)
91 {
92     if (migrate_use_xbzrle())
93         qemu_mutex_lock(&XBZRLE.lock);
94 }
95 
96 static void XBZRLE_cache_unlock(void)
97 {
98     if (migrate_use_xbzrle())
99         qemu_mutex_unlock(&XBZRLE.lock);
100 }
101 
102 /*
103  * called from qmp_migrate_set_cache_size in the main thread, possibly while
104  * a migration is in progress.
105  * A running migration may be using the cache and might finish during this
106  * call, hence changes to the cache are protected by XBZRLE.lock.
107  */
108 int64_t xbzrle_cache_resize(int64_t new_size)
109 {
110     PageCache *new_cache;
111     int64_t ret;
112 
113     if (new_size < TARGET_PAGE_SIZE) {
114         return -1;
115     }
116 
117     XBZRLE_cache_lock();
118 
119     if (XBZRLE.cache != NULL) {
120         if (pow2floor(new_size) == migrate_xbzrle_cache_size()) {
121             goto out_new_size;
122         }
123         new_cache = cache_init(new_size / TARGET_PAGE_SIZE,
124                                         TARGET_PAGE_SIZE);
125         if (!new_cache) {
126             error_report("Error creating cache");
127             ret = -1;
128             goto out;
129         }
130 
131         cache_fini(XBZRLE.cache);
132         XBZRLE.cache = new_cache;
133     }
134 
135 out_new_size:
136     ret = pow2floor(new_size);
137 out:
138     XBZRLE_cache_unlock();
139     return ret;
140 }
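
/*
 * A worked example of the resize semantics above (numbers are only
 * illustrative): asking for a 600 MiB cache reports pow2floor(600 MiB),
 * i.e. 512 MiB, back to the caller, while any request smaller than one
 * TARGET_PAGE_SIZE is rejected with -1.
 */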
141 
142 /* accounting for migration statistics */
143 typedef struct AccountingInfo {
144     uint64_t dup_pages;
145     uint64_t skipped_pages;
146     uint64_t norm_pages;
147     uint64_t iterations;
148     uint64_t xbzrle_bytes;
149     uint64_t xbzrle_pages;
150     uint64_t xbzrle_cache_miss;
151     double xbzrle_cache_miss_rate;
152     uint64_t xbzrle_overflows;
153 } AccountingInfo;
154 
155 static AccountingInfo acct_info;
156 
157 static void acct_clear(void)
158 {
159     memset(&acct_info, 0, sizeof(acct_info));
160 }
161 
162 uint64_t dup_mig_bytes_transferred(void)
163 {
164     return acct_info.dup_pages * TARGET_PAGE_SIZE;
165 }
166 
167 uint64_t dup_mig_pages_transferred(void)
168 {
169     return acct_info.dup_pages;
170 }
171 
172 uint64_t skipped_mig_bytes_transferred(void)
173 {
174     return acct_info.skipped_pages * TARGET_PAGE_SIZE;
175 }
176 
177 uint64_t skipped_mig_pages_transferred(void)
178 {
179     return acct_info.skipped_pages;
180 }
181 
182 uint64_t norm_mig_bytes_transferred(void)
183 {
184     return acct_info.norm_pages * TARGET_PAGE_SIZE;
185 }
186 
187 uint64_t norm_mig_pages_transferred(void)
188 {
189     return acct_info.norm_pages;
190 }
191 
192 uint64_t xbzrle_mig_bytes_transferred(void)
193 {
194     return acct_info.xbzrle_bytes;
195 }
196 
197 uint64_t xbzrle_mig_pages_transferred(void)
198 {
199     return acct_info.xbzrle_pages;
200 }
201 
202 uint64_t xbzrle_mig_pages_cache_miss(void)
203 {
204     return acct_info.xbzrle_cache_miss;
205 }
206 
207 double xbzrle_mig_cache_miss_rate(void)
208 {
209     return acct_info.xbzrle_cache_miss_rate;
210 }
211 
212 uint64_t xbzrle_mig_pages_overflow(void)
213 {
214     return acct_info.xbzrle_overflows;
215 }
216 
217 /* This is the last block that we have visited searching for dirty pages
218  */
219 static RAMBlock *last_seen_block;
220 /* This is the last block from where we have sent data */
221 static RAMBlock *last_sent_block;
222 static ram_addr_t last_offset;
223 static QemuMutex migration_bitmap_mutex;
224 static uint64_t migration_dirty_pages;
225 static uint32_t last_version;
226 static bool ram_bulk_stage;
227 
228 /* used by the search for pages to send */
229 struct PageSearchStatus {
230     /* Current block being searched */
231     RAMBlock    *block;
232     /* Current offset to search from */
233     ram_addr_t   offset;
234     /* Set once we wrap around */
235     bool         complete_round;
236 };
237 typedef struct PageSearchStatus PageSearchStatus;
238 
239 static struct BitmapRcu {
240     struct rcu_head rcu;
241     /* Main migration bitmap */
242     unsigned long *bmap;
243     /* bitmap of pages that haven't been sent even once;
244      * only maintained and used in postcopy at the moment,
245      * where it's used to send the dirtymap at the start
246      * of the postcopy phase
247      */
248     unsigned long *unsentmap;
249 } *migration_bitmap_rcu;
250 
251 struct CompressParam {
252     bool start;
253     bool done;
254     QEMUFile *file;
255     QemuMutex mutex;
256     QemuCond cond;
257     RAMBlock *block;
258     ram_addr_t offset;
259 };
260 typedef struct CompressParam CompressParam;
261 
262 struct DecompressParam {
263     bool start;
264     QemuMutex mutex;
265     QemuCond cond;
266     void *des;
267     uint8_t *compbuf;
268     int len;
269 };
270 typedef struct DecompressParam DecompressParam;
271 
272 static CompressParam *comp_param;
273 static QemuThread *compress_threads;
274 /* comp_done_cond is used to wake up the migration thread when
275  * one of the compression threads has finished the compression.
276  * comp_done_lock is used together with comp_done_cond.
277  */
278 static QemuMutex *comp_done_lock;
279 static QemuCond *comp_done_cond;
280 /* The empty QEMUFileOps will be used by file in CompressParam */
281 static const QEMUFileOps empty_ops = { };
282 
283 static bool compression_switch;
284 static bool quit_comp_thread;
285 static bool quit_decomp_thread;
286 static DecompressParam *decomp_param;
287 static QemuThread *decompress_threads;
288 static uint8_t *compressed_data_buf;
289 
290 static int do_compress_ram_page(CompressParam *param);
291 
292 static void *do_data_compress(void *opaque)
293 {
294     CompressParam *param = opaque;
295 
296     while (!quit_comp_thread) {
297         qemu_mutex_lock(&param->mutex);
298         /* Re-check quit_comp_thread in case
299          * terminate_compression_threads() is called just before
300          * qemu_mutex_lock(&param->mutex) and after
301          * while (!quit_comp_thread); re-checking it here makes
302          * sure the compression thread terminates as expected.
303          */
304         while (!param->start && !quit_comp_thread) {
305             qemu_cond_wait(&param->cond, &param->mutex);
306         }
307         if (!quit_comp_thread) {
308             do_compress_ram_page(param);
309         }
310         param->start = false;
311         qemu_mutex_unlock(&param->mutex);
312 
313         qemu_mutex_lock(comp_done_lock);
314         param->done = true;
315         qemu_cond_signal(comp_done_cond);
316         qemu_mutex_unlock(comp_done_lock);
317     }
318 
319     return NULL;
320 }
321 
322 static inline void terminate_compression_threads(void)
323 {
324     int idx, thread_count;
325 
326     thread_count = migrate_compress_threads();
327     quit_comp_thread = true;
328     for (idx = 0; idx < thread_count; idx++) {
329         qemu_mutex_lock(&comp_param[idx].mutex);
330         qemu_cond_signal(&comp_param[idx].cond);
331         qemu_mutex_unlock(&comp_param[idx].mutex);
332     }
333 }
334 
335 void migrate_compress_threads_join(void)
336 {
337     int i, thread_count;
338 
339     if (!migrate_use_compression()) {
340         return;
341     }
342     terminate_compression_threads();
343     thread_count = migrate_compress_threads();
344     for (i = 0; i < thread_count; i++) {
345         qemu_thread_join(compress_threads + i);
346         qemu_fclose(comp_param[i].file);
347         qemu_mutex_destroy(&comp_param[i].mutex);
348         qemu_cond_destroy(&comp_param[i].cond);
349     }
350     qemu_mutex_destroy(comp_done_lock);
351     qemu_cond_destroy(comp_done_cond);
352     g_free(compress_threads);
353     g_free(comp_param);
354     g_free(comp_done_cond);
355     g_free(comp_done_lock);
356     compress_threads = NULL;
357     comp_param = NULL;
358     comp_done_cond = NULL;
359     comp_done_lock = NULL;
360 }
361 
362 void migrate_compress_threads_create(void)
363 {
364     int i, thread_count;
365 
366     if (!migrate_use_compression()) {
367         return;
368     }
369     quit_comp_thread = false;
370     compression_switch = true;
371     thread_count = migrate_compress_threads();
372     compress_threads = g_new0(QemuThread, thread_count);
373     comp_param = g_new0(CompressParam, thread_count);
374     comp_done_cond = g_new0(QemuCond, 1);
375     comp_done_lock = g_new0(QemuMutex, 1);
376     qemu_cond_init(comp_done_cond);
377     qemu_mutex_init(comp_done_lock);
378     for (i = 0; i < thread_count; i++) {
379         /* comp_param[i].file is just used as a dummy buffer to save data; set
380          * its ops to empty.
381          */
382         comp_param[i].file = qemu_fopen_ops(NULL, &empty_ops);
383         comp_param[i].done = true;
384         qemu_mutex_init(&comp_param[i].mutex);
385         qemu_cond_init(&comp_param[i].cond);
386         qemu_thread_create(compress_threads + i, "compress",
387                            do_data_compress, comp_param + i,
388                            QEMU_THREAD_JOINABLE);
389     }
390 }
391 
392 /**
393  * save_page_header: Write page header to wire
394  *
395  * If this is the 1st block, it also writes the block identification
396  *
397  * Returns: Number of bytes written
398  *
399  * @f: QEMUFile where to send the data
400  * @block: block that contains the page we want to send
401  * @offset: offset inside the block for the page
402  *          in the lower bits, it contains flags
403  */
404 static size_t save_page_header(QEMUFile *f, RAMBlock *block, ram_addr_t offset)
405 {
406     size_t size, len;
407 
408     qemu_put_be64(f, offset);
409     size = 8;
410 
411     if (!(offset & RAM_SAVE_FLAG_CONTINUE)) {
412         len = strlen(block->idstr);
413         qemu_put_byte(f, len);
414         qemu_put_buffer(f, (uint8_t *)block->idstr, len);
415         size += 1 + len;
416     }
417     return size;
418 }
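
/*
 * A sketch of the resulting wire layout (sizes in bytes):
 *
 *   first page of a block:  [8: offset | flags][1: strlen(idstr)][idstr]
 *   continuing a block:     [8: offset | flags | RAM_SAVE_FLAG_CONTINUE]
 *
 * so the size returned above is either 8 or 9 + strlen(idstr).
 */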
419 
420 /* Reduce amount of guest cpu execution to hopefully slow down memory writes.
421  * If guest dirty memory rate is reduced below the rate at which we can
422  * transfer pages to the destination then we should be able to complete
423  * migration. Some workloads dirty memory way too fast and will not effectively
424  * converge, even with auto-converge.
425  */
426 static void mig_throttle_guest_down(void)
427 {
428     MigrationState *s = migrate_get_current();
429     uint64_t pct_initial =
430             s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INITIAL];
431     uint64_t pct_icrement =
432             s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INCREMENT];
433 
434     /* We have not started throttling yet. Let's start it. */
435     if (!cpu_throttle_active()) {
436         cpu_throttle_set(pct_initial);
437     } else {
438         /* Throttling already on, just increase the rate */
439         cpu_throttle_set(cpu_throttle_get_percentage() + pct_icrement);
440     }
441 }
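
/*
 * As an illustration, with x-cpu-throttle-initial=20 and
 * x-cpu-throttle-increment=10 (example values for the two parameters read
 * above), successive calls to mig_throttle_guest_down() move the CPU
 * throttle through 20% -> 30% -> 40% -> ... until the dirty rate falls
 * below the transfer rate or the migration completes.
 */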
442 
443 /* Update the xbzrle cache to reflect a page that's been sent as all 0.
444  * The important thing is that a stale (not-yet-0'd) page be replaced
445  * by the new data.
446  * As a bonus, if the page wasn't in the cache it gets added so that
447  * when a small write is made into the 0'd page it gets sent via XBZRLE.
448  */
449 static void xbzrle_cache_zero_page(ram_addr_t current_addr)
450 {
451     if (ram_bulk_stage || !migrate_use_xbzrle()) {
452         return;
453     }
454 
455     /* We don't care if this fails to allocate a new cache page
456      * as long as it updated an old one */
457     cache_insert(XBZRLE.cache, current_addr, ZERO_TARGET_PAGE,
458                  bitmap_sync_count);
459 }
460 
461 #define ENCODING_FLAG_XBZRLE 0x1
462 
463 /**
464  * save_xbzrle_page: compress and send current page
465  *
466  * Returns: 1 means that we wrote the page
467  *          0 means that page is identical to the one already sent
468  *          -1 means that xbzrle would be longer than normal
469  *
470  * @f: QEMUFile where to send the data
471  * @current_data:
472  * @current_addr:
473  * @block: block that contains the page we want to send
474  * @offset: offset inside the block for the page
475  * @last_stage: if we are at the completion stage
476  * @bytes_transferred: increase it with the number of transferred bytes
477  */
478 static int save_xbzrle_page(QEMUFile *f, uint8_t **current_data,
479                             ram_addr_t current_addr, RAMBlock *block,
480                             ram_addr_t offset, bool last_stage,
481                             uint64_t *bytes_transferred)
482 {
483     int encoded_len = 0, bytes_xbzrle;
484     uint8_t *prev_cached_page;
485 
486     if (!cache_is_cached(XBZRLE.cache, current_addr, bitmap_sync_count)) {
487         acct_info.xbzrle_cache_miss++;
488         if (!last_stage) {
489             if (cache_insert(XBZRLE.cache, current_addr, *current_data,
490                              bitmap_sync_count) == -1) {
491                 return -1;
492             } else {
493                 /* update *current_data when the page has been
494                    inserted into cache */
495                 *current_data = get_cached_data(XBZRLE.cache, current_addr);
496             }
497         }
498         return -1;
499     }
500 
501     prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);
502 
503     /* save current buffer into memory */
504     memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);
505 
506     /* XBZRLE encoding (if there is no overflow) */
507     encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
508                                        TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
509                                        TARGET_PAGE_SIZE);
510     if (encoded_len == 0) {
511         DPRINTF("Skipping unmodified page\n");
512         return 0;
513     } else if (encoded_len == -1) {
514         DPRINTF("Overflow\n");
515         acct_info.xbzrle_overflows++;
516         /* update data in the cache */
517         if (!last_stage) {
518             memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE);
519             *current_data = prev_cached_page;
520         }
521         return -1;
522     }
523 
524     /* we need to update the cached data so that it matches the data we just sent */
525     if (!last_stage) {
526         memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
527     }
528 
529     /* Send XBZRLE based compressed page */
530     bytes_xbzrle = save_page_header(f, block, offset | RAM_SAVE_FLAG_XBZRLE);
531     qemu_put_byte(f, ENCODING_FLAG_XBZRLE);
532     qemu_put_be16(f, encoded_len);
533     qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len);
534     bytes_xbzrle += encoded_len + 1 + 2;
535     acct_info.xbzrle_pages++;
536     acct_info.xbzrle_bytes += bytes_xbzrle;
537     *bytes_transferred += bytes_xbzrle;
538 
539     return 1;
540 }
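
/*
 * The XBZRLE record emitted above therefore looks like (sizes in bytes):
 *
 *   [page header][1: ENCODING_FLAG_XBZRLE][2: be16 encoded_len][encoded_len: delta]
 *
 * which is where the "encoded_len + 1 + 2" accounting comes from.
 */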
541 
542 /* Called with rcu_read_lock() to protect migration_bitmap
543  * rb: The RAMBlock  to search for dirty pages in
544  * start: Start address (typically so we can continue from previous page)
545  * ram_addr_abs: Pointer into which to store the address of the dirty page
546  *               within the global ram_addr space
547  *
548  * Returns: byte offset within memory region of the start of a dirty page
549  */
550 static inline
551 ram_addr_t migration_bitmap_find_dirty(RAMBlock *rb,
552                                        ram_addr_t start,
553                                        ram_addr_t *ram_addr_abs)
554 {
555     unsigned long base = rb->offset >> TARGET_PAGE_BITS;
556     unsigned long nr = base + (start >> TARGET_PAGE_BITS);
557     uint64_t rb_size = rb->used_length;
558     unsigned long size = base + (rb_size >> TARGET_PAGE_BITS);
559     unsigned long *bitmap;
560 
561     unsigned long next;
562 
563     bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
564     if (ram_bulk_stage && nr > base) {
565         next = nr + 1;
566     } else {
567         next = find_next_bit(bitmap, size, nr);
568     }
569 
570     *ram_addr_abs = next << TARGET_PAGE_BITS;
571     return (next - base) << TARGET_PAGE_BITS;
572 }
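
/*
 * A worked example of the index arithmetic above, assuming 4 KiB target
 * pages (TARGET_PAGE_BITS == 12): for a RAMBlock whose offset is
 * 0x40000000 and start == 0x3000, base == 0x40000 and nr == 0x40003, and
 * the bitmap is searched over [nr, base + used_length / 4 KiB).  The
 * return value is relative to the block, while *ram_addr_abs names the
 * same page in the global ram_addr_t space.
 */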
573 
574 static inline bool migration_bitmap_clear_dirty(ram_addr_t addr)
575 {
576     bool ret;
577     int nr = addr >> TARGET_PAGE_BITS;
578     unsigned long *bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
579 
580     ret = test_and_clear_bit(nr, bitmap);
581 
582     if (ret) {
583         migration_dirty_pages--;
584     }
585     return ret;
586 }
587 
588 static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
589 {
590     unsigned long *bitmap;
591     bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
592     migration_dirty_pages +=
593         cpu_physical_memory_sync_dirty_bitmap(bitmap, start, length);
594 }
595 
596 /* Fix me: there are too many global variables used in the migration process. */
597 static int64_t start_time;
598 static int64_t bytes_xfer_prev;
599 static int64_t num_dirty_pages_period;
600 static uint64_t xbzrle_cache_miss_prev;
601 static uint64_t iterations_prev;
602 
603 static void migration_bitmap_sync_init(void)
604 {
605     start_time = 0;
606     bytes_xfer_prev = 0;
607     num_dirty_pages_period = 0;
608     xbzrle_cache_miss_prev = 0;
609     iterations_prev = 0;
610 }
611 
612 /* Called with iothread lock held, to protect ram_list.dirty_memory[] */
613 static void migration_bitmap_sync(void)
614 {
615     RAMBlock *block;
616     uint64_t num_dirty_pages_init = migration_dirty_pages;
617     MigrationState *s = migrate_get_current();
618     int64_t end_time;
619     int64_t bytes_xfer_now;
620 
621     bitmap_sync_count++;
622 
623     if (!bytes_xfer_prev) {
624         bytes_xfer_prev = ram_bytes_transferred();
625     }
626 
627     if (!start_time) {
628         start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
629     }
630 
631     trace_migration_bitmap_sync_start();
632     address_space_sync_dirty_bitmap(&address_space_memory);
633 
634     qemu_mutex_lock(&migration_bitmap_mutex);
635     rcu_read_lock();
636     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
637         migration_bitmap_sync_range(block->offset, block->used_length);
638     }
639     rcu_read_unlock();
640     qemu_mutex_unlock(&migration_bitmap_mutex);
641 
642     trace_migration_bitmap_sync_end(migration_dirty_pages
643                                     - num_dirty_pages_init);
644     num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init;
645     end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
646 
647     /* more than 1 second = 1000 milliseconds */
648     if (end_time > start_time + 1000) {
649         if (migrate_auto_converge()) {
650             /* The following detection logic can be refined later. For now:
651                Check to see if the bytes dirtied in this period exceed half of
652                the approx. amount of bytes that just got transferred since the
653                last time we were in this routine. If that keeps happening, start
654                or increase throttling */
655             bytes_xfer_now = ram_bytes_transferred();
656 
657             if (s->dirty_pages_rate &&
658                (num_dirty_pages_period * TARGET_PAGE_SIZE >
659                    (bytes_xfer_now - bytes_xfer_prev)/2) &&
660                (dirty_rate_high_cnt++ >= 2)) {
661                     trace_migration_throttle();
662                     dirty_rate_high_cnt = 0;
663                     mig_throttle_guest_down();
664              }
665              bytes_xfer_prev = bytes_xfer_now;
666         }
667 
668         if (migrate_use_xbzrle()) {
669             if (iterations_prev != acct_info.iterations) {
670                 acct_info.xbzrle_cache_miss_rate =
671                    (double)(acct_info.xbzrle_cache_miss -
672                             xbzrle_cache_miss_prev) /
673                    (acct_info.iterations - iterations_prev);
674             }
675             iterations_prev = acct_info.iterations;
676             xbzrle_cache_miss_prev = acct_info.xbzrle_cache_miss;
677         }
678         s->dirty_pages_rate = num_dirty_pages_period * 1000
679             / (end_time - start_time);
680         s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE;
681         start_time = end_time;
682         num_dirty_pages_period = 0;
683     }
684     s->dirty_sync_count = bitmap_sync_count;
685 }
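
/*
 * To illustrate the auto-converge check above with made-up numbers: if,
 * during a roughly one second sync period, the guest dirtied 400 MB of
 * RAM while only 600 MB were transferred, then 400 MB > 600 MB / 2 holds
 * and dirty_rate_high_cnt is bumped; once that has happened often enough,
 * mig_throttle_guest_down() is called and the counter is reset.
 */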
686 
687 /**
688  * save_zero_page: Send the zero page to the stream
689  *
690  * Returns: Number of pages written.
691  *
692  * @f: QEMUFile where to send the data
693  * @block: block that contains the page we want to send
694  * @offset: offset inside the block for the page
695  * @p: pointer to the page
696  * @bytes_transferred: increase it with the number of transferred bytes
697  */
698 static int save_zero_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
699                           uint8_t *p, uint64_t *bytes_transferred)
700 {
701     int pages = -1;
702 
703     if (is_zero_range(p, TARGET_PAGE_SIZE)) {
704         acct_info.dup_pages++;
705         *bytes_transferred += save_page_header(f, block,
706                                                offset | RAM_SAVE_FLAG_COMPRESS);
707         qemu_put_byte(f, 0);
708         *bytes_transferred += 1;
709         pages = 1;
710     }
711 
712     return pages;
713 }
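
/*
 * On the wire a zero page is therefore just the page header with
 * RAM_SAVE_FLAG_COMPRESS set followed by a single 0x00 fill byte, i.e.
 * roughly 9 bytes instead of a full TARGET_PAGE_SIZE of zeroes (plus the
 * block name the first time a block is seen).
 */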
714 
715 /**
716  * ram_save_page: Send the given page to the stream
717  *
718  * Returns: Number of pages written.
719  *
720  * @f: QEMUFile where to send the data
721  * @block: block that contains the page we want to send
722  * @offset: offset inside the block for the page
723  * @last_stage: if we are at the completion stage
724  * @bytes_transferred: increase it with the number of transferred bytes
725  */
726 static int ram_save_page(QEMUFile *f, RAMBlock* block, ram_addr_t offset,
727                          bool last_stage, uint64_t *bytes_transferred)
728 {
729     int pages = -1;
730     uint64_t bytes_xmit;
731     ram_addr_t current_addr;
732     uint8_t *p;
733     int ret;
734     bool send_async = true;
735 
736     p = block->host + offset;
737 
738     /* When in doubt, send the page as a normal page */
739     bytes_xmit = 0;
740     ret = ram_control_save_page(f, block->offset,
741                            offset, TARGET_PAGE_SIZE, &bytes_xmit);
742     if (bytes_xmit) {
743         *bytes_transferred += bytes_xmit;
744         pages = 1;
745     }
746 
747     XBZRLE_cache_lock();
748 
749     current_addr = block->offset + offset;
750 
751     if (block == last_sent_block) {
752         offset |= RAM_SAVE_FLAG_CONTINUE;
753     }
754     if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
755         if (ret != RAM_SAVE_CONTROL_DELAYED) {
756             if (bytes_xmit > 0) {
757                 acct_info.norm_pages++;
758             } else if (bytes_xmit == 0) {
759                 acct_info.dup_pages++;
760             }
761         }
762     } else {
763         pages = save_zero_page(f, block, offset, p, bytes_transferred);
764         if (pages > 0) {
765             /* Must let xbzrle know, otherwise a previous (now 0'd) cached
766              * page would be stale
767              */
768             xbzrle_cache_zero_page(current_addr);
769         } else if (!ram_bulk_stage && migrate_use_xbzrle()) {
770             pages = save_xbzrle_page(f, &p, current_addr, block,
771                                      offset, last_stage, bytes_transferred);
772             if (!last_stage) {
773                 /* Can't send this cached data async, since the cache page
774                  * might get updated before it gets to the wire
775                  */
776                 send_async = false;
777             }
778         }
779     }
780 
781     /* XBZRLE overflow or normal page */
782     if (pages == -1) {
783         *bytes_transferred += save_page_header(f, block,
784                                                offset | RAM_SAVE_FLAG_PAGE);
785         if (send_async) {
786             qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE);
787         } else {
788             qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
789         }
790         *bytes_transferred += TARGET_PAGE_SIZE;
791         pages = 1;
792         acct_info.norm_pages++;
793     }
794 
795     XBZRLE_cache_unlock();
796 
797     return pages;
798 }
799 
800 static int do_compress_ram_page(CompressParam *param)
801 {
802     int bytes_sent, blen;
803     uint8_t *p;
804     RAMBlock *block = param->block;
805     ram_addr_t offset = param->offset;
806 
807     p = block->host + (offset & TARGET_PAGE_MASK);
808 
809     bytes_sent = save_page_header(param->file, block, offset |
810                                   RAM_SAVE_FLAG_COMPRESS_PAGE);
811     blen = qemu_put_compression_data(param->file, p, TARGET_PAGE_SIZE,
812                                      migrate_compress_level());
813     bytes_sent += blen;
814 
815     return bytes_sent;
816 }
817 
818 static inline void start_compression(CompressParam *param)
819 {
820     param->done = false;
821     qemu_mutex_lock(&param->mutex);
822     param->start = true;
823     qemu_cond_signal(&param->cond);
824     qemu_mutex_unlock(&param->mutex);
825 }
826 
827 static inline void start_decompression(DecompressParam *param)
828 {
829     qemu_mutex_lock(&param->mutex);
830     param->start = true;
831     qemu_cond_signal(&param->cond);
832     qemu_mutex_unlock(&param->mutex);
833 }
834 
835 static uint64_t bytes_transferred;
836 
837 static void flush_compressed_data(QEMUFile *f)
838 {
839     int idx, len, thread_count;
840 
841     if (!migrate_use_compression()) {
842         return;
843     }
844     thread_count = migrate_compress_threads();
845     for (idx = 0; idx < thread_count; idx++) {
846         if (!comp_param[idx].done) {
847             qemu_mutex_lock(comp_done_lock);
848             while (!comp_param[idx].done && !quit_comp_thread) {
849                 qemu_cond_wait(comp_done_cond, comp_done_lock);
850             }
851             qemu_mutex_unlock(comp_done_lock);
852         }
853         if (!quit_comp_thread) {
854             len = qemu_put_qemu_file(f, comp_param[idx].file);
855             bytes_transferred += len;
856         }
857     }
858 }
859 
860 static inline void set_compress_params(CompressParam *param, RAMBlock *block,
861                                        ram_addr_t offset)
862 {
863     param->block = block;
864     param->offset = offset;
865 }
866 
867 static int compress_page_with_multi_thread(QEMUFile *f, RAMBlock *block,
868                                            ram_addr_t offset,
869                                            uint64_t *bytes_transferred)
870 {
871     int idx, thread_count, bytes_xmit = -1, pages = -1;
872 
873     thread_count = migrate_compress_threads();
874     qemu_mutex_lock(comp_done_lock);
875     while (true) {
876         for (idx = 0; idx < thread_count; idx++) {
877             if (comp_param[idx].done) {
878                 bytes_xmit = qemu_put_qemu_file(f, comp_param[idx].file);
879                 set_compress_params(&comp_param[idx], block, offset);
880                 start_compression(&comp_param[idx]);
881                 pages = 1;
882                 acct_info.norm_pages++;
883                 *bytes_transferred += bytes_xmit;
884                 break;
885             }
886         }
887         if (pages > 0) {
888             break;
889         } else {
890             qemu_cond_wait(comp_done_cond, comp_done_lock);
891         }
892     }
893     qemu_mutex_unlock(comp_done_lock);
894 
895     return pages;
896 }
897 
898 /**
899  * ram_save_compressed_page: compress the given page and send it to the stream
900  *
901  * Returns: Number of pages written.
902  *
903  * @f: QEMUFile where to send the data
904  * @block: block that contains the page we want to send
905  * @offset: offset inside the block for the page
906  * @last_stage: if we are at the completion stage
907  * @bytes_transferred: increase it with the number of transferred bytes
908  */
909 static int ram_save_compressed_page(QEMUFile *f, RAMBlock *block,
910                                     ram_addr_t offset, bool last_stage,
911                                     uint64_t *bytes_transferred)
912 {
913     int pages = -1;
914     uint64_t bytes_xmit;
915     uint8_t *p;
916     int ret;
917 
918     p = block->host + offset;
919 
920     bytes_xmit = 0;
921     ret = ram_control_save_page(f, block->offset,
922                                 offset, TARGET_PAGE_SIZE, &bytes_xmit);
923     if (bytes_xmit) {
924         *bytes_transferred += bytes_xmit;
925         pages = 1;
926     }
927     if (block == last_sent_block) {
928         offset |= RAM_SAVE_FLAG_CONTINUE;
929     }
930     if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
931         if (ret != RAM_SAVE_CONTROL_DELAYED) {
932             if (bytes_xmit > 0) {
933                 acct_info.norm_pages++;
934             } else if (bytes_xmit == 0) {
935                 acct_info.dup_pages++;
936             }
937         }
938     } else {
939         /* When starting the process of a new block, the first page of
940          * the block should be sent out before other pages in the same
941          * block, and all the pages in the last block should have been sent
942          * out. Keeping this order is important, because the 'cont' flag
943          * is used to avoid resending the block name.
944          */
945         if (block != last_sent_block) {
946             flush_compressed_data(f);
947             pages = save_zero_page(f, block, offset, p, bytes_transferred);
948             if (pages == -1) {
949                 set_compress_params(&comp_param[0], block, offset);
950                 /* Use the qemu thread to compress the data to make sure the
951                  * first page is sent out before other pages
952                  */
953                 bytes_xmit = do_compress_ram_page(&comp_param[0]);
954                 acct_info.norm_pages++;
955                 qemu_put_qemu_file(f, comp_param[0].file);
956                 *bytes_transferred += bytes_xmit;
957                 pages = 1;
958             }
959         } else {
960             pages = save_zero_page(f, block, offset, p, bytes_transferred);
961             if (pages == -1) {
962                 pages = compress_page_with_multi_thread(f, block, offset,
963                                                         bytes_transferred);
964             }
965         }
966     }
967 
968     return pages;
969 }
970 
971 /*
972  * Find the next dirty page and update any state associated with
973  * the search process.
974  *
975  * Returns: True if a page is found
976  *
977  * @f: Current migration stream.
978  * @pss: Data about the state of the current dirty page scan.
979  * @*again: Set to false if the search has scanned the whole of RAM
980  * *ram_addr_abs: Pointer into which to store the address of the dirty page
981  *               within the global ram_addr space
982  */
983 static bool find_dirty_block(QEMUFile *f, PageSearchStatus *pss,
984                              bool *again, ram_addr_t *ram_addr_abs)
985 {
986     pss->offset = migration_bitmap_find_dirty(pss->block, pss->offset,
987                                               ram_addr_abs);
988     if (pss->complete_round && pss->block == last_seen_block &&
989         pss->offset >= last_offset) {
990         /*
991          * We've been once around the RAM and haven't found anything.
992          * Give up.
993          */
994         *again = false;
995         return false;
996     }
997     if (pss->offset >= pss->block->used_length) {
998         /* Didn't find anything in this RAM Block */
999         pss->offset = 0;
1000         pss->block = QLIST_NEXT_RCU(pss->block, next);
1001         if (!pss->block) {
1002             /* Hit the end of the list */
1003             pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
1004             /* Flag that we've looped */
1005             pss->complete_round = true;
1006             ram_bulk_stage = false;
1007             if (migrate_use_xbzrle()) {
1008                 /* If xbzrle is on, stop using the data compression at this
1009                  * point. In theory, xbzrle can do better than compression.
1010                  */
1011                 flush_compressed_data(f);
1012                 compression_switch = false;
1013             }
1014         }
1015         /* Didn't find anything this time, but try again on the new block */
1016         *again = true;
1017         return false;
1018     } else {
1019         /* Can go around again, but... */
1020         *again = true;
1021         /* We've found something so probably don't need to */
1022         return true;
1023     }
1024 }
1025 
1026 /*
1027  * Helper for 'get_queued_page' - gets a page off the queue
1028  *      ms:      MigrationState in
1029  * *offset:      Used to return the offset within the RAMBlock
1030  * ram_addr_abs: global offset in the dirty/sent bitmaps
1031  *
1032  * Returns:      block (or NULL if none available)
1033  */
1034 static RAMBlock *unqueue_page(MigrationState *ms, ram_addr_t *offset,
1035                               ram_addr_t *ram_addr_abs)
1036 {
1037     RAMBlock *block = NULL;
1038 
1039     qemu_mutex_lock(&ms->src_page_req_mutex);
1040     if (!QSIMPLEQ_EMPTY(&ms->src_page_requests)) {
1041         struct MigrationSrcPageRequest *entry =
1042                                 QSIMPLEQ_FIRST(&ms->src_page_requests);
1043         block = entry->rb;
1044         *offset = entry->offset;
1045         *ram_addr_abs = (entry->offset + entry->rb->offset) &
1046                         TARGET_PAGE_MASK;
1047 
1048         if (entry->len > TARGET_PAGE_SIZE) {
1049             entry->len -= TARGET_PAGE_SIZE;
1050             entry->offset += TARGET_PAGE_SIZE;
1051         } else {
1052             memory_region_unref(block->mr);
1053             QSIMPLEQ_REMOVE_HEAD(&ms->src_page_requests, next_req);
1054             g_free(entry);
1055         }
1056     }
1057     qemu_mutex_unlock(&ms->src_page_req_mutex);
1058 
1059     return block;
1060 }
1061 
1062 /*
1063  * Unqueue a page from the queue fed by postcopy page requests; skips pages
1064  * that are already sent (!dirty)
1065  *
1066  *      ms:      MigrationState in
1067  *     pss:      PageSearchStatus structure updated with found block/offset
1068  * ram_addr_abs: global offset in the dirty/sent bitmaps
1069  *
1070  * Returns:      true if a queued page is found
1071  */
1072 static bool get_queued_page(MigrationState *ms, PageSearchStatus *pss,
1073                             ram_addr_t *ram_addr_abs)
1074 {
1075     RAMBlock  *block;
1076     ram_addr_t offset;
1077     bool dirty;
1078 
1079     do {
1080         block = unqueue_page(ms, &offset, ram_addr_abs);
1081         /*
1082          * We're sending this page, and since it's postcopy nothing else
1083          * will dirty it, and we must make sure it doesn't get sent again
1084          * even if this queue request was received after the background
1085          * search already sent it.
1086          */
1087         if (block) {
1088             unsigned long *bitmap;
1089             bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
1090             dirty = test_bit(*ram_addr_abs >> TARGET_PAGE_BITS, bitmap);
1091             if (!dirty) {
1092                 trace_get_queued_page_not_dirty(
1093                     block->idstr, (uint64_t)offset,
1094                     (uint64_t)*ram_addr_abs,
1095                     test_bit(*ram_addr_abs >> TARGET_PAGE_BITS,
1096                          atomic_rcu_read(&migration_bitmap_rcu)->unsentmap));
1097             } else {
1098                 trace_get_queued_page(block->idstr,
1099                                       (uint64_t)offset,
1100                                       (uint64_t)*ram_addr_abs);
1101             }
1102         }
1103 
1104     } while (block && !dirty);
1105 
1106     if (block) {
1107         /*
1108          * As soon as we start servicing pages out of order, we have
1109          * to kill the bulk stage, since the bulk stage assumes
1110          * (in migration_bitmap_find_and_reset_dirty) that every page is
1111          * dirty, and that's no longer true.
1112          */
1113         ram_bulk_stage = false;
1114 
1115         /*
1116          * We want the background search to continue from the queued page
1117          * since the guest is likely to want other pages near to the page
1118          * it just requested.
1119          */
1120         pss->block = block;
1121         pss->offset = offset;
1122     }
1123 
1124     return !!block;
1125 }
1126 
1127 /**
1128  * flush_page_queue: Flush any remaining pages in the ram request queue;
1129  *    it should be empty at the end anyway, but in error cases there may be
1130  *    some left.
1131  *
1132  * ms: MigrationState
1133  */
1134 void flush_page_queue(MigrationState *ms)
1135 {
1136     struct MigrationSrcPageRequest *mspr, *next_mspr;
1137     /* This queue generally should be empty - but in the case of a failed
1138      * migration it might have some droppings in it.
1139      */
1140     rcu_read_lock();
1141     QSIMPLEQ_FOREACH_SAFE(mspr, &ms->src_page_requests, next_req, next_mspr) {
1142         memory_region_unref(mspr->rb->mr);
1143         QSIMPLEQ_REMOVE_HEAD(&ms->src_page_requests, next_req);
1144         g_free(mspr);
1145     }
1146     rcu_read_unlock();
1147 }
1148 
1149 /**
1150  * Queue the pages for transmission, e.g. a request from postcopy destination
1151  *   ms: MigrationState in which the queue is held
1152  *   rbname: The RAMBlock the request is for - may be NULL (to mean reuse last)
1153  *   start: Offset from the start of the RAMBlock
1154  *   len: Length (in bytes) to send
1155  *   Return: 0 on success
1156  */
1157 int ram_save_queue_pages(MigrationState *ms, const char *rbname,
1158                          ram_addr_t start, ram_addr_t len)
1159 {
1160     RAMBlock *ramblock;
1161 
1162     rcu_read_lock();
1163     if (!rbname) {
1164         /* Reuse last RAMBlock */
1165         ramblock = ms->last_req_rb;
1166 
1167         if (!ramblock) {
1168             /*
1169              * Shouldn't happen, we can't reuse the last RAMBlock if
1170              * it's the 1st request.
1171              */
1172             error_report("ram_save_queue_pages no previous block");
1173             goto err;
1174         }
1175     } else {
1176         ramblock = qemu_ram_block_by_name(rbname);
1177 
1178         if (!ramblock) {
1179             /* We shouldn't be asked for a non-existent RAMBlock */
1180             error_report("ram_save_queue_pages no block '%s'", rbname);
1181             goto err;
1182         }
1183         ms->last_req_rb = ramblock;
1184     }
1185     trace_ram_save_queue_pages(ramblock->idstr, start, len);
1186     if (start+len > ramblock->used_length) {
1187         error_report("%s request overrun start=%zx len=%zx blocklen=%zx",
1188                      __func__, start, len, ramblock->used_length);
1189         goto err;
1190     }
1191 
1192     struct MigrationSrcPageRequest *new_entry =
1193         g_malloc0(sizeof(struct MigrationSrcPageRequest));
1194     new_entry->rb = ramblock;
1195     new_entry->offset = start;
1196     new_entry->len = len;
1197 
1198     memory_region_ref(ramblock->mr);
1199     qemu_mutex_lock(&ms->src_page_req_mutex);
1200     QSIMPLEQ_INSERT_TAIL(&ms->src_page_requests, new_entry, next_req);
1201     qemu_mutex_unlock(&ms->src_page_req_mutex);
1202     rcu_read_unlock();
1203 
1204     return 0;
1205 
1206 err:
1207     rcu_read_unlock();
1208     return -1;
1209 }
1210 
1211 /**
1212  * ram_save_target_page: Save one target page
1213  *
1214  *
1215  * @f: QEMUFile where to send the data
1216  * @block: pointer to block that contains the page we want to send
1217  * @offset: offset inside the block for the page;
1218  * @last_stage: if we are at the completion stage
1219  * @bytes_transferred: increase it with the number of transferred bytes
1220  * @dirty_ram_abs: Address of the start of the dirty page in ram_addr_t space
1221  *
1222  * Returns: Number of pages written.
1223  */
1224 static int ram_save_target_page(MigrationState *ms, QEMUFile *f,
1225                                 RAMBlock *block, ram_addr_t offset,
1226                                 bool last_stage,
1227                                 uint64_t *bytes_transferred,
1228                                 ram_addr_t dirty_ram_abs)
1229 {
1230     int res = 0;
1231 
1232     /* Check whether the page is dirty and, if it is, send it */
1233     if (migration_bitmap_clear_dirty(dirty_ram_abs)) {
1234         unsigned long *unsentmap;
1235         if (compression_switch && migrate_use_compression()) {
1236             res = ram_save_compressed_page(f, block, offset,
1237                                            last_stage,
1238                                            bytes_transferred);
1239         } else {
1240             res = ram_save_page(f, block, offset, last_stage,
1241                                 bytes_transferred);
1242         }
1243 
1244         if (res < 0) {
1245             return res;
1246         }
1247         unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
1248         if (unsentmap) {
1249             clear_bit(dirty_ram_abs >> TARGET_PAGE_BITS, unsentmap);
1250         }
1251     }
1252 
1253     return res;
1254 }
1255 
1256 /**
1257  * ram_save_host_page: Starting at *offset send pages up to the end
1258  *                     of the current host page.  It's valid for the initial
1259  *                     offset to point into the middle of a host page
1260  *                     in which case the remainder of the hostpage is sent.
1261  *                     Only dirty target pages are sent.
1262  *
1263  * Returns: Number of pages written.
1264  *
1265  * @f: QEMUFile where to send the data
1266  * @block: pointer to block that contains the page we want to send
1267  * @offset: offset inside the block for the page; updated to last target page
1268  *          sent
1269  * @last_stage: if we are at the completion stage
1270  * @bytes_transferred: increase it with the number of transferred bytes
1271  * @dirty_ram_abs: Address of the start of the dirty page in ram_addr_t space
1272  */
1273 static int ram_save_host_page(MigrationState *ms, QEMUFile *f, RAMBlock *block,
1274                               ram_addr_t *offset, bool last_stage,
1275                               uint64_t *bytes_transferred,
1276                               ram_addr_t dirty_ram_abs)
1277 {
1278     int tmppages, pages = 0;
1279     do {
1280         tmppages = ram_save_target_page(ms, f, block, *offset, last_stage,
1281                                         bytes_transferred, dirty_ram_abs);
1282         if (tmppages < 0) {
1283             return tmppages;
1284         }
1285 
1286         pages += tmppages;
1287         *offset += TARGET_PAGE_SIZE;
1288         dirty_ram_abs += TARGET_PAGE_SIZE;
1289     } while (*offset & (qemu_host_page_size - 1));
1290 
1291     /* The offset we leave with is the last one we looked at */
1292     *offset -= TARGET_PAGE_SIZE;
1293     return pages;
1294 }
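
/*
 * For example, a 64 KiB host page holding 4 KiB target pages makes the
 * loop above cover up to 16 target pages per call, with clean target
 * pages skipped inside ram_save_target_page(); when host and target page
 * sizes match, the loop body runs exactly once.
 */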
1295 
1296 /**
1297  * ram_find_and_save_block: Finds a dirty page and sends it to f
1298  *
1299  * Called within an RCU critical section.
1300  *
1301  * Returns:  The number of pages written
1302  *           0 means no dirty pages
1303  *
1304  * @f: QEMUFile where to send the data
1305  * @last_stage: if we are at the completion stage
1306  * @bytes_transferred: increase it with the number of transferred bytes
1307  *
1308  * On systems where host-page-size > target-page-size it will send all the
1309  * pages in a host page that are dirty.
1310  */
1311 
1312 static int ram_find_and_save_block(QEMUFile *f, bool last_stage,
1313                                    uint64_t *bytes_transferred)
1314 {
1315     PageSearchStatus pss;
1316     MigrationState *ms = migrate_get_current();
1317     int pages = 0;
1318     bool again, found;
1319     ram_addr_t dirty_ram_abs; /* Address of the start of the dirty page in
1320                                  ram_addr_t space */
1321 
1322     pss.block = last_seen_block;
1323     pss.offset = last_offset;
1324     pss.complete_round = false;
1325 
1326     if (!pss.block) {
1327         pss.block = QLIST_FIRST_RCU(&ram_list.blocks);
1328     }
1329 
1330     do {
1331         again = true;
1332         found = get_queued_page(ms, &pss, &dirty_ram_abs);
1333 
1334         if (!found) {
1335             /* priority queue empty, so just search for something dirty */
1336             found = find_dirty_block(f, &pss, &again, &dirty_ram_abs);
1337         }
1338 
1339         if (found) {
1340             pages = ram_save_host_page(ms, f, pss.block, &pss.offset,
1341                                        last_stage, bytes_transferred,
1342                                        dirty_ram_abs);
1343         }
1344     } while (!pages && again);
1345 
1346     last_seen_block = pss.block;
1347     last_offset = pss.offset;
1348 
1349     return pages;
1350 }
1351 
1352 void acct_update_position(QEMUFile *f, size_t size, bool zero)
1353 {
1354     uint64_t pages = size / TARGET_PAGE_SIZE;
1355     if (zero) {
1356         acct_info.dup_pages += pages;
1357     } else {
1358         acct_info.norm_pages += pages;
1359         bytes_transferred += size;
1360         qemu_update_position(f, size);
1361     }
1362 }
1363 
1364 static ram_addr_t ram_save_remaining(void)
1365 {
1366     return migration_dirty_pages;
1367 }
1368 
1369 uint64_t ram_bytes_remaining(void)
1370 {
1371     return ram_save_remaining() * TARGET_PAGE_SIZE;
1372 }
1373 
1374 uint64_t ram_bytes_transferred(void)
1375 {
1376     return bytes_transferred;
1377 }
1378 
1379 uint64_t ram_bytes_total(void)
1380 {
1381     RAMBlock *block;
1382     uint64_t total = 0;
1383 
1384     rcu_read_lock();
1385     QLIST_FOREACH_RCU(block, &ram_list.blocks, next)
1386         total += block->used_length;
1387     rcu_read_unlock();
1388     return total;
1389 }
1390 
1391 void free_xbzrle_decoded_buf(void)
1392 {
1393     g_free(xbzrle_decoded_buf);
1394     xbzrle_decoded_buf = NULL;
1395 }
1396 
1397 static void migration_bitmap_free(struct BitmapRcu *bmap)
1398 {
1399     g_free(bmap->bmap);
1400     g_free(bmap->unsentmap);
1401     g_free(bmap);
1402 }
1403 
1404 static void ram_migration_cleanup(void *opaque)
1405 {
1406     /* the caller holds the iothread lock or is in a bh, so there is
1407      * no writing race against this migration_bitmap
1408      */
1409     struct BitmapRcu *bitmap = migration_bitmap_rcu;
1410     atomic_rcu_set(&migration_bitmap_rcu, NULL);
1411     if (bitmap) {
1412         memory_global_dirty_log_stop();
1413         call_rcu(bitmap, migration_bitmap_free, rcu);
1414     }
1415 
1416     XBZRLE_cache_lock();
1417     if (XBZRLE.cache) {
1418         cache_fini(XBZRLE.cache);
1419         g_free(XBZRLE.encoded_buf);
1420         g_free(XBZRLE.current_buf);
1421         XBZRLE.cache = NULL;
1422         XBZRLE.encoded_buf = NULL;
1423         XBZRLE.current_buf = NULL;
1424     }
1425     XBZRLE_cache_unlock();
1426 }
1427 
1428 static void reset_ram_globals(void)
1429 {
1430     last_seen_block = NULL;
1431     last_sent_block = NULL;
1432     last_offset = 0;
1433     last_version = ram_list.version;
1434     ram_bulk_stage = true;
1435 }
1436 
1437 #define MAX_WAIT 50 /* ms, half buffered_file limit */
1438 
1439 void migration_bitmap_extend(ram_addr_t old, ram_addr_t new)
1440 {
1441     /* called in qemu main thread, so there is
1442      * no writing race against this migration_bitmap
1443      */
1444     if (migration_bitmap_rcu) {
1445         struct BitmapRcu *old_bitmap = migration_bitmap_rcu, *bitmap;
1446         bitmap = g_new(struct BitmapRcu, 1);
1447         bitmap->bmap = bitmap_new(new);
1448 
1449         /* prevent bits in the migration_bitmap from being set
1450          * by migration_bitmap_sync_range() at the same time.
1451          * It is safe for migration if a migration_bitmap bit is cleared
1452          * at the same time.
1453          */
1454         qemu_mutex_lock(&migration_bitmap_mutex);
1455         bitmap_copy(bitmap->bmap, old_bitmap->bmap, old);
1456         bitmap_set(bitmap->bmap, old, new - old);
1457 
1458         /* We don't have a way to safely extend the unsentmap
1459          * with RCU; so mark it as missing, and entry to postcopy
1460          * will fail.
1461          */
1462         bitmap->unsentmap = NULL;
1463 
1464         atomic_rcu_set(&migration_bitmap_rcu, bitmap);
1465         qemu_mutex_unlock(&migration_bitmap_mutex);
1466         migration_dirty_pages += new - old;
1467         call_rcu(old_bitmap, migration_bitmap_free, rcu);
1468     }
1469 }
1470 
1471 /*
1472  * 'expected' is the value you expect the bitmap mostly to be full
1473  * of; it won't bother printing lines that are all this value.
1474  * If 'todump' is null the migration bitmap is dumped.
1475  */
1476 void ram_debug_dump_bitmap(unsigned long *todump, bool expected)
1477 {
1478     int64_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS;
1479 
1480     int64_t cur;
1481     int64_t linelen = 128;
1482     char linebuf[129];
1483 
1484     if (!todump) {
1485         todump = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
1486     }
1487 
1488     for (cur = 0; cur < ram_pages; cur += linelen) {
1489         int64_t curb;
1490         bool found = false;
1491         /*
1492          * Last line; catch the case where the line length
1493          * is longer than remaining ram
1494          */
1495         if (cur + linelen > ram_pages) {
1496             linelen = ram_pages - cur;
1497         }
1498         for (curb = 0; curb < linelen; curb++) {
1499             bool thisbit = test_bit(cur + curb, todump);
1500             linebuf[curb] = thisbit ? '1' : '.';
1501             found = found || (thisbit != expected);
1502         }
1503         if (found) {
1504             linebuf[curb] = '\0';
1505             fprintf(stderr,  "0x%08" PRIx64 " : %s\n", cur, linebuf);
1506         }
1507     }
1508 }
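
/*
 * Sample output (illustrative, line shortened here): each line covers up
 * to 128 pages and is only printed when some bit differs from 'expected',
 * e.g.
 *
 *   0x00000080 : ....1111..1.....................
 *
 * where the leading number is the page index of the first bit on the
 * line, '1' is a set bit and '.' a clear one.
 */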
1509 
1510 /* **** functions for postcopy ***** */
1511 
1512 /*
1513  * Callback from postcopy_each_ram_send_discard for each RAMBlock
1514  * Note: At this point the 'unsentmap' is the processed bitmap combined
1515  *       with the dirtymap; so a '1' means it's either dirty or unsent.
1516  * start,length: Indexes into the bitmap for the first bit
1517  *            representing the named block and length in target-pages
1518  */
1519 static int postcopy_send_discard_bm_ram(MigrationState *ms,
1520                                         PostcopyDiscardState *pds,
1521                                         unsigned long start,
1522                                         unsigned long length)
1523 {
1524     unsigned long end = start + length; /* one after the end */
1525     unsigned long current;
1526     unsigned long *unsentmap;
1527 
1528     unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
1529     for (current = start; current < end; ) {
1530         unsigned long one = find_next_bit(unsentmap, end, current);
1531 
1532         if (one < end) {
1533             unsigned long zero = find_next_zero_bit(unsentmap, end, one + 1);
1534             unsigned long discard_length;
1535 
1536             if (zero >= end) {
1537                 discard_length = end - one;
1538             } else {
1539                 discard_length = zero - one;
1540             }
1541             postcopy_discard_send_range(ms, pds, one, discard_length);
1542             current = one + discard_length;
1543         } else {
1544             current = one;
1545         }
1546     }
1547 
1548     return 0;
1549 }
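
/*
 * As an illustration, with an unsentmap fragment of 0,0,1,1,1,0,... at
 * bitmap index 'start', the loop above finds the run at start + 2 of
 * length 3 and emits postcopy_discard_send_range(ms, pds, start + 2, 3),
 * then resumes scanning from the first bit after that run.
 */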
1550 
1551 /*
1552  * Utility for the outgoing postcopy code.
1553  *   Calls postcopy_send_discard_bm_ram for each RAMBlock
1554  *   passing it bitmap indexes and name.
1555  * Returns: 0 on success
1556  * (qemu_ram_foreach_block ends up passing unscaled lengths
1557  *  which would mean postcopy code would have to deal with target pages)
1558  */
1559 static int postcopy_each_ram_send_discard(MigrationState *ms)
1560 {
1561     struct RAMBlock *block;
1562     int ret;
1563 
1564     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1565         unsigned long first = block->offset >> TARGET_PAGE_BITS;
1566         PostcopyDiscardState *pds = postcopy_discard_send_init(ms,
1567                                                                first,
1568                                                                block->idstr);
1569 
1570         /*
1571          * Postcopy sends chunks of bitmap over the wire, but it
1572          * just needs indexes at this point; this avoids it having
1573          * target-page-specific code.
1574          */
1575         ret = postcopy_send_discard_bm_ram(ms, pds, first,
1576                                     block->used_length >> TARGET_PAGE_BITS);
1577         postcopy_discard_send_finish(ms, pds);
1578         if (ret) {
1579             return ret;
1580         }
1581     }
1582 
1583     return 0;
1584 }
1585 
1586 /*
1587  * Helper for postcopy_chunk_hostpages; it's called twice to cleanup
1588  *   the two bitmaps, that are similar, but one is inverted.
1589  *
1590  * We search for runs of target-pages that don't start or end on a
1591  * host page boundary;
1592  * unsent_pass=true: Cleans up partially unsent host pages by searching
1593  *                 the unsentmap
1594  * unsent_pass=false: Cleans up partially dirty host pages by searching
1595  *                 the main migration bitmap
1596  *
1597  */
1598 static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
1599                                           RAMBlock *block,
1600                                           PostcopyDiscardState *pds)
1601 {
1602     unsigned long *bitmap;
1603     unsigned long *unsentmap;
1604     unsigned int host_ratio = qemu_host_page_size / TARGET_PAGE_SIZE;
1605     unsigned long first = block->offset >> TARGET_PAGE_BITS;
1606     unsigned long len = block->used_length >> TARGET_PAGE_BITS;
1607     unsigned long last = first + (len - 1);
1608     unsigned long run_start;
1609 
1610     bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
1611     unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
1612 
1613     if (unsent_pass) {
1614         /* Find a sent page */
1615         run_start = find_next_zero_bit(unsentmap, last + 1, first);
1616     } else {
1617         /* Find a dirty page */
1618         run_start = find_next_bit(bitmap, last + 1, first);
1619     }
1620 
1621     while (run_start <= last) {
1622         bool do_fixup = false;
1623         unsigned long fixup_start_addr;
1624         unsigned long host_offset;
1625 
1626         /*
1627          * If the start of this run of pages is in the middle of a host
1628          * page, then we need to fixup this host page.
1629          */
1630         host_offset = run_start % host_ratio;
1631         if (host_offset) {
1632             do_fixup = true;
1633             run_start -= host_offset;
1634             fixup_start_addr = run_start;
1635             /* For the next pass */
1636             run_start = run_start + host_ratio;
1637         } else {
1638             /* Find the end of this run */
1639             unsigned long run_end;
1640             if (unsent_pass) {
1641                 run_end = find_next_bit(unsentmap, last + 1, run_start + 1);
1642             } else {
1643                 run_end = find_next_zero_bit(bitmap, last + 1, run_start + 1);
1644             }
1645             /*
1646              * If the end isn't at the start of a host page, then the
1647              * run doesn't finish at the end of a host page
1648              * and we need to discard.
1649              */
1650             host_offset = run_end % host_ratio;
1651             if (host_offset) {
1652                 do_fixup = true;
1653                 fixup_start_addr = run_end - host_offset;
1654                 /*
1655                  * This host page has gone, the next loop iteration starts
1656                  * from after the fixup
1657                  */
1658                 run_start = fixup_start_addr + host_ratio;
1659             } else {
1660                 /*
1661                  * No discards on this iteration, next loop starts from
1662                  * next sent/dirty page
1663                  */
1664                 run_start = run_end + 1;
1665             }
1666         }
1667 
1668         if (do_fixup) {
1669             unsigned long page;
1670 
1671             /* Tell the destination to discard this page */
1672             if (unsent_pass || !test_bit(fixup_start_addr, unsentmap)) {
1673                 /* For the unsent_pass we:
1674                  *     discard partially sent pages
1675                  * For the !unsent_pass (dirty) we:
1676                  *     discard partially dirty pages that were sent
1677                  *     (any partially sent pages were already discarded
1678                  *     by the previous unsent_pass)
1679                  */
1680                 postcopy_discard_send_range(ms, pds, fixup_start_addr,
1681                                             host_ratio);
1682             }
1683 
1684             /* Clean up the bitmap */
1685             for (page = fixup_start_addr;
1686                  page < fixup_start_addr + host_ratio; page++) {
1687                 /* All pages in this host page are now not sent */
1688                 set_bit(page, unsentmap);
1689 
1690                 /*
1691                  * Remark them as dirty, updating the count for any pages
1692                  * that weren't previously dirty.
1693                  */
1694                 migration_dirty_pages += !test_and_set_bit(page, bitmap);
1695             }
1696         }
1697 
1698         if (unsent_pass) {
1699             /* Find the next sent page for the next iteration */
1700             run_start = find_next_zero_bit(unsentmap, last + 1,
1701                                            run_start);
1702         } else {
1703             /* Find the next dirty page for the next iteration */
1704             run_start = find_next_bit(bitmap, last + 1, run_start);
1705         }
1706     }
1707 }
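
/*
 * A worked illustration of the pass above (example sizes only): with 4KiB
 * target pages and 16KiB host pages, host_ratio is 4.  A dirty run that
 * starts at target page 6 begins mid host page, so fixup_start_addr is
 * rounded down to 4 and target pages 4..7 are re-marked dirty and unsent;
 * if the start of that host page had already been sent, the destination
 * is also told to discard the whole host page.
 */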
1708 
1709 /*
1710  * Utility for the outgoing postcopy code.
1711  *
1712  * Discard any partially sent host-page size chunks, mark any partially
1713  * dirty host-page size chunks as all dirty.
1714  *
1715  * Returns: 0 on success
1716  */
1717 static int postcopy_chunk_hostpages(MigrationState *ms)
1718 {
1719     struct RAMBlock *block;
1720 
1721     if (qemu_host_page_size == TARGET_PAGE_SIZE) {
1722         /* Easy case - TPS==HPS - nothing to be done */
1723         return 0;
1724     }
1725 
1726     /* Easiest way to make sure we don't resume in the middle of a host-page */
1727     last_seen_block = NULL;
1728     last_sent_block = NULL;
1729     last_offset     = 0;
1730 
1731     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1732         unsigned long first = block->offset >> TARGET_PAGE_BITS;
1733 
1734         PostcopyDiscardState *pds =
1735                          postcopy_discard_send_init(ms, first, block->idstr);
1736 
1737         /* First pass: Discard all partially sent host pages */
1738         postcopy_chunk_hostpages_pass(ms, true, block, pds);
1739         /*
1740          * Second pass: Ensure that all partially dirty host pages are made
1741          * fully dirty.
1742          */
1743         postcopy_chunk_hostpages_pass(ms, false, block, pds);
1744 
1745         postcopy_discard_send_finish(ms, pds);
1746     } /* ram_list loop */
1747 
1748     return 0;
1749 }
1750 
1751 /*
1752  * Transmit the set of pages to be discarded after precopy to the target;
1753  * these are pages that:
1754  *     a) have been previously transmitted but are now dirty again
1755  *     b) have never been transmitted; this ensures that any pages on the
1756  *        destination that have been mapped by background tasks get
1757  *        discarded (transparent huge pages are the specific concern)
1758  * Hopefully this is pretty sparse.
1759  */
1760 int ram_postcopy_send_discard_bitmap(MigrationState *ms)
1761 {
1762     int ret;
1763     unsigned long *bitmap, *unsentmap;
1764 
1765     rcu_read_lock();
1766 
1767     /* This should be our last sync, the src is now paused */
1768     migration_bitmap_sync();
1769 
1770     unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
1771     if (!unsentmap) {
1772         /* We don't have a safe way to resize the unsentmap, so
1773          * if the bitmap was resized it will be NULL at this
1774          * point.
1775          */
1776         error_report("migration ram resized during precopy phase");
1777         rcu_read_unlock();
1778         return -EINVAL;
1779     }
1780 
1781     /* Deal with TPS != HPS */
1782     ret = postcopy_chunk_hostpages(ms);
1783     if (ret) {
1784         rcu_read_unlock();
1785         return ret;
1786     }
1787 
1788     /*
1789      * Update the unsentmap to be unsentmap = unsentmap | dirty
1790      */
1791     bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
1792     bitmap_or(unsentmap, unsentmap, bitmap,
1793                last_ram_offset() >> TARGET_PAGE_BITS);
1794 
1795 
1796     trace_ram_postcopy_send_discard_bitmap();
1797 #ifdef DEBUG_POSTCOPY
1798     ram_debug_dump_bitmap(unsentmap, true);
1799 #endif
1800 
1801     ret = postcopy_each_ram_send_discard(ms);
1802     rcu_read_unlock();
1803 
1804     return ret;
1805 }
1806 
1807 /*
1808  * At the start of the postcopy phase of migration, any now-dirty
1809  * precopied pages are discarded.
1810  *
1811  * start, length describe a byte address range within the RAMBlock
1812  *
1813  * Returns 0 on success.
1814  */
1815 int ram_discard_range(MigrationIncomingState *mis,
1816                       const char *block_name,
1817                       uint64_t start, size_t length)
1818 {
1819     int ret = -1;
1820 
1821     rcu_read_lock();
1822     RAMBlock *rb = qemu_ram_block_by_name(block_name);
1823 
1824     if (!rb) {
1825         error_report("ram_discard_range: Failed to find block '%s'",
1826                      block_name);
1827         goto err;
1828     }
1829 
1830     uint8_t *host_startaddr = rb->host + start;
1831 
1832     if ((uintptr_t)host_startaddr & (qemu_host_page_size - 1)) {
1833         error_report("ram_discard_range: Unaligned start address: %p",
1834                      host_startaddr);
1835         goto err;
1836     }
1837 
1838     if ((start + length) <= rb->used_length) {
1839         uint8_t *host_endaddr = host_startaddr + length;
1840         if ((uintptr_t)host_endaddr & (qemu_host_page_size - 1)) {
1841             error_report("ram_discard_range: Unaligned end address: %p",
1842                          host_endaddr);
1843             goto err;
1844         }
1845         ret = postcopy_ram_discard_range(mis, host_startaddr, length);
1846     } else {
1847         error_report("ram_discard_range: Overrun block '%s' (%" PRIu64
1848                      "/%zu/%zu)",
1849                      block_name, start, length, rb->used_length);
1850     }
1851 
1852 err:
1853     rcu_read_unlock();
1854 
1855     return ret;
1856 }
1857 
1858 
1859 /* Each of ram_save_setup, ram_save_iterate and ram_save_complete has a
1860  * long-running RCU critical section.  When RCU reclaims in this code
1861  * start to become numerous, it will be necessary to reduce the
1862  * granularity of these critical sections.
1863  */
1864 
1865 static int ram_save_setup(QEMUFile *f, void *opaque)
1866 {
1867     RAMBlock *block;
1868     int64_t ram_bitmap_pages; /* Size of bitmap in pages, including gaps */
1869 
1870     dirty_rate_high_cnt = 0;
1871     bitmap_sync_count = 0;
1872     migration_bitmap_sync_init();
1873     qemu_mutex_init(&migration_bitmap_mutex);
1874 
1875     if (migrate_use_xbzrle()) {
1876         XBZRLE_cache_lock();
1877         XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
1878                                   TARGET_PAGE_SIZE,
1879                                   TARGET_PAGE_SIZE);
1880         if (!XBZRLE.cache) {
1881             XBZRLE_cache_unlock();
1882             error_report("Error creating cache");
1883             return -1;
1884         }
1885         XBZRLE_cache_unlock();
1886 
1887         /* We prefer not to abort if there is no memory */
1888         XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
1889         if (!XBZRLE.encoded_buf) {
1890             error_report("Error allocating encoded_buf");
1891             return -1;
1892         }
1893 
1894         XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
1895         if (!XBZRLE.current_buf) {
1896             error_report("Error allocating current_buf");
1897             g_free(XBZRLE.encoded_buf);
1898             XBZRLE.encoded_buf = NULL;
1899             return -1;
1900         }
1901 
1902         acct_clear();
1903     }
1904 
1905     /* iothread lock needed for ram_list.dirty_memory[] */
1906     qemu_mutex_lock_iothread();
1907     qemu_mutex_lock_ramlist();
1908     rcu_read_lock();
1909     bytes_transferred = 0;
1910     reset_ram_globals();
1911 
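    /*
     * Every page starts out marked dirty (and, when postcopy is enabled,
     * unsent) so that the first iteration transmits all of RAM.
     */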
1912     ram_bitmap_pages = last_ram_offset() >> TARGET_PAGE_BITS;
1913     migration_bitmap_rcu = g_new0(struct BitmapRcu, 1);
1914     migration_bitmap_rcu->bmap = bitmap_new(ram_bitmap_pages);
1915     bitmap_set(migration_bitmap_rcu->bmap, 0, ram_bitmap_pages);
1916 
1917     if (migrate_postcopy_ram()) {
1918         migration_bitmap_rcu->unsentmap = bitmap_new(ram_bitmap_pages);
1919         bitmap_set(migration_bitmap_rcu->unsentmap, 0, ram_bitmap_pages);
1920     }
1921 
1922     /*
1923      * Count the total number of pages used by ram blocks not including any
1924      * gaps due to alignment or unplugs.
1925      */
1926     migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
1927 
1928     memory_global_dirty_log_start();
1929     migration_bitmap_sync();
1930     qemu_mutex_unlock_ramlist();
1931     qemu_mutex_unlock_iothread();
1932 
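    /*
     * Header written below: a be64 word carrying the total RAM size with
     * RAM_SAVE_FLAG_MEM_SIZE set in its low bits, followed by one record of
     * (idstr length byte, idstr bytes, be64 used_length) per RAMBlock.
     */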
1933     qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
1934 
1935     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1936         qemu_put_byte(f, strlen(block->idstr));
1937         qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
1938         qemu_put_be64(f, block->used_length);
1939     }
1940 
1941     rcu_read_unlock();
1942 
1943     ram_control_before_iterate(f, RAM_CONTROL_SETUP);
1944     ram_control_after_iterate(f, RAM_CONTROL_SETUP);
1945 
1946     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
1947 
1948     return 0;
1949 }
1950 
1951 static int ram_save_iterate(QEMUFile *f, void *opaque)
1952 {
1953     int ret;
1954     int i;
1955     int64_t t0;
1956     int pages_sent = 0;
1957 
1958     rcu_read_lock();
1959     if (ram_list.version != last_version) {
1960         reset_ram_globals();
1961     }
1962 
1963     /* Read version before ram_list.blocks */
1964     smp_rmb();
1965 
1966     ram_control_before_iterate(f, RAM_CONTROL_ROUND);
1967 
1968     t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
1969     i = 0;
1970     while ((ret = qemu_file_rate_limit(f)) == 0) {
1971         int pages;
1972 
1973         pages = ram_find_and_save_block(f, false, &bytes_transferred);
1974         /* no more pages to send */
1975         if (pages == 0) {
1976             break;
1977         }
1978         pages_sent += pages;
1979         acct_info.iterations++;
1980 
1981         /* we want to check in the 1st loop, just in case it was the 1st time
1982            and we had to sync the dirty bitmap.
1983            qemu_clock_get_ns() is a bit expensive, so we only check every few
1984            iterations
1985         */
1986         if ((i & 63) == 0) {
1987             uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
1988             if (t1 > MAX_WAIT) {
1989                 DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n",
1990                         t1, i);
1991                 break;
1992             }
1993         }
1994         i++;
1995     }
1996     flush_compressed_data(f);
1997     rcu_read_unlock();
1998 
1999     /*
2000      * Must occur before EOS (or any QEMUFile operation)
2001      * because of RDMA protocol.
2002      */
2003     ram_control_after_iterate(f, RAM_CONTROL_ROUND);
2004 
2005     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
2006     bytes_transferred += 8;
2007 
2008     ret = qemu_file_get_error(f);
2009     if (ret < 0) {
2010         return ret;
2011     }
2012 
2013     return pages_sent;
2014 }
2015 
2016 /* Called with iothread lock */
2017 static int ram_save_complete(QEMUFile *f, void *opaque)
2018 {
2019     rcu_read_lock();
2020 
2021     if (!migration_in_postcopy(migrate_get_current())) {
2022         migration_bitmap_sync();
2023     }
2024 
2025     ram_control_before_iterate(f, RAM_CONTROL_FINISH);
2026 
2027     /* try transferring iterative blocks of memory */
2028 
2029     /* flush all remaining blocks regardless of rate limiting */
2030     while (true) {
2031         int pages;
2032 
2033         pages = ram_find_and_save_block(f, true, &bytes_transferred);
2034         /* no more blocks to send */
2035         if (pages == 0) {
2036             break;
2037         }
2038     }
2039 
2040     flush_compressed_data(f);
2041     ram_control_after_iterate(f, RAM_CONTROL_FINISH);
2042 
2043     rcu_read_unlock();
2044 
2045     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
2046 
2047     return 0;
2048 }
2049 
2050 static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
2051                              uint64_t *non_postcopiable_pending,
2052                              uint64_t *postcopiable_pending)
2053 {
2054     uint64_t remaining_size;
2055 
2056     remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
2057 
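    /*
     * If what remains looks small enough to fit under max_size, take the
     * locks and resync the dirty bitmap so the estimate below reflects the
     * most recent dirtying.
     */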
2058     if (!migration_in_postcopy(migrate_get_current()) &&
2059         remaining_size < max_size) {
2060         qemu_mutex_lock_iothread();
2061         rcu_read_lock();
2062         migration_bitmap_sync();
2063         rcu_read_unlock();
2064         qemu_mutex_unlock_iothread();
2065         remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
2066     }
2067 
2068     /* We can do postcopy, and all the data is postcopiable */
2069     *postcopiable_pending += remaining_size;
2070 }
2071 
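/*
 * An XBZRLE page on the wire, as consumed below: a one-byte header (which
 * must be ENCODING_FLAG_XBZRLE), a be16 length of the encoded data, then
 * the encoded bytes, which xbzrle_decode_buffer() applies on top of the
 * existing contents of 'host'.
 */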
2072 static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
2073 {
2074     unsigned int xh_len;
2075     int xh_flags;
2076 
2077     if (!xbzrle_decoded_buf) {
2078         xbzrle_decoded_buf = g_malloc(TARGET_PAGE_SIZE);
2079     }
2080 
2081     /* extract RLE header */
2082     xh_flags = qemu_get_byte(f);
2083     xh_len = qemu_get_be16(f);
2084 
2085     if (xh_flags != ENCODING_FLAG_XBZRLE) {
2086         error_report("Failed to load XBZRLE page - wrong compression!");
2087         return -1;
2088     }
2089 
2090     if (xh_len > TARGET_PAGE_SIZE) {
2091         error_report("Failed to load XBZRLE page - len overflow!");
2092         return -1;
2093     }
2094     /* load data and decode */
2095     qemu_get_buffer(f, xbzrle_decoded_buf, xh_len);
2096 
2097     /* decode RLE */
2098     if (xbzrle_decode_buffer(xbzrle_decoded_buf, xh_len, host,
2099                              TARGET_PAGE_SIZE) == -1) {
2100         error_report("Failed to load XBZRLE page - decode error!");
2101         return -1;
2102     }
2103 
2104     return 0;
2105 }
2106 
2107 /* Must be called from within an RCU critical section.
2108  * Returns a pointer from within the RCU-protected ram_list.
2109  *
2110  * Read a RAMBlock ID from the stream f, find the host address of the
2111  * start of that block and add on 'offset'.
2112  *
2113  * f: Stream to read from
2114  * offset: Offset within the block
2115  * flags: Page flags (mostly to see if it's a continuation of a
2116  *        previous block)
2117  */
2118 static inline void *host_from_stream_offset(QEMUFile *f,
2119                                             ram_addr_t offset,
2120                                             int flags)
2121 {
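    /* 'block' caches the most recently named RAMBlock so that pages flagged
     * RAM_SAVE_FLAG_CONTINUE can reuse it without re-reading the ID. */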
2122     static RAMBlock *block = NULL;
2123     char id[256];
2124     uint8_t len;
2125 
2126     if (flags & RAM_SAVE_FLAG_CONTINUE) {
2127         if (!block || block->max_length <= offset) {
2128             error_report("Ack, bad migration stream!");
2129             return NULL;
2130         }
2131 
2132         return block->host + offset;
2133     }
2134 
2135     len = qemu_get_byte(f);
2136     qemu_get_buffer(f, (uint8_t *)id, len);
2137     id[len] = 0;
2138 
2139     block = qemu_ram_block_by_name(id);
2140     if (block && block->max_length > offset) {
2141         return block->host + offset;
2142     }
2143 
2144     error_report("Can't find block %s", id);
2145     return NULL;
2146 }
2147 
2148 /*
2149  * If a page (or a whole RDMA chunk) has been determined to be zero, then
2150  * zap it (the memset is skipped when ch is zero and the page already is).
2151  */
2152 void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
2153 {
2154     if (ch != 0 || !is_zero_range(host, size)) {
2155         memset(host, ch, size);
2156     }
2157 }
2158 
2159 static void *do_data_decompress(void *opaque)
2160 {
2161     DecompressParam *param = opaque;
2162     unsigned long pagesize;
2163 
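    /* Each worker sleeps on its condition variable until it is handed a
     * compressed page (param->start) or asked to quit. */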
2164     while (!quit_decomp_thread) {
2165         qemu_mutex_lock(&param->mutex);
2166         while (!param->start && !quit_decomp_thread) {
2167             qemu_cond_wait(&param->cond, &param->mutex);
2168             pagesize = TARGET_PAGE_SIZE;
2169             if (!quit_decomp_thread) {
2170                 /* uncompress() can fail in some cases, especially
2171                  * when the page was dirtied while being compressed; that's
2172                  * not a problem because the dirty page will be retransferred
2173                  * and uncompress() won't break the data in other pages.
2174                  */
2175                 uncompress((Bytef *)param->des, &pagesize,
2176                            (const Bytef *)param->compbuf, param->len);
2177             }
2178             param->start = false;
2179         }
2180         qemu_mutex_unlock(&param->mutex);
2181     }
2182 
2183     return NULL;
2184 }
2185 
2186 void migrate_decompress_threads_create(void)
2187 {
2188     int i, thread_count;
2189 
2190     thread_count = migrate_decompress_threads();
2191     decompress_threads = g_new0(QemuThread, thread_count);
2192     decomp_param = g_new0(DecompressParam, thread_count);
2193     compressed_data_buf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
2194     quit_decomp_thread = false;
2195     for (i = 0; i < thread_count; i++) {
2196         qemu_mutex_init(&decomp_param[i].mutex);
2197         qemu_cond_init(&decomp_param[i].cond);
2198         decomp_param[i].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
2199         qemu_thread_create(decompress_threads + i, "decompress",
2200                            do_data_decompress, decomp_param + i,
2201                            QEMU_THREAD_JOINABLE);
2202     }
2203 }
2204 
2205 void migrate_decompress_threads_join(void)
2206 {
2207     int i, thread_count;
2208 
2209     quit_decomp_thread = true;
2210     thread_count = migrate_decompress_threads();
2211     for (i = 0; i < thread_count; i++) {
2212         qemu_mutex_lock(&decomp_param[i].mutex);
2213         qemu_cond_signal(&decomp_param[i].cond);
2214         qemu_mutex_unlock(&decomp_param[i].mutex);
2215     }
2216     for (i = 0; i < thread_count; i++) {
2217         qemu_thread_join(decompress_threads + i);
2218         qemu_mutex_destroy(&decomp_param[i].mutex);
2219         qemu_cond_destroy(&decomp_param[i].cond);
2220         g_free(decomp_param[i].compbuf);
2221     }
2222     g_free(decompress_threads);
2223     g_free(decomp_param);
2224     g_free(compressed_data_buf);
2225     decompress_threads = NULL;
2226     decomp_param = NULL;
2227     compressed_data_buf = NULL;
2228 }
2229 
2230 static void decompress_data_with_multi_threads(uint8_t *compbuf,
2231                                                void *host, int len)
2232 {
2233     int idx, thread_count;
2234 
2235     thread_count = migrate_decompress_threads();
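    /* Busy-wait until one of the worker threads is idle (start == false),
     * then hand the compressed page to it. */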
2236     while (true) {
2237         for (idx = 0; idx < thread_count; idx++) {
2238             if (!decomp_param[idx].start) {
2239                 memcpy(decomp_param[idx].compbuf, compbuf, len);
2240                 decomp_param[idx].des = host;
2241                 decomp_param[idx].len = len;
2242                 start_decompression(&decomp_param[idx]);
2243                 break;
2244             }
2245         }
2246         if (idx < thread_count) {
2247             break;
2248         }
2249     }
2250 }
2251 
2252 /*
2253  * Allocate data structures etc needed by incoming migration with postcopy-ram.
2254  * postcopy-ram's similarly named postcopy_ram_incoming_init does the work.
2255  */
2256 int ram_postcopy_incoming_init(MigrationIncomingState *mis)
2257 {
2258     size_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS;
2259 
2260     return postcopy_ram_incoming_init(mis, ram_pages);
2261 }
2262 
2263 /*
2264  * Called in postcopy mode by ram_load().
2265  * rcu_read_lock is taken prior to this being called.
2266  */
2267 static int ram_load_postcopy(QEMUFile *f)
2268 {
2269     int flags = 0, ret = 0;
2270     bool place_needed = false;
2271     bool matching_page_sizes = qemu_host_page_size == TARGET_PAGE_SIZE;
2272     MigrationIncomingState *mis = migration_incoming_get_current();
2273     /* Temporary page that is later 'placed' */
2274     void *postcopy_host_page = postcopy_get_tmp_page(mis);
2275     void *last_host = NULL;
2276 
2277     while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
2278         ram_addr_t addr;
2279         void *host = NULL;
2280         void *page_buffer = NULL;
2281         void *place_source = NULL;
2282         uint8_t ch;
2283         bool all_zero = false;
2284 
2285         addr = qemu_get_be64(f);
2286         flags = addr & ~TARGET_PAGE_MASK;
2287         addr &= TARGET_PAGE_MASK;
2288 
2289         trace_ram_load_postcopy_loop((uint64_t)addr, flags);
2290         place_needed = false;
2291         if (flags & (RAM_SAVE_FLAG_COMPRESS | RAM_SAVE_FLAG_PAGE)) {
2292             host = host_from_stream_offset(f, addr, flags);
2293             if (!host) {
2294                 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
2295                 ret = -EINVAL;
2296                 break;
2297             }
2299             /*
2300              * Postcopy requires that we place whole host pages atomically.
2301              * To make it atomic, the data is read into a temporary page
2302              * that's moved into place later.
2303              * The migration protocol uses (possibly smaller) target pages;
2304              * however, the source ensures it always sends all the components
2305              * of a host page in order.
2306              */
2307             page_buffer = postcopy_host_page +
2308                           ((uintptr_t)host & ~qemu_host_page_mask);
2309             /* New host page: assume all zero so the place can be optimised */
2310             if (!((uintptr_t)host & ~qemu_host_page_mask)) {
2311                 all_zero = true;
2312             } else {
2313                 /* not the 1st TP within the HP */
2314                 if (host != (last_host + TARGET_PAGE_SIZE)) {
2315                     error_report("Non-sequential target page %p/%p",
2316                                   host, last_host);
2317                     ret = -EINVAL;
2318                     break;
2319                 }
2320             }
2321 
2322 
2323             /*
2324              * If it's the last part of a host page then we place the host
2325              * page
2326              */
2327             place_needed = (((uintptr_t)host + TARGET_PAGE_SIZE) &
2328                                      ~qemu_host_page_mask) == 0;
2329             place_source = postcopy_host_page;
2330         }
2331         last_host = host;
2332 
2333         switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
2334         case RAM_SAVE_FLAG_COMPRESS:
2335             ch = qemu_get_byte(f);
2336             memset(page_buffer, ch, TARGET_PAGE_SIZE);
2337             if (ch) {
2338                 all_zero = false;
2339             }
2340             break;
2341 
2342         case RAM_SAVE_FLAG_PAGE:
2343             all_zero = false;
2344             if (!place_needed || !matching_page_sizes) {
2345                 qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE);
2346             } else {
2347                 /* Avoid copying the page out of the qemu_file buffer here,
2348                  * since postcopy copies it again when placing the page; this
2349                  * only works when the read is done in one go (matching page sizes)
2350                  */
2351                 qemu_get_buffer_in_place(f, (uint8_t **)&place_source,
2352                                          TARGET_PAGE_SIZE);
2353             }
2354             break;
2355         case RAM_SAVE_FLAG_EOS:
2356             /* normal exit */
2357             break;
2358         default:
2359             error_report("Unknown combination of migration flags: %#x"
2360                          " (postcopy mode)", flags);
2361             ret = -EINVAL;
2362         }
2363 
2364         if (place_needed) {
2365             /* This gets called at the last target page in the host page */
2366             if (all_zero) {
2367                 ret = postcopy_place_page_zero(mis,
2368                                                host + TARGET_PAGE_SIZE -
2369                                                qemu_host_page_size);
2370             } else {
2371                 ret = postcopy_place_page(mis, host + TARGET_PAGE_SIZE -
2372                                                qemu_host_page_size,
2373                                                place_source);
2374             }
2375         }
2376         if (!ret) {
2377             ret = qemu_file_get_error(f);
2378         }
2379     }
2380 
2381     return ret;
2382 }
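
/*
 * Illustration of the host-page assembly above (example sizes only): with
 * 4KiB target pages and a 16KiB host page, four consecutive target pages
 * are gathered in postcopy_host_page; place_needed only becomes true for
 * the fourth one, at which point the whole host page is placed atomically
 * (via the zero-page path if every component was zero).
 */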
2383 
2384 static int ram_load(QEMUFile *f, void *opaque, int version_id)
2385 {
2386     int flags = 0, ret = 0;
2387     static uint64_t seq_iter;
2388     int len = 0;
2389     /*
2390      * If the system is running in postcopy mode, page inserts to host memory
2391      * must be atomic.
2392      */
2393     bool postcopy_running = postcopy_state_get() >= POSTCOPY_INCOMING_LISTENING;
2394 
2395     seq_iter++;
2396 
2397     if (version_id != 4) {
2398         ret = -EINVAL;
2399     }
2400 
2401     /* This RCU critical section can be very long running.
2402      * When RCU reclaims in the code start to become numerous,
2403      * it will be necessary to reduce the granularity of this
2404      * critical section.
2405      */
2406     rcu_read_lock();
2407 
2408     if (postcopy_running) {
2409         ret = ram_load_postcopy(f);
2410     }
2411 
2412     while (!postcopy_running && !ret && !(flags & RAM_SAVE_FLAG_EOS)) {
2413         ram_addr_t addr, total_ram_bytes;
2414         void *host = NULL;
2415         uint8_t ch;
2416 
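        /*
         * Each record starts with a be64 word holding the page address in
         * its upper bits and the RAM_SAVE_FLAG_* bits in the low,
         * sub-page-offset bits.
         */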
2417         addr = qemu_get_be64(f);
2418         flags = addr & ~TARGET_PAGE_MASK;
2419         addr &= TARGET_PAGE_MASK;
2420 
2421         if (flags & (RAM_SAVE_FLAG_COMPRESS | RAM_SAVE_FLAG_PAGE |
2422                      RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
2423             host = host_from_stream_offset(f, addr, flags);
2424             if (!host) {
2425                 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
2426                 ret = -EINVAL;
2427                 break;
2428             }
2429         }
2430 
2431         switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
2432         case RAM_SAVE_FLAG_MEM_SIZE:
2433             /* Synchronize RAM block list */
2434             total_ram_bytes = addr;
2435             while (!ret && total_ram_bytes) {
2436                 RAMBlock *block;
2437                 char id[256];
2438                 ram_addr_t length;
2439 
2440                 len = qemu_get_byte(f);
2441                 qemu_get_buffer(f, (uint8_t *)id, len);
2442                 id[len] = 0;
2443                 length = qemu_get_be64(f);
2444 
2445                 block = qemu_ram_block_by_name(id);
2446                 if (block) {
2447                     if (length != block->used_length) {
2448                         Error *local_err = NULL;
2449 
2450                         ret = qemu_ram_resize(block->offset, length,
2451                                               &local_err);
2452                         if (local_err) {
2453                             error_report_err(local_err);
2454                         }
2455                     }
2456                     ram_control_load_hook(f, RAM_CONTROL_BLOCK_REG,
2457                                           block->idstr);
2458                 } else {
2459                     error_report("Unknown ramblock \"%s\", cannot "
2460                                  "accept migration", id);
2461                     ret = -EINVAL;
2462                 }
2463 
2464                 total_ram_bytes -= length;
2465             }
2466             break;
2467 
2468         case RAM_SAVE_FLAG_COMPRESS:
2469             ch = qemu_get_byte(f);
2470             ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
2471             break;
2472 
2473         case RAM_SAVE_FLAG_PAGE:
2474             qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
2475             break;
2476 
2477         case RAM_SAVE_FLAG_COMPRESS_PAGE:
2478             len = qemu_get_be32(f);
2479             if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
2480                 error_report("Invalid compressed data length: %d", len);
2481                 ret = -EINVAL;
2482                 break;
2483             }
2484             qemu_get_buffer(f, compressed_data_buf, len);
2485             decompress_data_with_multi_threads(compressed_data_buf, host, len);
2486             break;
2487 
2488         case RAM_SAVE_FLAG_XBZRLE:
2489             if (load_xbzrle(f, addr, host) < 0) {
2490                 error_report("Failed to decompress XBZRLE page at "
2491                              RAM_ADDR_FMT, addr);
2492                 ret = -EINVAL;
2493                 break;
2494             }
2495             break;
2496         case RAM_SAVE_FLAG_EOS:
2497             /* normal exit */
2498             break;
2499         default:
2500             if (flags & RAM_SAVE_FLAG_HOOK) {
2501                 ram_control_load_hook(f, RAM_CONTROL_HOOK, NULL);
2502             } else {
2503                 error_report("Unknown combination of migration flags: %#x",
2504                              flags);
2505                 ret = -EINVAL;
2506             }
2507         }
2508         if (!ret) {
2509             ret = qemu_file_get_error(f);
2510         }
2511     }
2512 
2513     rcu_read_unlock();
2514     DPRINTF("Completed load of VM with exit code %d seq iteration "
2515             "%" PRIu64 "\n", ret, seq_iter);
2516     return ret;
2517 }
2518 
2519 static SaveVMHandlers savevm_ram_handlers = {
2520     .save_live_setup = ram_save_setup,
2521     .save_live_iterate = ram_save_iterate,
2522     .save_live_complete_postcopy = ram_save_complete,
2523     .save_live_complete_precopy = ram_save_complete,
2524     .save_live_pending = ram_save_pending,
2525     .load_state = ram_load,
2526     .cleanup = ram_migration_cleanup,
2527 };
2528 
2529 void ram_mig_init(void)
2530 {
2531     qemu_mutex_init(&XBZRLE.lock);
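    /* The "ram" section is registered as version 4; ram_load() rejects
     * streams with any other version. */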
2532     register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, NULL);
2533 }
2534