xref: /openbmc/qemu/migration/ram.c (revision 0221d73c)
1 /*
2  * QEMU System Emulator
3  *
4  * Copyright (c) 2003-2008 Fabrice Bellard
5  * Copyright (c) 2011-2015 Red Hat Inc
6  *
7  * Authors:
8  *  Juan Quintela <quintela@redhat.com>
9  *
10  * Permission is hereby granted, free of charge, to any person obtaining a copy
11  * of this software and associated documentation files (the "Software"), to deal
12  * in the Software without restriction, including without limitation the rights
13  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
14  * copies of the Software, and to permit persons to whom the Software is
15  * furnished to do so, subject to the following conditions:
16  *
17  * The above copyright notice and this permission notice shall be included in
18  * all copies or substantial portions of the Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
24  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
26  * THE SOFTWARE.
27  */
28 
29 #include "qemu/osdep.h"
30 #include "cpu.h"
31 #include <zlib.h>
32 #include "qemu/cutils.h"
33 #include "qemu/bitops.h"
34 #include "qemu/bitmap.h"
35 #include "qemu/main-loop.h"
36 #include "qemu/pmem.h"
37 #include "xbzrle.h"
38 #include "ram.h"
39 #include "migration.h"
40 #include "socket.h"
41 #include "migration/register.h"
42 #include "migration/misc.h"
43 #include "qemu-file.h"
44 #include "postcopy-ram.h"
45 #include "page_cache.h"
46 #include "qemu/error-report.h"
47 #include "qapi/error.h"
48 #include "qapi/qapi-events-migration.h"
49 #include "qapi/qmp/qerror.h"
50 #include "trace.h"
51 #include "exec/ram_addr.h"
52 #include "exec/target_page.h"
53 #include "qemu/rcu_queue.h"
54 #include "migration/colo.h"
55 #include "block.h"
56 #include "sysemu/sysemu.h"
57 #include "qemu/uuid.h"
58 #include "savevm.h"
59 #include "qemu/iov.h"
60 
61 /***********************************************************/
62 /* ram save/restore */
63 
64 /* RAM_SAVE_FLAG_ZERO used to be named RAM_SAVE_FLAG_COMPRESS, it
65  * worked for pages that where filled with the same char.  We switched
66  * it to only search for the zero value.  And to avoid confusion with
67  * RAM_SSAVE_FLAG_COMPRESS_PAGE just rename it.
68  */
69 
70 #define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
71 #define RAM_SAVE_FLAG_ZERO     0x02
72 #define RAM_SAVE_FLAG_MEM_SIZE 0x04
73 #define RAM_SAVE_FLAG_PAGE     0x08
74 #define RAM_SAVE_FLAG_EOS      0x10
75 #define RAM_SAVE_FLAG_CONTINUE 0x20
76 #define RAM_SAVE_FLAG_XBZRLE   0x40
77 /* 0x80 is reserved in migration.h start with 0x100 next */
78 #define RAM_SAVE_FLAG_COMPRESS_PAGE    0x100
79 
80 static inline bool is_zero_range(uint8_t *p, uint64_t size)
81 {
82     return buffer_is_zero(p, size);
83 }
84 
85 XBZRLECacheStats xbzrle_counters;
86 
87 /* struct contains XBZRLE cache and a static page
88    used by the compression */
89 static struct {
90     /* buffer used for XBZRLE encoding */
91     uint8_t *encoded_buf;
92     /* buffer for storing page content */
93     uint8_t *current_buf;
94     /* Cache for XBZRLE, Protected by lock. */
95     PageCache *cache;
96     QemuMutex lock;
97     /* it will store a page full of zeros */
98     uint8_t *zero_target_page;
99     /* buffer used for XBZRLE decoding */
100     uint8_t *decoded_buf;
101 } XBZRLE;
102 
103 static void XBZRLE_cache_lock(void)
104 {
105     if (migrate_use_xbzrle())
106         qemu_mutex_lock(&XBZRLE.lock);
107 }
108 
109 static void XBZRLE_cache_unlock(void)
110 {
111     if (migrate_use_xbzrle())
112         qemu_mutex_unlock(&XBZRLE.lock);
113 }
114 
115 /**
116  * xbzrle_cache_resize: resize the xbzrle cache
117  *
118  * This function is called from qmp_migrate_set_cache_size in main
119  * thread, possibly while a migration is in progress.  A running
120  * migration may be using the cache and might finish during this call,
121  * hence changes to the cache are protected by XBZRLE.lock().
122  *
123  * Returns 0 for success or -1 for error
124  *
125  * @new_size: new cache size
126  * @errp: set *errp if the check failed, with reason
127  */
128 int xbzrle_cache_resize(int64_t new_size, Error **errp)
129 {
130     PageCache *new_cache;
131     int64_t ret = 0;
132 
133     /* Check for truncation */
134     if (new_size != (size_t)new_size) {
135         error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
136                    "exceeding address space");
137         return -1;
138     }
139 
140     if (new_size == migrate_xbzrle_cache_size()) {
141         /* nothing to do */
142         return 0;
143     }
144 
145     XBZRLE_cache_lock();
146 
147     if (XBZRLE.cache != NULL) {
148         new_cache = cache_init(new_size, TARGET_PAGE_SIZE, errp);
149         if (!new_cache) {
150             ret = -1;
151             goto out;
152         }
153 
154         cache_fini(XBZRLE.cache);
155         XBZRLE.cache = new_cache;
156     }
157 out:
158     XBZRLE_cache_unlock();
159     return ret;
160 }
161 
162 static bool ramblock_is_ignored(RAMBlock *block)
163 {
164     return !qemu_ram_is_migratable(block) ||
165            (migrate_ignore_shared() && qemu_ram_is_shared(block));
166 }
167 
168 /* Should be holding either ram_list.mutex, or the RCU lock. */
169 #define RAMBLOCK_FOREACH_NOT_IGNORED(block)            \
170     INTERNAL_RAMBLOCK_FOREACH(block)                   \
171         if (ramblock_is_ignored(block)) {} else
172 
173 #define RAMBLOCK_FOREACH_MIGRATABLE(block)             \
174     INTERNAL_RAMBLOCK_FOREACH(block)                   \
175         if (!qemu_ram_is_migratable(block)) {} else
176 
177 #undef RAMBLOCK_FOREACH
178 
179 int foreach_not_ignored_block(RAMBlockIterFunc func, void *opaque)
180 {
181     RAMBlock *block;
182     int ret = 0;
183 
184     RCU_READ_LOCK_GUARD();
185 
186     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
187         ret = func(block, opaque);
188         if (ret) {
189             break;
190         }
191     }
192     return ret;
193 }
194 
195 static void ramblock_recv_map_init(void)
196 {
197     RAMBlock *rb;
198 
199     RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
200         assert(!rb->receivedmap);
201         rb->receivedmap = bitmap_new(rb->max_length >> qemu_target_page_bits());
202     }
203 }
204 
205 int ramblock_recv_bitmap_test(RAMBlock *rb, void *host_addr)
206 {
207     return test_bit(ramblock_recv_bitmap_offset(host_addr, rb),
208                     rb->receivedmap);
209 }
210 
211 bool ramblock_recv_bitmap_test_byte_offset(RAMBlock *rb, uint64_t byte_offset)
212 {
213     return test_bit(byte_offset >> TARGET_PAGE_BITS, rb->receivedmap);
214 }
215 
216 void ramblock_recv_bitmap_set(RAMBlock *rb, void *host_addr)
217 {
218     set_bit_atomic(ramblock_recv_bitmap_offset(host_addr, rb), rb->receivedmap);
219 }
220 
221 void ramblock_recv_bitmap_set_range(RAMBlock *rb, void *host_addr,
222                                     size_t nr)
223 {
224     bitmap_set_atomic(rb->receivedmap,
225                       ramblock_recv_bitmap_offset(host_addr, rb),
226                       nr);
227 }
228 
229 #define  RAMBLOCK_RECV_BITMAP_ENDING  (0x0123456789abcdefULL)
230 
231 /*
232  * Format: bitmap_size (8 bytes) + whole_bitmap (N bytes).
233  *
234  * Returns >0 if success with sent bytes, or <0 if error.
235  */
236 int64_t ramblock_recv_bitmap_send(QEMUFile *file,
237                                   const char *block_name)
238 {
239     RAMBlock *block = qemu_ram_block_by_name(block_name);
240     unsigned long *le_bitmap, nbits;
241     uint64_t size;
242 
243     if (!block) {
244         error_report("%s: invalid block name: %s", __func__, block_name);
245         return -1;
246     }
247 
248     nbits = block->used_length >> TARGET_PAGE_BITS;
249 
250     /*
251      * Make sure the tmp bitmap buffer is big enough, e.g., on 32bit
252      * machines we may need 4 more bytes for padding (see below
253      * comment). So extend it a bit before hand.
254      */
255     le_bitmap = bitmap_new(nbits + BITS_PER_LONG);
256 
257     /*
258      * Always use little endian when sending the bitmap. This is
259      * required that when source and destination VMs are not using the
260      * same endianess. (Note: big endian won't work.)
261      */
262     bitmap_to_le(le_bitmap, block->receivedmap, nbits);
263 
264     /* Size of the bitmap, in bytes */
265     size = DIV_ROUND_UP(nbits, 8);
266 
267     /*
268      * size is always aligned to 8 bytes for 64bit machines, but it
269      * may not be true for 32bit machines. We need this padding to
270      * make sure the migration can survive even between 32bit and
271      * 64bit machines.
272      */
273     size = ROUND_UP(size, 8);
274 
275     qemu_put_be64(file, size);
276     qemu_put_buffer(file, (const uint8_t *)le_bitmap, size);
277     /*
278      * Mark as an end, in case the middle part is screwed up due to
279      * some "misterious" reason.
280      */
281     qemu_put_be64(file, RAMBLOCK_RECV_BITMAP_ENDING);
282     qemu_fflush(file);
283 
284     g_free(le_bitmap);
285 
286     if (qemu_file_get_error(file)) {
287         return qemu_file_get_error(file);
288     }
289 
290     return size + sizeof(size);
291 }
292 
293 /*
294  * An outstanding page request, on the source, having been received
295  * and queued
296  */
297 struct RAMSrcPageRequest {
298     RAMBlock *rb;
299     hwaddr    offset;
300     hwaddr    len;
301 
302     QSIMPLEQ_ENTRY(RAMSrcPageRequest) next_req;
303 };
304 
305 /* State of RAM for migration */
306 struct RAMState {
307     /* QEMUFile used for this migration */
308     QEMUFile *f;
309     /* Last block that we have visited searching for dirty pages */
310     RAMBlock *last_seen_block;
311     /* Last block from where we have sent data */
312     RAMBlock *last_sent_block;
313     /* Last dirty target page we have sent */
314     ram_addr_t last_page;
315     /* last ram version we have seen */
316     uint32_t last_version;
317     /* We are in the first round */
318     bool ram_bulk_stage;
319     /* The free page optimization is enabled */
320     bool fpo_enabled;
321     /* How many times we have dirty too many pages */
322     int dirty_rate_high_cnt;
323     /* these variables are used for bitmap sync */
324     /* last time we did a full bitmap_sync */
325     int64_t time_last_bitmap_sync;
326     /* bytes transferred at start_time */
327     uint64_t bytes_xfer_prev;
328     /* number of dirty pages since start_time */
329     uint64_t num_dirty_pages_period;
330     /* xbzrle misses since the beginning of the period */
331     uint64_t xbzrle_cache_miss_prev;
332 
333     /* compression statistics since the beginning of the period */
334     /* amount of count that no free thread to compress data */
335     uint64_t compress_thread_busy_prev;
336     /* amount bytes after compression */
337     uint64_t compressed_size_prev;
338     /* amount of compressed pages */
339     uint64_t compress_pages_prev;
340 
341     /* total handled target pages at the beginning of period */
342     uint64_t target_page_count_prev;
343     /* total handled target pages since start */
344     uint64_t target_page_count;
345     /* number of dirty bits in the bitmap */
346     uint64_t migration_dirty_pages;
347     /* Protects modification of the bitmap and migration dirty pages */
348     QemuMutex bitmap_mutex;
349     /* The RAMBlock used in the last src_page_requests */
350     RAMBlock *last_req_rb;
351     /* Queue of outstanding page requests from the destination */
352     QemuMutex src_page_req_mutex;
353     QSIMPLEQ_HEAD(, RAMSrcPageRequest) src_page_requests;
354 };
355 typedef struct RAMState RAMState;
356 
357 static RAMState *ram_state;
358 
359 static NotifierWithReturnList precopy_notifier_list;
360 
361 void precopy_infrastructure_init(void)
362 {
363     notifier_with_return_list_init(&precopy_notifier_list);
364 }
365 
366 void precopy_add_notifier(NotifierWithReturn *n)
367 {
368     notifier_with_return_list_add(&precopy_notifier_list, n);
369 }
370 
371 void precopy_remove_notifier(NotifierWithReturn *n)
372 {
373     notifier_with_return_remove(n);
374 }
375 
376 int precopy_notify(PrecopyNotifyReason reason, Error **errp)
377 {
378     PrecopyNotifyData pnd;
379     pnd.reason = reason;
380     pnd.errp = errp;
381 
382     return notifier_with_return_list_notify(&precopy_notifier_list, &pnd);
383 }
384 
385 void precopy_enable_free_page_optimization(void)
386 {
387     if (!ram_state) {
388         return;
389     }
390 
391     ram_state->fpo_enabled = true;
392 }
393 
394 uint64_t ram_bytes_remaining(void)
395 {
396     return ram_state ? (ram_state->migration_dirty_pages * TARGET_PAGE_SIZE) :
397                        0;
398 }
399 
400 MigrationStats ram_counters;
401 
402 /* used by the search for pages to send */
403 struct PageSearchStatus {
404     /* Current block being searched */
405     RAMBlock    *block;
406     /* Current page to search from */
407     unsigned long page;
408     /* Set once we wrap around */
409     bool         complete_round;
410 };
411 typedef struct PageSearchStatus PageSearchStatus;
412 
413 CompressionStats compression_counters;
414 
415 struct CompressParam {
416     bool done;
417     bool quit;
418     bool zero_page;
419     QEMUFile *file;
420     QemuMutex mutex;
421     QemuCond cond;
422     RAMBlock *block;
423     ram_addr_t offset;
424 
425     /* internally used fields */
426     z_stream stream;
427     uint8_t *originbuf;
428 };
429 typedef struct CompressParam CompressParam;
430 
431 struct DecompressParam {
432     bool done;
433     bool quit;
434     QemuMutex mutex;
435     QemuCond cond;
436     void *des;
437     uint8_t *compbuf;
438     int len;
439     z_stream stream;
440 };
441 typedef struct DecompressParam DecompressParam;
442 
443 static CompressParam *comp_param;
444 static QemuThread *compress_threads;
445 /* comp_done_cond is used to wake up the migration thread when
446  * one of the compression threads has finished the compression.
447  * comp_done_lock is used to co-work with comp_done_cond.
448  */
449 static QemuMutex comp_done_lock;
450 static QemuCond comp_done_cond;
451 /* The empty QEMUFileOps will be used by file in CompressParam */
452 static const QEMUFileOps empty_ops = { };
453 
454 static QEMUFile *decomp_file;
455 static DecompressParam *decomp_param;
456 static QemuThread *decompress_threads;
457 static QemuMutex decomp_done_lock;
458 static QemuCond decomp_done_cond;
459 
460 static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
461                                  ram_addr_t offset, uint8_t *source_buf);
462 
463 static void *do_data_compress(void *opaque)
464 {
465     CompressParam *param = opaque;
466     RAMBlock *block;
467     ram_addr_t offset;
468     bool zero_page;
469 
470     qemu_mutex_lock(&param->mutex);
471     while (!param->quit) {
472         if (param->block) {
473             block = param->block;
474             offset = param->offset;
475             param->block = NULL;
476             qemu_mutex_unlock(&param->mutex);
477 
478             zero_page = do_compress_ram_page(param->file, &param->stream,
479                                              block, offset, param->originbuf);
480 
481             qemu_mutex_lock(&comp_done_lock);
482             param->done = true;
483             param->zero_page = zero_page;
484             qemu_cond_signal(&comp_done_cond);
485             qemu_mutex_unlock(&comp_done_lock);
486 
487             qemu_mutex_lock(&param->mutex);
488         } else {
489             qemu_cond_wait(&param->cond, &param->mutex);
490         }
491     }
492     qemu_mutex_unlock(&param->mutex);
493 
494     return NULL;
495 }
496 
497 static void compress_threads_save_cleanup(void)
498 {
499     int i, thread_count;
500 
501     if (!migrate_use_compression() || !comp_param) {
502         return;
503     }
504 
505     thread_count = migrate_compress_threads();
506     for (i = 0; i < thread_count; i++) {
507         /*
508          * we use it as a indicator which shows if the thread is
509          * properly init'd or not
510          */
511         if (!comp_param[i].file) {
512             break;
513         }
514 
515         qemu_mutex_lock(&comp_param[i].mutex);
516         comp_param[i].quit = true;
517         qemu_cond_signal(&comp_param[i].cond);
518         qemu_mutex_unlock(&comp_param[i].mutex);
519 
520         qemu_thread_join(compress_threads + i);
521         qemu_mutex_destroy(&comp_param[i].mutex);
522         qemu_cond_destroy(&comp_param[i].cond);
523         deflateEnd(&comp_param[i].stream);
524         g_free(comp_param[i].originbuf);
525         qemu_fclose(comp_param[i].file);
526         comp_param[i].file = NULL;
527     }
528     qemu_mutex_destroy(&comp_done_lock);
529     qemu_cond_destroy(&comp_done_cond);
530     g_free(compress_threads);
531     g_free(comp_param);
532     compress_threads = NULL;
533     comp_param = NULL;
534 }
535 
536 static int compress_threads_save_setup(void)
537 {
538     int i, thread_count;
539 
540     if (!migrate_use_compression()) {
541         return 0;
542     }
543     thread_count = migrate_compress_threads();
544     compress_threads = g_new0(QemuThread, thread_count);
545     comp_param = g_new0(CompressParam, thread_count);
546     qemu_cond_init(&comp_done_cond);
547     qemu_mutex_init(&comp_done_lock);
548     for (i = 0; i < thread_count; i++) {
549         comp_param[i].originbuf = g_try_malloc(TARGET_PAGE_SIZE);
550         if (!comp_param[i].originbuf) {
551             goto exit;
552         }
553 
554         if (deflateInit(&comp_param[i].stream,
555                         migrate_compress_level()) != Z_OK) {
556             g_free(comp_param[i].originbuf);
557             goto exit;
558         }
559 
560         /* comp_param[i].file is just used as a dummy buffer to save data,
561          * set its ops to empty.
562          */
563         comp_param[i].file = qemu_fopen_ops(NULL, &empty_ops);
564         comp_param[i].done = true;
565         comp_param[i].quit = false;
566         qemu_mutex_init(&comp_param[i].mutex);
567         qemu_cond_init(&comp_param[i].cond);
568         qemu_thread_create(compress_threads + i, "compress",
569                            do_data_compress, comp_param + i,
570                            QEMU_THREAD_JOINABLE);
571     }
572     return 0;
573 
574 exit:
575     compress_threads_save_cleanup();
576     return -1;
577 }
578 
579 /* Multiple fd's */
580 
581 #define MULTIFD_MAGIC 0x11223344U
582 #define MULTIFD_VERSION 1
583 
584 #define MULTIFD_FLAG_SYNC (1 << 0)
585 
586 /* This value needs to be a multiple of qemu_target_page_size() */
587 #define MULTIFD_PACKET_SIZE (512 * 1024)
588 
589 typedef struct {
590     uint32_t magic;
591     uint32_t version;
592     unsigned char uuid[16]; /* QemuUUID */
593     uint8_t id;
594     uint8_t unused1[7];     /* Reserved for future use */
595     uint64_t unused2[4];    /* Reserved for future use */
596 } __attribute__((packed)) MultiFDInit_t;
597 
598 typedef struct {
599     uint32_t magic;
600     uint32_t version;
601     uint32_t flags;
602     /* maximum number of allocated pages */
603     uint32_t pages_alloc;
604     uint32_t pages_used;
605     /* size of the next packet that contains pages */
606     uint32_t next_packet_size;
607     uint64_t packet_num;
608     uint64_t unused[4];    /* Reserved for future use */
609     char ramblock[256];
610     uint64_t offset[];
611 } __attribute__((packed)) MultiFDPacket_t;
612 
613 typedef struct {
614     /* number of used pages */
615     uint32_t used;
616     /* number of allocated pages */
617     uint32_t allocated;
618     /* global number of generated multifd packets */
619     uint64_t packet_num;
620     /* offset of each page */
621     ram_addr_t *offset;
622     /* pointer to each page */
623     struct iovec *iov;
624     RAMBlock *block;
625 } MultiFDPages_t;
626 
627 typedef struct {
628     /* this fields are not changed once the thread is created */
629     /* channel number */
630     uint8_t id;
631     /* channel thread name */
632     char *name;
633     /* channel thread id */
634     QemuThread thread;
635     /* communication channel */
636     QIOChannel *c;
637     /* sem where to wait for more work */
638     QemuSemaphore sem;
639     /* this mutex protects the following parameters */
640     QemuMutex mutex;
641     /* is this channel thread running */
642     bool running;
643     /* should this thread finish */
644     bool quit;
645     /* thread has work to do */
646     int pending_job;
647     /* array of pages to sent */
648     MultiFDPages_t *pages;
649     /* packet allocated len */
650     uint32_t packet_len;
651     /* pointer to the packet */
652     MultiFDPacket_t *packet;
653     /* multifd flags for each packet */
654     uint32_t flags;
655     /* size of the next packet that contains pages */
656     uint32_t next_packet_size;
657     /* global number of generated multifd packets */
658     uint64_t packet_num;
659     /* thread local variables */
660     /* packets sent through this channel */
661     uint64_t num_packets;
662     /* pages sent through this channel */
663     uint64_t num_pages;
664     /* syncs main thread and channels */
665     QemuSemaphore sem_sync;
666 }  MultiFDSendParams;
667 
668 typedef struct {
669     /* this fields are not changed once the thread is created */
670     /* channel number */
671     uint8_t id;
672     /* channel thread name */
673     char *name;
674     /* channel thread id */
675     QemuThread thread;
676     /* communication channel */
677     QIOChannel *c;
678     /* this mutex protects the following parameters */
679     QemuMutex mutex;
680     /* is this channel thread running */
681     bool running;
682     /* should this thread finish */
683     bool quit;
684     /* array of pages to receive */
685     MultiFDPages_t *pages;
686     /* packet allocated len */
687     uint32_t packet_len;
688     /* pointer to the packet */
689     MultiFDPacket_t *packet;
690     /* multifd flags for each packet */
691     uint32_t flags;
692     /* global number of generated multifd packets */
693     uint64_t packet_num;
694     /* thread local variables */
695     /* size of the next packet that contains pages */
696     uint32_t next_packet_size;
697     /* packets sent through this channel */
698     uint64_t num_packets;
699     /* pages sent through this channel */
700     uint64_t num_pages;
701     /* syncs main thread and channels */
702     QemuSemaphore sem_sync;
703 } MultiFDRecvParams;
704 
705 static int multifd_send_initial_packet(MultiFDSendParams *p, Error **errp)
706 {
707     MultiFDInit_t msg;
708     int ret;
709 
710     msg.magic = cpu_to_be32(MULTIFD_MAGIC);
711     msg.version = cpu_to_be32(MULTIFD_VERSION);
712     msg.id = p->id;
713     memcpy(msg.uuid, &qemu_uuid.data, sizeof(msg.uuid));
714 
715     ret = qio_channel_write_all(p->c, (char *)&msg, sizeof(msg), errp);
716     if (ret != 0) {
717         return -1;
718     }
719     return 0;
720 }
721 
722 static int multifd_recv_initial_packet(QIOChannel *c, Error **errp)
723 {
724     MultiFDInit_t msg;
725     int ret;
726 
727     ret = qio_channel_read_all(c, (char *)&msg, sizeof(msg), errp);
728     if (ret != 0) {
729         return -1;
730     }
731 
732     msg.magic = be32_to_cpu(msg.magic);
733     msg.version = be32_to_cpu(msg.version);
734 
735     if (msg.magic != MULTIFD_MAGIC) {
736         error_setg(errp, "multifd: received packet magic %x "
737                    "expected %x", msg.magic, MULTIFD_MAGIC);
738         return -1;
739     }
740 
741     if (msg.version != MULTIFD_VERSION) {
742         error_setg(errp, "multifd: received packet version %d "
743                    "expected %d", msg.version, MULTIFD_VERSION);
744         return -1;
745     }
746 
747     if (memcmp(msg.uuid, &qemu_uuid, sizeof(qemu_uuid))) {
748         char *uuid = qemu_uuid_unparse_strdup(&qemu_uuid);
749         char *msg_uuid = qemu_uuid_unparse_strdup((const QemuUUID *)msg.uuid);
750 
751         error_setg(errp, "multifd: received uuid '%s' and expected "
752                    "uuid '%s' for channel %hhd", msg_uuid, uuid, msg.id);
753         g_free(uuid);
754         g_free(msg_uuid);
755         return -1;
756     }
757 
758     if (msg.id > migrate_multifd_channels()) {
759         error_setg(errp, "multifd: received channel version %d "
760                    "expected %d", msg.version, MULTIFD_VERSION);
761         return -1;
762     }
763 
764     return msg.id;
765 }
766 
767 static MultiFDPages_t *multifd_pages_init(size_t size)
768 {
769     MultiFDPages_t *pages = g_new0(MultiFDPages_t, 1);
770 
771     pages->allocated = size;
772     pages->iov = g_new0(struct iovec, size);
773     pages->offset = g_new0(ram_addr_t, size);
774 
775     return pages;
776 }
777 
778 static void multifd_pages_clear(MultiFDPages_t *pages)
779 {
780     pages->used = 0;
781     pages->allocated = 0;
782     pages->packet_num = 0;
783     pages->block = NULL;
784     g_free(pages->iov);
785     pages->iov = NULL;
786     g_free(pages->offset);
787     pages->offset = NULL;
788     g_free(pages);
789 }
790 
791 static void multifd_send_fill_packet(MultiFDSendParams *p)
792 {
793     MultiFDPacket_t *packet = p->packet;
794     int i;
795 
796     packet->flags = cpu_to_be32(p->flags);
797     packet->pages_alloc = cpu_to_be32(p->pages->allocated);
798     packet->pages_used = cpu_to_be32(p->pages->used);
799     packet->next_packet_size = cpu_to_be32(p->next_packet_size);
800     packet->packet_num = cpu_to_be64(p->packet_num);
801 
802     if (p->pages->block) {
803         strncpy(packet->ramblock, p->pages->block->idstr, 256);
804     }
805 
806     for (i = 0; i < p->pages->used; i++) {
807         packet->offset[i] = cpu_to_be64(p->pages->offset[i]);
808     }
809 }
810 
811 static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
812 {
813     MultiFDPacket_t *packet = p->packet;
814     uint32_t pages_max = MULTIFD_PACKET_SIZE / qemu_target_page_size();
815     RAMBlock *block;
816     int i;
817 
818     packet->magic = be32_to_cpu(packet->magic);
819     if (packet->magic != MULTIFD_MAGIC) {
820         error_setg(errp, "multifd: received packet "
821                    "magic %x and expected magic %x",
822                    packet->magic, MULTIFD_MAGIC);
823         return -1;
824     }
825 
826     packet->version = be32_to_cpu(packet->version);
827     if (packet->version != MULTIFD_VERSION) {
828         error_setg(errp, "multifd: received packet "
829                    "version %d and expected version %d",
830                    packet->version, MULTIFD_VERSION);
831         return -1;
832     }
833 
834     p->flags = be32_to_cpu(packet->flags);
835 
836     packet->pages_alloc = be32_to_cpu(packet->pages_alloc);
837     /*
838      * If we received a packet that is 100 times bigger than expected
839      * just stop migration.  It is a magic number.
840      */
841     if (packet->pages_alloc > pages_max * 100) {
842         error_setg(errp, "multifd: received packet "
843                    "with size %d and expected a maximum size of %d",
844                    packet->pages_alloc, pages_max * 100) ;
845         return -1;
846     }
847     /*
848      * We received a packet that is bigger than expected but inside
849      * reasonable limits (see previous comment).  Just reallocate.
850      */
851     if (packet->pages_alloc > p->pages->allocated) {
852         multifd_pages_clear(p->pages);
853         p->pages = multifd_pages_init(packet->pages_alloc);
854     }
855 
856     p->pages->used = be32_to_cpu(packet->pages_used);
857     if (p->pages->used > packet->pages_alloc) {
858         error_setg(errp, "multifd: received packet "
859                    "with %d pages and expected maximum pages are %d",
860                    p->pages->used, packet->pages_alloc) ;
861         return -1;
862     }
863 
864     p->next_packet_size = be32_to_cpu(packet->next_packet_size);
865     p->packet_num = be64_to_cpu(packet->packet_num);
866 
867     if (p->pages->used) {
868         /* make sure that ramblock is 0 terminated */
869         packet->ramblock[255] = 0;
870         block = qemu_ram_block_by_name(packet->ramblock);
871         if (!block) {
872             error_setg(errp, "multifd: unknown ram block %s",
873                        packet->ramblock);
874             return -1;
875         }
876     }
877 
878     for (i = 0; i < p->pages->used; i++) {
879         ram_addr_t offset = be64_to_cpu(packet->offset[i]);
880 
881         if (offset > (block->used_length - TARGET_PAGE_SIZE)) {
882             error_setg(errp, "multifd: offset too long " RAM_ADDR_FMT
883                        " (max " RAM_ADDR_FMT ")",
884                        offset, block->max_length);
885             return -1;
886         }
887         p->pages->iov[i].iov_base = block->host + offset;
888         p->pages->iov[i].iov_len = TARGET_PAGE_SIZE;
889     }
890 
891     return 0;
892 }
893 
894 struct {
895     MultiFDSendParams *params;
896     /* array of pages to sent */
897     MultiFDPages_t *pages;
898     /* global number of generated multifd packets */
899     uint64_t packet_num;
900     /* send channels ready */
901     QemuSemaphore channels_ready;
902 } *multifd_send_state;
903 
904 /*
905  * How we use multifd_send_state->pages and channel->pages?
906  *
907  * We create a pages for each channel, and a main one.  Each time that
908  * we need to send a batch of pages we interchange the ones between
909  * multifd_send_state and the channel that is sending it.  There are
910  * two reasons for that:
911  *    - to not have to do so many mallocs during migration
912  *    - to make easier to know what to free at the end of migration
913  *
914  * This way we always know who is the owner of each "pages" struct,
915  * and we don't need any locking.  It belongs to the migration thread
916  * or to the channel thread.  Switching is safe because the migration
917  * thread is using the channel mutex when changing it, and the channel
918  * have to had finish with its own, otherwise pending_job can't be
919  * false.
920  */
921 
922 static int multifd_send_pages(RAMState *rs)
923 {
924     int i;
925     static int next_channel;
926     MultiFDSendParams *p = NULL; /* make happy gcc */
927     MultiFDPages_t *pages = multifd_send_state->pages;
928     uint64_t transferred;
929 
930     qemu_sem_wait(&multifd_send_state->channels_ready);
931     for (i = next_channel;; i = (i + 1) % migrate_multifd_channels()) {
932         p = &multifd_send_state->params[i];
933 
934         qemu_mutex_lock(&p->mutex);
935         if (p->quit) {
936             error_report("%s: channel %d has already quit!", __func__, i);
937             qemu_mutex_unlock(&p->mutex);
938             return -1;
939         }
940         if (!p->pending_job) {
941             p->pending_job++;
942             next_channel = (i + 1) % migrate_multifd_channels();
943             break;
944         }
945         qemu_mutex_unlock(&p->mutex);
946     }
947     p->pages->used = 0;
948 
949     p->packet_num = multifd_send_state->packet_num++;
950     p->pages->block = NULL;
951     multifd_send_state->pages = p->pages;
952     p->pages = pages;
953     transferred = ((uint64_t) pages->used) * TARGET_PAGE_SIZE + p->packet_len;
954     qemu_file_update_transfer(rs->f, transferred);
955     ram_counters.multifd_bytes += transferred;
956     ram_counters.transferred += transferred;;
957     qemu_mutex_unlock(&p->mutex);
958     qemu_sem_post(&p->sem);
959 
960     return 1;
961 }
962 
963 static int multifd_queue_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
964 {
965     MultiFDPages_t *pages = multifd_send_state->pages;
966 
967     if (!pages->block) {
968         pages->block = block;
969     }
970 
971     if (pages->block == block) {
972         pages->offset[pages->used] = offset;
973         pages->iov[pages->used].iov_base = block->host + offset;
974         pages->iov[pages->used].iov_len = TARGET_PAGE_SIZE;
975         pages->used++;
976 
977         if (pages->used < pages->allocated) {
978             return 1;
979         }
980     }
981 
982     if (multifd_send_pages(rs) < 0) {
983         return -1;
984     }
985 
986     if (pages->block != block) {
987         return  multifd_queue_page(rs, block, offset);
988     }
989 
990     return 1;
991 }
992 
993 static void multifd_send_terminate_threads(Error *err)
994 {
995     int i;
996 
997     trace_multifd_send_terminate_threads(err != NULL);
998 
999     if (err) {
1000         MigrationState *s = migrate_get_current();
1001         migrate_set_error(s, err);
1002         if (s->state == MIGRATION_STATUS_SETUP ||
1003             s->state == MIGRATION_STATUS_PRE_SWITCHOVER ||
1004             s->state == MIGRATION_STATUS_DEVICE ||
1005             s->state == MIGRATION_STATUS_ACTIVE) {
1006             migrate_set_state(&s->state, s->state,
1007                               MIGRATION_STATUS_FAILED);
1008         }
1009     }
1010 
1011     for (i = 0; i < migrate_multifd_channels(); i++) {
1012         MultiFDSendParams *p = &multifd_send_state->params[i];
1013 
1014         qemu_mutex_lock(&p->mutex);
1015         p->quit = true;
1016         qemu_sem_post(&p->sem);
1017         qemu_mutex_unlock(&p->mutex);
1018     }
1019 }
1020 
1021 void multifd_save_cleanup(void)
1022 {
1023     int i;
1024 
1025     if (!migrate_use_multifd()) {
1026         return;
1027     }
1028     multifd_send_terminate_threads(NULL);
1029     for (i = 0; i < migrate_multifd_channels(); i++) {
1030         MultiFDSendParams *p = &multifd_send_state->params[i];
1031 
1032         if (p->running) {
1033             qemu_thread_join(&p->thread);
1034         }
1035         socket_send_channel_destroy(p->c);
1036         p->c = NULL;
1037         qemu_mutex_destroy(&p->mutex);
1038         qemu_sem_destroy(&p->sem);
1039         qemu_sem_destroy(&p->sem_sync);
1040         g_free(p->name);
1041         p->name = NULL;
1042         multifd_pages_clear(p->pages);
1043         p->pages = NULL;
1044         p->packet_len = 0;
1045         g_free(p->packet);
1046         p->packet = NULL;
1047     }
1048     qemu_sem_destroy(&multifd_send_state->channels_ready);
1049     g_free(multifd_send_state->params);
1050     multifd_send_state->params = NULL;
1051     multifd_pages_clear(multifd_send_state->pages);
1052     multifd_send_state->pages = NULL;
1053     g_free(multifd_send_state);
1054     multifd_send_state = NULL;
1055 }
1056 
1057 static void multifd_send_sync_main(RAMState *rs)
1058 {
1059     int i;
1060 
1061     if (!migrate_use_multifd()) {
1062         return;
1063     }
1064     if (multifd_send_state->pages->used) {
1065         if (multifd_send_pages(rs) < 0) {
1066             error_report("%s: multifd_send_pages fail", __func__);
1067             return;
1068         }
1069     }
1070     for (i = 0; i < migrate_multifd_channels(); i++) {
1071         MultiFDSendParams *p = &multifd_send_state->params[i];
1072 
1073         trace_multifd_send_sync_main_signal(p->id);
1074 
1075         qemu_mutex_lock(&p->mutex);
1076 
1077         if (p->quit) {
1078             error_report("%s: channel %d has already quit", __func__, i);
1079             qemu_mutex_unlock(&p->mutex);
1080             return;
1081         }
1082 
1083         p->packet_num = multifd_send_state->packet_num++;
1084         p->flags |= MULTIFD_FLAG_SYNC;
1085         p->pending_job++;
1086         qemu_file_update_transfer(rs->f, p->packet_len);
1087         ram_counters.multifd_bytes += p->packet_len;
1088         ram_counters.transferred += p->packet_len;
1089         qemu_mutex_unlock(&p->mutex);
1090         qemu_sem_post(&p->sem);
1091     }
1092     for (i = 0; i < migrate_multifd_channels(); i++) {
1093         MultiFDSendParams *p = &multifd_send_state->params[i];
1094 
1095         trace_multifd_send_sync_main_wait(p->id);
1096         qemu_sem_wait(&p->sem_sync);
1097     }
1098     trace_multifd_send_sync_main(multifd_send_state->packet_num);
1099 }
1100 
1101 static void *multifd_send_thread(void *opaque)
1102 {
1103     MultiFDSendParams *p = opaque;
1104     Error *local_err = NULL;
1105     int ret = 0;
1106     uint32_t flags = 0;
1107 
1108     trace_multifd_send_thread_start(p->id);
1109     rcu_register_thread();
1110 
1111     if (multifd_send_initial_packet(p, &local_err) < 0) {
1112         ret = -1;
1113         goto out;
1114     }
1115     /* initial packet */
1116     p->num_packets = 1;
1117 
1118     while (true) {
1119         qemu_sem_wait(&p->sem);
1120         qemu_mutex_lock(&p->mutex);
1121 
1122         if (p->pending_job) {
1123             uint32_t used = p->pages->used;
1124             uint64_t packet_num = p->packet_num;
1125             flags = p->flags;
1126 
1127             p->next_packet_size = used * qemu_target_page_size();
1128             multifd_send_fill_packet(p);
1129             p->flags = 0;
1130             p->num_packets++;
1131             p->num_pages += used;
1132             qemu_mutex_unlock(&p->mutex);
1133 
1134             trace_multifd_send(p->id, packet_num, used, flags,
1135                                p->next_packet_size);
1136 
1137             ret = qio_channel_write_all(p->c, (void *)p->packet,
1138                                         p->packet_len, &local_err);
1139             if (ret != 0) {
1140                 break;
1141             }
1142 
1143             if (used) {
1144                 ret = qio_channel_writev_all(p->c, p->pages->iov,
1145                                              used, &local_err);
1146                 if (ret != 0) {
1147                     break;
1148                 }
1149             }
1150 
1151             qemu_mutex_lock(&p->mutex);
1152             p->pending_job--;
1153             qemu_mutex_unlock(&p->mutex);
1154 
1155             if (flags & MULTIFD_FLAG_SYNC) {
1156                 qemu_sem_post(&p->sem_sync);
1157             }
1158             qemu_sem_post(&multifd_send_state->channels_ready);
1159         } else if (p->quit) {
1160             qemu_mutex_unlock(&p->mutex);
1161             break;
1162         } else {
1163             qemu_mutex_unlock(&p->mutex);
1164             /* sometimes there are spurious wakeups */
1165         }
1166     }
1167 
1168 out:
1169     if (local_err) {
1170         trace_multifd_send_error(p->id);
1171         multifd_send_terminate_threads(local_err);
1172     }
1173 
1174     /*
1175      * Error happen, I will exit, but I can't just leave, tell
1176      * who pay attention to me.
1177      */
1178     if (ret != 0) {
1179         qemu_sem_post(&p->sem_sync);
1180         qemu_sem_post(&multifd_send_state->channels_ready);
1181     }
1182 
1183     qemu_mutex_lock(&p->mutex);
1184     p->running = false;
1185     qemu_mutex_unlock(&p->mutex);
1186 
1187     rcu_unregister_thread();
1188     trace_multifd_send_thread_end(p->id, p->num_packets, p->num_pages);
1189 
1190     return NULL;
1191 }
1192 
1193 static void multifd_new_send_channel_async(QIOTask *task, gpointer opaque)
1194 {
1195     MultiFDSendParams *p = opaque;
1196     QIOChannel *sioc = QIO_CHANNEL(qio_task_get_source(task));
1197     Error *local_err = NULL;
1198 
1199     trace_multifd_new_send_channel_async(p->id);
1200     if (qio_task_propagate_error(task, &local_err)) {
1201         migrate_set_error(migrate_get_current(), local_err);
1202         multifd_save_cleanup();
1203     } else {
1204         p->c = QIO_CHANNEL(sioc);
1205         qio_channel_set_delay(p->c, false);
1206         p->running = true;
1207         qemu_thread_create(&p->thread, p->name, multifd_send_thread, p,
1208                            QEMU_THREAD_JOINABLE);
1209     }
1210 }
1211 
1212 int multifd_save_setup(void)
1213 {
1214     int thread_count;
1215     uint32_t page_count = MULTIFD_PACKET_SIZE / qemu_target_page_size();
1216     uint8_t i;
1217 
1218     if (!migrate_use_multifd()) {
1219         return 0;
1220     }
1221     thread_count = migrate_multifd_channels();
1222     multifd_send_state = g_malloc0(sizeof(*multifd_send_state));
1223     multifd_send_state->params = g_new0(MultiFDSendParams, thread_count);
1224     multifd_send_state->pages = multifd_pages_init(page_count);
1225     qemu_sem_init(&multifd_send_state->channels_ready, 0);
1226 
1227     for (i = 0; i < thread_count; i++) {
1228         MultiFDSendParams *p = &multifd_send_state->params[i];
1229 
1230         qemu_mutex_init(&p->mutex);
1231         qemu_sem_init(&p->sem, 0);
1232         qemu_sem_init(&p->sem_sync, 0);
1233         p->quit = false;
1234         p->pending_job = 0;
1235         p->id = i;
1236         p->pages = multifd_pages_init(page_count);
1237         p->packet_len = sizeof(MultiFDPacket_t)
1238                       + sizeof(ram_addr_t) * page_count;
1239         p->packet = g_malloc0(p->packet_len);
1240         p->packet->magic = cpu_to_be32(MULTIFD_MAGIC);
1241         p->packet->version = cpu_to_be32(MULTIFD_VERSION);
1242         p->name = g_strdup_printf("multifdsend_%d", i);
1243         socket_send_channel_create(multifd_new_send_channel_async, p);
1244     }
1245     return 0;
1246 }
1247 
1248 struct {
1249     MultiFDRecvParams *params;
1250     /* number of created threads */
1251     int count;
1252     /* syncs main thread and channels */
1253     QemuSemaphore sem_sync;
1254     /* global number of generated multifd packets */
1255     uint64_t packet_num;
1256 } *multifd_recv_state;
1257 
1258 static void multifd_recv_terminate_threads(Error *err)
1259 {
1260     int i;
1261 
1262     trace_multifd_recv_terminate_threads(err != NULL);
1263 
1264     if (err) {
1265         MigrationState *s = migrate_get_current();
1266         migrate_set_error(s, err);
1267         if (s->state == MIGRATION_STATUS_SETUP ||
1268             s->state == MIGRATION_STATUS_ACTIVE) {
1269             migrate_set_state(&s->state, s->state,
1270                               MIGRATION_STATUS_FAILED);
1271         }
1272     }
1273 
1274     for (i = 0; i < migrate_multifd_channels(); i++) {
1275         MultiFDRecvParams *p = &multifd_recv_state->params[i];
1276 
1277         qemu_mutex_lock(&p->mutex);
1278         p->quit = true;
1279         /* We could arrive here for two reasons:
1280            - normal quit, i.e. everything went fine, just finished
1281            - error quit: We close the channels so the channel threads
1282              finish the qio_channel_read_all_eof() */
1283         qio_channel_shutdown(p->c, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
1284         qemu_mutex_unlock(&p->mutex);
1285     }
1286 }
1287 
1288 int multifd_load_cleanup(Error **errp)
1289 {
1290     int i;
1291     int ret = 0;
1292 
1293     if (!migrate_use_multifd()) {
1294         return 0;
1295     }
1296     multifd_recv_terminate_threads(NULL);
1297     for (i = 0; i < migrate_multifd_channels(); i++) {
1298         MultiFDRecvParams *p = &multifd_recv_state->params[i];
1299 
1300         if (p->running) {
1301             p->quit = true;
1302             /*
1303              * multifd_recv_thread may hung at MULTIFD_FLAG_SYNC handle code,
1304              * however try to wakeup it without harm in cleanup phase.
1305              */
1306             qemu_sem_post(&p->sem_sync);
1307             qemu_thread_join(&p->thread);
1308         }
1309         object_unref(OBJECT(p->c));
1310         p->c = NULL;
1311         qemu_mutex_destroy(&p->mutex);
1312         qemu_sem_destroy(&p->sem_sync);
1313         g_free(p->name);
1314         p->name = NULL;
1315         multifd_pages_clear(p->pages);
1316         p->pages = NULL;
1317         p->packet_len = 0;
1318         g_free(p->packet);
1319         p->packet = NULL;
1320     }
1321     qemu_sem_destroy(&multifd_recv_state->sem_sync);
1322     g_free(multifd_recv_state->params);
1323     multifd_recv_state->params = NULL;
1324     g_free(multifd_recv_state);
1325     multifd_recv_state = NULL;
1326 
1327     return ret;
1328 }
1329 
1330 static void multifd_recv_sync_main(void)
1331 {
1332     int i;
1333 
1334     if (!migrate_use_multifd()) {
1335         return;
1336     }
1337     for (i = 0; i < migrate_multifd_channels(); i++) {
1338         MultiFDRecvParams *p = &multifd_recv_state->params[i];
1339 
1340         trace_multifd_recv_sync_main_wait(p->id);
1341         qemu_sem_wait(&multifd_recv_state->sem_sync);
1342     }
1343     for (i = 0; i < migrate_multifd_channels(); i++) {
1344         MultiFDRecvParams *p = &multifd_recv_state->params[i];
1345 
1346         qemu_mutex_lock(&p->mutex);
1347         if (multifd_recv_state->packet_num < p->packet_num) {
1348             multifd_recv_state->packet_num = p->packet_num;
1349         }
1350         qemu_mutex_unlock(&p->mutex);
1351         trace_multifd_recv_sync_main_signal(p->id);
1352         qemu_sem_post(&p->sem_sync);
1353     }
1354     trace_multifd_recv_sync_main(multifd_recv_state->packet_num);
1355 }
1356 
1357 static void *multifd_recv_thread(void *opaque)
1358 {
1359     MultiFDRecvParams *p = opaque;
1360     Error *local_err = NULL;
1361     int ret;
1362 
1363     trace_multifd_recv_thread_start(p->id);
1364     rcu_register_thread();
1365 
1366     while (true) {
1367         uint32_t used;
1368         uint32_t flags;
1369 
1370         if (p->quit) {
1371             break;
1372         }
1373 
1374         ret = qio_channel_read_all_eof(p->c, (void *)p->packet,
1375                                        p->packet_len, &local_err);
1376         if (ret == 0) {   /* EOF */
1377             break;
1378         }
1379         if (ret == -1) {   /* Error */
1380             break;
1381         }
1382 
1383         qemu_mutex_lock(&p->mutex);
1384         ret = multifd_recv_unfill_packet(p, &local_err);
1385         if (ret) {
1386             qemu_mutex_unlock(&p->mutex);
1387             break;
1388         }
1389 
1390         used = p->pages->used;
1391         flags = p->flags;
1392         trace_multifd_recv(p->id, p->packet_num, used, flags,
1393                            p->next_packet_size);
1394         p->num_packets++;
1395         p->num_pages += used;
1396         qemu_mutex_unlock(&p->mutex);
1397 
1398         if (used) {
1399             ret = qio_channel_readv_all(p->c, p->pages->iov,
1400                                         used, &local_err);
1401             if (ret != 0) {
1402                 break;
1403             }
1404         }
1405 
1406         if (flags & MULTIFD_FLAG_SYNC) {
1407             qemu_sem_post(&multifd_recv_state->sem_sync);
1408             qemu_sem_wait(&p->sem_sync);
1409         }
1410     }
1411 
1412     if (local_err) {
1413         multifd_recv_terminate_threads(local_err);
1414     }
1415     qemu_mutex_lock(&p->mutex);
1416     p->running = false;
1417     qemu_mutex_unlock(&p->mutex);
1418 
1419     rcu_unregister_thread();
1420     trace_multifd_recv_thread_end(p->id, p->num_packets, p->num_pages);
1421 
1422     return NULL;
1423 }
1424 
1425 int multifd_load_setup(void)
1426 {
1427     int thread_count;
1428     uint32_t page_count = MULTIFD_PACKET_SIZE / qemu_target_page_size();
1429     uint8_t i;
1430 
1431     if (!migrate_use_multifd()) {
1432         return 0;
1433     }
1434     thread_count = migrate_multifd_channels();
1435     multifd_recv_state = g_malloc0(sizeof(*multifd_recv_state));
1436     multifd_recv_state->params = g_new0(MultiFDRecvParams, thread_count);
1437     atomic_set(&multifd_recv_state->count, 0);
1438     qemu_sem_init(&multifd_recv_state->sem_sync, 0);
1439 
1440     for (i = 0; i < thread_count; i++) {
1441         MultiFDRecvParams *p = &multifd_recv_state->params[i];
1442 
1443         qemu_mutex_init(&p->mutex);
1444         qemu_sem_init(&p->sem_sync, 0);
1445         p->quit = false;
1446         p->id = i;
1447         p->pages = multifd_pages_init(page_count);
1448         p->packet_len = sizeof(MultiFDPacket_t)
1449                       + sizeof(ram_addr_t) * page_count;
1450         p->packet = g_malloc0(p->packet_len);
1451         p->name = g_strdup_printf("multifdrecv_%d", i);
1452     }
1453     return 0;
1454 }
1455 
1456 bool multifd_recv_all_channels_created(void)
1457 {
1458     int thread_count = migrate_multifd_channels();
1459 
1460     if (!migrate_use_multifd()) {
1461         return true;
1462     }
1463 
1464     return thread_count == atomic_read(&multifd_recv_state->count);
1465 }
1466 
1467 /*
1468  * Try to receive all multifd channels to get ready for the migration.
1469  * - Return true and do not set @errp when correctly receving all channels;
1470  * - Return false and do not set @errp when correctly receiving the current one;
1471  * - Return false and set @errp when failing to receive the current channel.
1472  */
1473 bool multifd_recv_new_channel(QIOChannel *ioc, Error **errp)
1474 {
1475     MultiFDRecvParams *p;
1476     Error *local_err = NULL;
1477     int id;
1478 
1479     id = multifd_recv_initial_packet(ioc, &local_err);
1480     if (id < 0) {
1481         multifd_recv_terminate_threads(local_err);
1482         error_propagate_prepend(errp, local_err,
1483                                 "failed to receive packet"
1484                                 " via multifd channel %d: ",
1485                                 atomic_read(&multifd_recv_state->count));
1486         return false;
1487     }
1488     trace_multifd_recv_new_channel(id);
1489 
1490     p = &multifd_recv_state->params[id];
1491     if (p->c != NULL) {
1492         error_setg(&local_err, "multifd: received id '%d' already setup'",
1493                    id);
1494         multifd_recv_terminate_threads(local_err);
1495         error_propagate(errp, local_err);
1496         return false;
1497     }
1498     p->c = ioc;
1499     object_ref(OBJECT(ioc));
1500     /* initial packet */
1501     p->num_packets = 1;
1502 
1503     p->running = true;
1504     qemu_thread_create(&p->thread, p->name, multifd_recv_thread, p,
1505                        QEMU_THREAD_JOINABLE);
1506     atomic_inc(&multifd_recv_state->count);
1507     return atomic_read(&multifd_recv_state->count) ==
1508            migrate_multifd_channels();
1509 }
1510 
1511 /**
1512  * save_page_header: write page header to wire
1513  *
1514  * If this is the 1st block, it also writes the block identification
1515  *
1516  * Returns the number of bytes written
1517  *
1518  * @f: QEMUFile where to send the data
1519  * @block: block that contains the page we want to send
1520  * @offset: offset inside the block for the page
1521  *          in the lower bits, it contains flags
1522  */
1523 static size_t save_page_header(RAMState *rs, QEMUFile *f,  RAMBlock *block,
1524                                ram_addr_t offset)
1525 {
1526     size_t size, len;
1527 
1528     if (block == rs->last_sent_block) {
1529         offset |= RAM_SAVE_FLAG_CONTINUE;
1530     }
1531     qemu_put_be64(f, offset);
1532     size = 8;
1533 
1534     if (!(offset & RAM_SAVE_FLAG_CONTINUE)) {
1535         len = strlen(block->idstr);
1536         qemu_put_byte(f, len);
1537         qemu_put_buffer(f, (uint8_t *)block->idstr, len);
1538         size += 1 + len;
1539         rs->last_sent_block = block;
1540     }
1541     return size;
1542 }
1543 
1544 /**
1545  * mig_throttle_guest_down: throotle down the guest
1546  *
1547  * Reduce amount of guest cpu execution to hopefully slow down memory
1548  * writes. If guest dirty memory rate is reduced below the rate at
1549  * which we can transfer pages to the destination then we should be
1550  * able to complete migration. Some workloads dirty memory way too
1551  * fast and will not effectively converge, even with auto-converge.
1552  */
1553 static void mig_throttle_guest_down(void)
1554 {
1555     MigrationState *s = migrate_get_current();
1556     uint64_t pct_initial = s->parameters.cpu_throttle_initial;
1557     uint64_t pct_icrement = s->parameters.cpu_throttle_increment;
1558     int pct_max = s->parameters.max_cpu_throttle;
1559 
1560     /* We have not started throttling yet. Let's start it. */
1561     if (!cpu_throttle_active()) {
1562         cpu_throttle_set(pct_initial);
1563     } else {
1564         /* Throttling already on, just increase the rate */
1565         cpu_throttle_set(MIN(cpu_throttle_get_percentage() + pct_icrement,
1566                          pct_max));
1567     }
1568 }
1569 
1570 /**
1571  * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache
1572  *
1573  * @rs: current RAM state
1574  * @current_addr: address for the zero page
1575  *
1576  * Update the xbzrle cache to reflect a page that's been sent as all 0.
1577  * The important thing is that a stale (not-yet-0'd) page be replaced
1578  * by the new data.
1579  * As a bonus, if the page wasn't in the cache it gets added so that
1580  * when a small write is made into the 0'd page it gets XBZRLE sent.
1581  */
1582 static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
1583 {
1584     if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
1585         return;
1586     }
1587 
1588     /* We don't care if this fails to allocate a new cache page
1589      * as long as it updated an old one */
1590     cache_insert(XBZRLE.cache, current_addr, XBZRLE.zero_target_page,
1591                  ram_counters.dirty_sync_count);
1592 }
1593 
1594 #define ENCODING_FLAG_XBZRLE 0x1
1595 
1596 /**
1597  * save_xbzrle_page: compress and send current page
1598  *
1599  * Returns: 1 means that we wrote the page
1600  *          0 means that page is identical to the one already sent
1601  *          -1 means that xbzrle would be longer than normal
1602  *
1603  * @rs: current RAM state
1604  * @current_data: pointer to the address of the page contents
1605  * @current_addr: addr of the page
1606  * @block: block that contains the page we want to send
1607  * @offset: offset inside the block for the page
1608  * @last_stage: if we are at the completion stage
1609  */
1610 static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
1611                             ram_addr_t current_addr, RAMBlock *block,
1612                             ram_addr_t offset, bool last_stage)
1613 {
1614     int encoded_len = 0, bytes_xbzrle;
1615     uint8_t *prev_cached_page;
1616 
1617     if (!cache_is_cached(XBZRLE.cache, current_addr,
1618                          ram_counters.dirty_sync_count)) {
1619         xbzrle_counters.cache_miss++;
1620         if (!last_stage) {
1621             if (cache_insert(XBZRLE.cache, current_addr, *current_data,
1622                              ram_counters.dirty_sync_count) == -1) {
1623                 return -1;
1624             } else {
1625                 /* update *current_data when the page has been
1626                    inserted into cache */
1627                 *current_data = get_cached_data(XBZRLE.cache, current_addr);
1628             }
1629         }
1630         return -1;
1631     }
1632 
1633     prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);
1634 
1635     /* save current buffer into memory */
1636     memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);
1637 
1638     /* XBZRLE encoding (if there is no overflow) */
1639     encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
1640                                        TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
1641                                        TARGET_PAGE_SIZE);
1642 
1643     /*
1644      * Update the cache contents, so that it corresponds to the data
1645      * sent, in all cases except where we skip the page.
1646      */
1647     if (!last_stage && encoded_len != 0) {
1648         memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
1649         /*
1650          * In the case where we couldn't compress, ensure that the caller
1651          * sends the data from the cache, since the guest might have
1652          * changed the RAM since we copied it.
1653          */
1654         *current_data = prev_cached_page;
1655     }
1656 
1657     if (encoded_len == 0) {
1658         trace_save_xbzrle_page_skipping();
1659         return 0;
1660     } else if (encoded_len == -1) {
1661         trace_save_xbzrle_page_overflow();
1662         xbzrle_counters.overflow++;
1663         return -1;
1664     }
1665 
1666     /* Send XBZRLE based compressed page */
1667     bytes_xbzrle = save_page_header(rs, rs->f, block,
1668                                     offset | RAM_SAVE_FLAG_XBZRLE);
1669     qemu_put_byte(rs->f, ENCODING_FLAG_XBZRLE);
1670     qemu_put_be16(rs->f, encoded_len);
1671     qemu_put_buffer(rs->f, XBZRLE.encoded_buf, encoded_len);
1672     bytes_xbzrle += encoded_len + 1 + 2;
1673     xbzrle_counters.pages++;
1674     xbzrle_counters.bytes += bytes_xbzrle;
1675     ram_counters.transferred += bytes_xbzrle;
1676 
1677     return 1;
1678 }
1679 
1680 /**
1681  * migration_bitmap_find_dirty: find the next dirty page from start
1682  *
1683  * Returns the page offset within memory region of the start of a dirty page
1684  *
1685  * @rs: current RAM state
1686  * @rb: RAMBlock where to search for dirty pages
1687  * @start: page where we start the search
1688  */
1689 static inline
1690 unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
1691                                           unsigned long start)
1692 {
1693     unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
1694     unsigned long *bitmap = rb->bmap;
1695     unsigned long next;
1696 
1697     if (ramblock_is_ignored(rb)) {
1698         return size;
1699     }
1700 
1701     /*
1702      * When the free page optimization is enabled, we need to check the bitmap
1703      * to send the non-free pages rather than all the pages in the bulk stage.
1704      */
1705     if (!rs->fpo_enabled && rs->ram_bulk_stage && start > 0) {
1706         next = start + 1;
1707     } else {
1708         next = find_next_bit(bitmap, size, start);
1709     }
1710 
1711     return next;
1712 }
1713 
1714 static inline bool migration_bitmap_clear_dirty(RAMState *rs,
1715                                                 RAMBlock *rb,
1716                                                 unsigned long page)
1717 {
1718     bool ret;
1719 
1720     qemu_mutex_lock(&rs->bitmap_mutex);
1721 
1722     /*
1723      * Clear dirty bitmap if needed.  This _must_ be called before we
1724      * send any of the page in the chunk because we need to make sure
1725      * we can capture further page content changes when we sync dirty
1726      * log the next time.  So as long as we are going to send any of
1727      * the page in the chunk we clear the remote dirty bitmap for all.
1728      * Clearing it earlier won't be a problem, but too late will.
1729      */
1730     if (rb->clear_bmap && clear_bmap_test_and_clear(rb, page)) {
1731         uint8_t shift = rb->clear_bmap_shift;
1732         hwaddr size = 1ULL << (TARGET_PAGE_BITS + shift);
1733         hwaddr start = (page << TARGET_PAGE_BITS) & (-size);
1734 
1735         /*
1736          * CLEAR_BITMAP_SHIFT_MIN should always guarantee this... this
1737          * can make things easier sometimes since then start address
1738          * of the small chunk will always be 64 pages aligned so the
1739          * bitmap will always be aligned to unsigned long.  We should
1740          * even be able to remove this restriction but I'm simply
1741          * keeping it.
1742          */
1743         assert(shift >= 6);
1744         trace_migration_bitmap_clear_dirty(rb->idstr, start, size, page);
1745         memory_region_clear_dirty_bitmap(rb->mr, start, size);
1746     }
1747 
1748     ret = test_and_clear_bit(page, rb->bmap);
1749 
1750     if (ret) {
1751         rs->migration_dirty_pages--;
1752     }
1753     qemu_mutex_unlock(&rs->bitmap_mutex);
1754 
1755     return ret;
1756 }
1757 
1758 /* Called with RCU critical section */
1759 static void ramblock_sync_dirty_bitmap(RAMState *rs, RAMBlock *rb)
1760 {
1761     rs->migration_dirty_pages +=
1762         cpu_physical_memory_sync_dirty_bitmap(rb, 0, rb->used_length,
1763                                               &rs->num_dirty_pages_period);
1764 }
1765 
1766 /**
1767  * ram_pagesize_summary: calculate all the pagesizes of a VM
1768  *
1769  * Returns a summary bitmap of the page sizes of all RAMBlocks
1770  *
1771  * For VMs with just normal pages this is equivalent to the host page
1772  * size. If it's got some huge pages then it's the OR of all the
1773  * different page sizes.
1774  */
1775 uint64_t ram_pagesize_summary(void)
1776 {
1777     RAMBlock *block;
1778     uint64_t summary = 0;
1779 
1780     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
1781         summary |= block->page_size;
1782     }
1783 
1784     return summary;
1785 }
1786 
1787 uint64_t ram_get_total_transferred_pages(void)
1788 {
1789     return  ram_counters.normal + ram_counters.duplicate +
1790                 compression_counters.pages + xbzrle_counters.pages;
1791 }
1792 
1793 static void migration_update_rates(RAMState *rs, int64_t end_time)
1794 {
1795     uint64_t page_count = rs->target_page_count - rs->target_page_count_prev;
1796     double compressed_size;
1797 
1798     /* calculate period counters */
1799     ram_counters.dirty_pages_rate = rs->num_dirty_pages_period * 1000
1800                 / (end_time - rs->time_last_bitmap_sync);
1801 
1802     if (!page_count) {
1803         return;
1804     }
1805 
1806     if (migrate_use_xbzrle()) {
1807         xbzrle_counters.cache_miss_rate = (double)(xbzrle_counters.cache_miss -
1808             rs->xbzrle_cache_miss_prev) / page_count;
1809         rs->xbzrle_cache_miss_prev = xbzrle_counters.cache_miss;
1810     }
1811 
1812     if (migrate_use_compression()) {
1813         compression_counters.busy_rate = (double)(compression_counters.busy -
1814             rs->compress_thread_busy_prev) / page_count;
1815         rs->compress_thread_busy_prev = compression_counters.busy;
1816 
1817         compressed_size = compression_counters.compressed_size -
1818                           rs->compressed_size_prev;
1819         if (compressed_size) {
1820             double uncompressed_size = (compression_counters.pages -
1821                                     rs->compress_pages_prev) * TARGET_PAGE_SIZE;
1822 
1823             /* Compression-Ratio = Uncompressed-size / Compressed-size */
1824             compression_counters.compression_rate =
1825                                         uncompressed_size / compressed_size;
1826 
1827             rs->compress_pages_prev = compression_counters.pages;
1828             rs->compressed_size_prev = compression_counters.compressed_size;
1829         }
1830     }
1831 }
1832 
1833 static void migration_bitmap_sync(RAMState *rs)
1834 {
1835     RAMBlock *block;
1836     int64_t end_time;
1837     uint64_t bytes_xfer_now;
1838 
1839     ram_counters.dirty_sync_count++;
1840 
1841     if (!rs->time_last_bitmap_sync) {
1842         rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1843     }
1844 
1845     trace_migration_bitmap_sync_start();
1846     memory_global_dirty_log_sync();
1847 
1848     qemu_mutex_lock(&rs->bitmap_mutex);
1849     WITH_RCU_READ_LOCK_GUARD() {
1850         RAMBLOCK_FOREACH_NOT_IGNORED(block) {
1851             ramblock_sync_dirty_bitmap(rs, block);
1852         }
1853         ram_counters.remaining = ram_bytes_remaining();
1854     }
1855     qemu_mutex_unlock(&rs->bitmap_mutex);
1856 
1857     memory_global_after_dirty_log_sync();
1858     trace_migration_bitmap_sync_end(rs->num_dirty_pages_period);
1859 
1860     end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1861 
1862     /* more than 1 second = 1000 millisecons */
1863     if (end_time > rs->time_last_bitmap_sync + 1000) {
1864         bytes_xfer_now = ram_counters.transferred;
1865 
1866         /* During block migration the auto-converge logic incorrectly detects
1867          * that ram migration makes no progress. Avoid this by disabling the
1868          * throttling logic during the bulk phase of block migration. */
1869         if (migrate_auto_converge() && !blk_mig_bulk_active()) {
1870             /* The following detection logic can be refined later. For now:
1871                Check to see if the dirtied bytes is 50% more than the approx.
1872                amount of bytes that just got transferred since the last time we
1873                were in this routine. If that happens twice, start or increase
1874                throttling */
1875 
1876             if ((rs->num_dirty_pages_period * TARGET_PAGE_SIZE >
1877                    (bytes_xfer_now - rs->bytes_xfer_prev) / 2) &&
1878                 (++rs->dirty_rate_high_cnt >= 2)) {
1879                     trace_migration_throttle();
1880                     rs->dirty_rate_high_cnt = 0;
1881                     mig_throttle_guest_down();
1882             }
1883         }
1884 
1885         migration_update_rates(rs, end_time);
1886 
1887         rs->target_page_count_prev = rs->target_page_count;
1888 
1889         /* reset period counters */
1890         rs->time_last_bitmap_sync = end_time;
1891         rs->num_dirty_pages_period = 0;
1892         rs->bytes_xfer_prev = bytes_xfer_now;
1893     }
1894     if (migrate_use_events()) {
1895         qapi_event_send_migration_pass(ram_counters.dirty_sync_count);
1896     }
1897 }
1898 
1899 static void migration_bitmap_sync_precopy(RAMState *rs)
1900 {
1901     Error *local_err = NULL;
1902 
1903     /*
1904      * The current notifier usage is just an optimization to migration, so we
1905      * don't stop the normal migration process in the error case.
1906      */
1907     if (precopy_notify(PRECOPY_NOTIFY_BEFORE_BITMAP_SYNC, &local_err)) {
1908         error_report_err(local_err);
1909     }
1910 
1911     migration_bitmap_sync(rs);
1912 
1913     if (precopy_notify(PRECOPY_NOTIFY_AFTER_BITMAP_SYNC, &local_err)) {
1914         error_report_err(local_err);
1915     }
1916 }
1917 
1918 /**
1919  * save_zero_page_to_file: send the zero page to the file
1920  *
1921  * Returns the size of data written to the file, 0 means the page is not
1922  * a zero page
1923  *
1924  * @rs: current RAM state
1925  * @file: the file where the data is saved
1926  * @block: block that contains the page we want to send
1927  * @offset: offset inside the block for the page
1928  */
1929 static int save_zero_page_to_file(RAMState *rs, QEMUFile *file,
1930                                   RAMBlock *block, ram_addr_t offset)
1931 {
1932     uint8_t *p = block->host + offset;
1933     int len = 0;
1934 
1935     if (is_zero_range(p, TARGET_PAGE_SIZE)) {
1936         len += save_page_header(rs, file, block, offset | RAM_SAVE_FLAG_ZERO);
1937         qemu_put_byte(file, 0);
1938         len += 1;
1939     }
1940     return len;
1941 }
1942 
1943 /**
1944  * save_zero_page: send the zero page to the stream
1945  *
1946  * Returns the number of pages written.
1947  *
1948  * @rs: current RAM state
1949  * @block: block that contains the page we want to send
1950  * @offset: offset inside the block for the page
1951  */
1952 static int save_zero_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
1953 {
1954     int len = save_zero_page_to_file(rs, rs->f, block, offset);
1955 
1956     if (len) {
1957         ram_counters.duplicate++;
1958         ram_counters.transferred += len;
1959         return 1;
1960     }
1961     return -1;
1962 }
1963 
1964 static void ram_release_pages(const char *rbname, uint64_t offset, int pages)
1965 {
1966     if (!migrate_release_ram() || !migration_in_postcopy()) {
1967         return;
1968     }
1969 
1970     ram_discard_range(rbname, offset, pages << TARGET_PAGE_BITS);
1971 }
1972 
1973 /*
1974  * @pages: the number of pages written by the control path,
1975  *        < 0 - error
1976  *        > 0 - number of pages written
1977  *
1978  * Return true if the pages has been saved, otherwise false is returned.
1979  */
1980 static bool control_save_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
1981                               int *pages)
1982 {
1983     uint64_t bytes_xmit = 0;
1984     int ret;
1985 
1986     *pages = -1;
1987     ret = ram_control_save_page(rs->f, block->offset, offset, TARGET_PAGE_SIZE,
1988                                 &bytes_xmit);
1989     if (ret == RAM_SAVE_CONTROL_NOT_SUPP) {
1990         return false;
1991     }
1992 
1993     if (bytes_xmit) {
1994         ram_counters.transferred += bytes_xmit;
1995         *pages = 1;
1996     }
1997 
1998     if (ret == RAM_SAVE_CONTROL_DELAYED) {
1999         return true;
2000     }
2001 
2002     if (bytes_xmit > 0) {
2003         ram_counters.normal++;
2004     } else if (bytes_xmit == 0) {
2005         ram_counters.duplicate++;
2006     }
2007 
2008     return true;
2009 }
2010 
2011 /*
2012  * directly send the page to the stream
2013  *
2014  * Returns the number of pages written.
2015  *
2016  * @rs: current RAM state
2017  * @block: block that contains the page we want to send
2018  * @offset: offset inside the block for the page
2019  * @buf: the page to be sent
2020  * @async: send to page asyncly
2021  */
2022 static int save_normal_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
2023                             uint8_t *buf, bool async)
2024 {
2025     ram_counters.transferred += save_page_header(rs, rs->f, block,
2026                                                  offset | RAM_SAVE_FLAG_PAGE);
2027     if (async) {
2028         qemu_put_buffer_async(rs->f, buf, TARGET_PAGE_SIZE,
2029                               migrate_release_ram() &
2030                               migration_in_postcopy());
2031     } else {
2032         qemu_put_buffer(rs->f, buf, TARGET_PAGE_SIZE);
2033     }
2034     ram_counters.transferred += TARGET_PAGE_SIZE;
2035     ram_counters.normal++;
2036     return 1;
2037 }
2038 
2039 /**
2040  * ram_save_page: send the given page to the stream
2041  *
2042  * Returns the number of pages written.
2043  *          < 0 - error
2044  *          >=0 - Number of pages written - this might legally be 0
2045  *                if xbzrle noticed the page was the same.
2046  *
2047  * @rs: current RAM state
2048  * @block: block that contains the page we want to send
2049  * @offset: offset inside the block for the page
2050  * @last_stage: if we are at the completion stage
2051  */
2052 static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
2053 {
2054     int pages = -1;
2055     uint8_t *p;
2056     bool send_async = true;
2057     RAMBlock *block = pss->block;
2058     ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
2059     ram_addr_t current_addr = block->offset + offset;
2060 
2061     p = block->host + offset;
2062     trace_ram_save_page(block->idstr, (uint64_t)offset, p);
2063 
2064     XBZRLE_cache_lock();
2065     if (!rs->ram_bulk_stage && !migration_in_postcopy() &&
2066         migrate_use_xbzrle()) {
2067         pages = save_xbzrle_page(rs, &p, current_addr, block,
2068                                  offset, last_stage);
2069         if (!last_stage) {
2070             /* Can't send this cached data async, since the cache page
2071              * might get updated before it gets to the wire
2072              */
2073             send_async = false;
2074         }
2075     }
2076 
2077     /* XBZRLE overflow or normal page */
2078     if (pages == -1) {
2079         pages = save_normal_page(rs, block, offset, p, send_async);
2080     }
2081 
2082     XBZRLE_cache_unlock();
2083 
2084     return pages;
2085 }
2086 
2087 static int ram_save_multifd_page(RAMState *rs, RAMBlock *block,
2088                                  ram_addr_t offset)
2089 {
2090     if (multifd_queue_page(rs, block, offset) < 0) {
2091         return -1;
2092     }
2093     ram_counters.normal++;
2094 
2095     return 1;
2096 }
2097 
2098 static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
2099                                  ram_addr_t offset, uint8_t *source_buf)
2100 {
2101     RAMState *rs = ram_state;
2102     uint8_t *p = block->host + (offset & TARGET_PAGE_MASK);
2103     bool zero_page = false;
2104     int ret;
2105 
2106     if (save_zero_page_to_file(rs, f, block, offset)) {
2107         zero_page = true;
2108         goto exit;
2109     }
2110 
2111     save_page_header(rs, f, block, offset | RAM_SAVE_FLAG_COMPRESS_PAGE);
2112 
2113     /*
2114      * copy it to a internal buffer to avoid it being modified by VM
2115      * so that we can catch up the error during compression and
2116      * decompression
2117      */
2118     memcpy(source_buf, p, TARGET_PAGE_SIZE);
2119     ret = qemu_put_compression_data(f, stream, source_buf, TARGET_PAGE_SIZE);
2120     if (ret < 0) {
2121         qemu_file_set_error(migrate_get_current()->to_dst_file, ret);
2122         error_report("compressed data failed!");
2123         return false;
2124     }
2125 
2126 exit:
2127     ram_release_pages(block->idstr, offset & TARGET_PAGE_MASK, 1);
2128     return zero_page;
2129 }
2130 
2131 static void
2132 update_compress_thread_counts(const CompressParam *param, int bytes_xmit)
2133 {
2134     ram_counters.transferred += bytes_xmit;
2135 
2136     if (param->zero_page) {
2137         ram_counters.duplicate++;
2138         return;
2139     }
2140 
2141     /* 8 means a header with RAM_SAVE_FLAG_CONTINUE. */
2142     compression_counters.compressed_size += bytes_xmit - 8;
2143     compression_counters.pages++;
2144 }
2145 
2146 static bool save_page_use_compression(RAMState *rs);
2147 
2148 static void flush_compressed_data(RAMState *rs)
2149 {
2150     int idx, len, thread_count;
2151 
2152     if (!save_page_use_compression(rs)) {
2153         return;
2154     }
2155     thread_count = migrate_compress_threads();
2156 
2157     qemu_mutex_lock(&comp_done_lock);
2158     for (idx = 0; idx < thread_count; idx++) {
2159         while (!comp_param[idx].done) {
2160             qemu_cond_wait(&comp_done_cond, &comp_done_lock);
2161         }
2162     }
2163     qemu_mutex_unlock(&comp_done_lock);
2164 
2165     for (idx = 0; idx < thread_count; idx++) {
2166         qemu_mutex_lock(&comp_param[idx].mutex);
2167         if (!comp_param[idx].quit) {
2168             len = qemu_put_qemu_file(rs->f, comp_param[idx].file);
2169             /*
2170              * it's safe to fetch zero_page without holding comp_done_lock
2171              * as there is no further request submitted to the thread,
2172              * i.e, the thread should be waiting for a request at this point.
2173              */
2174             update_compress_thread_counts(&comp_param[idx], len);
2175         }
2176         qemu_mutex_unlock(&comp_param[idx].mutex);
2177     }
2178 }
2179 
2180 static inline void set_compress_params(CompressParam *param, RAMBlock *block,
2181                                        ram_addr_t offset)
2182 {
2183     param->block = block;
2184     param->offset = offset;
2185 }
2186 
2187 static int compress_page_with_multi_thread(RAMState *rs, RAMBlock *block,
2188                                            ram_addr_t offset)
2189 {
2190     int idx, thread_count, bytes_xmit = -1, pages = -1;
2191     bool wait = migrate_compress_wait_thread();
2192 
2193     thread_count = migrate_compress_threads();
2194     qemu_mutex_lock(&comp_done_lock);
2195 retry:
2196     for (idx = 0; idx < thread_count; idx++) {
2197         if (comp_param[idx].done) {
2198             comp_param[idx].done = false;
2199             bytes_xmit = qemu_put_qemu_file(rs->f, comp_param[idx].file);
2200             qemu_mutex_lock(&comp_param[idx].mutex);
2201             set_compress_params(&comp_param[idx], block, offset);
2202             qemu_cond_signal(&comp_param[idx].cond);
2203             qemu_mutex_unlock(&comp_param[idx].mutex);
2204             pages = 1;
2205             update_compress_thread_counts(&comp_param[idx], bytes_xmit);
2206             break;
2207         }
2208     }
2209 
2210     /*
2211      * wait for the free thread if the user specifies 'compress-wait-thread',
2212      * otherwise we will post the page out in the main thread as normal page.
2213      */
2214     if (pages < 0 && wait) {
2215         qemu_cond_wait(&comp_done_cond, &comp_done_lock);
2216         goto retry;
2217     }
2218     qemu_mutex_unlock(&comp_done_lock);
2219 
2220     return pages;
2221 }
2222 
2223 /**
2224  * find_dirty_block: find the next dirty page and update any state
2225  * associated with the search process.
2226  *
2227  * Returns true if a page is found
2228  *
2229  * @rs: current RAM state
2230  * @pss: data about the state of the current dirty page scan
2231  * @again: set to false if the search has scanned the whole of RAM
2232  */
2233 static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again)
2234 {
2235     pss->page = migration_bitmap_find_dirty(rs, pss->block, pss->page);
2236     if (pss->complete_round && pss->block == rs->last_seen_block &&
2237         pss->page >= rs->last_page) {
2238         /*
2239          * We've been once around the RAM and haven't found anything.
2240          * Give up.
2241          */
2242         *again = false;
2243         return false;
2244     }
2245     if ((pss->page << TARGET_PAGE_BITS) >= pss->block->used_length) {
2246         /* Didn't find anything in this RAM Block */
2247         pss->page = 0;
2248         pss->block = QLIST_NEXT_RCU(pss->block, next);
2249         if (!pss->block) {
2250             /*
2251              * If memory migration starts over, we will meet a dirtied page
2252              * which may still exists in compression threads's ring, so we
2253              * should flush the compressed data to make sure the new page
2254              * is not overwritten by the old one in the destination.
2255              *
2256              * Also If xbzrle is on, stop using the data compression at this
2257              * point. In theory, xbzrle can do better than compression.
2258              */
2259             flush_compressed_data(rs);
2260 
2261             /* Hit the end of the list */
2262             pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
2263             /* Flag that we've looped */
2264             pss->complete_round = true;
2265             rs->ram_bulk_stage = false;
2266         }
2267         /* Didn't find anything this time, but try again on the new block */
2268         *again = true;
2269         return false;
2270     } else {
2271         /* Can go around again, but... */
2272         *again = true;
2273         /* We've found something so probably don't need to */
2274         return true;
2275     }
2276 }
2277 
2278 /**
2279  * unqueue_page: gets a page of the queue
2280  *
2281  * Helper for 'get_queued_page' - gets a page off the queue
2282  *
2283  * Returns the block of the page (or NULL if none available)
2284  *
2285  * @rs: current RAM state
2286  * @offset: used to return the offset within the RAMBlock
2287  */
2288 static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
2289 {
2290     RAMBlock *block = NULL;
2291 
2292     if (QSIMPLEQ_EMPTY_ATOMIC(&rs->src_page_requests)) {
2293         return NULL;
2294     }
2295 
2296     qemu_mutex_lock(&rs->src_page_req_mutex);
2297     if (!QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
2298         struct RAMSrcPageRequest *entry =
2299                                 QSIMPLEQ_FIRST(&rs->src_page_requests);
2300         block = entry->rb;
2301         *offset = entry->offset;
2302 
2303         if (entry->len > TARGET_PAGE_SIZE) {
2304             entry->len -= TARGET_PAGE_SIZE;
2305             entry->offset += TARGET_PAGE_SIZE;
2306         } else {
2307             memory_region_unref(block->mr);
2308             QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
2309             g_free(entry);
2310             migration_consume_urgent_request();
2311         }
2312     }
2313     qemu_mutex_unlock(&rs->src_page_req_mutex);
2314 
2315     return block;
2316 }
2317 
2318 /**
2319  * get_queued_page: unqueue a page from the postcopy requests
2320  *
2321  * Skips pages that are already sent (!dirty)
2322  *
2323  * Returns true if a queued page is found
2324  *
2325  * @rs: current RAM state
2326  * @pss: data about the state of the current dirty page scan
2327  */
2328 static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
2329 {
2330     RAMBlock  *block;
2331     ram_addr_t offset;
2332     bool dirty;
2333 
2334     do {
2335         block = unqueue_page(rs, &offset);
2336         /*
2337          * We're sending this page, and since it's postcopy nothing else
2338          * will dirty it, and we must make sure it doesn't get sent again
2339          * even if this queue request was received after the background
2340          * search already sent it.
2341          */
2342         if (block) {
2343             unsigned long page;
2344 
2345             page = offset >> TARGET_PAGE_BITS;
2346             dirty = test_bit(page, block->bmap);
2347             if (!dirty) {
2348                 trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset,
2349                                                 page);
2350             } else {
2351                 trace_get_queued_page(block->idstr, (uint64_t)offset, page);
2352             }
2353         }
2354 
2355     } while (block && !dirty);
2356 
2357     if (block) {
2358         /*
2359          * As soon as we start servicing pages out of order, then we have
2360          * to kill the bulk stage, since the bulk stage assumes
2361          * in (migration_bitmap_find_and_reset_dirty) that every page is
2362          * dirty, that's no longer true.
2363          */
2364         rs->ram_bulk_stage = false;
2365 
2366         /*
2367          * We want the background search to continue from the queued page
2368          * since the guest is likely to want other pages near to the page
2369          * it just requested.
2370          */
2371         pss->block = block;
2372         pss->page = offset >> TARGET_PAGE_BITS;
2373 
2374         /*
2375          * This unqueued page would break the "one round" check, even is
2376          * really rare.
2377          */
2378         pss->complete_round = false;
2379     }
2380 
2381     return !!block;
2382 }
2383 
2384 /**
2385  * migration_page_queue_free: drop any remaining pages in the ram
2386  * request queue
2387  *
2388  * It should be empty at the end anyway, but in error cases there may
2389  * be some left.  in case that there is any page left, we drop it.
2390  *
2391  */
2392 static void migration_page_queue_free(RAMState *rs)
2393 {
2394     struct RAMSrcPageRequest *mspr, *next_mspr;
2395     /* This queue generally should be empty - but in the case of a failed
2396      * migration might have some droppings in.
2397      */
2398     RCU_READ_LOCK_GUARD();
2399     QSIMPLEQ_FOREACH_SAFE(mspr, &rs->src_page_requests, next_req, next_mspr) {
2400         memory_region_unref(mspr->rb->mr);
2401         QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
2402         g_free(mspr);
2403     }
2404 }
2405 
2406 /**
2407  * ram_save_queue_pages: queue the page for transmission
2408  *
2409  * A request from postcopy destination for example.
2410  *
2411  * Returns zero on success or negative on error
2412  *
2413  * @rbname: Name of the RAMBLock of the request. NULL means the
2414  *          same that last one.
2415  * @start: starting address from the start of the RAMBlock
2416  * @len: length (in bytes) to send
2417  */
2418 int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len)
2419 {
2420     RAMBlock *ramblock;
2421     RAMState *rs = ram_state;
2422 
2423     ram_counters.postcopy_requests++;
2424     RCU_READ_LOCK_GUARD();
2425 
2426     if (!rbname) {
2427         /* Reuse last RAMBlock */
2428         ramblock = rs->last_req_rb;
2429 
2430         if (!ramblock) {
2431             /*
2432              * Shouldn't happen, we can't reuse the last RAMBlock if
2433              * it's the 1st request.
2434              */
2435             error_report("ram_save_queue_pages no previous block");
2436             goto err;
2437         }
2438     } else {
2439         ramblock = qemu_ram_block_by_name(rbname);
2440 
2441         if (!ramblock) {
2442             /* We shouldn't be asked for a non-existent RAMBlock */
2443             error_report("ram_save_queue_pages no block '%s'", rbname);
2444             goto err;
2445         }
2446         rs->last_req_rb = ramblock;
2447     }
2448     trace_ram_save_queue_pages(ramblock->idstr, start, len);
2449     if (start+len > ramblock->used_length) {
2450         error_report("%s request overrun start=" RAM_ADDR_FMT " len="
2451                      RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT,
2452                      __func__, start, len, ramblock->used_length);
2453         goto err;
2454     }
2455 
2456     struct RAMSrcPageRequest *new_entry =
2457         g_malloc0(sizeof(struct RAMSrcPageRequest));
2458     new_entry->rb = ramblock;
2459     new_entry->offset = start;
2460     new_entry->len = len;
2461 
2462     memory_region_ref(ramblock->mr);
2463     qemu_mutex_lock(&rs->src_page_req_mutex);
2464     QSIMPLEQ_INSERT_TAIL(&rs->src_page_requests, new_entry, next_req);
2465     migration_make_urgent_request();
2466     qemu_mutex_unlock(&rs->src_page_req_mutex);
2467 
2468     return 0;
2469 
2470 err:
2471     return -1;
2472 }
2473 
2474 static bool save_page_use_compression(RAMState *rs)
2475 {
2476     if (!migrate_use_compression()) {
2477         return false;
2478     }
2479 
2480     /*
2481      * If xbzrle is on, stop using the data compression after first
2482      * round of migration even if compression is enabled. In theory,
2483      * xbzrle can do better than compression.
2484      */
2485     if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
2486         return true;
2487     }
2488 
2489     return false;
2490 }
2491 
2492 /*
2493  * try to compress the page before posting it out, return true if the page
2494  * has been properly handled by compression, otherwise needs other
2495  * paths to handle it
2496  */
2497 static bool save_compress_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
2498 {
2499     if (!save_page_use_compression(rs)) {
2500         return false;
2501     }
2502 
2503     /*
2504      * When starting the process of a new block, the first page of
2505      * the block should be sent out before other pages in the same
2506      * block, and all the pages in last block should have been sent
2507      * out, keeping this order is important, because the 'cont' flag
2508      * is used to avoid resending the block name.
2509      *
2510      * We post the fist page as normal page as compression will take
2511      * much CPU resource.
2512      */
2513     if (block != rs->last_sent_block) {
2514         flush_compressed_data(rs);
2515         return false;
2516     }
2517 
2518     if (compress_page_with_multi_thread(rs, block, offset) > 0) {
2519         return true;
2520     }
2521 
2522     compression_counters.busy++;
2523     return false;
2524 }
2525 
2526 /**
2527  * ram_save_target_page: save one target page
2528  *
2529  * Returns the number of pages written
2530  *
2531  * @rs: current RAM state
2532  * @pss: data about the page we want to send
2533  * @last_stage: if we are at the completion stage
2534  */
2535 static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
2536                                 bool last_stage)
2537 {
2538     RAMBlock *block = pss->block;
2539     ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
2540     int res;
2541 
2542     if (control_save_page(rs, block, offset, &res)) {
2543         return res;
2544     }
2545 
2546     if (save_compress_page(rs, block, offset)) {
2547         return 1;
2548     }
2549 
2550     res = save_zero_page(rs, block, offset);
2551     if (res > 0) {
2552         /* Must let xbzrle know, otherwise a previous (now 0'd) cached
2553          * page would be stale
2554          */
2555         if (!save_page_use_compression(rs)) {
2556             XBZRLE_cache_lock();
2557             xbzrle_cache_zero_page(rs, block->offset + offset);
2558             XBZRLE_cache_unlock();
2559         }
2560         ram_release_pages(block->idstr, offset, res);
2561         return res;
2562     }
2563 
2564     /*
2565      * do not use multifd for compression as the first page in the new
2566      * block should be posted out before sending the compressed page
2567      */
2568     if (!save_page_use_compression(rs) && migrate_use_multifd()) {
2569         return ram_save_multifd_page(rs, block, offset);
2570     }
2571 
2572     return ram_save_page(rs, pss, last_stage);
2573 }
2574 
2575 /**
2576  * ram_save_host_page: save a whole host page
2577  *
2578  * Starting at *offset send pages up to the end of the current host
2579  * page. It's valid for the initial offset to point into the middle of
2580  * a host page in which case the remainder of the hostpage is sent.
2581  * Only dirty target pages are sent. Note that the host page size may
2582  * be a huge page for this block.
2583  * The saving stops at the boundary of the used_length of the block
2584  * if the RAMBlock isn't a multiple of the host page size.
2585  *
2586  * Returns the number of pages written or negative on error
2587  *
2588  * @rs: current RAM state
2589  * @ms: current migration state
2590  * @pss: data about the page we want to send
2591  * @last_stage: if we are at the completion stage
2592  */
2593 static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
2594                               bool last_stage)
2595 {
2596     int tmppages, pages = 0;
2597     size_t pagesize_bits =
2598         qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
2599 
2600     if (ramblock_is_ignored(pss->block)) {
2601         error_report("block %s should not be migrated !", pss->block->idstr);
2602         return 0;
2603     }
2604 
2605     do {
2606         /* Check the pages is dirty and if it is send it */
2607         if (!migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
2608             pss->page++;
2609             continue;
2610         }
2611 
2612         tmppages = ram_save_target_page(rs, pss, last_stage);
2613         if (tmppages < 0) {
2614             return tmppages;
2615         }
2616 
2617         pages += tmppages;
2618         pss->page++;
2619     } while ((pss->page & (pagesize_bits - 1)) &&
2620              offset_in_ramblock(pss->block, pss->page << TARGET_PAGE_BITS));
2621 
2622     /* The offset we leave with is the last one we looked at */
2623     pss->page--;
2624     return pages;
2625 }
2626 
2627 /**
2628  * ram_find_and_save_block: finds a dirty page and sends it to f
2629  *
2630  * Called within an RCU critical section.
2631  *
2632  * Returns the number of pages written where zero means no dirty pages,
2633  * or negative on error
2634  *
2635  * @rs: current RAM state
2636  * @last_stage: if we are at the completion stage
2637  *
2638  * On systems where host-page-size > target-page-size it will send all the
2639  * pages in a host page that are dirty.
2640  */
2641 
2642 static int ram_find_and_save_block(RAMState *rs, bool last_stage)
2643 {
2644     PageSearchStatus pss;
2645     int pages = 0;
2646     bool again, found;
2647 
2648     /* No dirty page as there is zero RAM */
2649     if (!ram_bytes_total()) {
2650         return pages;
2651     }
2652 
2653     pss.block = rs->last_seen_block;
2654     pss.page = rs->last_page;
2655     pss.complete_round = false;
2656 
2657     if (!pss.block) {
2658         pss.block = QLIST_FIRST_RCU(&ram_list.blocks);
2659     }
2660 
2661     do {
2662         again = true;
2663         found = get_queued_page(rs, &pss);
2664 
2665         if (!found) {
2666             /* priority queue empty, so just search for something dirty */
2667             found = find_dirty_block(rs, &pss, &again);
2668         }
2669 
2670         if (found) {
2671             pages = ram_save_host_page(rs, &pss, last_stage);
2672         }
2673     } while (!pages && again);
2674 
2675     rs->last_seen_block = pss.block;
2676     rs->last_page = pss.page;
2677 
2678     return pages;
2679 }
2680 
2681 void acct_update_position(QEMUFile *f, size_t size, bool zero)
2682 {
2683     uint64_t pages = size / TARGET_PAGE_SIZE;
2684 
2685     if (zero) {
2686         ram_counters.duplicate += pages;
2687     } else {
2688         ram_counters.normal += pages;
2689         ram_counters.transferred += size;
2690         qemu_update_position(f, size);
2691     }
2692 }
2693 
2694 static uint64_t ram_bytes_total_common(bool count_ignored)
2695 {
2696     RAMBlock *block;
2697     uint64_t total = 0;
2698 
2699     RCU_READ_LOCK_GUARD();
2700 
2701     if (count_ignored) {
2702         RAMBLOCK_FOREACH_MIGRATABLE(block) {
2703             total += block->used_length;
2704         }
2705     } else {
2706         RAMBLOCK_FOREACH_NOT_IGNORED(block) {
2707             total += block->used_length;
2708         }
2709     }
2710     return total;
2711 }
2712 
2713 uint64_t ram_bytes_total(void)
2714 {
2715     return ram_bytes_total_common(false);
2716 }
2717 
2718 static void xbzrle_load_setup(void)
2719 {
2720     XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
2721 }
2722 
2723 static void xbzrle_load_cleanup(void)
2724 {
2725     g_free(XBZRLE.decoded_buf);
2726     XBZRLE.decoded_buf = NULL;
2727 }
2728 
2729 static void ram_state_cleanup(RAMState **rsp)
2730 {
2731     if (*rsp) {
2732         migration_page_queue_free(*rsp);
2733         qemu_mutex_destroy(&(*rsp)->bitmap_mutex);
2734         qemu_mutex_destroy(&(*rsp)->src_page_req_mutex);
2735         g_free(*rsp);
2736         *rsp = NULL;
2737     }
2738 }
2739 
2740 static void xbzrle_cleanup(void)
2741 {
2742     XBZRLE_cache_lock();
2743     if (XBZRLE.cache) {
2744         cache_fini(XBZRLE.cache);
2745         g_free(XBZRLE.encoded_buf);
2746         g_free(XBZRLE.current_buf);
2747         g_free(XBZRLE.zero_target_page);
2748         XBZRLE.cache = NULL;
2749         XBZRLE.encoded_buf = NULL;
2750         XBZRLE.current_buf = NULL;
2751         XBZRLE.zero_target_page = NULL;
2752     }
2753     XBZRLE_cache_unlock();
2754 }
2755 
2756 static void ram_save_cleanup(void *opaque)
2757 {
2758     RAMState **rsp = opaque;
2759     RAMBlock *block;
2760 
2761     /* caller have hold iothread lock or is in a bh, so there is
2762      * no writing race against the migration bitmap
2763      */
2764     memory_global_dirty_log_stop();
2765 
2766     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
2767         g_free(block->clear_bmap);
2768         block->clear_bmap = NULL;
2769         g_free(block->bmap);
2770         block->bmap = NULL;
2771     }
2772 
2773     xbzrle_cleanup();
2774     compress_threads_save_cleanup();
2775     ram_state_cleanup(rsp);
2776 }
2777 
2778 static void ram_state_reset(RAMState *rs)
2779 {
2780     rs->last_seen_block = NULL;
2781     rs->last_sent_block = NULL;
2782     rs->last_page = 0;
2783     rs->last_version = ram_list.version;
2784     rs->ram_bulk_stage = true;
2785     rs->fpo_enabled = false;
2786 }
2787 
2788 #define MAX_WAIT 50 /* ms, half buffered_file limit */
2789 
2790 /*
2791  * 'expected' is the value you expect the bitmap mostly to be full
2792  * of; it won't bother printing lines that are all this value.
2793  * If 'todump' is null the migration bitmap is dumped.
2794  */
2795 void ram_debug_dump_bitmap(unsigned long *todump, bool expected,
2796                            unsigned long pages)
2797 {
2798     int64_t cur;
2799     int64_t linelen = 128;
2800     char linebuf[129];
2801 
2802     for (cur = 0; cur < pages; cur += linelen) {
2803         int64_t curb;
2804         bool found = false;
2805         /*
2806          * Last line; catch the case where the line length
2807          * is longer than remaining ram
2808          */
2809         if (cur + linelen > pages) {
2810             linelen = pages - cur;
2811         }
2812         for (curb = 0; curb < linelen; curb++) {
2813             bool thisbit = test_bit(cur + curb, todump);
2814             linebuf[curb] = thisbit ? '1' : '.';
2815             found = found || (thisbit != expected);
2816         }
2817         if (found) {
2818             linebuf[curb] = '\0';
2819             fprintf(stderr,  "0x%08" PRIx64 " : %s\n", cur, linebuf);
2820         }
2821     }
2822 }
2823 
2824 /* **** functions for postcopy ***** */
2825 
2826 void ram_postcopy_migrated_memory_release(MigrationState *ms)
2827 {
2828     struct RAMBlock *block;
2829 
2830     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
2831         unsigned long *bitmap = block->bmap;
2832         unsigned long range = block->used_length >> TARGET_PAGE_BITS;
2833         unsigned long run_start = find_next_zero_bit(bitmap, range, 0);
2834 
2835         while (run_start < range) {
2836             unsigned long run_end = find_next_bit(bitmap, range, run_start + 1);
2837             ram_discard_range(block->idstr, run_start << TARGET_PAGE_BITS,
2838                               (run_end - run_start) << TARGET_PAGE_BITS);
2839             run_start = find_next_zero_bit(bitmap, range, run_end + 1);
2840         }
2841     }
2842 }
2843 
2844 /**
2845  * postcopy_send_discard_bm_ram: discard a RAMBlock
2846  *
2847  * Returns zero on success
2848  *
2849  * Callback from postcopy_each_ram_send_discard for each RAMBlock
2850  *
2851  * @ms: current migration state
2852  * @block: RAMBlock to discard
2853  */
2854 static int postcopy_send_discard_bm_ram(MigrationState *ms, RAMBlock *block)
2855 {
2856     unsigned long end = block->used_length >> TARGET_PAGE_BITS;
2857     unsigned long current;
2858     unsigned long *bitmap = block->bmap;
2859 
2860     for (current = 0; current < end; ) {
2861         unsigned long one = find_next_bit(bitmap, end, current);
2862         unsigned long zero, discard_length;
2863 
2864         if (one >= end) {
2865             break;
2866         }
2867 
2868         zero = find_next_zero_bit(bitmap, end, one + 1);
2869 
2870         if (zero >= end) {
2871             discard_length = end - one;
2872         } else {
2873             discard_length = zero - one;
2874         }
2875         postcopy_discard_send_range(ms, one, discard_length);
2876         current = one + discard_length;
2877     }
2878 
2879     return 0;
2880 }
2881 
2882 /**
2883  * postcopy_each_ram_send_discard: discard all RAMBlocks
2884  *
2885  * Returns 0 for success or negative for error
2886  *
2887  * Utility for the outgoing postcopy code.
2888  *   Calls postcopy_send_discard_bm_ram for each RAMBlock
2889  *   passing it bitmap indexes and name.
2890  * (qemu_ram_foreach_block ends up passing unscaled lengths
2891  *  which would mean postcopy code would have to deal with target page)
2892  *
2893  * @ms: current migration state
2894  */
2895 static int postcopy_each_ram_send_discard(MigrationState *ms)
2896 {
2897     struct RAMBlock *block;
2898     int ret;
2899 
2900     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
2901         postcopy_discard_send_init(ms, block->idstr);
2902 
2903         /*
2904          * Postcopy sends chunks of bitmap over the wire, but it
2905          * just needs indexes at this point, avoids it having
2906          * target page specific code.
2907          */
2908         ret = postcopy_send_discard_bm_ram(ms, block);
2909         postcopy_discard_send_finish(ms);
2910         if (ret) {
2911             return ret;
2912         }
2913     }
2914 
2915     return 0;
2916 }
2917 
2918 /**
2919  * postcopy_chunk_hostpages_pass: canonicalize bitmap in hostpages
2920  *
2921  * Helper for postcopy_chunk_hostpages; it's called twice to
2922  * canonicalize the two bitmaps, that are similar, but one is
2923  * inverted.
2924  *
2925  * Postcopy requires that all target pages in a hostpage are dirty or
2926  * clean, not a mix.  This function canonicalizes the bitmaps.
2927  *
2928  * @ms: current migration state
2929  * @block: block that contains the page we want to canonicalize
2930  */
2931 static void postcopy_chunk_hostpages_pass(MigrationState *ms, RAMBlock *block)
2932 {
2933     RAMState *rs = ram_state;
2934     unsigned long *bitmap = block->bmap;
2935     unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE;
2936     unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
2937     unsigned long run_start;
2938 
2939     if (block->page_size == TARGET_PAGE_SIZE) {
2940         /* Easy case - TPS==HPS for a non-huge page RAMBlock */
2941         return;
2942     }
2943 
2944     /* Find a dirty page */
2945     run_start = find_next_bit(bitmap, pages, 0);
2946 
2947     while (run_start < pages) {
2948 
2949         /*
2950          * If the start of this run of pages is in the middle of a host
2951          * page, then we need to fixup this host page.
2952          */
2953         if (QEMU_IS_ALIGNED(run_start, host_ratio)) {
2954             /* Find the end of this run */
2955             run_start = find_next_zero_bit(bitmap, pages, run_start + 1);
2956             /*
2957              * If the end isn't at the start of a host page, then the
2958              * run doesn't finish at the end of a host page
2959              * and we need to discard.
2960              */
2961         }
2962 
2963         if (!QEMU_IS_ALIGNED(run_start, host_ratio)) {
2964             unsigned long page;
2965             unsigned long fixup_start_addr = QEMU_ALIGN_DOWN(run_start,
2966                                                              host_ratio);
2967             run_start = QEMU_ALIGN_UP(run_start, host_ratio);
2968 
2969             /* Clean up the bitmap */
2970             for (page = fixup_start_addr;
2971                  page < fixup_start_addr + host_ratio; page++) {
2972                 /*
2973                  * Remark them as dirty, updating the count for any pages
2974                  * that weren't previously dirty.
2975                  */
2976                 rs->migration_dirty_pages += !test_and_set_bit(page, bitmap);
2977             }
2978         }
2979 
2980         /* Find the next dirty page for the next iteration */
2981         run_start = find_next_bit(bitmap, pages, run_start);
2982     }
2983 }
2984 
2985 /**
2986  * postcopy_chunk_hostpages: discard any partially sent host page
2987  *
2988  * Utility for the outgoing postcopy code.
2989  *
2990  * Discard any partially sent host-page size chunks, mark any partially
2991  * dirty host-page size chunks as all dirty.  In this case the host-page
2992  * is the host-page for the particular RAMBlock, i.e. it might be a huge page
2993  *
2994  * Returns zero on success
2995  *
2996  * @ms: current migration state
2997  * @block: block we want to work with
2998  */
2999 static int postcopy_chunk_hostpages(MigrationState *ms, RAMBlock *block)
3000 {
3001     postcopy_discard_send_init(ms, block->idstr);
3002 
3003     /*
3004      * Ensure that all partially dirty host pages are made fully dirty.
3005      */
3006     postcopy_chunk_hostpages_pass(ms, block);
3007 
3008     postcopy_discard_send_finish(ms);
3009     return 0;
3010 }
3011 
3012 /**
3013  * ram_postcopy_send_discard_bitmap: transmit the discard bitmap
3014  *
3015  * Returns zero on success
3016  *
3017  * Transmit the set of pages to be discarded after precopy to the target
3018  * these are pages that:
3019  *     a) Have been previously transmitted but are now dirty again
3020  *     b) Pages that have never been transmitted, this ensures that
3021  *        any pages on the destination that have been mapped by background
3022  *        tasks get discarded (transparent huge pages is the specific concern)
3023  * Hopefully this is pretty sparse
3024  *
3025  * @ms: current migration state
3026  */
3027 int ram_postcopy_send_discard_bitmap(MigrationState *ms)
3028 {
3029     RAMState *rs = ram_state;
3030     RAMBlock *block;
3031     int ret;
3032 
3033     RCU_READ_LOCK_GUARD();
3034 
3035     /* This should be our last sync, the src is now paused */
3036     migration_bitmap_sync(rs);
3037 
3038     /* Easiest way to make sure we don't resume in the middle of a host-page */
3039     rs->last_seen_block = NULL;
3040     rs->last_sent_block = NULL;
3041     rs->last_page = 0;
3042 
3043     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3044         /* Deal with TPS != HPS and huge pages */
3045         ret = postcopy_chunk_hostpages(ms, block);
3046         if (ret) {
3047             return ret;
3048         }
3049 
3050 #ifdef DEBUG_POSTCOPY
3051         ram_debug_dump_bitmap(block->bmap, true,
3052                               block->used_length >> TARGET_PAGE_BITS);
3053 #endif
3054     }
3055     trace_ram_postcopy_send_discard_bitmap();
3056 
3057     ret = postcopy_each_ram_send_discard(ms);
3058 
3059     return ret;
3060 }
3061 
3062 /**
3063  * ram_discard_range: discard dirtied pages at the beginning of postcopy
3064  *
3065  * Returns zero on success
3066  *
3067  * @rbname: name of the RAMBlock of the request. NULL means the
3068  *          same that last one.
3069  * @start: RAMBlock starting page
3070  * @length: RAMBlock size
3071  */
3072 int ram_discard_range(const char *rbname, uint64_t start, size_t length)
3073 {
3074     int ret = -1;
3075 
3076     trace_ram_discard_range(rbname, start, length);
3077 
3078     RCU_READ_LOCK_GUARD();
3079     RAMBlock *rb = qemu_ram_block_by_name(rbname);
3080 
3081     if (!rb) {
3082         error_report("ram_discard_range: Failed to find block '%s'", rbname);
3083         goto err;
3084     }
3085 
3086     /*
3087      * On source VM, we don't need to update the received bitmap since
3088      * we don't even have one.
3089      */
3090     if (rb->receivedmap) {
3091         bitmap_clear(rb->receivedmap, start >> qemu_target_page_bits(),
3092                      length >> qemu_target_page_bits());
3093     }
3094 
3095     ret = ram_block_discard_range(rb, start, length);
3096 
3097 err:
3098     return ret;
3099 }
3100 
3101 /*
3102  * For every allocation, we will try not to crash the VM if the
3103  * allocation failed.
3104  */
3105 static int xbzrle_init(void)
3106 {
3107     Error *local_err = NULL;
3108 
3109     if (!migrate_use_xbzrle()) {
3110         return 0;
3111     }
3112 
3113     XBZRLE_cache_lock();
3114 
3115     XBZRLE.zero_target_page = g_try_malloc0(TARGET_PAGE_SIZE);
3116     if (!XBZRLE.zero_target_page) {
3117         error_report("%s: Error allocating zero page", __func__);
3118         goto err_out;
3119     }
3120 
3121     XBZRLE.cache = cache_init(migrate_xbzrle_cache_size(),
3122                               TARGET_PAGE_SIZE, &local_err);
3123     if (!XBZRLE.cache) {
3124         error_report_err(local_err);
3125         goto free_zero_page;
3126     }
3127 
3128     XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
3129     if (!XBZRLE.encoded_buf) {
3130         error_report("%s: Error allocating encoded_buf", __func__);
3131         goto free_cache;
3132     }
3133 
3134     XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
3135     if (!XBZRLE.current_buf) {
3136         error_report("%s: Error allocating current_buf", __func__);
3137         goto free_encoded_buf;
3138     }
3139 
3140     /* We are all good */
3141     XBZRLE_cache_unlock();
3142     return 0;
3143 
3144 free_encoded_buf:
3145     g_free(XBZRLE.encoded_buf);
3146     XBZRLE.encoded_buf = NULL;
3147 free_cache:
3148     cache_fini(XBZRLE.cache);
3149     XBZRLE.cache = NULL;
3150 free_zero_page:
3151     g_free(XBZRLE.zero_target_page);
3152     XBZRLE.zero_target_page = NULL;
3153 err_out:
3154     XBZRLE_cache_unlock();
3155     return -ENOMEM;
3156 }
3157 
3158 static int ram_state_init(RAMState **rsp)
3159 {
3160     *rsp = g_try_new0(RAMState, 1);
3161 
3162     if (!*rsp) {
3163         error_report("%s: Init ramstate fail", __func__);
3164         return -1;
3165     }
3166 
3167     qemu_mutex_init(&(*rsp)->bitmap_mutex);
3168     qemu_mutex_init(&(*rsp)->src_page_req_mutex);
3169     QSIMPLEQ_INIT(&(*rsp)->src_page_requests);
3170 
3171     /*
3172      * Count the total number of pages used by ram blocks not including any
3173      * gaps due to alignment or unplugs.
3174      * This must match with the initial values of dirty bitmap.
3175      */
3176     (*rsp)->migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
3177     ram_state_reset(*rsp);
3178 
3179     return 0;
3180 }
3181 
3182 static void ram_list_init_bitmaps(void)
3183 {
3184     MigrationState *ms = migrate_get_current();
3185     RAMBlock *block;
3186     unsigned long pages;
3187     uint8_t shift;
3188 
3189     /* Skip setting bitmap if there is no RAM */
3190     if (ram_bytes_total()) {
3191         shift = ms->clear_bitmap_shift;
3192         if (shift > CLEAR_BITMAP_SHIFT_MAX) {
3193             error_report("clear_bitmap_shift (%u) too big, using "
3194                          "max value (%u)", shift, CLEAR_BITMAP_SHIFT_MAX);
3195             shift = CLEAR_BITMAP_SHIFT_MAX;
3196         } else if (shift < CLEAR_BITMAP_SHIFT_MIN) {
3197             error_report("clear_bitmap_shift (%u) too small, using "
3198                          "min value (%u)", shift, CLEAR_BITMAP_SHIFT_MIN);
3199             shift = CLEAR_BITMAP_SHIFT_MIN;
3200         }
3201 
3202         RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3203             pages = block->max_length >> TARGET_PAGE_BITS;
3204             /*
3205              * The initial dirty bitmap for migration must be set with all
3206              * ones to make sure we'll migrate every guest RAM page to
3207              * destination.
3208              * Here we set RAMBlock.bmap all to 1 because when rebegin a
3209              * new migration after a failed migration, ram_list.
3210              * dirty_memory[DIRTY_MEMORY_MIGRATION] don't include the whole
3211              * guest memory.
3212              */
3213             block->bmap = bitmap_new(pages);
3214             bitmap_set(block->bmap, 0, pages);
3215             block->clear_bmap_shift = shift;
3216             block->clear_bmap = bitmap_new(clear_bmap_size(pages, shift));
3217         }
3218     }
3219 }
3220 
3221 static void ram_init_bitmaps(RAMState *rs)
3222 {
3223     /* For memory_global_dirty_log_start below.  */
3224     qemu_mutex_lock_iothread();
3225     qemu_mutex_lock_ramlist();
3226 
3227     WITH_RCU_READ_LOCK_GUARD() {
3228         ram_list_init_bitmaps();
3229         memory_global_dirty_log_start();
3230         migration_bitmap_sync_precopy(rs);
3231     }
3232     qemu_mutex_unlock_ramlist();
3233     qemu_mutex_unlock_iothread();
3234 }
3235 
3236 static int ram_init_all(RAMState **rsp)
3237 {
3238     if (ram_state_init(rsp)) {
3239         return -1;
3240     }
3241 
3242     if (xbzrle_init()) {
3243         ram_state_cleanup(rsp);
3244         return -1;
3245     }
3246 
3247     ram_init_bitmaps(*rsp);
3248 
3249     return 0;
3250 }
3251 
3252 static void ram_state_resume_prepare(RAMState *rs, QEMUFile *out)
3253 {
3254     RAMBlock *block;
3255     uint64_t pages = 0;
3256 
3257     /*
3258      * Postcopy is not using xbzrle/compression, so no need for that.
3259      * Also, since source are already halted, we don't need to care
3260      * about dirty page logging as well.
3261      */
3262 
3263     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3264         pages += bitmap_count_one(block->bmap,
3265                                   block->used_length >> TARGET_PAGE_BITS);
3266     }
3267 
3268     /* This may not be aligned with current bitmaps. Recalculate. */
3269     rs->migration_dirty_pages = pages;
3270 
3271     rs->last_seen_block = NULL;
3272     rs->last_sent_block = NULL;
3273     rs->last_page = 0;
3274     rs->last_version = ram_list.version;
3275     /*
3276      * Disable the bulk stage, otherwise we'll resend the whole RAM no
3277      * matter what we have sent.
3278      */
3279     rs->ram_bulk_stage = false;
3280 
3281     /* Update RAMState cache of output QEMUFile */
3282     rs->f = out;
3283 
3284     trace_ram_state_resume_prepare(pages);
3285 }
3286 
3287 /*
3288  * This function clears bits of the free pages reported by the caller from the
3289  * migration dirty bitmap. @addr is the host address corresponding to the
3290  * start of the continuous guest free pages, and @len is the total bytes of
3291  * those pages.
3292  */
3293 void qemu_guest_free_page_hint(void *addr, size_t len)
3294 {
3295     RAMBlock *block;
3296     ram_addr_t offset;
3297     size_t used_len, start, npages;
3298     MigrationState *s = migrate_get_current();
3299 
3300     /* This function is currently expected to be used during live migration */
3301     if (!migration_is_setup_or_active(s->state)) {
3302         return;
3303     }
3304 
3305     for (; len > 0; len -= used_len, addr += used_len) {
3306         block = qemu_ram_block_from_host(addr, false, &offset);
3307         if (unlikely(!block || offset >= block->used_length)) {
3308             /*
3309              * The implementation might not support RAMBlock resize during
3310              * live migration, but it could happen in theory with future
3311              * updates. So we add a check here to capture that case.
3312              */
3313             error_report_once("%s unexpected error", __func__);
3314             return;
3315         }
3316 
3317         if (len <= block->used_length - offset) {
3318             used_len = len;
3319         } else {
3320             used_len = block->used_length - offset;
3321         }
3322 
3323         start = offset >> TARGET_PAGE_BITS;
3324         npages = used_len >> TARGET_PAGE_BITS;
3325 
3326         qemu_mutex_lock(&ram_state->bitmap_mutex);
3327         ram_state->migration_dirty_pages -=
3328                       bitmap_count_one_with_offset(block->bmap, start, npages);
3329         bitmap_clear(block->bmap, start, npages);
3330         qemu_mutex_unlock(&ram_state->bitmap_mutex);
3331     }
3332 }
3333 
3334 /*
3335  * Each of ram_save_setup, ram_save_iterate and ram_save_complete has
3336  * long-running RCU critical section.  When rcu-reclaims in the code
3337  * start to become numerous it will be necessary to reduce the
3338  * granularity of these critical sections.
3339  */
3340 
3341 /**
3342  * ram_save_setup: Setup RAM for migration
3343  *
3344  * Returns zero to indicate success and negative for error
3345  *
3346  * @f: QEMUFile where to send the data
3347  * @opaque: RAMState pointer
3348  */
3349 static int ram_save_setup(QEMUFile *f, void *opaque)
3350 {
3351     RAMState **rsp = opaque;
3352     RAMBlock *block;
3353 
3354     if (compress_threads_save_setup()) {
3355         return -1;
3356     }
3357 
3358     /* migration has already setup the bitmap, reuse it. */
3359     if (!migration_in_colo_state()) {
3360         if (ram_init_all(rsp) != 0) {
3361             compress_threads_save_cleanup();
3362             return -1;
3363         }
3364     }
3365     (*rsp)->f = f;
3366 
3367     WITH_RCU_READ_LOCK_GUARD() {
3368         qemu_put_be64(f, ram_bytes_total_common(true) | RAM_SAVE_FLAG_MEM_SIZE);
3369 
3370         RAMBLOCK_FOREACH_MIGRATABLE(block) {
3371             qemu_put_byte(f, strlen(block->idstr));
3372             qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
3373             qemu_put_be64(f, block->used_length);
3374             if (migrate_postcopy_ram() && block->page_size !=
3375                                           qemu_host_page_size) {
3376                 qemu_put_be64(f, block->page_size);
3377             }
3378             if (migrate_ignore_shared()) {
3379                 qemu_put_be64(f, block->mr->addr);
3380             }
3381         }
3382     }
3383 
3384     ram_control_before_iterate(f, RAM_CONTROL_SETUP);
3385     ram_control_after_iterate(f, RAM_CONTROL_SETUP);
3386 
3387     multifd_send_sync_main(*rsp);
3388     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
3389     qemu_fflush(f);
3390 
3391     return 0;
3392 }
3393 
3394 /**
3395  * ram_save_iterate: iterative stage for migration
3396  *
3397  * Returns zero to indicate success and negative for error
3398  *
3399  * @f: QEMUFile where to send the data
3400  * @opaque: RAMState pointer
3401  */
3402 static int ram_save_iterate(QEMUFile *f, void *opaque)
3403 {
3404     RAMState **temp = opaque;
3405     RAMState *rs = *temp;
3406     int ret;
3407     int i;
3408     int64_t t0;
3409     int done = 0;
3410 
3411     if (blk_mig_bulk_active()) {
3412         /* Avoid transferring ram during bulk phase of block migration as
3413          * the bulk phase will usually take a long time and transferring
3414          * ram updates during that time is pointless. */
3415         goto out;
3416     }
3417 
3418     WITH_RCU_READ_LOCK_GUARD() {
3419         if (ram_list.version != rs->last_version) {
3420             ram_state_reset(rs);
3421         }
3422 
3423         /* Read version before ram_list.blocks */
3424         smp_rmb();
3425 
3426         ram_control_before_iterate(f, RAM_CONTROL_ROUND);
3427 
3428         t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
3429         i = 0;
3430         while ((ret = qemu_file_rate_limit(f)) == 0 ||
3431                 !QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
3432             int pages;
3433 
3434             if (qemu_file_get_error(f)) {
3435                 break;
3436             }
3437 
3438             pages = ram_find_and_save_block(rs, false);
3439             /* no more pages to sent */
3440             if (pages == 0) {
3441                 done = 1;
3442                 break;
3443             }
3444 
3445             if (pages < 0) {
3446                 qemu_file_set_error(f, pages);
3447                 break;
3448             }
3449 
3450             rs->target_page_count += pages;
3451 
3452             /*
3453              * we want to check in the 1st loop, just in case it was the 1st
3454              * time and we had to sync the dirty bitmap.
3455              * qemu_clock_get_ns() is a bit expensive, so we only check each
3456              * some iterations
3457              */
3458             if ((i & 63) == 0) {
3459                 uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) /
3460                               1000000;
3461                 if (t1 > MAX_WAIT) {
3462                     trace_ram_save_iterate_big_wait(t1, i);
3463                     break;
3464                 }
3465             }
3466             i++;
3467         }
3468     }
3469 
3470     /*
3471      * Must occur before EOS (or any QEMUFile operation)
3472      * because of RDMA protocol.
3473      */
3474     ram_control_after_iterate(f, RAM_CONTROL_ROUND);
3475 
3476 out:
3477     multifd_send_sync_main(rs);
3478     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
3479     qemu_fflush(f);
3480     ram_counters.transferred += 8;
3481 
3482     ret = qemu_file_get_error(f);
3483     if (ret < 0) {
3484         return ret;
3485     }
3486 
3487     return done;
3488 }
3489 
3490 /**
3491  * ram_save_complete: function called to send the remaining amount of ram
3492  *
3493  * Returns zero to indicate success or negative on error
3494  *
3495  * Called with iothread lock
3496  *
3497  * @f: QEMUFile where to send the data
3498  * @opaque: RAMState pointer
3499  */
3500 static int ram_save_complete(QEMUFile *f, void *opaque)
3501 {
3502     RAMState **temp = opaque;
3503     RAMState *rs = *temp;
3504     int ret = 0;
3505 
3506     WITH_RCU_READ_LOCK_GUARD() {
3507         if (!migration_in_postcopy()) {
3508             migration_bitmap_sync_precopy(rs);
3509         }
3510 
3511         ram_control_before_iterate(f, RAM_CONTROL_FINISH);
3512 
3513         /* try transferring iterative blocks of memory */
3514 
3515         /* flush all remaining blocks regardless of rate limiting */
3516         while (true) {
3517             int pages;
3518 
3519             pages = ram_find_and_save_block(rs, !migration_in_colo_state());
3520             /* no more blocks to sent */
3521             if (pages == 0) {
3522                 break;
3523             }
3524             if (pages < 0) {
3525                 ret = pages;
3526                 break;
3527             }
3528         }
3529 
3530         flush_compressed_data(rs);
3531         ram_control_after_iterate(f, RAM_CONTROL_FINISH);
3532     }
3533 
3534     multifd_send_sync_main(rs);
3535     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
3536     qemu_fflush(f);
3537 
3538     return ret;
3539 }
3540 
3541 static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
3542                              uint64_t *res_precopy_only,
3543                              uint64_t *res_compatible,
3544                              uint64_t *res_postcopy_only)
3545 {
3546     RAMState **temp = opaque;
3547     RAMState *rs = *temp;
3548     uint64_t remaining_size;
3549 
3550     remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
3551 
3552     if (!migration_in_postcopy() &&
3553         remaining_size < max_size) {
3554         qemu_mutex_lock_iothread();
3555         WITH_RCU_READ_LOCK_GUARD() {
3556             migration_bitmap_sync_precopy(rs);
3557         }
3558         qemu_mutex_unlock_iothread();
3559         remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
3560     }
3561 
3562     if (migrate_postcopy_ram()) {
3563         /* We can do postcopy, and all the data is postcopiable */
3564         *res_compatible += remaining_size;
3565     } else {
3566         *res_precopy_only += remaining_size;
3567     }
3568 }
3569 
3570 static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
3571 {
3572     unsigned int xh_len;
3573     int xh_flags;
3574     uint8_t *loaded_data;
3575 
3576     /* extract RLE header */
3577     xh_flags = qemu_get_byte(f);
3578     xh_len = qemu_get_be16(f);
3579 
3580     if (xh_flags != ENCODING_FLAG_XBZRLE) {
3581         error_report("Failed to load XBZRLE page - wrong compression!");
3582         return -1;
3583     }
3584 
3585     if (xh_len > TARGET_PAGE_SIZE) {
3586         error_report("Failed to load XBZRLE page - len overflow!");
3587         return -1;
3588     }
3589     loaded_data = XBZRLE.decoded_buf;
3590     /* load data and decode */
3591     /* it can change loaded_data to point to an internal buffer */
3592     qemu_get_buffer_in_place(f, &loaded_data, xh_len);
3593 
3594     /* decode RLE */
3595     if (xbzrle_decode_buffer(loaded_data, xh_len, host,
3596                              TARGET_PAGE_SIZE) == -1) {
3597         error_report("Failed to load XBZRLE page - decode error!");
3598         return -1;
3599     }
3600 
3601     return 0;
3602 }
3603 
3604 /**
3605  * ram_block_from_stream: read a RAMBlock id from the migration stream
3606  *
3607  * Must be called from within a rcu critical section.
3608  *
3609  * Returns a pointer from within the RCU-protected ram_list.
3610  *
3611  * @f: QEMUFile where to read the data from
3612  * @flags: Page flags (mostly to see if it's a continuation of previous block)
3613  */
3614 static inline RAMBlock *ram_block_from_stream(QEMUFile *f, int flags)
3615 {
3616     static RAMBlock *block = NULL;
3617     char id[256];
3618     uint8_t len;
3619 
3620     if (flags & RAM_SAVE_FLAG_CONTINUE) {
3621         if (!block) {
3622             error_report("Ack, bad migration stream!");
3623             return NULL;
3624         }
3625         return block;
3626     }
3627 
3628     len = qemu_get_byte(f);
3629     qemu_get_buffer(f, (uint8_t *)id, len);
3630     id[len] = 0;
3631 
3632     block = qemu_ram_block_by_name(id);
3633     if (!block) {
3634         error_report("Can't find block %s", id);
3635         return NULL;
3636     }
3637 
3638     if (ramblock_is_ignored(block)) {
3639         error_report("block %s should not be migrated !", id);
3640         return NULL;
3641     }
3642 
3643     return block;
3644 }
3645 
3646 static inline void *host_from_ram_block_offset(RAMBlock *block,
3647                                                ram_addr_t offset)
3648 {
3649     if (!offset_in_ramblock(block, offset)) {
3650         return NULL;
3651     }
3652 
3653     return block->host + offset;
3654 }
3655 
3656 static inline void *colo_cache_from_block_offset(RAMBlock *block,
3657                                                  ram_addr_t offset)
3658 {
3659     if (!offset_in_ramblock(block, offset)) {
3660         return NULL;
3661     }
3662     if (!block->colo_cache) {
3663         error_report("%s: colo_cache is NULL in block :%s",
3664                      __func__, block->idstr);
3665         return NULL;
3666     }
3667 
3668     /*
3669     * During colo checkpoint, we need bitmap of these migrated pages.
3670     * It help us to decide which pages in ram cache should be flushed
3671     * into VM's RAM later.
3672     */
3673     if (!test_and_set_bit(offset >> TARGET_PAGE_BITS, block->bmap)) {
3674         ram_state->migration_dirty_pages++;
3675     }
3676     return block->colo_cache + offset;
3677 }
3678 
3679 /**
3680  * ram_handle_compressed: handle the zero page case
3681  *
3682  * If a page (or a whole RDMA chunk) has been
3683  * determined to be zero, then zap it.
3684  *
3685  * @host: host address for the zero page
3686  * @ch: what the page is filled from.  We only support zero
3687  * @size: size of the zero page
3688  */
3689 void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
3690 {
3691     if (ch != 0 || !is_zero_range(host, size)) {
3692         memset(host, ch, size);
3693     }
3694 }
3695 
3696 /* return the size after decompression, or negative value on error */
3697 static int
3698 qemu_uncompress_data(z_stream *stream, uint8_t *dest, size_t dest_len,
3699                      const uint8_t *source, size_t source_len)
3700 {
3701     int err;
3702 
3703     err = inflateReset(stream);
3704     if (err != Z_OK) {
3705         return -1;
3706     }
3707 
3708     stream->avail_in = source_len;
3709     stream->next_in = (uint8_t *)source;
3710     stream->avail_out = dest_len;
3711     stream->next_out = dest;
3712 
3713     err = inflate(stream, Z_NO_FLUSH);
3714     if (err != Z_STREAM_END) {
3715         return -1;
3716     }
3717 
3718     return stream->total_out;
3719 }
3720 
3721 static void *do_data_decompress(void *opaque)
3722 {
3723     DecompressParam *param = opaque;
3724     unsigned long pagesize;
3725     uint8_t *des;
3726     int len, ret;
3727 
3728     qemu_mutex_lock(&param->mutex);
3729     while (!param->quit) {
3730         if (param->des) {
3731             des = param->des;
3732             len = param->len;
3733             param->des = 0;
3734             qemu_mutex_unlock(&param->mutex);
3735 
3736             pagesize = TARGET_PAGE_SIZE;
3737 
3738             ret = qemu_uncompress_data(&param->stream, des, pagesize,
3739                                        param->compbuf, len);
3740             if (ret < 0 && migrate_get_current()->decompress_error_check) {
3741                 error_report("decompress data failed");
3742                 qemu_file_set_error(decomp_file, ret);
3743             }
3744 
3745             qemu_mutex_lock(&decomp_done_lock);
3746             param->done = true;
3747             qemu_cond_signal(&decomp_done_cond);
3748             qemu_mutex_unlock(&decomp_done_lock);
3749 
3750             qemu_mutex_lock(&param->mutex);
3751         } else {
3752             qemu_cond_wait(&param->cond, &param->mutex);
3753         }
3754     }
3755     qemu_mutex_unlock(&param->mutex);
3756 
3757     return NULL;
3758 }
3759 
3760 static int wait_for_decompress_done(void)
3761 {
3762     int idx, thread_count;
3763 
3764     if (!migrate_use_compression()) {
3765         return 0;
3766     }
3767 
3768     thread_count = migrate_decompress_threads();
3769     qemu_mutex_lock(&decomp_done_lock);
3770     for (idx = 0; idx < thread_count; idx++) {
3771         while (!decomp_param[idx].done) {
3772             qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
3773         }
3774     }
3775     qemu_mutex_unlock(&decomp_done_lock);
3776     return qemu_file_get_error(decomp_file);
3777 }
3778 
3779 static void compress_threads_load_cleanup(void)
3780 {
3781     int i, thread_count;
3782 
3783     if (!migrate_use_compression()) {
3784         return;
3785     }
3786     thread_count = migrate_decompress_threads();
3787     for (i = 0; i < thread_count; i++) {
3788         /*
3789          * we use it as a indicator which shows if the thread is
3790          * properly init'd or not
3791          */
3792         if (!decomp_param[i].compbuf) {
3793             break;
3794         }
3795 
3796         qemu_mutex_lock(&decomp_param[i].mutex);
3797         decomp_param[i].quit = true;
3798         qemu_cond_signal(&decomp_param[i].cond);
3799         qemu_mutex_unlock(&decomp_param[i].mutex);
3800     }
3801     for (i = 0; i < thread_count; i++) {
3802         if (!decomp_param[i].compbuf) {
3803             break;
3804         }
3805 
3806         qemu_thread_join(decompress_threads + i);
3807         qemu_mutex_destroy(&decomp_param[i].mutex);
3808         qemu_cond_destroy(&decomp_param[i].cond);
3809         inflateEnd(&decomp_param[i].stream);
3810         g_free(decomp_param[i].compbuf);
3811         decomp_param[i].compbuf = NULL;
3812     }
3813     g_free(decompress_threads);
3814     g_free(decomp_param);
3815     decompress_threads = NULL;
3816     decomp_param = NULL;
3817     decomp_file = NULL;
3818 }
3819 
3820 static int compress_threads_load_setup(QEMUFile *f)
3821 {
3822     int i, thread_count;
3823 
3824     if (!migrate_use_compression()) {
3825         return 0;
3826     }
3827 
3828     thread_count = migrate_decompress_threads();
3829     decompress_threads = g_new0(QemuThread, thread_count);
3830     decomp_param = g_new0(DecompressParam, thread_count);
3831     qemu_mutex_init(&decomp_done_lock);
3832     qemu_cond_init(&decomp_done_cond);
3833     decomp_file = f;
3834     for (i = 0; i < thread_count; i++) {
3835         if (inflateInit(&decomp_param[i].stream) != Z_OK) {
3836             goto exit;
3837         }
3838 
3839         decomp_param[i].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
3840         qemu_mutex_init(&decomp_param[i].mutex);
3841         qemu_cond_init(&decomp_param[i].cond);
3842         decomp_param[i].done = true;
3843         decomp_param[i].quit = false;
3844         qemu_thread_create(decompress_threads + i, "decompress",
3845                            do_data_decompress, decomp_param + i,
3846                            QEMU_THREAD_JOINABLE);
3847     }
3848     return 0;
3849 exit:
3850     compress_threads_load_cleanup();
3851     return -1;
3852 }
3853 
3854 static void decompress_data_with_multi_threads(QEMUFile *f,
3855                                                void *host, int len)
3856 {
3857     int idx, thread_count;
3858 
3859     thread_count = migrate_decompress_threads();
3860     qemu_mutex_lock(&decomp_done_lock);
3861     while (true) {
3862         for (idx = 0; idx < thread_count; idx++) {
3863             if (decomp_param[idx].done) {
3864                 decomp_param[idx].done = false;
3865                 qemu_mutex_lock(&decomp_param[idx].mutex);
3866                 qemu_get_buffer(f, decomp_param[idx].compbuf, len);
3867                 decomp_param[idx].des = host;
3868                 decomp_param[idx].len = len;
3869                 qemu_cond_signal(&decomp_param[idx].cond);
3870                 qemu_mutex_unlock(&decomp_param[idx].mutex);
3871                 break;
3872             }
3873         }
3874         if (idx < thread_count) {
3875             break;
3876         } else {
3877             qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
3878         }
3879     }
3880     qemu_mutex_unlock(&decomp_done_lock);
3881 }
3882 
3883 /*
3884  * colo cache: this is for secondary VM, we cache the whole
3885  * memory of the secondary VM, it is need to hold the global lock
3886  * to call this helper.
3887  */
3888 int colo_init_ram_cache(void)
3889 {
3890     RAMBlock *block;
3891 
3892     rcu_read_lock();
3893     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3894         block->colo_cache = qemu_anon_ram_alloc(block->used_length,
3895                                                 NULL,
3896                                                 false);
3897         if (!block->colo_cache) {
3898             error_report("%s: Can't alloc memory for COLO cache of block %s,"
3899                          "size 0x" RAM_ADDR_FMT, __func__, block->idstr,
3900                          block->used_length);
3901             RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3902                 if (block->colo_cache) {
3903                     qemu_anon_ram_free(block->colo_cache, block->used_length);
3904                     block->colo_cache = NULL;
3905                 }
3906             }
3907             return -errno;
3908         }
3909         memcpy(block->colo_cache, block->host, block->used_length);
3910     }
3911     rcu_read_unlock();
3912     /*
3913     * Record the dirty pages that sent by PVM, we use this dirty bitmap together
3914     * with to decide which page in cache should be flushed into SVM's RAM. Here
3915     * we use the same name 'ram_bitmap' as for migration.
3916     */
3917     if (ram_bytes_total()) {
3918         RAMBlock *block;
3919 
3920         RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3921             unsigned long pages = block->max_length >> TARGET_PAGE_BITS;
3922 
3923             block->bmap = bitmap_new(pages);
3924             bitmap_set(block->bmap, 0, pages);
3925         }
3926     }
3927     ram_state = g_new0(RAMState, 1);
3928     ram_state->migration_dirty_pages = 0;
3929     qemu_mutex_init(&ram_state->bitmap_mutex);
3930     memory_global_dirty_log_start();
3931 
3932     return 0;
3933 }
3934 
3935 /* It is need to hold the global lock to call this helper */
3936 void colo_release_ram_cache(void)
3937 {
3938     RAMBlock *block;
3939 
3940     memory_global_dirty_log_stop();
3941     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3942         g_free(block->bmap);
3943         block->bmap = NULL;
3944     }
3945 
3946     WITH_RCU_READ_LOCK_GUARD() {
3947         RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3948             if (block->colo_cache) {
3949                 qemu_anon_ram_free(block->colo_cache, block->used_length);
3950                 block->colo_cache = NULL;
3951             }
3952         }
3953     }
3954     qemu_mutex_destroy(&ram_state->bitmap_mutex);
3955     g_free(ram_state);
3956     ram_state = NULL;
3957 }
3958 
3959 /**
3960  * ram_load_setup: Setup RAM for migration incoming side
3961  *
3962  * Returns zero to indicate success and negative for error
3963  *
3964  * @f: QEMUFile where to receive the data
3965  * @opaque: RAMState pointer
3966  */
3967 static int ram_load_setup(QEMUFile *f, void *opaque)
3968 {
3969     if (compress_threads_load_setup(f)) {
3970         return -1;
3971     }
3972 
3973     xbzrle_load_setup();
3974     ramblock_recv_map_init();
3975 
3976     return 0;
3977 }
3978 
3979 static int ram_load_cleanup(void *opaque)
3980 {
3981     RAMBlock *rb;
3982 
3983     RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
3984         if (ramblock_is_pmem(rb)) {
3985             pmem_persist(rb->host, rb->used_length);
3986         }
3987     }
3988 
3989     xbzrle_load_cleanup();
3990     compress_threads_load_cleanup();
3991 
3992     RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
3993         g_free(rb->receivedmap);
3994         rb->receivedmap = NULL;
3995     }
3996 
3997     return 0;
3998 }
3999 
4000 /**
4001  * ram_postcopy_incoming_init: allocate postcopy data structures
4002  *
4003  * Returns 0 for success and negative if there was one error
4004  *
4005  * @mis: current migration incoming state
4006  *
4007  * Allocate data structures etc needed by incoming migration with
4008  * postcopy-ram. postcopy-ram's similarly names
4009  * postcopy_ram_incoming_init does the work.
4010  */
4011 int ram_postcopy_incoming_init(MigrationIncomingState *mis)
4012 {
4013     return postcopy_ram_incoming_init(mis);
4014 }
4015 
4016 /**
4017  * ram_load_postcopy: load a page in postcopy case
4018  *
4019  * Returns 0 for success or -errno in case of error
4020  *
4021  * Called in postcopy mode by ram_load().
4022  * rcu_read_lock is taken prior to this being called.
4023  *
4024  * @f: QEMUFile where to send the data
4025  */
4026 static int ram_load_postcopy(QEMUFile *f)
4027 {
4028     int flags = 0, ret = 0;
4029     bool place_needed = false;
4030     bool matches_target_page_size = false;
4031     MigrationIncomingState *mis = migration_incoming_get_current();
4032     /* Temporary page that is later 'placed' */
4033     void *postcopy_host_page = mis->postcopy_tmp_page;
4034     void *last_host = NULL;
4035     bool all_zero = false;
4036 
4037     while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
4038         ram_addr_t addr;
4039         void *host = NULL;
4040         void *page_buffer = NULL;
4041         void *place_source = NULL;
4042         RAMBlock *block = NULL;
4043         uint8_t ch;
4044 
4045         addr = qemu_get_be64(f);
4046 
4047         /*
4048          * If qemu file error, we should stop here, and then "addr"
4049          * may be invalid
4050          */
4051         ret = qemu_file_get_error(f);
4052         if (ret) {
4053             break;
4054         }
4055 
4056         flags = addr & ~TARGET_PAGE_MASK;
4057         addr &= TARGET_PAGE_MASK;
4058 
4059         trace_ram_load_postcopy_loop((uint64_t)addr, flags);
4060         place_needed = false;
4061         if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE)) {
4062             block = ram_block_from_stream(f, flags);
4063 
4064             host = host_from_ram_block_offset(block, addr);
4065             if (!host) {
4066                 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
4067                 ret = -EINVAL;
4068                 break;
4069             }
4070             matches_target_page_size = block->page_size == TARGET_PAGE_SIZE;
4071             /*
4072              * Postcopy requires that we place whole host pages atomically;
4073              * these may be huge pages for RAMBlocks that are backed by
4074              * hugetlbfs.
4075              * To make it atomic, the data is read into a temporary page
4076              * that's moved into place later.
4077              * The migration protocol uses,  possibly smaller, target-pages
4078              * however the source ensures it always sends all the components
4079              * of a host page in order.
4080              */
4081             page_buffer = postcopy_host_page +
4082                           ((uintptr_t)host & (block->page_size - 1));
4083             /* If all TP are zero then we can optimise the place */
4084             if (!((uintptr_t)host & (block->page_size - 1))) {
4085                 all_zero = true;
4086             } else {
4087                 /* not the 1st TP within the HP */
4088                 if (host != (last_host + TARGET_PAGE_SIZE)) {
4089                     error_report("Non-sequential target page %p/%p",
4090                                   host, last_host);
4091                     ret = -EINVAL;
4092                     break;
4093                 }
4094             }
4095 
4096 
4097             /*
4098              * If it's the last part of a host page then we place the host
4099              * page
4100              */
4101             place_needed = (((uintptr_t)host + TARGET_PAGE_SIZE) &
4102                                      (block->page_size - 1)) == 0;
4103             place_source = postcopy_host_page;
4104         }
4105         last_host = host;
4106 
4107         switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
4108         case RAM_SAVE_FLAG_ZERO:
4109             ch = qemu_get_byte(f);
4110             memset(page_buffer, ch, TARGET_PAGE_SIZE);
4111             if (ch) {
4112                 all_zero = false;
4113             }
4114             break;
4115 
4116         case RAM_SAVE_FLAG_PAGE:
4117             all_zero = false;
4118             if (!matches_target_page_size) {
4119                 /* For huge pages, we always use temporary buffer */
4120                 qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE);
4121             } else {
4122                 /*
4123                  * For small pages that matches target page size, we
4124                  * avoid the qemu_file copy.  Instead we directly use
4125                  * the buffer of QEMUFile to place the page.  Note: we
4126                  * cannot do any QEMUFile operation before using that
4127                  * buffer to make sure the buffer is valid when
4128                  * placing the page.
4129                  */
4130                 qemu_get_buffer_in_place(f, (uint8_t **)&place_source,
4131                                          TARGET_PAGE_SIZE);
4132             }
4133             break;
4134         case RAM_SAVE_FLAG_EOS:
4135             /* normal exit */
4136             multifd_recv_sync_main();
4137             break;
4138         default:
4139             error_report("Unknown combination of migration flags: %#x"
4140                          " (postcopy mode)", flags);
4141             ret = -EINVAL;
4142             break;
4143         }
4144 
4145         /* Detect for any possible file errors */
4146         if (!ret && qemu_file_get_error(f)) {
4147             ret = qemu_file_get_error(f);
4148         }
4149 
4150         if (!ret && place_needed) {
4151             /* This gets called at the last target page in the host page */
4152             void *place_dest = host + TARGET_PAGE_SIZE - block->page_size;
4153 
4154             if (all_zero) {
4155                 ret = postcopy_place_page_zero(mis, place_dest,
4156                                                block);
4157             } else {
4158                 ret = postcopy_place_page(mis, place_dest,
4159                                           place_source, block);
4160             }
4161         }
4162     }
4163 
4164     return ret;
4165 }
4166 
4167 static bool postcopy_is_advised(void)
4168 {
4169     PostcopyState ps = postcopy_state_get();
4170     return ps >= POSTCOPY_INCOMING_ADVISE && ps < POSTCOPY_INCOMING_END;
4171 }
4172 
4173 static bool postcopy_is_running(void)
4174 {
4175     PostcopyState ps = postcopy_state_get();
4176     return ps >= POSTCOPY_INCOMING_LISTENING && ps < POSTCOPY_INCOMING_END;
4177 }
4178 
4179 /*
4180  * Flush content of RAM cache into SVM's memory.
4181  * Only flush the pages that be dirtied by PVM or SVM or both.
4182  */
4183 static void colo_flush_ram_cache(void)
4184 {
4185     RAMBlock *block = NULL;
4186     void *dst_host;
4187     void *src_host;
4188     unsigned long offset = 0;
4189 
4190     memory_global_dirty_log_sync();
4191     WITH_RCU_READ_LOCK_GUARD() {
4192         RAMBLOCK_FOREACH_NOT_IGNORED(block) {
4193             ramblock_sync_dirty_bitmap(ram_state, block);
4194         }
4195     }
4196 
4197     trace_colo_flush_ram_cache_begin(ram_state->migration_dirty_pages);
4198     WITH_RCU_READ_LOCK_GUARD() {
4199         block = QLIST_FIRST_RCU(&ram_list.blocks);
4200 
4201         while (block) {
4202             offset = migration_bitmap_find_dirty(ram_state, block, offset);
4203 
4204             if (offset << TARGET_PAGE_BITS >= block->used_length) {
4205                 offset = 0;
4206                 block = QLIST_NEXT_RCU(block, next);
4207             } else {
4208                 migration_bitmap_clear_dirty(ram_state, block, offset);
4209                 dst_host = block->host + (offset << TARGET_PAGE_BITS);
4210                 src_host = block->colo_cache + (offset << TARGET_PAGE_BITS);
4211                 memcpy(dst_host, src_host, TARGET_PAGE_SIZE);
4212             }
4213         }
4214     }
4215     trace_colo_flush_ram_cache_end();
4216 }
4217 
4218 /**
4219  * ram_load_precopy: load pages in precopy case
4220  *
4221  * Returns 0 for success or -errno in case of error
4222  *
4223  * Called in precopy mode by ram_load().
4224  * rcu_read_lock is taken prior to this being called.
4225  *
4226  * @f: QEMUFile where to send the data
4227  */
4228 static int ram_load_precopy(QEMUFile *f)
4229 {
4230     int flags = 0, ret = 0, invalid_flags = 0, len = 0;
4231     /* ADVISE is earlier, it shows the source has the postcopy capability on */
4232     bool postcopy_advised = postcopy_is_advised();
4233     if (!migrate_use_compression()) {
4234         invalid_flags |= RAM_SAVE_FLAG_COMPRESS_PAGE;
4235     }
4236 
4237     while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
4238         ram_addr_t addr, total_ram_bytes;
4239         void *host = NULL;
4240         uint8_t ch;
4241 
4242         addr = qemu_get_be64(f);
4243         flags = addr & ~TARGET_PAGE_MASK;
4244         addr &= TARGET_PAGE_MASK;
4245 
4246         if (flags & invalid_flags) {
4247             if (flags & invalid_flags & RAM_SAVE_FLAG_COMPRESS_PAGE) {
4248                 error_report("Received an unexpected compressed page");
4249             }
4250 
4251             ret = -EINVAL;
4252             break;
4253         }
4254 
4255         if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
4256                      RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
4257             RAMBlock *block = ram_block_from_stream(f, flags);
4258 
4259             /*
4260              * After going into COLO, we should load the Page into colo_cache.
4261              */
4262             if (migration_incoming_in_colo_state()) {
4263                 host = colo_cache_from_block_offset(block, addr);
4264             } else {
4265                 host = host_from_ram_block_offset(block, addr);
4266             }
4267             if (!host) {
4268                 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
4269                 ret = -EINVAL;
4270                 break;
4271             }
4272 
4273             if (!migration_incoming_in_colo_state()) {
4274                 ramblock_recv_bitmap_set(block, host);
4275             }
4276 
4277             trace_ram_load_loop(block->idstr, (uint64_t)addr, flags, host);
4278         }
4279 
4280         switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
4281         case RAM_SAVE_FLAG_MEM_SIZE:
4282             /* Synchronize RAM block list */
4283             total_ram_bytes = addr;
4284             while (!ret && total_ram_bytes) {
4285                 RAMBlock *block;
4286                 char id[256];
4287                 ram_addr_t length;
4288 
4289                 len = qemu_get_byte(f);
4290                 qemu_get_buffer(f, (uint8_t *)id, len);
4291                 id[len] = 0;
4292                 length = qemu_get_be64(f);
4293 
4294                 block = qemu_ram_block_by_name(id);
4295                 if (block && !qemu_ram_is_migratable(block)) {
4296                     error_report("block %s should not be migrated !", id);
4297                     ret = -EINVAL;
4298                 } else if (block) {
4299                     if (length != block->used_length) {
4300                         Error *local_err = NULL;
4301 
4302                         ret = qemu_ram_resize(block, length,
4303                                               &local_err);
4304                         if (local_err) {
4305                             error_report_err(local_err);
4306                         }
4307                     }
4308                     /* For postcopy we need to check hugepage sizes match */
4309                     if (postcopy_advised &&
4310                         block->page_size != qemu_host_page_size) {
4311                         uint64_t remote_page_size = qemu_get_be64(f);
4312                         if (remote_page_size != block->page_size) {
4313                             error_report("Mismatched RAM page size %s "
4314                                          "(local) %zd != %" PRId64,
4315                                          id, block->page_size,
4316                                          remote_page_size);
4317                             ret = -EINVAL;
4318                         }
4319                     }
4320                     if (migrate_ignore_shared()) {
4321                         hwaddr addr = qemu_get_be64(f);
4322                         if (ramblock_is_ignored(block) &&
4323                             block->mr->addr != addr) {
4324                             error_report("Mismatched GPAs for block %s "
4325                                          "%" PRId64 "!= %" PRId64,
4326                                          id, (uint64_t)addr,
4327                                          (uint64_t)block->mr->addr);
4328                             ret = -EINVAL;
4329                         }
4330                     }
4331                     ram_control_load_hook(f, RAM_CONTROL_BLOCK_REG,
4332                                           block->idstr);
4333                 } else {
4334                     error_report("Unknown ramblock \"%s\", cannot "
4335                                  "accept migration", id);
4336                     ret = -EINVAL;
4337                 }
4338 
4339                 total_ram_bytes -= length;
4340             }
4341             break;
4342 
4343         case RAM_SAVE_FLAG_ZERO:
4344             ch = qemu_get_byte(f);
4345             ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
4346             break;
4347 
4348         case RAM_SAVE_FLAG_PAGE:
4349             qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
4350             break;
4351 
4352         case RAM_SAVE_FLAG_COMPRESS_PAGE:
4353             len = qemu_get_be32(f);
4354             if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
4355                 error_report("Invalid compressed data length: %d", len);
4356                 ret = -EINVAL;
4357                 break;
4358             }
4359             decompress_data_with_multi_threads(f, host, len);
4360             break;
4361 
4362         case RAM_SAVE_FLAG_XBZRLE:
4363             if (load_xbzrle(f, addr, host) < 0) {
4364                 error_report("Failed to decompress XBZRLE page at "
4365                              RAM_ADDR_FMT, addr);
4366                 ret = -EINVAL;
4367                 break;
4368             }
4369             break;
4370         case RAM_SAVE_FLAG_EOS:
4371             /* normal exit */
4372             multifd_recv_sync_main();
4373             break;
4374         default:
4375             if (flags & RAM_SAVE_FLAG_HOOK) {
4376                 ram_control_load_hook(f, RAM_CONTROL_HOOK, NULL);
4377             } else {
4378                 error_report("Unknown combination of migration flags: %#x",
4379                              flags);
4380                 ret = -EINVAL;
4381             }
4382         }
4383         if (!ret) {
4384             ret = qemu_file_get_error(f);
4385         }
4386     }
4387 
4388     return ret;
4389 }
4390 
4391 static int ram_load(QEMUFile *f, void *opaque, int version_id)
4392 {
4393     int ret = 0;
4394     static uint64_t seq_iter;
4395     /*
4396      * If system is running in postcopy mode, page inserts to host memory must
4397      * be atomic
4398      */
4399     bool postcopy_running = postcopy_is_running();
4400 
4401     seq_iter++;
4402 
4403     if (version_id != 4) {
4404         return -EINVAL;
4405     }
4406 
4407     /*
4408      * This RCU critical section can be very long running.
4409      * When RCU reclaims in the code start to become numerous,
4410      * it will be necessary to reduce the granularity of this
4411      * critical section.
4412      */
4413     WITH_RCU_READ_LOCK_GUARD() {
4414         if (postcopy_running) {
4415             ret = ram_load_postcopy(f);
4416         } else {
4417             ret = ram_load_precopy(f);
4418         }
4419 
4420         ret |= wait_for_decompress_done();
4421     }
4422     trace_ram_load_complete(ret, seq_iter);
4423 
4424     if (!ret  && migration_incoming_in_colo_state()) {
4425         colo_flush_ram_cache();
4426     }
4427     return ret;
4428 }
4429 
4430 static bool ram_has_postcopy(void *opaque)
4431 {
4432     RAMBlock *rb;
4433     RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
4434         if (ramblock_is_pmem(rb)) {
4435             info_report("Block: %s, host: %p is a nvdimm memory, postcopy"
4436                          "is not supported now!", rb->idstr, rb->host);
4437             return false;
4438         }
4439     }
4440 
4441     return migrate_postcopy_ram();
4442 }
4443 
4444 /* Sync all the dirty bitmap with destination VM.  */
4445 static int ram_dirty_bitmap_sync_all(MigrationState *s, RAMState *rs)
4446 {
4447     RAMBlock *block;
4448     QEMUFile *file = s->to_dst_file;
4449     int ramblock_count = 0;
4450 
4451     trace_ram_dirty_bitmap_sync_start();
4452 
4453     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
4454         qemu_savevm_send_recv_bitmap(file, block->idstr);
4455         trace_ram_dirty_bitmap_request(block->idstr);
4456         ramblock_count++;
4457     }
4458 
4459     trace_ram_dirty_bitmap_sync_wait();
4460 
4461     /* Wait until all the ramblocks' dirty bitmap synced */
4462     while (ramblock_count--) {
4463         qemu_sem_wait(&s->rp_state.rp_sem);
4464     }
4465 
4466     trace_ram_dirty_bitmap_sync_complete();
4467 
4468     return 0;
4469 }
4470 
4471 static void ram_dirty_bitmap_reload_notify(MigrationState *s)
4472 {
4473     qemu_sem_post(&s->rp_state.rp_sem);
4474 }
4475 
4476 /*
4477  * Read the received bitmap, revert it as the initial dirty bitmap.
4478  * This is only used when the postcopy migration is paused but wants
4479  * to resume from a middle point.
4480  */
4481 int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *block)
4482 {
4483     int ret = -EINVAL;
4484     QEMUFile *file = s->rp_state.from_dst_file;
4485     unsigned long *le_bitmap, nbits = block->used_length >> TARGET_PAGE_BITS;
4486     uint64_t local_size = DIV_ROUND_UP(nbits, 8);
4487     uint64_t size, end_mark;
4488 
4489     trace_ram_dirty_bitmap_reload_begin(block->idstr);
4490 
4491     if (s->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
4492         error_report("%s: incorrect state %s", __func__,
4493                      MigrationStatus_str(s->state));
4494         return -EINVAL;
4495     }
4496 
4497     /*
4498      * Note: see comments in ramblock_recv_bitmap_send() on why we
4499      * need the endianess convertion, and the paddings.
4500      */
4501     local_size = ROUND_UP(local_size, 8);
4502 
4503     /* Add paddings */
4504     le_bitmap = bitmap_new(nbits + BITS_PER_LONG);
4505 
4506     size = qemu_get_be64(file);
4507 
4508     /* The size of the bitmap should match with our ramblock */
4509     if (size != local_size) {
4510         error_report("%s: ramblock '%s' bitmap size mismatch "
4511                      "(0x%"PRIx64" != 0x%"PRIx64")", __func__,
4512                      block->idstr, size, local_size);
4513         ret = -EINVAL;
4514         goto out;
4515     }
4516 
4517     size = qemu_get_buffer(file, (uint8_t *)le_bitmap, local_size);
4518     end_mark = qemu_get_be64(file);
4519 
4520     ret = qemu_file_get_error(file);
4521     if (ret || size != local_size) {
4522         error_report("%s: read bitmap failed for ramblock '%s': %d"
4523                      " (size 0x%"PRIx64", got: 0x%"PRIx64")",
4524                      __func__, block->idstr, ret, local_size, size);
4525         ret = -EIO;
4526         goto out;
4527     }
4528 
4529     if (end_mark != RAMBLOCK_RECV_BITMAP_ENDING) {
4530         error_report("%s: ramblock '%s' end mark incorrect: 0x%"PRIu64,
4531                      __func__, block->idstr, end_mark);
4532         ret = -EINVAL;
4533         goto out;
4534     }
4535 
4536     /*
4537      * Endianess convertion. We are during postcopy (though paused).
4538      * The dirty bitmap won't change. We can directly modify it.
4539      */
4540     bitmap_from_le(block->bmap, le_bitmap, nbits);
4541 
4542     /*
4543      * What we received is "received bitmap". Revert it as the initial
4544      * dirty bitmap for this ramblock.
4545      */
4546     bitmap_complement(block->bmap, block->bmap, nbits);
4547 
4548     trace_ram_dirty_bitmap_reload_complete(block->idstr);
4549 
4550     /*
4551      * We succeeded to sync bitmap for current ramblock. If this is
4552      * the last one to sync, we need to notify the main send thread.
4553      */
4554     ram_dirty_bitmap_reload_notify(s);
4555 
4556     ret = 0;
4557 out:
4558     g_free(le_bitmap);
4559     return ret;
4560 }
4561 
4562 static int ram_resume_prepare(MigrationState *s, void *opaque)
4563 {
4564     RAMState *rs = *(RAMState **)opaque;
4565     int ret;
4566 
4567     ret = ram_dirty_bitmap_sync_all(s, rs);
4568     if (ret) {
4569         return ret;
4570     }
4571 
4572     ram_state_resume_prepare(rs, s->to_dst_file);
4573 
4574     return 0;
4575 }
4576 
4577 static SaveVMHandlers savevm_ram_handlers = {
4578     .save_setup = ram_save_setup,
4579     .save_live_iterate = ram_save_iterate,
4580     .save_live_complete_postcopy = ram_save_complete,
4581     .save_live_complete_precopy = ram_save_complete,
4582     .has_postcopy = ram_has_postcopy,
4583     .save_live_pending = ram_save_pending,
4584     .load_state = ram_load,
4585     .save_cleanup = ram_save_cleanup,
4586     .load_setup = ram_load_setup,
4587     .load_cleanup = ram_load_cleanup,
4588     .resume_prepare = ram_resume_prepare,
4589 };
4590 
4591 void ram_mig_init(void)
4592 {
4593     qemu_mutex_init(&XBZRLE.lock);
4594     register_savevm_live("ram", 0, 4, &savevm_ram_handlers, &ram_state);
4595 }
4596