xref: /openbmc/qemu/migration/ram.c (revision a27bd6c7)
1 /*
2  * QEMU System Emulator
3  *
4  * Copyright (c) 2003-2008 Fabrice Bellard
5  * Copyright (c) 2011-2015 Red Hat Inc
6  *
7  * Authors:
8  *  Juan Quintela <quintela@redhat.com>
9  *
10  * Permission is hereby granted, free of charge, to any person obtaining a copy
11  * of this software and associated documentation files (the "Software"), to deal
12  * in the Software without restriction, including without limitation the rights
13  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
14  * copies of the Software, and to permit persons to whom the Software is
15  * furnished to do so, subject to the following conditions:
16  *
17  * The above copyright notice and this permission notice shall be included in
18  * all copies or substantial portions of the Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
24  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
26  * THE SOFTWARE.
27  */
28 
29 #include "qemu/osdep.h"
30 #include "cpu.h"
31 #include <zlib.h>
32 #include "qemu/cutils.h"
33 #include "qemu/bitops.h"
34 #include "qemu/bitmap.h"
35 #include "qemu/main-loop.h"
36 #include "qemu/pmem.h"
37 #include "xbzrle.h"
38 #include "ram.h"
39 #include "migration.h"
40 #include "socket.h"
41 #include "migration/register.h"
42 #include "migration/misc.h"
43 #include "qemu-file.h"
44 #include "postcopy-ram.h"
45 #include "page_cache.h"
46 #include "qemu/error-report.h"
47 #include "qapi/error.h"
48 #include "qapi/qapi-events-migration.h"
49 #include "qapi/qmp/qerror.h"
50 #include "trace.h"
51 #include "exec/ram_addr.h"
52 #include "exec/target_page.h"
53 #include "qemu/rcu_queue.h"
54 #include "migration/colo.h"
55 #include "block.h"
56 #include "sysemu/sysemu.h"
57 #include "qemu/uuid.h"
58 #include "savevm.h"
59 #include "qemu/iov.h"
60 
61 /***********************************************************/
62 /* ram save/restore */
63 
64 /* RAM_SAVE_FLAG_ZERO used to be named RAM_SAVE_FLAG_COMPRESS; it
65  * worked for pages that were filled with the same char.  We switched
66  * it to only search for the zero value, and renamed it to avoid
67  * confusion with RAM_SAVE_FLAG_COMPRESS_PAGE.
68  */
69 
70 #define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
71 #define RAM_SAVE_FLAG_ZERO     0x02
72 #define RAM_SAVE_FLAG_MEM_SIZE 0x04
73 #define RAM_SAVE_FLAG_PAGE     0x08
74 #define RAM_SAVE_FLAG_EOS      0x10
75 #define RAM_SAVE_FLAG_CONTINUE 0x20
76 #define RAM_SAVE_FLAG_XBZRLE   0x40
77 /* 0x80 is reserved in migration.h; start at 0x100 for the next flag */
78 #define RAM_SAVE_FLAG_COMPRESS_PAGE    0x100
79 
80 static inline bool is_zero_range(uint8_t *p, uint64_t size)
81 {
82     return buffer_is_zero(p, size);
83 }
84 
85 XBZRLECacheStats xbzrle_counters;
86 
87 /* This struct contains the XBZRLE cache and a static page
88    used by the compression */
89 static struct {
90     /* buffer used for XBZRLE encoding */
91     uint8_t *encoded_buf;
92     /* buffer for storing page content */
93     uint8_t *current_buf;
94     /* Cache for XBZRLE, Protected by lock. */
95     PageCache *cache;
96     QemuMutex lock;
97     /* it will store a page full of zeros */
98     uint8_t *zero_target_page;
99     /* buffer used for XBZRLE decoding */
100     uint8_t *decoded_buf;
101 } XBZRLE;
102 
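/*
 * The XBZRLE cache lock helpers below only take the mutex when XBZRLE
 * is actually in use, so they are cheap no-ops for other migrations.
 */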
103 static void XBZRLE_cache_lock(void)
104 {
105     if (migrate_use_xbzrle())
106         qemu_mutex_lock(&XBZRLE.lock);
107 }
108 
109 static void XBZRLE_cache_unlock(void)
110 {
111     if (migrate_use_xbzrle())
112         qemu_mutex_unlock(&XBZRLE.lock);
113 }
114 
115 /**
116  * xbzrle_cache_resize: resize the xbzrle cache
117  *
118  * This function is called from qmp_migrate_set_cache_size in the main
119  * thread, possibly while a migration is in progress.  A running
120  * migration may be using the cache and might finish during this call,
121  * hence changes to the cache are protected by XBZRLE.lock.
122  *
123  * Returns 0 for success or -1 for error
124  *
125  * @new_size: new cache size
126  * @errp: set *errp if the check failed, with reason
127  */
128 int xbzrle_cache_resize(int64_t new_size, Error **errp)
129 {
130     PageCache *new_cache;
131     int64_t ret = 0;
132 
133     /* Check for truncation */
134     if (new_size != (size_t)new_size) {
135         error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
136                    "exceeding address space");
137         return -1;
138     }
139 
140     if (new_size == migrate_xbzrle_cache_size()) {
141         /* nothing to do */
142         return 0;
143     }
144 
145     XBZRLE_cache_lock();
146 
147     if (XBZRLE.cache != NULL) {
148         new_cache = cache_init(new_size, TARGET_PAGE_SIZE, errp);
149         if (!new_cache) {
150             ret = -1;
151             goto out;
152         }
153 
154         cache_fini(XBZRLE.cache);
155         XBZRLE.cache = new_cache;
156     }
157 out:
158     XBZRLE_cache_unlock();
159     return ret;
160 }
161 
162 static bool ramblock_is_ignored(RAMBlock *block)
163 {
164     return !qemu_ram_is_migratable(block) ||
165            (migrate_ignore_shared() && qemu_ram_is_shared(block));
166 }
167 
168 /* Should be holding either ram_list.mutex, or the RCU lock. */
169 #define RAMBLOCK_FOREACH_NOT_IGNORED(block)            \
170     INTERNAL_RAMBLOCK_FOREACH(block)                   \
171         if (ramblock_is_ignored(block)) {} else
172 
173 #define RAMBLOCK_FOREACH_MIGRATABLE(block)             \
174     INTERNAL_RAMBLOCK_FOREACH(block)                   \
175         if (!qemu_ram_is_migratable(block)) {} else
176 
177 #undef RAMBLOCK_FOREACH
178 
179 int foreach_not_ignored_block(RAMBlockIterFunc func, void *opaque)
180 {
181     RAMBlock *block;
182     int ret = 0;
183 
184     rcu_read_lock();
185     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
186         ret = func(block, opaque);
187         if (ret) {
188             break;
189         }
190     }
191     rcu_read_unlock();
192     return ret;
193 }
194 
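/*
 * Allocate the "receivedmap" bitmap (one bit per target page) for every
 * RAMBlock that is not ignored for migration.  The bitmap tracks which
 * pages have already been received on the destination.
 */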
195 static void ramblock_recv_map_init(void)
196 {
197     RAMBlock *rb;
198 
199     RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
200         assert(!rb->receivedmap);
201         rb->receivedmap = bitmap_new(rb->max_length >> qemu_target_page_bits());
202     }
203 }
204 
205 int ramblock_recv_bitmap_test(RAMBlock *rb, void *host_addr)
206 {
207     return test_bit(ramblock_recv_bitmap_offset(host_addr, rb),
208                     rb->receivedmap);
209 }
210 
211 bool ramblock_recv_bitmap_test_byte_offset(RAMBlock *rb, uint64_t byte_offset)
212 {
213     return test_bit(byte_offset >> TARGET_PAGE_BITS, rb->receivedmap);
214 }
215 
216 void ramblock_recv_bitmap_set(RAMBlock *rb, void *host_addr)
217 {
218     set_bit_atomic(ramblock_recv_bitmap_offset(host_addr, rb), rb->receivedmap);
219 }
220 
221 void ramblock_recv_bitmap_set_range(RAMBlock *rb, void *host_addr,
222                                     size_t nr)
223 {
224     bitmap_set_atomic(rb->receivedmap,
225                       ramblock_recv_bitmap_offset(host_addr, rb),
226                       nr);
227 }
228 
229 #define  RAMBLOCK_RECV_BITMAP_ENDING  (0x0123456789abcdefULL)
230 
231 /*
232  * Format: bitmap_size (8 bytes) + whole_bitmap (N bytes).
233  *
234  * Returns the number of bytes sent (>0) on success, or <0 on error.
235  */
236 int64_t ramblock_recv_bitmap_send(QEMUFile *file,
237                                   const char *block_name)
238 {
239     RAMBlock *block = qemu_ram_block_by_name(block_name);
240     unsigned long *le_bitmap, nbits;
241     uint64_t size;
242 
243     if (!block) {
244         error_report("%s: invalid block name: %s", __func__, block_name);
245         return -1;
246     }
247 
248     nbits = block->used_length >> TARGET_PAGE_BITS;
249 
250     /*
251      * Make sure the tmp bitmap buffer is big enough, e.g., on 32bit
252      * machines we may need 4 more bytes for padding (see below
253      * comment). So extend it a bit beforehand.
254      */
255     le_bitmap = bitmap_new(nbits + BITS_PER_LONG);
256 
257     /*
258      * Always use little endian when sending the bitmap. This is
259      * required so that it works even when source and destination VMs
260      * do not use the same endianness. (Note: big endian won't work.)
261      */
262     bitmap_to_le(le_bitmap, block->receivedmap, nbits);
263 
264     /* Size of the bitmap, in bytes */
265     size = DIV_ROUND_UP(nbits, 8);
266 
267     /*
268      * size is always aligned to 8 bytes for 64bit machines, but it
269      * may not be true for 32bit machines. We need this padding to
270      * make sure the migration can survive even between 32bit and
271      * 64bit machines.
272      */
273     size = ROUND_UP(size, 8);
274 
275     qemu_put_be64(file, size);
276     qemu_put_buffer(file, (const uint8_t *)le_bitmap, size);
277     /*
278      * Mark as an end, in case the middle part is screwed up due to
279      * some "misterious" reason.
280      */
281     qemu_put_be64(file, RAMBLOCK_RECV_BITMAP_ENDING);
282     qemu_fflush(file);
283 
284     g_free(le_bitmap);
285 
286     if (qemu_file_get_error(file)) {
287         return qemu_file_get_error(file);
288     }
289 
290     return size + sizeof(size);
291 }
292 
293 /*
294  * An outstanding page request, on the source, having been received
295  * and queued
296  */
297 struct RAMSrcPageRequest {
298     RAMBlock *rb;
299     hwaddr    offset;
300     hwaddr    len;
301 
302     QSIMPLEQ_ENTRY(RAMSrcPageRequest) next_req;
303 };
304 
305 /* State of RAM for migration */
306 struct RAMState {
307     /* QEMUFile used for this migration */
308     QEMUFile *f;
309     /* Last block that we have visited searching for dirty pages */
310     RAMBlock *last_seen_block;
311     /* Last block from where we have sent data */
312     RAMBlock *last_sent_block;
313     /* Last dirty target page we have sent */
314     ram_addr_t last_page;
315     /* last ram version we have seen */
316     uint32_t last_version;
317     /* We are in the first round */
318     bool ram_bulk_stage;
319     /* The free page optimization is enabled */
320     bool fpo_enabled;
321     /* How many times we have found too many dirty pages */
322     int dirty_rate_high_cnt;
323     /* these variables are used for bitmap sync */
324     /* last time we did a full bitmap_sync */
325     int64_t time_last_bitmap_sync;
326     /* bytes transferred at start_time */
327     uint64_t bytes_xfer_prev;
328     /* number of dirty pages since start_time */
329     uint64_t num_dirty_pages_period;
330     /* xbzrle misses since the beginning of the period */
331     uint64_t xbzrle_cache_miss_prev;
332 
333     /* compression statistics since the beginning of the period */
334     /* number of times there was no free thread to compress data */
335     uint64_t compress_thread_busy_prev;
336     /* number of bytes after compression */
337     uint64_t compressed_size_prev;
338     /* number of compressed pages */
339     uint64_t compress_pages_prev;
340 
341     /* total handled target pages at the beginning of period */
342     uint64_t target_page_count_prev;
343     /* total handled target pages since start */
344     uint64_t target_page_count;
345     /* number of dirty bits in the bitmap */
346     uint64_t migration_dirty_pages;
347     /* Protects modification of the bitmap and migration dirty pages */
348     QemuMutex bitmap_mutex;
349     /* The RAMBlock used in the last src_page_requests */
350     RAMBlock *last_req_rb;
351     /* Queue of outstanding page requests from the destination */
352     QemuMutex src_page_req_mutex;
353     QSIMPLEQ_HEAD(, RAMSrcPageRequest) src_page_requests;
354 };
355 typedef struct RAMState RAMState;
356 
357 static RAMState *ram_state;
358 
359 static NotifierWithReturnList precopy_notifier_list;
360 
361 void precopy_infrastructure_init(void)
362 {
363     notifier_with_return_list_init(&precopy_notifier_list);
364 }
365 
366 void precopy_add_notifier(NotifierWithReturn *n)
367 {
368     notifier_with_return_list_add(&precopy_notifier_list, n);
369 }
370 
371 void precopy_remove_notifier(NotifierWithReturn *n)
372 {
373     notifier_with_return_remove(n);
374 }
375 
376 int precopy_notify(PrecopyNotifyReason reason, Error **errp)
377 {
378     PrecopyNotifyData pnd;
379     pnd.reason = reason;
380     pnd.errp = errp;
381 
382     return notifier_with_return_list_notify(&precopy_notifier_list, &pnd);
383 }
384 
385 void precopy_enable_free_page_optimization(void)
386 {
387     if (!ram_state) {
388         return;
389     }
390 
391     ram_state->fpo_enabled = true;
392 }
393 
394 uint64_t ram_bytes_remaining(void)
395 {
396     return ram_state ? (ram_state->migration_dirty_pages * TARGET_PAGE_SIZE) :
397                        0;
398 }
399 
400 MigrationStats ram_counters;
401 
402 /* used by the search for pages to send */
403 struct PageSearchStatus {
404     /* Current block being searched */
405     RAMBlock    *block;
406     /* Current page to search from */
407     unsigned long page;
408     /* Set once we wrap around */
409     bool         complete_round;
410 };
411 typedef struct PageSearchStatus PageSearchStatus;
412 
413 CompressionStats compression_counters;
414 
415 struct CompressParam {
416     bool done;
417     bool quit;
418     bool zero_page;
419     QEMUFile *file;
420     QemuMutex mutex;
421     QemuCond cond;
422     RAMBlock *block;
423     ram_addr_t offset;
424 
425     /* internally used fields */
426     z_stream stream;
427     uint8_t *originbuf;
428 };
429 typedef struct CompressParam CompressParam;
430 
431 struct DecompressParam {
432     bool done;
433     bool quit;
434     QemuMutex mutex;
435     QemuCond cond;
436     void *des;
437     uint8_t *compbuf;
438     int len;
439     z_stream stream;
440 };
441 typedef struct DecompressParam DecompressParam;
442 
443 static CompressParam *comp_param;
444 static QemuThread *compress_threads;
445 /* comp_done_cond is used to wake up the migration thread when
446  * one of the compression threads has finished the compression.
447  * comp_done_lock is used together with comp_done_cond.
448  */
449 static QemuMutex comp_done_lock;
450 static QemuCond comp_done_cond;
451 /* The empty QEMUFileOps will be used by file in CompressParam */
452 static const QEMUFileOps empty_ops = { };
453 
454 static QEMUFile *decomp_file;
455 static DecompressParam *decomp_param;
456 static QemuThread *decompress_threads;
457 static QemuMutex decomp_done_lock;
458 static QemuCond decomp_done_cond;
459 
460 static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
461                                  ram_addr_t offset, uint8_t *source_buf);
462 
463 static void *do_data_compress(void *opaque)
464 {
465     CompressParam *param = opaque;
466     RAMBlock *block;
467     ram_addr_t offset;
468     bool zero_page;
469 
470     qemu_mutex_lock(&param->mutex);
471     while (!param->quit) {
472         if (param->block) {
473             block = param->block;
474             offset = param->offset;
475             param->block = NULL;
476             qemu_mutex_unlock(&param->mutex);
477 
478             zero_page = do_compress_ram_page(param->file, &param->stream,
479                                              block, offset, param->originbuf);
480 
481             qemu_mutex_lock(&comp_done_lock);
482             param->done = true;
483             param->zero_page = zero_page;
484             qemu_cond_signal(&comp_done_cond);
485             qemu_mutex_unlock(&comp_done_lock);
486 
487             qemu_mutex_lock(&param->mutex);
488         } else {
489             qemu_cond_wait(&param->cond, &param->mutex);
490         }
491     }
492     qemu_mutex_unlock(&param->mutex);
493 
494     return NULL;
495 }
496 
497 static void compress_threads_save_cleanup(void)
498 {
499     int i, thread_count;
500 
501     if (!migrate_use_compression() || !comp_param) {
502         return;
503     }
504 
505     thread_count = migrate_compress_threads();
506     for (i = 0; i < thread_count; i++) {
507         /*
508          * we use it as an indicator of whether the thread is
509          * properly initialized or not
510          */
511         if (!comp_param[i].file) {
512             break;
513         }
514 
515         qemu_mutex_lock(&comp_param[i].mutex);
516         comp_param[i].quit = true;
517         qemu_cond_signal(&comp_param[i].cond);
518         qemu_mutex_unlock(&comp_param[i].mutex);
519 
520         qemu_thread_join(compress_threads + i);
521         qemu_mutex_destroy(&comp_param[i].mutex);
522         qemu_cond_destroy(&comp_param[i].cond);
523         deflateEnd(&comp_param[i].stream);
524         g_free(comp_param[i].originbuf);
525         qemu_fclose(comp_param[i].file);
526         comp_param[i].file = NULL;
527     }
528     qemu_mutex_destroy(&comp_done_lock);
529     qemu_cond_destroy(&comp_done_cond);
530     g_free(compress_threads);
531     g_free(comp_param);
532     compress_threads = NULL;
533     comp_param = NULL;
534 }
535 
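/*
 * Start one worker per configured compression thread.  Each worker gets
 * its own deflate stream, a scratch copy buffer and a dummy QEMUFile to
 * buffer the compressed output.  Returns 0 on success, or -1 after
 * cleaning up any partially initialized workers.
 */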
536 static int compress_threads_save_setup(void)
537 {
538     int i, thread_count;
539 
540     if (!migrate_use_compression()) {
541         return 0;
542     }
543     thread_count = migrate_compress_threads();
544     compress_threads = g_new0(QemuThread, thread_count);
545     comp_param = g_new0(CompressParam, thread_count);
546     qemu_cond_init(&comp_done_cond);
547     qemu_mutex_init(&comp_done_lock);
548     for (i = 0; i < thread_count; i++) {
549         comp_param[i].originbuf = g_try_malloc(TARGET_PAGE_SIZE);
550         if (!comp_param[i].originbuf) {
551             goto exit;
552         }
553 
554         if (deflateInit(&comp_param[i].stream,
555                         migrate_compress_level()) != Z_OK) {
556             g_free(comp_param[i].originbuf);
557             goto exit;
558         }
559 
560         /* comp_param[i].file is just used as a dummy buffer to save data,
561          * set its ops to empty.
562          */
563         comp_param[i].file = qemu_fopen_ops(NULL, &empty_ops);
564         comp_param[i].done = true;
565         comp_param[i].quit = false;
566         qemu_mutex_init(&comp_param[i].mutex);
567         qemu_cond_init(&comp_param[i].cond);
568         qemu_thread_create(compress_threads + i, "compress",
569                            do_data_compress, comp_param + i,
570                            QEMU_THREAD_JOINABLE);
571     }
572     return 0;
573 
574 exit:
575     compress_threads_save_cleanup();
576     return -1;
577 }
578 
579 /* Multiple fd's */
580 
581 #define MULTIFD_MAGIC 0x11223344U
582 #define MULTIFD_VERSION 1
583 
584 #define MULTIFD_FLAG_SYNC (1 << 0)
585 
586 /* This value needs to be a multiple of qemu_target_page_size() */
587 #define MULTIFD_PACKET_SIZE (512 * 1024)
588 
589 typedef struct {
590     uint32_t magic;
591     uint32_t version;
592     unsigned char uuid[16]; /* QemuUUID */
593     uint8_t id;
594     uint8_t unused1[7];     /* Reserved for future use */
595     uint64_t unused2[4];    /* Reserved for future use */
596 } __attribute__((packed)) MultiFDInit_t;
597 
598 typedef struct {
599     uint32_t magic;
600     uint32_t version;
601     uint32_t flags;
602     /* maximum number of allocated pages */
603     uint32_t pages_alloc;
604     uint32_t pages_used;
605     /* size of the next packet that contains pages */
606     uint32_t next_packet_size;
607     uint64_t packet_num;
608     uint64_t unused[4];    /* Reserved for future use */
609     char ramblock[256];
610     uint64_t offset[];
611 } __attribute__((packed)) MultiFDPacket_t;
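/*
 * MultiFDPacket_t is the per-batch header that precedes the page data on
 * each multifd channel; all multi-byte fields are sent in big-endian
 * (see multifd_send_fill_packet() / multifd_recv_unfill_packet()).
 */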
612 
613 typedef struct {
614     /* number of used pages */
615     uint32_t used;
616     /* number of allocated pages */
617     uint32_t allocated;
618     /* global number of generated multifd packets */
619     uint64_t packet_num;
620     /* offset of each page */
621     ram_addr_t *offset;
622     /* pointer to each page */
623     struct iovec *iov;
624     RAMBlock *block;
625 } MultiFDPages_t;
626 
627 typedef struct {
628     /* these fields are not changed once the thread is created */
629     /* channel number */
630     uint8_t id;
631     /* channel thread name */
632     char *name;
633     /* channel thread id */
634     QemuThread thread;
635     /* communication channel */
636     QIOChannel *c;
637     /* sem where to wait for more work */
638     QemuSemaphore sem;
639     /* this mutex protects the following parameters */
640     QemuMutex mutex;
641     /* is this channel thread running */
642     bool running;
643     /* should this thread finish */
644     bool quit;
645     /* thread has work to do */
646     int pending_job;
647     /* array of pages to send */
648     MultiFDPages_t *pages;
649     /* packet allocated len */
650     uint32_t packet_len;
651     /* pointer to the packet */
652     MultiFDPacket_t *packet;
653     /* multifd flags for each packet */
654     uint32_t flags;
655     /* size of the next packet that contains pages */
656     uint32_t next_packet_size;
657     /* global number of generated multifd packets */
658     uint64_t packet_num;
659     /* thread local variables */
660     /* packets sent through this channel */
661     uint64_t num_packets;
662     /* pages sent through this channel */
663     uint64_t num_pages;
664 }  MultiFDSendParams;
665 
666 typedef struct {
667     /* these fields are not changed once the thread is created */
668     /* channel number */
669     uint8_t id;
670     /* channel thread name */
671     char *name;
672     /* channel thread id */
673     QemuThread thread;
674     /* communication channel */
675     QIOChannel *c;
676     /* this mutex protects the following parameters */
677     QemuMutex mutex;
678     /* is this channel thread running */
679     bool running;
680     /* should this thread finish */
681     bool quit;
682     /* array of pages to receive */
683     MultiFDPages_t *pages;
684     /* packet allocated len */
685     uint32_t packet_len;
686     /* pointer to the packet */
687     MultiFDPacket_t *packet;
688     /* multifd flags for each packet */
689     uint32_t flags;
690     /* global number of generated multifd packets */
691     uint64_t packet_num;
692     /* thread local variables */
693     /* size of the next packet that contains pages */
694     uint32_t next_packet_size;
695     /* packets received through this channel */
696     uint64_t num_packets;
697     /* pages received through this channel */
698     uint64_t num_pages;
699     /* syncs main thread and channels */
700     QemuSemaphore sem_sync;
701 } MultiFDRecvParams;
702 
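/*
 * Initial per-channel handshake: the sender transmits a MultiFDInit_t
 * carrying the magic, version, channel id and source VM UUID; the
 * receiver validates all of them and returns the channel id, or -1 with
 * @errp set on any mismatch.
 */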
703 static int multifd_send_initial_packet(MultiFDSendParams *p, Error **errp)
704 {
705     MultiFDInit_t msg;
706     int ret;
707 
708     msg.magic = cpu_to_be32(MULTIFD_MAGIC);
709     msg.version = cpu_to_be32(MULTIFD_VERSION);
710     msg.id = p->id;
711     memcpy(msg.uuid, &qemu_uuid.data, sizeof(msg.uuid));
712 
713     ret = qio_channel_write_all(p->c, (char *)&msg, sizeof(msg), errp);
714     if (ret != 0) {
715         return -1;
716     }
717     return 0;
718 }
719 
720 static int multifd_recv_initial_packet(QIOChannel *c, Error **errp)
721 {
722     MultiFDInit_t msg;
723     int ret;
724 
725     ret = qio_channel_read_all(c, (char *)&msg, sizeof(msg), errp);
726     if (ret != 0) {
727         return -1;
728     }
729 
730     msg.magic = be32_to_cpu(msg.magic);
731     msg.version = be32_to_cpu(msg.version);
732 
733     if (msg.magic != MULTIFD_MAGIC) {
734         error_setg(errp, "multifd: received packet magic %x "
735                    "expected %x", msg.magic, MULTIFD_MAGIC);
736         return -1;
737     }
738 
739     if (msg.version != MULTIFD_VERSION) {
740         error_setg(errp, "multifd: received packet version %d "
741                    "expected %d", msg.version, MULTIFD_VERSION);
742         return -1;
743     }
744 
745     if (memcmp(msg.uuid, &qemu_uuid, sizeof(qemu_uuid))) {
746         char *uuid = qemu_uuid_unparse_strdup(&qemu_uuid);
747         char *msg_uuid = qemu_uuid_unparse_strdup((const QemuUUID *)msg.uuid);
748 
749         error_setg(errp, "multifd: received uuid '%s' and expected "
750                    "uuid '%s' for channel %hhd", msg_uuid, uuid, msg.id);
751         g_free(uuid);
752         g_free(msg_uuid);
753         return -1;
754     }
755 
756     if (msg.id > migrate_multifd_channels()) {
757         error_setg(errp, "multifd: received channel id %d is greater "
758                    "than number of channels %d", msg.id, migrate_multifd_channels());
759         return -1;
760     }
761 
762     return msg.id;
763 }
764 
765 static MultiFDPages_t *multifd_pages_init(size_t size)
766 {
767     MultiFDPages_t *pages = g_new0(MultiFDPages_t, 1);
768 
769     pages->allocated = size;
770     pages->iov = g_new0(struct iovec, size);
771     pages->offset = g_new0(ram_addr_t, size);
772 
773     return pages;
774 }
775 
776 static void multifd_pages_clear(MultiFDPages_t *pages)
777 {
778     pages->used = 0;
779     pages->allocated = 0;
780     pages->packet_num = 0;
781     pages->block = NULL;
782     g_free(pages->iov);
783     pages->iov = NULL;
784     g_free(pages->offset);
785     pages->offset = NULL;
786     g_free(pages);
787 }
788 
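/*
 * Fill the wire header for the current batch from the channel state,
 * converting every field to big-endian and copying the RAMBlock name
 * and page offsets.
 */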
789 static void multifd_send_fill_packet(MultiFDSendParams *p)
790 {
791     MultiFDPacket_t *packet = p->packet;
792     uint32_t page_max = MULTIFD_PACKET_SIZE / qemu_target_page_size();
793     int i;
794 
795     packet->magic = cpu_to_be32(MULTIFD_MAGIC);
796     packet->version = cpu_to_be32(MULTIFD_VERSION);
797     packet->flags = cpu_to_be32(p->flags);
798     packet->pages_alloc = cpu_to_be32(page_max);
799     packet->pages_used = cpu_to_be32(p->pages->used);
800     packet->next_packet_size = cpu_to_be32(p->next_packet_size);
801     packet->packet_num = cpu_to_be64(p->packet_num);
802 
803     if (p->pages->block) {
804         strncpy(packet->ramblock, p->pages->block->idstr, 256);
805     }
806 
807     for (i = 0; i < p->pages->used; i++) {
808         packet->offset[i] = cpu_to_be64(p->pages->offset[i]);
809     }
810 }
811 
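/*
 * Parse and validate a received packet header: check magic and version,
 * bounds-check pages_alloc and pages_used, look up the RAMBlock and
 * build the iovec pointing at the host pages the data will land in.
 * Returns 0 on success, -1 with @errp set on error.
 */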
812 static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
813 {
814     MultiFDPacket_t *packet = p->packet;
815     uint32_t pages_max = MULTIFD_PACKET_SIZE / qemu_target_page_size();
816     RAMBlock *block;
817     int i;
818 
819     packet->magic = be32_to_cpu(packet->magic);
820     if (packet->magic != MULTIFD_MAGIC) {
821         error_setg(errp, "multifd: received packet "
822                    "magic %x and expected magic %x",
823                    packet->magic, MULTIFD_MAGIC);
824         return -1;
825     }
826 
827     packet->version = be32_to_cpu(packet->version);
828     if (packet->version != MULTIFD_VERSION) {
829         error_setg(errp, "multifd: received packet "
830                    "version %d and expected version %d",
831                    packet->version, MULTIFD_VERSION);
832         return -1;
833     }
834 
835     p->flags = be32_to_cpu(packet->flags);
836 
837     packet->pages_alloc = be32_to_cpu(packet->pages_alloc);
838     /*
839      * If we received a packet that is 100 times bigger than expected
840      * just stop migration.  The factor of 100 is an arbitrary limit.
841      */
842     if (packet->pages_alloc > pages_max * 100) {
843         error_setg(errp, "multifd: received packet "
844                    "with size %d and expected a maximum size of %d",
845                    packet->pages_alloc, pages_max * 100) ;
846         return -1;
847     }
848     /*
849      * We received a packet that is bigger than expected but inside
850      * reasonable limits (see previous comment).  Just reallocate.
851      */
852     if (packet->pages_alloc > p->pages->allocated) {
853         multifd_pages_clear(p->pages);
854         p->pages = multifd_pages_init(packet->pages_alloc);
855     }
856 
857     p->pages->used = be32_to_cpu(packet->pages_used);
858     if (p->pages->used > packet->pages_alloc) {
859         error_setg(errp, "multifd: received packet "
860                    "with %d pages and expected maximum pages are %d",
861                    p->pages->used, packet->pages_alloc) ;
862         return -1;
863     }
864 
865     p->next_packet_size = be32_to_cpu(packet->next_packet_size);
866     p->packet_num = be64_to_cpu(packet->packet_num);
867 
868     if (p->pages->used) {
869         /* make sure that ramblock is 0 terminated */
870         packet->ramblock[255] = 0;
871         block = qemu_ram_block_by_name(packet->ramblock);
872         if (!block) {
873             error_setg(errp, "multifd: unknown ram block %s",
874                        packet->ramblock);
875             return -1;
876         }
877     }
878 
879     for (i = 0; i < p->pages->used; i++) {
880         ram_addr_t offset = be64_to_cpu(packet->offset[i]);
881 
882         if (offset > (block->used_length - TARGET_PAGE_SIZE)) {
883             error_setg(errp, "multifd: offset too long " RAM_ADDR_FMT
884                        " (max " RAM_ADDR_FMT ")",
885                        offset, block->used_length);
886             return -1;
887         }
888         p->pages->iov[i].iov_base = block->host + offset;
889         p->pages->iov[i].iov_len = TARGET_PAGE_SIZE;
890     }
891 
892     return 0;
893 }
894 
895 struct {
896     MultiFDSendParams *params;
897     /* array of pages to send */
898     MultiFDPages_t *pages;
899     /* syncs main thread and channels */
900     QemuSemaphore sem_sync;
901     /* global number of generated multifd packets */
902     uint64_t packet_num;
903     /* send channels ready */
904     QemuSemaphore channels_ready;
905 } *multifd_send_state;
906 
907 /*
908  * How do we use multifd_send_state->pages and channel->pages?
909  *
910  * We create a "pages" struct for each channel, plus a main one.  Each
911  * time we need to send a batch of pages, we swap the main one with the
912  * one belonging to the channel that is going to send it.  There are
913  * two reasons for that:
914  *    - to avoid doing so many mallocs during migration
915  *    - to make it easier to know what to free at the end of migration
916  *
917  * This way we always know who owns each "pages" struct, and we don't
918  * need any locking.  It belongs either to the migration thread or to
919  * the channel thread.  Swapping is safe because the migration thread
920  * holds the channel mutex while changing it, and the channel thread
921  * must have finished with its own copy, otherwise pending_job could
922  * not be zero.
923  */
924 
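/*
 * Hand the pending batch to an idle channel: pick the next channel
 * without a pending job, swap multifd_send_state->pages with that
 * channel's pages (see the comment above), account the transferred
 * bytes and kick the channel thread.  Returns 1 on success, -1 if a
 * channel has already quit.
 */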
925 static int multifd_send_pages(void)
926 {
927     int i;
928     static int next_channel;
929     MultiFDSendParams *p = NULL; /* make happy gcc */
930     MultiFDPages_t *pages = multifd_send_state->pages;
931     uint64_t transferred;
932 
933     qemu_sem_wait(&multifd_send_state->channels_ready);
934     for (i = next_channel;; i = (i + 1) % migrate_multifd_channels()) {
935         p = &multifd_send_state->params[i];
936 
937         qemu_mutex_lock(&p->mutex);
938         if (p->quit) {
939             error_report("%s: channel %d has already quit!", __func__, i);
940             qemu_mutex_unlock(&p->mutex);
941             return -1;
942         }
943         if (!p->pending_job) {
944             p->pending_job++;
945             next_channel = (i + 1) % migrate_multifd_channels();
946             break;
947         }
948         qemu_mutex_unlock(&p->mutex);
949     }
950     p->pages->used = 0;
951 
952     p->packet_num = multifd_send_state->packet_num++;
953     p->pages->block = NULL;
954     multifd_send_state->pages = p->pages;
955     p->pages = pages;
956     transferred = ((uint64_t) pages->used) * TARGET_PAGE_SIZE + p->packet_len;
957     ram_counters.multifd_bytes += transferred;
958     ram_counters.transferred += transferred;
959     qemu_mutex_unlock(&p->mutex);
960     qemu_sem_post(&p->sem);
961 
962     return 1;
963 }
964 
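/*
 * Queue one page for multifd sending.  Pages are batched per RAMBlock;
 * the batch is flushed via multifd_send_pages() when it is full or when
 * the RAMBlock changes.  Returns 1 on success, -1 on error.
 */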
965 static int multifd_queue_page(RAMBlock *block, ram_addr_t offset)
966 {
967     MultiFDPages_t *pages = multifd_send_state->pages;
968 
969     if (!pages->block) {
970         pages->block = block;
971     }
972 
973     if (pages->block == block) {
974         pages->offset[pages->used] = offset;
975         pages->iov[pages->used].iov_base = block->host + offset;
976         pages->iov[pages->used].iov_len = TARGET_PAGE_SIZE;
977         pages->used++;
978 
979         if (pages->used < pages->allocated) {
980             return 1;
981         }
982     }
983 
984     if (multifd_send_pages() < 0) {
985         return -1;
986     }
987 
988     if (pages->block != block) {
989         return  multifd_queue_page(block, offset);
990     }
991 
992     return 1;
993 }
994 
995 static void multifd_send_terminate_threads(Error *err)
996 {
997     int i;
998 
999     if (err) {
1000         MigrationState *s = migrate_get_current();
1001         migrate_set_error(s, err);
1002         if (s->state == MIGRATION_STATUS_SETUP ||
1003             s->state == MIGRATION_STATUS_PRE_SWITCHOVER ||
1004             s->state == MIGRATION_STATUS_DEVICE ||
1005             s->state == MIGRATION_STATUS_ACTIVE) {
1006             migrate_set_state(&s->state, s->state,
1007                               MIGRATION_STATUS_FAILED);
1008         }
1009     }
1010 
1011     for (i = 0; i < migrate_multifd_channels(); i++) {
1012         MultiFDSendParams *p = &multifd_send_state->params[i];
1013 
1014         qemu_mutex_lock(&p->mutex);
1015         p->quit = true;
1016         qemu_sem_post(&p->sem);
1017         qemu_mutex_unlock(&p->mutex);
1018     }
1019 }
1020 
1021 void multifd_save_cleanup(void)
1022 {
1023     int i;
1024 
1025     if (!migrate_use_multifd()) {
1026         return;
1027     }
1028     multifd_send_terminate_threads(NULL);
1029     for (i = 0; i < migrate_multifd_channels(); i++) {
1030         MultiFDSendParams *p = &multifd_send_state->params[i];
1031 
1032         if (p->running) {
1033             qemu_thread_join(&p->thread);
1034         }
1035         socket_send_channel_destroy(p->c);
1036         p->c = NULL;
1037         qemu_mutex_destroy(&p->mutex);
1038         qemu_sem_destroy(&p->sem);
1039         g_free(p->name);
1040         p->name = NULL;
1041         multifd_pages_clear(p->pages);
1042         p->pages = NULL;
1043         p->packet_len = 0;
1044         g_free(p->packet);
1045         p->packet = NULL;
1046     }
1047     qemu_sem_destroy(&multifd_send_state->channels_ready);
1048     qemu_sem_destroy(&multifd_send_state->sem_sync);
1049     g_free(multifd_send_state->params);
1050     multifd_send_state->params = NULL;
1051     multifd_pages_clear(multifd_send_state->pages);
1052     multifd_send_state->pages = NULL;
1053     g_free(multifd_send_state);
1054     multifd_send_state = NULL;
1055 }
1056 
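/*
 * Flush any pending pages, then make every send channel emit a packet
 * with MULTIFD_FLAG_SYNC set and wait on sem_sync until all channels
 * have done so, keeping the main migration stream and the multifd
 * channels in step.
 */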
1057 static void multifd_send_sync_main(void)
1058 {
1059     int i;
1060 
1061     if (!migrate_use_multifd()) {
1062         return;
1063     }
1064     if (multifd_send_state->pages->used) {
1065         if (multifd_send_pages() < 0) {
1066             error_report("%s: multifd_send_pages fail", __func__);
1067             return;
1068         }
1069     }
1070     for (i = 0; i < migrate_multifd_channels(); i++) {
1071         MultiFDSendParams *p = &multifd_send_state->params[i];
1072 
1073         trace_multifd_send_sync_main_signal(p->id);
1074 
1075         qemu_mutex_lock(&p->mutex);
1076 
1077         if (p->quit) {
1078             error_report("%s: channel %d has already quit", __func__, i);
1079             qemu_mutex_unlock(&p->mutex);
1080             return;
1081         }
1082 
1083         p->packet_num = multifd_send_state->packet_num++;
1084         p->flags |= MULTIFD_FLAG_SYNC;
1085         p->pending_job++;
1086         qemu_mutex_unlock(&p->mutex);
1087         qemu_sem_post(&p->sem);
1088     }
1089     for (i = 0; i < migrate_multifd_channels(); i++) {
1090         MultiFDSendParams *p = &multifd_send_state->params[i];
1091 
1092         trace_multifd_send_sync_main_wait(p->id);
1093         qemu_sem_wait(&multifd_send_state->sem_sync);
1094     }
1095     trace_multifd_send_sync_main(multifd_send_state->packet_num);
1096 }
1097 
1098 static void *multifd_send_thread(void *opaque)
1099 {
1100     MultiFDSendParams *p = opaque;
1101     Error *local_err = NULL;
1102     int ret = 0;
1103     uint32_t flags = 0;
1104 
1105     trace_multifd_send_thread_start(p->id);
1106     rcu_register_thread();
1107 
1108     if (multifd_send_initial_packet(p, &local_err) < 0) {
1109         goto out;
1110     }
1111     /* initial packet */
1112     p->num_packets = 1;
1113 
1114     while (true) {
1115         qemu_sem_wait(&p->sem);
1116         qemu_mutex_lock(&p->mutex);
1117 
1118         if (p->pending_job) {
1119             uint32_t used = p->pages->used;
1120             uint64_t packet_num = p->packet_num;
1121             flags = p->flags;
1122 
1123             p->next_packet_size = used * qemu_target_page_size();
1124             multifd_send_fill_packet(p);
1125             p->flags = 0;
1126             p->num_packets++;
1127             p->num_pages += used;
1128             p->pages->used = 0;
1129             qemu_mutex_unlock(&p->mutex);
1130 
1131             trace_multifd_send(p->id, packet_num, used, flags,
1132                                p->next_packet_size);
1133 
1134             ret = qio_channel_write_all(p->c, (void *)p->packet,
1135                                         p->packet_len, &local_err);
1136             if (ret != 0) {
1137                 break;
1138             }
1139 
1140             if (used) {
1141                 ret = qio_channel_writev_all(p->c, p->pages->iov,
1142                                              used, &local_err);
1143                 if (ret != 0) {
1144                     break;
1145                 }
1146             }
1147 
1148             qemu_mutex_lock(&p->mutex);
1149             p->pending_job--;
1150             qemu_mutex_unlock(&p->mutex);
1151 
1152             if (flags & MULTIFD_FLAG_SYNC) {
1153                 qemu_sem_post(&multifd_send_state->sem_sync);
1154             }
1155             qemu_sem_post(&multifd_send_state->channels_ready);
1156         } else if (p->quit) {
1157             qemu_mutex_unlock(&p->mutex);
1158             break;
1159         } else {
1160             qemu_mutex_unlock(&p->mutex);
1161             /* sometimes there are spurious wakeups */
1162         }
1163     }
1164 
1165 out:
1166     if (local_err) {
1167         multifd_send_terminate_threads(local_err);
1168     }
1169 
1170     /*
1171      * An error happened and we are about to exit, but we can't just
1172      * leave: wake up whoever is waiting on us.
1173      */
1174     if (ret != 0) {
1175         if (flags & MULTIFD_FLAG_SYNC) {
1176             qemu_sem_post(&multifd_send_state->sem_sync);
1177         }
1178         qemu_sem_post(&multifd_send_state->channels_ready);
1179     }
1180 
1181     qemu_mutex_lock(&p->mutex);
1182     p->running = false;
1183     qemu_mutex_unlock(&p->mutex);
1184 
1185     rcu_unregister_thread();
1186     trace_multifd_send_thread_end(p->id, p->num_packets, p->num_pages);
1187 
1188     return NULL;
1189 }
1190 
1191 static void multifd_new_send_channel_async(QIOTask *task, gpointer opaque)
1192 {
1193     MultiFDSendParams *p = opaque;
1194     QIOChannel *sioc = QIO_CHANNEL(qio_task_get_source(task));
1195     Error *local_err = NULL;
1196 
1197     if (qio_task_propagate_error(task, &local_err)) {
1198         migrate_set_error(migrate_get_current(), local_err);
1199         multifd_save_cleanup();
1200     } else {
1201         p->c = QIO_CHANNEL(sioc);
1202         qio_channel_set_delay(p->c, false);
1203         p->running = true;
1204         qemu_thread_create(&p->thread, p->name, multifd_send_thread, p,
1205                            QEMU_THREAD_JOINABLE);
1206     }
1207 }
1208 
1209 int multifd_save_setup(void)
1210 {
1211     int thread_count;
1212     uint32_t page_count = MULTIFD_PACKET_SIZE / qemu_target_page_size();
1213     uint8_t i;
1214 
1215     if (!migrate_use_multifd()) {
1216         return 0;
1217     }
1218     thread_count = migrate_multifd_channels();
1219     multifd_send_state = g_malloc0(sizeof(*multifd_send_state));
1220     multifd_send_state->params = g_new0(MultiFDSendParams, thread_count);
1221     multifd_send_state->pages = multifd_pages_init(page_count);
1222     qemu_sem_init(&multifd_send_state->sem_sync, 0);
1223     qemu_sem_init(&multifd_send_state->channels_ready, 0);
1224 
1225     for (i = 0; i < thread_count; i++) {
1226         MultiFDSendParams *p = &multifd_send_state->params[i];
1227 
1228         qemu_mutex_init(&p->mutex);
1229         qemu_sem_init(&p->sem, 0);
1230         p->quit = false;
1231         p->pending_job = 0;
1232         p->id = i;
1233         p->pages = multifd_pages_init(page_count);
1234         p->packet_len = sizeof(MultiFDPacket_t)
1235                       + sizeof(ram_addr_t) * page_count;
1236         p->packet = g_malloc0(p->packet_len);
1237         p->name = g_strdup_printf("multifdsend_%d", i);
1238         socket_send_channel_create(multifd_new_send_channel_async, p);
1239     }
1240     return 0;
1241 }
1242 
1243 struct {
1244     MultiFDRecvParams *params;
1245     /* number of created threads */
1246     int count;
1247     /* syncs main thread and channels */
1248     QemuSemaphore sem_sync;
1249     /* global number of generated multifd packets */
1250     uint64_t packet_num;
1251 } *multifd_recv_state;
1252 
1253 static void multifd_recv_terminate_threads(Error *err)
1254 {
1255     int i;
1256 
1257     if (err) {
1258         MigrationState *s = migrate_get_current();
1259         migrate_set_error(s, err);
1260         if (s->state == MIGRATION_STATUS_SETUP ||
1261             s->state == MIGRATION_STATUS_ACTIVE) {
1262             migrate_set_state(&s->state, s->state,
1263                               MIGRATION_STATUS_FAILED);
1264         }
1265     }
1266 
1267     for (i = 0; i < migrate_multifd_channels(); i++) {
1268         MultiFDRecvParams *p = &multifd_recv_state->params[i];
1269 
1270         qemu_mutex_lock(&p->mutex);
1271         p->quit = true;
1272         /* We could arrive here for two reasons:
1273            - normal quit, i.e. everything went fine, just finished
1274            - error quit: We close the channels so the channel threads
1275              finish the qio_channel_read_all_eof() */
1276         qio_channel_shutdown(p->c, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
1277         qemu_mutex_unlock(&p->mutex);
1278     }
1279 }
1280 
1281 int multifd_load_cleanup(Error **errp)
1282 {
1283     int i;
1284     int ret = 0;
1285 
1286     if (!migrate_use_multifd()) {
1287         return 0;
1288     }
1289     multifd_recv_terminate_threads(NULL);
1290     for (i = 0; i < migrate_multifd_channels(); i++) {
1291         MultiFDRecvParams *p = &multifd_recv_state->params[i];
1292 
1293         if (p->running) {
1294             p->quit = true;
1295             /*
1296              * multifd_recv_thread may be blocked in the MULTIFD_FLAG_SYNC
1297              * handling code; waking it up here is harmless during cleanup.
1298              */
1299             qemu_sem_post(&p->sem_sync);
1300             qemu_thread_join(&p->thread);
1301         }
1302         object_unref(OBJECT(p->c));
1303         p->c = NULL;
1304         qemu_mutex_destroy(&p->mutex);
1305         qemu_sem_destroy(&p->sem_sync);
1306         g_free(p->name);
1307         p->name = NULL;
1308         multifd_pages_clear(p->pages);
1309         p->pages = NULL;
1310         p->packet_len = 0;
1311         g_free(p->packet);
1312         p->packet = NULL;
1313     }
1314     qemu_sem_destroy(&multifd_recv_state->sem_sync);
1315     g_free(multifd_recv_state->params);
1316     multifd_recv_state->params = NULL;
1317     g_free(multifd_recv_state);
1318     multifd_recv_state = NULL;
1319 
1320     return ret;
1321 }
1322 
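/*
 * Wait until every receive channel has seen a MULTIFD_FLAG_SYNC packet,
 * record the highest packet number observed, and then release all the
 * channels again via their sem_sync semaphores.
 */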
1323 static void multifd_recv_sync_main(void)
1324 {
1325     int i;
1326 
1327     if (!migrate_use_multifd()) {
1328         return;
1329     }
1330     for (i = 0; i < migrate_multifd_channels(); i++) {
1331         MultiFDRecvParams *p = &multifd_recv_state->params[i];
1332 
1333         trace_multifd_recv_sync_main_wait(p->id);
1334         qemu_sem_wait(&multifd_recv_state->sem_sync);
1335     }
1336     for (i = 0; i < migrate_multifd_channels(); i++) {
1337         MultiFDRecvParams *p = &multifd_recv_state->params[i];
1338 
1339         qemu_mutex_lock(&p->mutex);
1340         if (multifd_recv_state->packet_num < p->packet_num) {
1341             multifd_recv_state->packet_num = p->packet_num;
1342         }
1343         qemu_mutex_unlock(&p->mutex);
1344         trace_multifd_recv_sync_main_signal(p->id);
1345         qemu_sem_post(&p->sem_sync);
1346     }
1347     trace_multifd_recv_sync_main(multifd_recv_state->packet_num);
1348 }
1349 
1350 static void *multifd_recv_thread(void *opaque)
1351 {
1352     MultiFDRecvParams *p = opaque;
1353     Error *local_err = NULL;
1354     int ret;
1355 
1356     trace_multifd_recv_thread_start(p->id);
1357     rcu_register_thread();
1358 
1359     while (true) {
1360         uint32_t used;
1361         uint32_t flags;
1362 
1363         if (p->quit) {
1364             break;
1365         }
1366 
1367         ret = qio_channel_read_all_eof(p->c, (void *)p->packet,
1368                                        p->packet_len, &local_err);
1369         if (ret == 0) {   /* EOF */
1370             break;
1371         }
1372         if (ret == -1) {   /* Error */
1373             break;
1374         }
1375 
1376         qemu_mutex_lock(&p->mutex);
1377         ret = multifd_recv_unfill_packet(p, &local_err);
1378         if (ret) {
1379             qemu_mutex_unlock(&p->mutex);
1380             break;
1381         }
1382 
1383         used = p->pages->used;
1384         flags = p->flags;
1385         trace_multifd_recv(p->id, p->packet_num, used, flags,
1386                            p->next_packet_size);
1387         p->num_packets++;
1388         p->num_pages += used;
1389         qemu_mutex_unlock(&p->mutex);
1390 
1391         if (used) {
1392             ret = qio_channel_readv_all(p->c, p->pages->iov,
1393                                         used, &local_err);
1394             if (ret != 0) {
1395                 break;
1396             }
1397         }
1398 
1399         if (flags & MULTIFD_FLAG_SYNC) {
1400             qemu_sem_post(&multifd_recv_state->sem_sync);
1401             qemu_sem_wait(&p->sem_sync);
1402         }
1403     }
1404 
1405     if (local_err) {
1406         multifd_recv_terminate_threads(local_err);
1407     }
1408     qemu_mutex_lock(&p->mutex);
1409     p->running = false;
1410     qemu_mutex_unlock(&p->mutex);
1411 
1412     rcu_unregister_thread();
1413     trace_multifd_recv_thread_end(p->id, p->num_packets, p->num_pages);
1414 
1415     return NULL;
1416 }
1417 
1418 int multifd_load_setup(void)
1419 {
1420     int thread_count;
1421     uint32_t page_count = MULTIFD_PACKET_SIZE / qemu_target_page_size();
1422     uint8_t i;
1423 
1424     if (!migrate_use_multifd()) {
1425         return 0;
1426     }
1427     thread_count = migrate_multifd_channels();
1428     multifd_recv_state = g_malloc0(sizeof(*multifd_recv_state));
1429     multifd_recv_state->params = g_new0(MultiFDRecvParams, thread_count);
1430     atomic_set(&multifd_recv_state->count, 0);
1431     qemu_sem_init(&multifd_recv_state->sem_sync, 0);
1432 
1433     for (i = 0; i < thread_count; i++) {
1434         MultiFDRecvParams *p = &multifd_recv_state->params[i];
1435 
1436         qemu_mutex_init(&p->mutex);
1437         qemu_sem_init(&p->sem_sync, 0);
1438         p->quit = false;
1439         p->id = i;
1440         p->pages = multifd_pages_init(page_count);
1441         p->packet_len = sizeof(MultiFDPacket_t)
1442                       + sizeof(ram_addr_t) * page_count;
1443         p->packet = g_malloc0(p->packet_len);
1444         p->name = g_strdup_printf("multifdrecv_%d", i);
1445     }
1446     return 0;
1447 }
1448 
1449 bool multifd_recv_all_channels_created(void)
1450 {
1451     int thread_count = migrate_multifd_channels();
1452 
1453     if (!migrate_use_multifd()) {
1454         return true;
1455     }
1456 
1457     return thread_count == atomic_read(&multifd_recv_state->count);
1458 }
1459 
1460 /*
1461  * Try to receive all multifd channels to get ready for the migration.
1462  * - Return true and do not set @errp when correctly receiving all channels;
1463  * - Return false and do not set @errp when correctly receiving the current one;
1464  * - Return false and set @errp when failing to receive the current channel.
1465  */
1466 bool multifd_recv_new_channel(QIOChannel *ioc, Error **errp)
1467 {
1468     MultiFDRecvParams *p;
1469     Error *local_err = NULL;
1470     int id;
1471 
1472     id = multifd_recv_initial_packet(ioc, &local_err);
1473     if (id < 0) {
1474         multifd_recv_terminate_threads(local_err);
1475         error_propagate_prepend(errp, local_err,
1476                                 "failed to receive packet"
1477                                 " via multifd channel %d: ",
1478                                 atomic_read(&multifd_recv_state->count));
1479         return false;
1480     }
1481 
1482     p = &multifd_recv_state->params[id];
1483     if (p->c != NULL) {
1484         error_setg(&local_err, "multifd: received id '%d' is already set up",
1485                    id);
1486         multifd_recv_terminate_threads(local_err);
1487         error_propagate(errp, local_err);
1488         return false;
1489     }
1490     p->c = ioc;
1491     object_ref(OBJECT(ioc));
1492     /* initial packet */
1493     p->num_packets = 1;
1494 
1495     p->running = true;
1496     qemu_thread_create(&p->thread, p->name, multifd_recv_thread, p,
1497                        QEMU_THREAD_JOINABLE);
1498     atomic_inc(&multifd_recv_state->count);
1499     return atomic_read(&multifd_recv_state->count) ==
1500            migrate_multifd_channels();
1501 }
1502 
1503 /**
1504  * save_page_header: write page header to wire
1505  *
1506  * If this is the first block, it also writes the block identification
1507  *
1508  * Returns the number of bytes written
1509  * @rs: current RAM state
1510  * @f: QEMUFile where to send the data
1511  * @block: block that contains the page we want to send
1512  * @offset: offset inside the block for the page
1513  *          the lower bits contain migration flags
1514  */
1515 static size_t save_page_header(RAMState *rs, QEMUFile *f,  RAMBlock *block,
1516                                ram_addr_t offset)
1517 {
1518     size_t size, len;
1519 
1520     if (block == rs->last_sent_block) {
1521         offset |= RAM_SAVE_FLAG_CONTINUE;
1522     }
1523     qemu_put_be64(f, offset);
1524     size = 8;
1525 
1526     if (!(offset & RAM_SAVE_FLAG_CONTINUE)) {
1527         len = strlen(block->idstr);
1528         qemu_put_byte(f, len);
1529         qemu_put_buffer(f, (uint8_t *)block->idstr, len);
1530         size += 1 + len;
1531         rs->last_sent_block = block;
1532     }
1533     return size;
1534 }
1535 
1536 /**
1537  * mig_throttle_guest_down: throttle down the guest
1538  *
1539  * Reduce amount of guest cpu execution to hopefully slow down memory
1540  * writes. If guest dirty memory rate is reduced below the rate at
1541  * which we can transfer pages to the destination then we should be
1542  * able to complete migration. Some workloads dirty memory way too
1543  * fast and will not effectively converge, even with auto-converge.
1544  */
1545 static void mig_throttle_guest_down(void)
1546 {
1547     MigrationState *s = migrate_get_current();
1548     uint64_t pct_initial = s->parameters.cpu_throttle_initial;
1549     uint64_t pct_icrement = s->parameters.cpu_throttle_increment;
1550     int pct_max = s->parameters.max_cpu_throttle;
1551 
1552     /* We have not started throttling yet. Let's start it. */
1553     if (!cpu_throttle_active()) {
1554         cpu_throttle_set(pct_initial);
1555     } else {
1556         /* Throttling already on, just increase the rate */
1557         cpu_throttle_set(MIN(cpu_throttle_get_percentage() + pct_icrement,
1558                          pct_max));
1559     }
1560 }
1561 
1562 /**
1563  * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache
1564  *
1565  * @rs: current RAM state
1566  * @current_addr: address for the zero page
1567  *
1568  * Update the xbzrle cache to reflect a page that's been sent as all 0.
1569  * The important thing is that a stale (not-yet-0'd) page be replaced
1570  * by the new data.
1571  * As a bonus, if the page wasn't in the cache it gets added so that
1572  * when a small write is made into the 0'd page it gets XBZRLE sent.
1573  */
1574 static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
1575 {
1576     if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
1577         return;
1578     }
1579 
1580     /* We don't care if this fails to allocate a new cache page
1581      * as long as it updated an old one */
1582     cache_insert(XBZRLE.cache, current_addr, XBZRLE.zero_target_page,
1583                  ram_counters.dirty_sync_count);
1584 }
1585 
1586 #define ENCODING_FLAG_XBZRLE 0x1
1587 
1588 /**
1589  * save_xbzrle_page: compress and send current page
1590  *
1591  * Returns: 1 means that we wrote the page
1592  *          0 means that page is identical to the one already sent
1593  *          -1 means that xbzrle would be longer than normal
1594  *
1595  * @rs: current RAM state
1596  * @current_data: pointer to the address of the page contents
1597  * @current_addr: addr of the page
1598  * @block: block that contains the page we want to send
1599  * @offset: offset inside the block for the page
1600  * @last_stage: if we are at the completion stage
1601  */
1602 static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
1603                             ram_addr_t current_addr, RAMBlock *block,
1604                             ram_addr_t offset, bool last_stage)
1605 {
1606     int encoded_len = 0, bytes_xbzrle;
1607     uint8_t *prev_cached_page;
1608 
1609     if (!cache_is_cached(XBZRLE.cache, current_addr,
1610                          ram_counters.dirty_sync_count)) {
1611         xbzrle_counters.cache_miss++;
1612         if (!last_stage) {
1613             if (cache_insert(XBZRLE.cache, current_addr, *current_data,
1614                              ram_counters.dirty_sync_count) == -1) {
1615                 return -1;
1616             } else {
1617                 /* update *current_data when the page has been
1618                    inserted into cache */
1619                 *current_data = get_cached_data(XBZRLE.cache, current_addr);
1620             }
1621         }
1622         return -1;
1623     }
1624 
1625     prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);
1626 
1627     /* save current buffer into memory */
1628     memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);
1629 
1630     /* XBZRLE encoding (if there is no overflow) */
1631     encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
1632                                        TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
1633                                        TARGET_PAGE_SIZE);
1634 
1635     /*
1636      * Update the cache contents, so that it corresponds to the data
1637      * sent, in all cases except where we skip the page.
1638      */
1639     if (!last_stage && encoded_len != 0) {
1640         memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
1641         /*
1642          * In the case where we couldn't compress, ensure that the caller
1643          * sends the data from the cache, since the guest might have
1644          * changed the RAM since we copied it.
1645          */
1646         *current_data = prev_cached_page;
1647     }
1648 
1649     if (encoded_len == 0) {
1650         trace_save_xbzrle_page_skipping();
1651         return 0;
1652     } else if (encoded_len == -1) {
1653         trace_save_xbzrle_page_overflow();
1654         xbzrle_counters.overflow++;
1655         return -1;
1656     }
1657 
1658     /* Send XBZRLE based compressed page */
1659     bytes_xbzrle = save_page_header(rs, rs->f, block,
1660                                     offset | RAM_SAVE_FLAG_XBZRLE);
1661     qemu_put_byte(rs->f, ENCODING_FLAG_XBZRLE);
1662     qemu_put_be16(rs->f, encoded_len);
1663     qemu_put_buffer(rs->f, XBZRLE.encoded_buf, encoded_len);
1664     bytes_xbzrle += encoded_len + 1 + 2;
1665     xbzrle_counters.pages++;
1666     xbzrle_counters.bytes += bytes_xbzrle;
1667     ram_counters.transferred += bytes_xbzrle;
1668 
1669     return 1;
1670 }
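
/*
 * Illustrative sketch (not part of the build): the wire format produced by
 * save_xbzrle_page() is the page header with RAM_SAVE_FLAG_XBZRLE set,
 * followed by one flag byte, a big-endian 16-bit length and the encoded
 * delta, which is why the accounting above adds "encoded_len + 1 + 2".
 * A receiver could consume it roughly like this (error handling omitted):
 *
 *     uint8_t flag = qemu_get_byte(f);            // ENCODING_FLAG_XBZRLE
 *     uint16_t len = qemu_get_be16(f);            // encoded_len
 *     qemu_get_buffer(f, encoded_buf, len);       // the XBZRLE delta
 *     xbzrle_decode_buffer(encoded_buf, len, host_page, TARGET_PAGE_SIZE);
 */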
1671 
1672 /**
1673  * migration_bitmap_find_dirty: find the next dirty page from start
1674  *
1675  * Returns the page offset within the memory region of the start of a dirty page
1676  *
1677  * @rs: current RAM state
1678  * @rb: RAMBlock where to search for dirty pages
1679  * @start: page where we start the search
1680  */
1681 static inline
1682 unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
1683                                           unsigned long start)
1684 {
1685     unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
1686     unsigned long *bitmap = rb->bmap;
1687     unsigned long next;
1688 
1689     if (ramblock_is_ignored(rb)) {
1690         return size;
1691     }
1692 
1693     /*
1694      * When the free page optimization is enabled, we need to check the bitmap
1695      * to send the non-free pages rather than all the pages in the bulk stage.
1696      */
1697     if (!rs->fpo_enabled && rs->ram_bulk_stage && start > 0) {
1698         next = start + 1;
1699     } else {
1700         next = find_next_bit(bitmap, size, start);
1701     }
1702 
1703     return next;
1704 }
1705 
1706 static inline bool migration_bitmap_clear_dirty(RAMState *rs,
1707                                                 RAMBlock *rb,
1708                                                 unsigned long page)
1709 {
1710     bool ret;
1711 
1712     qemu_mutex_lock(&rs->bitmap_mutex);
1713 
1714     /*
1715      * Clear the dirty bitmap if needed.  This _must_ be called before we
1716      * send any of the pages in the chunk, because we need to make sure we
1717      * can capture further page content changes when we sync the dirty log
1718      * the next time.  So as long as we are going to send any of the pages
1719      * in the chunk, we clear the remote dirty bitmap for all of them.
1720      * Clearing it earlier won't be a problem, but clearing it too late will.
1721      */
1722     if (rb->clear_bmap && clear_bmap_test_and_clear(rb, page)) {
1723         uint8_t shift = rb->clear_bmap_shift;
1724         hwaddr size = 1ULL << (TARGET_PAGE_BITS + shift);
1725         hwaddr start = (page << TARGET_PAGE_BITS) & (-size);
1726 
1727         /*
1728          * CLEAR_BITMAP_SHIFT_MIN should always guarantee this.  It also
1729          * makes things easier, since the start address of the small
1730          * chunk will then always be aligned to 64 pages, which means the
1731          * bitmap will always be aligned to unsigned long.  We could
1732          * probably even remove this restriction, but it is kept here
1733          * for simplicity.
1734          */
1735         assert(shift >= 6);
1736         trace_migration_bitmap_clear_dirty(rb->idstr, start, size, page);
1737         memory_region_clear_dirty_bitmap(rb->mr, start, size);
1738     }
1739 
1740     ret = test_and_clear_bit(page, rb->bmap);
1741 
1742     if (ret) {
1743         rs->migration_dirty_pages--;
1744     }
1745     qemu_mutex_unlock(&rs->bitmap_mutex);
1746 
1747     return ret;
1748 }
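
/*
 * Worked example (assuming 4KiB target pages, i.e. TARGET_PAGE_BITS == 12,
 * and clear_bmap_shift == 18): each clear_bmap bit then covers 2^18 target
 * pages, so in migration_bitmap_clear_dirty() above
 *
 *     size  = 1ULL << (12 + 18)    =  1 GiB
 *     start = (page << 12) & -size    -> guest address rounded down to 1 GiB
 *
 * i.e. one memory_region_clear_dirty_bitmap() call lazily clears the dirty
 * log for the whole 1 GiB chunk containing the page that is about to be sent.
 */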
1749 
1750 /* Called with RCU critical section */
1751 static void migration_bitmap_sync_range(RAMState *rs, RAMBlock *rb,
1752                                         ram_addr_t length)
1753 {
1754     rs->migration_dirty_pages +=
1755         cpu_physical_memory_sync_dirty_bitmap(rb, 0, length,
1756                                               &rs->num_dirty_pages_period);
1757 }
1758 
1759 /**
1760  * ram_pagesize_summary: calculate all the pagesizes of a VM
1761  *
1762  * Returns a summary bitmap of the page sizes of all RAMBlocks
1763  *
1764  * For VMs with just normal pages this is equivalent to the host page
1765  * size. If it's got some huge pages then it's the OR of all the
1766  * different page sizes.
1767  */
1768 uint64_t ram_pagesize_summary(void)
1769 {
1770     RAMBlock *block;
1771     uint64_t summary = 0;
1772 
1773     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
1774         summary |= block->page_size;
1775     }
1776 
1777     return summary;
1778 }
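
/*
 * Example: a VM whose RAMBlocks use 4KiB pages plus one hugepage-backed
 * block with 2MiB pages yields
 *
 *     summary = 0x1000 | 0x200000 = 0x201000
 *
 * so callers can tell from a single value which page sizes are in use.
 */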
1779 
1780 uint64_t ram_get_total_transferred_pages(void)
1781 {
1782     return  ram_counters.normal + ram_counters.duplicate +
1783                 compression_counters.pages + xbzrle_counters.pages;
1784 }
1785 
1786 static void migration_update_rates(RAMState *rs, int64_t end_time)
1787 {
1788     uint64_t page_count = rs->target_page_count - rs->target_page_count_prev;
1789     double compressed_size;
1790 
1791     /* calculate period counters */
1792     ram_counters.dirty_pages_rate = rs->num_dirty_pages_period * 1000
1793                 / (end_time - rs->time_last_bitmap_sync);
1794 
1795     if (!page_count) {
1796         return;
1797     }
1798 
1799     if (migrate_use_xbzrle()) {
1800         xbzrle_counters.cache_miss_rate = (double)(xbzrle_counters.cache_miss -
1801             rs->xbzrle_cache_miss_prev) / page_count;
1802         rs->xbzrle_cache_miss_prev = xbzrle_counters.cache_miss;
1803     }
1804 
1805     if (migrate_use_compression()) {
1806         compression_counters.busy_rate = (double)(compression_counters.busy -
1807             rs->compress_thread_busy_prev) / page_count;
1808         rs->compress_thread_busy_prev = compression_counters.busy;
1809 
1810         compressed_size = compression_counters.compressed_size -
1811                           rs->compressed_size_prev;
1812         if (compressed_size) {
1813             double uncompressed_size = (compression_counters.pages -
1814                                     rs->compress_pages_prev) * TARGET_PAGE_SIZE;
1815 
1816             /* Compression-Ratio = Uncompressed-size / Compressed-size */
1817             compression_counters.compression_rate =
1818                                         uncompressed_size / compressed_size;
1819 
1820             rs->compress_pages_prev = compression_counters.pages;
1821             rs->compressed_size_prev = compression_counters.compressed_size;
1822         }
1823     }
1824 }
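
/*
 * Worked example for the compression ratio above (hypothetical numbers,
 * 4KiB target pages): if 1000 pages were handed to the compression threads
 * since the last sync and their compressed output totalled 1 MiB, then
 *
 *     uncompressed_size = 1000 * 4096       = 4096000 bytes
 *     compression_rate  = 4096000 / 1048576 ~= 3.9
 *
 * i.e. roughly a 3.9:1 compression ratio for that period.
 */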
1825 
1826 static void migration_bitmap_sync(RAMState *rs)
1827 {
1828     RAMBlock *block;
1829     int64_t end_time;
1830     uint64_t bytes_xfer_now;
1831 
1832     ram_counters.dirty_sync_count++;
1833 
1834     if (!rs->time_last_bitmap_sync) {
1835         rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1836     }
1837 
1838     trace_migration_bitmap_sync_start();
1839     memory_global_dirty_log_sync();
1840 
1841     qemu_mutex_lock(&rs->bitmap_mutex);
1842     rcu_read_lock();
1843     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
1844         migration_bitmap_sync_range(rs, block, block->used_length);
1845     }
1846     ram_counters.remaining = ram_bytes_remaining();
1847     rcu_read_unlock();
1848     qemu_mutex_unlock(&rs->bitmap_mutex);
1849 
1850     trace_migration_bitmap_sync_end(rs->num_dirty_pages_period);
1851 
1852     end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1853 
1854     /* more than 1 second = 1000 milliseconds */
1855     if (end_time > rs->time_last_bitmap_sync + 1000) {
1856         bytes_xfer_now = ram_counters.transferred;
1857 
1858         /* During block migration the auto-converge logic incorrectly detects
1859          * that ram migration makes no progress. Avoid this by disabling the
1860          * throttling logic during the bulk phase of block migration. */
1861         if (migrate_auto_converge() && !blk_mig_bulk_active()) {
1862             /* The following detection logic can be refined later. For now:
1863                check whether the bytes dirtied in this period exceed 50% of
1864                the approximate amount of bytes that just got transferred
1865                since the last time we were in this routine. If that happens
1866                twice in a row, start or increase throttling. */
1867 
1868             if ((rs->num_dirty_pages_period * TARGET_PAGE_SIZE >
1869                    (bytes_xfer_now - rs->bytes_xfer_prev) / 2) &&
1870                 (++rs->dirty_rate_high_cnt >= 2)) {
1871                     trace_migration_throttle();
1872                     rs->dirty_rate_high_cnt = 0;
1873                     mig_throttle_guest_down();
1874             }
1875         }
1876 
1877         migration_update_rates(rs, end_time);
1878 
1879         rs->target_page_count_prev = rs->target_page_count;
1880 
1881         /* reset period counters */
1882         rs->time_last_bitmap_sync = end_time;
1883         rs->num_dirty_pages_period = 0;
1884         rs->bytes_xfer_prev = bytes_xfer_now;
1885     }
1886     if (migrate_use_events()) {
1887         qapi_event_send_migration_pass(ram_counters.dirty_sync_count);
1888     }
1889 }
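
/*
 * Illustrative numbers for the throttle check above (hypothetical): if,
 * since the previous sync, 100 MiB were transferred but 60 MiB of guest
 * RAM were dirtied, then 60 MiB > 100 MiB / 2, so dirty_rate_high_cnt is
 * bumped; if the same happens again on the next sync, the guest is
 * throttled down via mig_throttle_guest_down().
 */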
1890 
1891 static void migration_bitmap_sync_precopy(RAMState *rs)
1892 {
1893     Error *local_err = NULL;
1894 
1895     /*
1896      * The current notifier usage is just an optimization for migration, so we
1897      * don't stop the normal migration process in the error case.
1898      */
1899     if (precopy_notify(PRECOPY_NOTIFY_BEFORE_BITMAP_SYNC, &local_err)) {
1900         error_report_err(local_err);
1901     }
1902 
1903     migration_bitmap_sync(rs);
1904 
1905     if (precopy_notify(PRECOPY_NOTIFY_AFTER_BITMAP_SYNC, &local_err)) {
1906         error_report_err(local_err);
1907     }
1908 }
1909 
1910 /**
1911  * save_zero_page_to_file: send the zero page to the file
1912  *
1913  * Returns the size of the data written to the file, or 0 if the page is
1914  * not a zero page
1915  *
1916  * @rs: current RAM state
1917  * @file: the file where the data is saved
1918  * @block: block that contains the page we want to send
1919  * @offset: offset inside the block for the page
1920  */
1921 static int save_zero_page_to_file(RAMState *rs, QEMUFile *file,
1922                                   RAMBlock *block, ram_addr_t offset)
1923 {
1924     uint8_t *p = block->host + offset;
1925     int len = 0;
1926 
1927     if (is_zero_range(p, TARGET_PAGE_SIZE)) {
1928         len += save_page_header(rs, file, block, offset | RAM_SAVE_FLAG_ZERO);
1929         qemu_put_byte(file, 0);
1930         len += 1;
1931     }
1932     return len;
1933 }
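
/*
 * On the wire a zero page is just its page header with RAM_SAVE_FLAG_ZERO
 * set plus a single 0x00 byte - e.g. roughly 9 bytes instead of 4096 for a
 * 4KiB target page when RAM_SAVE_FLAG_CONTINUE lets us omit the block name.
 */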
1934 
1935 /**
1936  * save_zero_page: send the zero page to the stream
1937  *
1938  * Returns the number of pages written.
1939  *
1940  * @rs: current RAM state
1941  * @block: block that contains the page we want to send
1942  * @offset: offset inside the block for the page
1943  */
1944 static int save_zero_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
1945 {
1946     int len = save_zero_page_to_file(rs, rs->f, block, offset);
1947 
1948     if (len) {
1949         ram_counters.duplicate++;
1950         ram_counters.transferred += len;
1951         return 1;
1952     }
1953     return -1;
1954 }
1955 
1956 static void ram_release_pages(const char *rbname, uint64_t offset, int pages)
1957 {
1958     if (!migrate_release_ram() || !migration_in_postcopy()) {
1959         return;
1960     }
1961 
1962     ram_discard_range(rbname, offset, pages << TARGET_PAGE_BITS);
1963 }
1964 
1965 /*
1966  * @pages: the number of pages written by the control path,
1967  *        < 0 - error
1968  *        > 0 - number of pages written
1969  *
1970  * Return true if the page has been saved, otherwise false is returned.
1971  */
1972 static bool control_save_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
1973                               int *pages)
1974 {
1975     uint64_t bytes_xmit = 0;
1976     int ret;
1977 
1978     *pages = -1;
1979     ret = ram_control_save_page(rs->f, block->offset, offset, TARGET_PAGE_SIZE,
1980                                 &bytes_xmit);
1981     if (ret == RAM_SAVE_CONTROL_NOT_SUPP) {
1982         return false;
1983     }
1984 
1985     if (bytes_xmit) {
1986         ram_counters.transferred += bytes_xmit;
1987         *pages = 1;
1988     }
1989 
1990     if (ret == RAM_SAVE_CONTROL_DELAYED) {
1991         return true;
1992     }
1993 
1994     if (bytes_xmit > 0) {
1995         ram_counters.normal++;
1996     } else if (bytes_xmit == 0) {
1997         ram_counters.duplicate++;
1998     }
1999 
2000     return true;
2001 }
2002 
2003 /*
2004  * directly send the page to the stream
2005  *
2006  * Returns the number of pages written.
2007  *
2008  * @rs: current RAM state
2009  * @block: block that contains the page we want to send
2010  * @offset: offset inside the block for the page
2011  * @buf: the page to be sent
2012  * @async: send the page asynchronously
2013  */
2014 static int save_normal_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
2015                             uint8_t *buf, bool async)
2016 {
2017     ram_counters.transferred += save_page_header(rs, rs->f, block,
2018                                                  offset | RAM_SAVE_FLAG_PAGE);
2019     if (async) {
2020         qemu_put_buffer_async(rs->f, buf, TARGET_PAGE_SIZE,
2021                               migrate_release_ram() &&
2022                               migration_in_postcopy());
2023     } else {
2024         qemu_put_buffer(rs->f, buf, TARGET_PAGE_SIZE);
2025     }
2026     ram_counters.transferred += TARGET_PAGE_SIZE;
2027     ram_counters.normal++;
2028     return 1;
2029 }
2030 
2031 /**
2032  * ram_save_page: send the given page to the stream
2033  *
2034  * Returns the number of pages written.
2035  *          < 0 - error
2036  *          >=0 - Number of pages written - this might legally be 0
2037  *                if xbzrle noticed the page was the same.
2038  *
2039  * @rs: current RAM state
2040  * @pss: data about the page we want to send
2042  * @last_stage: if we are at the completion stage
2043  */
2044 static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
2045 {
2046     int pages = -1;
2047     uint8_t *p;
2048     bool send_async = true;
2049     RAMBlock *block = pss->block;
2050     ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
2051     ram_addr_t current_addr = block->offset + offset;
2052 
2053     p = block->host + offset;
2054     trace_ram_save_page(block->idstr, (uint64_t)offset, p);
2055 
2056     XBZRLE_cache_lock();
2057     if (!rs->ram_bulk_stage && !migration_in_postcopy() &&
2058         migrate_use_xbzrle()) {
2059         pages = save_xbzrle_page(rs, &p, current_addr, block,
2060                                  offset, last_stage);
2061         if (!last_stage) {
2062             /* Can't send this cached data async, since the cache page
2063              * might get updated before it gets to the wire
2064              */
2065             send_async = false;
2066         }
2067     }
2068 
2069     /* XBZRLE overflow or normal page */
2070     if (pages == -1) {
2071         pages = save_normal_page(rs, block, offset, p, send_async);
2072     }
2073 
2074     XBZRLE_cache_unlock();
2075 
2076     return pages;
2077 }
2078 
2079 static int ram_save_multifd_page(RAMState *rs, RAMBlock *block,
2080                                  ram_addr_t offset)
2081 {
2082     if (multifd_queue_page(block, offset) < 0) {
2083         return -1;
2084     }
2085     ram_counters.normal++;
2086 
2087     return 1;
2088 }
2089 
2090 static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
2091                                  ram_addr_t offset, uint8_t *source_buf)
2092 {
2093     RAMState *rs = ram_state;
2094     uint8_t *p = block->host + (offset & TARGET_PAGE_MASK);
2095     bool zero_page = false;
2096     int ret;
2097 
2098     if (save_zero_page_to_file(rs, f, block, offset)) {
2099         zero_page = true;
2100         goto exit;
2101     }
2102 
2103     save_page_header(rs, f, block, offset | RAM_SAVE_FLAG_COMPRESS_PAGE);
2104 
2105     /*
2106      * copy it to an internal buffer to avoid it being modified by the VM,
2107      * so that we can catch any error during compression and
2108      * decompression
2109      */
2110     memcpy(source_buf, p, TARGET_PAGE_SIZE);
2111     ret = qemu_put_compression_data(f, stream, source_buf, TARGET_PAGE_SIZE);
2112     if (ret < 0) {
2113         qemu_file_set_error(migrate_get_current()->to_dst_file, ret);
2114         error_report("compressed data failed!");
2115         return false;
2116     }
2117 
2118 exit:
2119     ram_release_pages(block->idstr, offset & TARGET_PAGE_MASK, 1);
2120     return zero_page;
2121 }
2122 
2123 static void
2124 update_compress_thread_counts(const CompressParam *param, int bytes_xmit)
2125 {
2126     ram_counters.transferred += bytes_xmit;
2127 
2128     if (param->zero_page) {
2129         ram_counters.duplicate++;
2130         return;
2131     }
2132 
2133     /* 8 is the size of a page header with RAM_SAVE_FLAG_CONTINUE set. */
2134     compression_counters.compressed_size += bytes_xmit - 8;
2135     compression_counters.pages++;
2136 }
2137 
2138 static bool save_page_use_compression(RAMState *rs);
2139 
2140 static void flush_compressed_data(RAMState *rs)
2141 {
2142     int idx, len, thread_count;
2143 
2144     if (!save_page_use_compression(rs)) {
2145         return;
2146     }
2147     thread_count = migrate_compress_threads();
2148 
2149     qemu_mutex_lock(&comp_done_lock);
2150     for (idx = 0; idx < thread_count; idx++) {
2151         while (!comp_param[idx].done) {
2152             qemu_cond_wait(&comp_done_cond, &comp_done_lock);
2153         }
2154     }
2155     qemu_mutex_unlock(&comp_done_lock);
2156 
2157     for (idx = 0; idx < thread_count; idx++) {
2158         qemu_mutex_lock(&comp_param[idx].mutex);
2159         if (!comp_param[idx].quit) {
2160             len = qemu_put_qemu_file(rs->f, comp_param[idx].file);
2161             /*
2162              * It's safe to fetch zero_page without holding comp_done_lock,
2163              * as there is no further request submitted to the thread,
2164              * i.e., the thread should be waiting for a request at this point.
2165              */
2166             update_compress_thread_counts(&comp_param[idx], len);
2167         }
2168         qemu_mutex_unlock(&comp_param[idx].mutex);
2169     }
2170 }
2171 
2172 static inline void set_compress_params(CompressParam *param, RAMBlock *block,
2173                                        ram_addr_t offset)
2174 {
2175     param->block = block;
2176     param->offset = offset;
2177 }
2178 
2179 static int compress_page_with_multi_thread(RAMState *rs, RAMBlock *block,
2180                                            ram_addr_t offset)
2181 {
2182     int idx, thread_count, bytes_xmit = -1, pages = -1;
2183     bool wait = migrate_compress_wait_thread();
2184 
2185     thread_count = migrate_compress_threads();
2186     qemu_mutex_lock(&comp_done_lock);
2187 retry:
2188     for (idx = 0; idx < thread_count; idx++) {
2189         if (comp_param[idx].done) {
2190             comp_param[idx].done = false;
2191             bytes_xmit = qemu_put_qemu_file(rs->f, comp_param[idx].file);
2192             qemu_mutex_lock(&comp_param[idx].mutex);
2193             set_compress_params(&comp_param[idx], block, offset);
2194             qemu_cond_signal(&comp_param[idx].cond);
2195             qemu_mutex_unlock(&comp_param[idx].mutex);
2196             pages = 1;
2197             update_compress_thread_counts(&comp_param[idx], bytes_xmit);
2198             break;
2199         }
2200     }
2201 
2202     /*
2203      * Wait for a free thread if the user specifies 'compress-wait-thread',
2204      * otherwise we will post the page out in the main thread as a normal page.
2205      */
2206     if (pages < 0 && wait) {
2207         qemu_cond_wait(&comp_done_cond, &comp_done_lock);
2208         goto retry;
2209     }
2210     qemu_mutex_unlock(&comp_done_lock);
2211 
2212     return pages;
2213 }
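
/*
 * Rough sequence of the hand-off implemented above (illustrative):
 *
 *   main thread                          compression thread idx
 *   -----------                          ----------------------
 *   comp_param[idx].done == true    <--  finished previous page, signalled
 *   drain its buffered QEMUFile
 *   set_compress_params(block, offset)
 *   qemu_cond_signal(&param->cond)  -->  wakes up, compresses the new page
 *                                        into its private QEMUFile buffer
 *
 * so compressed data is only copied into rs->f by the main thread, which
 * keeps the output stream single-writer and correctly ordered.
 */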
2214 
2215 /**
2216  * find_dirty_block: find the next dirty page and update any state
2217  * associated with the search process.
2218  *
2219  * Returns true if a page is found
2220  *
2221  * @rs: current RAM state
2222  * @pss: data about the state of the current dirty page scan
2223  * @again: set to false if the search has scanned the whole of RAM
2224  */
2225 static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again)
2226 {
2227     pss->page = migration_bitmap_find_dirty(rs, pss->block, pss->page);
2228     if (pss->complete_round && pss->block == rs->last_seen_block &&
2229         pss->page >= rs->last_page) {
2230         /*
2231          * We've been once around the RAM and haven't found anything.
2232          * Give up.
2233          */
2234         *again = false;
2235         return false;
2236     }
2237     if ((pss->page << TARGET_PAGE_BITS) >= pss->block->used_length) {
2238         /* Didn't find anything in this RAM Block */
2239         pss->page = 0;
2240         pss->block = QLIST_NEXT_RCU(pss->block, next);
2241         if (!pss->block) {
2242             /*
2243              * If memory migration starts over, we will meet a dirtied page
2244              * which may still exist in the compression threads' ring, so we
2245              * should flush the compressed data to make sure the new page
2246              * is not overwritten by the old one on the destination.
2247              *
2248              * Also, if xbzrle is on, stop using data compression at this
2249              * point. In theory, xbzrle can do better than compression.
2250              */
2251             flush_compressed_data(rs);
2252 
2253             /* Hit the end of the list */
2254             pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
2255             /* Flag that we've looped */
2256             pss->complete_round = true;
2257             rs->ram_bulk_stage = false;
2258         }
2259         /* Didn't find anything this time, but try again on the new block */
2260         *again = true;
2261         return false;
2262     } else {
2263         /* Can go around again, but... */
2264         *again = true;
2265         /* We've found something so probably don't need to */
2266         return true;
2267     }
2268 }
2269 
2270 /**
2271  * unqueue_page: gets a page off the queue
2272  *
2273  * Helper for 'get_queued_page' - gets a page off the queue
2274  *
2275  * Returns the block of the page (or NULL if none available)
2276  *
2277  * @rs: current RAM state
2278  * @offset: used to return the offset within the RAMBlock
2279  */
2280 static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
2281 {
2282     RAMBlock *block = NULL;
2283 
2284     if (QSIMPLEQ_EMPTY_ATOMIC(&rs->src_page_requests)) {
2285         return NULL;
2286     }
2287 
2288     qemu_mutex_lock(&rs->src_page_req_mutex);
2289     if (!QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
2290         struct RAMSrcPageRequest *entry =
2291                                 QSIMPLEQ_FIRST(&rs->src_page_requests);
2292         block = entry->rb;
2293         *offset = entry->offset;
2294 
2295         if (entry->len > TARGET_PAGE_SIZE) {
2296             entry->len -= TARGET_PAGE_SIZE;
2297             entry->offset += TARGET_PAGE_SIZE;
2298         } else {
2299             memory_region_unref(block->mr);
2300             QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
2301             g_free(entry);
2302             migration_consume_urgent_request();
2303         }
2304     }
2305     qemu_mutex_unlock(&rs->src_page_req_mutex);
2306 
2307     return block;
2308 }
2309 
2310 /**
2311  * get_queued_page: unqueue a page from the postcopy requests
2312  *
2313  * Skips pages that are already sent (!dirty)
2314  *
2315  * Returns true if a queued page is found
2316  *
2317  * @rs: current RAM state
2318  * @pss: data about the state of the current dirty page scan
2319  */
2320 static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
2321 {
2322     RAMBlock  *block;
2323     ram_addr_t offset;
2324     bool dirty;
2325 
2326     do {
2327         block = unqueue_page(rs, &offset);
2328         /*
2329          * We're sending this page, and since it's postcopy nothing else
2330          * will dirty it, and we must make sure it doesn't get sent again
2331          * even if this queue request was received after the background
2332          * search already sent it.
2333          */
2334         if (block) {
2335             unsigned long page;
2336 
2337             page = offset >> TARGET_PAGE_BITS;
2338             dirty = test_bit(page, block->bmap);
2339             if (!dirty) {
2340                 trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset,
2341                        page, test_bit(page, block->unsentmap));
2342             } else {
2343                 trace_get_queued_page(block->idstr, (uint64_t)offset, page);
2344             }
2345         }
2346 
2347     } while (block && !dirty);
2348 
2349     if (block) {
2350         /*
2351          * As soon as we start servicing pages out of order, we have
2352          * to kill the bulk stage, since the bulk stage assumes
2353          * (in migration_bitmap_find_dirty) that every page is dirty,
2354          * and that's no longer true.
2355          */
2356         rs->ram_bulk_stage = false;
2357 
2358         /*
2359          * We want the background search to continue from the queued page
2360          * since the guest is likely to want other pages near to the page
2361          * it just requested.
2362          */
2363         pss->block = block;
2364         pss->page = offset >> TARGET_PAGE_BITS;
2365 
2366         /*
2367          * This unqueued page would break the "one round" check, even if
2368          * it is really rare.
2369          */
2370         pss->complete_round = false;
2371     }
2372 
2373     return !!block;
2374 }
2375 
2376 /**
2377  * migration_page_queue_free: drop any remaining pages in the ram
2378  * request queue
2379  *
2380  * It should be empty at the end anyway, but in error cases there may
2381  * be some left.  In case there are any pages left, we drop them.
2382  *
2383  */
2384 static void migration_page_queue_free(RAMState *rs)
2385 {
2386     struct RAMSrcPageRequest *mspr, *next_mspr;
2387     /* This queue should generally be empty - but in the case of a failed
2388      * migration it might have some leftover entries.
2389      */
2390     rcu_read_lock();
2391     QSIMPLEQ_FOREACH_SAFE(mspr, &rs->src_page_requests, next_req, next_mspr) {
2392         memory_region_unref(mspr->rb->mr);
2393         QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
2394         g_free(mspr);
2395     }
2396     rcu_read_unlock();
2397 }
2398 
2399 /**
2400  * ram_save_queue_pages: queue the page for transmission
2401  *
2402  * A request from postcopy destination for example.
2403  *
2404  * Returns zero on success or negative on error
2405  *
2406  * @rbname: Name of the RAMBlock of the request. NULL means the
2407  *          same as the last one.
2408  * @start: starting address from the start of the RAMBlock
2409  * @len: length (in bytes) to send
2410  */
2411 int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len)
2412 {
2413     RAMBlock *ramblock;
2414     RAMState *rs = ram_state;
2415 
2416     ram_counters.postcopy_requests++;
2417     rcu_read_lock();
2418     if (!rbname) {
2419         /* Reuse last RAMBlock */
2420         ramblock = rs->last_req_rb;
2421 
2422         if (!ramblock) {
2423             /*
2424              * Shouldn't happen, we can't reuse the last RAMBlock if
2425              * it's the 1st request.
2426              */
2427             error_report("ram_save_queue_pages no previous block");
2428             goto err;
2429         }
2430     } else {
2431         ramblock = qemu_ram_block_by_name(rbname);
2432 
2433         if (!ramblock) {
2434             /* We shouldn't be asked for a non-existent RAMBlock */
2435             error_report("ram_save_queue_pages no block '%s'", rbname);
2436             goto err;
2437         }
2438         rs->last_req_rb = ramblock;
2439     }
2440     trace_ram_save_queue_pages(ramblock->idstr, start, len);
2441     if (start + len > ramblock->used_length) {
2442         error_report("%s request overrun start=" RAM_ADDR_FMT " len="
2443                      RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT,
2444                      __func__, start, len, ramblock->used_length);
2445         goto err;
2446     }
2447 
2448     struct RAMSrcPageRequest *new_entry =
2449         g_malloc0(sizeof(struct RAMSrcPageRequest));
2450     new_entry->rb = ramblock;
2451     new_entry->offset = start;
2452     new_entry->len = len;
2453 
2454     memory_region_ref(ramblock->mr);
2455     qemu_mutex_lock(&rs->src_page_req_mutex);
2456     QSIMPLEQ_INSERT_TAIL(&rs->src_page_requests, new_entry, next_req);
2457     migration_make_urgent_request();
2458     qemu_mutex_unlock(&rs->src_page_req_mutex);
2459     rcu_read_unlock();
2460 
2461     return 0;
2462 
2463 err:
2464     rcu_read_unlock();
2465     return -1;
2466 }
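
/*
 * Hypothetical usage sketch: when the postcopy destination faults on a page
 * it sends a page request back over the return path, and the source ends up
 * calling something like
 *
 *     ram_save_queue_pages("pc.ram", 0x7ff000, TARGET_PAGE_SIZE);
 *
 * (block name and offset invented for illustration), which queues the range
 * and wakes the migration thread via migration_make_urgent_request().
 */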
2467 
2468 static bool save_page_use_compression(RAMState *rs)
2469 {
2470     if (!migrate_use_compression()) {
2471         return false;
2472     }
2473 
2474     /*
2475      * If xbzrle is on, stop using data compression after the first
2476      * round of migration even if compression is enabled. In theory,
2477      * xbzrle can do better than compression.
2478      */
2479     if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
2480         return true;
2481     }
2482 
2483     return false;
2484 }
2485 
2486 /*
2487  * try to compress the page before posting it out, return true if the page
2488  * has been properly handled by compression, otherwise it needs other
2489  * paths to handle it.
2490  */
2491 static bool save_compress_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
2492 {
2493     if (!save_page_use_compression(rs)) {
2494         return false;
2495     }
2496 
2497     /*
2498      * When starting to process a new block, the first page of
2499      * the block should be sent out before other pages in the same
2500      * block, and all the pages in the last block should have been
2501      * sent out already; keeping this order is important, because the
2502      * 'cont' flag is used to avoid resending the block name.
2503      *
2504      * We post the first page as a normal page since compression
2505      * takes a lot of CPU time.
2506      */
2507     if (block != rs->last_sent_block) {
2508         flush_compressed_data(rs);
2509         return false;
2510     }
2511 
2512     if (compress_page_with_multi_thread(rs, block, offset) > 0) {
2513         return true;
2514     }
2515 
2516     compression_counters.busy++;
2517     return false;
2518 }
2519 
2520 /**
2521  * ram_save_target_page: save one target page
2522  *
2523  * Returns the number of pages written
2524  *
2525  * @rs: current RAM state
2526  * @pss: data about the page we want to send
2527  * @last_stage: if we are at the completion stage
2528  */
2529 static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
2530                                 bool last_stage)
2531 {
2532     RAMBlock *block = pss->block;
2533     ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
2534     int res;
2535 
2536     if (control_save_page(rs, block, offset, &res)) {
2537         return res;
2538     }
2539 
2540     if (save_compress_page(rs, block, offset)) {
2541         return 1;
2542     }
2543 
2544     res = save_zero_page(rs, block, offset);
2545     if (res > 0) {
2546         /* Must let xbzrle know, otherwise a previous (now 0'd) cached
2547          * page would be stale
2548          */
2549         if (!save_page_use_compression(rs)) {
2550             XBZRLE_cache_lock();
2551             xbzrle_cache_zero_page(rs, block->offset + offset);
2552             XBZRLE_cache_unlock();
2553         }
2554         ram_release_pages(block->idstr, offset, res);
2555         return res;
2556     }
2557 
2558     /*
2559      * do not use multifd for compression as the first page in the new
2560      * block should be posted out before sending the compressed page
2561      */
2562     if (!save_page_use_compression(rs) && migrate_use_multifd()) {
2563         return ram_save_multifd_page(rs, block, offset);
2564     }
2565 
2566     return ram_save_page(rs, pss, last_stage);
2567 }
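
/*
 * Summary of the send-path priority implemented above (illustrative):
 *
 *   1. control path (e.g. RDMA)            -> control_save_page()
 *   2. multi-threaded compression          -> save_compress_page()
 *   3. zero page detection                 -> save_zero_page()
 *   4. multifd (only without compression)  -> ram_save_multifd_page()
 *   5. plain or xbzrle page                -> ram_save_page()
 */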
2568 
2569 /**
2570  * ram_save_host_page: save a whole host page
2571  *
2572  * Starting at the page indicated by @pss, send pages up to the end of the
2573  * current host page. It's valid for the initial offset to point into the
2574  * middle of a host page, in which case the remainder of the host page is sent.
2575  * Only dirty target pages are sent. Note that the host page size may
2576  * be a huge page for this block.
2577  * The saving stops at the boundary of the used_length of the block
2578  * if the RAMBlock isn't a multiple of the host page size.
2579  *
2580  * Returns the number of pages written or negative on error
2581  *
2582  * @rs: current RAM state
2584  * @pss: data about the page we want to send
2585  * @last_stage: if we are at the completion stage
2586  */
2587 static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
2588                               bool last_stage)
2589 {
2590     int tmppages, pages = 0;
2591     size_t pagesize_bits =
2592         qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
2593 
2594     if (ramblock_is_ignored(pss->block)) {
2595         error_report("block %s should not be migrated !", pss->block->idstr);
2596         return 0;
2597     }
2598 
2599     do {
2600         /* Check if the page is dirty and, if so, send it */
2601         if (!migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
2602             pss->page++;
2603             continue;
2604         }
2605 
2606         tmppages = ram_save_target_page(rs, pss, last_stage);
2607         if (tmppages < 0) {
2608             return tmppages;
2609         }
2610 
2611         pages += tmppages;
2612         if (pss->block->unsentmap) {
2613             clear_bit(pss->page, pss->block->unsentmap);
2614         }
2615 
2616         pss->page++;
2617     } while ((pss->page & (pagesize_bits - 1)) &&
2618              offset_in_ramblock(pss->block, pss->page << TARGET_PAGE_BITS));
2619 
2620     /* The offset we leave with is the last one we looked at */
2621     pss->page--;
2622     return pages;
2623 }
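
/*
 * Worked example (assuming 4KiB target pages): for a RAMBlock backed by
 * 2MiB hugepages, pagesize_bits == 512, so the loop above walks up to 512
 * consecutive target pages and stops exactly at the hugepage boundary, so
 * whole host pages are sent contiguously (important for postcopy hugepage
 * placement).
 */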
2624 
2625 /**
2626  * ram_find_and_save_block: finds a dirty page and sends it to f
2627  *
2628  * Called within an RCU critical section.
2629  *
2630  * Returns the number of pages written where zero means no dirty pages,
2631  * or negative on error
2632  *
2633  * @rs: current RAM state
2634  * @last_stage: if we are at the completion stage
2635  *
2636  * On systems where host-page-size > target-page-size it will send all the
2637  * pages in a host page that are dirty.
2638  */
2639 
2640 static int ram_find_and_save_block(RAMState *rs, bool last_stage)
2641 {
2642     PageSearchStatus pss;
2643     int pages = 0;
2644     bool again, found;
2645 
2646     /* No dirty page as there is zero RAM */
2647     if (!ram_bytes_total()) {
2648         return pages;
2649     }
2650 
2651     pss.block = rs->last_seen_block;
2652     pss.page = rs->last_page;
2653     pss.complete_round = false;
2654 
2655     if (!pss.block) {
2656         pss.block = QLIST_FIRST_RCU(&ram_list.blocks);
2657     }
2658 
2659     do {
2660         again = true;
2661         found = get_queued_page(rs, &pss);
2662 
2663         if (!found) {
2664             /* priority queue empty, so just search for something dirty */
2665             found = find_dirty_block(rs, &pss, &again);
2666         }
2667 
2668         if (found) {
2669             pages = ram_save_host_page(rs, &pss, last_stage);
2670         }
2671     } while (!pages && again);
2672 
2673     rs->last_seen_block = pss.block;
2674     rs->last_page = pss.page;
2675 
2676     return pages;
2677 }
2678 
2679 void acct_update_position(QEMUFile *f, size_t size, bool zero)
2680 {
2681     uint64_t pages = size / TARGET_PAGE_SIZE;
2682 
2683     if (zero) {
2684         ram_counters.duplicate += pages;
2685     } else {
2686         ram_counters.normal += pages;
2687         ram_counters.transferred += size;
2688         qemu_update_position(f, size);
2689     }
2690 }
2691 
2692 static uint64_t ram_bytes_total_common(bool count_ignored)
2693 {
2694     RAMBlock *block;
2695     uint64_t total = 0;
2696 
2697     rcu_read_lock();
2698     if (count_ignored) {
2699         RAMBLOCK_FOREACH_MIGRATABLE(block) {
2700             total += block->used_length;
2701         }
2702     } else {
2703         RAMBLOCK_FOREACH_NOT_IGNORED(block) {
2704             total += block->used_length;
2705         }
2706     }
2707     rcu_read_unlock();
2708     return total;
2709 }
2710 
2711 uint64_t ram_bytes_total(void)
2712 {
2713     return ram_bytes_total_common(false);
2714 }
2715 
2716 static void xbzrle_load_setup(void)
2717 {
2718     XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
2719 }
2720 
2721 static void xbzrle_load_cleanup(void)
2722 {
2723     g_free(XBZRLE.decoded_buf);
2724     XBZRLE.decoded_buf = NULL;
2725 }
2726 
2727 static void ram_state_cleanup(RAMState **rsp)
2728 {
2729     if (*rsp) {
2730         migration_page_queue_free(*rsp);
2731         qemu_mutex_destroy(&(*rsp)->bitmap_mutex);
2732         qemu_mutex_destroy(&(*rsp)->src_page_req_mutex);
2733         g_free(*rsp);
2734         *rsp = NULL;
2735     }
2736 }
2737 
2738 static void xbzrle_cleanup(void)
2739 {
2740     XBZRLE_cache_lock();
2741     if (XBZRLE.cache) {
2742         cache_fini(XBZRLE.cache);
2743         g_free(XBZRLE.encoded_buf);
2744         g_free(XBZRLE.current_buf);
2745         g_free(XBZRLE.zero_target_page);
2746         XBZRLE.cache = NULL;
2747         XBZRLE.encoded_buf = NULL;
2748         XBZRLE.current_buf = NULL;
2749         XBZRLE.zero_target_page = NULL;
2750     }
2751     XBZRLE_cache_unlock();
2752 }
2753 
2754 static void ram_save_cleanup(void *opaque)
2755 {
2756     RAMState **rsp = opaque;
2757     RAMBlock *block;
2758 
2759     /* The caller must hold the iothread lock or be in a bottom half, so
2760      * there is no write race against the migration bitmap.
2761      */
2762     memory_global_dirty_log_stop();
2763 
2764     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
2765         g_free(block->clear_bmap);
2766         block->clear_bmap = NULL;
2767         g_free(block->bmap);
2768         block->bmap = NULL;
2769         g_free(block->unsentmap);
2770         block->unsentmap = NULL;
2771     }
2772 
2773     xbzrle_cleanup();
2774     compress_threads_save_cleanup();
2775     ram_state_cleanup(rsp);
2776 }
2777 
2778 static void ram_state_reset(RAMState *rs)
2779 {
2780     rs->last_seen_block = NULL;
2781     rs->last_sent_block = NULL;
2782     rs->last_page = 0;
2783     rs->last_version = ram_list.version;
2784     rs->ram_bulk_stage = true;
2785     rs->fpo_enabled = false;
2786 }
2787 
2788 #define MAX_WAIT 50 /* ms, half buffered_file limit */
2789 
2790 /*
2791  * 'expected' is the value you expect the bitmap mostly to be full
2792  * of; it won't bother printing lines that are all this value.
2793  * If 'todump' is null the migration bitmap is dumped.
2794  */
2795 void ram_debug_dump_bitmap(unsigned long *todump, bool expected,
2796                            unsigned long pages)
2797 {
2798     int64_t cur;
2799     int64_t linelen = 128;
2800     char linebuf[129];
2801 
2802     for (cur = 0; cur < pages; cur += linelen) {
2803         int64_t curb;
2804         bool found = false;
2805         /*
2806          * Last line; catch the case where the line length
2807          * is longer than remaining ram
2808          */
2809         if (cur + linelen > pages) {
2810             linelen = pages - cur;
2811         }
2812         for (curb = 0; curb < linelen; curb++) {
2813             bool thisbit = test_bit(cur + curb, todump);
2814             linebuf[curb] = thisbit ? '1' : '.';
2815             found = found || (thisbit != expected);
2816         }
2817         if (found) {
2818             linebuf[curb] = '\0';
2819             fprintf(stderr,  "0x%08" PRIx64 " : %s\n", cur, linebuf);
2820         }
2821     }
2822 }
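
/*
 * Example of the output format (hypothetical bitmap contents): one line is
 * printed per 128 pages that contain at least one bit differing from
 * 'expected', with '1' for set bits and '.' for clear ones, e.g.
 *
 *     0x00000080 : 1111....11111111...............1................
 */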
2823 
2824 /* **** functions for postcopy ***** */
2825 
2826 void ram_postcopy_migrated_memory_release(MigrationState *ms)
2827 {
2828     struct RAMBlock *block;
2829 
2830     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
2831         unsigned long *bitmap = block->bmap;
2832         unsigned long range = block->used_length >> TARGET_PAGE_BITS;
2833         unsigned long run_start = find_next_zero_bit(bitmap, range, 0);
2834 
2835         while (run_start < range) {
2836             unsigned long run_end = find_next_bit(bitmap, range, run_start + 1);
2837             ram_discard_range(block->idstr, run_start << TARGET_PAGE_BITS,
2838                               (run_end - run_start) << TARGET_PAGE_BITS);
2839             run_start = find_next_zero_bit(bitmap, range, run_end + 1);
2840         }
2841     }
2842 }
2843 
2844 /**
2845  * postcopy_send_discard_bm_ram: discard a RAMBlock
2846  *
2847  * Returns zero on success
2848  *
2849  * Callback from postcopy_each_ram_send_discard for each RAMBlock
2850  * Note: At this point the 'unsentmap' is the processed bitmap combined
2851  *       with the dirtymap; so a '1' means it's either dirty or unsent.
2852  *
2853  * @ms: current migration state
2854  * @pds: state for postcopy
2855  * @block: RAMBlock to discard
2856  */
2857 static int postcopy_send_discard_bm_ram(MigrationState *ms,
2858                                         PostcopyDiscardState *pds,
2859                                         RAMBlock *block)
2860 {
2861     unsigned long end = block->used_length >> TARGET_PAGE_BITS;
2862     unsigned long current;
2863     unsigned long *unsentmap = block->unsentmap;
2864 
2865     for (current = 0; current < end; ) {
2866         unsigned long one = find_next_bit(unsentmap, end, current);
2867 
2868         if (one <= end) {
2869             unsigned long zero = find_next_zero_bit(unsentmap, end, one + 1);
2870             unsigned long discard_length;
2871 
2872             if (zero >= end) {
2873                 discard_length = end - one;
2874             } else {
2875                 discard_length = zero - one;
2876             }
2877             if (discard_length) {
2878                 postcopy_discard_send_range(ms, pds, one, discard_length);
2879             }
2880             current = one + discard_length;
2881         } else {
2882             current = one;
2883         }
2884     }
2885 
2886     return 0;
2887 }
2888 
2889 /**
2890  * postcopy_each_ram_send_discard: discard all RAMBlocks
2891  *
2892  * Returns 0 for success or negative for error
2893  *
2894  * Utility for the outgoing postcopy code.
2895  *   Calls postcopy_send_discard_bm_ram for each RAMBlock
2896  *   passing it bitmap indexes and name.
2897  * (qemu_ram_foreach_block ends up passing unscaled lengths
2898  *  which would mean postcopy code would have to deal with target page)
2899  *
2900  * @ms: current migration state
2901  */
2902 static int postcopy_each_ram_send_discard(MigrationState *ms)
2903 {
2904     struct RAMBlock *block;
2905     int ret;
2906 
2907     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
2908         PostcopyDiscardState *pds =
2909             postcopy_discard_send_init(ms, block->idstr);
2910 
2911         /*
2912          * Postcopy sends chunks of bitmap over the wire, but it
2913          * just needs indexes at this point, which avoids it having
2914          * target page specific code.
2915          */
2916         ret = postcopy_send_discard_bm_ram(ms, pds, block);
2917         postcopy_discard_send_finish(ms, pds);
2918         if (ret) {
2919             return ret;
2920         }
2921     }
2922 
2923     return 0;
2924 }
2925 
2926 /**
2927  * postcopy_chunk_hostpages_pass: canonicalize the bitmap in host pages
2928  *
2929  * Helper for postcopy_chunk_hostpages; it's called twice to
2930  * canonicalize the two bitmaps, which are similar, but one is
2931  * inverted.
2932  *
2933  * Postcopy requires that all target pages in a hostpage are dirty or
2934  * clean, not a mix.  This function canonicalizes the bitmaps.
2935  *
2936  * @ms: current migration state
2937  * @unsent_pass: if true we need to canonicalize partially unsent host pages
2938  *               otherwise we need to canonicalize partially dirty host pages
2939  * @block: block that contains the page we want to canonicalize
2940  * @pds: state for postcopy
2941  */
2942 static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
2943                                           RAMBlock *block,
2944                                           PostcopyDiscardState *pds)
2945 {
2946     RAMState *rs = ram_state;
2947     unsigned long *bitmap = block->bmap;
2948     unsigned long *unsentmap = block->unsentmap;
2949     unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE;
2950     unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
2951     unsigned long run_start;
2952 
2953     if (block->page_size == TARGET_PAGE_SIZE) {
2954         /* Easy case - TPS==HPS for a non-huge page RAMBlock */
2955         return;
2956     }
2957 
2958     if (unsent_pass) {
2959         /* Find a sent page */
2960         run_start = find_next_zero_bit(unsentmap, pages, 0);
2961     } else {
2962         /* Find a dirty page */
2963         run_start = find_next_bit(bitmap, pages, 0);
2964     }
2965 
2966     while (run_start < pages) {
2967         bool do_fixup = false;
2968         unsigned long fixup_start_addr;
2969         unsigned long host_offset;
2970 
2971         /*
2972          * If the start of this run of pages is in the middle of a host
2973          * page, then we need to fixup this host page.
2974          */
2975         host_offset = run_start % host_ratio;
2976         if (host_offset) {
2977             do_fixup = true;
2978             run_start -= host_offset;
2979             fixup_start_addr = run_start;
2980             /* For the next pass */
2981             run_start = run_start + host_ratio;
2982         } else {
2983             /* Find the end of this run */
2984             unsigned long run_end;
2985             if (unsent_pass) {
2986                 run_end = find_next_bit(unsentmap, pages, run_start + 1);
2987             } else {
2988                 run_end = find_next_zero_bit(bitmap, pages, run_start + 1);
2989             }
2990             /*
2991              * If the end isn't at the start of a host page, then the
2992              * run doesn't finish at the end of a host page
2993              * and we need to discard.
2994              */
2995             host_offset = run_end % host_ratio;
2996             if (host_offset) {
2997                 do_fixup = true;
2998                 fixup_start_addr = run_end - host_offset;
2999                 /*
3000                  * This host page has gone, the next loop iteration starts
3001                  * from after the fixup
3002                  */
3003                 run_start = fixup_start_addr + host_ratio;
3004             } else {
3005                 /*
3006                  * No discards on this iteration, next loop starts from
3007                  * next sent/dirty page
3008                  */
3009                 run_start = run_end + 1;
3010             }
3011         }
3012 
3013         if (do_fixup) {
3014             unsigned long page;
3015 
3016             /* Tell the destination to discard this page */
3017             if (unsent_pass || !test_bit(fixup_start_addr, unsentmap)) {
3018                 /* For the unsent_pass we:
3019                  *     discard partially sent pages
3020                  * For the !unsent_pass (dirty) we:
3021                  *     discard partially dirty pages that were sent
3022                  *     (any partially sent pages were already discarded
3023                  *     by the previous unsent_pass)
3024                  */
3025                 postcopy_discard_send_range(ms, pds, fixup_start_addr,
3026                                             host_ratio);
3027             }
3028 
3029             /* Clean up the bitmap */
3030             for (page = fixup_start_addr;
3031                  page < fixup_start_addr + host_ratio; page++) {
3032                 /* All pages in this host page are now not sent */
3033                 set_bit(page, unsentmap);
3034 
3035                 /*
3036                  * Remark them as dirty, updating the count for any pages
3037                  * that weren't previously dirty.
3038                  */
3039                 rs->migration_dirty_pages += !test_and_set_bit(page, bitmap);
3040             }
3041         }
3042 
3043         if (unsent_pass) {
3044             /* Find the next sent page for the next iteration */
3045             run_start = find_next_zero_bit(unsentmap, pages, run_start);
3046         } else {
3047             /* Find the next dirty page for the next iteration */
3048             run_start = find_next_bit(bitmap, pages, run_start);
3049         }
3050     }
3051 }
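
/*
 * Worked example (assuming 4KiB target pages on a 2MiB hugepage block, so
 * host_ratio == 512): if a dirty run starts at target page 1000, then
 * host_offset == 1000 % 512 == 488, so the run is pulled back to page 512
 * and the whole host page covering target pages 512..1023 is re-marked as
 * dirty/unsent here and, when it had been partially sent, discarded on the
 * destination, guaranteeing that no host page is left partially sent.
 */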
3052 
3053 /**
3054  * postcopy_chunk_hostpages: discard any partially sent host page
3055  *
3056  * Utility for the outgoing postcopy code.
3057  *
3058  * Discard any partially sent host-page size chunks, mark any partially
3059  * dirty host-page size chunks as all dirty.  In this case the host-page
3060  * is the host-page for the particular RAMBlock, i.e. it might be a huge page
3061  *
3062  * Returns zero on success
3063  *
3064  * @ms: current migration state
3065  * @block: block we want to work with
3066  */
3067 static int postcopy_chunk_hostpages(MigrationState *ms, RAMBlock *block)
3068 {
3069     PostcopyDiscardState *pds =
3070         postcopy_discard_send_init(ms, block->idstr);
3071 
3072     /* First pass: Discard all partially sent host pages */
3073     postcopy_chunk_hostpages_pass(ms, true, block, pds);
3074     /*
3075      * Second pass: Ensure that all partially dirty host pages are made
3076      * fully dirty.
3077      */
3078     postcopy_chunk_hostpages_pass(ms, false, block, pds);
3079 
3080     postcopy_discard_send_finish(ms, pds);
3081     return 0;
3082 }
3083 
3084 /**
3085  * ram_postcopy_send_discard_bitmap: transmit the discard bitmap
3086  *
3087  * Returns zero on success
3088  *
3089  * Transmit the set of pages to be discarded after precopy to the target;
3090  * these are pages that:
3091  *     a) Have been previously transmitted but are now dirty again
3092  *     b) Have never been transmitted; this ensures that any pages on the
3093  *        destination that have been mapped by background tasks get
3094  *        discarded (transparent huge pages are the specific concern)
3095  * Hopefully this is pretty sparse.
3096  *
3097  * @ms: current migration state
3098  */
3099 int ram_postcopy_send_discard_bitmap(MigrationState *ms)
3100 {
3101     RAMState *rs = ram_state;
3102     RAMBlock *block;
3103     int ret;
3104 
3105     rcu_read_lock();
3106 
3107     /* This should be our last sync, the src is now paused */
3108     migration_bitmap_sync(rs);
3109 
3110     /* Easiest way to make sure we don't resume in the middle of a host-page */
3111     rs->last_seen_block = NULL;
3112     rs->last_sent_block = NULL;
3113     rs->last_page = 0;
3114 
3115     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3116         unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
3117         unsigned long *bitmap = block->bmap;
3118         unsigned long *unsentmap = block->unsentmap;
3119 
3120         if (!unsentmap) {
3121             /* We don't have a safe way to resize the sentmap, so
3122              * if the bitmap was resized it will be NULL at this
3123              * point.
3124              */
3125             error_report("migration ram resized during precopy phase");
3126             rcu_read_unlock();
3127             return -EINVAL;
3128         }
3129         /* Deal with TPS != HPS and huge pages */
3130         ret = postcopy_chunk_hostpages(ms, block);
3131         if (ret) {
3132             rcu_read_unlock();
3133             return ret;
3134         }
3135 
3136         /*
3137          * Update the unsentmap to be unsentmap = unsentmap | dirty
3138          */
3139         bitmap_or(unsentmap, unsentmap, bitmap, pages);
3140 #ifdef DEBUG_POSTCOPY
3141         ram_debug_dump_bitmap(unsentmap, true, pages);
3142 #endif
3143     }
3144     trace_ram_postcopy_send_discard_bitmap();
3145 
3146     ret = postcopy_each_ram_send_discard(ms);
3147     rcu_read_unlock();
3148 
3149     return ret;
3150 }
3151 
3152 /**
3153  * ram_discard_range: discard dirtied pages at the beginning of postcopy
3154  *
3155  * Returns zero on success
3156  *
3157  * @rbname: name of the RAMBlock of the request. NULL means the
3158  *          same that last one.
3159  * @start: RAMBlock starting page
3160  * @length: RAMBlock size
3161  */
3162 int ram_discard_range(const char *rbname, uint64_t start, size_t length)
3163 {
3164     int ret = -1;
3165 
3166     trace_ram_discard_range(rbname, start, length);
3167 
3168     rcu_read_lock();
3169     RAMBlock *rb = qemu_ram_block_by_name(rbname);
3170 
3171     if (!rb) {
3172         error_report("ram_discard_range: Failed to find block '%s'", rbname);
3173         goto err;
3174     }
3175 
3176     /*
3177      * On the source VM, we don't need to update the received bitmap since
3178      * we don't even have one.
3179      */
3180     if (rb->receivedmap) {
3181         bitmap_clear(rb->receivedmap, start >> qemu_target_page_bits(),
3182                      length >> qemu_target_page_bits());
3183     }
3184 
3185     ret = ram_block_discard_range(rb, start, length);
3186 
3187 err:
3188     rcu_read_unlock();
3189 
3190     return ret;
3191 }
3192 
3193 /*
3194  * For every allocation, we will try not to crash the VM if the
3195  * allocation fails.
3196  */
3197 static int xbzrle_init(void)
3198 {
3199     Error *local_err = NULL;
3200 
3201     if (!migrate_use_xbzrle()) {
3202         return 0;
3203     }
3204 
3205     XBZRLE_cache_lock();
3206 
3207     XBZRLE.zero_target_page = g_try_malloc0(TARGET_PAGE_SIZE);
3208     if (!XBZRLE.zero_target_page) {
3209         error_report("%s: Error allocating zero page", __func__);
3210         goto err_out;
3211     }
3212 
3213     XBZRLE.cache = cache_init(migrate_xbzrle_cache_size(),
3214                               TARGET_PAGE_SIZE, &local_err);
3215     if (!XBZRLE.cache) {
3216         error_report_err(local_err);
3217         goto free_zero_page;
3218     }
3219 
3220     XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
3221     if (!XBZRLE.encoded_buf) {
3222         error_report("%s: Error allocating encoded_buf", __func__);
3223         goto free_cache;
3224     }
3225 
3226     XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
3227     if (!XBZRLE.current_buf) {
3228         error_report("%s: Error allocating current_buf", __func__);
3229         goto free_encoded_buf;
3230     }
3231 
3232     /* We are all good */
3233     XBZRLE_cache_unlock();
3234     return 0;
3235 
3236 free_encoded_buf:
3237     g_free(XBZRLE.encoded_buf);
3238     XBZRLE.encoded_buf = NULL;
3239 free_cache:
3240     cache_fini(XBZRLE.cache);
3241     XBZRLE.cache = NULL;
3242 free_zero_page:
3243     g_free(XBZRLE.zero_target_page);
3244     XBZRLE.zero_target_page = NULL;
3245 err_out:
3246     XBZRLE_cache_unlock();
3247     return -ENOMEM;
3248 }
3249 
3250 static int ram_state_init(RAMState **rsp)
3251 {
3252     *rsp = g_try_new0(RAMState, 1);
3253 
3254     if (!*rsp) {
3255         error_report("%s: Init ramstate fail", __func__);
3256         return -1;
3257     }
3258 
3259     qemu_mutex_init(&(*rsp)->bitmap_mutex);
3260     qemu_mutex_init(&(*rsp)->src_page_req_mutex);
3261     QSIMPLEQ_INIT(&(*rsp)->src_page_requests);
3262 
3263     /*
3264      * Count the total number of pages used by ram blocks not including any
3265      * gaps due to alignment or unplugs.
3266      * This must match the initial value of the dirty bitmap.
3267      */
3268     (*rsp)->migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
3269     ram_state_reset(*rsp);
3270 
3271     return 0;
3272 }
3273 
3274 static void ram_list_init_bitmaps(void)
3275 {
3276     MigrationState *ms = migrate_get_current();
3277     RAMBlock *block;
3278     unsigned long pages;
3279     uint8_t shift;
3280 
3281     /* Skip setting bitmap if there is no RAM */
3282     if (ram_bytes_total()) {
3283         shift = ms->clear_bitmap_shift;
3284         if (shift > CLEAR_BITMAP_SHIFT_MAX) {
3285             error_report("clear_bitmap_shift (%u) too big, using "
3286                          "max value (%u)", shift, CLEAR_BITMAP_SHIFT_MAX);
3287             shift = CLEAR_BITMAP_SHIFT_MAX;
3288         } else if (shift < CLEAR_BITMAP_SHIFT_MIN) {
3289             error_report("clear_bitmap_shift (%u) too small, using "
3290                          "min value (%u)", shift, CLEAR_BITMAP_SHIFT_MIN);
3291             shift = CLEAR_BITMAP_SHIFT_MIN;
3292         }
3293 
3294         RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3295             pages = block->max_length >> TARGET_PAGE_BITS;
3296             /*
3297              * The initial dirty bitmap for migration must be set with all
3298              * ones to make sure we'll migrate every guest RAM page to the
3299              * destination.
3300              * Here we set RAMBlock.bmap all to 1 because when restarting
3301              * migration after a failed attempt, ram_list.
3302              * dirty_memory[DIRTY_MEMORY_MIGRATION] does not cover the
3303              * whole guest memory.
3304              */
3305             block->bmap = bitmap_new(pages);
3306             bitmap_set(block->bmap, 0, pages);
3307             block->clear_bmap_shift = shift;
3308             block->clear_bmap = bitmap_new(clear_bmap_size(pages, shift));
3309             if (migrate_postcopy_ram()) {
3310                 block->unsentmap = bitmap_new(pages);
3311                 bitmap_set(block->unsentmap, 0, pages);
3312             }
3313         }
3314     }
3315 }
3316 
3317 static void ram_init_bitmaps(RAMState *rs)
3318 {
3319     /* For memory_global_dirty_log_start below.  */
3320     qemu_mutex_lock_iothread();
3321     qemu_mutex_lock_ramlist();
3322     rcu_read_lock();
3323 
3324     ram_list_init_bitmaps();
3325     memory_global_dirty_log_start();
3326     migration_bitmap_sync_precopy(rs);
3327 
3328     rcu_read_unlock();
3329     qemu_mutex_unlock_ramlist();
3330     qemu_mutex_unlock_iothread();
3331 }
3332 
3333 static int ram_init_all(RAMState **rsp)
3334 {
3335     if (ram_state_init(rsp)) {
3336         return -1;
3337     }
3338 
3339     if (xbzrle_init()) {
3340         ram_state_cleanup(rsp);
3341         return -1;
3342     }
3343 
3344     ram_init_bitmaps(*rsp);
3345 
3346     return 0;
3347 }
3348 
3349 static void ram_state_resume_prepare(RAMState *rs, QEMUFile *out)
3350 {
3351     RAMBlock *block;
3352     uint64_t pages = 0;
3353 
3354     /*
3355      * Postcopy is not using xbzrle/compression, so no need for that.
3356      * Also, since the source is already halted, we don't need to care
3357      * about dirty page logging either.
3358      */
3359 
3360     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3361         pages += bitmap_count_one(block->bmap,
3362                                   block->used_length >> TARGET_PAGE_BITS);
3363     }
3364 
3365     /* This may not be aligned with current bitmaps. Recalculate. */
3366     rs->migration_dirty_pages = pages;
3367 
3368     rs->last_seen_block = NULL;
3369     rs->last_sent_block = NULL;
3370     rs->last_page = 0;
3371     rs->last_version = ram_list.version;
3372     /*
3373      * Disable the bulk stage, otherwise we'll resend all of RAM
3374      * regardless of what has already been sent.
3375      */
3376     rs->ram_bulk_stage = false;
3377 
3378     /* Update RAMState cache of output QEMUFile */
3379     rs->f = out;
3380 
3381     trace_ram_state_resume_prepare(pages);
3382 }
3383 
3384 /*
3385  * This function clears bits of the free pages reported by the caller from the
3386  * migration dirty bitmap. @addr is the host address corresponding to the
3387  * start of the contiguous guest free pages, and @len is the total bytes of
3388  * those pages.
3389  */
3390 void qemu_guest_free_page_hint(void *addr, size_t len)
3391 {
3392     RAMBlock *block;
3393     ram_addr_t offset;
3394     size_t used_len, start, npages;
3395     MigrationState *s = migrate_get_current();
3396 
3397     /* This function is currently expected to be used during live migration */
3398     if (!migration_is_setup_or_active(s->state)) {
3399         return;
3400     }
3401 
3402     for (; len > 0; len -= used_len, addr += used_len) {
3403         block = qemu_ram_block_from_host(addr, false, &offset);
3404         if (unlikely(!block || offset >= block->used_length)) {
3405             /*
3406              * The implementation might not support RAMBlock resize during
3407              * live migration, but it could happen in theory with future
3408              * updates. So we add a check here to capture that case.
3409              */
3410             error_report_once("%s unexpected error", __func__);
3411             return;
3412         }
3413 
3414         if (len <= block->used_length - offset) {
3415             used_len = len;
3416         } else {
3417             used_len = block->used_length - offset;
3418         }
3419 
3420         start = offset >> TARGET_PAGE_BITS;
3421         npages = used_len >> TARGET_PAGE_BITS;
3422 
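             /*
              * Clear the dirty bits for the hinted free pages and subtract
              * them from the pending dirty-page count, so they will not be
              * sent to the destination.
              */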
3423         qemu_mutex_lock(&ram_state->bitmap_mutex);
3424         ram_state->migration_dirty_pages -=
3425                       bitmap_count_one_with_offset(block->bmap, start, npages);
3426         bitmap_clear(block->bmap, start, npages);
3427         qemu_mutex_unlock(&ram_state->bitmap_mutex);
3428     }
3429 }
3430 
3431 /*
3432  * Each of ram_save_setup, ram_save_iterate and ram_save_complete has a
3433  * long-running RCU critical section.  When RCU reclaims in the code
3434  * start to become numerous it will be necessary to reduce the
3435  * granularity of these critical sections.
3436  */
3437 
3438 /**
3439  * ram_save_setup: Setup RAM for migration
3440  *
3441  * Returns zero to indicate success and negative for error
3442  *
3443  * @f: QEMUFile where to send the data
3444  * @opaque: RAMState pointer
3445  */
3446 static int ram_save_setup(QEMUFile *f, void *opaque)
3447 {
3448     RAMState **rsp = opaque;
3449     RAMBlock *block;
3450 
3451     if (compress_threads_save_setup()) {
3452         return -1;
3453     }
3454 
3455     /* migration has already set up the bitmap, reuse it. */
3456     if (!migration_in_colo_state()) {
3457         if (ram_init_all(rsp) != 0) {
3458             compress_threads_save_cleanup();
3459             return -1;
3460         }
3461     }
3462     (*rsp)->f = f;
3463 
3464     rcu_read_lock();
3465 
3466     qemu_put_be64(f, ram_bytes_total_common(true) | RAM_SAVE_FLAG_MEM_SIZE);
3467 
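         /*
          * Describe each migratable block to the destination: a one-byte
          * idstr length, the idstr itself, and the block's used_length.  The
          * page size is appended only for postcopy-capable streams with a
          * non-standard host page size, and the GPA only when ignore-shared
          * is in use.
          */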
3468     RAMBLOCK_FOREACH_MIGRATABLE(block) {
3469         qemu_put_byte(f, strlen(block->idstr));
3470         qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
3471         qemu_put_be64(f, block->used_length);
3472         if (migrate_postcopy_ram() && block->page_size != qemu_host_page_size) {
3473             qemu_put_be64(f, block->page_size);
3474         }
3475         if (migrate_ignore_shared()) {
3476             qemu_put_be64(f, block->mr->addr);
3477         }
3478     }
3479 
3480     rcu_read_unlock();
3481 
3482     ram_control_before_iterate(f, RAM_CONTROL_SETUP);
3483     ram_control_after_iterate(f, RAM_CONTROL_SETUP);
3484 
3485     multifd_send_sync_main();
3486     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
3487     qemu_fflush(f);
3488 
3489     return 0;
3490 }
3491 
3492 /**
3493  * ram_save_iterate: iterative stage for migration
3494  *
3495  * Returns zero to indicate success and negative for error
3496  *
3497  * @f: QEMUFile where to send the data
3498  * @opaque: RAMState pointer
3499  */
3500 static int ram_save_iterate(QEMUFile *f, void *opaque)
3501 {
3502     RAMState **temp = opaque;
3503     RAMState *rs = *temp;
3504     int ret;
3505     int i;
3506     int64_t t0;
3507     int done = 0;
3508 
3509     if (blk_mig_bulk_active()) {
3510         /* Avoid transferring RAM during the bulk phase of block migration,
3511          * as the bulk phase usually takes a long time and transferring
3512          * RAM updates during that time is pointless. */
3513         goto out;
3514     }
3515 
3516     rcu_read_lock();
3517     if (ram_list.version != rs->last_version) {
3518         ram_state_reset(rs);
3519     }
3520 
3521     /* Read version before ram_list.blocks */
3522     smp_rmb();
3523 
3524     ram_control_before_iterate(f, RAM_CONTROL_ROUND);
3525 
3526     t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
3527     i = 0;
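         /*
          * Keep sending pages while we are within the bandwidth limit, or
          * while there are outstanding page requests from the destination,
          * bailing out early on stream errors or once MAX_WAIT has elapsed.
          */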
3528     while ((ret = qemu_file_rate_limit(f)) == 0 ||
3529             !QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
3530         int pages;
3531 
3532         if (qemu_file_get_error(f)) {
3533             break;
3534         }
3535 
3536         pages = ram_find_and_save_block(rs, false);
3537         /* no more pages to send */
3538         if (pages == 0) {
3539             done = 1;
3540             break;
3541         }
3542 
3543         if (pages < 0) {
3544             qemu_file_set_error(f, pages);
3545             break;
3546         }
3547 
3548         rs->target_page_count += pages;
3549 
3550         /* We want to check in the first loop iteration, just in case it
3551            was the first time and we had to sync the dirty bitmap.
3552            qemu_clock_get_ns() is a bit expensive, so we only check every
3553            few iterations.
3554         */
3555         if ((i & 63) == 0) {
3556             uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
3557             if (t1 > MAX_WAIT) {
3558                 trace_ram_save_iterate_big_wait(t1, i);
3559                 break;
3560             }
3561         }
3562         i++;
3563     }
3564     rcu_read_unlock();
3565 
3566     /*
3567      * Must occur before EOS (or any QEMUFile operation)
3568      * because of RDMA protocol.
3569      */
3570     ram_control_after_iterate(f, RAM_CONTROL_ROUND);
3571 
3572 out:
3573     multifd_send_sync_main();
3574     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
3575     qemu_fflush(f);
3576     ram_counters.transferred += 8;
3577 
3578     ret = qemu_file_get_error(f);
3579     if (ret < 0) {
3580         return ret;
3581     }
3582 
3583     return done;
3584 }
3585 
3586 /**
3587  * ram_save_complete: function called to send the remaining amount of ram
3588  *
3589  * Returns zero to indicate success or negative on error
3590  *
3591  * Called with iothread lock
3592  *
3593  * @f: QEMUFile where to send the data
3594  * @opaque: RAMState pointer
3595  */
3596 static int ram_save_complete(QEMUFile *f, void *opaque)
3597 {
3598     RAMState **temp = opaque;
3599     RAMState *rs = *temp;
3600     int ret = 0;
3601 
3602     rcu_read_lock();
3603 
3604     if (!migration_in_postcopy()) {
3605         migration_bitmap_sync_precopy(rs);
3606     }
3607 
3608     ram_control_before_iterate(f, RAM_CONTROL_FINISH);
3609 
3610     /* try transferring iterative blocks of memory */
3611 
3612     /* flush all remaining blocks regardless of rate limiting */
3613     while (true) {
3614         int pages;
3615 
3616         pages = ram_find_and_save_block(rs, !migration_in_colo_state());
3617         /* no more blocks to send */
3618         if (pages == 0) {
3619             break;
3620         }
3621         if (pages < 0) {
3622             ret = pages;
3623             break;
3624         }
3625     }
3626 
3627     flush_compressed_data(rs);
3628     ram_control_after_iterate(f, RAM_CONTROL_FINISH);
3629 
3630     rcu_read_unlock();
3631 
3632     multifd_send_sync_main();
3633     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
3634     qemu_fflush(f);
3635 
3636     return ret;
3637 }
3638 
3639 static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
3640                              uint64_t *res_precopy_only,
3641                              uint64_t *res_compatible,
3642                              uint64_t *res_postcopy_only)
3643 {
3644     RAMState **temp = opaque;
3645     RAMState *rs = *temp;
3646     uint64_t remaining_size;
3647 
3648     remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
3649 
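     /*
      * If the estimate is already below the threshold, resync the dirty
      * bitmap under the iothread lock to refine it before the caller
      * decides whether to enter the completion stage.
      */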
3650     if (!migration_in_postcopy() &&
3651         remaining_size < max_size) {
3652         qemu_mutex_lock_iothread();
3653         rcu_read_lock();
3654         migration_bitmap_sync_precopy(rs);
3655         rcu_read_unlock();
3656         qemu_mutex_unlock_iothread();
3657         remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
3658     }
3659 
3660     if (migrate_postcopy_ram()) {
3661         /* We can do postcopy, and all the data is postcopiable */
3662         *res_compatible += remaining_size;
3663     } else {
3664         *res_precopy_only += remaining_size;
3665     }
3666 }
3667 
3668 static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
3669 {
3670     unsigned int xh_len;
3671     int xh_flags;
3672     uint8_t *loaded_data;
3673 
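     /*
      * Wire format of an XBZRLE page: a one-byte flags field (which must be
      * ENCODING_FLAG_XBZRLE), a big-endian 16-bit encoded length, and then
      * the encoded data itself.
      */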
3674     /* extract RLE header */
3675     xh_flags = qemu_get_byte(f);
3676     xh_len = qemu_get_be16(f);
3677 
3678     if (xh_flags != ENCODING_FLAG_XBZRLE) {
3679         error_report("Failed to load XBZRLE page - wrong compression!");
3680         return -1;
3681     }
3682 
3683     if (xh_len > TARGET_PAGE_SIZE) {
3684         error_report("Failed to load XBZRLE page - len overflow!");
3685         return -1;
3686     }
3687     loaded_data = XBZRLE.decoded_buf;
3688     /* load data and decode */
3689     /* it can change loaded_data to point to an internal buffer */
3690     qemu_get_buffer_in_place(f, &loaded_data, xh_len);
3691 
3692     /* decode RLE */
3693     if (xbzrle_decode_buffer(loaded_data, xh_len, host,
3694                              TARGET_PAGE_SIZE) == -1) {
3695         error_report("Failed to load XBZRLE page - decode error!");
3696         return -1;
3697     }
3698 
3699     return 0;
3700 }
3701 
3702 /**
3703  * ram_block_from_stream: read a RAMBlock id from the migration stream
3704  *
3705  * Must be called from within a rcu critical section.
3706  *
3707  * Returns a pointer from within the RCU-protected ram_list.
3708  *
3709  * @f: QEMUFile where to read the data from
3710  * @flags: Page flags (mostly to see if it's a continuation of previous block)
3711  */
3712 static inline RAMBlock *ram_block_from_stream(QEMUFile *f, int flags)
3713 {
3714     static RAMBlock *block = NULL;
3715     char id[256];
3716     uint8_t len;
3717 
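     /*
      * RAM_SAVE_FLAG_CONTINUE means the page belongs to the same block as
      * the previous one, so reuse the cached block instead of reading a new
      * id string from the stream.
      */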
3718     if (flags & RAM_SAVE_FLAG_CONTINUE) {
3719         if (!block) {
3720             error_report("Ack, bad migration stream!");
3721             return NULL;
3722         }
3723         return block;
3724     }
3725 
3726     len = qemu_get_byte(f);
3727     qemu_get_buffer(f, (uint8_t *)id, len);
3728     id[len] = 0;
3729 
3730     block = qemu_ram_block_by_name(id);
3731     if (!block) {
3732         error_report("Can't find block %s", id);
3733         return NULL;
3734     }
3735 
3736     if (ramblock_is_ignored(block)) {
3737         error_report("block %s should not be migrated !", id);
3738         return NULL;
3739     }
3740 
3741     return block;
3742 }
3743 
3744 static inline void *host_from_ram_block_offset(RAMBlock *block,
3745                                                ram_addr_t offset)
3746 {
3747     if (!offset_in_ramblock(block, offset)) {
3748         return NULL;
3749     }
3750 
3751     return block->host + offset;
3752 }
3753 
3754 static inline void *colo_cache_from_block_offset(RAMBlock *block,
3755                                                  ram_addr_t offset)
3756 {
3757     if (!offset_in_ramblock(block, offset)) {
3758         return NULL;
3759     }
3760     if (!block->colo_cache) {
3761         error_report("%s: colo_cache is NULL in block :%s",
3762                      __func__, block->idstr);
3763         return NULL;
3764     }
3765 
3766     /*
3767     * During a COLO checkpoint, we need a bitmap of these migrated pages.
3768     * It helps us decide which pages in the RAM cache should be flushed
3769     * into the VM's RAM later.
3770     */
3771     if (!test_and_set_bit(offset >> TARGET_PAGE_BITS, block->bmap)) {
3772         ram_state->migration_dirty_pages++;
3773     }
3774     return block->colo_cache + offset;
3775 }
3776 
3777 /**
3778  * ram_handle_compressed: handle the zero page case
3779  *
3780  * If a page (or a whole RDMA chunk) has been
3781  * determined to be zero, then zap it.
3782  *
3783  * @host: host address for the zero page
3784  * @ch: what the page is filled from.  We only support zero
3785  * @size: size of the zero page
3786  */
3787 void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
3788 {
3789     if (ch != 0 || !is_zero_range(host, size)) {
3790         memset(host, ch, size);
3791     }
3792 }
3793 
3794 /* return the size after decompression, or negative value on error */
3795 static int
3796 qemu_uncompress_data(z_stream *stream, uint8_t *dest, size_t dest_len,
3797                      const uint8_t *source, size_t source_len)
3798 {
3799     int err;
3800 
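     /*
      * Reuse the caller's z_stream across pages: inflateReset() is much
      * cheaper than a full inflateInit()/inflateEnd() cycle per page.
      */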
3801     err = inflateReset(stream);
3802     if (err != Z_OK) {
3803         return -1;
3804     }
3805 
3806     stream->avail_in = source_len;
3807     stream->next_in = (uint8_t *)source;
3808     stream->avail_out = dest_len;
3809     stream->next_out = dest;
3810 
3811     err = inflate(stream, Z_NO_FLUSH);
3812     if (err != Z_STREAM_END) {
3813         return -1;
3814     }
3815 
3816     return stream->total_out;
3817 }
3818 
3819 static void *do_data_decompress(void *opaque)
3820 {
3821     DecompressParam *param = opaque;
3822     unsigned long pagesize;
3823     uint8_t *des;
3824     int len, ret;
3825 
3826     qemu_mutex_lock(&param->mutex);
3827     while (!param->quit) {
3828         if (param->des) {
3829             des = param->des;
3830             len = param->len;
3831             param->des = 0;
3832             qemu_mutex_unlock(&param->mutex);
3833 
3834             pagesize = TARGET_PAGE_SIZE;
3835 
3836             ret = qemu_uncompress_data(&param->stream, des, pagesize,
3837                                        param->compbuf, len);
3838             if (ret < 0 && migrate_get_current()->decompress_error_check) {
3839                 error_report("decompress data failed");
3840                 qemu_file_set_error(decomp_file, ret);
3841             }
3842 
3843             qemu_mutex_lock(&decomp_done_lock);
3844             param->done = true;
3845             qemu_cond_signal(&decomp_done_cond);
3846             qemu_mutex_unlock(&decomp_done_lock);
3847 
3848             qemu_mutex_lock(&param->mutex);
3849         } else {
3850             qemu_cond_wait(&param->cond, &param->mutex);
3851         }
3852     }
3853     qemu_mutex_unlock(&param->mutex);
3854 
3855     return NULL;
3856 }
3857 
3858 static int wait_for_decompress_done(void)
3859 {
3860     int idx, thread_count;
3861 
3862     if (!migrate_use_compression()) {
3863         return 0;
3864     }
3865 
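     /*
      * Wait until every decompression worker has marked itself done, then
      * propagate any error a worker recorded on the decompression file.
      */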
3866     thread_count = migrate_decompress_threads();
3867     qemu_mutex_lock(&decomp_done_lock);
3868     for (idx = 0; idx < thread_count; idx++) {
3869         while (!decomp_param[idx].done) {
3870             qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
3871         }
3872     }
3873     qemu_mutex_unlock(&decomp_done_lock);
3874     return qemu_file_get_error(decomp_file);
3875 }
3876 
3877 static void compress_threads_load_cleanup(void)
3878 {
3879     int i, thread_count;
3880 
3881     if (!migrate_use_compression()) {
3882         return;
3883     }
3884     thread_count = migrate_decompress_threads();
3885     for (i = 0; i < thread_count; i++) {
3886         /*
3887          * we use it as an indicator of whether the thread was
3888          * properly initialized or not
3889          */
3890         if (!decomp_param[i].compbuf) {
3891             break;
3892         }
3893 
3894         qemu_mutex_lock(&decomp_param[i].mutex);
3895         decomp_param[i].quit = true;
3896         qemu_cond_signal(&decomp_param[i].cond);
3897         qemu_mutex_unlock(&decomp_param[i].mutex);
3898     }
3899     for (i = 0; i < thread_count; i++) {
3900         if (!decomp_param[i].compbuf) {
3901             break;
3902         }
3903 
3904         qemu_thread_join(decompress_threads + i);
3905         qemu_mutex_destroy(&decomp_param[i].mutex);
3906         qemu_cond_destroy(&decomp_param[i].cond);
3907         inflateEnd(&decomp_param[i].stream);
3908         g_free(decomp_param[i].compbuf);
3909         decomp_param[i].compbuf = NULL;
3910     }
3911     g_free(decompress_threads);
3912     g_free(decomp_param);
3913     decompress_threads = NULL;
3914     decomp_param = NULL;
3915     decomp_file = NULL;
3916 }
3917 
3918 static int compress_threads_load_setup(QEMUFile *f)
3919 {
3920     int i, thread_count;
3921 
3922     if (!migrate_use_compression()) {
3923         return 0;
3924     }
3925 
3926     thread_count = migrate_decompress_threads();
3927     decompress_threads = g_new0(QemuThread, thread_count);
3928     decomp_param = g_new0(DecompressParam, thread_count);
3929     qemu_mutex_init(&decomp_done_lock);
3930     qemu_cond_init(&decomp_done_cond);
3931     decomp_file = f;
3932     for (i = 0; i < thread_count; i++) {
3933         if (inflateInit(&decomp_param[i].stream) != Z_OK) {
3934             goto exit;
3935         }
3936 
3937         decomp_param[i].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
3938         qemu_mutex_init(&decomp_param[i].mutex);
3939         qemu_cond_init(&decomp_param[i].cond);
3940         decomp_param[i].done = true;
3941         decomp_param[i].quit = false;
3942         qemu_thread_create(decompress_threads + i, "decompress",
3943                            do_data_decompress, decomp_param + i,
3944                            QEMU_THREAD_JOINABLE);
3945     }
3946     return 0;
3947 exit:
3948     compress_threads_load_cleanup();
3949     return -1;
3950 }
3951 
3952 static void decompress_data_with_multi_threads(QEMUFile *f,
3953                                                void *host, int len)
3954 {
3955     int idx, thread_count;
3956 
3957     thread_count = migrate_decompress_threads();
3958     qemu_mutex_lock(&decomp_done_lock);
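     /*
      * Hand the compressed page to the first idle worker thread; if all of
      * them are busy, wait on decomp_done_cond until one finishes and retry.
      */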
3959     while (true) {
3960         for (idx = 0; idx < thread_count; idx++) {
3961             if (decomp_param[idx].done) {
3962                 decomp_param[idx].done = false;
3963                 qemu_mutex_lock(&decomp_param[idx].mutex);
3964                 qemu_get_buffer(f, decomp_param[idx].compbuf, len);
3965                 decomp_param[idx].des = host;
3966                 decomp_param[idx].len = len;
3967                 qemu_cond_signal(&decomp_param[idx].cond);
3968                 qemu_mutex_unlock(&decomp_param[idx].mutex);
3969                 break;
3970             }
3971         }
3972         if (idx < thread_count) {
3973             break;
3974         } else {
3975             qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
3976         }
3977     }
3978     qemu_mutex_unlock(&decomp_done_lock);
3979 }
3980 
3981 /*
3982  * COLO cache: this is for the secondary VM.  We cache the whole
3983  * memory of the secondary VM; the global lock must be held when
3984  * calling this helper.
3985  */
3986 int colo_init_ram_cache(void)
3987 {
3988     RAMBlock *block;
3989 
3990     rcu_read_lock();
3991     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3992         block->colo_cache = qemu_anon_ram_alloc(block->used_length,
3993                                                 NULL,
3994                                                 false);
3995         if (!block->colo_cache) {
3996             error_report("%s: Can't alloc memory for COLO cache of block %s, "
3997                          "size 0x" RAM_ADDR_FMT, __func__, block->idstr,
3998                          block->used_length);
3999             goto out_locked;
4000         }
4001         memcpy(block->colo_cache, block->host, block->used_length);
4002     }
4003     rcu_read_unlock();
4004     /*
4005     * Record the dirty pages sent by the PVM; we use this dirty bitmap
4006     * to decide which pages in the cache should be flushed into the SVM's
4007     * RAM.  Here we use the same name 'ram_bitmap' as for migration.
4008     */
4009     if (ram_bytes_total()) {
4010         RAMBlock *block;
4011 
4012         RAMBLOCK_FOREACH_NOT_IGNORED(block) {
4013             unsigned long pages = block->max_length >> TARGET_PAGE_BITS;
4014 
4015             block->bmap = bitmap_new(pages);
4016             bitmap_set(block->bmap, 0, pages);
4017         }
4018     }
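     /*
      * COLO keeps its own RAMState on the incoming side: the dirty page
      * count starts at zero and dirty logging is enabled, so each checkpoint
      * only needs to flush pages that have actually changed.
      */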
4019     ram_state = g_new0(RAMState, 1);
4020     ram_state->migration_dirty_pages = 0;
4021     qemu_mutex_init(&ram_state->bitmap_mutex);
4022     memory_global_dirty_log_start();
4023 
4024     return 0;
4025 
4026 out_locked:
4027 
4028     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
4029         if (block->colo_cache) {
4030             qemu_anon_ram_free(block->colo_cache, block->used_length);
4031             block->colo_cache = NULL;
4032         }
4033     }
4034 
4035     rcu_read_unlock();
4036     return -errno;
4037 }
4038 
4039 /* The global lock must be held when calling this helper */
4040 void colo_release_ram_cache(void)
4041 {
4042     RAMBlock *block;
4043 
4044     memory_global_dirty_log_stop();
4045     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
4046         g_free(block->bmap);
4047         block->bmap = NULL;
4048     }
4049 
4050     rcu_read_lock();
4051 
4052     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
4053         if (block->colo_cache) {
4054             qemu_anon_ram_free(block->colo_cache, block->used_length);
4055             block->colo_cache = NULL;
4056         }
4057     }
4058 
4059     rcu_read_unlock();
4060     qemu_mutex_destroy(&ram_state->bitmap_mutex);
4061     g_free(ram_state);
4062     ram_state = NULL;
4063 }
4064 
4065 /**
4066  * ram_load_setup: Setup RAM for migration incoming side
4067  *
4068  * Returns zero to indicate success and negative for error
4069  *
4070  * @f: QEMUFile where to receive the data
4071  * @opaque: RAMState pointer
4072  */
4073 static int ram_load_setup(QEMUFile *f, void *opaque)
4074 {
4075     if (compress_threads_load_setup(f)) {
4076         return -1;
4077     }
4078 
4079     xbzrle_load_setup();
4080     ramblock_recv_map_init();
4081 
4082     return 0;
4083 }
4084 
4085 static int ram_load_cleanup(void *opaque)
4086 {
4087     RAMBlock *rb;
4088 
4089     RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
4090         if (ramblock_is_pmem(rb)) {
4091             pmem_persist(rb->host, rb->used_length);
4092         }
4093     }
4094 
4095     xbzrle_load_cleanup();
4096     compress_threads_load_cleanup();
4097 
4098     RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
4099         g_free(rb->receivedmap);
4100         rb->receivedmap = NULL;
4101     }
4102 
4103     return 0;
4104 }
4105 
4106 /**
4107  * ram_postcopy_incoming_init: allocate postcopy data structures
4108  *
4109  * Returns 0 for success and negative if there was one error
4110  *
4111  * @mis: current migration incoming state
4112  *
4113  * Allocate the data structures etc. needed by incoming migration with
4114  * postcopy-ram.  postcopy-ram's similarly named
4115  * postcopy_ram_incoming_init does the work.
4116  */
4117 int ram_postcopy_incoming_init(MigrationIncomingState *mis)
4118 {
4119     return postcopy_ram_incoming_init(mis);
4120 }
4121 
4122 /**
4123  * ram_load_postcopy: load a page in postcopy case
4124  *
4125  * Returns 0 for success or -errno in case of error
4126  *
4127  * Called in postcopy mode by ram_load().
4128  * rcu_read_lock is taken prior to this being called.
4129  *
4130  * @f: QEMUFile where to send the data
4131  */
4132 static int ram_load_postcopy(QEMUFile *f)
4133 {
4134     int flags = 0, ret = 0;
4135     bool place_needed = false;
4136     bool matches_target_page_size = false;
4137     MigrationIncomingState *mis = migration_incoming_get_current();
4138     /* Temporary page that is later 'placed' */
4139     void *postcopy_host_page = postcopy_get_tmp_page(mis);
4140     void *last_host = NULL;
4141     bool all_zero = false;
4142 
4143     while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
4144         ram_addr_t addr;
4145         void *host = NULL;
4146         void *page_buffer = NULL;
4147         void *place_source = NULL;
4148         RAMBlock *block = NULL;
4149         uint8_t ch;
4150 
4151         addr = qemu_get_be64(f);
4152 
4153         /*
4154          * If there is a QEMUFile error, we should stop here; in that
4155          * case "addr" may be invalid.
4156          */
4157         ret = qemu_file_get_error(f);
4158         if (ret) {
4159             break;
4160         }
4161 
4162         flags = addr & ~TARGET_PAGE_MASK;
4163         addr &= TARGET_PAGE_MASK;
4164 
4165         trace_ram_load_postcopy_loop((uint64_t)addr, flags);
4166         place_needed = false;
4167         if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE)) {
4168             block = ram_block_from_stream(f, flags);
4169 
4170             host = host_from_ram_block_offset(block, addr);
4171             if (!host) {
4172                 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
4173                 ret = -EINVAL;
4174                 break;
4175             }
4176             matches_target_page_size = block->page_size == TARGET_PAGE_SIZE;
4177             /*
4178              * Postcopy requires that we place whole host pages atomically;
4179              * these may be huge pages for RAMBlocks that are backed by
4180              * hugetlbfs.
4181              * To make it atomic, the data is read into a temporary page
4182              * that's moved into place later.
4183              * The migration protocol uses,  possibly smaller, target-pages
4184              * however the source ensures it always sends all the components
4185              * of a host page in order.
4186              */
4187             page_buffer = postcopy_host_page +
4188                           ((uintptr_t)host & (block->page_size - 1));
4189             /* If all TP are zero then we can optimise the place */
4190             if (!((uintptr_t)host & (block->page_size - 1))) {
4191                 all_zero = true;
4192             } else {
4193                 /* not the 1st TP within the HP */
4194                 if (host != (last_host + TARGET_PAGE_SIZE)) {
4195                     error_report("Non-sequential target page %p/%p",
4196                                   host, last_host);
4197                     ret = -EINVAL;
4198                     break;
4199                 }
4200             }
4201 
4202 
4203             /*
4204              * If it's the last part of a host page then we place the host
4205              * page
4206              */
4207             place_needed = (((uintptr_t)host + TARGET_PAGE_SIZE) &
4208                                      (block->page_size - 1)) == 0;
4209             place_source = postcopy_host_page;
4210         }
4211         last_host = host;
4212 
4213         switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
4214         case RAM_SAVE_FLAG_ZERO:
4215             ch = qemu_get_byte(f);
4216             memset(page_buffer, ch, TARGET_PAGE_SIZE);
4217             if (ch) {
4218                 all_zero = false;
4219             }
4220             break;
4221 
4222         case RAM_SAVE_FLAG_PAGE:
4223             all_zero = false;
4224             if (!matches_target_page_size) {
4225                 /* For huge pages, we always use temporary buffer */
4226                 qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE);
4227             } else {
4228                 /*
4229                  * For small pages that match the target page size, we
4230                  * avoid the qemu_file copy.  Instead we directly use
4231                  * the buffer of QEMUFile to place the page.  Note: we
4232                  * cannot do any QEMUFile operation before using that
4233                  * buffer to make sure the buffer is valid when
4234                  * placing the page.
4235                  */
4236                 qemu_get_buffer_in_place(f, (uint8_t **)&place_source,
4237                                          TARGET_PAGE_SIZE);
4238             }
4239             break;
4240         case RAM_SAVE_FLAG_EOS:
4241             /* normal exit */
4242             multifd_recv_sync_main();
4243             break;
4244         default:
4245             error_report("Unknown combination of migration flags: %#x"
4246                          " (postcopy mode)", flags);
4247             ret = -EINVAL;
4248             break;
4249         }
4250 
4251         /* Detect any possible file errors */
4252         if (!ret && qemu_file_get_error(f)) {
4253             ret = qemu_file_get_error(f);
4254         }
4255 
4256         if (!ret && place_needed) {
4257             /* This gets called at the last target page in the host page */
4258             void *place_dest = host + TARGET_PAGE_SIZE - block->page_size;
4259 
4260             if (all_zero) {
4261                 ret = postcopy_place_page_zero(mis, place_dest,
4262                                                block);
4263             } else {
4264                 ret = postcopy_place_page(mis, place_dest,
4265                                           place_source, block);
4266             }
4267         }
4268     }
4269 
4270     return ret;
4271 }
4272 
4273 static bool postcopy_is_advised(void)
4274 {
4275     PostcopyState ps = postcopy_state_get();
4276     return ps >= POSTCOPY_INCOMING_ADVISE && ps < POSTCOPY_INCOMING_END;
4277 }
4278 
4279 static bool postcopy_is_running(void)
4280 {
4281     PostcopyState ps = postcopy_state_get();
4282     return ps >= POSTCOPY_INCOMING_LISTENING && ps < POSTCOPY_INCOMING_END;
4283 }
4284 
4285 /*
4286  * Flush the contents of the RAM cache into the SVM's memory.
4287  * Only flush the pages that have been dirtied by the PVM, the SVM, or both.
4288  */
4289 static void colo_flush_ram_cache(void)
4290 {
4291     RAMBlock *block = NULL;
4292     void *dst_host;
4293     void *src_host;
4294     unsigned long offset = 0;
4295 
4296     memory_global_dirty_log_sync();
4297     rcu_read_lock();
4298     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
4299         migration_bitmap_sync_range(ram_state, block, block->used_length);
4300     }
4301     rcu_read_unlock();
4302 
4303     trace_colo_flush_ram_cache_begin(ram_state->migration_dirty_pages);
4304     rcu_read_lock();
4305     block = QLIST_FIRST_RCU(&ram_list.blocks);
4306 
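     /*
      * Walk the dirty bitmap of every block: for each dirty page, clear the
      * bit and copy that page from the COLO cache into the SVM's RAM; when
      * the search runs past the end of a block, move on to the next one.
      */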
4307     while (block) {
4308         offset = migration_bitmap_find_dirty(ram_state, block, offset);
4309 
4310         if (offset << TARGET_PAGE_BITS >= block->used_length) {
4311             offset = 0;
4312             block = QLIST_NEXT_RCU(block, next);
4313         } else {
4314             migration_bitmap_clear_dirty(ram_state, block, offset);
4315             dst_host = block->host + (offset << TARGET_PAGE_BITS);
4316             src_host = block->colo_cache + (offset << TARGET_PAGE_BITS);
4317             memcpy(dst_host, src_host, TARGET_PAGE_SIZE);
4318         }
4319     }
4320 
4321     rcu_read_unlock();
4322     trace_colo_flush_ram_cache_end();
4323 }
4324 
4325 static int ram_load(QEMUFile *f, void *opaque, int version_id)
4326 {
4327     int flags = 0, ret = 0, invalid_flags = 0;
4328     static uint64_t seq_iter;
4329     int len = 0;
4330     /*
4331      * If the system is running in postcopy mode, page inserts into host
4332      * memory must be atomic.
4333      */
4334     bool postcopy_running = postcopy_is_running();
4335     /* ADVISE comes earlier; it shows the source has the postcopy capability enabled */
4336     bool postcopy_advised = postcopy_is_advised();
4337 
4338     seq_iter++;
4339 
4340     if (version_id != 4) {
4341         ret = -EINVAL;
4342     }
4343 
4344     if (!migrate_use_compression()) {
4345         invalid_flags |= RAM_SAVE_FLAG_COMPRESS_PAGE;
4346     }
4347     /* This RCU critical section can be very long-running.
4348      * When RCU reclaims in the code start to become numerous,
4349      * it will be necessary to reduce the granularity of this
4350      * critical section.
4351      */
4352     rcu_read_lock();
4353 
4354     if (postcopy_running) {
4355         ret = ram_load_postcopy(f);
4356     }
4357 
4358     while (!postcopy_running && !ret && !(flags & RAM_SAVE_FLAG_EOS)) {
4359         ram_addr_t addr, total_ram_bytes;
4360         void *host = NULL;
4361         uint8_t ch;
4362 
4363         addr = qemu_get_be64(f);
4364         flags = addr & ~TARGET_PAGE_MASK;
4365         addr &= TARGET_PAGE_MASK;
4366 
4367         if (flags & invalid_flags) {
4368             if (flags & invalid_flags & RAM_SAVE_FLAG_COMPRESS_PAGE) {
4369                 error_report("Received an unexpected compressed page");
4370             }
4371 
4372             ret = -EINVAL;
4373             break;
4374         }
4375 
4376         if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
4377                      RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
4378             RAMBlock *block = ram_block_from_stream(f, flags);
4379 
4380             /*
4381              * After going into COLO state, we should load the page into colo_cache.
4382              */
4383             if (migration_incoming_in_colo_state()) {
4384                 host = colo_cache_from_block_offset(block, addr);
4385             } else {
4386                 host = host_from_ram_block_offset(block, addr);
4387             }
4388             if (!host) {
4389                 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
4390                 ret = -EINVAL;
4391                 break;
4392             }
4393 
4394             if (!migration_incoming_in_colo_state()) {
4395                 ramblock_recv_bitmap_set(block, host);
4396             }
4397 
4398             trace_ram_load_loop(block->idstr, (uint64_t)addr, flags, host);
4399         }
4400 
4401         switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
4402         case RAM_SAVE_FLAG_MEM_SIZE:
4403             /* Synchronize RAM block list */
4404             total_ram_bytes = addr;
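             /*
              * Each block record mirrors what ram_save_setup() wrote: a
              * one-byte idstr length, the idstr, a big-endian used_length,
              * then, optionally, the page size (postcopy advised with a
              * non-standard host page size) and the GPA (ignore-shared).
              */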
4405             while (!ret && total_ram_bytes) {
4406                 RAMBlock *block;
4407                 char id[256];
4408                 ram_addr_t length;
4409 
4410                 len = qemu_get_byte(f);
4411                 qemu_get_buffer(f, (uint8_t *)id, len);
4412                 id[len] = 0;
4413                 length = qemu_get_be64(f);
4414 
4415                 block = qemu_ram_block_by_name(id);
4416                 if (block && !qemu_ram_is_migratable(block)) {
4417                     error_report("block %s should not be migrated !", id);
4418                     ret = -EINVAL;
4419                 } else if (block) {
4420                     if (length != block->used_length) {
4421                         Error *local_err = NULL;
4422 
4423                         ret = qemu_ram_resize(block, length,
4424                                               &local_err);
4425                         if (local_err) {
4426                             error_report_err(local_err);
4427                         }
4428                     }
4429                     /* For postcopy we need to check hugepage sizes match */
4430                     if (postcopy_advised &&
4431                         block->page_size != qemu_host_page_size) {
4432                         uint64_t remote_page_size = qemu_get_be64(f);
4433                         if (remote_page_size != block->page_size) {
4434                             error_report("Mismatched RAM page size %s "
4435                                          "(local) %zd != %" PRId64,
4436                                          id, block->page_size,
4437                                          remote_page_size);
4438                             ret = -EINVAL;
4439                         }
4440                     }
4441                     if (migrate_ignore_shared()) {
4442                         hwaddr addr = qemu_get_be64(f);
4443                         if (ramblock_is_ignored(block) &&
4444                             block->mr->addr != addr) {
4445                             error_report("Mismatched GPAs for block %s "
4446                                          "%" PRId64 "!= %" PRId64,
4447                                          "%" PRId64 " != %" PRId64,
4448                                          (uint64_t)block->mr->addr);
4449                             ret = -EINVAL;
4450                         }
4451                     }
4452                     ram_control_load_hook(f, RAM_CONTROL_BLOCK_REG,
4453                                           block->idstr);
4454                 } else {
4455                     error_report("Unknown ramblock \"%s\", cannot "
4456                                  "accept migration", id);
4457                     ret = -EINVAL;
4458                 }
4459 
4460                 total_ram_bytes -= length;
4461             }
4462             break;
4463 
4464         case RAM_SAVE_FLAG_ZERO:
4465             ch = qemu_get_byte(f);
4466             ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
4467             break;
4468 
4469         case RAM_SAVE_FLAG_PAGE:
4470             qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
4471             break;
4472 
4473         case RAM_SAVE_FLAG_COMPRESS_PAGE:
4474             len = qemu_get_be32(f);
4475             if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
4476                 error_report("Invalid compressed data length: %d", len);
4477                 ret = -EINVAL;
4478                 break;
4479             }
4480             decompress_data_with_multi_threads(f, host, len);
4481             break;
4482 
4483         case RAM_SAVE_FLAG_XBZRLE:
4484             if (load_xbzrle(f, addr, host) < 0) {
4485                 error_report("Failed to decompress XBZRLE page at "
4486                              RAM_ADDR_FMT, addr);
4487                 ret = -EINVAL;
4488                 break;
4489             }
4490             break;
4491         case RAM_SAVE_FLAG_EOS:
4492             /* normal exit */
4493             multifd_recv_sync_main();
4494             break;
4495         default:
4496             if (flags & RAM_SAVE_FLAG_HOOK) {
4497                 ram_control_load_hook(f, RAM_CONTROL_HOOK, NULL);
4498             } else {
4499                 error_report("Unknown combination of migration flags: %#x",
4500                              flags);
4501                 ret = -EINVAL;
4502             }
4503         }
4504         if (!ret) {
4505             ret = qemu_file_get_error(f);
4506         }
4507     }
4508 
4509     ret |= wait_for_decompress_done();
4510     rcu_read_unlock();
4511     trace_ram_load_complete(ret, seq_iter);
4512 
4513     if (!ret  && migration_incoming_in_colo_state()) {
4514         colo_flush_ram_cache();
4515     }
4516     return ret;
4517 }
4518 
4519 static bool ram_has_postcopy(void *opaque)
4520 {
4521     RAMBlock *rb;
4522     RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
4523         if (ramblock_is_pmem(rb)) {
4524             info_report("Block: %s, host: %p is an nvdimm memory, postcopy "
4525                          "is not supported now!", rb->idstr, rb->host);
4526             return false;
4527         }
4528     }
4529 
4530     return migrate_postcopy_ram();
4531 }
4532 
4533 /* Sync all the dirty bitmap with destination VM.  */
4534 static int ram_dirty_bitmap_sync_all(MigrationState *s, RAMState *rs)
4535 {
4536     RAMBlock *block;
4537     QEMUFile *file = s->to_dst_file;
4538     int ramblock_count = 0;
4539 
4540     trace_ram_dirty_bitmap_sync_start();
4541 
4542     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
4543         qemu_savevm_send_recv_bitmap(file, block->idstr);
4544         trace_ram_dirty_bitmap_request(block->idstr);
4545         ramblock_count++;
4546     }
4547 
4548     trace_ram_dirty_bitmap_sync_wait();
4549 
4550     /* Wait until all the ramblocks' dirty bitmaps are synced */
4551     while (ramblock_count--) {
4552         qemu_sem_wait(&s->rp_state.rp_sem);
4553     }
4554 
4555     trace_ram_dirty_bitmap_sync_complete();
4556 
4557     return 0;
4558 }
4559 
4560 static void ram_dirty_bitmap_reload_notify(MigrationState *s)
4561 {
4562     qemu_sem_post(&s->rp_state.rp_sem);
4563 }
4564 
4565 /*
4566  * Read the received bitmap, revert it as the initial dirty bitmap.
4567  * This is only used when the postcopy migration is paused but wants
4568  * to resume from a middle point.
4569  */
4570 int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *block)
4571 {
4572     int ret = -EINVAL;
4573     QEMUFile *file = s->rp_state.from_dst_file;
4574     unsigned long *le_bitmap, nbits = block->used_length >> TARGET_PAGE_BITS;
4575     uint64_t local_size = DIV_ROUND_UP(nbits, 8);
4576     uint64_t size, end_mark;
4577 
4578     trace_ram_dirty_bitmap_reload_begin(block->idstr);
4579 
4580     if (s->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
4581         error_report("%s: incorrect state %s", __func__,
4582                      MigrationStatus_str(s->state));
4583         return -EINVAL;
4584     }
4585 
4586     /*
4587      * Note: see comments in ramblock_recv_bitmap_send() on why we
4588      * need the endianness conversion, and the padding.
4589      */
4590     local_size = ROUND_UP(local_size, 8);
4591 
4592     /* Add padding */
4593     le_bitmap = bitmap_new(nbits + BITS_PER_LONG);
4594 
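     /*
      * The bitmap arrives as: a big-endian 64-bit size, the little-endian
      * bitmap data itself (rounded up to 8 bytes), and a big-endian 64-bit
      * end marker that must equal RAMBLOCK_RECV_BITMAP_ENDING.
      */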
4595     size = qemu_get_be64(file);
4596 
4597     /* The size of the bitmap should match that of our ramblock */
4598     if (size != local_size) {
4599         error_report("%s: ramblock '%s' bitmap size mismatch "
4600                      "(0x%"PRIx64" != 0x%"PRIx64")", __func__,
4601                      block->idstr, size, local_size);
4602         ret = -EINVAL;
4603         goto out;
4604     }
4605 
4606     size = qemu_get_buffer(file, (uint8_t *)le_bitmap, local_size);
4607     end_mark = qemu_get_be64(file);
4608 
4609     ret = qemu_file_get_error(file);
4610     if (ret || size != local_size) {
4611         error_report("%s: read bitmap failed for ramblock '%s': %d"
4612                      " (size 0x%"PRIx64", got: 0x%"PRIx64")",
4613                      __func__, block->idstr, ret, local_size, size);
4614         ret = -EIO;
4615         goto out;
4616     }
4617 
4618     if (end_mark != RAMBLOCK_RECV_BITMAP_ENDING) {
4619         error_report("%s: ramblock '%s' end mark incorrect: 0x%"PRIx64,
4620                      __func__, block->idstr, end_mark);
4621         ret = -EINVAL;
4622         goto out;
4623     }
4624 
4625     /*
4626      * Endianess convertion. We are during postcopy (though paused).
4627      * The dirty bitmap won't change. We can directly modify it.
4628      */
4629     bitmap_from_le(block->bmap, le_bitmap, nbits);
4630 
4631     /*
4632      * What we received is the "received bitmap".  Invert it to form the
4633      * initial dirty bitmap for this ramblock.
4634      */
4635     bitmap_complement(block->bmap, block->bmap, nbits);
4636 
4637     trace_ram_dirty_bitmap_reload_complete(block->idstr);
4638 
4639     /*
4640      * We succeeded in syncing the bitmap for the current ramblock.  If
4641      * this is the last one to sync, we need to notify the main send thread.
4642      */
4643     ram_dirty_bitmap_reload_notify(s);
4644 
4645     ret = 0;
4646 out:
4647     g_free(le_bitmap);
4648     return ret;
4649 }
4650 
4651 static int ram_resume_prepare(MigrationState *s, void *opaque)
4652 {
4653     RAMState *rs = *(RAMState **)opaque;
4654     int ret;
4655 
4656     ret = ram_dirty_bitmap_sync_all(s, rs);
4657     if (ret) {
4658         return ret;
4659     }
4660 
4661     ram_state_resume_prepare(rs, s->to_dst_file);
4662 
4663     return 0;
4664 }
4665 
4666 static SaveVMHandlers savevm_ram_handlers = {
4667     .save_setup = ram_save_setup,
4668     .save_live_iterate = ram_save_iterate,
4669     .save_live_complete_postcopy = ram_save_complete,
4670     .save_live_complete_precopy = ram_save_complete,
4671     .has_postcopy = ram_has_postcopy,
4672     .save_live_pending = ram_save_pending,
4673     .load_state = ram_load,
4674     .save_cleanup = ram_save_cleanup,
4675     .load_setup = ram_load_setup,
4676     .load_cleanup = ram_load_cleanup,
4677     .resume_prepare = ram_resume_prepare,
4678 };
4679 
4680 void ram_mig_init(void)
4681 {
4682     qemu_mutex_init(&XBZRLE.lock);
4683     register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, &ram_state);
4684 }
4685