xref: /openbmc/qemu/migration/ram.c (revision 6c35ed68)
1 /*
2  * QEMU System Emulator
3  *
4  * Copyright (c) 2003-2008 Fabrice Bellard
5  * Copyright (c) 2011-2015 Red Hat Inc
6  *
7  * Authors:
8  *  Juan Quintela <quintela@redhat.com>
9  *
10  * Permission is hereby granted, free of charge, to any person obtaining a copy
11  * of this software and associated documentation files (the "Software"), to deal
12  * in the Software without restriction, including without limitation the rights
13  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
14  * copies of the Software, and to permit persons to whom the Software is
15  * furnished to do so, subject to the following conditions:
16  *
17  * The above copyright notice and this permission notice shall be included in
18  * all copies or substantial portions of the Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
24  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
26  * THE SOFTWARE.
27  */
28 
29 #include "qemu/osdep.h"
30 #include "cpu.h"
31 #include <zlib.h>
32 #include "qemu/cutils.h"
33 #include "qemu/bitops.h"
34 #include "qemu/bitmap.h"
35 #include "qemu/main-loop.h"
36 #include "qemu/pmem.h"
37 #include "xbzrle.h"
38 #include "ram.h"
39 #include "migration.h"
40 #include "socket.h"
41 #include "migration/register.h"
42 #include "migration/misc.h"
43 #include "qemu-file.h"
44 #include "postcopy-ram.h"
45 #include "page_cache.h"
46 #include "qemu/error-report.h"
47 #include "qapi/error.h"
48 #include "qapi/qapi-events-migration.h"
49 #include "qapi/qmp/qerror.h"
50 #include "trace.h"
51 #include "exec/ram_addr.h"
52 #include "exec/target_page.h"
53 #include "qemu/rcu_queue.h"
54 #include "migration/colo.h"
55 #include "block.h"
56 #include "sysemu/sysemu.h"
57 #include "qemu/uuid.h"
58 #include "savevm.h"
59 #include "qemu/iov.h"
60 
61 /***********************************************************/
62 /* ram save/restore */
63 
64 /* RAM_SAVE_FLAG_ZERO used to be named RAM_SAVE_FLAG_COMPRESS, it
65  * worked for pages that were filled with the same char.  We switched
66  * it to only search for the zero value.  It was then renamed to avoid
67  * confusion with RAM_SAVE_FLAG_COMPRESS_PAGE.
68  */
69 
70 #define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
71 #define RAM_SAVE_FLAG_ZERO     0x02
72 #define RAM_SAVE_FLAG_MEM_SIZE 0x04
73 #define RAM_SAVE_FLAG_PAGE     0x08
74 #define RAM_SAVE_FLAG_EOS      0x10
75 #define RAM_SAVE_FLAG_CONTINUE 0x20
76 #define RAM_SAVE_FLAG_XBZRLE   0x40
77 /* 0x80 is reserved in migration.h; start at 0x100 for the next flag */
78 #define RAM_SAVE_FLAG_COMPRESS_PAGE    0x100
79 
80 static inline bool is_zero_range(uint8_t *p, uint64_t size)
81 {
82     return buffer_is_zero(p, size);
83 }
84 
85 XBZRLECacheStats xbzrle_counters;
86 
87 /* This struct contains the XBZRLE cache and a static page
88    used by the compression */
89 static struct {
90     /* buffer used for XBZRLE encoding */
91     uint8_t *encoded_buf;
92     /* buffer for storing page content */
93     uint8_t *current_buf;
94     /* Cache for XBZRLE, Protected by lock. */
95     PageCache *cache;
96     QemuMutex lock;
97     /* it will store a page full of zeros */
98     uint8_t *zero_target_page;
99     /* buffer used for XBZRLE decoding */
100     uint8_t *decoded_buf;
101 } XBZRLE;
102 
103 static void XBZRLE_cache_lock(void)
104 {
105     if (migrate_use_xbzrle())
106         qemu_mutex_lock(&XBZRLE.lock);
107 }
108 
109 static void XBZRLE_cache_unlock(void)
110 {
111     if (migrate_use_xbzrle())
112         qemu_mutex_unlock(&XBZRLE.lock);
113 }
114 
115 /**
116  * xbzrle_cache_resize: resize the xbzrle cache
117  *
118  * This function is called from qmp_migrate_set_cache_size in the main
119  * thread, possibly while a migration is in progress.  A running
120  * migration may be using the cache and might finish during this call,
121  * hence changes to the cache are protected by XBZRLE.lock.
122  *
123  * Returns 0 for success or -1 for error
124  *
125  * @new_size: new cache size
126  * @errp: set to the failure reason if the check fails
127  */
128 int xbzrle_cache_resize(int64_t new_size, Error **errp)
129 {
130     PageCache *new_cache;
131     int64_t ret = 0;
132 
133     /* Check for truncation */
134     if (new_size != (size_t)new_size) {
135         error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
136                    "exceeding address space");
137         return -1;
138     }
139 
140     if (new_size == migrate_xbzrle_cache_size()) {
141         /* nothing to do */
142         return 0;
143     }
144 
145     XBZRLE_cache_lock();
146 
147     if (XBZRLE.cache != NULL) {
148         new_cache = cache_init(new_size, TARGET_PAGE_SIZE, errp);
149         if (!new_cache) {
150             ret = -1;
151             goto out;
152         }
153 
154         cache_fini(XBZRLE.cache);
155         XBZRLE.cache = new_cache;
156     }
157 out:
158     XBZRLE_cache_unlock();
159     return ret;
160 }
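/*
 * Illustrative usage sketch (not part of this file): a QMP handler such
 * as qmp_migrate_set_cache_size() is expected to call this with a
 * caller-owned Error pointer, roughly:
 *
 *     Error *local_err = NULL;
 *
 *     if (xbzrle_cache_resize(64 * 1024 * 1024, &local_err) < 0) {
 *         error_report_err(local_err);
 *     }
 */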
161 
162 static bool ramblock_is_ignored(RAMBlock *block)
163 {
164     return !qemu_ram_is_migratable(block) ||
165            (migrate_ignore_shared() && qemu_ram_is_shared(block));
166 }
167 
168 /* Should be holding either ram_list.mutex, or the RCU lock. */
169 #define RAMBLOCK_FOREACH_NOT_IGNORED(block)            \
170     INTERNAL_RAMBLOCK_FOREACH(block)                   \
171         if (ramblock_is_ignored(block)) {} else
172 
173 #define RAMBLOCK_FOREACH_MIGRATABLE(block)             \
174     INTERNAL_RAMBLOCK_FOREACH(block)                   \
175         if (!qemu_ram_is_migratable(block)) {} else
176 
177 #undef RAMBLOCK_FOREACH
178 
179 int foreach_not_ignored_block(RAMBlockIterFunc func, void *opaque)
180 {
181     RAMBlock *block;
182     int ret = 0;
183 
184     rcu_read_lock();
185     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
186         ret = func(block, opaque);
187         if (ret) {
188             break;
189         }
190     }
191     rcu_read_unlock();
192     return ret;
193 }
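/*
 * Illustrative usage sketch: a callback matching RAMBlockIterFunc takes the
 * block and an opaque pointer, and returns non-zero to stop the iteration
 * early.  The names below are hypothetical:
 *
 *     static int count_ramblock(RAMBlock *rb, void *opaque)
 *     {
 *         (*(int *)opaque)++;
 *         return 0;
 *     }
 *
 *     int nblocks = 0;
 *     foreach_not_ignored_block(count_ramblock, &nblocks);
 */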
194 
195 static void ramblock_recv_map_init(void)
196 {
197     RAMBlock *rb;
198 
199     RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
200         assert(!rb->receivedmap);
201         rb->receivedmap = bitmap_new(rb->max_length >> qemu_target_page_bits());
202     }
203 }
204 
205 int ramblock_recv_bitmap_test(RAMBlock *rb, void *host_addr)
206 {
207     return test_bit(ramblock_recv_bitmap_offset(host_addr, rb),
208                     rb->receivedmap);
209 }
210 
211 bool ramblock_recv_bitmap_test_byte_offset(RAMBlock *rb, uint64_t byte_offset)
212 {
213     return test_bit(byte_offset >> TARGET_PAGE_BITS, rb->receivedmap);
214 }
215 
216 void ramblock_recv_bitmap_set(RAMBlock *rb, void *host_addr)
217 {
218     set_bit_atomic(ramblock_recv_bitmap_offset(host_addr, rb), rb->receivedmap);
219 }
220 
221 void ramblock_recv_bitmap_set_range(RAMBlock *rb, void *host_addr,
222                                     size_t nr)
223 {
224     bitmap_set_atomic(rb->receivedmap,
225                       ramblock_recv_bitmap_offset(host_addr, rb),
226                       nr);
227 }
228 
229 #define  RAMBLOCK_RECV_BITMAP_ENDING  (0x0123456789abcdefULL)
230 
231 /*
232  * Format: bitmap_size (8 bytes) + whole_bitmap (N bytes).
233  *
234  * Returns the number of bytes sent (>0) on success, or <0 on error.
235  */
236 int64_t ramblock_recv_bitmap_send(QEMUFile *file,
237                                   const char *block_name)
238 {
239     RAMBlock *block = qemu_ram_block_by_name(block_name);
240     unsigned long *le_bitmap, nbits;
241     uint64_t size;
242 
243     if (!block) {
244         error_report("%s: invalid block name: %s", __func__, block_name);
245         return -1;
246     }
247 
248     nbits = block->used_length >> TARGET_PAGE_BITS;
249 
250     /*
251      * Make sure the tmp bitmap buffer is big enough, e.g., on 32bit
252      * machines we may need 4 more bytes for padding (see below
253      * comment). So extend it a bit beforehand.
254      */
255     le_bitmap = bitmap_new(nbits + BITS_PER_LONG);
256 
257     /*
258      * Always use little endian when sending the bitmap. This is
259      * required because the source and destination VMs may not be using
260      * the same endianness. (Note: big endian won't work.)
261      */
262     bitmap_to_le(le_bitmap, block->receivedmap, nbits);
263 
264     /* Size of the bitmap, in bytes */
265     size = DIV_ROUND_UP(nbits, 8);
266 
267     /*
268      * size is always aligned to 8 bytes for 64bit machines, but that
269      * may not be the case for 32bit machines. We need this padding to
270      * make sure the migration can survive even between 32bit and
271      * 64bit machines.
272      */
273     size = ROUND_UP(size, 8);
274 
275     qemu_put_be64(file, size);
276     qemu_put_buffer(file, (const uint8_t *)le_bitmap, size);
277     /*
278      * Mark as an end, in case the middle part is screwed up due to
279      * some "mysterious" reason.
280      */
281     qemu_put_be64(file, RAMBLOCK_RECV_BITMAP_ENDING);
282     qemu_fflush(file);
283 
284     g_free(le_bitmap);
285 
286     if (qemu_file_get_error(file)) {
287         return qemu_file_get_error(file);
288     }
289 
290     return size + sizeof(size);
291 }
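/*
 * A minimal sketch of how the receiving side could parse the stream written
 * above (illustrative only; it assumes the same QEMUFile helpers and a
 * bitmap_from_le() counterpart of bitmap_to_le()):
 *
 *     uint64_t size = qemu_get_be64(file);
 *     uint8_t *le_bitmap = g_malloc0(size);
 *
 *     qemu_get_buffer(file, le_bitmap, size);
 *     if (qemu_get_be64(file) != RAMBLOCK_RECV_BITMAP_ENDING) {
 *         ... the middle part of the stream was corrupted ...
 *     }
 *     bitmap_from_le(block->bmap, (unsigned long *)le_bitmap, nbits);
 *     g_free(le_bitmap);
 */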
292 
293 /*
294  * An outstanding page request, on the source, having been received
295  * and queued
296  */
297 struct RAMSrcPageRequest {
298     RAMBlock *rb;
299     hwaddr    offset;
300     hwaddr    len;
301 
302     QSIMPLEQ_ENTRY(RAMSrcPageRequest) next_req;
303 };
304 
305 /* State of RAM for migration */
306 struct RAMState {
307     /* QEMUFile used for this migration */
308     QEMUFile *f;
309     /* Last block that we have visited searching for dirty pages */
310     RAMBlock *last_seen_block;
311     /* Last block from where we have sent data */
312     RAMBlock *last_sent_block;
313     /* Last dirty target page we have sent */
314     ram_addr_t last_page;
315     /* last ram version we have seen */
316     uint32_t last_version;
317     /* We are in the first round */
318     bool ram_bulk_stage;
319     /* The free page optimization is enabled */
320     bool fpo_enabled;
321     /* How many times we have dirtied too many pages */
322     int dirty_rate_high_cnt;
323     /* these variables are used for bitmap sync */
324     /* last time we did a full bitmap_sync */
325     int64_t time_last_bitmap_sync;
326     /* bytes transferred at start_time */
327     uint64_t bytes_xfer_prev;
328     /* number of dirty pages since start_time */
329     uint64_t num_dirty_pages_period;
330     /* xbzrle misses since the beginning of the period */
331     uint64_t xbzrle_cache_miss_prev;
332 
333     /* compression statistics since the beginning of the period */
334     /* number of times there was no free thread to compress data */
335     uint64_t compress_thread_busy_prev;
336     /* number of bytes produced by compression */
337     uint64_t compressed_size_prev;
338     /* number of compressed pages */
339     uint64_t compress_pages_prev;
340 
341     /* total handled target pages at the beginning of period */
342     uint64_t target_page_count_prev;
343     /* total handled target pages since start */
344     uint64_t target_page_count;
345     /* number of dirty bits in the bitmap */
346     uint64_t migration_dirty_pages;
347     /* Protects modification of the bitmap and migration dirty pages */
348     QemuMutex bitmap_mutex;
349     /* The RAMBlock used in the last src_page_requests */
350     RAMBlock *last_req_rb;
351     /* Queue of outstanding page requests from the destination */
352     QemuMutex src_page_req_mutex;
353     QSIMPLEQ_HEAD(, RAMSrcPageRequest) src_page_requests;
354 };
355 typedef struct RAMState RAMState;
356 
357 static RAMState *ram_state;
358 
359 static NotifierWithReturnList precopy_notifier_list;
360 
361 void precopy_infrastructure_init(void)
362 {
363     notifier_with_return_list_init(&precopy_notifier_list);
364 }
365 
366 void precopy_add_notifier(NotifierWithReturn *n)
367 {
368     notifier_with_return_list_add(&precopy_notifier_list, n);
369 }
370 
371 void precopy_remove_notifier(NotifierWithReturn *n)
372 {
373     notifier_with_return_remove(n);
374 }
375 
376 int precopy_notify(PrecopyNotifyReason reason, Error **errp)
377 {
378     PrecopyNotifyData pnd;
379     pnd.reason = reason;
380     pnd.errp = errp;
381 
382     return notifier_with_return_list_notify(&precopy_notifier_list, &pnd);
383 }
384 
385 void precopy_enable_free_page_optimization(void)
386 {
387     if (!ram_state) {
388         return;
389     }
390 
391     ram_state->fpo_enabled = true;
392 }
393 
394 uint64_t ram_bytes_remaining(void)
395 {
396     return ram_state ? (ram_state->migration_dirty_pages * TARGET_PAGE_SIZE) :
397                        0;
398 }
399 
400 MigrationStats ram_counters;
401 
402 /* used by the search for pages to send */
403 struct PageSearchStatus {
404     /* Current block being searched */
405     RAMBlock    *block;
406     /* Current page to search from */
407     unsigned long page;
408     /* Set once we wrap around */
409     bool         complete_round;
410 };
411 typedef struct PageSearchStatus PageSearchStatus;
412 
413 CompressionStats compression_counters;
414 
415 struct CompressParam {
416     bool done;
417     bool quit;
418     bool zero_page;
419     QEMUFile *file;
420     QemuMutex mutex;
421     QemuCond cond;
422     RAMBlock *block;
423     ram_addr_t offset;
424 
425     /* internally used fields */
426     z_stream stream;
427     uint8_t *originbuf;
428 };
429 typedef struct CompressParam CompressParam;
430 
431 struct DecompressParam {
432     bool done;
433     bool quit;
434     QemuMutex mutex;
435     QemuCond cond;
436     void *des;
437     uint8_t *compbuf;
438     int len;
439     z_stream stream;
440 };
441 typedef struct DecompressParam DecompressParam;
442 
443 static CompressParam *comp_param;
444 static QemuThread *compress_threads;
445 /* comp_done_cond is used to wake up the migration thread when
446  * one of the compression threads has finished the compression.
447  * comp_done_lock is used together with comp_done_cond.
448  */
449 static QemuMutex comp_done_lock;
450 static QemuCond comp_done_cond;
451 /* The empty QEMUFileOps will be used by the QEMUFile in CompressParam */
452 static const QEMUFileOps empty_ops = { };
453 
454 static QEMUFile *decomp_file;
455 static DecompressParam *decomp_param;
456 static QemuThread *decompress_threads;
457 static QemuMutex decomp_done_lock;
458 static QemuCond decomp_done_cond;
459 
460 static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
461                                  ram_addr_t offset, uint8_t *source_buf);
462 
463 static void *do_data_compress(void *opaque)
464 {
465     CompressParam *param = opaque;
466     RAMBlock *block;
467     ram_addr_t offset;
468     bool zero_page;
469 
470     qemu_mutex_lock(&param->mutex);
471     while (!param->quit) {
472         if (param->block) {
473             block = param->block;
474             offset = param->offset;
475             param->block = NULL;
476             qemu_mutex_unlock(&param->mutex);
477 
478             zero_page = do_compress_ram_page(param->file, &param->stream,
479                                              block, offset, param->originbuf);
480 
481             qemu_mutex_lock(&comp_done_lock);
482             param->done = true;
483             param->zero_page = zero_page;
484             qemu_cond_signal(&comp_done_cond);
485             qemu_mutex_unlock(&comp_done_lock);
486 
487             qemu_mutex_lock(&param->mutex);
488         } else {
489             qemu_cond_wait(&param->cond, &param->mutex);
490         }
491     }
492     qemu_mutex_unlock(&param->mutex);
493 
494     return NULL;
495 }
496 
497 static void compress_threads_save_cleanup(void)
498 {
499     int i, thread_count;
500 
501     if (!migrate_use_compression() || !comp_param) {
502         return;
503     }
504 
505     thread_count = migrate_compress_threads();
506     for (i = 0; i < thread_count; i++) {
507         /*
508          * we use it as an indicator of whether the thread is
509          * properly initialized or not
510          */
511         if (!comp_param[i].file) {
512             break;
513         }
514 
515         qemu_mutex_lock(&comp_param[i].mutex);
516         comp_param[i].quit = true;
517         qemu_cond_signal(&comp_param[i].cond);
518         qemu_mutex_unlock(&comp_param[i].mutex);
519 
520         qemu_thread_join(compress_threads + i);
521         qemu_mutex_destroy(&comp_param[i].mutex);
522         qemu_cond_destroy(&comp_param[i].cond);
523         deflateEnd(&comp_param[i].stream);
524         g_free(comp_param[i].originbuf);
525         qemu_fclose(comp_param[i].file);
526         comp_param[i].file = NULL;
527     }
528     qemu_mutex_destroy(&comp_done_lock);
529     qemu_cond_destroy(&comp_done_cond);
530     g_free(compress_threads);
531     g_free(comp_param);
532     compress_threads = NULL;
533     comp_param = NULL;
534 }
535 
536 static int compress_threads_save_setup(void)
537 {
538     int i, thread_count;
539 
540     if (!migrate_use_compression()) {
541         return 0;
542     }
543     thread_count = migrate_compress_threads();
544     compress_threads = g_new0(QemuThread, thread_count);
545     comp_param = g_new0(CompressParam, thread_count);
546     qemu_cond_init(&comp_done_cond);
547     qemu_mutex_init(&comp_done_lock);
548     for (i = 0; i < thread_count; i++) {
549         comp_param[i].originbuf = g_try_malloc(TARGET_PAGE_SIZE);
550         if (!comp_param[i].originbuf) {
551             goto exit;
552         }
553 
554         if (deflateInit(&comp_param[i].stream,
555                         migrate_compress_level()) != Z_OK) {
556             g_free(comp_param[i].originbuf);
557             goto exit;
558         }
559 
560         /* comp_param[i].file is just used as a dummy buffer to save data,
561          * set its ops to empty.
562          */
563         comp_param[i].file = qemu_fopen_ops(NULL, &empty_ops);
564         comp_param[i].done = true;
565         comp_param[i].quit = false;
566         qemu_mutex_init(&comp_param[i].mutex);
567         qemu_cond_init(&comp_param[i].cond);
568         qemu_thread_create(compress_threads + i, "compress",
569                            do_data_compress, comp_param + i,
570                            QEMU_THREAD_JOINABLE);
571     }
572     return 0;
573 
574 exit:
575     compress_threads_save_cleanup();
576     return -1;
577 }
578 
579 /* Multiple fd's */
580 
581 #define MULTIFD_MAGIC 0x11223344U
582 #define MULTIFD_VERSION 1
583 
584 #define MULTIFD_FLAG_SYNC (1 << 0)
585 
586 /* This value needs to be a multiple of qemu_target_page_size() */
587 #define MULTIFD_PACKET_SIZE (512 * 1024)
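/*
 * For example, with a 4 KiB target page size each multifd packet describes
 * up to 512 KiB / 4 KiB = 128 pages; the pages_alloc value sent in the
 * packet header below is derived from this same division.
 */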
588 
589 typedef struct {
590     uint32_t magic;
591     uint32_t version;
592     unsigned char uuid[16]; /* QemuUUID */
593     uint8_t id;
594     uint8_t unused1[7];     /* Reserved for future use */
595     uint64_t unused2[4];    /* Reserved for future use */
596 } __attribute__((packed)) MultiFDInit_t;
597 
598 typedef struct {
599     uint32_t magic;
600     uint32_t version;
601     uint32_t flags;
602     /* maximum number of allocated pages */
603     uint32_t pages_alloc;
604     uint32_t pages_used;
605     /* size of the next packet that contains pages */
606     uint32_t next_packet_size;
607     uint64_t packet_num;
608     uint64_t unused[4];    /* Reserved for future use */
609     char ramblock[256];
610     uint64_t offset[];
611 } __attribute__((packed)) MultiFDPacket_t;
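/*
 * All multi-byte fields of this header are big-endian on the wire (see
 * multifd_send_fill_packet() below).  As a rough size example, assuming a
 * 64-bit ram_addr_t and the 128-pages-per-packet case above, the header
 * takes sizeof(MultiFDPacket_t) = 320 bytes plus 128 * 8 = 1024 bytes of
 * offsets, i.e. 1344 bytes per packet before the page data itself.
 */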
612 
613 typedef struct {
614     /* number of used pages */
615     uint32_t used;
616     /* number of allocated pages */
617     uint32_t allocated;
618     /* global number of generated multifd packets */
619     uint64_t packet_num;
620     /* offset of each page */
621     ram_addr_t *offset;
622     /* pointer to each page */
623     struct iovec *iov;
624     RAMBlock *block;
625 } MultiFDPages_t;
626 
627 typedef struct {
628     /* these fields are not changed once the thread is created */
629     /* channel number */
630     uint8_t id;
631     /* channel thread name */
632     char *name;
633     /* channel thread id */
634     QemuThread thread;
635     /* communication channel */
636     QIOChannel *c;
637     /* semaphore on which the thread waits for more work */
638     QemuSemaphore sem;
639     /* this mutex protects the following parameters */
640     QemuMutex mutex;
641     /* is this channel thread running */
642     bool running;
643     /* should this thread finish */
644     bool quit;
645     /* thread has work to do */
646     int pending_job;
647     /* array of pages to send */
648     MultiFDPages_t *pages;
649     /* packet allocated len */
650     uint32_t packet_len;
651     /* pointer to the packet */
652     MultiFDPacket_t *packet;
653     /* multifd flags for each packet */
654     uint32_t flags;
655     /* size of the next packet that contains pages */
656     uint32_t next_packet_size;
657     /* global number of generated multifd packets */
658     uint64_t packet_num;
659     /* thread local variables */
660     /* packets sent through this channel */
661     uint64_t num_packets;
662     /* pages sent through this channel */
663     uint64_t num_pages;
664     /* syncs main thread and channels */
665     QemuSemaphore sem_sync;
666 }  MultiFDSendParams;
667 
668 typedef struct {
669     /* these fields are not changed once the thread is created */
670     /* channel number */
671     uint8_t id;
672     /* channel thread name */
673     char *name;
674     /* channel thread id */
675     QemuThread thread;
676     /* communication channel */
677     QIOChannel *c;
678     /* this mutex protects the following parameters */
679     QemuMutex mutex;
680     /* is this channel thread running */
681     bool running;
682     /* should this thread finish */
683     bool quit;
684     /* array of pages to receive */
685     MultiFDPages_t *pages;
686     /* packet allocated len */
687     uint32_t packet_len;
688     /* pointer to the packet */
689     MultiFDPacket_t *packet;
690     /* multifd flags for each packet */
691     uint32_t flags;
692     /* global number of generated multifd packets */
693     uint64_t packet_num;
694     /* thread local variables */
695     /* size of the next packet that contains pages */
696     uint32_t next_packet_size;
697     /* packets sent through this channel */
698     uint64_t num_packets;
699     /* pages sent through this channel */
700     uint64_t num_pages;
701     /* syncs main thread and channels */
702     QemuSemaphore sem_sync;
703 } MultiFDRecvParams;
704 
705 static int multifd_send_initial_packet(MultiFDSendParams *p, Error **errp)
706 {
707     MultiFDInit_t msg;
708     int ret;
709 
710     msg.magic = cpu_to_be32(MULTIFD_MAGIC);
711     msg.version = cpu_to_be32(MULTIFD_VERSION);
712     msg.id = p->id;
713     memcpy(msg.uuid, &qemu_uuid.data, sizeof(msg.uuid));
714 
715     ret = qio_channel_write_all(p->c, (char *)&msg, sizeof(msg), errp);
716     if (ret != 0) {
717         return -1;
718     }
719     return 0;
720 }
721 
722 static int multifd_recv_initial_packet(QIOChannel *c, Error **errp)
723 {
724     MultiFDInit_t msg;
725     int ret;
726 
727     ret = qio_channel_read_all(c, (char *)&msg, sizeof(msg), errp);
728     if (ret != 0) {
729         return -1;
730     }
731 
732     msg.magic = be32_to_cpu(msg.magic);
733     msg.version = be32_to_cpu(msg.version);
734 
735     if (msg.magic != MULTIFD_MAGIC) {
736         error_setg(errp, "multifd: received packet magic %x "
737                    "expected %x", msg.magic, MULTIFD_MAGIC);
738         return -1;
739     }
740 
741     if (msg.version != MULTIFD_VERSION) {
742         error_setg(errp, "multifd: received packet version %d "
743                    "expected %d", msg.version, MULTIFD_VERSION);
744         return -1;
745     }
746 
747     if (memcmp(msg.uuid, &qemu_uuid, sizeof(qemu_uuid))) {
748         char *uuid = qemu_uuid_unparse_strdup(&qemu_uuid);
749         char *msg_uuid = qemu_uuid_unparse_strdup((const QemuUUID *)msg.uuid);
750 
751         error_setg(errp, "multifd: received uuid '%s' and expected "
752                    "uuid '%s' for channel %hhd", msg_uuid, uuid, msg.id);
753         g_free(uuid);
754         g_free(msg_uuid);
755         return -1;
756     }
757 
758     if (msg.id > migrate_multifd_channels()) {
759         error_setg(errp, "multifd: received channel id %d "
760                    "expected a value below %d", msg.id, migrate_multifd_channels());
761         return -1;
762     }
763 
764     return msg.id;
765 }
766 
767 static MultiFDPages_t *multifd_pages_init(size_t size)
768 {
769     MultiFDPages_t *pages = g_new0(MultiFDPages_t, 1);
770 
771     pages->allocated = size;
772     pages->iov = g_new0(struct iovec, size);
773     pages->offset = g_new0(ram_addr_t, size);
774 
775     return pages;
776 }
777 
778 static void multifd_pages_clear(MultiFDPages_t *pages)
779 {
780     pages->used = 0;
781     pages->allocated = 0;
782     pages->packet_num = 0;
783     pages->block = NULL;
784     g_free(pages->iov);
785     pages->iov = NULL;
786     g_free(pages->offset);
787     pages->offset = NULL;
788     g_free(pages);
789 }
790 
791 static void multifd_send_fill_packet(MultiFDSendParams *p)
792 {
793     MultiFDPacket_t *packet = p->packet;
794     uint32_t page_max = MULTIFD_PACKET_SIZE / qemu_target_page_size();
795     int i;
796 
797     packet->magic = cpu_to_be32(MULTIFD_MAGIC);
798     packet->version = cpu_to_be32(MULTIFD_VERSION);
799     packet->flags = cpu_to_be32(p->flags);
800     packet->pages_alloc = cpu_to_be32(page_max);
801     packet->pages_used = cpu_to_be32(p->pages->used);
802     packet->next_packet_size = cpu_to_be32(p->next_packet_size);
803     packet->packet_num = cpu_to_be64(p->packet_num);
804 
805     if (p->pages->block) {
806         strncpy(packet->ramblock, p->pages->block->idstr, 256);
807     }
808 
809     for (i = 0; i < p->pages->used; i++) {
810         packet->offset[i] = cpu_to_be64(p->pages->offset[i]);
811     }
812 }
813 
814 static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
815 {
816     MultiFDPacket_t *packet = p->packet;
817     uint32_t pages_max = MULTIFD_PACKET_SIZE / qemu_target_page_size();
818     RAMBlock *block;
819     int i;
820 
821     packet->magic = be32_to_cpu(packet->magic);
822     if (packet->magic != MULTIFD_MAGIC) {
823         error_setg(errp, "multifd: received packet "
824                    "magic %x and expected magic %x",
825                    packet->magic, MULTIFD_MAGIC);
826         return -1;
827     }
828 
829     packet->version = be32_to_cpu(packet->version);
830     if (packet->version != MULTIFD_VERSION) {
831         error_setg(errp, "multifd: received packet "
832                    "version %d and expected version %d",
833                    packet->version, MULTIFD_VERSION);
834         return -1;
835     }
836 
837     p->flags = be32_to_cpu(packet->flags);
838 
839     packet->pages_alloc = be32_to_cpu(packet->pages_alloc);
840     /*
841      * If we received a packet that is 100 times bigger than expected,
842      * just stop the migration.  The factor of 100 is a magic number.
843      */
844     if (packet->pages_alloc > pages_max * 100) {
845         error_setg(errp, "multifd: received packet "
846                    "with size %d and expected a maximum size of %d",
847                    packet->pages_alloc, pages_max * 100) ;
848         return -1;
849     }
850     /*
851      * We received a packet that is bigger than expected but inside
852      * reasonable limits (see previous comment).  Just reallocate.
853      */
854     if (packet->pages_alloc > p->pages->allocated) {
855         multifd_pages_clear(p->pages);
856         p->pages = multifd_pages_init(packet->pages_alloc);
857     }
858 
859     p->pages->used = be32_to_cpu(packet->pages_used);
860     if (p->pages->used > packet->pages_alloc) {
861         error_setg(errp, "multifd: received packet "
862                    "with %d pages and expected maximum pages are %d",
863                    p->pages->used, packet->pages_alloc) ;
864         return -1;
865     }
866 
867     p->next_packet_size = be32_to_cpu(packet->next_packet_size);
868     p->packet_num = be64_to_cpu(packet->packet_num);
869 
870     if (p->pages->used) {
871         /* make sure that ramblock is 0 terminated */
872         packet->ramblock[255] = 0;
873         block = qemu_ram_block_by_name(packet->ramblock);
874         if (!block) {
875             error_setg(errp, "multifd: unknown ram block %s",
876                        packet->ramblock);
877             return -1;
878         }
879     }
880 
881     for (i = 0; i < p->pages->used; i++) {
882         ram_addr_t offset = be64_to_cpu(packet->offset[i]);
883 
884         if (offset > (block->used_length - TARGET_PAGE_SIZE)) {
885             error_setg(errp, "multifd: offset too long " RAM_ADDR_FMT
886                        " (max " RAM_ADDR_FMT ")",
887                        offset, block->max_length);
888             return -1;
889         }
890         p->pages->iov[i].iov_base = block->host + offset;
891         p->pages->iov[i].iov_len = TARGET_PAGE_SIZE;
892     }
893 
894     return 0;
895 }
896 
897 struct {
898     MultiFDSendParams *params;
899     /* array of pages to send */
900     MultiFDPages_t *pages;
901     /* global number of generated multifd packets */
902     uint64_t packet_num;
903     /* send channels ready */
904     QemuSemaphore channels_ready;
905 } *multifd_send_state;
906 
907 /*
908  * How do we use multifd_send_state->pages and channel->pages?
909  *
910  * We create a "pages" struct for each channel, plus a main one.  Each
911  * time we need to send a batch of pages we exchange the one in
912  * multifd_send_state with the one owned by the channel that is going
913  * to send it.  There are two reasons for that:
914  *    - to avoid doing so many mallocs during migration
915  *    - to make it easier to know what to free at the end of migration
916  *
917  * This way we always know who owns each "pages" struct, and we don't
918  * need any locking.  It belongs either to the migration thread or to
919  * the channel thread.  Switching is safe because the migration thread
920  * holds the channel mutex while changing it, and the channel thread
921  * must have finished with its own copy, otherwise pending_job could
922  * not be false.
923  */
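/*
 * In essence, the exchange is (illustrative only; see multifd_send_pages()
 * below for the real locking and accounting):
 *
 *     MultiFDPages_t *pages = multifd_send_state->pages;
 *
 *     multifd_send_state->pages = p->pages;   (the channel's drained array)
 *     p->pages = pages;                       (the filled array to be sent)
 */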
924 
925 static int multifd_send_pages(RAMState *rs)
926 {
927     int i;
928     static int next_channel;
929     MultiFDSendParams *p = NULL; /* make gcc happy */
930     MultiFDPages_t *pages = multifd_send_state->pages;
931     uint64_t transferred;
932 
933     qemu_sem_wait(&multifd_send_state->channels_ready);
934     for (i = next_channel;; i = (i + 1) % migrate_multifd_channels()) {
935         p = &multifd_send_state->params[i];
936 
937         qemu_mutex_lock(&p->mutex);
938         if (p->quit) {
939             error_report("%s: channel %d has already quit!", __func__, i);
940             qemu_mutex_unlock(&p->mutex);
941             return -1;
942         }
943         if (!p->pending_job) {
944             p->pending_job++;
945             next_channel = (i + 1) % migrate_multifd_channels();
946             break;
947         }
948         qemu_mutex_unlock(&p->mutex);
949     }
950     p->pages->used = 0;
951 
952     p->packet_num = multifd_send_state->packet_num++;
953     p->pages->block = NULL;
954     multifd_send_state->pages = p->pages;
955     p->pages = pages;
956     transferred = ((uint64_t) pages->used) * TARGET_PAGE_SIZE + p->packet_len;
957     qemu_file_update_transfer(rs->f, transferred);
958     ram_counters.multifd_bytes += transferred;
959     ram_counters.transferred += transferred;
960     qemu_mutex_unlock(&p->mutex);
961     qemu_sem_post(&p->sem);
962 
963     return 1;
964 }
965 
966 static int multifd_queue_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
967 {
968     MultiFDPages_t *pages = multifd_send_state->pages;
969 
970     if (!pages->block) {
971         pages->block = block;
972     }
973 
974     if (pages->block == block) {
975         pages->offset[pages->used] = offset;
976         pages->iov[pages->used].iov_base = block->host + offset;
977         pages->iov[pages->used].iov_len = TARGET_PAGE_SIZE;
978         pages->used++;
979 
980         if (pages->used < pages->allocated) {
981             return 1;
982         }
983     }
984 
985     if (multifd_send_pages(rs) < 0) {
986         return -1;
987     }
988 
989     if (pages->block != block) {
990         return  multifd_queue_page(rs, block, offset);
991     }
992 
993     return 1;
994 }
995 
996 static void multifd_send_terminate_threads(Error *err)
997 {
998     int i;
999 
1000     trace_multifd_send_terminate_threads(err != NULL);
1001 
1002     if (err) {
1003         MigrationState *s = migrate_get_current();
1004         migrate_set_error(s, err);
1005         if (s->state == MIGRATION_STATUS_SETUP ||
1006             s->state == MIGRATION_STATUS_PRE_SWITCHOVER ||
1007             s->state == MIGRATION_STATUS_DEVICE ||
1008             s->state == MIGRATION_STATUS_ACTIVE) {
1009             migrate_set_state(&s->state, s->state,
1010                               MIGRATION_STATUS_FAILED);
1011         }
1012     }
1013 
1014     for (i = 0; i < migrate_multifd_channels(); i++) {
1015         MultiFDSendParams *p = &multifd_send_state->params[i];
1016 
1017         qemu_mutex_lock(&p->mutex);
1018         p->quit = true;
1019         qemu_sem_post(&p->sem);
1020         qemu_mutex_unlock(&p->mutex);
1021     }
1022 }
1023 
1024 void multifd_save_cleanup(void)
1025 {
1026     int i;
1027 
1028     if (!migrate_use_multifd()) {
1029         return;
1030     }
1031     multifd_send_terminate_threads(NULL);
1032     for (i = 0; i < migrate_multifd_channels(); i++) {
1033         MultiFDSendParams *p = &multifd_send_state->params[i];
1034 
1035         if (p->running) {
1036             qemu_thread_join(&p->thread);
1037         }
1038         socket_send_channel_destroy(p->c);
1039         p->c = NULL;
1040         qemu_mutex_destroy(&p->mutex);
1041         qemu_sem_destroy(&p->sem);
1042         qemu_sem_destroy(&p->sem_sync);
1043         g_free(p->name);
1044         p->name = NULL;
1045         multifd_pages_clear(p->pages);
1046         p->pages = NULL;
1047         p->packet_len = 0;
1048         g_free(p->packet);
1049         p->packet = NULL;
1050     }
1051     qemu_sem_destroy(&multifd_send_state->channels_ready);
1052     g_free(multifd_send_state->params);
1053     multifd_send_state->params = NULL;
1054     multifd_pages_clear(multifd_send_state->pages);
1055     multifd_send_state->pages = NULL;
1056     g_free(multifd_send_state);
1057     multifd_send_state = NULL;
1058 }
1059 
1060 static void multifd_send_sync_main(RAMState *rs)
1061 {
1062     int i;
1063 
1064     if (!migrate_use_multifd()) {
1065         return;
1066     }
1067     if (multifd_send_state->pages->used) {
1068         if (multifd_send_pages(rs) < 0) {
1069             error_report("%s: multifd_send_pages fail", __func__);
1070             return;
1071         }
1072     }
1073     for (i = 0; i < migrate_multifd_channels(); i++) {
1074         MultiFDSendParams *p = &multifd_send_state->params[i];
1075 
1076         trace_multifd_send_sync_main_signal(p->id);
1077 
1078         qemu_mutex_lock(&p->mutex);
1079 
1080         if (p->quit) {
1081             error_report("%s: channel %d has already quit", __func__, i);
1082             qemu_mutex_unlock(&p->mutex);
1083             return;
1084         }
1085 
1086         p->packet_num = multifd_send_state->packet_num++;
1087         p->flags |= MULTIFD_FLAG_SYNC;
1088         p->pending_job++;
1089         qemu_file_update_transfer(rs->f, p->packet_len);
1090         ram_counters.multifd_bytes += p->packet_len;
1091         ram_counters.transferred += p->packet_len;
1092         qemu_mutex_unlock(&p->mutex);
1093         qemu_sem_post(&p->sem);
1094     }
1095     for (i = 0; i < migrate_multifd_channels(); i++) {
1096         MultiFDSendParams *p = &multifd_send_state->params[i];
1097 
1098         trace_multifd_send_sync_main_wait(p->id);
1099         qemu_sem_wait(&p->sem_sync);
1100     }
1101     trace_multifd_send_sync_main(multifd_send_state->packet_num);
1102 }
1103 
1104 static void *multifd_send_thread(void *opaque)
1105 {
1106     MultiFDSendParams *p = opaque;
1107     Error *local_err = NULL;
1108     int ret = 0;
1109     uint32_t flags = 0;
1110 
1111     trace_multifd_send_thread_start(p->id);
1112     rcu_register_thread();
1113 
1114     if (multifd_send_initial_packet(p, &local_err) < 0) {
1115         ret = -1;
1116         goto out;
1117     }
1118     /* initial packet */
1119     p->num_packets = 1;
1120 
1121     while (true) {
1122         qemu_sem_wait(&p->sem);
1123         qemu_mutex_lock(&p->mutex);
1124 
1125         if (p->pending_job) {
1126             uint32_t used = p->pages->used;
1127             uint64_t packet_num = p->packet_num;
1128             flags = p->flags;
1129 
1130             p->next_packet_size = used * qemu_target_page_size();
1131             multifd_send_fill_packet(p);
1132             p->flags = 0;
1133             p->num_packets++;
1134             p->num_pages += used;
1135             p->pages->used = 0;
1136             qemu_mutex_unlock(&p->mutex);
1137 
1138             trace_multifd_send(p->id, packet_num, used, flags,
1139                                p->next_packet_size);
1140 
1141             ret = qio_channel_write_all(p->c, (void *)p->packet,
1142                                         p->packet_len, &local_err);
1143             if (ret != 0) {
1144                 break;
1145             }
1146 
1147             if (used) {
1148                 ret = qio_channel_writev_all(p->c, p->pages->iov,
1149                                              used, &local_err);
1150                 if (ret != 0) {
1151                     break;
1152                 }
1153             }
1154 
1155             qemu_mutex_lock(&p->mutex);
1156             p->pending_job--;
1157             qemu_mutex_unlock(&p->mutex);
1158 
1159             if (flags & MULTIFD_FLAG_SYNC) {
1160                 qemu_sem_post(&p->sem_sync);
1161             }
1162             qemu_sem_post(&multifd_send_state->channels_ready);
1163         } else if (p->quit) {
1164             qemu_mutex_unlock(&p->mutex);
1165             break;
1166         } else {
1167             qemu_mutex_unlock(&p->mutex);
1168             /* sometimes there are spurious wakeups */
1169         }
1170     }
1171 
1172 out:
1173     if (local_err) {
1174         trace_multifd_send_error(p->id);
1175         multifd_send_terminate_threads(local_err);
1176     }
1177 
1178     /*
1179      * An error happened and this thread is about to exit; before
1180      * leaving, wake up anyone who is waiting on us.
1181      */
1182     if (ret != 0) {
1183         qemu_sem_post(&p->sem_sync);
1184         qemu_sem_post(&multifd_send_state->channels_ready);
1185     }
1186 
1187     qemu_mutex_lock(&p->mutex);
1188     p->running = false;
1189     qemu_mutex_unlock(&p->mutex);
1190 
1191     rcu_unregister_thread();
1192     trace_multifd_send_thread_end(p->id, p->num_packets, p->num_pages);
1193 
1194     return NULL;
1195 }
1196 
1197 static void multifd_new_send_channel_async(QIOTask *task, gpointer opaque)
1198 {
1199     MultiFDSendParams *p = opaque;
1200     QIOChannel *sioc = QIO_CHANNEL(qio_task_get_source(task));
1201     Error *local_err = NULL;
1202 
1203     trace_multifd_new_send_channel_async(p->id);
1204     if (qio_task_propagate_error(task, &local_err)) {
1205         migrate_set_error(migrate_get_current(), local_err);
1206         multifd_save_cleanup();
1207     } else {
1208         p->c = QIO_CHANNEL(sioc);
1209         qio_channel_set_delay(p->c, false);
1210         p->running = true;
1211         qemu_thread_create(&p->thread, p->name, multifd_send_thread, p,
1212                            QEMU_THREAD_JOINABLE);
1213     }
1214 }
1215 
1216 int multifd_save_setup(void)
1217 {
1218     int thread_count;
1219     uint32_t page_count = MULTIFD_PACKET_SIZE / qemu_target_page_size();
1220     uint8_t i;
1221 
1222     if (!migrate_use_multifd()) {
1223         return 0;
1224     }
1225     thread_count = migrate_multifd_channels();
1226     multifd_send_state = g_malloc0(sizeof(*multifd_send_state));
1227     multifd_send_state->params = g_new0(MultiFDSendParams, thread_count);
1228     multifd_send_state->pages = multifd_pages_init(page_count);
1229     qemu_sem_init(&multifd_send_state->channels_ready, 0);
1230 
1231     for (i = 0; i < thread_count; i++) {
1232         MultiFDSendParams *p = &multifd_send_state->params[i];
1233 
1234         qemu_mutex_init(&p->mutex);
1235         qemu_sem_init(&p->sem, 0);
1236         qemu_sem_init(&p->sem_sync, 0);
1237         p->quit = false;
1238         p->pending_job = 0;
1239         p->id = i;
1240         p->pages = multifd_pages_init(page_count);
1241         p->packet_len = sizeof(MultiFDPacket_t)
1242                       + sizeof(ram_addr_t) * page_count;
1243         p->packet = g_malloc0(p->packet_len);
1244         p->name = g_strdup_printf("multifdsend_%d", i);
1245         socket_send_channel_create(multifd_new_send_channel_async, p);
1246     }
1247     return 0;
1248 }
1249 
1250 struct {
1251     MultiFDRecvParams *params;
1252     /* number of created threads */
1253     int count;
1254     /* syncs main thread and channels */
1255     QemuSemaphore sem_sync;
1256     /* global number of generated multifd packets */
1257     uint64_t packet_num;
1258 } *multifd_recv_state;
1259 
1260 static void multifd_recv_terminate_threads(Error *err)
1261 {
1262     int i;
1263 
1264     trace_multifd_recv_terminate_threads(err != NULL);
1265 
1266     if (err) {
1267         MigrationState *s = migrate_get_current();
1268         migrate_set_error(s, err);
1269         if (s->state == MIGRATION_STATUS_SETUP ||
1270             s->state == MIGRATION_STATUS_ACTIVE) {
1271             migrate_set_state(&s->state, s->state,
1272                               MIGRATION_STATUS_FAILED);
1273         }
1274     }
1275 
1276     for (i = 0; i < migrate_multifd_channels(); i++) {
1277         MultiFDRecvParams *p = &multifd_recv_state->params[i];
1278 
1279         qemu_mutex_lock(&p->mutex);
1280         p->quit = true;
1281         /* We could arrive here for two reasons:
1282            - normal quit, i.e. everything went fine, just finished
1283            - error quit: We close the channels so the channel threads
1284              finish the qio_channel_read_all_eof() */
1285         qio_channel_shutdown(p->c, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
1286         qemu_mutex_unlock(&p->mutex);
1287     }
1288 }
1289 
1290 int multifd_load_cleanup(Error **errp)
1291 {
1292     int i;
1293     int ret = 0;
1294 
1295     if (!migrate_use_multifd()) {
1296         return 0;
1297     }
1298     multifd_recv_terminate_threads(NULL);
1299     for (i = 0; i < migrate_multifd_channels(); i++) {
1300         MultiFDRecvParams *p = &multifd_recv_state->params[i];
1301 
1302         if (p->running) {
1303             p->quit = true;
1304             /*
1305              * multifd_recv_thread may be blocked in the MULTIFD_FLAG_SYNC
1306              * handling code; waking it up here is harmless in the cleanup phase.
1307              */
1308             qemu_sem_post(&p->sem_sync);
1309             qemu_thread_join(&p->thread);
1310         }
1311         object_unref(OBJECT(p->c));
1312         p->c = NULL;
1313         qemu_mutex_destroy(&p->mutex);
1314         qemu_sem_destroy(&p->sem_sync);
1315         g_free(p->name);
1316         p->name = NULL;
1317         multifd_pages_clear(p->pages);
1318         p->pages = NULL;
1319         p->packet_len = 0;
1320         g_free(p->packet);
1321         p->packet = NULL;
1322     }
1323     qemu_sem_destroy(&multifd_recv_state->sem_sync);
1324     g_free(multifd_recv_state->params);
1325     multifd_recv_state->params = NULL;
1326     g_free(multifd_recv_state);
1327     multifd_recv_state = NULL;
1328 
1329     return ret;
1330 }
1331 
1332 static void multifd_recv_sync_main(void)
1333 {
1334     int i;
1335 
1336     if (!migrate_use_multifd()) {
1337         return;
1338     }
1339     for (i = 0; i < migrate_multifd_channels(); i++) {
1340         MultiFDRecvParams *p = &multifd_recv_state->params[i];
1341 
1342         trace_multifd_recv_sync_main_wait(p->id);
1343         qemu_sem_wait(&multifd_recv_state->sem_sync);
1344     }
1345     for (i = 0; i < migrate_multifd_channels(); i++) {
1346         MultiFDRecvParams *p = &multifd_recv_state->params[i];
1347 
1348         qemu_mutex_lock(&p->mutex);
1349         if (multifd_recv_state->packet_num < p->packet_num) {
1350             multifd_recv_state->packet_num = p->packet_num;
1351         }
1352         qemu_mutex_unlock(&p->mutex);
1353         trace_multifd_recv_sync_main_signal(p->id);
1354         qemu_sem_post(&p->sem_sync);
1355     }
1356     trace_multifd_recv_sync_main(multifd_recv_state->packet_num);
1357 }
1358 
1359 static void *multifd_recv_thread(void *opaque)
1360 {
1361     MultiFDRecvParams *p = opaque;
1362     Error *local_err = NULL;
1363     int ret;
1364 
1365     trace_multifd_recv_thread_start(p->id);
1366     rcu_register_thread();
1367 
1368     while (true) {
1369         uint32_t used;
1370         uint32_t flags;
1371 
1372         if (p->quit) {
1373             break;
1374         }
1375 
1376         ret = qio_channel_read_all_eof(p->c, (void *)p->packet,
1377                                        p->packet_len, &local_err);
1378         if (ret == 0) {   /* EOF */
1379             break;
1380         }
1381         if (ret == -1) {   /* Error */
1382             break;
1383         }
1384 
1385         qemu_mutex_lock(&p->mutex);
1386         ret = multifd_recv_unfill_packet(p, &local_err);
1387         if (ret) {
1388             qemu_mutex_unlock(&p->mutex);
1389             break;
1390         }
1391 
1392         used = p->pages->used;
1393         flags = p->flags;
1394         trace_multifd_recv(p->id, p->packet_num, used, flags,
1395                            p->next_packet_size);
1396         p->num_packets++;
1397         p->num_pages += used;
1398         qemu_mutex_unlock(&p->mutex);
1399 
1400         if (used) {
1401             ret = qio_channel_readv_all(p->c, p->pages->iov,
1402                                         used, &local_err);
1403             if (ret != 0) {
1404                 break;
1405             }
1406         }
1407 
1408         if (flags & MULTIFD_FLAG_SYNC) {
1409             qemu_sem_post(&multifd_recv_state->sem_sync);
1410             qemu_sem_wait(&p->sem_sync);
1411         }
1412     }
1413 
1414     if (local_err) {
1415         multifd_recv_terminate_threads(local_err);
1416     }
1417     qemu_mutex_lock(&p->mutex);
1418     p->running = false;
1419     qemu_mutex_unlock(&p->mutex);
1420 
1421     rcu_unregister_thread();
1422     trace_multifd_recv_thread_end(p->id, p->num_packets, p->num_pages);
1423 
1424     return NULL;
1425 }
1426 
1427 int multifd_load_setup(void)
1428 {
1429     int thread_count;
1430     uint32_t page_count = MULTIFD_PACKET_SIZE / qemu_target_page_size();
1431     uint8_t i;
1432 
1433     if (!migrate_use_multifd()) {
1434         return 0;
1435     }
1436     thread_count = migrate_multifd_channels();
1437     multifd_recv_state = g_malloc0(sizeof(*multifd_recv_state));
1438     multifd_recv_state->params = g_new0(MultiFDRecvParams, thread_count);
1439     atomic_set(&multifd_recv_state->count, 0);
1440     qemu_sem_init(&multifd_recv_state->sem_sync, 0);
1441 
1442     for (i = 0; i < thread_count; i++) {
1443         MultiFDRecvParams *p = &multifd_recv_state->params[i];
1444 
1445         qemu_mutex_init(&p->mutex);
1446         qemu_sem_init(&p->sem_sync, 0);
1447         p->quit = false;
1448         p->id = i;
1449         p->pages = multifd_pages_init(page_count);
1450         p->packet_len = sizeof(MultiFDPacket_t)
1451                       + sizeof(ram_addr_t) * page_count;
1452         p->packet = g_malloc0(p->packet_len);
1453         p->name = g_strdup_printf("multifdrecv_%d", i);
1454     }
1455     return 0;
1456 }
1457 
1458 bool multifd_recv_all_channels_created(void)
1459 {
1460     int thread_count = migrate_multifd_channels();
1461 
1462     if (!migrate_use_multifd()) {
1463         return true;
1464     }
1465 
1466     return thread_count == atomic_read(&multifd_recv_state->count);
1467 }
1468 
1469 /*
1470  * Try to receive all multifd channels to get ready for the migration.
1471  * - Return true and do not set @errp when correctly receiving all channels;
1472  * - Return false and do not set @errp when correctly receiving the current one;
1473  * - Return false and set @errp when failing to receive the current channel.
1474  */
1475 bool multifd_recv_new_channel(QIOChannel *ioc, Error **errp)
1476 {
1477     MultiFDRecvParams *p;
1478     Error *local_err = NULL;
1479     int id;
1480 
1481     id = multifd_recv_initial_packet(ioc, &local_err);
1482     if (id < 0) {
1483         multifd_recv_terminate_threads(local_err);
1484         error_propagate_prepend(errp, local_err,
1485                                 "failed to receive packet"
1486                                 " via multifd channel %d: ",
1487                                 atomic_read(&multifd_recv_state->count));
1488         return false;
1489     }
1490     trace_multifd_recv_new_channel(id);
1491 
1492     p = &multifd_recv_state->params[id];
1493     if (p->c != NULL) {
1494         error_setg(&local_err, "multifd: received id '%d' already set up",
1495                    id);
1496         multifd_recv_terminate_threads(local_err);
1497         error_propagate(errp, local_err);
1498         return false;
1499     }
1500     p->c = ioc;
1501     object_ref(OBJECT(ioc));
1502     /* initial packet */
1503     p->num_packets = 1;
1504 
1505     p->running = true;
1506     qemu_thread_create(&p->thread, p->name, multifd_recv_thread, p,
1507                        QEMU_THREAD_JOINABLE);
1508     atomic_inc(&multifd_recv_state->count);
1509     return atomic_read(&multifd_recv_state->count) ==
1510            migrate_multifd_channels();
1511 }
1512 
1513 /**
1514  * save_page_header: write page header to wire
1515  *
1516  * If this page starts a new block, it also writes the block identification
1517  *
1518  * Returns the number of bytes written
1519  *
1520  * @f: QEMUFile where to send the data
1521  * @block: block that contains the page we want to send
1522  * @offset: offset inside the block for the page
1523  *          in the lower bits, it contains flags
1524  */
1525 static size_t save_page_header(RAMState *rs, QEMUFile *f,  RAMBlock *block,
1526                                ram_addr_t offset)
1527 {
1528     size_t size, len;
1529 
1530     if (block == rs->last_sent_block) {
1531         offset |= RAM_SAVE_FLAG_CONTINUE;
1532     }
1533     qemu_put_be64(f, offset);
1534     size = 8;
1535 
1536     if (!(offset & RAM_SAVE_FLAG_CONTINUE)) {
1537         len = strlen(block->idstr);
1538         qemu_put_byte(f, len);
1539         qemu_put_buffer(f, (uint8_t *)block->idstr, len);
1540         size += 1 + len;
1541         rs->last_sent_block = block;
1542     }
1543     return size;
1544 }
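/*
 * Illustrative on-wire layout produced by save_page_header() (a sketch of
 * what the code above emits, not a new format definition):
 *
 *     be64   page offset within its block, with RAM_SAVE_FLAG_* bits OR-ed
 *            into the low bits (RAM_SAVE_FLAG_CONTINUE when the block is
 *            the same as for the previously sent page)
 *     u8     strlen(block->idstr)    only when CONTINUE is not set
 *     bytes  block->idstr            only when CONTINUE is not set
 */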
1545 
1546 /**
1547  * mig_throttle_guest_down: throttle down the guest
1548  *
1549  * Reduce the amount of guest CPU execution to hopefully slow down
1550  * memory writes. If the guest dirty memory rate drops below the rate
1551  * at which we can transfer pages to the destination, then we should
1552  * be able to complete the migration. Some workloads dirty memory way
1553  * too fast and will not effectively converge, even with auto-converge.
1554  */
1555 static void mig_throttle_guest_down(void)
1556 {
1557     MigrationState *s = migrate_get_current();
1558     uint64_t pct_initial = s->parameters.cpu_throttle_initial;
1559     uint64_t pct_icrement = s->parameters.cpu_throttle_increment;
1560     int pct_max = s->parameters.max_cpu_throttle;
1561 
1562     /* We have not started throttling yet. Let's start it. */
1563     if (!cpu_throttle_active()) {
1564         cpu_throttle_set(pct_initial);
1565     } else {
1566         /* Throttling already on, just increase the rate */
1567         cpu_throttle_set(MIN(cpu_throttle_get_percentage() + pct_icrement,
1568                          pct_max));
1569     }
1570 }
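/*
 * Worked example (the parameter values are hypothetical; the real ones come
 * from s->parameters): with cpu_throttle_initial = 20,
 * cpu_throttle_increment = 10 and max_cpu_throttle = 99, successive calls
 * throttle the guest at 20%, then 30%, 40%, ... capped at 99%.
 */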
1571 
1572 /**
1573  * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache
1574  *
1575  * @rs: current RAM state
1576  * @current_addr: address for the zero page
1577  *
1578  * Update the xbzrle cache to reflect a page that's been sent as all 0.
1579  * The important thing is that a stale (not-yet-0'd) page be replaced
1580  * by the new data.
1581  * As a bonus, if the page wasn't in the cache it gets added so that
1582  * when a small write is made into the 0'd page it gets XBZRLE sent.
1583  */
1584 static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
1585 {
1586     if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
1587         return;
1588     }
1589 
1590     /* We don't care if this fails to allocate a new cache page
1591      * as long as it updates an old one */
1592     cache_insert(XBZRLE.cache, current_addr, XBZRLE.zero_target_page,
1593                  ram_counters.dirty_sync_count);
1594 }
1595 
1596 #define ENCODING_FLAG_XBZRLE 0x1
1597 
1598 /**
1599  * save_xbzrle_page: compress and send current page
1600  *
1601  * Returns: 1 means that we wrote the page
1602  *          0 means that page is identical to the one already sent
1603  *          -1 means that xbzrle would be longer than normal
1604  *
1605  * @rs: current RAM state
1606  * @current_data: pointer to the address of the page contents
1607  * @current_addr: addr of the page
1608  * @block: block that contains the page we want to send
1609  * @offset: offset inside the block for the page
1610  * @last_stage: if we are at the completion stage
1611  */
1612 static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
1613                             ram_addr_t current_addr, RAMBlock *block,
1614                             ram_addr_t offset, bool last_stage)
1615 {
1616     int encoded_len = 0, bytes_xbzrle;
1617     uint8_t *prev_cached_page;
1618 
1619     if (!cache_is_cached(XBZRLE.cache, current_addr,
1620                          ram_counters.dirty_sync_count)) {
1621         xbzrle_counters.cache_miss++;
1622         if (!last_stage) {
1623             if (cache_insert(XBZRLE.cache, current_addr, *current_data,
1624                              ram_counters.dirty_sync_count) == -1) {
1625                 return -1;
1626             } else {
1627                 /* update *current_data when the page has been
1628                    inserted into cache */
1629                 *current_data = get_cached_data(XBZRLE.cache, current_addr);
1630             }
1631         }
1632         return -1;
1633     }
1634 
1635     prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);
1636 
1637     /* save current buffer into memory */
1638     memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);
1639 
1640     /* XBZRLE encoding (if there is no overflow) */
1641     encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
1642                                        TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
1643                                        TARGET_PAGE_SIZE);
1644 
1645     /*
1646      * Update the cache contents, so that it corresponds to the data
1647      * sent, in all cases except where we skip the page.
1648      */
1649     if (!last_stage && encoded_len != 0) {
1650         memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
1651         /*
1652          * In the case where we couldn't compress, ensure that the caller
1653          * sends the data from the cache, since the guest might have
1654          * changed the RAM since we copied it.
1655          */
1656         *current_data = prev_cached_page;
1657     }
1658 
1659     if (encoded_len == 0) {
1660         trace_save_xbzrle_page_skipping();
1661         return 0;
1662     } else if (encoded_len == -1) {
1663         trace_save_xbzrle_page_overflow();
1664         xbzrle_counters.overflow++;
1665         return -1;
1666     }
1667 
1668     /* Send XBZRLE based compressed page */
1669     bytes_xbzrle = save_page_header(rs, rs->f, block,
1670                                     offset | RAM_SAVE_FLAG_XBZRLE);
1671     qemu_put_byte(rs->f, ENCODING_FLAG_XBZRLE);
1672     qemu_put_be16(rs->f, encoded_len);
1673     qemu_put_buffer(rs->f, XBZRLE.encoded_buf, encoded_len);
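    /* +1 for the ENCODING_FLAG_XBZRLE byte, +2 for the big-endian length */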
1674     bytes_xbzrle += encoded_len + 1 + 2;
1675     xbzrle_counters.pages++;
1676     xbzrle_counters.bytes += bytes_xbzrle;
1677     ram_counters.transferred += bytes_xbzrle;
1678 
1679     return 1;
1680 }
1681 
1682 /**
1683  * migration_bitmap_find_dirty: find the next dirty page from start
1684  *
1685  * Returns the page offset within memory region of the start of a dirty page
1686  * Returns the page offset within the memory region of the start of a dirty page
1687  * @rs: current RAM state
1688  * @rb: RAMBlock where to search for dirty pages
1689  * @start: page where we start the search
1690  */
1691 static inline
1692 unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
1693                                           unsigned long start)
1694 {
1695     unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
1696     unsigned long *bitmap = rb->bmap;
1697     unsigned long next;
1698 
1699     if (ramblock_is_ignored(rb)) {
1700         return size;
1701     }
1702 
1703     /*
1704      * When the free page optimization is enabled, we need to check the bitmap
1705      * to send the non-free pages rather than all the pages in the bulk stage.
1706      */
1707     if (!rs->fpo_enabled && rs->ram_bulk_stage && start > 0) {
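        /* in the bulk stage every page is assumed dirty, so there is no need
         * to scan the bitmap */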
1708         next = start + 1;
1709     } else {
1710         next = find_next_bit(bitmap, size, start);
1711     }
1712 
1713     return next;
1714 }
1715 
1716 static inline bool migration_bitmap_clear_dirty(RAMState *rs,
1717                                                 RAMBlock *rb,
1718                                                 unsigned long page)
1719 {
1720     bool ret;
1721 
1722     qemu_mutex_lock(&rs->bitmap_mutex);
1723 
1724     /*
1725      * Clear dirty bitmap if needed.  This _must_ be called before we
1726      * send any of the pages in the chunk, because we need to make sure
1727      * we can capture further page content changes when we sync the dirty
1728      * log the next time.  So as long as we are going to send any of the
1729      * pages in the chunk we clear the remote dirty bitmap for all of them.
1730      * Clearing it earlier won't be a problem, but clearing it too late will.
1731      */
1732     if (rb->clear_bmap && clear_bmap_test_and_clear(rb, page)) {
1733         uint8_t shift = rb->clear_bmap_shift;
1734         hwaddr size = 1ULL << (TARGET_PAGE_BITS + shift);
1735         hwaddr start = (page << TARGET_PAGE_BITS) & (-size);
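        /* size spans one clear-bitmap chunk (2^shift target pages); start is
         * the chunk-aligned address that contains this page. */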
1736 
1737         /*
1738          * CLEAR_BITMAP_SHIFT_MIN should always guarantee this... this
1739          * can make things easier sometimes since then the start address
1740          * of the small chunk will always be aligned to 64 pages, so the
1741          * bitmap will always be aligned to unsigned long.  We should
1742          * even be able to remove this restriction but I'm simply
1743          * keeping it.
1744          */
1745         assert(shift >= 6);
1746         trace_migration_bitmap_clear_dirty(rb->idstr, start, size, page);
1747         memory_region_clear_dirty_bitmap(rb->mr, start, size);
1748     }
1749 
1750     ret = test_and_clear_bit(page, rb->bmap);
1751 
1752     if (ret) {
1753         rs->migration_dirty_pages--;
1754     }
1755     qemu_mutex_unlock(&rs->bitmap_mutex);
1756 
1757     return ret;
1758 }
1759 
1760 /* Called with RCU critical section */
1761 static void ramblock_sync_dirty_bitmap(RAMState *rs, RAMBlock *rb)
1762 {
1763     rs->migration_dirty_pages +=
1764         cpu_physical_memory_sync_dirty_bitmap(rb, 0, rb->used_length,
1765                                               &rs->num_dirty_pages_period);
1766 }
1767 
1768 /**
1769  * ram_pagesize_summary: calculate all the pagesizes of a VM
1770  *
1771  * Returns a summary bitmap of the page sizes of all RAMBlocks
1772  *
1773  * For VMs with just normal pages this is equivalent to the host page
1774  * size. If it's got some huge pages then it's the OR of all the
1775  * different page sizes.
1776  */
1777 uint64_t ram_pagesize_summary(void)
1778 {
1779     RAMBlock *block;
1780     uint64_t summary = 0;
1781 
1782     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
1783         summary |= block->page_size;
1784     }
1785 
1786     return summary;
1787 }
1788 
1789 uint64_t ram_get_total_transferred_pages(void)
1790 {
1791     return  ram_counters.normal + ram_counters.duplicate +
1792                 compression_counters.pages + xbzrle_counters.pages;
1793 }
1794 
1795 static void migration_update_rates(RAMState *rs, int64_t end_time)
1796 {
1797     uint64_t page_count = rs->target_page_count - rs->target_page_count_prev;
1798     double compressed_size;
1799 
1800     /* calculate period counters */
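    /* dirty pages per second; the elapsed period below is in milliseconds */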
1801     ram_counters.dirty_pages_rate = rs->num_dirty_pages_period * 1000
1802                 / (end_time - rs->time_last_bitmap_sync);
1803 
1804     if (!page_count) {
1805         return;
1806     }
1807 
1808     if (migrate_use_xbzrle()) {
1809         xbzrle_counters.cache_miss_rate = (double)(xbzrle_counters.cache_miss -
1810             rs->xbzrle_cache_miss_prev) / page_count;
1811         rs->xbzrle_cache_miss_prev = xbzrle_counters.cache_miss;
1812     }
1813 
1814     if (migrate_use_compression()) {
1815         compression_counters.busy_rate = (double)(compression_counters.busy -
1816             rs->compress_thread_busy_prev) / page_count;
1817         rs->compress_thread_busy_prev = compression_counters.busy;
1818 
1819         compressed_size = compression_counters.compressed_size -
1820                           rs->compressed_size_prev;
1821         if (compressed_size) {
1822             double uncompressed_size = (compression_counters.pages -
1823                                     rs->compress_pages_prev) * TARGET_PAGE_SIZE;
1824 
1825             /* Compression-Ratio = Uncompressed-size / Compressed-size */
1826             compression_counters.compression_rate =
1827                                         uncompressed_size / compressed_size;
1828 
1829             rs->compress_pages_prev = compression_counters.pages;
1830             rs->compressed_size_prev = compression_counters.compressed_size;
1831         }
1832     }
1833 }
1834 
1835 static void migration_bitmap_sync(RAMState *rs)
1836 {
1837     RAMBlock *block;
1838     int64_t end_time;
1839     uint64_t bytes_xfer_now;
1840 
1841     ram_counters.dirty_sync_count++;
1842 
1843     if (!rs->time_last_bitmap_sync) {
1844         rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1845     }
1846 
1847     trace_migration_bitmap_sync_start();
1848     memory_global_dirty_log_sync();
1849 
1850     qemu_mutex_lock(&rs->bitmap_mutex);
1851     rcu_read_lock();
1852     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
1853         ramblock_sync_dirty_bitmap(rs, block);
1854     }
1855     ram_counters.remaining = ram_bytes_remaining();
1856     rcu_read_unlock();
1857     qemu_mutex_unlock(&rs->bitmap_mutex);
1858 
1859     memory_global_after_dirty_log_sync();
1860     trace_migration_bitmap_sync_end(rs->num_dirty_pages_period);
1861 
1862     end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1863 
1864     /* more than 1 second = 1000 milliseconds */
1865     if (end_time > rs->time_last_bitmap_sync + 1000) {
1866         bytes_xfer_now = ram_counters.transferred;
1867 
1868         /* During block migration the auto-converge logic incorrectly detects
1869          * that ram migration makes no progress. Avoid this by disabling the
1870          * throttling logic during the bulk phase of block migration. */
1871         if (migrate_auto_converge() && !blk_mig_bulk_active()) {
1872             /* The following detection logic can be refined later. For now:
1873                Check to see if the dirtied bytes are 50% more than the approx.
1874                amount of bytes that just got transferred since the last time we
1875                were in this routine. If that happens twice, start or increase
1876                throttling */
1877 
1878             if ((rs->num_dirty_pages_period * TARGET_PAGE_SIZE >
1879                    (bytes_xfer_now - rs->bytes_xfer_prev) / 2) &&
1880                 (++rs->dirty_rate_high_cnt >= 2)) {
1881                     trace_migration_throttle();
1882                     rs->dirty_rate_high_cnt = 0;
1883                     mig_throttle_guest_down();
1884             }
1885         }
1886 
1887         migration_update_rates(rs, end_time);
1888 
1889         rs->target_page_count_prev = rs->target_page_count;
1890 
1891         /* reset period counters */
1892         rs->time_last_bitmap_sync = end_time;
1893         rs->num_dirty_pages_period = 0;
1894         rs->bytes_xfer_prev = bytes_xfer_now;
1895     }
1896     if (migrate_use_events()) {
1897         qapi_event_send_migration_pass(ram_counters.dirty_sync_count);
1898     }
1899 }
1900 
1901 static void migration_bitmap_sync_precopy(RAMState *rs)
1902 {
1903     Error *local_err = NULL;
1904 
1905     /*
1906      * The current notifier usage is just an optimization to migration, so we
1907      * don't stop the normal migration process in the error case.
1908      */
1909     if (precopy_notify(PRECOPY_NOTIFY_BEFORE_BITMAP_SYNC, &local_err)) {
1910         error_report_err(local_err);
1911     }
1912 
1913     migration_bitmap_sync(rs);
1914 
1915     if (precopy_notify(PRECOPY_NOTIFY_AFTER_BITMAP_SYNC, &local_err)) {
1916         error_report_err(local_err);
1917     }
1918 }
1919 
1920 /**
1921  * save_zero_page_to_file: send the zero page to the file
1922  *
1923  * Returns the size of data written to the file, 0 means the page is not
1924  * a zero page
1925  *
1926  * @rs: current RAM state
1927  * @file: the file where the data is saved
1928  * @block: block that contains the page we want to send
1929  * @offset: offset inside the block for the page
1930  */
1931 static int save_zero_page_to_file(RAMState *rs, QEMUFile *file,
1932                                   RAMBlock *block, ram_addr_t offset)
1933 {
1934     uint8_t *p = block->host + offset;
1935     int len = 0;
1936 
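    /* On the wire a zero page is just the page header with RAM_SAVE_FLAG_ZERO
     * followed by a single zero byte. */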
1937     if (is_zero_range(p, TARGET_PAGE_SIZE)) {
1938         len += save_page_header(rs, file, block, offset | RAM_SAVE_FLAG_ZERO);
1939         qemu_put_byte(file, 0);
1940         len += 1;
1941     }
1942     return len;
1943 }
1944 
1945 /**
1946  * save_zero_page: send the zero page to the stream
1947  *
1948  * Returns the number of pages written.
1949  *
1950  * @rs: current RAM state
1951  * @block: block that contains the page we want to send
1952  * @offset: offset inside the block for the page
1953  */
1954 static int save_zero_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
1955 {
1956     int len = save_zero_page_to_file(rs, rs->f, block, offset);
1957 
1958     if (len) {
1959         ram_counters.duplicate++;
1960         ram_counters.transferred += len;
1961         return 1;
1962     }
1963     return -1;
1964 }
1965 
1966 static void ram_release_pages(const char *rbname, uint64_t offset, int pages)
1967 {
1968     if (!migrate_release_ram() || !migration_in_postcopy()) {
1969         return;
1970     }
1971 
1972     ram_discard_range(rbname, offset, pages << TARGET_PAGE_BITS);
1973 }
1974 
1975 /*
1976  * @pages: the number of pages written by the control path,
1977  *        < 0 - error
1978  *        > 0 - number of pages written
1979  *
1980  * Return true if the page has been saved, otherwise false is returned.
1981  */
1982 static bool control_save_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
1983                               int *pages)
1984 {
1985     uint64_t bytes_xmit = 0;
1986     int ret;
1987 
1988     *pages = -1;
1989     ret = ram_control_save_page(rs->f, block->offset, offset, TARGET_PAGE_SIZE,
1990                                 &bytes_xmit);
1991     if (ret == RAM_SAVE_CONTROL_NOT_SUPP) {
1992         return false;
1993     }
1994 
1995     if (bytes_xmit) {
1996         ram_counters.transferred += bytes_xmit;
1997         *pages = 1;
1998     }
1999 
2000     if (ret == RAM_SAVE_CONTROL_DELAYED) {
2001         return true;
2002     }
2003 
2004     if (bytes_xmit > 0) {
2005         ram_counters.normal++;
2006     } else if (bytes_xmit == 0) {
2007         ram_counters.duplicate++;
2008     }
2009 
2010     return true;
2011 }
2012 
2013 /*
2014  * directly send the page to the stream
2015  *
2016  * Returns the number of pages written.
2017  *
2018  * @rs: current RAM state
2019  * @block: block that contains the page we want to send
2020  * @offset: offset inside the block for the page
2021  * @buf: the page to be sent
2022  * @async: send the page asynchronously
2023  */
2024 static int save_normal_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
2025                             uint8_t *buf, bool async)
2026 {
2027     ram_counters.transferred += save_page_header(rs, rs->f, block,
2028                                                  offset | RAM_SAVE_FLAG_PAGE);
2029     if (async) {
2030         qemu_put_buffer_async(rs->f, buf, TARGET_PAGE_SIZE,
2031                               migrate_release_ram() &&
2032                               migration_in_postcopy());
2033     } else {
2034         qemu_put_buffer(rs->f, buf, TARGET_PAGE_SIZE);
2035     }
2036     ram_counters.transferred += TARGET_PAGE_SIZE;
2037     ram_counters.normal++;
2038     return 1;
2039 }
2040 
2041 /**
2042  * ram_save_page: send the given page to the stream
2043  *
2044  * Returns the number of pages written.
2045  *          < 0 - error
2046  *          >=0 - Number of pages written - this might legally be 0
2047  *                if xbzrle noticed the page was the same.
2048  *
2049  * @rs: current RAM state
2050  * @block: block that contains the page we want to send
2051  * @offset: offset inside the block for the page
2052  * @last_stage: if we are at the completion stage
2053  */
2054 static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
2055 {
2056     int pages = -1;
2057     uint8_t *p;
2058     bool send_async = true;
2059     RAMBlock *block = pss->block;
2060     ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
2061     ram_addr_t current_addr = block->offset + offset;
2062 
2063     p = block->host + offset;
2064     trace_ram_save_page(block->idstr, (uint64_t)offset, p);
2065 
2066     XBZRLE_cache_lock();
2067     if (!rs->ram_bulk_stage && !migration_in_postcopy() &&
2068         migrate_use_xbzrle()) {
2069         pages = save_xbzrle_page(rs, &p, current_addr, block,
2070                                  offset, last_stage);
2071         if (!last_stage) {
2072             /* Can't send this cached data async, since the cache page
2073              * might get updated before it gets to the wire
2074              */
2075             send_async = false;
2076         }
2077     }
2078 
2079     /* XBZRLE overflow or normal page */
2080     if (pages == -1) {
2081         pages = save_normal_page(rs, block, offset, p, send_async);
2082     }
2083 
2084     XBZRLE_cache_unlock();
2085 
2086     return pages;
2087 }
2088 
2089 static int ram_save_multifd_page(RAMState *rs, RAMBlock *block,
2090                                  ram_addr_t offset)
2091 {
2092     if (multifd_queue_page(rs, block, offset) < 0) {
2093         return -1;
2094     }
2095     ram_counters.normal++;
2096 
2097     return 1;
2098 }
2099 
2100 static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
2101                                  ram_addr_t offset, uint8_t *source_buf)
2102 {
2103     RAMState *rs = ram_state;
2104     uint8_t *p = block->host + (offset & TARGET_PAGE_MASK);
2105     bool zero_page = false;
2106     int ret;
2107 
2108     if (save_zero_page_to_file(rs, f, block, offset)) {
2109         zero_page = true;
2110         goto exit;
2111     }
2112 
2113     save_page_header(rs, f, block, offset | RAM_SAVE_FLAG_COMPRESS_PAGE);
2114 
2115     /*
2116      * copy it to an internal buffer to avoid it being modified by the VM,
2117      * so that we can catch any error during compression and
2118      * decompression
2119      */
2120     memcpy(source_buf, p, TARGET_PAGE_SIZE);
2121     ret = qemu_put_compression_data(f, stream, source_buf, TARGET_PAGE_SIZE);
2122     if (ret < 0) {
2123         qemu_file_set_error(migrate_get_current()->to_dst_file, ret);
2124         error_report("compressed data failed!");
2125         return false;
2126     }
2127 
2128 exit:
2129     ram_release_pages(block->idstr, offset & TARGET_PAGE_MASK, 1);
2130     return zero_page;
2131 }
2132 
2133 static void
2134 update_compress_thread_counts(const CompressParam *param, int bytes_xmit)
2135 {
2136     ram_counters.transferred += bytes_xmit;
2137 
2138     if (param->zero_page) {
2139         ram_counters.duplicate++;
2140         return;
2141     }
2142 
2143     /* 8 is the size of the page header sent with RAM_SAVE_FLAG_CONTINUE. */
2144     compression_counters.compressed_size += bytes_xmit - 8;
2145     compression_counters.pages++;
2146 }
2147 
2148 static bool save_page_use_compression(RAMState *rs);
2149 
2150 static void flush_compressed_data(RAMState *rs)
2151 {
2152     int idx, len, thread_count;
2153 
2154     if (!save_page_use_compression(rs)) {
2155         return;
2156     }
2157     thread_count = migrate_compress_threads();
2158 
2159     qemu_mutex_lock(&comp_done_lock);
2160     for (idx = 0; idx < thread_count; idx++) {
2161         while (!comp_param[idx].done) {
2162             qemu_cond_wait(&comp_done_cond, &comp_done_lock);
2163         }
2164     }
2165     qemu_mutex_unlock(&comp_done_lock);
2166 
2167     for (idx = 0; idx < thread_count; idx++) {
2168         qemu_mutex_lock(&comp_param[idx].mutex);
2169         if (!comp_param[idx].quit) {
2170             len = qemu_put_qemu_file(rs->f, comp_param[idx].file);
2171             /*
2172              * it's safe to fetch zero_page without holding comp_done_lock
2173              * as there is no further request submitted to the thread,
2174              * i.e., the thread should be waiting for a request at this point.
2175              */
2176             update_compress_thread_counts(&comp_param[idx], len);
2177         }
2178         qemu_mutex_unlock(&comp_param[idx].mutex);
2179     }
2180 }
2181 
2182 static inline void set_compress_params(CompressParam *param, RAMBlock *block,
2183                                        ram_addr_t offset)
2184 {
2185     param->block = block;
2186     param->offset = offset;
2187 }
2188 
2189 static int compress_page_with_multi_thread(RAMState *rs, RAMBlock *block,
2190                                            ram_addr_t offset)
2191 {
2192     int idx, thread_count, bytes_xmit = -1, pages = -1;
2193     bool wait = migrate_compress_wait_thread();
2194 
2195     thread_count = migrate_compress_threads();
2196     qemu_mutex_lock(&comp_done_lock);
2197 retry:
2198     for (idx = 0; idx < thread_count; idx++) {
2199         if (comp_param[idx].done) {
2200             comp_param[idx].done = false;
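            /* drain the output the worker produced for its previous request
             * before handing it the new page */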
2201             bytes_xmit = qemu_put_qemu_file(rs->f, comp_param[idx].file);
2202             qemu_mutex_lock(&comp_param[idx].mutex);
2203             set_compress_params(&comp_param[idx], block, offset);
2204             qemu_cond_signal(&comp_param[idx].cond);
2205             qemu_mutex_unlock(&comp_param[idx].mutex);
2206             pages = 1;
2207             update_compress_thread_counts(&comp_param[idx], bytes_xmit);
2208             break;
2209         }
2210     }
2211 
2212     /*
2213      * wait for a free thread if the user specifies 'compress-wait-thread',
2214      * otherwise we will post the page out in the main thread as a normal page.
2215      */
2216     if (pages < 0 && wait) {
2217         qemu_cond_wait(&comp_done_cond, &comp_done_lock);
2218         goto retry;
2219     }
2220     qemu_mutex_unlock(&comp_done_lock);
2221 
2222     return pages;
2223 }
2224 
2225 /**
2226  * find_dirty_block: find the next dirty page and update any state
2227  * associated with the search process.
2228  *
2229  * Returns true if a page is found
2230  *
2231  * @rs: current RAM state
2232  * @pss: data about the state of the current dirty page scan
2233  * @again: set to false if the search has scanned the whole of RAM
2234  */
2235 static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again)
2236 {
2237     pss->page = migration_bitmap_find_dirty(rs, pss->block, pss->page);
2238     if (pss->complete_round && pss->block == rs->last_seen_block &&
2239         pss->page >= rs->last_page) {
2240         /*
2241          * We've been once around the RAM and haven't found anything.
2242          * Give up.
2243          */
2244         *again = false;
2245         return false;
2246     }
2247     if ((pss->page << TARGET_PAGE_BITS) >= pss->block->used_length) {
2248         /* Didn't find anything in this RAM Block */
2249         pss->page = 0;
2250         pss->block = QLIST_NEXT_RCU(pss->block, next);
2251         if (!pss->block) {
2252             /*
2253              * If memory migration starts over, we will meet a dirtied page
2254              * which may still exist in the compression threads' ring, so we
2255              * should flush the compressed data to make sure the new page
2256              * is not overwritten by the old one in the destination.
2257              *
2258              * Also, if xbzrle is on, stop using data compression at this
2259              * point. In theory, xbzrle can do better than compression.
2260              */
2261             flush_compressed_data(rs);
2262 
2263             /* Hit the end of the list */
2264             pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
2265             /* Flag that we've looped */
2266             pss->complete_round = true;
2267             rs->ram_bulk_stage = false;
2268         }
2269         /* Didn't find anything this time, but try again on the new block */
2270         *again = true;
2271         return false;
2272     } else {
2273         /* Can go around again, but... */
2274         *again = true;
2275         /* We've found something so probably don't need to */
2276         return true;
2277     }
2278 }
2279 
2280 /**
2281  * unqueue_page: gets a page of the queue
2282  *
2283  * Helper for 'get_queued_page' - gets a page off the queue
2284  *
2285  * Returns the block of the page (or NULL if none available)
2286  *
2287  * @rs: current RAM state
2288  * @offset: used to return the offset within the RAMBlock
2289  */
2290 static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
2291 {
2292     RAMBlock *block = NULL;
2293 
2294     if (QSIMPLEQ_EMPTY_ATOMIC(&rs->src_page_requests)) {
2295         return NULL;
2296     }
2297 
2298     qemu_mutex_lock(&rs->src_page_req_mutex);
2299     if (!QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
2300         struct RAMSrcPageRequest *entry =
2301                                 QSIMPLEQ_FIRST(&rs->src_page_requests);
2302         block = entry->rb;
2303         *offset = entry->offset;
2304 
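        /* Hand out one target page at a time; shrink the request, or drop it
         * once it has been fully consumed. */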
2305         if (entry->len > TARGET_PAGE_SIZE) {
2306             entry->len -= TARGET_PAGE_SIZE;
2307             entry->offset += TARGET_PAGE_SIZE;
2308         } else {
2309             memory_region_unref(block->mr);
2310             QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
2311             g_free(entry);
2312             migration_consume_urgent_request();
2313         }
2314     }
2315     qemu_mutex_unlock(&rs->src_page_req_mutex);
2316 
2317     return block;
2318 }
2319 
2320 /**
2321  * get_queued_page: unqueue a page from the postcopy requests
2322  *
2323  * Skips pages that are already sent (!dirty)
2324  *
2325  * Returns true if a queued page is found
2326  *
2327  * @rs: current RAM state
2328  * @pss: data about the state of the current dirty page scan
2329  */
2330 static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
2331 {
2332     RAMBlock  *block;
2333     ram_addr_t offset;
2334     bool dirty;
2335 
2336     do {
2337         block = unqueue_page(rs, &offset);
2338         /*
2339          * We're sending this page, and since it's postcopy nothing else
2340          * will dirty it, and we must make sure it doesn't get sent again
2341          * even if this queue request was received after the background
2342          * search already sent it.
2343          */
2344         if (block) {
2345             unsigned long page;
2346 
2347             page = offset >> TARGET_PAGE_BITS;
2348             dirty = test_bit(page, block->bmap);
2349             if (!dirty) {
2350                 trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset,
2351                                                 page);
2352             } else {
2353                 trace_get_queued_page(block->idstr, (uint64_t)offset, page);
2354             }
2355         }
2356 
2357     } while (block && !dirty);
2358 
2359     if (block) {
2360         /*
2361          * As soon as we start servicing pages out of order, we have
2362          * to kill the bulk stage, since the bulk stage assumes
2363          * (in migration_bitmap_find_and_reset_dirty) that every page is
2364          * dirty, which is no longer true.
2365          */
2366         rs->ram_bulk_stage = false;
2367 
2368         /*
2369          * We want the background search to continue from the queued page
2370          * since the guest is likely to want other pages near to the page
2371          * it just requested.
2372          */
2373         pss->block = block;
2374         pss->page = offset >> TARGET_PAGE_BITS;
2375 
2376         /*
2377          * This unqueued page would break the "one round" check, even if
2378          * it is really rare.
2379          */
2380         pss->complete_round = false;
2381     }
2382 
2383     return !!block;
2384 }
2385 
2386 /**
2387  * migration_page_queue_free: drop any remaining pages in the ram
2388  * request queue
2389  *
2390  * It should be empty at the end anyway, but in error cases there may
2391  * be some left.  In case any pages are left, we drop them.
2392  *
2393  */
2394 static void migration_page_queue_free(RAMState *rs)
2395 {
2396     struct RAMSrcPageRequest *mspr, *next_mspr;
2397     /* This queue generally should be empty - but in the case of a failed
2398      * migration it might have some droppings left in it.
2399      */
2400     rcu_read_lock();
2401     QSIMPLEQ_FOREACH_SAFE(mspr, &rs->src_page_requests, next_req, next_mspr) {
2402         memory_region_unref(mspr->rb->mr);
2403         QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
2404         g_free(mspr);
2405     }
2406     rcu_read_unlock();
2407 }
2408 
2409 /**
2410  * ram_save_queue_pages: queue the page for transmission
2411  *
2412  * A request from postcopy destination for example.
2413  *
2414  * Returns zero on success or negative on error
2415  *
2416  * @rbname: Name of the RAMBlock of the request. NULL means the
2417  *          same as the last one.
2418  * @start: starting address from the start of the RAMBlock
2419  * @len: length (in bytes) to send
2420  */
2421 int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len)
2422 {
2423     RAMBlock *ramblock;
2424     RAMState *rs = ram_state;
2425 
2426     ram_counters.postcopy_requests++;
2427     rcu_read_lock();
2428     if (!rbname) {
2429         /* Reuse last RAMBlock */
2430         ramblock = rs->last_req_rb;
2431 
2432         if (!ramblock) {
2433             /*
2434              * Shouldn't happen, we can't reuse the last RAMBlock if
2435              * it's the 1st request.
2436              */
2437             error_report("ram_save_queue_pages no previous block");
2438             goto err;
2439         }
2440     } else {
2441         ramblock = qemu_ram_block_by_name(rbname);
2442 
2443         if (!ramblock) {
2444             /* We shouldn't be asked for a non-existent RAMBlock */
2445             error_report("ram_save_queue_pages no block '%s'", rbname);
2446             goto err;
2447         }
2448         rs->last_req_rb = ramblock;
2449     }
2450     trace_ram_save_queue_pages(ramblock->idstr, start, len);
2451     if (start + len > ramblock->used_length) {
2452         error_report("%s request overrun start=" RAM_ADDR_FMT " len="
2453                      RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT,
2454                      __func__, start, len, ramblock->used_length);
2455         goto err;
2456     }
2457 
2458     struct RAMSrcPageRequest *new_entry =
2459         g_malloc0(sizeof(struct RAMSrcPageRequest));
2460     new_entry->rb = ramblock;
2461     new_entry->offset = start;
2462     new_entry->len = len;
2463 
2464     memory_region_ref(ramblock->mr);
2465     qemu_mutex_lock(&rs->src_page_req_mutex);
2466     QSIMPLEQ_INSERT_TAIL(&rs->src_page_requests, new_entry, next_req);
2467     migration_make_urgent_request();
2468     qemu_mutex_unlock(&rs->src_page_req_mutex);
2469     rcu_read_unlock();
2470 
2471     return 0;
2472 
2473 err:
2474     rcu_read_unlock();
2475     return -1;
2476 }
2477 
2478 static bool save_page_use_compression(RAMState *rs)
2479 {
2480     if (!migrate_use_compression()) {
2481         return false;
2482     }
2483 
2484     /*
2485      * If xbzrle is on, stop using the data compression after first
2486      * round of migration even if compression is enabled. In theory,
2487      * xbzrle can do better than compression.
2488      */
2489     if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
2490         return true;
2491     }
2492 
2493     return false;
2494 }
2495 
2496 /*
2497  * try to compress the page before posting it out, return true if the page
2498  * has been properly handled by compression, otherwise needs other
2499  * paths to handle it
2500  */
2501 static bool save_compress_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
2502 {
2503     if (!save_page_use_compression(rs)) {
2504         return false;
2505     }
2506 
2507     /*
2508      * When starting the process of a new block, the first page of
2509      * the block should be sent out before other pages in the same
2510      * block, and all the pages in the last block should have been sent
2511      * out.  Keeping this order is important, because the 'cont' flag
2512      * is used to avoid resending the block name.
2513      *
2514      * We post the first page as a normal page as compression will take
2515      * a lot of CPU resources.
2516      */
2517     if (block != rs->last_sent_block) {
2518         flush_compressed_data(rs);
2519         return false;
2520     }
2521 
2522     if (compress_page_with_multi_thread(rs, block, offset) > 0) {
2523         return true;
2524     }
2525 
2526     compression_counters.busy++;
2527     return false;
2528 }
2529 
2530 /**
2531  * ram_save_target_page: save one target page
2532  *
2533  * Returns the number of pages written
2534  *
2535  * @rs: current RAM state
2536  * @pss: data about the page we want to send
2537  * @last_stage: if we are at the completion stage
2538  */
2539 static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
2540                                 bool last_stage)
2541 {
2542     RAMBlock *block = pss->block;
2543     ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
2544     int res;
2545 
2546     if (control_save_page(rs, block, offset, &res)) {
2547         return res;
2548     }
2549 
2550     if (save_compress_page(rs, block, offset)) {
2551         return 1;
2552     }
2553 
2554     res = save_zero_page(rs, block, offset);
2555     if (res > 0) {
2556         /* Must let xbzrle know, otherwise a previous (now 0'd) cached
2557          * page would be stale
2558          */
2559         if (!save_page_use_compression(rs)) {
2560             XBZRLE_cache_lock();
2561             xbzrle_cache_zero_page(rs, block->offset + offset);
2562             XBZRLE_cache_unlock();
2563         }
2564         ram_release_pages(block->idstr, offset, res);
2565         return res;
2566     }
2567 
2568     /*
2569      * do not use multifd for compression as the first page in the new
2570      * block should be posted out before sending the compressed page
2571      */
2572     if (!save_page_use_compression(rs) && migrate_use_multifd()) {
2573         return ram_save_multifd_page(rs, block, offset);
2574     }
2575 
2576     return ram_save_page(rs, pss, last_stage);
2577 }
2578 
2579 /**
2580  * ram_save_host_page: save a whole host page
2581  *
2582  * Starting at @pss->page, send pages up to the end of the current host
2583  * page. It's valid for the initial offset to point into the middle of
2584  * a host page, in which case the remainder of the host page is sent.
2585  * Only dirty target pages are sent. Note that the host page size may
2586  * be a huge page for this block.
2587  * The saving stops at the boundary of the used_length of the block
2588  * if the RAMBlock isn't a multiple of the host page size.
2589  *
2590  * Returns the number of pages written or negative on error
2591  *
2592  * @rs: current RAM state
2594  * @pss: data about the page we want to send
2595  * @last_stage: if we are at the completion stage
2596  */
2597 static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
2598                               bool last_stage)
2599 {
2600     int tmppages, pages = 0;
2601     size_t pagesize_bits =
2602         qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
2603 
2604     if (ramblock_is_ignored(pss->block)) {
2605         error_report("block %s should not be migrated !", pss->block->idstr);
2606         return 0;
2607     }
2608 
2609     do {
2610         /* Check whether the page is dirty and, if it is, send it */
2611         if (!migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
2612             pss->page++;
2613             continue;
2614         }
2615 
2616         tmppages = ram_save_target_page(rs, pss, last_stage);
2617         if (tmppages < 0) {
2618             return tmppages;
2619         }
2620 
2621         pages += tmppages;
2622         pss->page++;
2623     } while ((pss->page & (pagesize_bits - 1)) &&
2624              offset_in_ramblock(pss->block, pss->page << TARGET_PAGE_BITS));
2625 
2626     /* The offset we leave with is the last one we looked at */
2627     pss->page--;
2628     return pages;
2629 }
2630 
2631 /**
2632  * ram_find_and_save_block: finds a dirty page and sends it to f
2633  *
2634  * Called within an RCU critical section.
2635  *
2636  * Returns the number of pages written where zero means no dirty pages,
2637  * or negative on error
2638  *
2639  * @rs: current RAM state
2640  * @last_stage: if we are at the completion stage
2641  *
2642  * On systems where host-page-size > target-page-size it will send all the
2643  * pages in a host page that are dirty.
2644  */
2645 
2646 static int ram_find_and_save_block(RAMState *rs, bool last_stage)
2647 {
2648     PageSearchStatus pss;
2649     int pages = 0;
2650     bool again, found;
2651 
2652     /* No dirty page as there is zero RAM */
2653     if (!ram_bytes_total()) {
2654         return pages;
2655     }
2656 
2657     pss.block = rs->last_seen_block;
2658     pss.page = rs->last_page;
2659     pss.complete_round = false;
2660 
2661     if (!pss.block) {
2662         pss.block = QLIST_FIRST_RCU(&ram_list.blocks);
2663     }
2664 
2665     do {
2666         again = true;
2667         found = get_queued_page(rs, &pss);
2668 
2669         if (!found) {
2670             /* priority queue empty, so just search for something dirty */
2671             found = find_dirty_block(rs, &pss, &again);
2672         }
2673 
2674         if (found) {
2675             pages = ram_save_host_page(rs, &pss, last_stage);
2676         }
2677     } while (!pages && again);
2678 
2679     rs->last_seen_block = pss.block;
2680     rs->last_page = pss.page;
2681 
2682     return pages;
2683 }
2684 
2685 void acct_update_position(QEMUFile *f, size_t size, bool zero)
2686 {
2687     uint64_t pages = size / TARGET_PAGE_SIZE;
2688 
2689     if (zero) {
2690         ram_counters.duplicate += pages;
2691     } else {
2692         ram_counters.normal += pages;
2693         ram_counters.transferred += size;
2694         qemu_update_position(f, size);
2695     }
2696 }
2697 
2698 static uint64_t ram_bytes_total_common(bool count_ignored)
2699 {
2700     RAMBlock *block;
2701     uint64_t total = 0;
2702 
2703     rcu_read_lock();
2704     if (count_ignored) {
2705         RAMBLOCK_FOREACH_MIGRATABLE(block) {
2706             total += block->used_length;
2707         }
2708     } else {
2709         RAMBLOCK_FOREACH_NOT_IGNORED(block) {
2710             total += block->used_length;
2711         }
2712     }
2713     rcu_read_unlock();
2714     return total;
2715 }
2716 
2717 uint64_t ram_bytes_total(void)
2718 {
2719     return ram_bytes_total_common(false);
2720 }
2721 
2722 static void xbzrle_load_setup(void)
2723 {
2724     XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
2725 }
2726 
2727 static void xbzrle_load_cleanup(void)
2728 {
2729     g_free(XBZRLE.decoded_buf);
2730     XBZRLE.decoded_buf = NULL;
2731 }
2732 
2733 static void ram_state_cleanup(RAMState **rsp)
2734 {
2735     if (*rsp) {
2736         migration_page_queue_free(*rsp);
2737         qemu_mutex_destroy(&(*rsp)->bitmap_mutex);
2738         qemu_mutex_destroy(&(*rsp)->src_page_req_mutex);
2739         g_free(*rsp);
2740         *rsp = NULL;
2741     }
2742 }
2743 
2744 static void xbzrle_cleanup(void)
2745 {
2746     XBZRLE_cache_lock();
2747     if (XBZRLE.cache) {
2748         cache_fini(XBZRLE.cache);
2749         g_free(XBZRLE.encoded_buf);
2750         g_free(XBZRLE.current_buf);
2751         g_free(XBZRLE.zero_target_page);
2752         XBZRLE.cache = NULL;
2753         XBZRLE.encoded_buf = NULL;
2754         XBZRLE.current_buf = NULL;
2755         XBZRLE.zero_target_page = NULL;
2756     }
2757     XBZRLE_cache_unlock();
2758 }
2759 
2760 static void ram_save_cleanup(void *opaque)
2761 {
2762     RAMState **rsp = opaque;
2763     RAMBlock *block;
2764 
2765     /* the caller must hold the iothread lock or be in a bh, so there is
2766      * no write race against the migration bitmap
2767      */
2768     memory_global_dirty_log_stop();
2769 
2770     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
2771         g_free(block->clear_bmap);
2772         block->clear_bmap = NULL;
2773         g_free(block->bmap);
2774         block->bmap = NULL;
2775     }
2776 
2777     xbzrle_cleanup();
2778     compress_threads_save_cleanup();
2779     ram_state_cleanup(rsp);
2780 }
2781 
2782 static void ram_state_reset(RAMState *rs)
2783 {
2784     rs->last_seen_block = NULL;
2785     rs->last_sent_block = NULL;
2786     rs->last_page = 0;
2787     rs->last_version = ram_list.version;
2788     rs->ram_bulk_stage = true;
2789     rs->fpo_enabled = false;
2790 }
2791 
2792 #define MAX_WAIT 50 /* ms, half buffered_file limit */
2793 
2794 /*
2795  * 'expected' is the value you expect the bitmap mostly to be full
2796  * of; it won't bother printing lines that are all this value.
2797  * If 'todump' is null the migration bitmap is dumped.
2798  */
2799 void ram_debug_dump_bitmap(unsigned long *todump, bool expected,
2800                            unsigned long pages)
2801 {
2802     int64_t cur;
2803     int64_t linelen = 128;
2804     char linebuf[129];
2805 
2806     for (cur = 0; cur < pages; cur += linelen) {
2807         int64_t curb;
2808         bool found = false;
2809         /*
2810          * Last line; catch the case where the line length
2811          * is longer than remaining ram
2812          */
2813         if (cur + linelen > pages) {
2814             linelen = pages - cur;
2815         }
2816         for (curb = 0; curb < linelen; curb++) {
2817             bool thisbit = test_bit(cur + curb, todump);
2818             linebuf[curb] = thisbit ? '1' : '.';
2819             found = found || (thisbit != expected);
2820         }
2821         if (found) {
2822             linebuf[curb] = '\0';
2823             fprintf(stderr,  "0x%08" PRIx64 " : %s\n", cur, linebuf);
2824         }
2825     }
2826 }
2827 
2828 /* **** functions for postcopy ***** */
2829 
2830 void ram_postcopy_migrated_memory_release(MigrationState *ms)
2831 {
2832     struct RAMBlock *block;
2833 
2834     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
2835         unsigned long *bitmap = block->bmap;
2836         unsigned long range = block->used_length >> TARGET_PAGE_BITS;
2837         unsigned long run_start = find_next_zero_bit(bitmap, range, 0);
2838 
2839         while (run_start < range) {
2840             unsigned long run_end = find_next_bit(bitmap, range, run_start + 1);
2841             ram_discard_range(block->idstr, run_start << TARGET_PAGE_BITS,
2842                               (run_end - run_start) << TARGET_PAGE_BITS);
2843             run_start = find_next_zero_bit(bitmap, range, run_end + 1);
2844         }
2845     }
2846 }
2847 
2848 /**
2849  * postcopy_send_discard_bm_ram: discard a RAMBlock
2850  *
2851  * Returns zero on success
2852  *
2853  * Callback from postcopy_each_ram_send_discard for each RAMBlock
2854  *
2855  * @ms: current migration state
2856  * @block: RAMBlock to discard
2857  */
2858 static int postcopy_send_discard_bm_ram(MigrationState *ms, RAMBlock *block)
2859 {
2860     unsigned long end = block->used_length >> TARGET_PAGE_BITS;
2861     unsigned long current;
2862     unsigned long *bitmap = block->bmap;
2863 
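    /* Send each contiguous run of dirty pages as a single discard range. */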
2864     for (current = 0; current < end; ) {
2865         unsigned long one = find_next_bit(bitmap, end, current);
2866         unsigned long zero, discard_length;
2867 
2868         if (one >= end) {
2869             break;
2870         }
2871 
2872         zero = find_next_zero_bit(bitmap, end, one + 1);
2873 
2874         if (zero >= end) {
2875             discard_length = end - one;
2876         } else {
2877             discard_length = zero - one;
2878         }
2879         postcopy_discard_send_range(ms, one, discard_length);
2880         current = one + discard_length;
2881     }
2882 
2883     return 0;
2884 }
2885 
2886 /**
2887  * postcopy_each_ram_send_discard: discard all RAMBlocks
2888  *
2889  * Returns 0 for success or negative for error
2890  *
2891  * Utility for the outgoing postcopy code.
2892  *   Calls postcopy_send_discard_bm_ram for each RAMBlock
2893  *   passing it bitmap indexes and name.
2894  * (qemu_ram_foreach_block ends up passing unscaled lengths
2895  *  which would mean postcopy code would have to deal with target page)
2896  *
2897  * @ms: current migration state
2898  */
2899 static int postcopy_each_ram_send_discard(MigrationState *ms)
2900 {
2901     struct RAMBlock *block;
2902     int ret;
2903 
2904     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
2905         postcopy_discard_send_init(ms, block->idstr);
2906 
2907         /*
2908          * Postcopy sends chunks of bitmap over the wire, but it
2909          * just needs indexes at this point, avoids it having
2910          * target page specific code.
2911          */
2912         ret = postcopy_send_discard_bm_ram(ms, block);
2913         postcopy_discard_send_finish(ms);
2914         if (ret) {
2915             return ret;
2916         }
2917     }
2918 
2919     return 0;
2920 }
2921 
2922 /**
2923  * postcopy_chunk_hostpages_pass: canonicalize bitmap in hostpages
2924  *
2925  * Helper for postcopy_chunk_hostpages; it's called twice to
2926  * canonicalize the two bitmaps, that are similar, but one is
2927  * inverted.
2928  *
2929  * Postcopy requires that all target pages in a hostpage are dirty or
2930  * Postcopy requires that all target pages in a host page are dirty or
2931  *
2932  * @ms: current migration state
2933  * @block: block that contains the page we want to canonicalize
2934  */
2935 static void postcopy_chunk_hostpages_pass(MigrationState *ms, RAMBlock *block)
2936 {
2937     RAMState *rs = ram_state;
2938     unsigned long *bitmap = block->bmap;
2939     unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE;
2940     unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
2941     unsigned long run_start;
2942 
2943     if (block->page_size == TARGET_PAGE_SIZE) {
2944         /* Easy case - TPS==HPS for a non-huge page RAMBlock */
2945         return;
2946     }
2947 
2948     /* Find a dirty page */
2949     run_start = find_next_bit(bitmap, pages, 0);
2950 
2951     while (run_start < pages) {
2952 
2953         /*
2954          * If the start of this run of pages is in the middle of a host
2955          * page, then we need to fixup this host page.
2956          */
2957         if (QEMU_IS_ALIGNED(run_start, host_ratio)) {
2958             /* Find the end of this run */
2959             run_start = find_next_zero_bit(bitmap, pages, run_start + 1);
2960             /*
2961              * If the end isn't at the start of a host page, then the
2962              * run doesn't finish at the end of a host page
2963              * and we need to discard.
2964              */
2965         }
2966 
2967         if (!QEMU_IS_ALIGNED(run_start, host_ratio)) {
2968             unsigned long page;
2969             unsigned long fixup_start_addr = QEMU_ALIGN_DOWN(run_start,
2970                                                              host_ratio);
2971             run_start = QEMU_ALIGN_UP(run_start, host_ratio);
2972 
2973             /* Clean up the bitmap */
2974             for (page = fixup_start_addr;
2975                  page < fixup_start_addr + host_ratio; page++) {
2976                 /*
2977                  * Remark them as dirty, updating the count for any pages
2978                  * that weren't previously dirty.
2979                  */
2980                 rs->migration_dirty_pages += !test_and_set_bit(page, bitmap);
2981             }
2982         }
2983 
2984         /* Find the next dirty page for the next iteration */
2985         run_start = find_next_bit(bitmap, pages, run_start);
2986     }
2987 }
2988 
2989 /**
2990  * postcopy_chunk_hostpages: discard any partially sent host page
2991  *
2992  * Utility for the outgoing postcopy code.
2993  *
2994  * Discard any partially sent host-page size chunks, mark any partially
2995  * dirty host-page size chunks as all dirty.  In this case the host-page
2996  * is the host-page for the particular RAMBlock, i.e. it might be a huge page
2997  *
2998  * Returns zero on success
2999  *
3000  * @ms: current migration state
3001  * @block: block we want to work with
3002  */
3003 static int postcopy_chunk_hostpages(MigrationState *ms, RAMBlock *block)
3004 {
3005     postcopy_discard_send_init(ms, block->idstr);
3006 
3007     /*
3008      * Ensure that all partially dirty host pages are made fully dirty.
3009      */
3010     postcopy_chunk_hostpages_pass(ms, block);
3011 
3012     postcopy_discard_send_finish(ms);
3013     return 0;
3014 }
3015 
3016 /**
3017  * ram_postcopy_send_discard_bitmap: transmit the discard bitmap
3018  *
3019  * Returns zero on success
3020  *
3021  * Transmit the set of pages to be discarded after precopy to the target;
3022  * these are pages that:
3023  *     a) Have been previously transmitted but are now dirty again
3024  *     b) Pages that have never been transmitted, this ensures that
3025  *        any pages on the destination that have been mapped by background
3026  *        tasks get discarded (transparent huge pages is the specific concern)
3027  * Hopefully this is pretty sparse
3028  *
3029  * @ms: current migration state
3030  */
3031 int ram_postcopy_send_discard_bitmap(MigrationState *ms)
3032 {
3033     RAMState *rs = ram_state;
3034     RAMBlock *block;
3035     int ret;
3036 
3037     rcu_read_lock();
3038 
3039     /* This should be our last sync, the src is now paused */
3040     migration_bitmap_sync(rs);
3041 
3042     /* Easiest way to make sure we don't resume in the middle of a host-page */
3043     rs->last_seen_block = NULL;
3044     rs->last_sent_block = NULL;
3045     rs->last_page = 0;
3046 
3047     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3048         /* Deal with TPS != HPS and huge pages */
3049         ret = postcopy_chunk_hostpages(ms, block);
3050         if (ret) {
3051             rcu_read_unlock();
3052             return ret;
3053         }
3054 
3055 #ifdef DEBUG_POSTCOPY
3056         ram_debug_dump_bitmap(block->bmap, true,
3057                               block->used_length >> TARGET_PAGE_BITS);
3058 #endif
3059     }
3060     trace_ram_postcopy_send_discard_bitmap();
3061 
3062     ret = postcopy_each_ram_send_discard(ms);
3063     rcu_read_unlock();
3064 
3065     return ret;
3066 }
3067 
3068 /**
3069  * ram_discard_range: discard dirtied pages at the beginning of postcopy
3070  *
3071  * Returns zero on success
3072  *
3073  * @rbname: name of the RAMBlock of the request. NULL means the
3074  *          same as the last one.
3075  * @start: RAMBlock starting page
3076  * @length: RAMBlock size
3077  */
3078 int ram_discard_range(const char *rbname, uint64_t start, size_t length)
3079 {
3080     int ret = -1;
3081 
3082     trace_ram_discard_range(rbname, start, length);
3083 
3084     rcu_read_lock();
3085     RAMBlock *rb = qemu_ram_block_by_name(rbname);
3086 
3087     if (!rb) {
3088         error_report("ram_discard_range: Failed to find block '%s'", rbname);
3089         goto err;
3090     }
3091 
3092     /*
3093      * On source VM, we don't need to update the received bitmap since
3094      * we don't even have one.
3095      */
3096     if (rb->receivedmap) {
3097         bitmap_clear(rb->receivedmap, start >> qemu_target_page_bits(),
3098                      length >> qemu_target_page_bits());
3099     }
3100 
3101     ret = ram_block_discard_range(rb, start, length);
3102 
3103 err:
3104     rcu_read_unlock();
3105 
3106     return ret;
3107 }
3108 
3109 /*
3110  * For every allocation, we will try not to crash the VM if the
3111  * allocation fails.
3112  */
3113 static int xbzrle_init(void)
3114 {
3115     Error *local_err = NULL;
3116 
3117     if (!migrate_use_xbzrle()) {
3118         return 0;
3119     }
3120 
3121     XBZRLE_cache_lock();
3122 
3123     XBZRLE.zero_target_page = g_try_malloc0(TARGET_PAGE_SIZE);
3124     if (!XBZRLE.zero_target_page) {
3125         error_report("%s: Error allocating zero page", __func__);
3126         goto err_out;
3127     }
3128 
3129     XBZRLE.cache = cache_init(migrate_xbzrle_cache_size(),
3130                               TARGET_PAGE_SIZE, &local_err);
3131     if (!XBZRLE.cache) {
3132         error_report_err(local_err);
3133         goto free_zero_page;
3134     }
3135 
3136     XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
3137     if (!XBZRLE.encoded_buf) {
3138         error_report("%s: Error allocating encoded_buf", __func__);
3139         goto free_cache;
3140     }
3141 
3142     XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
3143     if (!XBZRLE.current_buf) {
3144         error_report("%s: Error allocating current_buf", __func__);
3145         goto free_encoded_buf;
3146     }
3147 
3148     /* We are all good */
3149     XBZRLE_cache_unlock();
3150     return 0;
3151 
3152 free_encoded_buf:
3153     g_free(XBZRLE.encoded_buf);
3154     XBZRLE.encoded_buf = NULL;
3155 free_cache:
3156     cache_fini(XBZRLE.cache);
3157     XBZRLE.cache = NULL;
3158 free_zero_page:
3159     g_free(XBZRLE.zero_target_page);
3160     XBZRLE.zero_target_page = NULL;
3161 err_out:
3162     XBZRLE_cache_unlock();
3163     return -ENOMEM;
3164 }
3165 
3166 static int ram_state_init(RAMState **rsp)
3167 {
3168     *rsp = g_try_new0(RAMState, 1);
3169 
3170     if (!*rsp) {
3171         error_report("%s: Init ramstate fail", __func__);
3172         return -1;
3173     }
3174 
3175     qemu_mutex_init(&(*rsp)->bitmap_mutex);
3176     qemu_mutex_init(&(*rsp)->src_page_req_mutex);
3177     QSIMPLEQ_INIT(&(*rsp)->src_page_requests);
3178 
3179     /*
3180      * Count the total number of pages used by ram blocks not including any
3181      * gaps due to alignment or unplugs.
3182      * This must match with the initial values of dirty bitmap.
3183      */
3184     (*rsp)->migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
3185     ram_state_reset(*rsp);
3186 
3187     return 0;
3188 }
3189 
3190 static void ram_list_init_bitmaps(void)
3191 {
3192     MigrationState *ms = migrate_get_current();
3193     RAMBlock *block;
3194     unsigned long pages;
3195     uint8_t shift;
3196 
3197     /* Skip setting bitmap if there is no RAM */
3198     if (ram_bytes_total()) {
3199         shift = ms->clear_bitmap_shift;
3200         if (shift > CLEAR_BITMAP_SHIFT_MAX) {
3201             error_report("clear_bitmap_shift (%u) too big, using "
3202                          "max value (%u)", shift, CLEAR_BITMAP_SHIFT_MAX);
3203             shift = CLEAR_BITMAP_SHIFT_MAX;
3204         } else if (shift < CLEAR_BITMAP_SHIFT_MIN) {
3205             error_report("clear_bitmap_shift (%u) too small, using "
3206                          "min value (%u)", shift, CLEAR_BITMAP_SHIFT_MIN);
3207             shift = CLEAR_BITMAP_SHIFT_MIN;
3208         }
3209 
3210         RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3211             pages = block->max_length >> TARGET_PAGE_BITS;
3212             /*
3213              * The initial dirty bitmap for migration must be set with all
3214              * ones to make sure we'll migrate every guest RAM page to
3215              * destination.
3216              * Here we set RAMBlock.bmap all to 1 because when we restart
3217              * a migration after a failed one, ram_list.
3218              * dirty_memory[DIRTY_MEMORY_MIGRATION] does not include the
3219              * whole guest memory.
3220              */
3221             block->bmap = bitmap_new(pages);
3222             bitmap_set(block->bmap, 0, pages);
3223             block->clear_bmap_shift = shift;
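            /*
             * One clear_bmap bit covers a chunk of 2^shift target pages; the
             * chunk's remote dirty bitmap is only cleared lazily, in
             * migration_bitmap_clear_dirty(), right before the first page of
             * that chunk is sent.
             */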
3224             block->clear_bmap = bitmap_new(clear_bmap_size(pages, shift));
3225         }
3226     }
3227 }
3228 
3229 static void ram_init_bitmaps(RAMState *rs)
3230 {
3231     /* For memory_global_dirty_log_start below.  */
3232     qemu_mutex_lock_iothread();
3233     qemu_mutex_lock_ramlist();
3234     rcu_read_lock();
3235 
3236     ram_list_init_bitmaps();
3237     memory_global_dirty_log_start();
3238     migration_bitmap_sync_precopy(rs);
3239 
3240     rcu_read_unlock();
3241     qemu_mutex_unlock_ramlist();
3242     qemu_mutex_unlock_iothread();
3243 }
3244 
3245 static int ram_init_all(RAMState **rsp)
3246 {
3247     if (ram_state_init(rsp)) {
3248         return -1;
3249     }
3250 
3251     if (xbzrle_init()) {
3252         ram_state_cleanup(rsp);
3253         return -1;
3254     }
3255 
3256     ram_init_bitmaps(*rsp);
3257 
3258     return 0;
3259 }
3260 
3261 static void ram_state_resume_prepare(RAMState *rs, QEMUFile *out)
3262 {
3263     RAMBlock *block;
3264     uint64_t pages = 0;
3265 
3266     /*
3267      * Postcopy is not using xbzrle/compression, so no need for that.
3268      * Also, since the source is already halted, we don't need to care
3269      * about dirty page logging either.
3270      */
3271 
3272     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3273         pages += bitmap_count_one(block->bmap,
3274                                   block->used_length >> TARGET_PAGE_BITS);
3275     }
3276 
3277     /* This may not be aligned with current bitmaps. Recalculate. */
3278     rs->migration_dirty_pages = pages;
3279 
3280     rs->last_seen_block = NULL;
3281     rs->last_sent_block = NULL;
3282     rs->last_page = 0;
3283     rs->last_version = ram_list.version;
3284     /*
3285      * Disable the bulk stage, otherwise we'll resend the whole RAM no
3286      * matter what we have sent.
3287      */
3288     rs->ram_bulk_stage = false;
3289 
3290     /* Update RAMState cache of output QEMUFile */
3291     rs->f = out;
3292 
3293     trace_ram_state_resume_prepare(pages);
3294 }
3295 
3296 /*
3297  * This function clears bits of the free pages reported by the caller from the
3298  * migration dirty bitmap. @addr is the host address corresponding to the
3299  * start of the contiguous guest free pages, and @len is the total bytes of
3300  * those pages.
3301  */
3302 void qemu_guest_free_page_hint(void *addr, size_t len)
3303 {
3304     RAMBlock *block;
3305     ram_addr_t offset;
3306     size_t used_len, start, npages;
3307     MigrationState *s = migrate_get_current();
3308 
3309     /* This function is currently expected to be used during live migration */
3310     if (!migration_is_setup_or_active(s->state)) {
3311         return;
3312     }
3313 
3314     for (; len > 0; len -= used_len, addr += used_len) {
3315         block = qemu_ram_block_from_host(addr, false, &offset);
3316         if (unlikely(!block || offset >= block->used_length)) {
3317             /*
3318              * The implementation might not support RAMBlock resize during
3319              * live migration, but it could happen in theory with future
3320              * updates. So we add a check here to capture that case.
3321              */
3322             error_report_once("%s unexpected error", __func__);
3323             return;
3324         }
3325 
3326         if (len <= block->used_length - offset) {
3327             used_len = len;
3328         } else {
3329             used_len = block->used_length - offset;
3330         }
3331 
3332         start = offset >> TARGET_PAGE_BITS;
3333         npages = used_len >> TARGET_PAGE_BITS;
3334 
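        /*
         * The dirty page count and the bitmap are adjusted together under
         * bitmap_mutex so the accounting stays consistent with the bits
         * cleared below.
         */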
3335         qemu_mutex_lock(&ram_state->bitmap_mutex);
3336         ram_state->migration_dirty_pages -=
3337                       bitmap_count_one_with_offset(block->bmap, start, npages);
3338         bitmap_clear(block->bmap, start, npages);
3339         qemu_mutex_unlock(&ram_state->bitmap_mutex);
3340     }
3341 }
3342 
3343 /*
3344  * Each of ram_save_setup, ram_save_iterate and ram_save_complete has
3345  * a long-running RCU critical section.  When RCU reclaims in the code
3346  * start to become numerous it will be necessary to reduce the
3347  * granularity of these critical sections.
3348  */
3349 
3350 /**
3351  * ram_save_setup: Setup RAM for migration
3352  *
3353  * Returns zero to indicate success and negative for error
3354  *
3355  * @f: QEMUFile where to send the data
3356  * @opaque: RAMState pointer
3357  */
3358 static int ram_save_setup(QEMUFile *f, void *opaque)
3359 {
3360     RAMState **rsp = opaque;
3361     RAMBlock *block;
3362 
3363     if (compress_threads_save_setup()) {
3364         return -1;
3365     }
3366 
3367     /* migration has already set up the bitmap, reuse it. */
3368     if (!migration_in_colo_state()) {
3369         if (ram_init_all(rsp) != 0) {
3370             compress_threads_save_cleanup();
3371             return -1;
3372         }
3373     }
3374     (*rsp)->f = f;
3375 
3376     rcu_read_lock();
3377 
3378     qemu_put_be64(f, ram_bytes_total_common(true) | RAM_SAVE_FLAG_MEM_SIZE);
3379 
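    /*
     * For every migratable block the loop below writes: a one-byte idstr
     * length, the idstr bytes, and the used_length as a be64, optionally
     * followed by the block's page size (postcopy with a non-host page
     * size) and the memory region's GPA (when ignore-shared is enabled).
     */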
3380     RAMBLOCK_FOREACH_MIGRATABLE(block) {
3381         qemu_put_byte(f, strlen(block->idstr));
3382         qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
3383         qemu_put_be64(f, block->used_length);
3384         if (migrate_postcopy_ram() && block->page_size != qemu_host_page_size) {
3385             qemu_put_be64(f, block->page_size);
3386         }
3387         if (migrate_ignore_shared()) {
3388             qemu_put_be64(f, block->mr->addr);
3389         }
3390     }
3391 
3392     rcu_read_unlock();
3393 
3394     ram_control_before_iterate(f, RAM_CONTROL_SETUP);
3395     ram_control_after_iterate(f, RAM_CONTROL_SETUP);
3396 
3397     multifd_send_sync_main(*rsp);
3398     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
3399     qemu_fflush(f);
3400 
3401     return 0;
3402 }
3403 
3404 /**
3405  * ram_save_iterate: iterative stage for migration
3406  *
3407  * Returns zero to indicate success and negative for error
3408  *
3409  * @f: QEMUFile where to send the data
3410  * @opaque: RAMState pointer
3411  */
3412 static int ram_save_iterate(QEMUFile *f, void *opaque)
3413 {
3414     RAMState **temp = opaque;
3415     RAMState *rs = *temp;
3416     int ret;
3417     int i;
3418     int64_t t0;
3419     int done = 0;
3420 
3421     if (blk_mig_bulk_active()) {
3422         /* Avoid transferring RAM during the bulk phase of block migration,
3423          * as the bulk phase will usually take a long time and transferring
3424          * RAM updates during that time is pointless. */
3425         goto out;
3426     }
3427 
3428     rcu_read_lock();
3429     if (ram_list.version != rs->last_version) {
3430         ram_state_reset(rs);
3431     }
3432 
3433     /* Read version before ram_list.blocks */
3434     smp_rmb();
3435 
3436     ram_control_before_iterate(f, RAM_CONTROL_ROUND);
3437 
3438     t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
3439     i = 0;
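    /*
     * Keep sending pages while we are under the bandwidth limit, or while
     * there are outstanding postcopy page requests queued by the
     * destination; those are serviced even when rate limited.
     */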
3440     while ((ret = qemu_file_rate_limit(f)) == 0 ||
3441             !QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
3442         int pages;
3443 
3444         if (qemu_file_get_error(f)) {
3445             break;
3446         }
3447 
3448         pages = ram_find_and_save_block(rs, false);
3449         /* no more pages to send */
3450         if (pages == 0) {
3451             done = 1;
3452             break;
3453         }
3454 
3455         if (pages < 0) {
3456             qemu_file_set_error(f, pages);
3457             break;
3458         }
3459 
3460         rs->target_page_count += pages;
3461 
3462         /* We want to check in the first loop, just in case it was the first
3463            time and we had to sync the dirty bitmap.
3464            qemu_clock_get_ns() is a bit expensive, so we only check every few
3465            iterations.
3466         */
3467         if ((i & 63) == 0) {
3468             uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
3469             if (t1 > MAX_WAIT) {
3470                 trace_ram_save_iterate_big_wait(t1, i);
3471                 break;
3472             }
3473         }
3474         i++;
3475     }
3476     rcu_read_unlock();
3477 
3478     /*
3479      * Must occur before EOS (or any QEMUFile operation)
3480      * because of RDMA protocol.
3481      */
3482     ram_control_after_iterate(f, RAM_CONTROL_ROUND);
3483 
3484 out:
3485     multifd_send_sync_main(rs);
3486     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
3487     qemu_fflush(f);
3488     ram_counters.transferred += 8;
3489 
3490     ret = qemu_file_get_error(f);
3491     if (ret < 0) {
3492         return ret;
3493     }
3494 
3495     return done;
3496 }
3497 
3498 /**
3499  * ram_save_complete: function called to send the remaining amount of ram
3500  *
3501  * Returns zero to indicate success or negative on error
3502  *
3503  * Called with iothread lock
3504  *
3505  * @f: QEMUFile where to send the data
3506  * @opaque: RAMState pointer
3507  */
3508 static int ram_save_complete(QEMUFile *f, void *opaque)
3509 {
3510     RAMState **temp = opaque;
3511     RAMState *rs = *temp;
3512     int ret = 0;
3513 
3514     rcu_read_lock();
3515 
3516     if (!migration_in_postcopy()) {
3517         migration_bitmap_sync_precopy(rs);
3518     }
3519 
3520     ram_control_before_iterate(f, RAM_CONTROL_FINISH);
3521 
3522     /* try transferring iterative blocks of memory */
3523 
3524     /* flush all remaining blocks regardless of rate limiting */
3525     while (true) {
3526         int pages;
3527 
3528         pages = ram_find_and_save_block(rs, !migration_in_colo_state());
3529         /* no more blocks to send */
3530         if (pages == 0) {
3531             break;
3532         }
3533         if (pages < 0) {
3534             ret = pages;
3535             break;
3536         }
3537     }
3538 
3539     flush_compressed_data(rs);
3540     ram_control_after_iterate(f, RAM_CONTROL_FINISH);
3541 
3542     rcu_read_unlock();
3543 
3544     multifd_send_sync_main(rs);
3545     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
3546     qemu_fflush(f);
3547 
3548     return ret;
3549 }
3550 
3551 static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
3552                              uint64_t *res_precopy_only,
3553                              uint64_t *res_compatible,
3554                              uint64_t *res_postcopy_only)
3555 {
3556     RAMState **temp = opaque;
3557     RAMState *rs = *temp;
3558     uint64_t remaining_size;
3559 
3560     remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
3561 
3562     if (!migration_in_postcopy() &&
3563         remaining_size < max_size) {
3564         qemu_mutex_lock_iothread();
3565         rcu_read_lock();
3566         migration_bitmap_sync_precopy(rs);
3567         rcu_read_unlock();
3568         qemu_mutex_unlock_iothread();
3569         remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
3570     }
3571 
3572     if (migrate_postcopy_ram()) {
3573         /* We can do postcopy, and all the data is postcopiable */
3574         *res_compatible += remaining_size;
3575     } else {
3576         *res_precopy_only += remaining_size;
3577     }
3578 }
3579 
3580 static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
3581 {
3582     unsigned int xh_len;
3583     int xh_flags;
3584     uint8_t *loaded_data;
3585 
3586     /* extract RLE header */
3587     xh_flags = qemu_get_byte(f);
3588     xh_len = qemu_get_be16(f);
3589 
3590     if (xh_flags != ENCODING_FLAG_XBZRLE) {
3591         error_report("Failed to load XBZRLE page - wrong compression!");
3592         return -1;
3593     }
3594 
3595     if (xh_len > TARGET_PAGE_SIZE) {
3596         error_report("Failed to load XBZRLE page - len overflow!");
3597         return -1;
3598     }
3599     loaded_data = XBZRLE.decoded_buf;
3600     /* load data and decode */
3601     /* it can change loaded_data to point to an internal buffer */
3602     qemu_get_buffer_in_place(f, &loaded_data, xh_len);
3603 
3604     /* decode RLE */
3605     if (xbzrle_decode_buffer(loaded_data, xh_len, host,
3606                              TARGET_PAGE_SIZE) == -1) {
3607         error_report("Failed to load XBZRLE page - decode error!");
3608         return -1;
3609     }
3610 
3611     return 0;
3612 }
3613 
3614 /**
3615  * ram_block_from_stream: read a RAMBlock id from the migration stream
3616  *
3617  * Must be called from within a rcu critical section.
3618  *
3619  * Returns a pointer from within the RCU-protected ram_list.
3620  *
3621  * @f: QEMUFile where to read the data from
3622  * @flags: Page flags (mostly to see if it's a continuation of previous block)
3623  */
3624 static inline RAMBlock *ram_block_from_stream(QEMUFile *f, int flags)
3625 {
3626     static RAMBlock *block = NULL;
3627     char id[256];
3628     uint8_t len;
3629 
3630     if (flags & RAM_SAVE_FLAG_CONTINUE) {
3631         if (!block) {
3632             error_report("Ack, bad migration stream!");
3633             return NULL;
3634         }
3635         return block;
3636     }
3637 
3638     len = qemu_get_byte(f);
3639     qemu_get_buffer(f, (uint8_t *)id, len);
3640     id[len] = 0;
3641 
3642     block = qemu_ram_block_by_name(id);
3643     if (!block) {
3644         error_report("Can't find block %s", id);
3645         return NULL;
3646     }
3647 
3648     if (ramblock_is_ignored(block)) {
3649         error_report("block %s should not be migrated !", id);
3650         return NULL;
3651     }
3652 
3653     return block;
3654 }
3655 
3656 static inline void *host_from_ram_block_offset(RAMBlock *block,
3657                                                ram_addr_t offset)
3658 {
3659     if (!offset_in_ramblock(block, offset)) {
3660         return NULL;
3661     }
3662 
3663     return block->host + offset;
3664 }
3665 
3666 static inline void *colo_cache_from_block_offset(RAMBlock *block,
3667                                                  ram_addr_t offset)
3668 {
3669     if (!offset_in_ramblock(block, offset)) {
3670         return NULL;
3671     }
3672     if (!block->colo_cache) {
3673         error_report("%s: colo_cache is NULL in block: %s",
3674                      __func__, block->idstr);
3675         return NULL;
3676     }
3677 
3678     /*
3679      * During a COLO checkpoint, we need a bitmap of these migrated pages.
3680      * It helps us decide which pages in the RAM cache should be flushed
3681      * into the VM's RAM later.
3682      */
3683     if (!test_and_set_bit(offset >> TARGET_PAGE_BITS, block->bmap)) {
3684         ram_state->migration_dirty_pages++;
3685     }
3686     return block->colo_cache + offset;
3687 }
3688 
3689 /**
3690  * ram_handle_compressed: handle the zero page case
3691  *
3692  * If a page (or a whole RDMA chunk) has been
3693  * determined to be zero, then zap it.
3694  *
3695  * @host: host address for the zero page
3696  * @ch: what the page is filled from.  We only support zero
3697  * @ch: the byte the page is filled with.  We only support zero
3698  */
3699 void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
3700 {
3701     if (ch != 0 || !is_zero_range(host, size)) {
3702         memset(host, ch, size);
3703     }
3704 }
3705 
3706 /* return the size after decompression, or negative value on error */
3707 static int
3708 qemu_uncompress_data(z_stream *stream, uint8_t *dest, size_t dest_len,
3709                      const uint8_t *source, size_t source_len)
3710 {
3711     int err;
3712 
3713     err = inflateReset(stream);
3714     if (err != Z_OK) {
3715         return -1;
3716     }
3717 
3718     stream->avail_in = source_len;
3719     stream->next_in = (uint8_t *)source;
3720     stream->avail_out = dest_len;
3721     stream->next_out = dest;
3722 
3723     err = inflate(stream, Z_NO_FLUSH);
3724     if (err != Z_STREAM_END) {
3725         return -1;
3726     }
3727 
3728     return stream->total_out;
3729 }
3730 
3731 static void *do_data_decompress(void *opaque)
3732 {
3733     DecompressParam *param = opaque;
3734     unsigned long pagesize;
3735     uint8_t *des;
3736     int len, ret;
3737 
3738     qemu_mutex_lock(&param->mutex);
3739     while (!param->quit) {
3740         if (param->des) {
3741             des = param->des;
3742             len = param->len;
3743             param->des = 0;
3744             qemu_mutex_unlock(&param->mutex);
3745 
3746             pagesize = TARGET_PAGE_SIZE;
3747 
3748             ret = qemu_uncompress_data(&param->stream, des, pagesize,
3749                                        param->compbuf, len);
3750             if (ret < 0 && migrate_get_current()->decompress_error_check) {
3751                 error_report("decompress data failed");
3752                 qemu_file_set_error(decomp_file, ret);
3753             }
3754 
3755             qemu_mutex_lock(&decomp_done_lock);
3756             param->done = true;
3757             qemu_cond_signal(&decomp_done_cond);
3758             qemu_mutex_unlock(&decomp_done_lock);
3759 
3760             qemu_mutex_lock(&param->mutex);
3761         } else {
3762             qemu_cond_wait(&param->cond, &param->mutex);
3763         }
3764     }
3765     qemu_mutex_unlock(&param->mutex);
3766 
3767     return NULL;
3768 }
3769 
3770 static int wait_for_decompress_done(void)
3771 {
3772     int idx, thread_count;
3773 
3774     if (!migrate_use_compression()) {
3775         return 0;
3776     }
3777 
3778     thread_count = migrate_decompress_threads();
3779     qemu_mutex_lock(&decomp_done_lock);
3780     for (idx = 0; idx < thread_count; idx++) {
3781         while (!decomp_param[idx].done) {
3782             qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
3783         }
3784     }
3785     qemu_mutex_unlock(&decomp_done_lock);
3786     return qemu_file_get_error(decomp_file);
3787 }
3788 
3789 static void compress_threads_load_cleanup(void)
3790 {
3791     int i, thread_count;
3792 
3793     if (!migrate_use_compression()) {
3794         return;
3795     }
3796     thread_count = migrate_decompress_threads();
3797     for (i = 0; i < thread_count; i++) {
3798         /*
3799          * We use it as an indicator of whether the thread has been
3800          * properly initialized or not.
3801          */
3802         if (!decomp_param[i].compbuf) {
3803             break;
3804         }
3805 
3806         qemu_mutex_lock(&decomp_param[i].mutex);
3807         decomp_param[i].quit = true;
3808         qemu_cond_signal(&decomp_param[i].cond);
3809         qemu_mutex_unlock(&decomp_param[i].mutex);
3810     }
3811     for (i = 0; i < thread_count; i++) {
3812         if (!decomp_param[i].compbuf) {
3813             break;
3814         }
3815 
3816         qemu_thread_join(decompress_threads + i);
3817         qemu_mutex_destroy(&decomp_param[i].mutex);
3818         qemu_cond_destroy(&decomp_param[i].cond);
3819         inflateEnd(&decomp_param[i].stream);
3820         g_free(decomp_param[i].compbuf);
3821         decomp_param[i].compbuf = NULL;
3822     }
3823     g_free(decompress_threads);
3824     g_free(decomp_param);
3825     decompress_threads = NULL;
3826     decomp_param = NULL;
3827     decomp_file = NULL;
3828 }
3829 
3830 static int compress_threads_load_setup(QEMUFile *f)
3831 {
3832     int i, thread_count;
3833 
3834     if (!migrate_use_compression()) {
3835         return 0;
3836     }
3837 
3838     thread_count = migrate_decompress_threads();
3839     decompress_threads = g_new0(QemuThread, thread_count);
3840     decomp_param = g_new0(DecompressParam, thread_count);
3841     qemu_mutex_init(&decomp_done_lock);
3842     qemu_cond_init(&decomp_done_cond);
3843     decomp_file = f;
3844     for (i = 0; i < thread_count; i++) {
3845         if (inflateInit(&decomp_param[i].stream) != Z_OK) {
3846             goto exit;
3847         }
3848 
3849         decomp_param[i].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
3850         qemu_mutex_init(&decomp_param[i].mutex);
3851         qemu_cond_init(&decomp_param[i].cond);
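        /*
         * Start with 'done' set so decompress_data_with_multi_threads()
         * treats every thread as idle before the first page arrives.
         */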
3852         decomp_param[i].done = true;
3853         decomp_param[i].quit = false;
3854         qemu_thread_create(decompress_threads + i, "decompress",
3855                            do_data_decompress, decomp_param + i,
3856                            QEMU_THREAD_JOINABLE);
3857     }
3858     return 0;
3859 exit:
3860     compress_threads_load_cleanup();
3861     return -1;
3862 }
3863 
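/*
 * Hand a compressed page off to an idle decompression thread: scan for a
 * worker whose 'done' flag is set, copy the compressed bytes into its
 * buffer and signal it; if every worker is busy, wait on decomp_done_cond
 * until one finishes.
 */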
3864 static void decompress_data_with_multi_threads(QEMUFile *f,
3865                                                void *host, int len)
3866 {
3867     int idx, thread_count;
3868 
3869     thread_count = migrate_decompress_threads();
3870     qemu_mutex_lock(&decomp_done_lock);
3871     while (true) {
3872         for (idx = 0; idx < thread_count; idx++) {
3873             if (decomp_param[idx].done) {
3874                 decomp_param[idx].done = false;
3875                 qemu_mutex_lock(&decomp_param[idx].mutex);
3876                 qemu_get_buffer(f, decomp_param[idx].compbuf, len);
3877                 decomp_param[idx].des = host;
3878                 decomp_param[idx].len = len;
3879                 qemu_cond_signal(&decomp_param[idx].cond);
3880                 qemu_mutex_unlock(&decomp_param[idx].mutex);
3881                 break;
3882             }
3883         }
3884         if (idx < thread_count) {
3885             break;
3886         } else {
3887             qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
3888         }
3889     }
3890     qemu_mutex_unlock(&decomp_done_lock);
3891 }
3892 
3893 /*
3894  * COLO cache: this is for the secondary VM; we cache the whole
3895  * memory of the secondary VM.  The global lock must be held to
3896  * call this helper.
3897  */
3898 int colo_init_ram_cache(void)
3899 {
3900     RAMBlock *block;
3901 
3902     rcu_read_lock();
3903     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3904         block->colo_cache = qemu_anon_ram_alloc(block->used_length,
3905                                                 NULL,
3906                                                 false);
3907         if (!block->colo_cache) {
3908             error_report("%s: Can't alloc memory for COLO cache of block %s, "
3909                          "size 0x" RAM_ADDR_FMT, __func__, block->idstr,
3910                          block->used_length);
3911             goto out_locked;
3912         }
3913         memcpy(block->colo_cache, block->host, block->used_length);
3914     }
3915     rcu_read_unlock();
3916     /*
3917      * Record the dirty pages that were sent by the PVM; we use this dirty
3918      * bitmap to decide which pages in the cache should be flushed into the
3919      * SVM's RAM.  Here we use the same name 'ram_bitmap' as for migration.
3920      */
3921     if (ram_bytes_total()) {
3922         RAMBlock *block;
3923 
3924         RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3925             unsigned long pages = block->max_length >> TARGET_PAGE_BITS;
3926 
3927             block->bmap = bitmap_new(pages);
3928             bitmap_set(block->bmap, 0, pages);
3929         }
3930     }
3931     ram_state = g_new0(RAMState, 1);
3932     ram_state->migration_dirty_pages = 0;
3933     qemu_mutex_init(&ram_state->bitmap_mutex);
3934     memory_global_dirty_log_start();
3935 
3936     return 0;
3937 
3938 out_locked:
3939 
3940     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3941         if (block->colo_cache) {
3942             qemu_anon_ram_free(block->colo_cache, block->used_length);
3943             block->colo_cache = NULL;
3944         }
3945     }
3946 
3947     rcu_read_unlock();
3948     return -errno;
3949 }
3950 
3951 /* The global lock must be held to call this helper */
3952 void colo_release_ram_cache(void)
3953 {
3954     RAMBlock *block;
3955 
3956     memory_global_dirty_log_stop();
3957     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3958         g_free(block->bmap);
3959         block->bmap = NULL;
3960     }
3961 
3962     rcu_read_lock();
3963 
3964     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3965         if (block->colo_cache) {
3966             qemu_anon_ram_free(block->colo_cache, block->used_length);
3967             block->colo_cache = NULL;
3968         }
3969     }
3970 
3971     rcu_read_unlock();
3972     qemu_mutex_destroy(&ram_state->bitmap_mutex);
3973     g_free(ram_state);
3974     ram_state = NULL;
3975 }
3976 
3977 /**
3978  * ram_load_setup: Setup RAM for migration incoming side
3979  *
3980  * Returns zero to indicate success and negative for error
3981  *
3982  * @f: QEMUFile where to receive the data
3983  * @opaque: RAMState pointer
3984  */
3985 static int ram_load_setup(QEMUFile *f, void *opaque)
3986 {
3987     if (compress_threads_load_setup(f)) {
3988         return -1;
3989     }
3990 
3991     xbzrle_load_setup();
3992     ramblock_recv_map_init();
3993 
3994     return 0;
3995 }
3996 
3997 static int ram_load_cleanup(void *opaque)
3998 {
3999     RAMBlock *rb;
4000 
4001     RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
4002         if (ramblock_is_pmem(rb)) {
4003             pmem_persist(rb->host, rb->used_length);
4004         }
4005     }
4006 
4007     xbzrle_load_cleanup();
4008     compress_threads_load_cleanup();
4009 
4010     RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
4011         g_free(rb->receivedmap);
4012         rb->receivedmap = NULL;
4013     }
4014 
4015     return 0;
4016 }
4017 
4018 /**
4019  * ram_postcopy_incoming_init: allocate postcopy data structures
4020  *
4021  * Returns 0 for success and negative if there was an error
4022  *
4023  * @mis: current migration incoming state
4024  *
4025  * Allocate data structures etc needed by incoming migration with
4026  * postcopy-ram. postcopy-ram's similarly named
4027  * postcopy_ram_incoming_init does the work.
4028  */
4029 int ram_postcopy_incoming_init(MigrationIncomingState *mis)
4030 {
4031     return postcopy_ram_incoming_init(mis);
4032 }
4033 
4034 /**
4035  * ram_load_postcopy: load a page in postcopy case
4036  *
4037  * Returns 0 for success or -errno in case of error
4038  *
4039  * Called in postcopy mode by ram_load().
4040  * rcu_read_lock is taken prior to this being called.
4041  *
4042  * @f: QEMUFile where to read the data from
4043  */
4044 static int ram_load_postcopy(QEMUFile *f)
4045 {
4046     int flags = 0, ret = 0;
4047     bool place_needed = false;
4048     bool matches_target_page_size = false;
4049     MigrationIncomingState *mis = migration_incoming_get_current();
4050     /* Temporary page that is later 'placed' */
4051     void *postcopy_host_page = postcopy_get_tmp_page(mis);
4052     void *last_host = NULL;
4053     bool all_zero = false;
4054 
4055     while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
4056         ram_addr_t addr;
4057         void *host = NULL;
4058         void *page_buffer = NULL;
4059         void *place_source = NULL;
4060         RAMBlock *block = NULL;
4061         uint8_t ch;
4062 
4063         addr = qemu_get_be64(f);
4064 
4065         /*
4066          * If there is a QEMUFile error, we should stop here, since "addr"
4067          * may be invalid.
4068          */
4069         ret = qemu_file_get_error(f);
4070         if (ret) {
4071             break;
4072         }
4073 
4074         flags = addr & ~TARGET_PAGE_MASK;
4075         addr &= TARGET_PAGE_MASK;
4076 
4077         trace_ram_load_postcopy_loop((uint64_t)addr, flags);
4078         place_needed = false;
4079         if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE)) {
4080             block = ram_block_from_stream(f, flags);
4081 
4082             host = host_from_ram_block_offset(block, addr);
4083             if (!host) {
4084                 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
4085                 ret = -EINVAL;
4086                 break;
4087             }
4088             matches_target_page_size = block->page_size == TARGET_PAGE_SIZE;
4089             /*
4090              * Postcopy requires that we place whole host pages atomically;
4091              * these may be huge pages for RAMBlocks that are backed by
4092              * hugetlbfs.
4093              * To make it atomic, the data is read into a temporary page
4094              * that's moved into place later.
4095              * The migration protocol uses, possibly smaller, target pages;
4096              * however, the source ensures it always sends all the components
4097              * of a host page in order.
4098              */
4099             page_buffer = postcopy_host_page +
4100                           ((uintptr_t)host & (block->page_size - 1));
4101             /* If all TP are zero then we can optimise the place */
4102             if (!((uintptr_t)host & (block->page_size - 1))) {
4103                 all_zero = true;
4104             } else {
4105                 /* not the 1st TP within the HP */
4106                 if (host != (last_host + TARGET_PAGE_SIZE)) {
4107                     error_report("Non-sequential target page %p/%p",
4108                                   host, last_host);
4109                     ret = -EINVAL;
4110                     break;
4111                 }
4112             }
4113 
4114 
4115             /*
4116              * If it's the last part of a host page then we place the host
4117              * page
4118              */
4119             place_needed = (((uintptr_t)host + TARGET_PAGE_SIZE) &
4120                                      (block->page_size - 1)) == 0;
4121             place_source = postcopy_host_page;
4122         }
4123         last_host = host;
4124 
4125         switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
4126         case RAM_SAVE_FLAG_ZERO:
4127             ch = qemu_get_byte(f);
4128             memset(page_buffer, ch, TARGET_PAGE_SIZE);
4129             if (ch) {
4130                 all_zero = false;
4131             }
4132             break;
4133 
4134         case RAM_SAVE_FLAG_PAGE:
4135             all_zero = false;
4136             if (!matches_target_page_size) {
4137                 /* For huge pages, we always use temporary buffer */
4138                 qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE);
4139             } else {
4140                 /*
4141                  * For small pages that match the target page size, we
4142                  * avoid the qemu_file copy.  Instead we directly use
4143                  * the buffer of QEMUFile to place the page.  Note: we
4144                  * cannot do any QEMUFile operation before using that
4145                  * buffer to make sure the buffer is valid when
4146                  * placing the page.
4147                  */
4148                 qemu_get_buffer_in_place(f, (uint8_t **)&place_source,
4149                                          TARGET_PAGE_SIZE);
4150             }
4151             break;
4152         case RAM_SAVE_FLAG_EOS:
4153             /* normal exit */
4154             multifd_recv_sync_main();
4155             break;
4156         default:
4157             error_report("Unknown combination of migration flags: %#x"
4158                          " (postcopy mode)", flags);
4159             ret = -EINVAL;
4160             break;
4161         }
4162 
4163         /* Detect any possible file errors */
4164         if (!ret && qemu_file_get_error(f)) {
4165             ret = qemu_file_get_error(f);
4166         }
4167 
4168         if (!ret && place_needed) {
4169             /* This gets called at the last target page in the host page */
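            /*
             * Since 'host' is the last target page of the host page here,
             * stepping back by (block->page_size - TARGET_PAGE_SIZE) gives
             * the start of the host page being placed.
             */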
4170             void *place_dest = host + TARGET_PAGE_SIZE - block->page_size;
4171 
4172             if (all_zero) {
4173                 ret = postcopy_place_page_zero(mis, place_dest,
4174                                                block);
4175             } else {
4176                 ret = postcopy_place_page(mis, place_dest,
4177                                           place_source, block);
4178             }
4179         }
4180     }
4181 
4182     return ret;
4183 }
4184 
4185 static bool postcopy_is_advised(void)
4186 {
4187     PostcopyState ps = postcopy_state_get();
4188     return ps >= POSTCOPY_INCOMING_ADVISE && ps < POSTCOPY_INCOMING_END;
4189 }
4190 
4191 static bool postcopy_is_running(void)
4192 {
4193     PostcopyState ps = postcopy_state_get();
4194     return ps >= POSTCOPY_INCOMING_LISTENING && ps < POSTCOPY_INCOMING_END;
4195 }
4196 
4197 /*
4198  * Flush the contents of the RAM cache into the SVM's memory.
4199  * Only flush the pages that have been dirtied by the PVM, the SVM, or both.
4200  */
4201 static void colo_flush_ram_cache(void)
4202 {
4203     RAMBlock *block = NULL;
4204     void *dst_host;
4205     void *src_host;
4206     unsigned long offset = 0;
4207 
4208     memory_global_dirty_log_sync();
4209     rcu_read_lock();
4210     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
4211         ramblock_sync_dirty_bitmap(ram_state, block);
4212     }
4213     rcu_read_unlock();
4214 
4215     trace_colo_flush_ram_cache_begin(ram_state->migration_dirty_pages);
4216     rcu_read_lock();
4217     block = QLIST_FIRST_RCU(&ram_list.blocks);
4218 
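    /*
     * Walk every block: for each dirty bit found, clear it and copy the
     * cached page from colo_cache back into the guest's RAM; once the
     * search runs past the block's used_length, move on to the next block.
     */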
4219     while (block) {
4220         offset = migration_bitmap_find_dirty(ram_state, block, offset);
4221 
4222         if (offset << TARGET_PAGE_BITS >= block->used_length) {
4223             offset = 0;
4224             block = QLIST_NEXT_RCU(block, next);
4225         } else {
4226             migration_bitmap_clear_dirty(ram_state, block, offset);
4227             dst_host = block->host + (offset << TARGET_PAGE_BITS);
4228             src_host = block->colo_cache + (offset << TARGET_PAGE_BITS);
4229             memcpy(dst_host, src_host, TARGET_PAGE_SIZE);
4230         }
4231     }
4232 
4233     rcu_read_unlock();
4234     trace_colo_flush_ram_cache_end();
4235 }
4236 
4237 /**
4238  * ram_load_precopy: load pages in precopy case
4239  *
4240  * Returns 0 for success or -errno in case of error
4241  *
4242  * Called in precopy mode by ram_load().
4243  * rcu_read_lock is taken prior to this being called.
4244  *
4245  * @f: QEMUFile where to read the data from
4246  */
4247 static int ram_load_precopy(QEMUFile *f)
4248 {
4249     int flags = 0, ret = 0, invalid_flags = 0, len = 0;
4250     /* ADVISE comes earlier; it shows the source has the postcopy capability on */
4251     bool postcopy_advised = postcopy_is_advised();
4252     if (!migrate_use_compression()) {
4253         invalid_flags |= RAM_SAVE_FLAG_COMPRESS_PAGE;
4254     }
4255 
4256     while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
4257         ram_addr_t addr, total_ram_bytes;
4258         void *host = NULL;
4259         uint8_t ch;
4260 
4261         addr = qemu_get_be64(f);
4262         flags = addr & ~TARGET_PAGE_MASK;
4263         addr &= TARGET_PAGE_MASK;
4264 
4265         if (flags & invalid_flags) {
4266             if (flags & invalid_flags & RAM_SAVE_FLAG_COMPRESS_PAGE) {
4267                 error_report("Received an unexpected compressed page");
4268             }
4269 
4270             ret = -EINVAL;
4271             break;
4272         }
4273 
4274         if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
4275                      RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
4276             RAMBlock *block = ram_block_from_stream(f, flags);
4277 
4278             /*
4279              * After going into COLO, we should load the page into colo_cache.
4280              */
4281             if (migration_incoming_in_colo_state()) {
4282                 host = colo_cache_from_block_offset(block, addr);
4283             } else {
4284                 host = host_from_ram_block_offset(block, addr);
4285             }
4286             if (!host) {
4287                 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
4288                 ret = -EINVAL;
4289                 break;
4290             }
4291 
4292             if (!migration_incoming_in_colo_state()) {
4293                 ramblock_recv_bitmap_set(block, host);
4294             }
4295 
4296             trace_ram_load_loop(block->idstr, (uint64_t)addr, flags, host);
4297         }
4298 
4299         switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
4300         case RAM_SAVE_FLAG_MEM_SIZE:
4301             /* Synchronize RAM block list */
4302             total_ram_bytes = addr;
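            /*
             * Each record parsed below mirrors what ram_save_setup() wrote:
             * a one-byte idstr length, the idstr, a be64 block length, plus
             * the optional page-size and GPA fields checked further down.
             */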
4303             while (!ret && total_ram_bytes) {
4304                 RAMBlock *block;
4305                 char id[256];
4306                 ram_addr_t length;
4307 
4308                 len = qemu_get_byte(f);
4309                 qemu_get_buffer(f, (uint8_t *)id, len);
4310                 id[len] = 0;
4311                 length = qemu_get_be64(f);
4312 
4313                 block = qemu_ram_block_by_name(id);
4314                 if (block && !qemu_ram_is_migratable(block)) {
4315                     error_report("block %s should not be migrated !", id);
4316                     ret = -EINVAL;
4317                 } else if (block) {
4318                     if (length != block->used_length) {
4319                         Error *local_err = NULL;
4320 
4321                         ret = qemu_ram_resize(block, length,
4322                                               &local_err);
4323                         if (local_err) {
4324                             error_report_err(local_err);
4325                         }
4326                     }
4327                     /* For postcopy we need to check hugepage sizes match */
4328                     if (postcopy_advised &&
4329                         block->page_size != qemu_host_page_size) {
4330                         uint64_t remote_page_size = qemu_get_be64(f);
4331                         if (remote_page_size != block->page_size) {
4332                             error_report("Mismatched RAM page size %s "
4333                                          "(local) %zd != %" PRId64,
4334                                          id, block->page_size,
4335                                          remote_page_size);
4336                             ret = -EINVAL;
4337                         }
4338                     }
4339                     if (migrate_ignore_shared()) {
4340                         hwaddr addr = qemu_get_be64(f);
4341                         if (ramblock_is_ignored(block) &&
4342                             block->mr->addr != addr) {
4343                             error_report("Mismatched GPAs for block %s "
4344                                          "%" PRId64 "!= %" PRId64,
4345                                          id, (uint64_t)addr,
4346                                          (uint64_t)block->mr->addr);
4347                             ret = -EINVAL;
4348                         }
4349                     }
4350                     ram_control_load_hook(f, RAM_CONTROL_BLOCK_REG,
4351                                           block->idstr);
4352                 } else {
4353                     error_report("Unknown ramblock \"%s\", cannot "
4354                                  "accept migration", id);
4355                     ret = -EINVAL;
4356                 }
4357 
4358                 total_ram_bytes -= length;
4359             }
4360             break;
4361 
4362         case RAM_SAVE_FLAG_ZERO:
4363             ch = qemu_get_byte(f);
4364             ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
4365             break;
4366 
4367         case RAM_SAVE_FLAG_PAGE:
4368             qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
4369             break;
4370 
4371         case RAM_SAVE_FLAG_COMPRESS_PAGE:
4372             len = qemu_get_be32(f);
4373             if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
4374                 error_report("Invalid compressed data length: %d", len);
4375                 ret = -EINVAL;
4376                 break;
4377             }
4378             decompress_data_with_multi_threads(f, host, len);
4379             break;
4380 
4381         case RAM_SAVE_FLAG_XBZRLE:
4382             if (load_xbzrle(f, addr, host) < 0) {
4383                 error_report("Failed to decompress XBZRLE page at "
4384                              RAM_ADDR_FMT, addr);
4385                 ret = -EINVAL;
4386                 break;
4387             }
4388             break;
4389         case RAM_SAVE_FLAG_EOS:
4390             /* normal exit */
4391             multifd_recv_sync_main();
4392             break;
4393         default:
4394             if (flags & RAM_SAVE_FLAG_HOOK) {
4395                 ram_control_load_hook(f, RAM_CONTROL_HOOK, NULL);
4396             } else {
4397                 error_report("Unknown combination of migration flags: %#x",
4398                              flags);
4399                 ret = -EINVAL;
4400             }
4401         }
4402         if (!ret) {
4403             ret = qemu_file_get_error(f);
4404         }
4405     }
4406 
4407     return ret;
4408 }
4409 
4410 static int ram_load(QEMUFile *f, void *opaque, int version_id)
4411 {
4412     int ret = 0;
4413     static uint64_t seq_iter;
4414     /*
4415      * If the system is running in postcopy mode, page inserts into host
4416      * memory must be atomic.
4417      */
4418     bool postcopy_running = postcopy_is_running();
4419 
4420     seq_iter++;
4421 
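    /*
     * Only stream version 4 is accepted, matching the version registered
     * for the "ram" section in ram_mig_init().
     */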
4422     if (version_id != 4) {
4423         return -EINVAL;
4424     }
4425 
4426     /*
4427      * This RCU critical section can be very long running.
4428      * When RCU reclaims in the code start to become numerous,
4429      * it will be necessary to reduce the granularity of this
4430      * critical section.
4431      */
4432     rcu_read_lock();
4433 
4434     if (postcopy_running) {
4435         ret = ram_load_postcopy(f);
4436     } else {
4437         ret = ram_load_precopy(f);
4438     }
4439 
4440     ret |= wait_for_decompress_done();
4441     rcu_read_unlock();
4442     trace_ram_load_complete(ret, seq_iter);
4443 
4444     if (!ret  && migration_incoming_in_colo_state()) {
4445         colo_flush_ram_cache();
4446     }
4447     return ret;
4448 }
4449 
4450 static bool ram_has_postcopy(void *opaque)
4451 {
4452     RAMBlock *rb;
4453     RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
4454         if (ramblock_is_pmem(rb)) {
4455             info_report("Block: %s, host: %p is an nvdimm memory, postcopy "
4456                         "is not supported now!", rb->idstr, rb->host);
4457             return false;
4458         }
4459     }
4460 
4461     return migrate_postcopy_ram();
4462 }
4463 
4464 /* Sync all the dirty bitmaps with the destination VM.  */
4465 static int ram_dirty_bitmap_sync_all(MigrationState *s, RAMState *rs)
4466 {
4467     RAMBlock *block;
4468     QEMUFile *file = s->to_dst_file;
4469     int ramblock_count = 0;
4470 
4471     trace_ram_dirty_bitmap_sync_start();
4472 
4473     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
4474         qemu_savevm_send_recv_bitmap(file, block->idstr);
4475         trace_ram_dirty_bitmap_request(block->idstr);
4476         ramblock_count++;
4477     }
4478 
4479     trace_ram_dirty_bitmap_sync_wait();
4480 
4481     /* Wait until all the ramblocks' dirty bitmaps are synced */
4482     while (ramblock_count--) {
4483         qemu_sem_wait(&s->rp_state.rp_sem);
4484     }
4485 
4486     trace_ram_dirty_bitmap_sync_complete();
4487 
4488     return 0;
4489 }
4490 
4491 static void ram_dirty_bitmap_reload_notify(MigrationState *s)
4492 {
4493     qemu_sem_post(&s->rp_state.rp_sem);
4494 }
4495 
4496 /*
4497  * Read the received bitmap and invert it to form the initial dirty bitmap.
4498  * This is only used when the postcopy migration is paused but wants
4499  * to resume from a middle point.
4500  */
4501 int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *block)
4502 {
4503     int ret = -EINVAL;
4504     QEMUFile *file = s->rp_state.from_dst_file;
4505     unsigned long *le_bitmap, nbits = block->used_length >> TARGET_PAGE_BITS;
4506     uint64_t local_size = DIV_ROUND_UP(nbits, 8);
4507     uint64_t size, end_mark;
4508 
4509     trace_ram_dirty_bitmap_reload_begin(block->idstr);
4510 
4511     if (s->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
4512         error_report("%s: incorrect state %s", __func__,
4513                      MigrationStatus_str(s->state));
4514         return -EINVAL;
4515     }
4516 
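    /*
     * The return-path stream for each ramblock, as read below, is: a be64
     * bitmap size in bytes, the little-endian bitmap itself (rounded up to
     * a multiple of 8 bytes), and a be64 end mark that must equal
     * RAMBLOCK_RECV_BITMAP_ENDING.
     */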
4517     /*
4518      * Note: see comments in ramblock_recv_bitmap_send() on why we
4519      * need the endianness conversion, and the padding.
4520      */
4521     local_size = ROUND_UP(local_size, 8);
4522 
4523     /* Add paddings */
4524     le_bitmap = bitmap_new(nbits + BITS_PER_LONG);
4525 
4526     size = qemu_get_be64(file);
4527 
4528     /* The size of the bitmap should match our ramblock */
4529     if (size != local_size) {
4530         error_report("%s: ramblock '%s' bitmap size mismatch "
4531                      "(0x%"PRIx64" != 0x%"PRIx64")", __func__,
4532                      block->idstr, size, local_size);
4533         ret = -EINVAL;
4534         goto out;
4535     }
4536 
4537     size = qemu_get_buffer(file, (uint8_t *)le_bitmap, local_size);
4538     end_mark = qemu_get_be64(file);
4539 
4540     ret = qemu_file_get_error(file);
4541     if (ret || size != local_size) {
4542         error_report("%s: read bitmap failed for ramblock '%s': %d"
4543                      " (size 0x%"PRIx64", got: 0x%"PRIx64")",
4544                      __func__, block->idstr, ret, local_size, size);
4545         ret = -EIO;
4546         goto out;
4547     }
4548 
4549     if (end_mark != RAMBLOCK_RECV_BITMAP_ENDING) {
4550         error_report("%s: ramblock '%s' end mark incorrect: 0x%"PRIx64,
4551                      __func__, block->idstr, end_mark);
4552         ret = -EINVAL;
4553         goto out;
4554     }
4555 
4556     /*
4557      * Endianness conversion. We are in postcopy (though paused).
4558      * The dirty bitmap won't change. We can directly modify it.
4559      */
4560     bitmap_from_le(block->bmap, le_bitmap, nbits);
4561 
4562     /*
4563      * What we received is the "received bitmap". Invert it to form the
4564      * initial dirty bitmap for this ramblock.
4565      */
4566     bitmap_complement(block->bmap, block->bmap, nbits);
4567 
4568     trace_ram_dirty_bitmap_reload_complete(block->idstr);
4569 
4570     /*
4571      * We succeeded in syncing the bitmap for the current ramblock. If this
4572      * is the last one to sync, we need to notify the main send thread.
4573      */
4574     ram_dirty_bitmap_reload_notify(s);
4575 
4576     ret = 0;
4577 out:
4578     g_free(le_bitmap);
4579     return ret;
4580 }
4581 
4582 static int ram_resume_prepare(MigrationState *s, void *opaque)
4583 {
4584     RAMState *rs = *(RAMState **)opaque;
4585     int ret;
4586 
4587     ret = ram_dirty_bitmap_sync_all(s, rs);
4588     if (ret) {
4589         return ret;
4590     }
4591 
4592     ram_state_resume_prepare(rs, s->to_dst_file);
4593 
4594     return 0;
4595 }
4596 
4597 static SaveVMHandlers savevm_ram_handlers = {
4598     .save_setup = ram_save_setup,
4599     .save_live_iterate = ram_save_iterate,
4600     .save_live_complete_postcopy = ram_save_complete,
4601     .save_live_complete_precopy = ram_save_complete,
4602     .has_postcopy = ram_has_postcopy,
4603     .save_live_pending = ram_save_pending,
4604     .load_state = ram_load,
4605     .save_cleanup = ram_save_cleanup,
4606     .load_setup = ram_load_setup,
4607     .load_cleanup = ram_load_cleanup,
4608     .resume_prepare = ram_resume_prepare,
4609 };
4610 
4611 void ram_mig_init(void)
4612 {
4613     qemu_mutex_init(&XBZRLE.lock);
4614     register_savevm_live("ram", 0, 4, &savevm_ram_handlers, &ram_state);
4615 }
4616