xref: /openbmc/qemu/migration/ram.c (revision 2e5b09fd)
1 /*
2  * QEMU System Emulator
3  *
4  * Copyright (c) 2003-2008 Fabrice Bellard
5  * Copyright (c) 2011-2015 Red Hat Inc
6  *
7  * Authors:
8  *  Juan Quintela <quintela@redhat.com>
9  *
10  * Permission is hereby granted, free of charge, to any person obtaining a copy
11  * of this software and associated documentation files (the "Software"), to deal
12  * in the Software without restriction, including without limitation the rights
13  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
14  * copies of the Software, and to permit persons to whom the Software is
15  * furnished to do so, subject to the following conditions:
16  *
17  * The above copyright notice and this permission notice shall be included in
18  * all copies or substantial portions of the Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
24  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
26  * THE SOFTWARE.
27  */
28 
29 #include "qemu/osdep.h"
30 #include "cpu.h"
31 #include <zlib.h>
32 #include "qemu/cutils.h"
33 #include "qemu/bitops.h"
34 #include "qemu/bitmap.h"
35 #include "qemu/main-loop.h"
36 #include "qemu/pmem.h"
37 #include "xbzrle.h"
38 #include "ram.h"
39 #include "migration.h"
40 #include "socket.h"
41 #include "migration/register.h"
42 #include "migration/misc.h"
43 #include "qemu-file.h"
44 #include "postcopy-ram.h"
45 #include "page_cache.h"
46 #include "qemu/error-report.h"
47 #include "qapi/error.h"
48 #include "qapi/qapi-events-migration.h"
49 #include "qapi/qmp/qerror.h"
50 #include "trace.h"
51 #include "exec/ram_addr.h"
52 #include "exec/target_page.h"
53 #include "qemu/rcu_queue.h"
54 #include "migration/colo.h"
55 #include "block.h"
56 #include "sysemu/sysemu.h"
57 #include "qemu/uuid.h"
58 #include "savevm.h"
59 #include "qemu/iov.h"
60 
61 /***********************************************************/
62 /* ram save/restore */
63 
64 /* RAM_SAVE_FLAG_ZERO used to be named RAM_SAVE_FLAG_COMPRESS, it
65  * worked for pages that where filled with the same char.  We switched
66  * it to only search for the zero value.  And to avoid confusion with
67  * RAM_SSAVE_FLAG_COMPRESS_PAGE just rename it.
68  */
69 
70 #define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
71 #define RAM_SAVE_FLAG_ZERO     0x02
72 #define RAM_SAVE_FLAG_MEM_SIZE 0x04
73 #define RAM_SAVE_FLAG_PAGE     0x08
74 #define RAM_SAVE_FLAG_EOS      0x10
75 #define RAM_SAVE_FLAG_CONTINUE 0x20
76 #define RAM_SAVE_FLAG_XBZRLE   0x40
77 /* 0x80 is reserved in migration.h start with 0x100 next */
78 #define RAM_SAVE_FLAG_COMPRESS_PAGE    0x100
79 
80 static inline bool is_zero_range(uint8_t *p, uint64_t size)
81 {
82     return buffer_is_zero(p, size);
83 }
84 
85 XBZRLECacheStats xbzrle_counters;
86 
87 /* struct contains XBZRLE cache and a static page
88    used by the compression */
89 static struct {
90     /* buffer used for XBZRLE encoding */
91     uint8_t *encoded_buf;
92     /* buffer for storing page content */
93     uint8_t *current_buf;
94     /* Cache for XBZRLE, Protected by lock. */
95     PageCache *cache;
96     QemuMutex lock;
97     /* it will store a page full of zeros */
98     uint8_t *zero_target_page;
99     /* buffer used for XBZRLE decoding */
100     uint8_t *decoded_buf;
101 } XBZRLE;
102 
103 static void XBZRLE_cache_lock(void)
104 {
105     if (migrate_use_xbzrle())
106         qemu_mutex_lock(&XBZRLE.lock);
107 }
108 
109 static void XBZRLE_cache_unlock(void)
110 {
111     if (migrate_use_xbzrle())
112         qemu_mutex_unlock(&XBZRLE.lock);
113 }
114 
115 /**
116  * xbzrle_cache_resize: resize the xbzrle cache
117  *
118  * This function is called from qmp_migrate_set_cache_size in main
119  * thread, possibly while a migration is in progress.  A running
120  * migration may be using the cache and might finish during this call,
121  * hence changes to the cache are protected by XBZRLE.lock().
122  *
123  * Returns 0 for success or -1 for error
124  *
125  * @new_size: new cache size
126  * @errp: set *errp if the check failed, with reason
127  */
128 int xbzrle_cache_resize(int64_t new_size, Error **errp)
129 {
130     PageCache *new_cache;
131     int64_t ret = 0;
132 
133     /* Check for truncation */
134     if (new_size != (size_t)new_size) {
135         error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
136                    "exceeding address space");
137         return -1;
138     }
139 
140     if (new_size == migrate_xbzrle_cache_size()) {
141         /* nothing to do */
142         return 0;
143     }
144 
145     XBZRLE_cache_lock();
146 
147     if (XBZRLE.cache != NULL) {
148         new_cache = cache_init(new_size, TARGET_PAGE_SIZE, errp);
149         if (!new_cache) {
150             ret = -1;
151             goto out;
152         }
153 
154         cache_fini(XBZRLE.cache);
155         XBZRLE.cache = new_cache;
156     }
157 out:
158     XBZRLE_cache_unlock();
159     return ret;
160 }
161 
162 static bool ramblock_is_ignored(RAMBlock *block)
163 {
164     return !qemu_ram_is_migratable(block) ||
165            (migrate_ignore_shared() && qemu_ram_is_shared(block));
166 }
167 
168 /* Should be holding either ram_list.mutex, or the RCU lock. */
169 #define RAMBLOCK_FOREACH_NOT_IGNORED(block)            \
170     INTERNAL_RAMBLOCK_FOREACH(block)                   \
171         if (ramblock_is_ignored(block)) {} else
172 
173 #define RAMBLOCK_FOREACH_MIGRATABLE(block)             \
174     INTERNAL_RAMBLOCK_FOREACH(block)                   \
175         if (!qemu_ram_is_migratable(block)) {} else
176 
177 #undef RAMBLOCK_FOREACH
178 
179 int foreach_not_ignored_block(RAMBlockIterFunc func, void *opaque)
180 {
181     RAMBlock *block;
182     int ret = 0;
183 
184     rcu_read_lock();
185     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
186         ret = func(block, opaque);
187         if (ret) {
188             break;
189         }
190     }
191     rcu_read_unlock();
192     return ret;
193 }
194 
195 static void ramblock_recv_map_init(void)
196 {
197     RAMBlock *rb;
198 
199     RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
200         assert(!rb->receivedmap);
201         rb->receivedmap = bitmap_new(rb->max_length >> qemu_target_page_bits());
202     }
203 }
204 
205 int ramblock_recv_bitmap_test(RAMBlock *rb, void *host_addr)
206 {
207     return test_bit(ramblock_recv_bitmap_offset(host_addr, rb),
208                     rb->receivedmap);
209 }
210 
211 bool ramblock_recv_bitmap_test_byte_offset(RAMBlock *rb, uint64_t byte_offset)
212 {
213     return test_bit(byte_offset >> TARGET_PAGE_BITS, rb->receivedmap);
214 }
215 
216 void ramblock_recv_bitmap_set(RAMBlock *rb, void *host_addr)
217 {
218     set_bit_atomic(ramblock_recv_bitmap_offset(host_addr, rb), rb->receivedmap);
219 }
220 
221 void ramblock_recv_bitmap_set_range(RAMBlock *rb, void *host_addr,
222                                     size_t nr)
223 {
224     bitmap_set_atomic(rb->receivedmap,
225                       ramblock_recv_bitmap_offset(host_addr, rb),
226                       nr);
227 }
228 
229 #define  RAMBLOCK_RECV_BITMAP_ENDING  (0x0123456789abcdefULL)
230 
231 /*
232  * Format: bitmap_size (8 bytes) + whole_bitmap (N bytes).
233  *
234  * Returns >0 if success with sent bytes, or <0 if error.
235  */
236 int64_t ramblock_recv_bitmap_send(QEMUFile *file,
237                                   const char *block_name)
238 {
239     RAMBlock *block = qemu_ram_block_by_name(block_name);
240     unsigned long *le_bitmap, nbits;
241     uint64_t size;
242 
243     if (!block) {
244         error_report("%s: invalid block name: %s", __func__, block_name);
245         return -1;
246     }
247 
248     nbits = block->used_length >> TARGET_PAGE_BITS;
249 
250     /*
251      * Make sure the tmp bitmap buffer is big enough, e.g., on 32bit
252      * machines we may need 4 more bytes for padding (see below
253      * comment). So extend it a bit before hand.
254      */
255     le_bitmap = bitmap_new(nbits + BITS_PER_LONG);
256 
257     /*
258      * Always use little endian when sending the bitmap. This is
259      * required that when source and destination VMs are not using the
260      * same endianess. (Note: big endian won't work.)
261      */
262     bitmap_to_le(le_bitmap, block->receivedmap, nbits);
263 
264     /* Size of the bitmap, in bytes */
265     size = DIV_ROUND_UP(nbits, 8);
266 
267     /*
268      * size is always aligned to 8 bytes for 64bit machines, but it
269      * may not be true for 32bit machines. We need this padding to
270      * make sure the migration can survive even between 32bit and
271      * 64bit machines.
272      */
273     size = ROUND_UP(size, 8);
274 
275     qemu_put_be64(file, size);
276     qemu_put_buffer(file, (const uint8_t *)le_bitmap, size);
277     /*
278      * Mark as an end, in case the middle part is screwed up due to
279      * some "misterious" reason.
280      */
281     qemu_put_be64(file, RAMBLOCK_RECV_BITMAP_ENDING);
282     qemu_fflush(file);
283 
284     g_free(le_bitmap);
285 
286     if (qemu_file_get_error(file)) {
287         return qemu_file_get_error(file);
288     }
289 
290     return size + sizeof(size);
291 }
292 
293 /*
294  * An outstanding page request, on the source, having been received
295  * and queued
296  */
297 struct RAMSrcPageRequest {
298     RAMBlock *rb;
299     hwaddr    offset;
300     hwaddr    len;
301 
302     QSIMPLEQ_ENTRY(RAMSrcPageRequest) next_req;
303 };
304 
305 /* State of RAM for migration */
306 struct RAMState {
307     /* QEMUFile used for this migration */
308     QEMUFile *f;
309     /* Last block that we have visited searching for dirty pages */
310     RAMBlock *last_seen_block;
311     /* Last block from where we have sent data */
312     RAMBlock *last_sent_block;
313     /* Last dirty target page we have sent */
314     ram_addr_t last_page;
315     /* last ram version we have seen */
316     uint32_t last_version;
317     /* We are in the first round */
318     bool ram_bulk_stage;
319     /* The free page optimization is enabled */
320     bool fpo_enabled;
321     /* How many times we have dirty too many pages */
322     int dirty_rate_high_cnt;
323     /* these variables are used for bitmap sync */
324     /* last time we did a full bitmap_sync */
325     int64_t time_last_bitmap_sync;
326     /* bytes transferred at start_time */
327     uint64_t bytes_xfer_prev;
328     /* number of dirty pages since start_time */
329     uint64_t num_dirty_pages_period;
330     /* xbzrle misses since the beginning of the period */
331     uint64_t xbzrle_cache_miss_prev;
332 
333     /* compression statistics since the beginning of the period */
334     /* amount of count that no free thread to compress data */
335     uint64_t compress_thread_busy_prev;
336     /* amount bytes after compression */
337     uint64_t compressed_size_prev;
338     /* amount of compressed pages */
339     uint64_t compress_pages_prev;
340 
341     /* total handled target pages at the beginning of period */
342     uint64_t target_page_count_prev;
343     /* total handled target pages since start */
344     uint64_t target_page_count;
345     /* number of dirty bits in the bitmap */
346     uint64_t migration_dirty_pages;
347     /* Protects modification of the bitmap and migration dirty pages */
348     QemuMutex bitmap_mutex;
349     /* The RAMBlock used in the last src_page_requests */
350     RAMBlock *last_req_rb;
351     /* Queue of outstanding page requests from the destination */
352     QemuMutex src_page_req_mutex;
353     QSIMPLEQ_HEAD(, RAMSrcPageRequest) src_page_requests;
354 };
355 typedef struct RAMState RAMState;
356 
357 static RAMState *ram_state;
358 
359 static NotifierWithReturnList precopy_notifier_list;
360 
361 void precopy_infrastructure_init(void)
362 {
363     notifier_with_return_list_init(&precopy_notifier_list);
364 }
365 
366 void precopy_add_notifier(NotifierWithReturn *n)
367 {
368     notifier_with_return_list_add(&precopy_notifier_list, n);
369 }
370 
371 void precopy_remove_notifier(NotifierWithReturn *n)
372 {
373     notifier_with_return_remove(n);
374 }
375 
376 int precopy_notify(PrecopyNotifyReason reason, Error **errp)
377 {
378     PrecopyNotifyData pnd;
379     pnd.reason = reason;
380     pnd.errp = errp;
381 
382     return notifier_with_return_list_notify(&precopy_notifier_list, &pnd);
383 }
384 
385 void precopy_enable_free_page_optimization(void)
386 {
387     if (!ram_state) {
388         return;
389     }
390 
391     ram_state->fpo_enabled = true;
392 }
393 
394 uint64_t ram_bytes_remaining(void)
395 {
396     return ram_state ? (ram_state->migration_dirty_pages * TARGET_PAGE_SIZE) :
397                        0;
398 }
399 
400 MigrationStats ram_counters;
401 
402 /* used by the search for pages to send */
403 struct PageSearchStatus {
404     /* Current block being searched */
405     RAMBlock    *block;
406     /* Current page to search from */
407     unsigned long page;
408     /* Set once we wrap around */
409     bool         complete_round;
410 };
411 typedef struct PageSearchStatus PageSearchStatus;
412 
413 CompressionStats compression_counters;
414 
415 struct CompressParam {
416     bool done;
417     bool quit;
418     bool zero_page;
419     QEMUFile *file;
420     QemuMutex mutex;
421     QemuCond cond;
422     RAMBlock *block;
423     ram_addr_t offset;
424 
425     /* internally used fields */
426     z_stream stream;
427     uint8_t *originbuf;
428 };
429 typedef struct CompressParam CompressParam;
430 
431 struct DecompressParam {
432     bool done;
433     bool quit;
434     QemuMutex mutex;
435     QemuCond cond;
436     void *des;
437     uint8_t *compbuf;
438     int len;
439     z_stream stream;
440 };
441 typedef struct DecompressParam DecompressParam;
442 
443 static CompressParam *comp_param;
444 static QemuThread *compress_threads;
445 /* comp_done_cond is used to wake up the migration thread when
446  * one of the compression threads has finished the compression.
447  * comp_done_lock is used to co-work with comp_done_cond.
448  */
449 static QemuMutex comp_done_lock;
450 static QemuCond comp_done_cond;
451 /* The empty QEMUFileOps will be used by file in CompressParam */
452 static const QEMUFileOps empty_ops = { };
453 
454 static QEMUFile *decomp_file;
455 static DecompressParam *decomp_param;
456 static QemuThread *decompress_threads;
457 static QemuMutex decomp_done_lock;
458 static QemuCond decomp_done_cond;
459 
460 static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
461                                  ram_addr_t offset, uint8_t *source_buf);
462 
463 static void *do_data_compress(void *opaque)
464 {
465     CompressParam *param = opaque;
466     RAMBlock *block;
467     ram_addr_t offset;
468     bool zero_page;
469 
470     qemu_mutex_lock(&param->mutex);
471     while (!param->quit) {
472         if (param->block) {
473             block = param->block;
474             offset = param->offset;
475             param->block = NULL;
476             qemu_mutex_unlock(&param->mutex);
477 
478             zero_page = do_compress_ram_page(param->file, &param->stream,
479                                              block, offset, param->originbuf);
480 
481             qemu_mutex_lock(&comp_done_lock);
482             param->done = true;
483             param->zero_page = zero_page;
484             qemu_cond_signal(&comp_done_cond);
485             qemu_mutex_unlock(&comp_done_lock);
486 
487             qemu_mutex_lock(&param->mutex);
488         } else {
489             qemu_cond_wait(&param->cond, &param->mutex);
490         }
491     }
492     qemu_mutex_unlock(&param->mutex);
493 
494     return NULL;
495 }
496 
497 static void compress_threads_save_cleanup(void)
498 {
499     int i, thread_count;
500 
501     if (!migrate_use_compression() || !comp_param) {
502         return;
503     }
504 
505     thread_count = migrate_compress_threads();
506     for (i = 0; i < thread_count; i++) {
507         /*
508          * we use it as a indicator which shows if the thread is
509          * properly init'd or not
510          */
511         if (!comp_param[i].file) {
512             break;
513         }
514 
515         qemu_mutex_lock(&comp_param[i].mutex);
516         comp_param[i].quit = true;
517         qemu_cond_signal(&comp_param[i].cond);
518         qemu_mutex_unlock(&comp_param[i].mutex);
519 
520         qemu_thread_join(compress_threads + i);
521         qemu_mutex_destroy(&comp_param[i].mutex);
522         qemu_cond_destroy(&comp_param[i].cond);
523         deflateEnd(&comp_param[i].stream);
524         g_free(comp_param[i].originbuf);
525         qemu_fclose(comp_param[i].file);
526         comp_param[i].file = NULL;
527     }
528     qemu_mutex_destroy(&comp_done_lock);
529     qemu_cond_destroy(&comp_done_cond);
530     g_free(compress_threads);
531     g_free(comp_param);
532     compress_threads = NULL;
533     comp_param = NULL;
534 }
535 
536 static int compress_threads_save_setup(void)
537 {
538     int i, thread_count;
539 
540     if (!migrate_use_compression()) {
541         return 0;
542     }
543     thread_count = migrate_compress_threads();
544     compress_threads = g_new0(QemuThread, thread_count);
545     comp_param = g_new0(CompressParam, thread_count);
546     qemu_cond_init(&comp_done_cond);
547     qemu_mutex_init(&comp_done_lock);
548     for (i = 0; i < thread_count; i++) {
549         comp_param[i].originbuf = g_try_malloc(TARGET_PAGE_SIZE);
550         if (!comp_param[i].originbuf) {
551             goto exit;
552         }
553 
554         if (deflateInit(&comp_param[i].stream,
555                         migrate_compress_level()) != Z_OK) {
556             g_free(comp_param[i].originbuf);
557             goto exit;
558         }
559 
560         /* comp_param[i].file is just used as a dummy buffer to save data,
561          * set its ops to empty.
562          */
563         comp_param[i].file = qemu_fopen_ops(NULL, &empty_ops);
564         comp_param[i].done = true;
565         comp_param[i].quit = false;
566         qemu_mutex_init(&comp_param[i].mutex);
567         qemu_cond_init(&comp_param[i].cond);
568         qemu_thread_create(compress_threads + i, "compress",
569                            do_data_compress, comp_param + i,
570                            QEMU_THREAD_JOINABLE);
571     }
572     return 0;
573 
574 exit:
575     compress_threads_save_cleanup();
576     return -1;
577 }
578 
579 /* Multiple fd's */
580 
581 #define MULTIFD_MAGIC 0x11223344U
582 #define MULTIFD_VERSION 1
583 
584 #define MULTIFD_FLAG_SYNC (1 << 0)
585 
586 /* This value needs to be a multiple of qemu_target_page_size() */
587 #define MULTIFD_PACKET_SIZE (512 * 1024)
588 
589 typedef struct {
590     uint32_t magic;
591     uint32_t version;
592     unsigned char uuid[16]; /* QemuUUID */
593     uint8_t id;
594     uint8_t unused1[7];     /* Reserved for future use */
595     uint64_t unused2[4];    /* Reserved for future use */
596 } __attribute__((packed)) MultiFDInit_t;
597 
598 typedef struct {
599     uint32_t magic;
600     uint32_t version;
601     uint32_t flags;
602     /* maximum number of allocated pages */
603     uint32_t pages_alloc;
604     uint32_t pages_used;
605     /* size of the next packet that contains pages */
606     uint32_t next_packet_size;
607     uint64_t packet_num;
608     uint64_t unused[4];    /* Reserved for future use */
609     char ramblock[256];
610     uint64_t offset[];
611 } __attribute__((packed)) MultiFDPacket_t;
612 
613 typedef struct {
614     /* number of used pages */
615     uint32_t used;
616     /* number of allocated pages */
617     uint32_t allocated;
618     /* global number of generated multifd packets */
619     uint64_t packet_num;
620     /* offset of each page */
621     ram_addr_t *offset;
622     /* pointer to each page */
623     struct iovec *iov;
624     RAMBlock *block;
625 } MultiFDPages_t;
626 
627 typedef struct {
628     /* this fields are not changed once the thread is created */
629     /* channel number */
630     uint8_t id;
631     /* channel thread name */
632     char *name;
633     /* channel thread id */
634     QemuThread thread;
635     /* communication channel */
636     QIOChannel *c;
637     /* sem where to wait for more work */
638     QemuSemaphore sem;
639     /* this mutex protects the following parameters */
640     QemuMutex mutex;
641     /* is this channel thread running */
642     bool running;
643     /* should this thread finish */
644     bool quit;
645     /* thread has work to do */
646     int pending_job;
647     /* array of pages to sent */
648     MultiFDPages_t *pages;
649     /* packet allocated len */
650     uint32_t packet_len;
651     /* pointer to the packet */
652     MultiFDPacket_t *packet;
653     /* multifd flags for each packet */
654     uint32_t flags;
655     /* size of the next packet that contains pages */
656     uint32_t next_packet_size;
657     /* global number of generated multifd packets */
658     uint64_t packet_num;
659     /* thread local variables */
660     /* packets sent through this channel */
661     uint64_t num_packets;
662     /* pages sent through this channel */
663     uint64_t num_pages;
664     /* syncs main thread and channels */
665     QemuSemaphore sem_sync;
666 }  MultiFDSendParams;
667 
668 typedef struct {
669     /* this fields are not changed once the thread is created */
670     /* channel number */
671     uint8_t id;
672     /* channel thread name */
673     char *name;
674     /* channel thread id */
675     QemuThread thread;
676     /* communication channel */
677     QIOChannel *c;
678     /* this mutex protects the following parameters */
679     QemuMutex mutex;
680     /* is this channel thread running */
681     bool running;
682     /* should this thread finish */
683     bool quit;
684     /* array of pages to receive */
685     MultiFDPages_t *pages;
686     /* packet allocated len */
687     uint32_t packet_len;
688     /* pointer to the packet */
689     MultiFDPacket_t *packet;
690     /* multifd flags for each packet */
691     uint32_t flags;
692     /* global number of generated multifd packets */
693     uint64_t packet_num;
694     /* thread local variables */
695     /* size of the next packet that contains pages */
696     uint32_t next_packet_size;
697     /* packets sent through this channel */
698     uint64_t num_packets;
699     /* pages sent through this channel */
700     uint64_t num_pages;
701     /* syncs main thread and channels */
702     QemuSemaphore sem_sync;
703 } MultiFDRecvParams;
704 
705 static int multifd_send_initial_packet(MultiFDSendParams *p, Error **errp)
706 {
707     MultiFDInit_t msg;
708     int ret;
709 
710     msg.magic = cpu_to_be32(MULTIFD_MAGIC);
711     msg.version = cpu_to_be32(MULTIFD_VERSION);
712     msg.id = p->id;
713     memcpy(msg.uuid, &qemu_uuid.data, sizeof(msg.uuid));
714 
715     ret = qio_channel_write_all(p->c, (char *)&msg, sizeof(msg), errp);
716     if (ret != 0) {
717         return -1;
718     }
719     return 0;
720 }
721 
722 static int multifd_recv_initial_packet(QIOChannel *c, Error **errp)
723 {
724     MultiFDInit_t msg;
725     int ret;
726 
727     ret = qio_channel_read_all(c, (char *)&msg, sizeof(msg), errp);
728     if (ret != 0) {
729         return -1;
730     }
731 
732     msg.magic = be32_to_cpu(msg.magic);
733     msg.version = be32_to_cpu(msg.version);
734 
735     if (msg.magic != MULTIFD_MAGIC) {
736         error_setg(errp, "multifd: received packet magic %x "
737                    "expected %x", msg.magic, MULTIFD_MAGIC);
738         return -1;
739     }
740 
741     if (msg.version != MULTIFD_VERSION) {
742         error_setg(errp, "multifd: received packet version %d "
743                    "expected %d", msg.version, MULTIFD_VERSION);
744         return -1;
745     }
746 
747     if (memcmp(msg.uuid, &qemu_uuid, sizeof(qemu_uuid))) {
748         char *uuid = qemu_uuid_unparse_strdup(&qemu_uuid);
749         char *msg_uuid = qemu_uuid_unparse_strdup((const QemuUUID *)msg.uuid);
750 
751         error_setg(errp, "multifd: received uuid '%s' and expected "
752                    "uuid '%s' for channel %hhd", msg_uuid, uuid, msg.id);
753         g_free(uuid);
754         g_free(msg_uuid);
755         return -1;
756     }
757 
758     if (msg.id > migrate_multifd_channels()) {
759         error_setg(errp, "multifd: received channel version %d "
760                    "expected %d", msg.version, MULTIFD_VERSION);
761         return -1;
762     }
763 
764     return msg.id;
765 }
766 
767 static MultiFDPages_t *multifd_pages_init(size_t size)
768 {
769     MultiFDPages_t *pages = g_new0(MultiFDPages_t, 1);
770 
771     pages->allocated = size;
772     pages->iov = g_new0(struct iovec, size);
773     pages->offset = g_new0(ram_addr_t, size);
774 
775     return pages;
776 }
777 
778 static void multifd_pages_clear(MultiFDPages_t *pages)
779 {
780     pages->used = 0;
781     pages->allocated = 0;
782     pages->packet_num = 0;
783     pages->block = NULL;
784     g_free(pages->iov);
785     pages->iov = NULL;
786     g_free(pages->offset);
787     pages->offset = NULL;
788     g_free(pages);
789 }
790 
791 static void multifd_send_fill_packet(MultiFDSendParams *p)
792 {
793     MultiFDPacket_t *packet = p->packet;
794     uint32_t page_max = MULTIFD_PACKET_SIZE / qemu_target_page_size();
795     int i;
796 
797     packet->magic = cpu_to_be32(MULTIFD_MAGIC);
798     packet->version = cpu_to_be32(MULTIFD_VERSION);
799     packet->flags = cpu_to_be32(p->flags);
800     packet->pages_alloc = cpu_to_be32(page_max);
801     packet->pages_used = cpu_to_be32(p->pages->used);
802     packet->next_packet_size = cpu_to_be32(p->next_packet_size);
803     packet->packet_num = cpu_to_be64(p->packet_num);
804 
805     if (p->pages->block) {
806         strncpy(packet->ramblock, p->pages->block->idstr, 256);
807     }
808 
809     for (i = 0; i < p->pages->used; i++) {
810         packet->offset[i] = cpu_to_be64(p->pages->offset[i]);
811     }
812 }
813 
814 static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
815 {
816     MultiFDPacket_t *packet = p->packet;
817     uint32_t pages_max = MULTIFD_PACKET_SIZE / qemu_target_page_size();
818     RAMBlock *block;
819     int i;
820 
821     packet->magic = be32_to_cpu(packet->magic);
822     if (packet->magic != MULTIFD_MAGIC) {
823         error_setg(errp, "multifd: received packet "
824                    "magic %x and expected magic %x",
825                    packet->magic, MULTIFD_MAGIC);
826         return -1;
827     }
828 
829     packet->version = be32_to_cpu(packet->version);
830     if (packet->version != MULTIFD_VERSION) {
831         error_setg(errp, "multifd: received packet "
832                    "version %d and expected version %d",
833                    packet->version, MULTIFD_VERSION);
834         return -1;
835     }
836 
837     p->flags = be32_to_cpu(packet->flags);
838 
839     packet->pages_alloc = be32_to_cpu(packet->pages_alloc);
840     /*
841      * If we recevied a packet that is 100 times bigger than expected
842      * just stop migration.  It is a magic number.
843      */
844     if (packet->pages_alloc > pages_max * 100) {
845         error_setg(errp, "multifd: received packet "
846                    "with size %d and expected a maximum size of %d",
847                    packet->pages_alloc, pages_max * 100) ;
848         return -1;
849     }
850     /*
851      * We received a packet that is bigger than expected but inside
852      * reasonable limits (see previous comment).  Just reallocate.
853      */
854     if (packet->pages_alloc > p->pages->allocated) {
855         multifd_pages_clear(p->pages);
856         p->pages = multifd_pages_init(packet->pages_alloc);
857     }
858 
859     p->pages->used = be32_to_cpu(packet->pages_used);
860     if (p->pages->used > packet->pages_alloc) {
861         error_setg(errp, "multifd: received packet "
862                    "with %d pages and expected maximum pages are %d",
863                    p->pages->used, packet->pages_alloc) ;
864         return -1;
865     }
866 
867     p->next_packet_size = be32_to_cpu(packet->next_packet_size);
868     p->packet_num = be64_to_cpu(packet->packet_num);
869 
870     if (p->pages->used) {
871         /* make sure that ramblock is 0 terminated */
872         packet->ramblock[255] = 0;
873         block = qemu_ram_block_by_name(packet->ramblock);
874         if (!block) {
875             error_setg(errp, "multifd: unknown ram block %s",
876                        packet->ramblock);
877             return -1;
878         }
879     }
880 
881     for (i = 0; i < p->pages->used; i++) {
882         ram_addr_t offset = be64_to_cpu(packet->offset[i]);
883 
884         if (offset > (block->used_length - TARGET_PAGE_SIZE)) {
885             error_setg(errp, "multifd: offset too long " RAM_ADDR_FMT
886                        " (max " RAM_ADDR_FMT ")",
887                        offset, block->max_length);
888             return -1;
889         }
890         p->pages->iov[i].iov_base = block->host + offset;
891         p->pages->iov[i].iov_len = TARGET_PAGE_SIZE;
892     }
893 
894     return 0;
895 }
896 
897 struct {
898     MultiFDSendParams *params;
899     /* array of pages to sent */
900     MultiFDPages_t *pages;
901     /* global number of generated multifd packets */
902     uint64_t packet_num;
903     /* send channels ready */
904     QemuSemaphore channels_ready;
905 } *multifd_send_state;
906 
907 /*
908  * How we use multifd_send_state->pages and channel->pages?
909  *
910  * We create a pages for each channel, and a main one.  Each time that
911  * we need to send a batch of pages we interchange the ones between
912  * multifd_send_state and the channel that is sending it.  There are
913  * two reasons for that:
914  *    - to not have to do so many mallocs during migration
915  *    - to make easier to know what to free at the end of migration
916  *
917  * This way we always know who is the owner of each "pages" struct,
918  * and we don't need any locking.  It belongs to the migration thread
919  * or to the channel thread.  Switching is safe because the migration
920  * thread is using the channel mutex when changing it, and the channel
921  * have to had finish with its own, otherwise pending_job can't be
922  * false.
923  */
924 
925 static int multifd_send_pages(RAMState *rs)
926 {
927     int i;
928     static int next_channel;
929     MultiFDSendParams *p = NULL; /* make happy gcc */
930     MultiFDPages_t *pages = multifd_send_state->pages;
931     uint64_t transferred;
932 
933     qemu_sem_wait(&multifd_send_state->channels_ready);
934     for (i = next_channel;; i = (i + 1) % migrate_multifd_channels()) {
935         p = &multifd_send_state->params[i];
936 
937         qemu_mutex_lock(&p->mutex);
938         if (p->quit) {
939             error_report("%s: channel %d has already quit!", __func__, i);
940             qemu_mutex_unlock(&p->mutex);
941             return -1;
942         }
943         if (!p->pending_job) {
944             p->pending_job++;
945             next_channel = (i + 1) % migrate_multifd_channels();
946             break;
947         }
948         qemu_mutex_unlock(&p->mutex);
949     }
950     p->pages->used = 0;
951 
952     p->packet_num = multifd_send_state->packet_num++;
953     p->pages->block = NULL;
954     multifd_send_state->pages = p->pages;
955     p->pages = pages;
956     transferred = ((uint64_t) pages->used) * TARGET_PAGE_SIZE + p->packet_len;
957     qemu_file_update_transfer(rs->f, transferred);
958     ram_counters.multifd_bytes += transferred;
959     ram_counters.transferred += transferred;;
960     qemu_mutex_unlock(&p->mutex);
961     qemu_sem_post(&p->sem);
962 
963     return 1;
964 }
965 
966 static int multifd_queue_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
967 {
968     MultiFDPages_t *pages = multifd_send_state->pages;
969 
970     if (!pages->block) {
971         pages->block = block;
972     }
973 
974     if (pages->block == block) {
975         pages->offset[pages->used] = offset;
976         pages->iov[pages->used].iov_base = block->host + offset;
977         pages->iov[pages->used].iov_len = TARGET_PAGE_SIZE;
978         pages->used++;
979 
980         if (pages->used < pages->allocated) {
981             return 1;
982         }
983     }
984 
985     if (multifd_send_pages(rs) < 0) {
986         return -1;
987     }
988 
989     if (pages->block != block) {
990         return  multifd_queue_page(rs, block, offset);
991     }
992 
993     return 1;
994 }
995 
996 static void multifd_send_terminate_threads(Error *err)
997 {
998     int i;
999 
1000     trace_multifd_send_terminate_threads(err != NULL);
1001 
1002     if (err) {
1003         MigrationState *s = migrate_get_current();
1004         migrate_set_error(s, err);
1005         if (s->state == MIGRATION_STATUS_SETUP ||
1006             s->state == MIGRATION_STATUS_PRE_SWITCHOVER ||
1007             s->state == MIGRATION_STATUS_DEVICE ||
1008             s->state == MIGRATION_STATUS_ACTIVE) {
1009             migrate_set_state(&s->state, s->state,
1010                               MIGRATION_STATUS_FAILED);
1011         }
1012     }
1013 
1014     for (i = 0; i < migrate_multifd_channels(); i++) {
1015         MultiFDSendParams *p = &multifd_send_state->params[i];
1016 
1017         qemu_mutex_lock(&p->mutex);
1018         p->quit = true;
1019         qemu_sem_post(&p->sem);
1020         qemu_mutex_unlock(&p->mutex);
1021     }
1022 }
1023 
1024 void multifd_save_cleanup(void)
1025 {
1026     int i;
1027 
1028     if (!migrate_use_multifd()) {
1029         return;
1030     }
1031     multifd_send_terminate_threads(NULL);
1032     for (i = 0; i < migrate_multifd_channels(); i++) {
1033         MultiFDSendParams *p = &multifd_send_state->params[i];
1034 
1035         if (p->running) {
1036             qemu_thread_join(&p->thread);
1037         }
1038         socket_send_channel_destroy(p->c);
1039         p->c = NULL;
1040         qemu_mutex_destroy(&p->mutex);
1041         qemu_sem_destroy(&p->sem);
1042         qemu_sem_destroy(&p->sem_sync);
1043         g_free(p->name);
1044         p->name = NULL;
1045         multifd_pages_clear(p->pages);
1046         p->pages = NULL;
1047         p->packet_len = 0;
1048         g_free(p->packet);
1049         p->packet = NULL;
1050     }
1051     qemu_sem_destroy(&multifd_send_state->channels_ready);
1052     g_free(multifd_send_state->params);
1053     multifd_send_state->params = NULL;
1054     multifd_pages_clear(multifd_send_state->pages);
1055     multifd_send_state->pages = NULL;
1056     g_free(multifd_send_state);
1057     multifd_send_state = NULL;
1058 }
1059 
1060 static void multifd_send_sync_main(RAMState *rs)
1061 {
1062     int i;
1063 
1064     if (!migrate_use_multifd()) {
1065         return;
1066     }
1067     if (multifd_send_state->pages->used) {
1068         if (multifd_send_pages(rs) < 0) {
1069             error_report("%s: multifd_send_pages fail", __func__);
1070             return;
1071         }
1072     }
1073     for (i = 0; i < migrate_multifd_channels(); i++) {
1074         MultiFDSendParams *p = &multifd_send_state->params[i];
1075 
1076         trace_multifd_send_sync_main_signal(p->id);
1077 
1078         qemu_mutex_lock(&p->mutex);
1079 
1080         if (p->quit) {
1081             error_report("%s: channel %d has already quit", __func__, i);
1082             qemu_mutex_unlock(&p->mutex);
1083             return;
1084         }
1085 
1086         p->packet_num = multifd_send_state->packet_num++;
1087         p->flags |= MULTIFD_FLAG_SYNC;
1088         p->pending_job++;
1089         qemu_file_update_transfer(rs->f, p->packet_len);
1090         ram_counters.multifd_bytes += p->packet_len;
1091         ram_counters.transferred += p->packet_len;
1092         qemu_mutex_unlock(&p->mutex);
1093         qemu_sem_post(&p->sem);
1094     }
1095     for (i = 0; i < migrate_multifd_channels(); i++) {
1096         MultiFDSendParams *p = &multifd_send_state->params[i];
1097 
1098         trace_multifd_send_sync_main_wait(p->id);
1099         qemu_sem_wait(&p->sem_sync);
1100     }
1101     trace_multifd_send_sync_main(multifd_send_state->packet_num);
1102 }
1103 
1104 static void *multifd_send_thread(void *opaque)
1105 {
1106     MultiFDSendParams *p = opaque;
1107     Error *local_err = NULL;
1108     int ret = 0;
1109     uint32_t flags = 0;
1110 
1111     trace_multifd_send_thread_start(p->id);
1112     rcu_register_thread();
1113 
1114     if (multifd_send_initial_packet(p, &local_err) < 0) {
1115         goto out;
1116     }
1117     /* initial packet */
1118     p->num_packets = 1;
1119 
1120     while (true) {
1121         qemu_sem_wait(&p->sem);
1122         qemu_mutex_lock(&p->mutex);
1123 
1124         if (p->pending_job) {
1125             uint32_t used = p->pages->used;
1126             uint64_t packet_num = p->packet_num;
1127             flags = p->flags;
1128 
1129             p->next_packet_size = used * qemu_target_page_size();
1130             multifd_send_fill_packet(p);
1131             p->flags = 0;
1132             p->num_packets++;
1133             p->num_pages += used;
1134             p->pages->used = 0;
1135             qemu_mutex_unlock(&p->mutex);
1136 
1137             trace_multifd_send(p->id, packet_num, used, flags,
1138                                p->next_packet_size);
1139 
1140             ret = qio_channel_write_all(p->c, (void *)p->packet,
1141                                         p->packet_len, &local_err);
1142             if (ret != 0) {
1143                 break;
1144             }
1145 
1146             if (used) {
1147                 ret = qio_channel_writev_all(p->c, p->pages->iov,
1148                                              used, &local_err);
1149                 if (ret != 0) {
1150                     break;
1151                 }
1152             }
1153 
1154             qemu_mutex_lock(&p->mutex);
1155             p->pending_job--;
1156             qemu_mutex_unlock(&p->mutex);
1157 
1158             if (flags & MULTIFD_FLAG_SYNC) {
1159                 qemu_sem_post(&p->sem_sync);
1160             }
1161             qemu_sem_post(&multifd_send_state->channels_ready);
1162         } else if (p->quit) {
1163             qemu_mutex_unlock(&p->mutex);
1164             break;
1165         } else {
1166             qemu_mutex_unlock(&p->mutex);
1167             /* sometimes there are spurious wakeups */
1168         }
1169     }
1170 
1171 out:
1172     if (local_err) {
1173         trace_multifd_send_error(p->id);
1174         multifd_send_terminate_threads(local_err);
1175     }
1176 
1177     /*
1178      * Error happen, I will exit, but I can't just leave, tell
1179      * who pay attention to me.
1180      */
1181     if (ret != 0) {
1182         if (flags & MULTIFD_FLAG_SYNC) {
1183             qemu_sem_post(&p->sem_sync);
1184         }
1185         qemu_sem_post(&multifd_send_state->channels_ready);
1186     }
1187 
1188     qemu_mutex_lock(&p->mutex);
1189     p->running = false;
1190     qemu_mutex_unlock(&p->mutex);
1191 
1192     rcu_unregister_thread();
1193     trace_multifd_send_thread_end(p->id, p->num_packets, p->num_pages);
1194 
1195     return NULL;
1196 }
1197 
1198 static void multifd_new_send_channel_async(QIOTask *task, gpointer opaque)
1199 {
1200     MultiFDSendParams *p = opaque;
1201     QIOChannel *sioc = QIO_CHANNEL(qio_task_get_source(task));
1202     Error *local_err = NULL;
1203 
1204     trace_multifd_new_send_channel_async(p->id);
1205     if (qio_task_propagate_error(task, &local_err)) {
1206         migrate_set_error(migrate_get_current(), local_err);
1207         multifd_save_cleanup();
1208     } else {
1209         p->c = QIO_CHANNEL(sioc);
1210         qio_channel_set_delay(p->c, false);
1211         p->running = true;
1212         qemu_thread_create(&p->thread, p->name, multifd_send_thread, p,
1213                            QEMU_THREAD_JOINABLE);
1214     }
1215 }
1216 
1217 int multifd_save_setup(void)
1218 {
1219     int thread_count;
1220     uint32_t page_count = MULTIFD_PACKET_SIZE / qemu_target_page_size();
1221     uint8_t i;
1222 
1223     if (!migrate_use_multifd()) {
1224         return 0;
1225     }
1226     thread_count = migrate_multifd_channels();
1227     multifd_send_state = g_malloc0(sizeof(*multifd_send_state));
1228     multifd_send_state->params = g_new0(MultiFDSendParams, thread_count);
1229     multifd_send_state->pages = multifd_pages_init(page_count);
1230     qemu_sem_init(&multifd_send_state->channels_ready, 0);
1231 
1232     for (i = 0; i < thread_count; i++) {
1233         MultiFDSendParams *p = &multifd_send_state->params[i];
1234 
1235         qemu_mutex_init(&p->mutex);
1236         qemu_sem_init(&p->sem, 0);
1237         qemu_sem_init(&p->sem_sync, 0);
1238         p->quit = false;
1239         p->pending_job = 0;
1240         p->id = i;
1241         p->pages = multifd_pages_init(page_count);
1242         p->packet_len = sizeof(MultiFDPacket_t)
1243                       + sizeof(ram_addr_t) * page_count;
1244         p->packet = g_malloc0(p->packet_len);
1245         p->name = g_strdup_printf("multifdsend_%d", i);
1246         socket_send_channel_create(multifd_new_send_channel_async, p);
1247     }
1248     return 0;
1249 }
1250 
1251 struct {
1252     MultiFDRecvParams *params;
1253     /* number of created threads */
1254     int count;
1255     /* syncs main thread and channels */
1256     QemuSemaphore sem_sync;
1257     /* global number of generated multifd packets */
1258     uint64_t packet_num;
1259 } *multifd_recv_state;
1260 
1261 static void multifd_recv_terminate_threads(Error *err)
1262 {
1263     int i;
1264 
1265     trace_multifd_recv_terminate_threads(err != NULL);
1266 
1267     if (err) {
1268         MigrationState *s = migrate_get_current();
1269         migrate_set_error(s, err);
1270         if (s->state == MIGRATION_STATUS_SETUP ||
1271             s->state == MIGRATION_STATUS_ACTIVE) {
1272             migrate_set_state(&s->state, s->state,
1273                               MIGRATION_STATUS_FAILED);
1274         }
1275     }
1276 
1277     for (i = 0; i < migrate_multifd_channels(); i++) {
1278         MultiFDRecvParams *p = &multifd_recv_state->params[i];
1279 
1280         qemu_mutex_lock(&p->mutex);
1281         p->quit = true;
1282         /* We could arrive here for two reasons:
1283            - normal quit, i.e. everything went fine, just finished
1284            - error quit: We close the channels so the channel threads
1285              finish the qio_channel_read_all_eof() */
1286         qio_channel_shutdown(p->c, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
1287         qemu_mutex_unlock(&p->mutex);
1288     }
1289 }
1290 
1291 int multifd_load_cleanup(Error **errp)
1292 {
1293     int i;
1294     int ret = 0;
1295 
1296     if (!migrate_use_multifd()) {
1297         return 0;
1298     }
1299     multifd_recv_terminate_threads(NULL);
1300     for (i = 0; i < migrate_multifd_channels(); i++) {
1301         MultiFDRecvParams *p = &multifd_recv_state->params[i];
1302 
1303         if (p->running) {
1304             p->quit = true;
1305             /*
1306              * multifd_recv_thread may hung at MULTIFD_FLAG_SYNC handle code,
1307              * however try to wakeup it without harm in cleanup phase.
1308              */
1309             qemu_sem_post(&p->sem_sync);
1310             qemu_thread_join(&p->thread);
1311         }
1312         object_unref(OBJECT(p->c));
1313         p->c = NULL;
1314         qemu_mutex_destroy(&p->mutex);
1315         qemu_sem_destroy(&p->sem_sync);
1316         g_free(p->name);
1317         p->name = NULL;
1318         multifd_pages_clear(p->pages);
1319         p->pages = NULL;
1320         p->packet_len = 0;
1321         g_free(p->packet);
1322         p->packet = NULL;
1323     }
1324     qemu_sem_destroy(&multifd_recv_state->sem_sync);
1325     g_free(multifd_recv_state->params);
1326     multifd_recv_state->params = NULL;
1327     g_free(multifd_recv_state);
1328     multifd_recv_state = NULL;
1329 
1330     return ret;
1331 }
1332 
1333 static void multifd_recv_sync_main(void)
1334 {
1335     int i;
1336 
1337     if (!migrate_use_multifd()) {
1338         return;
1339     }
1340     for (i = 0; i < migrate_multifd_channels(); i++) {
1341         MultiFDRecvParams *p = &multifd_recv_state->params[i];
1342 
1343         trace_multifd_recv_sync_main_wait(p->id);
1344         qemu_sem_wait(&multifd_recv_state->sem_sync);
1345     }
1346     for (i = 0; i < migrate_multifd_channels(); i++) {
1347         MultiFDRecvParams *p = &multifd_recv_state->params[i];
1348 
1349         qemu_mutex_lock(&p->mutex);
1350         if (multifd_recv_state->packet_num < p->packet_num) {
1351             multifd_recv_state->packet_num = p->packet_num;
1352         }
1353         qemu_mutex_unlock(&p->mutex);
1354         trace_multifd_recv_sync_main_signal(p->id);
1355         qemu_sem_post(&p->sem_sync);
1356     }
1357     trace_multifd_recv_sync_main(multifd_recv_state->packet_num);
1358 }
1359 
1360 static void *multifd_recv_thread(void *opaque)
1361 {
1362     MultiFDRecvParams *p = opaque;
1363     Error *local_err = NULL;
1364     int ret;
1365 
1366     trace_multifd_recv_thread_start(p->id);
1367     rcu_register_thread();
1368 
1369     while (true) {
1370         uint32_t used;
1371         uint32_t flags;
1372 
1373         if (p->quit) {
1374             break;
1375         }
1376 
1377         ret = qio_channel_read_all_eof(p->c, (void *)p->packet,
1378                                        p->packet_len, &local_err);
1379         if (ret == 0) {   /* EOF */
1380             break;
1381         }
1382         if (ret == -1) {   /* Error */
1383             break;
1384         }
1385 
1386         qemu_mutex_lock(&p->mutex);
1387         ret = multifd_recv_unfill_packet(p, &local_err);
1388         if (ret) {
1389             qemu_mutex_unlock(&p->mutex);
1390             break;
1391         }
1392 
1393         used = p->pages->used;
1394         flags = p->flags;
1395         trace_multifd_recv(p->id, p->packet_num, used, flags,
1396                            p->next_packet_size);
1397         p->num_packets++;
1398         p->num_pages += used;
1399         qemu_mutex_unlock(&p->mutex);
1400 
1401         if (used) {
1402             ret = qio_channel_readv_all(p->c, p->pages->iov,
1403                                         used, &local_err);
1404             if (ret != 0) {
1405                 break;
1406             }
1407         }
1408 
1409         if (flags & MULTIFD_FLAG_SYNC) {
1410             qemu_sem_post(&multifd_recv_state->sem_sync);
1411             qemu_sem_wait(&p->sem_sync);
1412         }
1413     }
1414 
1415     if (local_err) {
1416         multifd_recv_terminate_threads(local_err);
1417     }
1418     qemu_mutex_lock(&p->mutex);
1419     p->running = false;
1420     qemu_mutex_unlock(&p->mutex);
1421 
1422     rcu_unregister_thread();
1423     trace_multifd_recv_thread_end(p->id, p->num_packets, p->num_pages);
1424 
1425     return NULL;
1426 }
1427 
1428 int multifd_load_setup(void)
1429 {
1430     int thread_count;
1431     uint32_t page_count = MULTIFD_PACKET_SIZE / qemu_target_page_size();
1432     uint8_t i;
1433 
1434     if (!migrate_use_multifd()) {
1435         return 0;
1436     }
1437     thread_count = migrate_multifd_channels();
1438     multifd_recv_state = g_malloc0(sizeof(*multifd_recv_state));
1439     multifd_recv_state->params = g_new0(MultiFDRecvParams, thread_count);
1440     atomic_set(&multifd_recv_state->count, 0);
1441     qemu_sem_init(&multifd_recv_state->sem_sync, 0);
1442 
1443     for (i = 0; i < thread_count; i++) {
1444         MultiFDRecvParams *p = &multifd_recv_state->params[i];
1445 
1446         qemu_mutex_init(&p->mutex);
1447         qemu_sem_init(&p->sem_sync, 0);
1448         p->quit = false;
1449         p->id = i;
1450         p->pages = multifd_pages_init(page_count);
1451         p->packet_len = sizeof(MultiFDPacket_t)
1452                       + sizeof(ram_addr_t) * page_count;
1453         p->packet = g_malloc0(p->packet_len);
1454         p->name = g_strdup_printf("multifdrecv_%d", i);
1455     }
1456     return 0;
1457 }
1458 
1459 bool multifd_recv_all_channels_created(void)
1460 {
1461     int thread_count = migrate_multifd_channels();
1462 
1463     if (!migrate_use_multifd()) {
1464         return true;
1465     }
1466 
1467     return thread_count == atomic_read(&multifd_recv_state->count);
1468 }
1469 
1470 /*
1471  * Try to receive all multifd channels to get ready for the migration.
1472  * - Return true and do not set @errp when correctly receving all channels;
1473  * - Return false and do not set @errp when correctly receiving the current one;
1474  * - Return false and set @errp when failing to receive the current channel.
1475  */
1476 bool multifd_recv_new_channel(QIOChannel *ioc, Error **errp)
1477 {
1478     MultiFDRecvParams *p;
1479     Error *local_err = NULL;
1480     int id;
1481 
1482     id = multifd_recv_initial_packet(ioc, &local_err);
1483     if (id < 0) {
1484         multifd_recv_terminate_threads(local_err);
1485         error_propagate_prepend(errp, local_err,
1486                                 "failed to receive packet"
1487                                 " via multifd channel %d: ",
1488                                 atomic_read(&multifd_recv_state->count));
1489         return false;
1490     }
1491     trace_multifd_recv_new_channel(id);
1492 
1493     p = &multifd_recv_state->params[id];
1494     if (p->c != NULL) {
1495         error_setg(&local_err, "multifd: received id '%d' already setup'",
1496                    id);
1497         multifd_recv_terminate_threads(local_err);
1498         error_propagate(errp, local_err);
1499         return false;
1500     }
1501     p->c = ioc;
1502     object_ref(OBJECT(ioc));
1503     /* initial packet */
1504     p->num_packets = 1;
1505 
1506     p->running = true;
1507     qemu_thread_create(&p->thread, p->name, multifd_recv_thread, p,
1508                        QEMU_THREAD_JOINABLE);
1509     atomic_inc(&multifd_recv_state->count);
1510     return atomic_read(&multifd_recv_state->count) ==
1511            migrate_multifd_channels();
1512 }
1513 
1514 /**
1515  * save_page_header: write page header to wire
1516  *
1517  * If this is the 1st block, it also writes the block identification
1518  *
1519  * Returns the number of bytes written
1520  *
1521  * @f: QEMUFile where to send the data
1522  * @block: block that contains the page we want to send
1523  * @offset: offset inside the block for the page
1524  *          in the lower bits, it contains flags
1525  */
1526 static size_t save_page_header(RAMState *rs, QEMUFile *f,  RAMBlock *block,
1527                                ram_addr_t offset)
1528 {
1529     size_t size, len;
1530 
1531     if (block == rs->last_sent_block) {
1532         offset |= RAM_SAVE_FLAG_CONTINUE;
1533     }
1534     qemu_put_be64(f, offset);
1535     size = 8;
1536 
1537     if (!(offset & RAM_SAVE_FLAG_CONTINUE)) {
1538         len = strlen(block->idstr);
1539         qemu_put_byte(f, len);
1540         qemu_put_buffer(f, (uint8_t *)block->idstr, len);
1541         size += 1 + len;
1542         rs->last_sent_block = block;
1543     }
1544     return size;
1545 }
1546 
1547 /**
1548  * mig_throttle_guest_down: throotle down the guest
1549  *
1550  * Reduce amount of guest cpu execution to hopefully slow down memory
1551  * writes. If guest dirty memory rate is reduced below the rate at
1552  * which we can transfer pages to the destination then we should be
1553  * able to complete migration. Some workloads dirty memory way too
1554  * fast and will not effectively converge, even with auto-converge.
1555  */
1556 static void mig_throttle_guest_down(void)
1557 {
1558     MigrationState *s = migrate_get_current();
1559     uint64_t pct_initial = s->parameters.cpu_throttle_initial;
1560     uint64_t pct_icrement = s->parameters.cpu_throttle_increment;
1561     int pct_max = s->parameters.max_cpu_throttle;
1562 
1563     /* We have not started throttling yet. Let's start it. */
1564     if (!cpu_throttle_active()) {
1565         cpu_throttle_set(pct_initial);
1566     } else {
1567         /* Throttling already on, just increase the rate */
1568         cpu_throttle_set(MIN(cpu_throttle_get_percentage() + pct_icrement,
1569                          pct_max));
1570     }
1571 }
1572 
1573 /**
1574  * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache
1575  *
1576  * @rs: current RAM state
1577  * @current_addr: address for the zero page
1578  *
1579  * Update the xbzrle cache to reflect a page that's been sent as all 0.
1580  * The important thing is that a stale (not-yet-0'd) page be replaced
1581  * by the new data.
1582  * As a bonus, if the page wasn't in the cache it gets added so that
1583  * when a small write is made into the 0'd page it gets XBZRLE sent.
1584  */
1585 static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
1586 {
1587     if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
1588         return;
1589     }
1590 
1591     /* We don't care if this fails to allocate a new cache page
1592      * as long as it updated an old one */
1593     cache_insert(XBZRLE.cache, current_addr, XBZRLE.zero_target_page,
1594                  ram_counters.dirty_sync_count);
1595 }
1596 
1597 #define ENCODING_FLAG_XBZRLE 0x1
1598 
1599 /**
1600  * save_xbzrle_page: compress and send current page
1601  *
1602  * Returns: 1 means that we wrote the page
1603  *          0 means that page is identical to the one already sent
1604  *          -1 means that xbzrle would be longer than normal
1605  *
1606  * @rs: current RAM state
1607  * @current_data: pointer to the address of the page contents
1608  * @current_addr: addr of the page
1609  * @block: block that contains the page we want to send
1610  * @offset: offset inside the block for the page
1611  * @last_stage: if we are at the completion stage
1612  */
1613 static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
1614                             ram_addr_t current_addr, RAMBlock *block,
1615                             ram_addr_t offset, bool last_stage)
1616 {
1617     int encoded_len = 0, bytes_xbzrle;
1618     uint8_t *prev_cached_page;
1619 
1620     if (!cache_is_cached(XBZRLE.cache, current_addr,
1621                          ram_counters.dirty_sync_count)) {
1622         xbzrle_counters.cache_miss++;
1623         if (!last_stage) {
1624             if (cache_insert(XBZRLE.cache, current_addr, *current_data,
1625                              ram_counters.dirty_sync_count) == -1) {
1626                 return -1;
1627             } else {
1628                 /* update *current_data when the page has been
1629                    inserted into cache */
1630                 *current_data = get_cached_data(XBZRLE.cache, current_addr);
1631             }
1632         }
1633         return -1;
1634     }
1635 
1636     prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);
1637 
1638     /* save current buffer into memory */
1639     memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);
1640 
1641     /* XBZRLE encoding (if there is no overflow) */
1642     encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
1643                                        TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
1644                                        TARGET_PAGE_SIZE);
1645 
1646     /*
1647      * Update the cache contents, so that it corresponds to the data
1648      * sent, in all cases except where we skip the page.
1649      */
1650     if (!last_stage && encoded_len != 0) {
1651         memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
1652         /*
1653          * In the case where we couldn't compress, ensure that the caller
1654          * sends the data from the cache, since the guest might have
1655          * changed the RAM since we copied it.
1656          */
1657         *current_data = prev_cached_page;
1658     }
1659 
1660     if (encoded_len == 0) {
1661         trace_save_xbzrle_page_skipping();
1662         return 0;
1663     } else if (encoded_len == -1) {
1664         trace_save_xbzrle_page_overflow();
1665         xbzrle_counters.overflow++;
1666         return -1;
1667     }
1668 
1669     /* Send XBZRLE based compressed page */
1670     bytes_xbzrle = save_page_header(rs, rs->f, block,
1671                                     offset | RAM_SAVE_FLAG_XBZRLE);
1672     qemu_put_byte(rs->f, ENCODING_FLAG_XBZRLE);
1673     qemu_put_be16(rs->f, encoded_len);
1674     qemu_put_buffer(rs->f, XBZRLE.encoded_buf, encoded_len);
1675     bytes_xbzrle += encoded_len + 1 + 2;
1676     xbzrle_counters.pages++;
1677     xbzrle_counters.bytes += bytes_xbzrle;
1678     ram_counters.transferred += bytes_xbzrle;
1679 
1680     return 1;
1681 }
1682 
1683 /**
1684  * migration_bitmap_find_dirty: find the next dirty page from start
1685  *
1686  * Returns the page offset within memory region of the start of a dirty page
1687  *
1688  * @rs: current RAM state
1689  * @rb: RAMBlock where to search for dirty pages
1690  * @start: page where we start the search
1691  */
1692 static inline
1693 unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
1694                                           unsigned long start)
1695 {
1696     unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
1697     unsigned long *bitmap = rb->bmap;
1698     unsigned long next;
1699 
1700     if (ramblock_is_ignored(rb)) {
1701         return size;
1702     }
1703 
1704     /*
1705      * When the free page optimization is enabled, we need to check the bitmap
1706      * to send the non-free pages rather than all the pages in the bulk stage.
1707      */
1708     if (!rs->fpo_enabled && rs->ram_bulk_stage && start > 0) {
1709         next = start + 1;
1710     } else {
1711         next = find_next_bit(bitmap, size, start);
1712     }
1713 
1714     return next;
1715 }
1716 
1717 static inline bool migration_bitmap_clear_dirty(RAMState *rs,
1718                                                 RAMBlock *rb,
1719                                                 unsigned long page)
1720 {
1721     bool ret;
1722 
1723     qemu_mutex_lock(&rs->bitmap_mutex);
1724 
1725     /*
1726      * Clear dirty bitmap if needed.  This _must_ be called before we
1727      * send any of the page in the chunk because we need to make sure
1728      * we can capture further page content changes when we sync dirty
1729      * log the next time.  So as long as we are going to send any of
1730      * the page in the chunk we clear the remote dirty bitmap for all.
1731      * Clearing it earlier won't be a problem, but too late will.
1732      */
1733     if (rb->clear_bmap && clear_bmap_test_and_clear(rb, page)) {
1734         uint8_t shift = rb->clear_bmap_shift;
1735         hwaddr size = 1ULL << (TARGET_PAGE_BITS + shift);
1736         hwaddr start = (page << TARGET_PAGE_BITS) & (-size);
1737 
1738         /*
1739          * CLEAR_BITMAP_SHIFT_MIN should always guarantee this... this
1740          * can make things easier sometimes since then start address
1741          * of the small chunk will always be 64 pages aligned so the
1742          * bitmap will always be aligned to unsigned long.  We should
1743          * even be able to remove this restriction but I'm simply
1744          * keeping it.
1745          */
1746         assert(shift >= 6);
1747         trace_migration_bitmap_clear_dirty(rb->idstr, start, size, page);
1748         memory_region_clear_dirty_bitmap(rb->mr, start, size);
1749     }
1750 
1751     ret = test_and_clear_bit(page, rb->bmap);
1752 
1753     if (ret) {
1754         rs->migration_dirty_pages--;
1755     }
1756     qemu_mutex_unlock(&rs->bitmap_mutex);
1757 
1758     return ret;
1759 }
1760 
1761 /* Called with RCU critical section */
1762 static void ramblock_sync_dirty_bitmap(RAMState *rs, RAMBlock *rb)
1763 {
1764     rs->migration_dirty_pages +=
1765         cpu_physical_memory_sync_dirty_bitmap(rb, 0, rb->used_length,
1766                                               &rs->num_dirty_pages_period);
1767 }
1768 
1769 /**
1770  * ram_pagesize_summary: calculate all the pagesizes of a VM
1771  *
1772  * Returns a summary bitmap of the page sizes of all RAMBlocks
1773  *
1774  * For VMs with just normal pages this is equivalent to the host page
1775  * size. If it's got some huge pages then it's the OR of all the
1776  * different page sizes.
1777  */
1778 uint64_t ram_pagesize_summary(void)
1779 {
1780     RAMBlock *block;
1781     uint64_t summary = 0;
1782 
1783     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
1784         summary |= block->page_size;
1785     }
1786 
1787     return summary;
1788 }
1789 
1790 uint64_t ram_get_total_transferred_pages(void)
1791 {
1792     return  ram_counters.normal + ram_counters.duplicate +
1793                 compression_counters.pages + xbzrle_counters.pages;
1794 }
1795 
1796 static void migration_update_rates(RAMState *rs, int64_t end_time)
1797 {
1798     uint64_t page_count = rs->target_page_count - rs->target_page_count_prev;
1799     double compressed_size;
1800 
1801     /* calculate period counters */
1802     ram_counters.dirty_pages_rate = rs->num_dirty_pages_period * 1000
1803                 / (end_time - rs->time_last_bitmap_sync);
1804 
1805     if (!page_count) {
1806         return;
1807     }
1808 
1809     if (migrate_use_xbzrle()) {
1810         xbzrle_counters.cache_miss_rate = (double)(xbzrle_counters.cache_miss -
1811             rs->xbzrle_cache_miss_prev) / page_count;
1812         rs->xbzrle_cache_miss_prev = xbzrle_counters.cache_miss;
1813     }
1814 
1815     if (migrate_use_compression()) {
1816         compression_counters.busy_rate = (double)(compression_counters.busy -
1817             rs->compress_thread_busy_prev) / page_count;
1818         rs->compress_thread_busy_prev = compression_counters.busy;
1819 
1820         compressed_size = compression_counters.compressed_size -
1821                           rs->compressed_size_prev;
1822         if (compressed_size) {
1823             double uncompressed_size = (compression_counters.pages -
1824                                     rs->compress_pages_prev) * TARGET_PAGE_SIZE;
1825 
1826             /* Compression-Ratio = Uncompressed-size / Compressed-size */
1827             compression_counters.compression_rate =
1828                                         uncompressed_size / compressed_size;
1829 
1830             rs->compress_pages_prev = compression_counters.pages;
1831             rs->compressed_size_prev = compression_counters.compressed_size;
1832         }
1833     }
1834 }
1835 
1836 static void migration_bitmap_sync(RAMState *rs)
1837 {
1838     RAMBlock *block;
1839     int64_t end_time;
1840     uint64_t bytes_xfer_now;
1841 
1842     ram_counters.dirty_sync_count++;
1843 
1844     if (!rs->time_last_bitmap_sync) {
1845         rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1846     }
1847 
1848     trace_migration_bitmap_sync_start();
1849     memory_global_dirty_log_sync();
1850 
1851     qemu_mutex_lock(&rs->bitmap_mutex);
1852     rcu_read_lock();
1853     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
1854         ramblock_sync_dirty_bitmap(rs, block);
1855     }
1856     ram_counters.remaining = ram_bytes_remaining();
1857     rcu_read_unlock();
1858     qemu_mutex_unlock(&rs->bitmap_mutex);
1859 
1860     trace_migration_bitmap_sync_end(rs->num_dirty_pages_period);
1861 
1862     end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1863 
1864     /* more than 1 second = 1000 millisecons */
1865     if (end_time > rs->time_last_bitmap_sync + 1000) {
1866         bytes_xfer_now = ram_counters.transferred;
1867 
1868         /* During block migration the auto-converge logic incorrectly detects
1869          * that ram migration makes no progress. Avoid this by disabling the
1870          * throttling logic during the bulk phase of block migration. */
1871         if (migrate_auto_converge() && !blk_mig_bulk_active()) {
1872             /* The following detection logic can be refined later. For now:
1873                Check to see if the dirtied bytes is 50% more than the approx.
1874                amount of bytes that just got transferred since the last time we
1875                were in this routine. If that happens twice, start or increase
1876                throttling */
1877 
1878             if ((rs->num_dirty_pages_period * TARGET_PAGE_SIZE >
1879                    (bytes_xfer_now - rs->bytes_xfer_prev) / 2) &&
1880                 (++rs->dirty_rate_high_cnt >= 2)) {
1881                     trace_migration_throttle();
1882                     rs->dirty_rate_high_cnt = 0;
1883                     mig_throttle_guest_down();
1884             }
1885         }
1886 
1887         migration_update_rates(rs, end_time);
1888 
1889         rs->target_page_count_prev = rs->target_page_count;
1890 
1891         /* reset period counters */
1892         rs->time_last_bitmap_sync = end_time;
1893         rs->num_dirty_pages_period = 0;
1894         rs->bytes_xfer_prev = bytes_xfer_now;
1895     }
1896     if (migrate_use_events()) {
1897         qapi_event_send_migration_pass(ram_counters.dirty_sync_count);
1898     }
1899 }
1900 
1901 static void migration_bitmap_sync_precopy(RAMState *rs)
1902 {
1903     Error *local_err = NULL;
1904 
1905     /*
1906      * The current notifier usage is just an optimization to migration, so we
1907      * don't stop the normal migration process in the error case.
1908      */
1909     if (precopy_notify(PRECOPY_NOTIFY_BEFORE_BITMAP_SYNC, &local_err)) {
1910         error_report_err(local_err);
1911     }
1912 
1913     migration_bitmap_sync(rs);
1914 
1915     if (precopy_notify(PRECOPY_NOTIFY_AFTER_BITMAP_SYNC, &local_err)) {
1916         error_report_err(local_err);
1917     }
1918 }
1919 
1920 /**
1921  * save_zero_page_to_file: send the zero page to the file
1922  *
1923  * Returns the size of data written to the file, 0 means the page is not
1924  * a zero page
1925  *
1926  * @rs: current RAM state
1927  * @file: the file where the data is saved
1928  * @block: block that contains the page we want to send
1929  * @offset: offset inside the block for the page
1930  */
1931 static int save_zero_page_to_file(RAMState *rs, QEMUFile *file,
1932                                   RAMBlock *block, ram_addr_t offset)
1933 {
1934     uint8_t *p = block->host + offset;
1935     int len = 0;
1936 
1937     if (is_zero_range(p, TARGET_PAGE_SIZE)) {
1938         len += save_page_header(rs, file, block, offset | RAM_SAVE_FLAG_ZERO);
1939         qemu_put_byte(file, 0);
1940         len += 1;
1941     }
1942     return len;
1943 }
1944 
1945 /**
1946  * save_zero_page: send the zero page to the stream
1947  *
1948  * Returns the number of pages written.
1949  *
1950  * @rs: current RAM state
1951  * @block: block that contains the page we want to send
1952  * @offset: offset inside the block for the page
1953  */
1954 static int save_zero_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
1955 {
1956     int len = save_zero_page_to_file(rs, rs->f, block, offset);
1957 
1958     if (len) {
1959         ram_counters.duplicate++;
1960         ram_counters.transferred += len;
1961         return 1;
1962     }
1963     return -1;
1964 }
1965 
1966 static void ram_release_pages(const char *rbname, uint64_t offset, int pages)
1967 {
1968     if (!migrate_release_ram() || !migration_in_postcopy()) {
1969         return;
1970     }
1971 
1972     ram_discard_range(rbname, offset, pages << TARGET_PAGE_BITS);
1973 }
1974 
1975 /*
1976  * @pages: the number of pages written by the control path,
1977  *        < 0 - error
1978  *        > 0 - number of pages written
1979  *
1980  * Return true if the pages has been saved, otherwise false is returned.
1981  */
1982 static bool control_save_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
1983                               int *pages)
1984 {
1985     uint64_t bytes_xmit = 0;
1986     int ret;
1987 
1988     *pages = -1;
1989     ret = ram_control_save_page(rs->f, block->offset, offset, TARGET_PAGE_SIZE,
1990                                 &bytes_xmit);
1991     if (ret == RAM_SAVE_CONTROL_NOT_SUPP) {
1992         return false;
1993     }
1994 
1995     if (bytes_xmit) {
1996         ram_counters.transferred += bytes_xmit;
1997         *pages = 1;
1998     }
1999 
2000     if (ret == RAM_SAVE_CONTROL_DELAYED) {
2001         return true;
2002     }
2003 
2004     if (bytes_xmit > 0) {
2005         ram_counters.normal++;
2006     } else if (bytes_xmit == 0) {
2007         ram_counters.duplicate++;
2008     }
2009 
2010     return true;
2011 }
2012 
2013 /*
2014  * directly send the page to the stream
2015  *
2016  * Returns the number of pages written.
2017  *
2018  * @rs: current RAM state
2019  * @block: block that contains the page we want to send
2020  * @offset: offset inside the block for the page
2021  * @buf: the page to be sent
2022  * @async: send to page asyncly
2023  */
2024 static int save_normal_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
2025                             uint8_t *buf, bool async)
2026 {
2027     ram_counters.transferred += save_page_header(rs, rs->f, block,
2028                                                  offset | RAM_SAVE_FLAG_PAGE);
2029     if (async) {
2030         qemu_put_buffer_async(rs->f, buf, TARGET_PAGE_SIZE,
2031                               migrate_release_ram() &
2032                               migration_in_postcopy());
2033     } else {
2034         qemu_put_buffer(rs->f, buf, TARGET_PAGE_SIZE);
2035     }
2036     ram_counters.transferred += TARGET_PAGE_SIZE;
2037     ram_counters.normal++;
2038     return 1;
2039 }
2040 
2041 /**
2042  * ram_save_page: send the given page to the stream
2043  *
2044  * Returns the number of pages written.
2045  *          < 0 - error
2046  *          >=0 - Number of pages written - this might legally be 0
2047  *                if xbzrle noticed the page was the same.
2048  *
2049  * @rs: current RAM state
2050  * @block: block that contains the page we want to send
2051  * @offset: offset inside the block for the page
2052  * @last_stage: if we are at the completion stage
2053  */
2054 static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
2055 {
2056     int pages = -1;
2057     uint8_t *p;
2058     bool send_async = true;
2059     RAMBlock *block = pss->block;
2060     ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
2061     ram_addr_t current_addr = block->offset + offset;
2062 
2063     p = block->host + offset;
2064     trace_ram_save_page(block->idstr, (uint64_t)offset, p);
2065 
2066     XBZRLE_cache_lock();
2067     if (!rs->ram_bulk_stage && !migration_in_postcopy() &&
2068         migrate_use_xbzrle()) {
2069         pages = save_xbzrle_page(rs, &p, current_addr, block,
2070                                  offset, last_stage);
2071         if (!last_stage) {
2072             /* Can't send this cached data async, since the cache page
2073              * might get updated before it gets to the wire
2074              */
2075             send_async = false;
2076         }
2077     }
2078 
2079     /* XBZRLE overflow or normal page */
2080     if (pages == -1) {
2081         pages = save_normal_page(rs, block, offset, p, send_async);
2082     }
2083 
2084     XBZRLE_cache_unlock();
2085 
2086     return pages;
2087 }
2088 
2089 static int ram_save_multifd_page(RAMState *rs, RAMBlock *block,
2090                                  ram_addr_t offset)
2091 {
2092     if (multifd_queue_page(rs, block, offset) < 0) {
2093         return -1;
2094     }
2095     ram_counters.normal++;
2096 
2097     return 1;
2098 }
2099 
2100 static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
2101                                  ram_addr_t offset, uint8_t *source_buf)
2102 {
2103     RAMState *rs = ram_state;
2104     uint8_t *p = block->host + (offset & TARGET_PAGE_MASK);
2105     bool zero_page = false;
2106     int ret;
2107 
2108     if (save_zero_page_to_file(rs, f, block, offset)) {
2109         zero_page = true;
2110         goto exit;
2111     }
2112 
2113     save_page_header(rs, f, block, offset | RAM_SAVE_FLAG_COMPRESS_PAGE);
2114 
2115     /*
2116      * copy it to a internal buffer to avoid it being modified by VM
2117      * so that we can catch up the error during compression and
2118      * decompression
2119      */
2120     memcpy(source_buf, p, TARGET_PAGE_SIZE);
2121     ret = qemu_put_compression_data(f, stream, source_buf, TARGET_PAGE_SIZE);
2122     if (ret < 0) {
2123         qemu_file_set_error(migrate_get_current()->to_dst_file, ret);
2124         error_report("compressed data failed!");
2125         return false;
2126     }
2127 
2128 exit:
2129     ram_release_pages(block->idstr, offset & TARGET_PAGE_MASK, 1);
2130     return zero_page;
2131 }
2132 
2133 static void
2134 update_compress_thread_counts(const CompressParam *param, int bytes_xmit)
2135 {
2136     ram_counters.transferred += bytes_xmit;
2137 
2138     if (param->zero_page) {
2139         ram_counters.duplicate++;
2140         return;
2141     }
2142 
2143     /* 8 means a header with RAM_SAVE_FLAG_CONTINUE. */
2144     compression_counters.compressed_size += bytes_xmit - 8;
2145     compression_counters.pages++;
2146 }
2147 
2148 static bool save_page_use_compression(RAMState *rs);
2149 
2150 static void flush_compressed_data(RAMState *rs)
2151 {
2152     int idx, len, thread_count;
2153 
2154     if (!save_page_use_compression(rs)) {
2155         return;
2156     }
2157     thread_count = migrate_compress_threads();
2158 
2159     qemu_mutex_lock(&comp_done_lock);
2160     for (idx = 0; idx < thread_count; idx++) {
2161         while (!comp_param[idx].done) {
2162             qemu_cond_wait(&comp_done_cond, &comp_done_lock);
2163         }
2164     }
2165     qemu_mutex_unlock(&comp_done_lock);
2166 
2167     for (idx = 0; idx < thread_count; idx++) {
2168         qemu_mutex_lock(&comp_param[idx].mutex);
2169         if (!comp_param[idx].quit) {
2170             len = qemu_put_qemu_file(rs->f, comp_param[idx].file);
2171             /*
2172              * it's safe to fetch zero_page without holding comp_done_lock
2173              * as there is no further request submitted to the thread,
2174              * i.e, the thread should be waiting for a request at this point.
2175              */
2176             update_compress_thread_counts(&comp_param[idx], len);
2177         }
2178         qemu_mutex_unlock(&comp_param[idx].mutex);
2179     }
2180 }
2181 
2182 static inline void set_compress_params(CompressParam *param, RAMBlock *block,
2183                                        ram_addr_t offset)
2184 {
2185     param->block = block;
2186     param->offset = offset;
2187 }
2188 
2189 static int compress_page_with_multi_thread(RAMState *rs, RAMBlock *block,
2190                                            ram_addr_t offset)
2191 {
2192     int idx, thread_count, bytes_xmit = -1, pages = -1;
2193     bool wait = migrate_compress_wait_thread();
2194 
2195     thread_count = migrate_compress_threads();
2196     qemu_mutex_lock(&comp_done_lock);
2197 retry:
2198     for (idx = 0; idx < thread_count; idx++) {
2199         if (comp_param[idx].done) {
2200             comp_param[idx].done = false;
2201             bytes_xmit = qemu_put_qemu_file(rs->f, comp_param[idx].file);
2202             qemu_mutex_lock(&comp_param[idx].mutex);
2203             set_compress_params(&comp_param[idx], block, offset);
2204             qemu_cond_signal(&comp_param[idx].cond);
2205             qemu_mutex_unlock(&comp_param[idx].mutex);
2206             pages = 1;
2207             update_compress_thread_counts(&comp_param[idx], bytes_xmit);
2208             break;
2209         }
2210     }
2211 
2212     /*
2213      * wait for the free thread if the user specifies 'compress-wait-thread',
2214      * otherwise we will post the page out in the main thread as normal page.
2215      */
2216     if (pages < 0 && wait) {
2217         qemu_cond_wait(&comp_done_cond, &comp_done_lock);
2218         goto retry;
2219     }
2220     qemu_mutex_unlock(&comp_done_lock);
2221 
2222     return pages;
2223 }
2224 
2225 /**
2226  * find_dirty_block: find the next dirty page and update any state
2227  * associated with the search process.
2228  *
2229  * Returns true if a page is found
2230  *
2231  * @rs: current RAM state
2232  * @pss: data about the state of the current dirty page scan
2233  * @again: set to false if the search has scanned the whole of RAM
2234  */
2235 static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again)
2236 {
2237     pss->page = migration_bitmap_find_dirty(rs, pss->block, pss->page);
2238     if (pss->complete_round && pss->block == rs->last_seen_block &&
2239         pss->page >= rs->last_page) {
2240         /*
2241          * We've been once around the RAM and haven't found anything.
2242          * Give up.
2243          */
2244         *again = false;
2245         return false;
2246     }
2247     if ((pss->page << TARGET_PAGE_BITS) >= pss->block->used_length) {
2248         /* Didn't find anything in this RAM Block */
2249         pss->page = 0;
2250         pss->block = QLIST_NEXT_RCU(pss->block, next);
2251         if (!pss->block) {
2252             /*
2253              * If memory migration starts over, we will meet a dirtied page
2254              * which may still exists in compression threads's ring, so we
2255              * should flush the compressed data to make sure the new page
2256              * is not overwritten by the old one in the destination.
2257              *
2258              * Also If xbzrle is on, stop using the data compression at this
2259              * point. In theory, xbzrle can do better than compression.
2260              */
2261             flush_compressed_data(rs);
2262 
2263             /* Hit the end of the list */
2264             pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
2265             /* Flag that we've looped */
2266             pss->complete_round = true;
2267             rs->ram_bulk_stage = false;
2268         }
2269         /* Didn't find anything this time, but try again on the new block */
2270         *again = true;
2271         return false;
2272     } else {
2273         /* Can go around again, but... */
2274         *again = true;
2275         /* We've found something so probably don't need to */
2276         return true;
2277     }
2278 }
2279 
2280 /**
2281  * unqueue_page: gets a page of the queue
2282  *
2283  * Helper for 'get_queued_page' - gets a page off the queue
2284  *
2285  * Returns the block of the page (or NULL if none available)
2286  *
2287  * @rs: current RAM state
2288  * @offset: used to return the offset within the RAMBlock
2289  */
2290 static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
2291 {
2292     RAMBlock *block = NULL;
2293 
2294     if (QSIMPLEQ_EMPTY_ATOMIC(&rs->src_page_requests)) {
2295         return NULL;
2296     }
2297 
2298     qemu_mutex_lock(&rs->src_page_req_mutex);
2299     if (!QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
2300         struct RAMSrcPageRequest *entry =
2301                                 QSIMPLEQ_FIRST(&rs->src_page_requests);
2302         block = entry->rb;
2303         *offset = entry->offset;
2304 
2305         if (entry->len > TARGET_PAGE_SIZE) {
2306             entry->len -= TARGET_PAGE_SIZE;
2307             entry->offset += TARGET_PAGE_SIZE;
2308         } else {
2309             memory_region_unref(block->mr);
2310             QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
2311             g_free(entry);
2312             migration_consume_urgent_request();
2313         }
2314     }
2315     qemu_mutex_unlock(&rs->src_page_req_mutex);
2316 
2317     return block;
2318 }
2319 
2320 /**
2321  * get_queued_page: unqueue a page from the postcopy requests
2322  *
2323  * Skips pages that are already sent (!dirty)
2324  *
2325  * Returns true if a queued page is found
2326  *
2327  * @rs: current RAM state
2328  * @pss: data about the state of the current dirty page scan
2329  */
2330 static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
2331 {
2332     RAMBlock  *block;
2333     ram_addr_t offset;
2334     bool dirty;
2335 
2336     do {
2337         block = unqueue_page(rs, &offset);
2338         /*
2339          * We're sending this page, and since it's postcopy nothing else
2340          * will dirty it, and we must make sure it doesn't get sent again
2341          * even if this queue request was received after the background
2342          * search already sent it.
2343          */
2344         if (block) {
2345             unsigned long page;
2346 
2347             page = offset >> TARGET_PAGE_BITS;
2348             dirty = test_bit(page, block->bmap);
2349             if (!dirty) {
2350                 trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset,
2351                        page, test_bit(page, block->unsentmap));
2352             } else {
2353                 trace_get_queued_page(block->idstr, (uint64_t)offset, page);
2354             }
2355         }
2356 
2357     } while (block && !dirty);
2358 
2359     if (block) {
2360         /*
2361          * As soon as we start servicing pages out of order, then we have
2362          * to kill the bulk stage, since the bulk stage assumes
2363          * in (migration_bitmap_find_and_reset_dirty) that every page is
2364          * dirty, that's no longer true.
2365          */
2366         rs->ram_bulk_stage = false;
2367 
2368         /*
2369          * We want the background search to continue from the queued page
2370          * since the guest is likely to want other pages near to the page
2371          * it just requested.
2372          */
2373         pss->block = block;
2374         pss->page = offset >> TARGET_PAGE_BITS;
2375 
2376         /*
2377          * This unqueued page would break the "one round" check, even is
2378          * really rare.
2379          */
2380         pss->complete_round = false;
2381     }
2382 
2383     return !!block;
2384 }
2385 
2386 /**
2387  * migration_page_queue_free: drop any remaining pages in the ram
2388  * request queue
2389  *
2390  * It should be empty at the end anyway, but in error cases there may
2391  * be some left.  in case that there is any page left, we drop it.
2392  *
2393  */
2394 static void migration_page_queue_free(RAMState *rs)
2395 {
2396     struct RAMSrcPageRequest *mspr, *next_mspr;
2397     /* This queue generally should be empty - but in the case of a failed
2398      * migration might have some droppings in.
2399      */
2400     rcu_read_lock();
2401     QSIMPLEQ_FOREACH_SAFE(mspr, &rs->src_page_requests, next_req, next_mspr) {
2402         memory_region_unref(mspr->rb->mr);
2403         QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
2404         g_free(mspr);
2405     }
2406     rcu_read_unlock();
2407 }
2408 
2409 /**
2410  * ram_save_queue_pages: queue the page for transmission
2411  *
2412  * A request from postcopy destination for example.
2413  *
2414  * Returns zero on success or negative on error
2415  *
2416  * @rbname: Name of the RAMBLock of the request. NULL means the
2417  *          same that last one.
2418  * @start: starting address from the start of the RAMBlock
2419  * @len: length (in bytes) to send
2420  */
2421 int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len)
2422 {
2423     RAMBlock *ramblock;
2424     RAMState *rs = ram_state;
2425 
2426     ram_counters.postcopy_requests++;
2427     rcu_read_lock();
2428     if (!rbname) {
2429         /* Reuse last RAMBlock */
2430         ramblock = rs->last_req_rb;
2431 
2432         if (!ramblock) {
2433             /*
2434              * Shouldn't happen, we can't reuse the last RAMBlock if
2435              * it's the 1st request.
2436              */
2437             error_report("ram_save_queue_pages no previous block");
2438             goto err;
2439         }
2440     } else {
2441         ramblock = qemu_ram_block_by_name(rbname);
2442 
2443         if (!ramblock) {
2444             /* We shouldn't be asked for a non-existent RAMBlock */
2445             error_report("ram_save_queue_pages no block '%s'", rbname);
2446             goto err;
2447         }
2448         rs->last_req_rb = ramblock;
2449     }
2450     trace_ram_save_queue_pages(ramblock->idstr, start, len);
2451     if (start+len > ramblock->used_length) {
2452         error_report("%s request overrun start=" RAM_ADDR_FMT " len="
2453                      RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT,
2454                      __func__, start, len, ramblock->used_length);
2455         goto err;
2456     }
2457 
2458     struct RAMSrcPageRequest *new_entry =
2459         g_malloc0(sizeof(struct RAMSrcPageRequest));
2460     new_entry->rb = ramblock;
2461     new_entry->offset = start;
2462     new_entry->len = len;
2463 
2464     memory_region_ref(ramblock->mr);
2465     qemu_mutex_lock(&rs->src_page_req_mutex);
2466     QSIMPLEQ_INSERT_TAIL(&rs->src_page_requests, new_entry, next_req);
2467     migration_make_urgent_request();
2468     qemu_mutex_unlock(&rs->src_page_req_mutex);
2469     rcu_read_unlock();
2470 
2471     return 0;
2472 
2473 err:
2474     rcu_read_unlock();
2475     return -1;
2476 }
2477 
2478 static bool save_page_use_compression(RAMState *rs)
2479 {
2480     if (!migrate_use_compression()) {
2481         return false;
2482     }
2483 
2484     /*
2485      * If xbzrle is on, stop using the data compression after first
2486      * round of migration even if compression is enabled. In theory,
2487      * xbzrle can do better than compression.
2488      */
2489     if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
2490         return true;
2491     }
2492 
2493     return false;
2494 }
2495 
2496 /*
2497  * try to compress the page before posting it out, return true if the page
2498  * has been properly handled by compression, otherwise needs other
2499  * paths to handle it
2500  */
2501 static bool save_compress_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
2502 {
2503     if (!save_page_use_compression(rs)) {
2504         return false;
2505     }
2506 
2507     /*
2508      * When starting the process of a new block, the first page of
2509      * the block should be sent out before other pages in the same
2510      * block, and all the pages in last block should have been sent
2511      * out, keeping this order is important, because the 'cont' flag
2512      * is used to avoid resending the block name.
2513      *
2514      * We post the fist page as normal page as compression will take
2515      * much CPU resource.
2516      */
2517     if (block != rs->last_sent_block) {
2518         flush_compressed_data(rs);
2519         return false;
2520     }
2521 
2522     if (compress_page_with_multi_thread(rs, block, offset) > 0) {
2523         return true;
2524     }
2525 
2526     compression_counters.busy++;
2527     return false;
2528 }
2529 
2530 /**
2531  * ram_save_target_page: save one target page
2532  *
2533  * Returns the number of pages written
2534  *
2535  * @rs: current RAM state
2536  * @pss: data about the page we want to send
2537  * @last_stage: if we are at the completion stage
2538  */
2539 static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
2540                                 bool last_stage)
2541 {
2542     RAMBlock *block = pss->block;
2543     ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
2544     int res;
2545 
2546     if (control_save_page(rs, block, offset, &res)) {
2547         return res;
2548     }
2549 
2550     if (save_compress_page(rs, block, offset)) {
2551         return 1;
2552     }
2553 
2554     res = save_zero_page(rs, block, offset);
2555     if (res > 0) {
2556         /* Must let xbzrle know, otherwise a previous (now 0'd) cached
2557          * page would be stale
2558          */
2559         if (!save_page_use_compression(rs)) {
2560             XBZRLE_cache_lock();
2561             xbzrle_cache_zero_page(rs, block->offset + offset);
2562             XBZRLE_cache_unlock();
2563         }
2564         ram_release_pages(block->idstr, offset, res);
2565         return res;
2566     }
2567 
2568     /*
2569      * do not use multifd for compression as the first page in the new
2570      * block should be posted out before sending the compressed page
2571      */
2572     if (!save_page_use_compression(rs) && migrate_use_multifd()) {
2573         return ram_save_multifd_page(rs, block, offset);
2574     }
2575 
2576     return ram_save_page(rs, pss, last_stage);
2577 }
2578 
2579 /**
2580  * ram_save_host_page: save a whole host page
2581  *
2582  * Starting at *offset send pages up to the end of the current host
2583  * page. It's valid for the initial offset to point into the middle of
2584  * a host page in which case the remainder of the hostpage is sent.
2585  * Only dirty target pages are sent. Note that the host page size may
2586  * be a huge page for this block.
2587  * The saving stops at the boundary of the used_length of the block
2588  * if the RAMBlock isn't a multiple of the host page size.
2589  *
2590  * Returns the number of pages written or negative on error
2591  *
2592  * @rs: current RAM state
2593  * @ms: current migration state
2594  * @pss: data about the page we want to send
2595  * @last_stage: if we are at the completion stage
2596  */
2597 static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
2598                               bool last_stage)
2599 {
2600     int tmppages, pages = 0;
2601     size_t pagesize_bits =
2602         qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
2603 
2604     if (ramblock_is_ignored(pss->block)) {
2605         error_report("block %s should not be migrated !", pss->block->idstr);
2606         return 0;
2607     }
2608 
2609     do {
2610         /* Check the pages is dirty and if it is send it */
2611         if (!migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
2612             pss->page++;
2613             continue;
2614         }
2615 
2616         tmppages = ram_save_target_page(rs, pss, last_stage);
2617         if (tmppages < 0) {
2618             return tmppages;
2619         }
2620 
2621         pages += tmppages;
2622         if (pss->block->unsentmap) {
2623             clear_bit(pss->page, pss->block->unsentmap);
2624         }
2625 
2626         pss->page++;
2627     } while ((pss->page & (pagesize_bits - 1)) &&
2628              offset_in_ramblock(pss->block, pss->page << TARGET_PAGE_BITS));
2629 
2630     /* The offset we leave with is the last one we looked at */
2631     pss->page--;
2632     return pages;
2633 }
2634 
2635 /**
2636  * ram_find_and_save_block: finds a dirty page and sends it to f
2637  *
2638  * Called within an RCU critical section.
2639  *
2640  * Returns the number of pages written where zero means no dirty pages,
2641  * or negative on error
2642  *
2643  * @rs: current RAM state
2644  * @last_stage: if we are at the completion stage
2645  *
2646  * On systems where host-page-size > target-page-size it will send all the
2647  * pages in a host page that are dirty.
2648  */
2649 
2650 static int ram_find_and_save_block(RAMState *rs, bool last_stage)
2651 {
2652     PageSearchStatus pss;
2653     int pages = 0;
2654     bool again, found;
2655 
2656     /* No dirty page as there is zero RAM */
2657     if (!ram_bytes_total()) {
2658         return pages;
2659     }
2660 
2661     pss.block = rs->last_seen_block;
2662     pss.page = rs->last_page;
2663     pss.complete_round = false;
2664 
2665     if (!pss.block) {
2666         pss.block = QLIST_FIRST_RCU(&ram_list.blocks);
2667     }
2668 
2669     do {
2670         again = true;
2671         found = get_queued_page(rs, &pss);
2672 
2673         if (!found) {
2674             /* priority queue empty, so just search for something dirty */
2675             found = find_dirty_block(rs, &pss, &again);
2676         }
2677 
2678         if (found) {
2679             pages = ram_save_host_page(rs, &pss, last_stage);
2680         }
2681     } while (!pages && again);
2682 
2683     rs->last_seen_block = pss.block;
2684     rs->last_page = pss.page;
2685 
2686     return pages;
2687 }
2688 
2689 void acct_update_position(QEMUFile *f, size_t size, bool zero)
2690 {
2691     uint64_t pages = size / TARGET_PAGE_SIZE;
2692 
2693     if (zero) {
2694         ram_counters.duplicate += pages;
2695     } else {
2696         ram_counters.normal += pages;
2697         ram_counters.transferred += size;
2698         qemu_update_position(f, size);
2699     }
2700 }
2701 
2702 static uint64_t ram_bytes_total_common(bool count_ignored)
2703 {
2704     RAMBlock *block;
2705     uint64_t total = 0;
2706 
2707     rcu_read_lock();
2708     if (count_ignored) {
2709         RAMBLOCK_FOREACH_MIGRATABLE(block) {
2710             total += block->used_length;
2711         }
2712     } else {
2713         RAMBLOCK_FOREACH_NOT_IGNORED(block) {
2714             total += block->used_length;
2715         }
2716     }
2717     rcu_read_unlock();
2718     return total;
2719 }
2720 
2721 uint64_t ram_bytes_total(void)
2722 {
2723     return ram_bytes_total_common(false);
2724 }
2725 
2726 static void xbzrle_load_setup(void)
2727 {
2728     XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
2729 }
2730 
2731 static void xbzrle_load_cleanup(void)
2732 {
2733     g_free(XBZRLE.decoded_buf);
2734     XBZRLE.decoded_buf = NULL;
2735 }
2736 
2737 static void ram_state_cleanup(RAMState **rsp)
2738 {
2739     if (*rsp) {
2740         migration_page_queue_free(*rsp);
2741         qemu_mutex_destroy(&(*rsp)->bitmap_mutex);
2742         qemu_mutex_destroy(&(*rsp)->src_page_req_mutex);
2743         g_free(*rsp);
2744         *rsp = NULL;
2745     }
2746 }
2747 
2748 static void xbzrle_cleanup(void)
2749 {
2750     XBZRLE_cache_lock();
2751     if (XBZRLE.cache) {
2752         cache_fini(XBZRLE.cache);
2753         g_free(XBZRLE.encoded_buf);
2754         g_free(XBZRLE.current_buf);
2755         g_free(XBZRLE.zero_target_page);
2756         XBZRLE.cache = NULL;
2757         XBZRLE.encoded_buf = NULL;
2758         XBZRLE.current_buf = NULL;
2759         XBZRLE.zero_target_page = NULL;
2760     }
2761     XBZRLE_cache_unlock();
2762 }
2763 
2764 static void ram_save_cleanup(void *opaque)
2765 {
2766     RAMState **rsp = opaque;
2767     RAMBlock *block;
2768 
2769     /* caller have hold iothread lock or is in a bh, so there is
2770      * no writing race against the migration bitmap
2771      */
2772     memory_global_dirty_log_stop();
2773 
2774     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
2775         g_free(block->clear_bmap);
2776         block->clear_bmap = NULL;
2777         g_free(block->bmap);
2778         block->bmap = NULL;
2779         g_free(block->unsentmap);
2780         block->unsentmap = NULL;
2781     }
2782 
2783     xbzrle_cleanup();
2784     compress_threads_save_cleanup();
2785     ram_state_cleanup(rsp);
2786 }
2787 
2788 static void ram_state_reset(RAMState *rs)
2789 {
2790     rs->last_seen_block = NULL;
2791     rs->last_sent_block = NULL;
2792     rs->last_page = 0;
2793     rs->last_version = ram_list.version;
2794     rs->ram_bulk_stage = true;
2795     rs->fpo_enabled = false;
2796 }
2797 
2798 #define MAX_WAIT 50 /* ms, half buffered_file limit */
2799 
2800 /*
2801  * 'expected' is the value you expect the bitmap mostly to be full
2802  * of; it won't bother printing lines that are all this value.
2803  * If 'todump' is null the migration bitmap is dumped.
2804  */
2805 void ram_debug_dump_bitmap(unsigned long *todump, bool expected,
2806                            unsigned long pages)
2807 {
2808     int64_t cur;
2809     int64_t linelen = 128;
2810     char linebuf[129];
2811 
2812     for (cur = 0; cur < pages; cur += linelen) {
2813         int64_t curb;
2814         bool found = false;
2815         /*
2816          * Last line; catch the case where the line length
2817          * is longer than remaining ram
2818          */
2819         if (cur + linelen > pages) {
2820             linelen = pages - cur;
2821         }
2822         for (curb = 0; curb < linelen; curb++) {
2823             bool thisbit = test_bit(cur + curb, todump);
2824             linebuf[curb] = thisbit ? '1' : '.';
2825             found = found || (thisbit != expected);
2826         }
2827         if (found) {
2828             linebuf[curb] = '\0';
2829             fprintf(stderr,  "0x%08" PRIx64 " : %s\n", cur, linebuf);
2830         }
2831     }
2832 }
2833 
2834 /* **** functions for postcopy ***** */
2835 
2836 void ram_postcopy_migrated_memory_release(MigrationState *ms)
2837 {
2838     struct RAMBlock *block;
2839 
2840     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
2841         unsigned long *bitmap = block->bmap;
2842         unsigned long range = block->used_length >> TARGET_PAGE_BITS;
2843         unsigned long run_start = find_next_zero_bit(bitmap, range, 0);
2844 
2845         while (run_start < range) {
2846             unsigned long run_end = find_next_bit(bitmap, range, run_start + 1);
2847             ram_discard_range(block->idstr, run_start << TARGET_PAGE_BITS,
2848                               (run_end - run_start) << TARGET_PAGE_BITS);
2849             run_start = find_next_zero_bit(bitmap, range, run_end + 1);
2850         }
2851     }
2852 }
2853 
2854 /**
2855  * postcopy_send_discard_bm_ram: discard a RAMBlock
2856  *
2857  * Returns zero on success
2858  *
2859  * Callback from postcopy_each_ram_send_discard for each RAMBlock
2860  * Note: At this point the 'unsentmap' is the processed bitmap combined
2861  *       with the dirtymap; so a '1' means it's either dirty or unsent.
2862  *
2863  * @ms: current migration state
2864  * @block: RAMBlock to discard
2865  */
2866 static int postcopy_send_discard_bm_ram(MigrationState *ms, RAMBlock *block)
2867 {
2868     unsigned long end = block->used_length >> TARGET_PAGE_BITS;
2869     unsigned long current;
2870     unsigned long *unsentmap = block->unsentmap;
2871 
2872     for (current = 0; current < end; ) {
2873         unsigned long one = find_next_bit(unsentmap, end, current);
2874         unsigned long zero, discard_length;
2875 
2876         if (one >= end) {
2877             break;
2878         }
2879 
2880         zero = find_next_zero_bit(unsentmap, end, one + 1);
2881 
2882         if (zero >= end) {
2883             discard_length = end - one;
2884         } else {
2885             discard_length = zero - one;
2886         }
2887         postcopy_discard_send_range(ms, one, discard_length);
2888         current = one + discard_length;
2889     }
2890 
2891     return 0;
2892 }
2893 
2894 /**
2895  * postcopy_each_ram_send_discard: discard all RAMBlocks
2896  *
2897  * Returns 0 for success or negative for error
2898  *
2899  * Utility for the outgoing postcopy code.
2900  *   Calls postcopy_send_discard_bm_ram for each RAMBlock
2901  *   passing it bitmap indexes and name.
2902  * (qemu_ram_foreach_block ends up passing unscaled lengths
2903  *  which would mean postcopy code would have to deal with target page)
2904  *
2905  * @ms: current migration state
2906  */
2907 static int postcopy_each_ram_send_discard(MigrationState *ms)
2908 {
2909     struct RAMBlock *block;
2910     int ret;
2911 
2912     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
2913         postcopy_discard_send_init(ms, block->idstr);
2914 
2915         /*
2916          * Postcopy sends chunks of bitmap over the wire, but it
2917          * just needs indexes at this point, avoids it having
2918          * target page specific code.
2919          */
2920         ret = postcopy_send_discard_bm_ram(ms, block);
2921         postcopy_discard_send_finish(ms);
2922         if (ret) {
2923             return ret;
2924         }
2925     }
2926 
2927     return 0;
2928 }
2929 
2930 /**
2931  * postcopy_chunk_hostpages_pass: canocalize bitmap in hostpages
2932  *
2933  * Helper for postcopy_chunk_hostpages; it's called twice to
2934  * canonicalize the two bitmaps, that are similar, but one is
2935  * inverted.
2936  *
2937  * Postcopy requires that all target pages in a hostpage are dirty or
2938  * clean, not a mix.  This function canonicalizes the bitmaps.
2939  *
2940  * @ms: current migration state
2941  * @unsent_pass: if true we need to canonicalize partially unsent host pages
2942  *               otherwise we need to canonicalize partially dirty host pages
2943  * @block: block that contains the page we want to canonicalize
2944  */
2945 static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
2946                                           RAMBlock *block)
2947 {
2948     RAMState *rs = ram_state;
2949     unsigned long *bitmap = block->bmap;
2950     unsigned long *unsentmap = block->unsentmap;
2951     unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE;
2952     unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
2953     unsigned long run_start;
2954 
2955     if (block->page_size == TARGET_PAGE_SIZE) {
2956         /* Easy case - TPS==HPS for a non-huge page RAMBlock */
2957         return;
2958     }
2959 
2960     if (unsent_pass) {
2961         /* Find a sent page */
2962         run_start = find_next_zero_bit(unsentmap, pages, 0);
2963     } else {
2964         /* Find a dirty page */
2965         run_start = find_next_bit(bitmap, pages, 0);
2966     }
2967 
2968     while (run_start < pages) {
2969 
2970         /*
2971          * If the start of this run of pages is in the middle of a host
2972          * page, then we need to fixup this host page.
2973          */
2974         if (QEMU_IS_ALIGNED(run_start, host_ratio)) {
2975             /* Find the end of this run */
2976             if (unsent_pass) {
2977                 run_start = find_next_bit(unsentmap, pages, run_start + 1);
2978             } else {
2979                 run_start = find_next_zero_bit(bitmap, pages, run_start + 1);
2980             }
2981             /*
2982              * If the end isn't at the start of a host page, then the
2983              * run doesn't finish at the end of a host page
2984              * and we need to discard.
2985              */
2986         }
2987 
2988         if (!QEMU_IS_ALIGNED(run_start, host_ratio)) {
2989             unsigned long page;
2990             unsigned long fixup_start_addr = QEMU_ALIGN_DOWN(run_start,
2991                                                              host_ratio);
2992             run_start = QEMU_ALIGN_UP(run_start, host_ratio);
2993 
2994             /* Tell the destination to discard this page */
2995             if (unsent_pass || !test_bit(fixup_start_addr, unsentmap)) {
2996                 /* For the unsent_pass we:
2997                  *     discard partially sent pages
2998                  * For the !unsent_pass (dirty) we:
2999                  *     discard partially dirty pages that were sent
3000                  *     (any partially sent pages were already discarded
3001                  *     by the previous unsent_pass)
3002                  */
3003                 postcopy_discard_send_range(ms, fixup_start_addr, host_ratio);
3004             }
3005 
3006             /* Clean up the bitmap */
3007             for (page = fixup_start_addr;
3008                  page < fixup_start_addr + host_ratio; page++) {
3009                 /* All pages in this host page are now not sent */
3010                 set_bit(page, unsentmap);
3011 
3012                 /*
3013                  * Remark them as dirty, updating the count for any pages
3014                  * that weren't previously dirty.
3015                  */
3016                 rs->migration_dirty_pages += !test_and_set_bit(page, bitmap);
3017             }
3018         }
3019 
3020         if (unsent_pass) {
3021             /* Find the next sent page for the next iteration */
3022             run_start = find_next_zero_bit(unsentmap, pages, run_start);
3023         } else {
3024             /* Find the next dirty page for the next iteration */
3025             run_start = find_next_bit(bitmap, pages, run_start);
3026         }
3027     }
3028 }
3029 
3030 /**
3031  * postcopy_chunk_hostpages: discard any partially sent host page
3032  *
3033  * Utility for the outgoing postcopy code.
3034  *
3035  * Discard any partially sent host-page size chunks, mark any partially
3036  * dirty host-page size chunks as all dirty.  In this case the host-page
3037  * is the host-page for the particular RAMBlock, i.e. it might be a huge page
3038  *
3039  * Returns zero on success
3040  *
3041  * @ms: current migration state
3042  * @block: block we want to work with
3043  */
3044 static int postcopy_chunk_hostpages(MigrationState *ms, RAMBlock *block)
3045 {
3046     postcopy_discard_send_init(ms, block->idstr);
3047 
3048     /* First pass: Discard all partially sent host pages */
3049     postcopy_chunk_hostpages_pass(ms, true, block);
3050     /*
3051      * Second pass: Ensure that all partially dirty host pages are made
3052      * fully dirty.
3053      */
3054     postcopy_chunk_hostpages_pass(ms, false, block);
3055 
3056     postcopy_discard_send_finish(ms);
3057     return 0;
3058 }
3059 
3060 /**
3061  * ram_postcopy_send_discard_bitmap: transmit the discard bitmap
3062  *
3063  * Returns zero on success
3064  *
3065  * Transmit the set of pages to be discarded after precopy to the target
3066  * these are pages that:
3067  *     a) Have been previously transmitted but are now dirty again
3068  *     b) Pages that have never been transmitted, this ensures that
3069  *        any pages on the destination that have been mapped by background
3070  *        tasks get discarded (transparent huge pages is the specific concern)
3071  * Hopefully this is pretty sparse
3072  *
3073  * @ms: current migration state
3074  */
3075 int ram_postcopy_send_discard_bitmap(MigrationState *ms)
3076 {
3077     RAMState *rs = ram_state;
3078     RAMBlock *block;
3079     int ret;
3080 
3081     rcu_read_lock();
3082 
3083     /* This should be our last sync, the src is now paused */
3084     migration_bitmap_sync(rs);
3085 
3086     /* Easiest way to make sure we don't resume in the middle of a host-page */
3087     rs->last_seen_block = NULL;
3088     rs->last_sent_block = NULL;
3089     rs->last_page = 0;
3090 
3091     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3092         unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
3093         unsigned long *bitmap = block->bmap;
3094         unsigned long *unsentmap = block->unsentmap;
3095 
3096         if (!unsentmap) {
3097             /* We don't have a safe way to resize the sentmap, so
3098              * if the bitmap was resized it will be NULL at this
3099              * point.
3100              */
3101             error_report("migration ram resized during precopy phase");
3102             rcu_read_unlock();
3103             return -EINVAL;
3104         }
3105         /* Deal with TPS != HPS and huge pages */
3106         ret = postcopy_chunk_hostpages(ms, block);
3107         if (ret) {
3108             rcu_read_unlock();
3109             return ret;
3110         }
3111 
3112         /*
3113          * Update the unsentmap to be unsentmap = unsentmap | dirty
3114          */
3115         bitmap_or(unsentmap, unsentmap, bitmap, pages);
3116 #ifdef DEBUG_POSTCOPY
3117         ram_debug_dump_bitmap(unsentmap, true, pages);
3118 #endif
3119     }
3120     trace_ram_postcopy_send_discard_bitmap();
3121 
3122     ret = postcopy_each_ram_send_discard(ms);
3123     rcu_read_unlock();
3124 
3125     return ret;
3126 }
3127 
3128 /**
3129  * ram_discard_range: discard dirtied pages at the beginning of postcopy
3130  *
3131  * Returns zero on success
3132  *
3133  * @rbname: name of the RAMBlock of the request. NULL means the
3134  *          same that last one.
3135  * @start: RAMBlock starting page
3136  * @length: RAMBlock size
3137  */
3138 int ram_discard_range(const char *rbname, uint64_t start, size_t length)
3139 {
3140     int ret = -1;
3141 
3142     trace_ram_discard_range(rbname, start, length);
3143 
3144     rcu_read_lock();
3145     RAMBlock *rb = qemu_ram_block_by_name(rbname);
3146 
3147     if (!rb) {
3148         error_report("ram_discard_range: Failed to find block '%s'", rbname);
3149         goto err;
3150     }
3151 
3152     /*
3153      * On source VM, we don't need to update the received bitmap since
3154      * we don't even have one.
3155      */
3156     if (rb->receivedmap) {
3157         bitmap_clear(rb->receivedmap, start >> qemu_target_page_bits(),
3158                      length >> qemu_target_page_bits());
3159     }
3160 
3161     ret = ram_block_discard_range(rb, start, length);
3162 
3163 err:
3164     rcu_read_unlock();
3165 
3166     return ret;
3167 }
3168 
3169 /*
3170  * For every allocation, we will try not to crash the VM if the
3171  * allocation failed.
3172  */
3173 static int xbzrle_init(void)
3174 {
3175     Error *local_err = NULL;
3176 
3177     if (!migrate_use_xbzrle()) {
3178         return 0;
3179     }
3180 
3181     XBZRLE_cache_lock();
3182 
3183     XBZRLE.zero_target_page = g_try_malloc0(TARGET_PAGE_SIZE);
3184     if (!XBZRLE.zero_target_page) {
3185         error_report("%s: Error allocating zero page", __func__);
3186         goto err_out;
3187     }
3188 
3189     XBZRLE.cache = cache_init(migrate_xbzrle_cache_size(),
3190                               TARGET_PAGE_SIZE, &local_err);
3191     if (!XBZRLE.cache) {
3192         error_report_err(local_err);
3193         goto free_zero_page;
3194     }
3195 
3196     XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
3197     if (!XBZRLE.encoded_buf) {
3198         error_report("%s: Error allocating encoded_buf", __func__);
3199         goto free_cache;
3200     }
3201 
3202     XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
3203     if (!XBZRLE.current_buf) {
3204         error_report("%s: Error allocating current_buf", __func__);
3205         goto free_encoded_buf;
3206     }
3207 
3208     /* We are all good */
3209     XBZRLE_cache_unlock();
3210     return 0;
3211 
3212 free_encoded_buf:
3213     g_free(XBZRLE.encoded_buf);
3214     XBZRLE.encoded_buf = NULL;
3215 free_cache:
3216     cache_fini(XBZRLE.cache);
3217     XBZRLE.cache = NULL;
3218 free_zero_page:
3219     g_free(XBZRLE.zero_target_page);
3220     XBZRLE.zero_target_page = NULL;
3221 err_out:
3222     XBZRLE_cache_unlock();
3223     return -ENOMEM;
3224 }
3225 
3226 static int ram_state_init(RAMState **rsp)
3227 {
3228     *rsp = g_try_new0(RAMState, 1);
3229 
3230     if (!*rsp) {
3231         error_report("%s: Init ramstate fail", __func__);
3232         return -1;
3233     }
3234 
3235     qemu_mutex_init(&(*rsp)->bitmap_mutex);
3236     qemu_mutex_init(&(*rsp)->src_page_req_mutex);
3237     QSIMPLEQ_INIT(&(*rsp)->src_page_requests);
3238 
3239     /*
3240      * Count the total number of pages used by ram blocks not including any
3241      * gaps due to alignment or unplugs.
3242      * This must match with the initial values of dirty bitmap.
3243      */
3244     (*rsp)->migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
3245     ram_state_reset(*rsp);
3246 
3247     return 0;
3248 }
3249 
3250 static void ram_list_init_bitmaps(void)
3251 {
3252     MigrationState *ms = migrate_get_current();
3253     RAMBlock *block;
3254     unsigned long pages;
3255     uint8_t shift;
3256 
3257     /* Skip setting bitmap if there is no RAM */
3258     if (ram_bytes_total()) {
3259         shift = ms->clear_bitmap_shift;
3260         if (shift > CLEAR_BITMAP_SHIFT_MAX) {
3261             error_report("clear_bitmap_shift (%u) too big, using "
3262                          "max value (%u)", shift, CLEAR_BITMAP_SHIFT_MAX);
3263             shift = CLEAR_BITMAP_SHIFT_MAX;
3264         } else if (shift < CLEAR_BITMAP_SHIFT_MIN) {
3265             error_report("clear_bitmap_shift (%u) too small, using "
3266                          "min value (%u)", shift, CLEAR_BITMAP_SHIFT_MIN);
3267             shift = CLEAR_BITMAP_SHIFT_MIN;
3268         }
3269 
3270         RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3271             pages = block->max_length >> TARGET_PAGE_BITS;
3272             /*
3273              * The initial dirty bitmap for migration must be set with all
3274              * ones to make sure we'll migrate every guest RAM page to
3275              * destination.
3276              * Here we set RAMBlock.bmap all to 1 because when rebegin a
3277              * new migration after a failed migration, ram_list.
3278              * dirty_memory[DIRTY_MEMORY_MIGRATION] don't include the whole
3279              * guest memory.
3280              */
3281             block->bmap = bitmap_new(pages);
3282             bitmap_set(block->bmap, 0, pages);
3283             block->clear_bmap_shift = shift;
3284             block->clear_bmap = bitmap_new(clear_bmap_size(pages, shift));
3285             if (migrate_postcopy_ram()) {
3286                 block->unsentmap = bitmap_new(pages);
3287                 bitmap_set(block->unsentmap, 0, pages);
3288             }
3289         }
3290     }
3291 }
3292 
3293 static void ram_init_bitmaps(RAMState *rs)
3294 {
3295     /* For memory_global_dirty_log_start below.  */
3296     qemu_mutex_lock_iothread();
3297     qemu_mutex_lock_ramlist();
3298     rcu_read_lock();
3299 
3300     ram_list_init_bitmaps();
3301     memory_global_dirty_log_start();
3302     migration_bitmap_sync_precopy(rs);
3303 
3304     rcu_read_unlock();
3305     qemu_mutex_unlock_ramlist();
3306     qemu_mutex_unlock_iothread();
3307 }
3308 
3309 static int ram_init_all(RAMState **rsp)
3310 {
3311     if (ram_state_init(rsp)) {
3312         return -1;
3313     }
3314 
3315     if (xbzrle_init()) {
3316         ram_state_cleanup(rsp);
3317         return -1;
3318     }
3319 
3320     ram_init_bitmaps(*rsp);
3321 
3322     return 0;
3323 }
3324 
3325 static void ram_state_resume_prepare(RAMState *rs, QEMUFile *out)
3326 {
3327     RAMBlock *block;
3328     uint64_t pages = 0;
3329 
3330     /*
3331      * Postcopy is not using xbzrle/compression, so no need for that.
3332      * Also, since source are already halted, we don't need to care
3333      * about dirty page logging as well.
3334      */
3335 
3336     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3337         pages += bitmap_count_one(block->bmap,
3338                                   block->used_length >> TARGET_PAGE_BITS);
3339     }
3340 
3341     /* This may not be aligned with current bitmaps. Recalculate. */
3342     rs->migration_dirty_pages = pages;
3343 
3344     rs->last_seen_block = NULL;
3345     rs->last_sent_block = NULL;
3346     rs->last_page = 0;
3347     rs->last_version = ram_list.version;
3348     /*
3349      * Disable the bulk stage, otherwise we'll resend the whole RAM no
3350      * matter what we have sent.
3351      */
3352     rs->ram_bulk_stage = false;
3353 
3354     /* Update RAMState cache of output QEMUFile */
3355     rs->f = out;
3356 
3357     trace_ram_state_resume_prepare(pages);
3358 }
3359 
3360 /*
3361  * This function clears bits of the free pages reported by the caller from the
3362  * migration dirty bitmap. @addr is the host address corresponding to the
3363  * start of the continuous guest free pages, and @len is the total bytes of
3364  * those pages.
3365  */
3366 void qemu_guest_free_page_hint(void *addr, size_t len)
3367 {
3368     RAMBlock *block;
3369     ram_addr_t offset;
3370     size_t used_len, start, npages;
3371     MigrationState *s = migrate_get_current();
3372 
3373     /* This function is currently expected to be used during live migration */
3374     if (!migration_is_setup_or_active(s->state)) {
3375         return;
3376     }
3377 
3378     for (; len > 0; len -= used_len, addr += used_len) {
3379         block = qemu_ram_block_from_host(addr, false, &offset);
3380         if (unlikely(!block || offset >= block->used_length)) {
3381             /*
3382              * The implementation might not support RAMBlock resize during
3383              * live migration, but it could happen in theory with future
3384              * updates. So we add a check here to capture that case.
3385              */
3386             error_report_once("%s unexpected error", __func__);
3387             return;
3388         }
3389 
3390         if (len <= block->used_length - offset) {
3391             used_len = len;
3392         } else {
3393             used_len = block->used_length - offset;
3394         }
3395 
3396         start = offset >> TARGET_PAGE_BITS;
3397         npages = used_len >> TARGET_PAGE_BITS;
3398 
3399         qemu_mutex_lock(&ram_state->bitmap_mutex);
3400         ram_state->migration_dirty_pages -=
3401                       bitmap_count_one_with_offset(block->bmap, start, npages);
3402         bitmap_clear(block->bmap, start, npages);
3403         qemu_mutex_unlock(&ram_state->bitmap_mutex);
3404     }
3405 }
3406 
3407 /*
3408  * Each of ram_save_setup, ram_save_iterate and ram_save_complete has
3409  * long-running RCU critical section.  When rcu-reclaims in the code
3410  * start to become numerous it will be necessary to reduce the
3411  * granularity of these critical sections.
3412  */
3413 
3414 /**
3415  * ram_save_setup: Setup RAM for migration
3416  *
3417  * Returns zero to indicate success and negative for error
3418  *
3419  * @f: QEMUFile where to send the data
3420  * @opaque: RAMState pointer
3421  */
3422 static int ram_save_setup(QEMUFile *f, void *opaque)
3423 {
3424     RAMState **rsp = opaque;
3425     RAMBlock *block;
3426 
3427     if (compress_threads_save_setup()) {
3428         return -1;
3429     }
3430 
3431     /* migration has already setup the bitmap, reuse it. */
3432     if (!migration_in_colo_state()) {
3433         if (ram_init_all(rsp) != 0) {
3434             compress_threads_save_cleanup();
3435             return -1;
3436         }
3437     }
3438     (*rsp)->f = f;
3439 
3440     rcu_read_lock();
3441 
3442     qemu_put_be64(f, ram_bytes_total_common(true) | RAM_SAVE_FLAG_MEM_SIZE);
3443 
3444     RAMBLOCK_FOREACH_MIGRATABLE(block) {
3445         qemu_put_byte(f, strlen(block->idstr));
3446         qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
3447         qemu_put_be64(f, block->used_length);
3448         if (migrate_postcopy_ram() && block->page_size != qemu_host_page_size) {
3449             qemu_put_be64(f, block->page_size);
3450         }
3451         if (migrate_ignore_shared()) {
3452             qemu_put_be64(f, block->mr->addr);
3453         }
3454     }
3455 
3456     rcu_read_unlock();
3457 
3458     ram_control_before_iterate(f, RAM_CONTROL_SETUP);
3459     ram_control_after_iterate(f, RAM_CONTROL_SETUP);
3460 
3461     multifd_send_sync_main(*rsp);
3462     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
3463     qemu_fflush(f);
3464 
3465     return 0;
3466 }
3467 
3468 /**
3469  * ram_save_iterate: iterative stage for migration
3470  *
3471  * Returns zero to indicate success and negative for error
3472  *
3473  * @f: QEMUFile where to send the data
3474  * @opaque: RAMState pointer
3475  */
3476 static int ram_save_iterate(QEMUFile *f, void *opaque)
3477 {
3478     RAMState **temp = opaque;
3479     RAMState *rs = *temp;
3480     int ret;
3481     int i;
3482     int64_t t0;
3483     int done = 0;
3484 
3485     if (blk_mig_bulk_active()) {
3486         /* Avoid transferring ram during bulk phase of block migration as
3487          * the bulk phase will usually take a long time and transferring
3488          * ram updates during that time is pointless. */
3489         goto out;
3490     }
3491 
3492     rcu_read_lock();
3493     if (ram_list.version != rs->last_version) {
3494         ram_state_reset(rs);
3495     }
3496 
3497     /* Read version before ram_list.blocks */
3498     smp_rmb();
3499 
3500     ram_control_before_iterate(f, RAM_CONTROL_ROUND);
3501 
3502     t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
3503     i = 0;
3504     while ((ret = qemu_file_rate_limit(f)) == 0 ||
3505             !QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
3506         int pages;
3507 
3508         if (qemu_file_get_error(f)) {
3509             break;
3510         }
3511 
3512         pages = ram_find_and_save_block(rs, false);
3513         /* no more pages to sent */
3514         if (pages == 0) {
3515             done = 1;
3516             break;
3517         }
3518 
3519         if (pages < 0) {
3520             qemu_file_set_error(f, pages);
3521             break;
3522         }
3523 
3524         rs->target_page_count += pages;
3525 
3526         /* we want to check in the 1st loop, just in case it was the 1st time
3527            and we had to sync the dirty bitmap.
3528            qemu_clock_get_ns() is a bit expensive, so we only check each some
3529            iterations
3530         */
3531         if ((i & 63) == 0) {
3532             uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
3533             if (t1 > MAX_WAIT) {
3534                 trace_ram_save_iterate_big_wait(t1, i);
3535                 break;
3536             }
3537         }
3538         i++;
3539     }
3540     rcu_read_unlock();
3541 
3542     /*
3543      * Must occur before EOS (or any QEMUFile operation)
3544      * because of RDMA protocol.
3545      */
3546     ram_control_after_iterate(f, RAM_CONTROL_ROUND);
3547 
3548 out:
3549     multifd_send_sync_main(rs);
3550     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
3551     qemu_fflush(f);
3552     ram_counters.transferred += 8;
3553 
3554     ret = qemu_file_get_error(f);
3555     if (ret < 0) {
3556         return ret;
3557     }
3558 
3559     return done;
3560 }
3561 
3562 /**
3563  * ram_save_complete: function called to send the remaining amount of ram
3564  *
3565  * Returns zero to indicate success or negative on error
3566  *
3567  * Called with iothread lock
3568  *
3569  * @f: QEMUFile where to send the data
3570  * @opaque: RAMState pointer
3571  */
3572 static int ram_save_complete(QEMUFile *f, void *opaque)
3573 {
3574     RAMState **temp = opaque;
3575     RAMState *rs = *temp;
3576     int ret = 0;
3577 
3578     rcu_read_lock();
3579 
3580     if (!migration_in_postcopy()) {
3581         migration_bitmap_sync_precopy(rs);
3582     }
3583 
3584     ram_control_before_iterate(f, RAM_CONTROL_FINISH);
3585 
3586     /* try transferring iterative blocks of memory */
3587 
3588     /* flush all remaining blocks regardless of rate limiting */
3589     while (true) {
3590         int pages;
3591 
3592         pages = ram_find_and_save_block(rs, !migration_in_colo_state());
3593         /* no more blocks to sent */
3594         if (pages == 0) {
3595             break;
3596         }
3597         if (pages < 0) {
3598             ret = pages;
3599             break;
3600         }
3601     }
3602 
3603     flush_compressed_data(rs);
3604     ram_control_after_iterate(f, RAM_CONTROL_FINISH);
3605 
3606     rcu_read_unlock();
3607 
3608     multifd_send_sync_main(rs);
3609     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
3610     qemu_fflush(f);
3611 
3612     return ret;
3613 }
3614 
3615 static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
3616                              uint64_t *res_precopy_only,
3617                              uint64_t *res_compatible,
3618                              uint64_t *res_postcopy_only)
3619 {
3620     RAMState **temp = opaque;
3621     RAMState *rs = *temp;
3622     uint64_t remaining_size;
3623 
3624     remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
3625 
3626     if (!migration_in_postcopy() &&
3627         remaining_size < max_size) {
3628         qemu_mutex_lock_iothread();
3629         rcu_read_lock();
3630         migration_bitmap_sync_precopy(rs);
3631         rcu_read_unlock();
3632         qemu_mutex_unlock_iothread();
3633         remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
3634     }
3635 
3636     if (migrate_postcopy_ram()) {
3637         /* We can do postcopy, and all the data is postcopiable */
3638         *res_compatible += remaining_size;
3639     } else {
3640         *res_precopy_only += remaining_size;
3641     }
3642 }
3643 
3644 static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
3645 {
3646     unsigned int xh_len;
3647     int xh_flags;
3648     uint8_t *loaded_data;
3649 
3650     /* extract RLE header */
3651     xh_flags = qemu_get_byte(f);
3652     xh_len = qemu_get_be16(f);
3653 
3654     if (xh_flags != ENCODING_FLAG_XBZRLE) {
3655         error_report("Failed to load XBZRLE page - wrong compression!");
3656         return -1;
3657     }
3658 
3659     if (xh_len > TARGET_PAGE_SIZE) {
3660         error_report("Failed to load XBZRLE page - len overflow!");
3661         return -1;
3662     }
3663     loaded_data = XBZRLE.decoded_buf;
3664     /* load data and decode */
3665     /* it can change loaded_data to point to an internal buffer */
3666     qemu_get_buffer_in_place(f, &loaded_data, xh_len);
3667 
3668     /* decode RLE */
3669     if (xbzrle_decode_buffer(loaded_data, xh_len, host,
3670                              TARGET_PAGE_SIZE) == -1) {
3671         error_report("Failed to load XBZRLE page - decode error!");
3672         return -1;
3673     }
3674 
3675     return 0;
3676 }
3677 
3678 /**
3679  * ram_block_from_stream: read a RAMBlock id from the migration stream
3680  *
3681  * Must be called from within a rcu critical section.
3682  *
3683  * Returns a pointer from within the RCU-protected ram_list.
3684  *
3685  * @f: QEMUFile where to read the data from
3686  * @flags: Page flags (mostly to see if it's a continuation of previous block)
3687  */
3688 static inline RAMBlock *ram_block_from_stream(QEMUFile *f, int flags)
3689 {
3690     static RAMBlock *block = NULL;
3691     char id[256];
3692     uint8_t len;
3693 
3694     if (flags & RAM_SAVE_FLAG_CONTINUE) {
3695         if (!block) {
3696             error_report("Ack, bad migration stream!");
3697             return NULL;
3698         }
3699         return block;
3700     }
3701 
3702     len = qemu_get_byte(f);
3703     qemu_get_buffer(f, (uint8_t *)id, len);
3704     id[len] = 0;
3705 
3706     block = qemu_ram_block_by_name(id);
3707     if (!block) {
3708         error_report("Can't find block %s", id);
3709         return NULL;
3710     }
3711 
3712     if (ramblock_is_ignored(block)) {
3713         error_report("block %s should not be migrated !", id);
3714         return NULL;
3715     }
3716 
3717     return block;
3718 }
3719 
3720 static inline void *host_from_ram_block_offset(RAMBlock *block,
3721                                                ram_addr_t offset)
3722 {
3723     if (!offset_in_ramblock(block, offset)) {
3724         return NULL;
3725     }
3726 
3727     return block->host + offset;
3728 }
3729 
3730 static inline void *colo_cache_from_block_offset(RAMBlock *block,
3731                                                  ram_addr_t offset)
3732 {
3733     if (!offset_in_ramblock(block, offset)) {
3734         return NULL;
3735     }
3736     if (!block->colo_cache) {
3737         error_report("%s: colo_cache is NULL in block :%s",
3738                      __func__, block->idstr);
3739         return NULL;
3740     }
3741 
3742     /*
3743     * During colo checkpoint, we need bitmap of these migrated pages.
3744     * It help us to decide which pages in ram cache should be flushed
3745     * into VM's RAM later.
3746     */
3747     if (!test_and_set_bit(offset >> TARGET_PAGE_BITS, block->bmap)) {
3748         ram_state->migration_dirty_pages++;
3749     }
3750     return block->colo_cache + offset;
3751 }
3752 
3753 /**
3754  * ram_handle_compressed: handle the zero page case
3755  *
3756  * If a page (or a whole RDMA chunk) has been
3757  * determined to be zero, then zap it.
3758  *
3759  * @host: host address for the zero page
3760  * @ch: what the page is filled from.  We only support zero
3761  * @size: size of the zero page
3762  */
3763 void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
3764 {
3765     if (ch != 0 || !is_zero_range(host, size)) {
3766         memset(host, ch, size);
3767     }
3768 }
3769 
3770 /* return the size after decompression, or negative value on error */
3771 static int
3772 qemu_uncompress_data(z_stream *stream, uint8_t *dest, size_t dest_len,
3773                      const uint8_t *source, size_t source_len)
3774 {
3775     int err;
3776 
3777     err = inflateReset(stream);
3778     if (err != Z_OK) {
3779         return -1;
3780     }
3781 
3782     stream->avail_in = source_len;
3783     stream->next_in = (uint8_t *)source;
3784     stream->avail_out = dest_len;
3785     stream->next_out = dest;
3786 
3787     err = inflate(stream, Z_NO_FLUSH);
3788     if (err != Z_STREAM_END) {
3789         return -1;
3790     }
3791 
3792     return stream->total_out;
3793 }
3794 
3795 static void *do_data_decompress(void *opaque)
3796 {
3797     DecompressParam *param = opaque;
3798     unsigned long pagesize;
3799     uint8_t *des;
3800     int len, ret;
3801 
3802     qemu_mutex_lock(&param->mutex);
3803     while (!param->quit) {
3804         if (param->des) {
3805             des = param->des;
3806             len = param->len;
3807             param->des = 0;
3808             qemu_mutex_unlock(&param->mutex);
3809 
3810             pagesize = TARGET_PAGE_SIZE;
3811 
3812             ret = qemu_uncompress_data(&param->stream, des, pagesize,
3813                                        param->compbuf, len);
3814             if (ret < 0 && migrate_get_current()->decompress_error_check) {
3815                 error_report("decompress data failed");
3816                 qemu_file_set_error(decomp_file, ret);
3817             }
3818 
3819             qemu_mutex_lock(&decomp_done_lock);
3820             param->done = true;
3821             qemu_cond_signal(&decomp_done_cond);
3822             qemu_mutex_unlock(&decomp_done_lock);
3823 
3824             qemu_mutex_lock(&param->mutex);
3825         } else {
3826             qemu_cond_wait(&param->cond, &param->mutex);
3827         }
3828     }
3829     qemu_mutex_unlock(&param->mutex);
3830 
3831     return NULL;
3832 }
3833 
3834 static int wait_for_decompress_done(void)
3835 {
3836     int idx, thread_count;
3837 
3838     if (!migrate_use_compression()) {
3839         return 0;
3840     }
3841 
3842     thread_count = migrate_decompress_threads();
3843     qemu_mutex_lock(&decomp_done_lock);
3844     for (idx = 0; idx < thread_count; idx++) {
3845         while (!decomp_param[idx].done) {
3846             qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
3847         }
3848     }
3849     qemu_mutex_unlock(&decomp_done_lock);
3850     return qemu_file_get_error(decomp_file);
3851 }
3852 
3853 static void compress_threads_load_cleanup(void)
3854 {
3855     int i, thread_count;
3856 
3857     if (!migrate_use_compression()) {
3858         return;
3859     }
3860     thread_count = migrate_decompress_threads();
3861     for (i = 0; i < thread_count; i++) {
3862         /*
3863          * we use it as a indicator which shows if the thread is
3864          * properly init'd or not
3865          */
3866         if (!decomp_param[i].compbuf) {
3867             break;
3868         }
3869 
3870         qemu_mutex_lock(&decomp_param[i].mutex);
3871         decomp_param[i].quit = true;
3872         qemu_cond_signal(&decomp_param[i].cond);
3873         qemu_mutex_unlock(&decomp_param[i].mutex);
3874     }
3875     for (i = 0; i < thread_count; i++) {
3876         if (!decomp_param[i].compbuf) {
3877             break;
3878         }
3879 
3880         qemu_thread_join(decompress_threads + i);
3881         qemu_mutex_destroy(&decomp_param[i].mutex);
3882         qemu_cond_destroy(&decomp_param[i].cond);
3883         inflateEnd(&decomp_param[i].stream);
3884         g_free(decomp_param[i].compbuf);
3885         decomp_param[i].compbuf = NULL;
3886     }
3887     g_free(decompress_threads);
3888     g_free(decomp_param);
3889     decompress_threads = NULL;
3890     decomp_param = NULL;
3891     decomp_file = NULL;
3892 }
3893 
3894 static int compress_threads_load_setup(QEMUFile *f)
3895 {
3896     int i, thread_count;
3897 
3898     if (!migrate_use_compression()) {
3899         return 0;
3900     }
3901 
3902     thread_count = migrate_decompress_threads();
3903     decompress_threads = g_new0(QemuThread, thread_count);
3904     decomp_param = g_new0(DecompressParam, thread_count);
3905     qemu_mutex_init(&decomp_done_lock);
3906     qemu_cond_init(&decomp_done_cond);
3907     decomp_file = f;
3908     for (i = 0; i < thread_count; i++) {
3909         if (inflateInit(&decomp_param[i].stream) != Z_OK) {
3910             goto exit;
3911         }
3912 
3913         decomp_param[i].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
3914         qemu_mutex_init(&decomp_param[i].mutex);
3915         qemu_cond_init(&decomp_param[i].cond);
3916         decomp_param[i].done = true;
3917         decomp_param[i].quit = false;
3918         qemu_thread_create(decompress_threads + i, "decompress",
3919                            do_data_decompress, decomp_param + i,
3920                            QEMU_THREAD_JOINABLE);
3921     }
3922     return 0;
3923 exit:
3924     compress_threads_load_cleanup();
3925     return -1;
3926 }
3927 
3928 static void decompress_data_with_multi_threads(QEMUFile *f,
3929                                                void *host, int len)
3930 {
3931     int idx, thread_count;
3932 
3933     thread_count = migrate_decompress_threads();
3934     qemu_mutex_lock(&decomp_done_lock);
3935     while (true) {
3936         for (idx = 0; idx < thread_count; idx++) {
3937             if (decomp_param[idx].done) {
3938                 decomp_param[idx].done = false;
3939                 qemu_mutex_lock(&decomp_param[idx].mutex);
3940                 qemu_get_buffer(f, decomp_param[idx].compbuf, len);
3941                 decomp_param[idx].des = host;
3942                 decomp_param[idx].len = len;
3943                 qemu_cond_signal(&decomp_param[idx].cond);
3944                 qemu_mutex_unlock(&decomp_param[idx].mutex);
3945                 break;
3946             }
3947         }
3948         if (idx < thread_count) {
3949             break;
3950         } else {
3951             qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
3952         }
3953     }
3954     qemu_mutex_unlock(&decomp_done_lock);
3955 }
3956 
3957 /*
3958  * colo cache: this is for secondary VM, we cache the whole
3959  * memory of the secondary VM, it is need to hold the global lock
3960  * to call this helper.
3961  */
3962 int colo_init_ram_cache(void)
3963 {
3964     RAMBlock *block;
3965 
3966     rcu_read_lock();
3967     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3968         block->colo_cache = qemu_anon_ram_alloc(block->used_length,
3969                                                 NULL,
3970                                                 false);
3971         if (!block->colo_cache) {
3972             error_report("%s: Can't alloc memory for COLO cache of block %s,"
3973                          "size 0x" RAM_ADDR_FMT, __func__, block->idstr,
3974                          block->used_length);
3975             goto out_locked;
3976         }
3977         memcpy(block->colo_cache, block->host, block->used_length);
3978     }
3979     rcu_read_unlock();
3980     /*
3981     * Record the dirty pages that sent by PVM, we use this dirty bitmap together
3982     * with to decide which page in cache should be flushed into SVM's RAM. Here
3983     * we use the same name 'ram_bitmap' as for migration.
3984     */
3985     if (ram_bytes_total()) {
3986         RAMBlock *block;
3987 
3988         RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3989             unsigned long pages = block->max_length >> TARGET_PAGE_BITS;
3990 
3991             block->bmap = bitmap_new(pages);
3992             bitmap_set(block->bmap, 0, pages);
3993         }
3994     }
3995     ram_state = g_new0(RAMState, 1);
3996     ram_state->migration_dirty_pages = 0;
3997     qemu_mutex_init(&ram_state->bitmap_mutex);
3998     memory_global_dirty_log_start();
3999 
4000     return 0;
4001 
4002 out_locked:
4003 
4004     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
4005         if (block->colo_cache) {
4006             qemu_anon_ram_free(block->colo_cache, block->used_length);
4007             block->colo_cache = NULL;
4008         }
4009     }
4010 
4011     rcu_read_unlock();
4012     return -errno;
4013 }
4014 
4015 /* It is need to hold the global lock to call this helper */
4016 void colo_release_ram_cache(void)
4017 {
4018     RAMBlock *block;
4019 
4020     memory_global_dirty_log_stop();
4021     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
4022         g_free(block->bmap);
4023         block->bmap = NULL;
4024     }
4025 
4026     rcu_read_lock();
4027 
4028     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
4029         if (block->colo_cache) {
4030             qemu_anon_ram_free(block->colo_cache, block->used_length);
4031             block->colo_cache = NULL;
4032         }
4033     }
4034 
4035     rcu_read_unlock();
4036     qemu_mutex_destroy(&ram_state->bitmap_mutex);
4037     g_free(ram_state);
4038     ram_state = NULL;
4039 }
4040 
4041 /**
4042  * ram_load_setup: Setup RAM for migration incoming side
4043  *
4044  * Returns zero to indicate success and negative for error
4045  *
4046  * @f: QEMUFile where to receive the data
4047  * @opaque: RAMState pointer
4048  */
4049 static int ram_load_setup(QEMUFile *f, void *opaque)
4050 {
4051     if (compress_threads_load_setup(f)) {
4052         return -1;
4053     }
4054 
4055     xbzrle_load_setup();
4056     ramblock_recv_map_init();
4057 
4058     return 0;
4059 }
4060 
4061 static int ram_load_cleanup(void *opaque)
4062 {
4063     RAMBlock *rb;
4064 
4065     RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
4066         if (ramblock_is_pmem(rb)) {
4067             pmem_persist(rb->host, rb->used_length);
4068         }
4069     }
4070 
4071     xbzrle_load_cleanup();
4072     compress_threads_load_cleanup();
4073 
4074     RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
4075         g_free(rb->receivedmap);
4076         rb->receivedmap = NULL;
4077     }
4078 
4079     return 0;
4080 }
4081 
4082 /**
4083  * ram_postcopy_incoming_init: allocate postcopy data structures
4084  *
4085  * Returns 0 for success and negative if there was one error
4086  *
4087  * @mis: current migration incoming state
4088  *
4089  * Allocate data structures etc needed by incoming migration with
4090  * postcopy-ram. postcopy-ram's similarly names
4091  * postcopy_ram_incoming_init does the work.
4092  */
4093 int ram_postcopy_incoming_init(MigrationIncomingState *mis)
4094 {
4095     return postcopy_ram_incoming_init(mis);
4096 }
4097 
4098 /**
4099  * ram_load_postcopy: load a page in postcopy case
4100  *
4101  * Returns 0 for success or -errno in case of error
4102  *
4103  * Called in postcopy mode by ram_load().
4104  * rcu_read_lock is taken prior to this being called.
4105  *
4106  * @f: QEMUFile where to send the data
4107  */
4108 static int ram_load_postcopy(QEMUFile *f)
4109 {
4110     int flags = 0, ret = 0;
4111     bool place_needed = false;
4112     bool matches_target_page_size = false;
4113     MigrationIncomingState *mis = migration_incoming_get_current();
4114     /* Temporary page that is later 'placed' */
4115     void *postcopy_host_page = postcopy_get_tmp_page(mis);
4116     void *last_host = NULL;
4117     bool all_zero = false;
4118 
4119     while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
4120         ram_addr_t addr;
4121         void *host = NULL;
4122         void *page_buffer = NULL;
4123         void *place_source = NULL;
4124         RAMBlock *block = NULL;
4125         uint8_t ch;
4126 
4127         addr = qemu_get_be64(f);
4128 
4129         /*
4130          * If qemu file error, we should stop here, and then "addr"
4131          * may be invalid
4132          */
4133         ret = qemu_file_get_error(f);
4134         if (ret) {
4135             break;
4136         }
4137 
4138         flags = addr & ~TARGET_PAGE_MASK;
4139         addr &= TARGET_PAGE_MASK;
4140 
4141         trace_ram_load_postcopy_loop((uint64_t)addr, flags);
4142         place_needed = false;
4143         if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE)) {
4144             block = ram_block_from_stream(f, flags);
4145 
4146             host = host_from_ram_block_offset(block, addr);
4147             if (!host) {
4148                 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
4149                 ret = -EINVAL;
4150                 break;
4151             }
4152             matches_target_page_size = block->page_size == TARGET_PAGE_SIZE;
4153             /*
4154              * Postcopy requires that we place whole host pages atomically;
4155              * these may be huge pages for RAMBlocks that are backed by
4156              * hugetlbfs.
4157              * To make it atomic, the data is read into a temporary page
4158              * that's moved into place later.
4159              * The migration protocol uses,  possibly smaller, target-pages
4160              * however the source ensures it always sends all the components
4161              * of a host page in order.
4162              */
4163             page_buffer = postcopy_host_page +
4164                           ((uintptr_t)host & (block->page_size - 1));
4165             /* If all TP are zero then we can optimise the place */
4166             if (!((uintptr_t)host & (block->page_size - 1))) {
4167                 all_zero = true;
4168             } else {
4169                 /* not the 1st TP within the HP */
4170                 if (host != (last_host + TARGET_PAGE_SIZE)) {
4171                     error_report("Non-sequential target page %p/%p",
4172                                   host, last_host);
4173                     ret = -EINVAL;
4174                     break;
4175                 }
4176             }
4177 
4178 
4179             /*
4180              * If it's the last part of a host page then we place the host
4181              * page
4182              */
4183             place_needed = (((uintptr_t)host + TARGET_PAGE_SIZE) &
4184                                      (block->page_size - 1)) == 0;
4185             place_source = postcopy_host_page;
4186         }
4187         last_host = host;
4188 
4189         switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
4190         case RAM_SAVE_FLAG_ZERO:
4191             ch = qemu_get_byte(f);
4192             memset(page_buffer, ch, TARGET_PAGE_SIZE);
4193             if (ch) {
4194                 all_zero = false;
4195             }
4196             break;
4197 
4198         case RAM_SAVE_FLAG_PAGE:
4199             all_zero = false;
4200             if (!matches_target_page_size) {
4201                 /* For huge pages, we always use temporary buffer */
4202                 qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE);
4203             } else {
4204                 /*
4205                  * For small pages that matches target page size, we
4206                  * avoid the qemu_file copy.  Instead we directly use
4207                  * the buffer of QEMUFile to place the page.  Note: we
4208                  * cannot do any QEMUFile operation before using that
4209                  * buffer to make sure the buffer is valid when
4210                  * placing the page.
4211                  */
4212                 qemu_get_buffer_in_place(f, (uint8_t **)&place_source,
4213                                          TARGET_PAGE_SIZE);
4214             }
4215             break;
4216         case RAM_SAVE_FLAG_EOS:
4217             /* normal exit */
4218             multifd_recv_sync_main();
4219             break;
4220         default:
4221             error_report("Unknown combination of migration flags: %#x"
4222                          " (postcopy mode)", flags);
4223             ret = -EINVAL;
4224             break;
4225         }
4226 
4227         /* Detect for any possible file errors */
4228         if (!ret && qemu_file_get_error(f)) {
4229             ret = qemu_file_get_error(f);
4230         }
4231 
4232         if (!ret && place_needed) {
4233             /* This gets called at the last target page in the host page */
4234             void *place_dest = host + TARGET_PAGE_SIZE - block->page_size;
4235 
4236             if (all_zero) {
4237                 ret = postcopy_place_page_zero(mis, place_dest,
4238                                                block);
4239             } else {
4240                 ret = postcopy_place_page(mis, place_dest,
4241                                           place_source, block);
4242             }
4243         }
4244     }
4245 
4246     return ret;
4247 }
4248 
4249 static bool postcopy_is_advised(void)
4250 {
4251     PostcopyState ps = postcopy_state_get();
4252     return ps >= POSTCOPY_INCOMING_ADVISE && ps < POSTCOPY_INCOMING_END;
4253 }
4254 
4255 static bool postcopy_is_running(void)
4256 {
4257     PostcopyState ps = postcopy_state_get();
4258     return ps >= POSTCOPY_INCOMING_LISTENING && ps < POSTCOPY_INCOMING_END;
4259 }
4260 
4261 /*
4262  * Flush content of RAM cache into SVM's memory.
4263  * Only flush the pages that be dirtied by PVM or SVM or both.
4264  */
4265 static void colo_flush_ram_cache(void)
4266 {
4267     RAMBlock *block = NULL;
4268     void *dst_host;
4269     void *src_host;
4270     unsigned long offset = 0;
4271 
4272     memory_global_dirty_log_sync();
4273     rcu_read_lock();
4274     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
4275         ramblock_sync_dirty_bitmap(ram_state, block);
4276     }
4277     rcu_read_unlock();
4278 
4279     trace_colo_flush_ram_cache_begin(ram_state->migration_dirty_pages);
4280     rcu_read_lock();
4281     block = QLIST_FIRST_RCU(&ram_list.blocks);
4282 
4283     while (block) {
4284         offset = migration_bitmap_find_dirty(ram_state, block, offset);
4285 
4286         if (offset << TARGET_PAGE_BITS >= block->used_length) {
4287             offset = 0;
4288             block = QLIST_NEXT_RCU(block, next);
4289         } else {
4290             migration_bitmap_clear_dirty(ram_state, block, offset);
4291             dst_host = block->host + (offset << TARGET_PAGE_BITS);
4292             src_host = block->colo_cache + (offset << TARGET_PAGE_BITS);
4293             memcpy(dst_host, src_host, TARGET_PAGE_SIZE);
4294         }
4295     }
4296 
4297     rcu_read_unlock();
4298     trace_colo_flush_ram_cache_end();
4299 }
4300 
4301 /**
4302  * ram_load_precopy: load pages in precopy case
4303  *
4304  * Returns 0 for success or -errno in case of error
4305  *
4306  * Called in precopy mode by ram_load().
4307  * rcu_read_lock is taken prior to this being called.
4308  *
4309  * @f: QEMUFile where to send the data
4310  */
4311 static int ram_load_precopy(QEMUFile *f)
4312 {
4313     int flags = 0, ret = 0, invalid_flags = 0, len = 0;
4314     /* ADVISE is earlier, it shows the source has the postcopy capability on */
4315     bool postcopy_advised = postcopy_is_advised();
4316     if (!migrate_use_compression()) {
4317         invalid_flags |= RAM_SAVE_FLAG_COMPRESS_PAGE;
4318     }
4319 
4320     while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
4321         ram_addr_t addr, total_ram_bytes;
4322         void *host = NULL;
4323         uint8_t ch;
4324 
4325         addr = qemu_get_be64(f);
4326         flags = addr & ~TARGET_PAGE_MASK;
4327         addr &= TARGET_PAGE_MASK;
4328 
4329         if (flags & invalid_flags) {
4330             if (flags & invalid_flags & RAM_SAVE_FLAG_COMPRESS_PAGE) {
4331                 error_report("Received an unexpected compressed page");
4332             }
4333 
4334             ret = -EINVAL;
4335             break;
4336         }
4337 
4338         if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
4339                      RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
4340             RAMBlock *block = ram_block_from_stream(f, flags);
4341 
4342             /*
4343              * After going into COLO, we should load the Page into colo_cache.
4344              */
4345             if (migration_incoming_in_colo_state()) {
4346                 host = colo_cache_from_block_offset(block, addr);
4347             } else {
4348                 host = host_from_ram_block_offset(block, addr);
4349             }
4350             if (!host) {
4351                 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
4352                 ret = -EINVAL;
4353                 break;
4354             }
4355 
4356             if (!migration_incoming_in_colo_state()) {
4357                 ramblock_recv_bitmap_set(block, host);
4358             }
4359 
4360             trace_ram_load_loop(block->idstr, (uint64_t)addr, flags, host);
4361         }
4362 
4363         switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
4364         case RAM_SAVE_FLAG_MEM_SIZE:
4365             /* Synchronize RAM block list */
4366             total_ram_bytes = addr;
4367             while (!ret && total_ram_bytes) {
4368                 RAMBlock *block;
4369                 char id[256];
4370                 ram_addr_t length;
4371 
4372                 len = qemu_get_byte(f);
4373                 qemu_get_buffer(f, (uint8_t *)id, len);
4374                 id[len] = 0;
4375                 length = qemu_get_be64(f);
4376 
4377                 block = qemu_ram_block_by_name(id);
4378                 if (block && !qemu_ram_is_migratable(block)) {
4379                     error_report("block %s should not be migrated !", id);
4380                     ret = -EINVAL;
4381                 } else if (block) {
4382                     if (length != block->used_length) {
4383                         Error *local_err = NULL;
4384 
4385                         ret = qemu_ram_resize(block, length,
4386                                               &local_err);
4387                         if (local_err) {
4388                             error_report_err(local_err);
4389                         }
4390                     }
4391                     /* For postcopy we need to check hugepage sizes match */
4392                     if (postcopy_advised &&
4393                         block->page_size != qemu_host_page_size) {
4394                         uint64_t remote_page_size = qemu_get_be64(f);
4395                         if (remote_page_size != block->page_size) {
4396                             error_report("Mismatched RAM page size %s "
4397                                          "(local) %zd != %" PRId64,
4398                                          id, block->page_size,
4399                                          remote_page_size);
4400                             ret = -EINVAL;
4401                         }
4402                     }
4403                     if (migrate_ignore_shared()) {
4404                         hwaddr addr = qemu_get_be64(f);
4405                         if (ramblock_is_ignored(block) &&
4406                             block->mr->addr != addr) {
4407                             error_report("Mismatched GPAs for block %s "
4408                                          "%" PRId64 "!= %" PRId64,
4409                                          id, (uint64_t)addr,
4410                                          (uint64_t)block->mr->addr);
4411                             ret = -EINVAL;
4412                         }
4413                     }
4414                     ram_control_load_hook(f, RAM_CONTROL_BLOCK_REG,
4415                                           block->idstr);
4416                 } else {
4417                     error_report("Unknown ramblock \"%s\", cannot "
4418                                  "accept migration", id);
4419                     ret = -EINVAL;
4420                 }
4421 
4422                 total_ram_bytes -= length;
4423             }
4424             break;
4425 
4426         case RAM_SAVE_FLAG_ZERO:
4427             ch = qemu_get_byte(f);
4428             ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
4429             break;
4430 
4431         case RAM_SAVE_FLAG_PAGE:
4432             qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
4433             break;
4434 
4435         case RAM_SAVE_FLAG_COMPRESS_PAGE:
4436             len = qemu_get_be32(f);
4437             if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
4438                 error_report("Invalid compressed data length: %d", len);
4439                 ret = -EINVAL;
4440                 break;
4441             }
4442             decompress_data_with_multi_threads(f, host, len);
4443             break;
4444 
4445         case RAM_SAVE_FLAG_XBZRLE:
4446             if (load_xbzrle(f, addr, host) < 0) {
4447                 error_report("Failed to decompress XBZRLE page at "
4448                              RAM_ADDR_FMT, addr);
4449                 ret = -EINVAL;
4450                 break;
4451             }
4452             break;
4453         case RAM_SAVE_FLAG_EOS:
4454             /* normal exit */
4455             multifd_recv_sync_main();
4456             break;
4457         default:
4458             if (flags & RAM_SAVE_FLAG_HOOK) {
4459                 ram_control_load_hook(f, RAM_CONTROL_HOOK, NULL);
4460             } else {
4461                 error_report("Unknown combination of migration flags: %#x",
4462                              flags);
4463                 ret = -EINVAL;
4464             }
4465         }
4466         if (!ret) {
4467             ret = qemu_file_get_error(f);
4468         }
4469     }
4470 
4471     return ret;
4472 }
4473 
4474 static int ram_load(QEMUFile *f, void *opaque, int version_id)
4475 {
4476     int ret = 0;
4477     static uint64_t seq_iter;
4478     /*
4479      * If system is running in postcopy mode, page inserts to host memory must
4480      * be atomic
4481      */
4482     bool postcopy_running = postcopy_is_running();
4483 
4484     seq_iter++;
4485 
4486     if (version_id != 4) {
4487         return -EINVAL;
4488     }
4489 
4490     /*
4491      * This RCU critical section can be very long running.
4492      * When RCU reclaims in the code start to become numerous,
4493      * it will be necessary to reduce the granularity of this
4494      * critical section.
4495      */
4496     rcu_read_lock();
4497 
4498     if (postcopy_running) {
4499         ret = ram_load_postcopy(f);
4500     } else {
4501         ret = ram_load_precopy(f);
4502     }
4503 
4504     ret |= wait_for_decompress_done();
4505     rcu_read_unlock();
4506     trace_ram_load_complete(ret, seq_iter);
4507 
4508     if (!ret  && migration_incoming_in_colo_state()) {
4509         colo_flush_ram_cache();
4510     }
4511     return ret;
4512 }
4513 
4514 static bool ram_has_postcopy(void *opaque)
4515 {
4516     RAMBlock *rb;
4517     RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
4518         if (ramblock_is_pmem(rb)) {
4519             info_report("Block: %s, host: %p is a nvdimm memory, postcopy"
4520                          "is not supported now!", rb->idstr, rb->host);
4521             return false;
4522         }
4523     }
4524 
4525     return migrate_postcopy_ram();
4526 }
4527 
4528 /* Sync all the dirty bitmap with destination VM.  */
4529 static int ram_dirty_bitmap_sync_all(MigrationState *s, RAMState *rs)
4530 {
4531     RAMBlock *block;
4532     QEMUFile *file = s->to_dst_file;
4533     int ramblock_count = 0;
4534 
4535     trace_ram_dirty_bitmap_sync_start();
4536 
4537     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
4538         qemu_savevm_send_recv_bitmap(file, block->idstr);
4539         trace_ram_dirty_bitmap_request(block->idstr);
4540         ramblock_count++;
4541     }
4542 
4543     trace_ram_dirty_bitmap_sync_wait();
4544 
4545     /* Wait until all the ramblocks' dirty bitmap synced */
4546     while (ramblock_count--) {
4547         qemu_sem_wait(&s->rp_state.rp_sem);
4548     }
4549 
4550     trace_ram_dirty_bitmap_sync_complete();
4551 
4552     return 0;
4553 }
4554 
4555 static void ram_dirty_bitmap_reload_notify(MigrationState *s)
4556 {
4557     qemu_sem_post(&s->rp_state.rp_sem);
4558 }
4559 
4560 /*
4561  * Read the received bitmap, revert it as the initial dirty bitmap.
4562  * This is only used when the postcopy migration is paused but wants
4563  * to resume from a middle point.
4564  */
4565 int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *block)
4566 {
4567     int ret = -EINVAL;
4568     QEMUFile *file = s->rp_state.from_dst_file;
4569     unsigned long *le_bitmap, nbits = block->used_length >> TARGET_PAGE_BITS;
4570     uint64_t local_size = DIV_ROUND_UP(nbits, 8);
4571     uint64_t size, end_mark;
4572 
4573     trace_ram_dirty_bitmap_reload_begin(block->idstr);
4574 
4575     if (s->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
4576         error_report("%s: incorrect state %s", __func__,
4577                      MigrationStatus_str(s->state));
4578         return -EINVAL;
4579     }
4580 
4581     /*
4582      * Note: see comments in ramblock_recv_bitmap_send() on why we
4583      * need the endianess convertion, and the paddings.
4584      */
4585     local_size = ROUND_UP(local_size, 8);
4586 
4587     /* Add paddings */
4588     le_bitmap = bitmap_new(nbits + BITS_PER_LONG);
4589 
4590     size = qemu_get_be64(file);
4591 
4592     /* The size of the bitmap should match with our ramblock */
4593     if (size != local_size) {
4594         error_report("%s: ramblock '%s' bitmap size mismatch "
4595                      "(0x%"PRIx64" != 0x%"PRIx64")", __func__,
4596                      block->idstr, size, local_size);
4597         ret = -EINVAL;
4598         goto out;
4599     }
4600 
4601     size = qemu_get_buffer(file, (uint8_t *)le_bitmap, local_size);
4602     end_mark = qemu_get_be64(file);
4603 
4604     ret = qemu_file_get_error(file);
4605     if (ret || size != local_size) {
4606         error_report("%s: read bitmap failed for ramblock '%s': %d"
4607                      " (size 0x%"PRIx64", got: 0x%"PRIx64")",
4608                      __func__, block->idstr, ret, local_size, size);
4609         ret = -EIO;
4610         goto out;
4611     }
4612 
4613     if (end_mark != RAMBLOCK_RECV_BITMAP_ENDING) {
4614         error_report("%s: ramblock '%s' end mark incorrect: 0x%"PRIu64,
4615                      __func__, block->idstr, end_mark);
4616         ret = -EINVAL;
4617         goto out;
4618     }
4619 
4620     /*
4621      * Endianess convertion. We are during postcopy (though paused).
4622      * The dirty bitmap won't change. We can directly modify it.
4623      */
4624     bitmap_from_le(block->bmap, le_bitmap, nbits);
4625 
4626     /*
4627      * What we received is "received bitmap". Revert it as the initial
4628      * dirty bitmap for this ramblock.
4629      */
4630     bitmap_complement(block->bmap, block->bmap, nbits);
4631 
4632     trace_ram_dirty_bitmap_reload_complete(block->idstr);
4633 
4634     /*
4635      * We succeeded to sync bitmap for current ramblock. If this is
4636      * the last one to sync, we need to notify the main send thread.
4637      */
4638     ram_dirty_bitmap_reload_notify(s);
4639 
4640     ret = 0;
4641 out:
4642     g_free(le_bitmap);
4643     return ret;
4644 }
4645 
4646 static int ram_resume_prepare(MigrationState *s, void *opaque)
4647 {
4648     RAMState *rs = *(RAMState **)opaque;
4649     int ret;
4650 
4651     ret = ram_dirty_bitmap_sync_all(s, rs);
4652     if (ret) {
4653         return ret;
4654     }
4655 
4656     ram_state_resume_prepare(rs, s->to_dst_file);
4657 
4658     return 0;
4659 }
4660 
4661 static SaveVMHandlers savevm_ram_handlers = {
4662     .save_setup = ram_save_setup,
4663     .save_live_iterate = ram_save_iterate,
4664     .save_live_complete_postcopy = ram_save_complete,
4665     .save_live_complete_precopy = ram_save_complete,
4666     .has_postcopy = ram_has_postcopy,
4667     .save_live_pending = ram_save_pending,
4668     .load_state = ram_load,
4669     .save_cleanup = ram_save_cleanup,
4670     .load_setup = ram_load_setup,
4671     .load_cleanup = ram_load_cleanup,
4672     .resume_prepare = ram_resume_prepare,
4673 };
4674 
4675 void ram_mig_init(void)
4676 {
4677     qemu_mutex_init(&XBZRLE.lock);
4678     register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, &ram_state);
4679 }
4680