/*
 * QEMU live migration
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "migration/blocker.h"
#include "exec.h"
#include "fd.h"
#include "socket.h"
#include "sysemu/runstate.h"
#include "sysemu/sysemu.h"
#include "sysemu/cpu-throttle.h"
#include "rdma.h"
#include "ram.h"
#include "migration/global_state.h"
#include "migration/misc.h"
#include "migration.h"
#include "savevm.h"
#include "qemu-file.h"
#include "channel.h"
#include "migration/vmstate.h"
#include "block/block.h"
#include "qapi/error.h"
#include "qapi/clone-visitor.h"
#include "qapi/qapi-visit-migration.h"
#include "qapi/qapi-visit-sockets.h"
#include "qapi/qapi-commands-migration.h"
#include "qapi/qapi-events-migration.h"
#include "qapi/qmp/qerror.h"
#include "qapi/qmp/qnull.h"
#include "qemu/rcu.h"
#include "block.h"
#include "postcopy-ram.h"
#include "qemu/thread.h"
#include "trace.h"
#include "exec/target_page.h"
#include "io/channel-buffer.h"
#include "io/channel-tls.h"
#include "migration/colo.h"
#include "hw/boards.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "monitor/monitor.h"
#include "net/announce.h"
#include "qemu/queue.h"
#include "multifd.h"
#include "threadinfo.h"
#include "qemu/yank.h"
#include "sysemu/cpus.h"
#include "yank_functions.h"
#include "sysemu/qtest.h"

#define MAX_THROTTLE  (128 << 20)      /* Migration transfer speed throttling */

/* Amount of time to allocate to each "chunk" of bandwidth-throttled
 * data. */
#define BUFFER_DELAY     100
#define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY)
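
/*
 * BUFFER_DELAY is the accounting period of the rate limiter, so a
 * bandwidth limit gets paid out in chunks of
 * max_bandwidth / XFER_LIMIT_RATIO bytes per period (for example, a
 * 1 GB/s limit allows roughly 100 MB per 100 ms chunk).
 */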

/* Time in milliseconds we are allowed to stop the source,
 * for sending the last part */
#define DEFAULT_MIGRATE_SET_DOWNTIME 300

/* Maximum migrate downtime set to 2000 seconds */
#define MAX_MIGRATE_DOWNTIME_SECONDS 2000
#define MAX_MIGRATE_DOWNTIME (MAX_MIGRATE_DOWNTIME_SECONDS * 1000)

/* Default compression thread count */
#define DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT 8
/* Default decompression thread count; decompression is usually at
 * least 4 times as fast as compression. */
#define DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT 2
/* 0: no compression, 1: best speed, ... 9: best compression ratio */
#define DEFAULT_MIGRATE_COMPRESS_LEVEL 1
/* Default auto-converge CPU throttle migration parameters */
#define DEFAULT_MIGRATE_THROTTLE_TRIGGER_THRESHOLD 50
#define DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL 20
#define DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT 10
#define DEFAULT_MIGRATE_MAX_CPU_THROTTLE 99

/* Migration XBZRLE default cache size */
#define DEFAULT_MIGRATE_XBZRLE_CACHE_SIZE (64 * 1024 * 1024)

/* The delay time (in ms) between two COLO checkpoints (200 * 100 = 20 s) */
#define DEFAULT_MIGRATE_X_CHECKPOINT_DELAY (200 * 100)
#define DEFAULT_MIGRATE_MULTIFD_CHANNELS 2
#define DEFAULT_MIGRATE_MULTIFD_COMPRESSION MULTIFD_COMPRESSION_NONE
/* 0: no compression, 1: best speed, ... 9: best compression ratio */
#define DEFAULT_MIGRATE_MULTIFD_ZLIB_LEVEL 1
/* 0: no compression, 1: best speed, ... 20: best compression ratio */
#define DEFAULT_MIGRATE_MULTIFD_ZSTD_LEVEL 1

/* Background transfer rate for postcopy, 0 means unlimited; note
 * that page requests can still exceed this limit.
 */
#define DEFAULT_MIGRATE_MAX_POSTCOPY_BANDWIDTH 0

/*
 * Parameters for self_announce_delay giving a stream of RARP/ARP
 * packets after migration.
 */
#define DEFAULT_MIGRATE_ANNOUNCE_INITIAL  50
#define DEFAULT_MIGRATE_ANNOUNCE_MAX     550
#define DEFAULT_MIGRATE_ANNOUNCE_ROUNDS    5
#define DEFAULT_MIGRATE_ANNOUNCE_STEP    100

static NotifierList migration_state_notifiers =
    NOTIFIER_LIST_INITIALIZER(migration_state_notifiers);

/* Messages sent on the return path from destination to source */
enum mig_rp_message_type {
    MIG_RP_MSG_INVALID = 0,  /* Must be 0 */
    MIG_RP_MSG_SHUT,         /* sibling will not send any more RP messages */
    MIG_RP_MSG_PONG,         /* Response to a PING; data (seq: be32) */

    MIG_RP_MSG_REQ_PAGES_ID, /* data (start: be64, len: be32, id: string) */
    MIG_RP_MSG_REQ_PAGES,    /* data (start: be64, len: be32) */
    MIG_RP_MSG_RECV_BITMAP,  /* send recved_bitmap back to source */
    MIG_RP_MSG_RESUME_ACK,   /* tell source that we are ready to resume */

    MIG_RP_MSG_MAX
};
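
/*
 * On the wire, each return-path message is framed as a be16 message
 * type, a be16 payload length and then the payload itself; see
 * migrate_send_rp_message() below.
 */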

/* Migration capabilities set */
struct MigrateCapsSet {
    int size;                       /* Capability set size */
    MigrationCapability caps[];     /* Flexible array of capabilities */
};
typedef struct MigrateCapsSet MigrateCapsSet;

/* Define and initialize MigrateCapsSet */
#define INITIALIZE_MIGRATE_CAPS_SET(_name, ...)   \
    MigrateCapsSet _name = {    \
        .size = sizeof((int []) { __VA_ARGS__ }) / sizeof(int), \
        .caps = { __VA_ARGS__ } \
    }
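
/*
 * For example,
 *     INITIALIZE_MIGRATE_CAPS_SET(set, MIGRATION_CAPABILITY_XBZRLE)
 * expands to
 *     MigrateCapsSet set = { .size = 1,
 *                            .caps = { MIGRATION_CAPABILITY_XBZRLE } };
 */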

/* Background-snapshot compatibility check list */
static const
INITIALIZE_MIGRATE_CAPS_SET(check_caps_background_snapshot,
    MIGRATION_CAPABILITY_POSTCOPY_RAM,
    MIGRATION_CAPABILITY_DIRTY_BITMAPS,
    MIGRATION_CAPABILITY_POSTCOPY_BLOCKTIME,
    MIGRATION_CAPABILITY_LATE_BLOCK_ACTIVATE,
    MIGRATION_CAPABILITY_RETURN_PATH,
    MIGRATION_CAPABILITY_MULTIFD,
    MIGRATION_CAPABILITY_PAUSE_BEFORE_SWITCHOVER,
    MIGRATION_CAPABILITY_AUTO_CONVERGE,
    MIGRATION_CAPABILITY_RELEASE_RAM,
    MIGRATION_CAPABILITY_RDMA_PIN_ALL,
    MIGRATION_CAPABILITY_COMPRESS,
    MIGRATION_CAPABILITY_XBZRLE,
    MIGRATION_CAPABILITY_X_COLO,
    MIGRATION_CAPABILITY_VALIDATE_UUID,
    MIGRATION_CAPABILITY_ZERO_COPY_SEND);

/* When we add fault tolerance, we could have several migrations at
   once.  For now we don't need dynamic creation of migrations. */

static MigrationState *current_migration;
static MigrationIncomingState *current_incoming;

static GSList *migration_blockers;

static bool migration_object_check(MigrationState *ms, Error **errp);
static int migration_maybe_pause(MigrationState *s,
                                 int *current_active_state,
                                 int new_state);
static void migrate_fd_cancel(MigrationState *s);

static bool migration_needs_multiple_sockets(void)
{
    return migrate_use_multifd() || migrate_postcopy_preempt();
}

static bool uri_supports_multi_channels(const char *uri)
{
    return strstart(uri, "tcp:", NULL) || strstart(uri, "unix:", NULL) ||
           strstart(uri, "vsock:", NULL);
}

static bool
migration_channels_and_uri_compatible(const char *uri, Error **errp)
{
    if (migration_needs_multiple_sockets() &&
        !uri_supports_multi_channels(uri)) {
        error_setg(errp, "Migration requires multi-channel URIs (e.g. tcp)");
        return false;
    }

    return true;
}

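/*
 * GCompareFunc for the page_requested tree below: the
 * (a > b) - (a < b) idiom returns -1, 0 or 1 without the truncation
 * and overflow pitfalls of returning the subtraction of the two
 * addresses directly.
 */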
static gint page_request_addr_cmp(gconstpointer ap, gconstpointer bp)
{
    uintptr_t a = (uintptr_t) ap, b = (uintptr_t) bp;

    return (a > b) - (a < b);
}

void migration_object_init(void)
{
    /* This can only be called once. */
    assert(!current_migration);
    current_migration = MIGRATION_OBJ(object_new(TYPE_MIGRATION));

    /*
     * Initialize the incoming migration object as well, whether or
     * not we'll actually use it.
     */
    assert(!current_incoming);
    current_incoming = g_new0(MigrationIncomingState, 1);
    current_incoming->state = MIGRATION_STATUS_NONE;
    current_incoming->postcopy_remote_fds =
        g_array_new(FALSE, TRUE, sizeof(struct PostCopyFD));
    qemu_mutex_init(&current_incoming->rp_mutex);
    qemu_mutex_init(&current_incoming->postcopy_prio_thread_mutex);
    qemu_event_init(&current_incoming->main_thread_load_event, false);
    qemu_sem_init(&current_incoming->postcopy_pause_sem_dst, 0);
    qemu_sem_init(&current_incoming->postcopy_pause_sem_fault, 0);
    qemu_sem_init(&current_incoming->postcopy_pause_sem_fast_load, 0);
    qemu_sem_init(&current_incoming->postcopy_qemufile_dst_done, 0);

    qemu_mutex_init(&current_incoming->page_request_mutex);
    current_incoming->page_requested = g_tree_new(page_request_addr_cmp);

    migration_object_check(current_migration, &error_fatal);

    blk_mig_init();
    ram_mig_init();
    dirty_bitmap_mig_init();
}

void migration_cancel(const Error *error)
{
    if (error) {
        migrate_set_error(current_migration, error);
    }
    migrate_fd_cancel(current_migration);
}

void migration_shutdown(void)
{
    /*
     * When the QEMU main thread exits, the COLO thread may be waiting
     * on a semaphore, so wake up the COLO thread before shutting
     * migration down.
     */
    colo_shutdown();
    /*
     * Cancel the current migration - that will (eventually)
     * stop the migration using this structure
     */
    migration_cancel(NULL);
    object_unref(OBJECT(current_migration));

    /*
     * Cancel outgoing migration of dirty bitmaps. It should
     * at least unref used block nodes.
     */
    dirty_bitmap_mig_cancel_outgoing();

    /*
     * Cancel incoming migration of dirty bitmaps. Dirty bitmaps
     * are non-critical data, and their loss is never considered
     * serious.
     */
    dirty_bitmap_mig_cancel_incoming();
}

/* For outgoing */
MigrationState *migrate_get_current(void)
{
    /* This can only be called after the object has been created. */
    assert(current_migration);
    return current_migration;
}

MigrationIncomingState *migration_incoming_get_current(void)
{
    assert(current_incoming);
    return current_incoming;
}

void migration_incoming_transport_cleanup(MigrationIncomingState *mis)
{
    if (mis->socket_address_list) {
        qapi_free_SocketAddressList(mis->socket_address_list);
        mis->socket_address_list = NULL;
    }

    if (mis->transport_cleanup) {
        mis->transport_cleanup(mis->transport_data);
        mis->transport_data = mis->transport_cleanup = NULL;
    }
}

void migration_incoming_state_destroy(void)
{
    struct MigrationIncomingState *mis = migration_incoming_get_current();

    multifd_load_cleanup();

    if (mis->to_src_file) {
        /* Tell the source that we are done */
        migrate_send_rp_shut(mis, qemu_file_get_error(mis->from_src_file) != 0);
        qemu_fclose(mis->to_src_file);
        mis->to_src_file = NULL;
    }

    if (mis->from_src_file) {
        migration_ioc_unregister_yank_from_file(mis->from_src_file);
        qemu_fclose(mis->from_src_file);
        mis->from_src_file = NULL;
    }
    if (mis->postcopy_remote_fds) {
        g_array_free(mis->postcopy_remote_fds, TRUE);
        mis->postcopy_remote_fds = NULL;
    }

    migration_incoming_transport_cleanup(mis);
    qemu_event_reset(&mis->main_thread_load_event);

    if (mis->page_requested) {
        g_tree_destroy(mis->page_requested);
        mis->page_requested = NULL;
    }

    if (mis->postcopy_qemufile_dst) {
        migration_ioc_unregister_yank_from_file(mis->postcopy_qemufile_dst);
        qemu_fclose(mis->postcopy_qemufile_dst);
        mis->postcopy_qemufile_dst = NULL;
    }

    yank_unregister_instance(MIGRATION_YANK_INSTANCE);
}

static void migrate_generate_event(int new_state)
{
    if (migrate_use_events()) {
        qapi_event_send_migration(new_state);
    }
}

static bool migrate_late_block_activate(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->capabilities[MIGRATION_CAPABILITY_LATE_BLOCK_ACTIVATE];
}

/*
 * Send a message on the return channel back to the source
 * of the migration.
 */
static int migrate_send_rp_message(MigrationIncomingState *mis,
                                   enum mig_rp_message_type message_type,
                                   uint16_t len, void *data)
{
    int ret = 0;

    trace_migrate_send_rp_message((int)message_type, len);
    QEMU_LOCK_GUARD(&mis->rp_mutex);

    /*
     * It's possible that the file handle got lost due to network
     * failures.
     */
    if (!mis->to_src_file) {
        ret = -EIO;
        return ret;
    }

    qemu_put_be16(mis->to_src_file, (unsigned int)message_type);
    qemu_put_be16(mis->to_src_file, len);
    qemu_put_buffer(mis->to_src_file, data, len);
    qemu_fflush(mis->to_src_file);

    /* It's possible that the QEMU file got an error during sending */
    ret = qemu_file_get_error(mis->to_src_file);

    return ret;
}

/* Request one page from the source VM at the given start address.
 *   rb: the RAMBlock to request the page in
 *   start: address offset within the RAMBlock
 * The requested length is one page of qemu_ram_pagesize(rb) bytes.
 */
int migrate_send_rp_message_req_pages(MigrationIncomingState *mis,
                                      RAMBlock *rb, ram_addr_t start)
{
    uint8_t bufc[12 + 1 + 255]; /* start (8), len (4), rbname up to 256 */
    size_t msglen = 12; /* start + len */
    size_t len = qemu_ram_pagesize(rb);
    enum mig_rp_message_type msg_type;
    const char *rbname;
    int rbname_len;

    *(uint64_t *)bufc = cpu_to_be64((uint64_t)start);
    *(uint32_t *)(bufc + 8) = cpu_to_be32((uint32_t)len);

    /*
     * We track the last RAMBlock that we requested a page from.  Note
     * that we don't need locking because this function will only be
     * called within the postcopy ram fault thread.
     */
    if (rb != mis->last_rb) {
        mis->last_rb = rb;

        rbname = qemu_ram_get_idstr(rb);
        rbname_len = strlen(rbname);

        assert(rbname_len < 256);

        bufc[msglen++] = rbname_len;
        memcpy(bufc + msglen, rbname, rbname_len);
        msglen += rbname_len;
        msg_type = MIG_RP_MSG_REQ_PAGES_ID;
    } else {
        msg_type = MIG_RP_MSG_REQ_PAGES;
    }

    return migrate_send_rp_message(mis, msg_type, msglen, bufc);
}
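
/*
 * The resulting MIG_RP_MSG_REQ_PAGES_ID payload is laid out as
 *     [start: be64][len: be32][rbname_len: u8][rbname: bytes]
 * while MIG_RP_MSG_REQ_PAGES stops after the first two fields.
 */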

int migrate_send_rp_req_pages(MigrationIncomingState *mis,
                              RAMBlock *rb, ram_addr_t start, uint64_t haddr)
{
    void *aligned = (void *)(uintptr_t)ROUND_DOWN(haddr, qemu_ram_pagesize(rb));
    bool received = false;

    WITH_QEMU_LOCK_GUARD(&mis->page_request_mutex) {
        received = ramblock_recv_bitmap_test_byte_offset(rb, start);
        if (!received && !g_tree_lookup(mis->page_requested, aligned)) {
            /*
             * The page has not been received, and it's not yet in the page
             * request list.  Queue it.  Set the value of element to 1, so that
             * things like g_tree_lookup() will return TRUE (1) when found.
             */
            g_tree_insert(mis->page_requested, aligned, (gpointer)1);
            mis->page_requested_count++;
            trace_postcopy_page_req_add(aligned, mis->page_requested_count);
        }
    }

    /*
     * If the page is there, skip sending the message.  We don't even need the
     * lock because as long as the page arrived, it'll be there forever.
     */
    if (received) {
        return 0;
    }

    return migrate_send_rp_message_req_pages(mis, rb, start);
}
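
/*
 * Two layers of de-duplication protect the return path above: the
 * receive bitmap filters out pages that have already arrived, and the
 * page_requested tree filters out pages that were already requested
 * but are still in flight.
 */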

static bool migration_colo_enabled;
bool migration_incoming_colo_enabled(void)
{
    return migration_colo_enabled;
}

void migration_incoming_disable_colo(void)
{
    ram_block_discard_disable(false);
    migration_colo_enabled = false;
}

int migration_incoming_enable_colo(void)
{
    if (ram_block_discard_disable(true)) {
        error_report("COLO: cannot disable RAM discard");
        return -EBUSY;
    }
    migration_colo_enabled = true;
    return 0;
}

void migrate_add_address(SocketAddress *address)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    QAPI_LIST_PREPEND(mis->socket_address_list,
                      QAPI_CLONE(SocketAddress, address));
}

static void qemu_start_incoming_migration(const char *uri, Error **errp)
{
    const char *p = NULL;

    /* Make sure the URI can carry all the channels this migration needs */
    if (!migration_channels_and_uri_compatible(uri, errp)) {
        return;
    }

    qapi_event_send_migration(MIGRATION_STATUS_SETUP);
    if (strstart(uri, "tcp:", &p) ||
        strstart(uri, "unix:", NULL) ||
        strstart(uri, "vsock:", NULL)) {
        socket_start_incoming_migration(p ? p : uri, errp);
#ifdef CONFIG_RDMA
    } else if (strstart(uri, "rdma:", &p)) {
        rdma_start_incoming_migration(p, errp);
#endif
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_incoming_migration(p, errp);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_incoming_migration(p, errp);
    } else {
        error_setg(errp, "unknown migration protocol: %s", uri);
    }
}

static void process_incoming_migration_bh(void *opaque)
{
    Error *local_err = NULL;
    MigrationIncomingState *mis = opaque;

    /* If the capability late_block_activate is set:
     * Only fire up the block code now if we're going to restart the
     * VM, else 'cont' will do it.
     * This causes file locking to happen, so we don't want it to
     * happen unless we really are starting the VM.
     */
    if (!migrate_late_block_activate() ||
         (autostart && (!global_state_received() ||
            global_state_get_runstate() == RUN_STATE_RUNNING))) {
        /* Make sure all file formats throw away their mutable metadata.
         * If we get an error here, just don't restart the VM yet. */
        bdrv_activate_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
            local_err = NULL;
            autostart = false;
        }
    }

    /*
     * This must happen after all error conditions are dealt with and
     * we're sure the VM is going to be running on this host.
     */
    qemu_announce_self(&mis->announce_timer, migrate_announce_params());

    multifd_load_shutdown();

    dirty_bitmap_mig_before_vm_start();

    if (!global_state_received() ||
        global_state_get_runstate() == RUN_STATE_RUNNING) {
        if (autostart) {
            vm_start();
        } else {
            runstate_set(RUN_STATE_PAUSED);
        }
    } else if (migration_incoming_colo_enabled()) {
        migration_incoming_disable_colo();
        vm_start();
    } else {
        runstate_set(global_state_get_runstate());
    }
    /*
     * This must happen after any state changes since as soon as an external
     * observer sees this event they might start to prod at the VM assuming
     * it's ready to use.
     */
    migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                      MIGRATION_STATUS_COMPLETED);
    qemu_bh_delete(mis->bh);
    migration_incoming_state_destroy();
}

static void coroutine_fn
process_incoming_migration_co(void *opaque)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyState ps;
    int ret;
    Error *local_err = NULL;

    assert(mis->from_src_file);
    mis->migration_incoming_co = qemu_coroutine_self();
    mis->largest_page_size = qemu_ram_pagesize_largest();
    postcopy_state_set(POSTCOPY_INCOMING_NONE);
    migrate_set_state(&mis->state, MIGRATION_STATUS_NONE,
                      MIGRATION_STATUS_ACTIVE);
    ret = qemu_loadvm_state(mis->from_src_file);

    ps = postcopy_state_get();
    trace_process_incoming_migration_co_end(ret, ps);
    if (ps != POSTCOPY_INCOMING_NONE) {
        if (ps == POSTCOPY_INCOMING_ADVISE) {
            /*
             * Where a migration had postcopy enabled (and thus went to advise)
             * but managed to complete within the precopy period, we can use
             * the normal exit.
             */
            postcopy_ram_incoming_cleanup(mis);
        } else if (ret >= 0) {
            /*
             * Postcopy was started, cleanup should happen at the end of the
             * postcopy thread.
             */
            trace_process_incoming_migration_co_postcopy_end_main();
            return;
        }
        /* Else if something went wrong then just fall out of the normal exit */
    }

    /* We got COLO info and now know whether we are in COLO mode */
    if (!ret && migration_incoming_colo_enabled()) {
        /* Make sure all file formats throw away their mutable metadata */
        bdrv_activate_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
            goto fail;
        }

        qemu_thread_create(&mis->colo_incoming_thread, "COLO incoming",
             colo_process_incoming_thread, mis, QEMU_THREAD_JOINABLE);
        mis->have_colo_incoming_thread = true;
        qemu_coroutine_yield();

        qemu_mutex_unlock_iothread();
        /* Wait for the COLO incoming thread to exit before freeing resources */
        qemu_thread_join(&mis->colo_incoming_thread);
        qemu_mutex_lock_iothread();
        /* We hold the global iothread lock, so it is safe here */
        colo_release_ram_cache();
    }

    if (ret < 0) {
        error_report("load of migration failed: %s", strerror(-ret));
        goto fail;
    }
    mis->bh = qemu_bh_new(process_incoming_migration_bh, mis);
    qemu_bh_schedule(mis->bh);
    mis->migration_incoming_co = NULL;
    return;
fail:
    local_err = NULL;
    migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                      MIGRATION_STATUS_FAILED);
    qemu_fclose(mis->from_src_file);

    multifd_load_cleanup();

    exit(EXIT_FAILURE);
}

/**
 * migration_incoming_setup: Setup incoming migration
 * @f: file for main migration channel
 * @errp: where to put errors
 *
 * Returns: %true on success, %false on error.
 */
static bool migration_incoming_setup(QEMUFile *f, Error **errp)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    if (!mis->from_src_file) {
        mis->from_src_file = f;
    }
    qemu_file_set_blocking(f, false);
    return true;
}
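
/*
 * The main channel is drained from a coroutine
 * (process_incoming_migration_co()), which is why it is switched to
 * non-blocking mode above; postcopy_try_recover() below flips it back
 * to blocking once a standalone thread takes over the VM load.
 */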

void migration_incoming_process(void)
{
    Coroutine *co = qemu_coroutine_create(process_incoming_migration_co, NULL);
    qemu_coroutine_enter(co);
}

/* Returns true if recovered from a paused migration, otherwise false */
static bool postcopy_try_recover(void)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    if (mis->state == MIGRATION_STATUS_POSTCOPY_PAUSED) {
        /* Resumed from a paused postcopy migration */

        /* This should be set already in migration_incoming_setup() */
        assert(mis->from_src_file);
        /* Postcopy has a standalone thread to do the VM load */
        qemu_file_set_blocking(mis->from_src_file, true);

        /* Re-configure the return path */
        mis->to_src_file = qemu_file_get_return_path(mis->from_src_file);

        migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_PAUSED,
                          MIGRATION_STATUS_POSTCOPY_RECOVER);

        /*
         * Here we only wake up the main loading thread (while the
         * remaining threads keep waiting), so that we can receive
         * commands from the source now and answer them if needed. The
         * remaining threads will be woken up once we are sure that the
         * source is ready to reply to page requests.
         */
        qemu_sem_post(&mis->postcopy_pause_sem_dst);
        return true;
    }

    return false;
}

void migration_fd_process_incoming(QEMUFile *f, Error **errp)
{
    if (!migration_incoming_setup(f, errp)) {
        return;
    }
    if (postcopy_try_recover()) {
        return;
    }
    migration_incoming_process();
}

/*
 * Returns true when we want to start a new incoming migration process,
 * false otherwise.
 */
static bool migration_should_start_incoming(bool main_channel)
{
    /* Multifd doesn't start unless all channels are established */
    if (migrate_use_multifd()) {
        return migration_has_all_channels();
    }

    /* The preempt channel only starts when the main channel is created */
    if (migrate_postcopy_preempt()) {
        return main_channel;
    }

    /*
     * For all other types of migration, we should only reach here when
     * it's the main channel that's being created, and we should always
     * proceed with this channel.
     */
    assert(main_channel);
    return true;
}

void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    Error *local_err = NULL;
    QEMUFile *f;
    bool default_channel = true;
    uint32_t channel_magic = 0;
    int ret = 0;

    if (migrate_use_multifd() && !migrate_postcopy_ram() &&
        qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_READ_MSG_PEEK)) {
        /*
         * With multiple channels, it is possible that we receive channels
         * out of order on the destination side, causing an incorrect
         * mapping of source channels on the destination side. Check the
         * channel MAGIC to decide the type of channel. Note this is best
         * effort: the postcopy preempt channel does not send any magic
         * number, so avoid it for postcopy live migration. Also, TLS live
         * migration already performs the TLS handshake while initializing
         * the main channel, so this issue cannot occur with TLS.
         */
        ret = migration_channel_read_peek(ioc, (void *)&channel_magic,
                                          sizeof(channel_magic), &local_err);

        if (ret != 0) {
            error_propagate(errp, local_err);
            return;
        }

        default_channel = (channel_magic == cpu_to_be32(QEMU_VM_FILE_MAGIC));
    } else {
        default_channel = !mis->from_src_file;
    }

    if (multifd_load_setup(errp) != 0) {
        /* multifd_load_setup() has already set errp; don't overwrite it */
        return;
    }

    if (default_channel) {
        f = qemu_file_new_input(ioc);

        if (!migration_incoming_setup(f, errp)) {
            return;
        }
    } else {
        /* Multiple connections */
        assert(migration_needs_multiple_sockets());
        if (migrate_use_multifd()) {
            multifd_recv_new_channel(ioc, &local_err);
        } else {
            assert(migrate_postcopy_preempt());
            f = qemu_file_new_input(ioc);
            postcopy_preempt_new_channel(mis, f);
        }
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }

    if (migration_should_start_incoming(default_channel)) {
        /* If it's a recovery, we're done */
        if (postcopy_try_recover()) {
            return;
        }
        migration_incoming_process();
    }
}

/**
 * @migration_has_all_channels: We have received all channels that we need
 *
 * Returns true when we have connections to all the channels that
 * we need for migration.
 */
bool migration_has_all_channels(void)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    if (!mis->from_src_file) {
        return false;
    }

    if (migrate_use_multifd()) {
        return multifd_recv_all_channels_created();
    }

    if (migrate_postcopy_preempt()) {
        return mis->postcopy_qemufile_dst != NULL;
    }

    return true;
}

/*
 * Send a 'SHUT' message on the return channel with the given value
 * to indicate that we've finished with the RP.  A non-zero value
 * indicates an error.
 */
void migrate_send_rp_shut(MigrationIncomingState *mis,
                          uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_SHUT, sizeof(buf), &buf);
}

/*
 * Send a 'PONG' message on the return channel with the given value
 * (normally in response to a 'PING')
 */
void migrate_send_rp_pong(MigrationIncomingState *mis,
                          uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_PONG, sizeof(buf), &buf);
}

void migrate_send_rp_recv_bitmap(MigrationIncomingState *mis,
                                 char *block_name)
{
    char buf[512];
    int len;
    int64_t res;

    /*
     * First, we send the header part. It contains only the len of
     * idstr, and the idstr itself.
     */
    len = strlen(block_name);
    buf[0] = len;
    memcpy(buf + 1, block_name, len);

    if (mis->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
        error_report("%s: MSG_RP_RECV_BITMAP only used for recovery",
                     __func__);
        return;
    }

    migrate_send_rp_message(mis, MIG_RP_MSG_RECV_BITMAP, len + 1, buf);

    /*
     * Next, we dump the received bitmap to the stream.
     *
     * TODO: currently we are safe since we are the only one that is
     * using the to_src_file handle (the fault thread is still paused),
     * so it is OK even without taking the mutex. Still, the best way
     * is to take the lock before sending the message header, and
     * release it after sending the bitmap.
     */
    qemu_mutex_lock(&mis->rp_mutex);
    res = ramblock_recv_bitmap_send(mis->to_src_file, block_name);
    qemu_mutex_unlock(&mis->rp_mutex);

    trace_migrate_send_rp_recv_bitmap(block_name, res);
}

void migrate_send_rp_resume_ack(MigrationIncomingState *mis, uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_RESUME_ACK, sizeof(buf), &buf);
}

MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp)
{
    MigrationCapabilityStatusList *head = NULL, **tail = &head;
    MigrationCapabilityStatus *caps;
    MigrationState *s = migrate_get_current();
    int i;

    for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
#ifndef CONFIG_LIVE_BLOCK_MIGRATION
        if (i == MIGRATION_CAPABILITY_BLOCK) {
            continue;
        }
#endif
        caps = g_malloc0(sizeof(*caps));
        caps->capability = i;
        caps->state = s->capabilities[i];
        QAPI_LIST_APPEND(tail, caps);
    }

    return head;
}

MigrationParameters *qmp_query_migrate_parameters(Error **errp)
{
    MigrationParameters *params;
    MigrationState *s = migrate_get_current();

    /* TODO use QAPI_CLONE() instead of duplicating it inline */
    params = g_malloc0(sizeof(*params));
    params->has_compress_level = true;
    params->compress_level = s->parameters.compress_level;
    params->has_compress_threads = true;
    params->compress_threads = s->parameters.compress_threads;
    params->has_compress_wait_thread = true;
    params->compress_wait_thread = s->parameters.compress_wait_thread;
    params->has_decompress_threads = true;
    params->decompress_threads = s->parameters.decompress_threads;
    params->has_throttle_trigger_threshold = true;
    params->throttle_trigger_threshold = s->parameters.throttle_trigger_threshold;
    params->has_cpu_throttle_initial = true;
    params->cpu_throttle_initial = s->parameters.cpu_throttle_initial;
    params->has_cpu_throttle_increment = true;
    params->cpu_throttle_increment = s->parameters.cpu_throttle_increment;
    params->has_cpu_throttle_tailslow = true;
    params->cpu_throttle_tailslow = s->parameters.cpu_throttle_tailslow;
    params->tls_creds = g_strdup(s->parameters.tls_creds);
    params->tls_hostname = g_strdup(s->parameters.tls_hostname);
    params->tls_authz = g_strdup(s->parameters.tls_authz ?
                                 s->parameters.tls_authz : "");
    params->has_max_bandwidth = true;
    params->max_bandwidth = s->parameters.max_bandwidth;
    params->has_downtime_limit = true;
    params->downtime_limit = s->parameters.downtime_limit;
    params->has_x_checkpoint_delay = true;
    params->x_checkpoint_delay = s->parameters.x_checkpoint_delay;
    params->has_block_incremental = true;
    params->block_incremental = s->parameters.block_incremental;
    params->has_multifd_channels = true;
    params->multifd_channels = s->parameters.multifd_channels;
    params->has_multifd_compression = true;
    params->multifd_compression = s->parameters.multifd_compression;
    params->has_multifd_zlib_level = true;
    params->multifd_zlib_level = s->parameters.multifd_zlib_level;
    params->has_multifd_zstd_level = true;
    params->multifd_zstd_level = s->parameters.multifd_zstd_level;
    params->has_xbzrle_cache_size = true;
    params->xbzrle_cache_size = s->parameters.xbzrle_cache_size;
    params->has_max_postcopy_bandwidth = true;
    params->max_postcopy_bandwidth = s->parameters.max_postcopy_bandwidth;
    params->has_max_cpu_throttle = true;
    params->max_cpu_throttle = s->parameters.max_cpu_throttle;
    params->has_announce_initial = true;
    params->announce_initial = s->parameters.announce_initial;
    params->has_announce_max = true;
    params->announce_max = s->parameters.announce_max;
    params->has_announce_rounds = true;
    params->announce_rounds = s->parameters.announce_rounds;
    params->has_announce_step = true;
    params->announce_step = s->parameters.announce_step;

    if (s->parameters.has_block_bitmap_mapping) {
        params->has_block_bitmap_mapping = true;
        params->block_bitmap_mapping =
            QAPI_CLONE(BitmapMigrationNodeAliasList,
                       s->parameters.block_bitmap_mapping);
    }

    return params;
}

AnnounceParameters *migrate_announce_params(void)
{
    static AnnounceParameters ap;

    MigrationState *s = migrate_get_current();

    ap.initial = s->parameters.announce_initial;
    ap.max = s->parameters.announce_max;
    ap.rounds = s->parameters.announce_rounds;
    ap.step = s->parameters.announce_step;

    return &ap;
}
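
/*
 * Note that the pointer returned above refers to function-local static
 * storage: the announce parameters are snapshotted at call time and the
 * function is not reentrant.
 */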

/*
 * Return true if we're already in the middle of a migration
 * (i.e. any of the active or setup states)
 */
bool migration_is_setup_or_active(int state)
{
    switch (state) {
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
    case MIGRATION_STATUS_SETUP:
    case MIGRATION_STATUS_PRE_SWITCHOVER:
    case MIGRATION_STATUS_DEVICE:
    case MIGRATION_STATUS_WAIT_UNPLUG:
    case MIGRATION_STATUS_COLO:
        return true;

    default:
        return false;
    }
}

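/*
 * The state set below differs from migration_is_setup_or_active() in
 * two ways: CANCELLING counts as running, while COLO does not.
 */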
bool migration_is_running(int state)
{
    switch (state) {
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
    case MIGRATION_STATUS_SETUP:
    case MIGRATION_STATUS_PRE_SWITCHOVER:
    case MIGRATION_STATUS_DEVICE:
    case MIGRATION_STATUS_WAIT_UNPLUG:
    case MIGRATION_STATUS_CANCELLING:
        return true;

    default:
        return false;
    }
}

static bool migrate_show_downtime(MigrationState *s)
{
    return (s->state == MIGRATION_STATUS_COMPLETED) || migration_in_postcopy();
}

static void populate_time_info(MigrationInfo *info, MigrationState *s)
{
    info->has_status = true;
    info->has_setup_time = true;
    info->setup_time = s->setup_time;

    if (s->state == MIGRATION_STATUS_COMPLETED) {
        info->has_total_time = true;
        info->total_time = s->total_time;
    } else {
        info->has_total_time = true;
        info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) -
                           s->start_time;
    }

    if (migrate_show_downtime(s)) {
        info->has_downtime = true;
        info->downtime = s->downtime;
    } else {
        info->has_expected_downtime = true;
        info->expected_downtime = s->expected_downtime;
    }
}

static void populate_ram_info(MigrationInfo *info, MigrationState *s)
{
    size_t page_size = qemu_target_page_size();

    info->ram = g_malloc0(sizeof(*info->ram));
    info->ram->transferred = stat64_get(&ram_counters.transferred);
    info->ram->total = ram_bytes_total();
    info->ram->duplicate = stat64_get(&ram_counters.zero_pages);
    /* Legacy value; it is not used anymore */
    info->ram->skipped = 0;
    info->ram->normal = stat64_get(&ram_counters.normal_pages);
    info->ram->normal_bytes = info->ram->normal * page_size;
    info->ram->mbps = s->mbps;
    info->ram->dirty_sync_count =
        stat64_get(&ram_counters.dirty_sync_count);
    info->ram->dirty_sync_missed_zero_copy =
        stat64_get(&ram_counters.dirty_sync_missed_zero_copy);
    info->ram->postcopy_requests =
        stat64_get(&ram_counters.postcopy_requests);
    info->ram->page_size = page_size;
    info->ram->multifd_bytes = stat64_get(&ram_counters.multifd_bytes);
    info->ram->pages_per_second = s->pages_per_second;
    info->ram->precopy_bytes = stat64_get(&ram_counters.precopy_bytes);
    info->ram->downtime_bytes = stat64_get(&ram_counters.downtime_bytes);
    info->ram->postcopy_bytes = stat64_get(&ram_counters.postcopy_bytes);

    if (migrate_use_xbzrle()) {
        info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
        info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size();
        info->xbzrle_cache->bytes = xbzrle_counters.bytes;
        info->xbzrle_cache->pages = xbzrle_counters.pages;
        info->xbzrle_cache->cache_miss = xbzrle_counters.cache_miss;
        info->xbzrle_cache->cache_miss_rate = xbzrle_counters.cache_miss_rate;
        info->xbzrle_cache->encoding_rate = xbzrle_counters.encoding_rate;
        info->xbzrle_cache->overflow = xbzrle_counters.overflow;
    }

    if (migrate_use_compression()) {
        info->compression = g_malloc0(sizeof(*info->compression));
        info->compression->pages = compression_counters.pages;
        info->compression->busy = compression_counters.busy;
        info->compression->busy_rate = compression_counters.busy_rate;
        info->compression->compressed_size =
                                    compression_counters.compressed_size;
        info->compression->compression_rate =
                                    compression_counters.compression_rate;
    }

    if (cpu_throttle_active()) {
        info->has_cpu_throttle_percentage = true;
        info->cpu_throttle_percentage = cpu_throttle_get_percentage();
    }

    if (s->state != MIGRATION_STATUS_COMPLETED) {
        info->ram->remaining = ram_bytes_remaining();
        info->ram->dirty_pages_rate = ram_counters.dirty_pages_rate;
    }
}

static void populate_disk_info(MigrationInfo *info)
{
    if (blk_mig_active()) {
        info->disk = g_malloc0(sizeof(*info->disk));
        info->disk->transferred = blk_mig_bytes_transferred();
        info->disk->remaining = blk_mig_bytes_remaining();
        info->disk->total = blk_mig_bytes_total();
    }
}

static void fill_source_migration_info(MigrationInfo *info)
{
    MigrationState *s = migrate_get_current();
    int state = qatomic_read(&s->state);
    GSList *cur_blocker = migration_blockers;

    info->blocked_reasons = NULL;

    /*
     * There are two types of reasons a migration might be blocked:
     * a) devices marked in VMState as non-migratable, and
     * b) explicit migration blockers.
     * We need to add both of them here.
     */
    qemu_savevm_non_migratable_list(&info->blocked_reasons);

    while (cur_blocker) {
        QAPI_LIST_PREPEND(info->blocked_reasons,
                          g_strdup(error_get_pretty(cur_blocker->data)));
        cur_blocker = g_slist_next(cur_blocker);
    }
    info->has_blocked_reasons = info->blocked_reasons != NULL;

    switch (state) {
    case MIGRATION_STATUS_NONE:
        /* No migration has ever happened; do not overwrite the
         * destination migration status. */
        return;
    case MIGRATION_STATUS_SETUP:
        info->has_status = true;
        info->has_total_time = false;
        break;
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_CANCELLING:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_PRE_SWITCHOVER:
    case MIGRATION_STATUS_DEVICE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
        /* TODO add some postcopy stats */
        populate_time_info(info, s);
        populate_ram_info(info, s);
        populate_disk_info(info);
        populate_vfio_info(info);
        break;
    case MIGRATION_STATUS_COLO:
        info->has_status = true;
        /* TODO: display COLO specific information (checkpoint info etc.) */
        break;
    case MIGRATION_STATUS_COMPLETED:
        populate_time_info(info, s);
        populate_ram_info(info, s);
        populate_vfio_info(info);
        break;
    case MIGRATION_STATUS_FAILED:
        info->has_status = true;
        if (s->error) {
            info->error_desc = g_strdup(error_get_pretty(s->error));
        }
        break;
    case MIGRATION_STATUS_CANCELLED:
        info->has_status = true;
        break;
    case MIGRATION_STATUS_WAIT_UNPLUG:
        info->has_status = true;
        break;
    }
    info->status = state;
}

typedef enum WriteTrackingSupport {
    WT_SUPPORT_UNKNOWN = 0,
    WT_SUPPORT_ABSENT,
    WT_SUPPORT_AVAILABLE,
    WT_SUPPORT_COMPATIBLE
} WriteTrackingSupport;
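
/*
 * The enumerators above are ordered by increasing level of support,
 * which lets callers use ordered comparisons such as
 *     wt_support < WT_SUPPORT_AVAILABLE
 * as migrate_caps_check() does below.
 */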

static
WriteTrackingSupport migrate_query_write_tracking(void)
{
    /* Check if the kernel supports the required UFFD features */
    if (!ram_write_tracking_available()) {
        return WT_SUPPORT_ABSENT;
    }
    /*
     * Check if the current memory configuration is
     * compatible with the required UFFD features.
     */
    if (!ram_write_tracking_compatible()) {
        return WT_SUPPORT_AVAILABLE;
    }

    return WT_SUPPORT_COMPATIBLE;
}

/**
 * @migration_caps_check - check capability compatibility
 *
 * @old_caps: old capability list
 * @new_caps: new capability list
 * @errp: set *errp if the check failed, with reason
 *
 * Returns true if the check passed, otherwise false.
 */
static bool migrate_caps_check(bool *old_caps, bool *new_caps, Error **errp)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

#ifndef CONFIG_LIVE_BLOCK_MIGRATION
    if (new_caps[MIGRATION_CAPABILITY_BLOCK]) {
        error_setg(errp, "QEMU compiled without old-style (blk/-b, inc/-i) "
                   "block migration");
        error_append_hint(errp, "Use drive_mirror+NBD instead.\n");
        return false;
    }
#endif

#ifndef CONFIG_REPLICATION
    if (new_caps[MIGRATION_CAPABILITY_X_COLO]) {
        error_setg(errp, "QEMU compiled without replication module"
                   " can't enable COLO");
        error_append_hint(errp, "Please enable replication before COLO.\n");
        return false;
    }
#endif

    if (new_caps[MIGRATION_CAPABILITY_POSTCOPY_RAM]) {
        /*
         * This check is reasonably expensive, so only perform it when
         * the capability is being set for the first time; also, only
         * the destination needs the special support.
         */
        if (!old_caps[MIGRATION_CAPABILITY_POSTCOPY_RAM] &&
            runstate_check(RUN_STATE_INMIGRATE) &&
            !postcopy_ram_supported_by_host(mis)) {
            /*
             * postcopy_ram_supported_by_host() will have emitted a more
             * detailed message.
             */
            error_setg(errp, "Postcopy is not supported");
            return false;
        }

        if (new_caps[MIGRATION_CAPABILITY_X_IGNORE_SHARED]) {
            error_setg(errp, "Postcopy is not compatible with ignore-shared");
            return false;
        }
    }

    if (new_caps[MIGRATION_CAPABILITY_BACKGROUND_SNAPSHOT]) {
        WriteTrackingSupport wt_support;
        int idx;
        /*
         * Check if the 'background-snapshot' capability is supported by
         * the host kernel and compatible with the guest memory configuration.
         */
        wt_support = migrate_query_write_tracking();
        if (wt_support < WT_SUPPORT_AVAILABLE) {
            error_setg(errp, "Background-snapshot is not supported by host kernel");
            return false;
        }
        if (wt_support < WT_SUPPORT_COMPATIBLE) {
            error_setg(errp, "Background-snapshot is not compatible "
                    "with guest memory configuration");
            return false;
        }

        /*
         * Check if there are any migration capabilities
         * incompatible with 'background-snapshot'.
         */
        for (idx = 0; idx < check_caps_background_snapshot.size; idx++) {
            int incomp_cap = check_caps_background_snapshot.caps[idx];
            if (new_caps[incomp_cap]) {
                error_setg(errp,
                        "Background-snapshot is not compatible with %s",
                        MigrationCapability_str(incomp_cap));
                return false;
            }
        }
    }

#ifdef CONFIG_LINUX
    if (new_caps[MIGRATION_CAPABILITY_ZERO_COPY_SEND] &&
        (!new_caps[MIGRATION_CAPABILITY_MULTIFD] ||
         new_caps[MIGRATION_CAPABILITY_COMPRESS] ||
         new_caps[MIGRATION_CAPABILITY_XBZRLE] ||
         migrate_multifd_compression() ||
         migrate_use_tls())) {
        error_setg(errp,
                   "Zero copy only available for non-compressed non-TLS multifd migration");
        return false;
    }
#else
    if (new_caps[MIGRATION_CAPABILITY_ZERO_COPY_SEND]) {
        error_setg(errp,
                   "Zero copy currently only available on Linux");
        return false;
    }
#endif

    if (new_caps[MIGRATION_CAPABILITY_POSTCOPY_PREEMPT]) {
        if (!new_caps[MIGRATION_CAPABILITY_POSTCOPY_RAM]) {
            error_setg(errp, "Postcopy preempt requires postcopy-ram");
            return false;
        }

        /*
         * Preempt mode requires urgent pages to be sent in a separate
         * channel; OTOH compression logic will disorder all pages into
         * different compression channels, which is not compatible with the
         * preempt assumptions on channel assignments.
         */
        if (new_caps[MIGRATION_CAPABILITY_COMPRESS]) {
            error_setg(errp, "Postcopy preempt not compatible with compress");
            return false;
        }
    }

    if (new_caps[MIGRATION_CAPABILITY_MULTIFD]) {
        if (new_caps[MIGRATION_CAPABILITY_COMPRESS]) {
            error_setg(errp, "Multifd is not compatible with compress");
            return false;
        }
    }

    return true;
}

static void fill_destination_migration_info(MigrationInfo *info)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    if (mis->socket_address_list) {
        info->has_socket_address = true;
        info->socket_address =
            QAPI_CLONE(SocketAddressList, mis->socket_address_list);
    }

    switch (mis->state) {
    case MIGRATION_STATUS_NONE:
        return;
    case MIGRATION_STATUS_SETUP:
    case MIGRATION_STATUS_CANCELLING:
    case MIGRATION_STATUS_CANCELLED:
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
    case MIGRATION_STATUS_FAILED:
    case MIGRATION_STATUS_COLO:
        info->has_status = true;
        break;
    case MIGRATION_STATUS_COMPLETED:
        info->has_status = true;
        fill_destination_postcopy_migration_info(info);
        break;
    }
    info->status = mis->state;
}

MigrationInfo *qmp_query_migrate(Error **errp)
{
    MigrationInfo *info = g_malloc0(sizeof(*info));

    fill_destination_migration_info(info);
    fill_source_migration_info(info);

    return info;
}

void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
                                  Error **errp)
{
    MigrationState *s = migrate_get_current();
    MigrationCapabilityStatusList *cap;
    bool new_caps[MIGRATION_CAPABILITY__MAX];

    if (migration_is_running(s->state)) {
        error_setg(errp, QERR_MIGRATION_ACTIVE);
        return;
    }

    memcpy(new_caps, s->capabilities, sizeof(new_caps));
    for (cap = params; cap; cap = cap->next) {
        new_caps[cap->value->capability] = cap->value->state;
    }

    if (!migrate_caps_check(s->capabilities, new_caps, errp)) {
        return;
    }

    for (cap = params; cap; cap = cap->next) {
        s->capabilities[cap->value->capability] = cap->value->state;
    }
}
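
/*
 * Note the two-pass structure above: the requested changes are first
 * applied to a scratch copy and validated with migrate_caps_check(),
 * so a failed check leaves s->capabilities untouched.
 */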
1470 
1471 /*
1472  * Check whether the parameters are valid. Error will be put into errp
1473  * (if provided). Return true if valid, otherwise false.
1474  */
1475 static bool migrate_params_check(MigrationParameters *params, Error **errp)
1476 {
1477     if (params->has_compress_level &&
1478         (params->compress_level > 9)) {
1479         error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_level",
1480                    "a value between 0 and 9");
1481         return false;
1482     }
1483 
1484     if (params->has_compress_threads && (params->compress_threads < 1)) {
1485         error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
1486                    "compress_threads",
1487                    "a value between 1 and 255");
1488         return false;
1489     }
1490 
1491     if (params->has_decompress_threads && (params->decompress_threads < 1)) {
1492         error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
1493                    "decompress_threads",
1494                    "a value between 1 and 255");
1495         return false;
1496     }
1497 
1498     if (params->has_throttle_trigger_threshold &&
1499         (params->throttle_trigger_threshold < 1 ||
1500          params->throttle_trigger_threshold > 100)) {
1501         error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
1502                    "throttle_trigger_threshold",
1503                    "an integer in the range of 1 to 100");
1504         return false;
1505     }
1506 
1507     if (params->has_cpu_throttle_initial &&
1508         (params->cpu_throttle_initial < 1 ||
1509          params->cpu_throttle_initial > 99)) {
1510         error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
1511                    "cpu_throttle_initial",
1512                    "an integer in the range of 1 to 99");
1513         return false;
1514     }
1515 
1516     if (params->has_cpu_throttle_increment &&
1517         (params->cpu_throttle_increment < 1 ||
1518          params->cpu_throttle_increment > 99)) {
1519         error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
1520                    "cpu_throttle_increment",
1521                    "an integer in the range of 1 to 99");
1522         return false;
1523     }
1524 
1525     if (params->has_max_bandwidth && (params->max_bandwidth > SIZE_MAX)) {
1526         error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
1527                    "max_bandwidth",
1528                    "an integer in the range of 0 to "stringify(SIZE_MAX)
1529                    " bytes/second");
1530         return false;
1531     }
1532 
1533     if (params->has_downtime_limit &&
1534         (params->downtime_limit > MAX_MIGRATE_DOWNTIME)) {
1535         error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
1536                    "downtime_limit",
1537                    "an integer in the range of 0 to "
1538                     stringify(MAX_MIGRATE_DOWNTIME)" ms");
1539         return false;
1540     }
1541 
1542     /* x_checkpoint_delay is now always positive */
1543 
1544     if (params->has_multifd_channels && (params->multifd_channels < 1)) {
1545         error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
1546                    "multifd_channels",
1547                    "a value between 1 and 255");
1548         return false;
1549     }
1550 
1551     if (params->has_multifd_zlib_level &&
1552         (params->multifd_zlib_level > 9)) {
1553         error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "multifd_zlib_level",
1554                    "a value between 0 and 9");
1555         return false;
1556     }
1557 
1558     if (params->has_multifd_zstd_level &&
1559         (params->multifd_zstd_level > 20)) {
1560         error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "multifd_zstd_level",
1561                    "a value between 0 and 20");
1562         return false;
1563     }
1564 
1565     if (params->has_xbzrle_cache_size &&
1566         (params->xbzrle_cache_size < qemu_target_page_size() ||
1567          !is_power_of_2(params->xbzrle_cache_size))) {
1568         error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
1569                    "xbzrle_cache_size",
1570                    "a power of two no less than the target page size");
1571         return false;
1572     }
1573 
1574     if (params->has_max_cpu_throttle &&
1575         (params->max_cpu_throttle < params->cpu_throttle_initial ||
1576          params->max_cpu_throttle > 99)) {
1577         error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
1578                    "max_cpu_throttle",
1579                    "an integer in the range of cpu_throttle_initial to 99");
1580         return false;
1581     }
1582 
1583     if (params->has_announce_initial &&
1584         params->announce_initial > 100000) {
1585         error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
1586                    "announce_initial",
1587                    "a value between 0 and 100000");
1588         return false;
1589     }
1590     if (params->has_announce_max &&
1591         params->announce_max > 100000) {
1592         error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
1593                    "announce_max",
1594                    "a value between 0 and 100000");
1595         return false;
1596     }
1597     if (params->has_announce_rounds &&
1598         params->announce_rounds > 1000) {
1599         error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
1600                    "announce_rounds",
1601                    "a value between 0 and 1000");
1602         return false;
1603     }
1604     if (params->has_announce_step &&
1605         (params->announce_step < 1 ||
1606         params->announce_step > 10000)) {
1607         error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
1608                    "announce_step",
1609                    "a value between 1 and 10000");
1610         return false;
1611     }
1612 
1613     if (params->has_block_bitmap_mapping &&
1614         !check_dirty_bitmap_mig_alias_map(params->block_bitmap_mapping, errp)) {
1615         error_prepend(errp, "Invalid mapping given for block-bitmap-mapping: ");
1616         return false;
1617     }
1618 
1619 #ifdef CONFIG_LINUX
1620     if (migrate_use_zero_copy_send() &&
1621         ((params->has_multifd_compression && params->multifd_compression) ||
1622          (params->tls_creds && *params->tls_creds))) {
1623         error_setg(errp,
1624                    "Zero copy only available for non-compressed non-TLS multifd migration");
1625         return false;
1626     }
1627 #endif
1628 
1629     return true;
1630 }
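
/*
 * Illustrative example: a QMP client that sends
 *
 *     { "execute": "migrate-set-parameters",
 *       "arguments": { "compress-level": 42 } }
 *
 * fails the compress_level check above and gets back
 * "Parameter 'compress_level' expects a value between 0 and 9",
 * leaving the live parameters untouched.
 */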
1631 
1632 static void migrate_params_test_apply(MigrateSetParameters *params,
1633                                       MigrationParameters *dest)
1634 {
1635     *dest = migrate_get_current()->parameters;
1636 
1637     /* TODO use QAPI_CLONE() instead of duplicating it inline */
1638 
1639     if (params->has_compress_level) {
1640         dest->compress_level = params->compress_level;
1641     }
1642 
1643     if (params->has_compress_threads) {
1644         dest->compress_threads = params->compress_threads;
1645     }
1646 
1647     if (params->has_compress_wait_thread) {
1648         dest->compress_wait_thread = params->compress_wait_thread;
1649     }
1650 
1651     if (params->has_decompress_threads) {
1652         dest->decompress_threads = params->decompress_threads;
1653     }
1654 
1655     if (params->has_throttle_trigger_threshold) {
1656         dest->throttle_trigger_threshold = params->throttle_trigger_threshold;
1657     }
1658 
1659     if (params->has_cpu_throttle_initial) {
1660         dest->cpu_throttle_initial = params->cpu_throttle_initial;
1661     }
1662 
1663     if (params->has_cpu_throttle_increment) {
1664         dest->cpu_throttle_increment = params->cpu_throttle_increment;
1665     }
1666 
1667     if (params->has_cpu_throttle_tailslow) {
1668         dest->cpu_throttle_tailslow = params->cpu_throttle_tailslow;
1669     }
1670 
1671     if (params->tls_creds) {
1672         assert(params->tls_creds->type == QTYPE_QSTRING);
1673         dest->tls_creds = params->tls_creds->u.s;
1674     }
1675 
1676     if (params->tls_hostname) {
1677         assert(params->tls_hostname->type == QTYPE_QSTRING);
1678         dest->tls_hostname = params->tls_hostname->u.s;
1679     }
1680 
1681     if (params->has_max_bandwidth) {
1682         dest->max_bandwidth = params->max_bandwidth;
1683     }
1684 
1685     if (params->has_downtime_limit) {
1686         dest->downtime_limit = params->downtime_limit;
1687     }
1688 
1689     if (params->has_x_checkpoint_delay) {
1690         dest->x_checkpoint_delay = params->x_checkpoint_delay;
1691     }
1692 
1693     if (params->has_block_incremental) {
1694         dest->block_incremental = params->block_incremental;
1695     }
1696     if (params->has_multifd_channels) {
1697         dest->multifd_channels = params->multifd_channels;
1698     }
1699     if (params->has_multifd_compression) {
1700         dest->multifd_compression = params->multifd_compression;
1701     }
1702     if (params->has_xbzrle_cache_size) {
1703         dest->xbzrle_cache_size = params->xbzrle_cache_size;
1704     }
1705     if (params->has_max_postcopy_bandwidth) {
1706         dest->max_postcopy_bandwidth = params->max_postcopy_bandwidth;
1707     }
1708     if (params->has_max_cpu_throttle) {
1709         dest->max_cpu_throttle = params->max_cpu_throttle;
1710     }
1711     if (params->has_announce_initial) {
1712         dest->announce_initial = params->announce_initial;
1713     }
1714     if (params->has_announce_max) {
1715         dest->announce_max = params->announce_max;
1716     }
1717     if (params->has_announce_rounds) {
1718         dest->announce_rounds = params->announce_rounds;
1719     }
1720     if (params->has_announce_step) {
1721         dest->announce_step = params->announce_step;
1722     }
1723 
1724     if (params->has_block_bitmap_mapping) {
1725         dest->has_block_bitmap_mapping = true;
1726         dest->block_bitmap_mapping = params->block_bitmap_mapping;
1727     }
1728 }
1729 
1730 static void migrate_params_apply(MigrateSetParameters *params, Error **errp)
1731 {
1732     MigrationState *s = migrate_get_current();
1733 
1734     /* TODO use QAPI_CLONE() instead of duplicating it inline */
1735 
1736     if (params->has_compress_level) {
1737         s->parameters.compress_level = params->compress_level;
1738     }
1739 
1740     if (params->has_compress_threads) {
1741         s->parameters.compress_threads = params->compress_threads;
1742     }
1743 
1744     if (params->has_compress_wait_thread) {
1745         s->parameters.compress_wait_thread = params->compress_wait_thread;
1746     }
1747 
1748     if (params->has_decompress_threads) {
1749         s->parameters.decompress_threads = params->decompress_threads;
1750     }
1751 
1752     if (params->has_throttle_trigger_threshold) {
1753         s->parameters.throttle_trigger_threshold = params->throttle_trigger_threshold;
1754     }
1755 
1756     if (params->has_cpu_throttle_initial) {
1757         s->parameters.cpu_throttle_initial = params->cpu_throttle_initial;
1758     }
1759 
1760     if (params->has_cpu_throttle_increment) {
1761         s->parameters.cpu_throttle_increment = params->cpu_throttle_increment;
1762     }
1763 
1764     if (params->has_cpu_throttle_tailslow) {
1765         s->parameters.cpu_throttle_tailslow = params->cpu_throttle_tailslow;
1766     }
1767 
1768     if (params->tls_creds) {
1769         g_free(s->parameters.tls_creds);
1770         assert(params->tls_creds->type == QTYPE_QSTRING);
1771         s->parameters.tls_creds = g_strdup(params->tls_creds->u.s);
1772     }
1773 
1774     if (params->tls_hostname) {
1775         g_free(s->parameters.tls_hostname);
1776         assert(params->tls_hostname->type == QTYPE_QSTRING);
1777         s->parameters.tls_hostname = g_strdup(params->tls_hostname->u.s);
1778     }
1779 
1780     if (params->tls_authz) {
1781         g_free(s->parameters.tls_authz);
1782         assert(params->tls_authz->type == QTYPE_QSTRING);
1783         s->parameters.tls_authz = g_strdup(params->tls_authz->u.s);
1784     }
1785 
1786     if (params->has_max_bandwidth) {
1787         s->parameters.max_bandwidth = params->max_bandwidth;
1788         if (s->to_dst_file && !migration_in_postcopy()) {
1789             qemu_file_set_rate_limit(s->to_dst_file,
1790                                 s->parameters.max_bandwidth / XFER_LIMIT_RATIO);
1791         }
1792     }
1793 
1794     if (params->has_downtime_limit) {
1795         s->parameters.downtime_limit = params->downtime_limit;
1796     }
1797 
1798     if (params->has_x_checkpoint_delay) {
1799         s->parameters.x_checkpoint_delay = params->x_checkpoint_delay;
1800         if (migration_in_colo_state()) {
1801             colo_checkpoint_notify(s);
1802         }
1803     }
1804 
1805     if (params->has_block_incremental) {
1806         s->parameters.block_incremental = params->block_incremental;
1807     }
1808     if (params->has_multifd_channels) {
1809         s->parameters.multifd_channels = params->multifd_channels;
1810     }
1811     if (params->has_multifd_compression) {
1812         s->parameters.multifd_compression = params->multifd_compression;
1813     }
1814     if (params->has_xbzrle_cache_size) {
1815         s->parameters.xbzrle_cache_size = params->xbzrle_cache_size;
1816         xbzrle_cache_resize(params->xbzrle_cache_size, errp);
1817     }
1818     if (params->has_max_postcopy_bandwidth) {
1819         s->parameters.max_postcopy_bandwidth = params->max_postcopy_bandwidth;
1820         if (s->to_dst_file && migration_in_postcopy()) {
1821             qemu_file_set_rate_limit(s->to_dst_file,
1822                     s->parameters.max_postcopy_bandwidth / XFER_LIMIT_RATIO);
1823         }
1824     }
1825     if (params->has_max_cpu_throttle) {
1826         s->parameters.max_cpu_throttle = params->max_cpu_throttle;
1827     }
1828     if (params->has_announce_initial) {
1829         s->parameters.announce_initial = params->announce_initial;
1830     }
1831     if (params->has_announce_max) {
1832         s->parameters.announce_max = params->announce_max;
1833     }
1834     if (params->has_announce_rounds) {
1835         s->parameters.announce_rounds = params->announce_rounds;
1836     }
1837     if (params->has_announce_step) {
1838         s->parameters.announce_step = params->announce_step;
1839     }
1840 
1841     if (params->has_block_bitmap_mapping) {
1842         qapi_free_BitmapMigrationNodeAliasList(
1843             s->parameters.block_bitmap_mapping);
1844 
1845         s->parameters.has_block_bitmap_mapping = true;
1846         s->parameters.block_bitmap_mapping =
1847             QAPI_CLONE(BitmapMigrationNodeAliasList,
1848                        params->block_bitmap_mapping);
1849     }
1850 }
1851 
1852 void qmp_migrate_set_parameters(MigrateSetParameters *params, Error **errp)
1853 {
1854     MigrationParameters tmp;
1855 
1856     /* TODO Rewrite "" to null instead */
1857     if (params->tls_creds
1858         && params->tls_creds->type == QTYPE_QNULL) {
1859         qobject_unref(params->tls_creds->u.n);
1860         params->tls_creds->type = QTYPE_QSTRING;
1861         params->tls_creds->u.s = strdup("");
1862     }
1863     /* TODO Rewrite "" to null instead */
1864     if (params->tls_hostname
1865         && params->tls_hostname->type == QTYPE_QNULL) {
1866         qobject_unref(params->tls_hostname->u.n);
1867         params->tls_hostname->type = QTYPE_QSTRING;
1868         params->tls_hostname->u.s = strdup("");
1869     }
1870 
1871     migrate_params_test_apply(params, &tmp);
1872 
1873     if (!migrate_params_check(&tmp, errp)) {
1874         /* Invalid parameter */
1875         return;
1876     }
1877 
1878     migrate_params_apply(params, errp);
1879 }
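
/*
 * Note the two-phase pattern above: migrate_params_test_apply() merges
 * the request into a scratch copy, migrate_params_check() validates it,
 * and only then does migrate_params_apply() touch the live state, so a
 * rejected request has no side effects.  Illustrative QMP usage:
 *
 *     { "execute": "migrate-set-parameters",
 *       "arguments": { "max-bandwidth": 33554432,
 *                      "downtime-limit": 500 } }
 */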
1880 
1881 
1882 void qmp_migrate_start_postcopy(Error **errp)
1883 {
1884     MigrationState *s = migrate_get_current();
1885 
1886     if (!migrate_postcopy()) {
1887         error_setg(errp, "Enable postcopy with migrate_set_capability before"
1888                          " the start of migration");
1889         return;
1890     }
1891 
1892     if (s->state == MIGRATION_STATUS_NONE) {
1893         error_setg(errp, "Postcopy must be started after migration has been"
1894                          " started");
1895         return;
1896     }
1897     /*
1898      * We don't error if migration has finished, since that would be racy
1899      * with issuing this command.
1900      */
1901     qatomic_set(&s->start_postcopy, true);
1902 }
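
/*
 * Illustrative QMP sequence for a postcopy switchover; the capability
 * must be set (on both sides) before the migration starts:
 *
 *     { "execute": "migrate-set-capabilities",
 *       "arguments": { "capabilities":
 *           [ { "capability": "postcopy-ram", "state": true } ] } }
 *     { "execute": "migrate", "arguments": { "uri": "tcp:..." } }
 *     { "execute": "migrate-start-postcopy" }
 */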
1903 
1904 /* shared migration helpers */
1905 
1906 void migrate_set_state(int *state, int old_state, int new_state)
1907 {
1908     assert(new_state < MIGRATION_STATUS__MAX);
1909     if (qatomic_cmpxchg(state, old_state, new_state) == old_state) {
1910         trace_migrate_set_state(MigrationStatus_str(new_state));
1911         migrate_generate_event(new_state);
1912     }
1913 }
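
/*
 * Because migrate_set_state() is a compare-and-swap, a transition that
 * races with another state change is silently dropped; e.g.
 *
 *     migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
 *                       MIGRATION_STATUS_FAILED);
 *
 * only takes effect (tracing and emitting the state-change event) if
 * the state is still SETUP when the cmpxchg runs.
 */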
1914 
1915 static MigrationCapabilityStatus *migrate_cap_add(MigrationCapability index,
1916                                                   bool state)
1917 {
1918     MigrationCapabilityStatus *cap;
1919 
1920     cap = g_new0(MigrationCapabilityStatus, 1);
1921     cap->capability = index;
1922     cap->state = state;
1923 
1924     return cap;
1925 }
1926 
1927 void migrate_set_block_enabled(bool value, Error **errp)
1928 {
1929     MigrationCapabilityStatusList *cap = NULL;
1930 
1931     QAPI_LIST_PREPEND(cap, migrate_cap_add(MIGRATION_CAPABILITY_BLOCK, value));
1932     qmp_migrate_set_capabilities(cap, errp);
1933     qapi_free_MigrationCapabilityStatusList(cap);
1934 }
1935 
1936 static void migrate_set_block_incremental(MigrationState *s, bool value)
1937 {
1938     s->parameters.block_incremental = value;
1939 }
1940 
1941 static void block_cleanup_parameters(MigrationState *s)
1942 {
1943     if (s->must_remove_block_options) {
1944         /* setting to false can never fail */
1945         migrate_set_block_enabled(false, &error_abort);
1946         migrate_set_block_incremental(s, false);
1947         s->must_remove_block_options = false;
1948     }
1949 }
1950 
1951 static void migrate_fd_cleanup(MigrationState *s)
1952 {
1953     qemu_bh_delete(s->cleanup_bh);
1954     s->cleanup_bh = NULL;
1955 
1956     g_free(s->hostname);
1957     s->hostname = NULL;
1958     json_writer_free(s->vmdesc);
1959     s->vmdesc = NULL;
1960 
1961     qemu_savevm_state_cleanup();
1962 
1963     if (s->to_dst_file) {
1964         QEMUFile *tmp;
1965 
1966         trace_migrate_fd_cleanup();
1967         qemu_mutex_unlock_iothread();
1968         if (s->migration_thread_running) {
1969             qemu_thread_join(&s->thread);
1970             s->migration_thread_running = false;
1971         }
1972         qemu_mutex_lock_iothread();
1973 
1974         multifd_save_cleanup();
1975         qemu_mutex_lock(&s->qemu_file_lock);
1976         tmp = s->to_dst_file;
1977         s->to_dst_file = NULL;
1978         qemu_mutex_unlock(&s->qemu_file_lock);
1979         /*
1980          * Close the file handle without the lock to make sure the
1981          * critical section won't block for long.
1982          */
1983         migration_ioc_unregister_yank_from_file(tmp);
1984         qemu_fclose(tmp);
1985     }
1986 
1987     if (s->postcopy_qemufile_src) {
1988         migration_ioc_unregister_yank_from_file(s->postcopy_qemufile_src);
1989         qemu_fclose(s->postcopy_qemufile_src);
1990         s->postcopy_qemufile_src = NULL;
1991     }
1992 
1993     assert(!migration_is_active(s));
1994 
1995     if (s->state == MIGRATION_STATUS_CANCELLING) {
1996         migrate_set_state(&s->state, MIGRATION_STATUS_CANCELLING,
1997                           MIGRATION_STATUS_CANCELLED);
1998     }
1999 
2000     if (s->error) {
2001         /* It is used on info migrate.  We can't free it */
2002         error_report_err(error_copy(s->error));
2003     }
2004     notifier_list_notify(&migration_state_notifiers, s);
2005     block_cleanup_parameters(s);
2006     yank_unregister_instance(MIGRATION_YANK_INSTANCE);
2007 }
2008 
2009 static void migrate_fd_cleanup_schedule(MigrationState *s)
2010 {
2011     /*
2012      * Take a ref on the state for the bh, because the bh may run when
2013      * there are already no other refs
2014      */
2015     object_ref(OBJECT(s));
2016     qemu_bh_schedule(s->cleanup_bh);
2017 }
2018 
2019 static void migrate_fd_cleanup_bh(void *opaque)
2020 {
2021     MigrationState *s = opaque;
2022     migrate_fd_cleanup(s);
2023     object_unref(OBJECT(s));
2024 }
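
/*
 * migrate_fd_cleanup_schedule() and migrate_fd_cleanup_bh() form a
 * pair: the object_ref() taken before scheduling keeps the
 * MigrationState alive until the bottom half has run, and the
 * object_unref() above drops that reference once cleanup is done.
 */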
2025 
2026 void migrate_set_error(MigrationState *s, const Error *error)
2027 {
2028     QEMU_LOCK_GUARD(&s->error_mutex);
2029     if (!s->error) {
2030         s->error = error_copy(error);
2031     }
2032 }
2033 
2034 static void migrate_error_free(MigrationState *s)
2035 {
2036     QEMU_LOCK_GUARD(&s->error_mutex);
2037     if (s->error) {
2038         error_free(s->error);
2039         s->error = NULL;
2040     }
2041 }
2042 
2043 void migrate_fd_error(MigrationState *s, const Error *error)
2044 {
2045     trace_migrate_fd_error(error_get_pretty(error));
2046     assert(s->to_dst_file == NULL);
2047     migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
2048                       MIGRATION_STATUS_FAILED);
2049     migrate_set_error(s, error);
2050 }
2051 
2052 static void migrate_fd_cancel(MigrationState *s)
2053 {
2054     int old_state;
2055     QEMUFile *f = migrate_get_current()->to_dst_file;
2056     trace_migrate_fd_cancel();
2057 
2058     WITH_QEMU_LOCK_GUARD(&s->qemu_file_lock) {
2059         if (s->rp_state.from_dst_file) {
2060             /* Shut down the rp socket, causing the rp thread to exit */
2061             qemu_file_shutdown(s->rp_state.from_dst_file);
2062         }
2063     }
2064 
2065     do {
2066         old_state = s->state;
2067         if (!migration_is_running(old_state)) {
2068             break;
2069         }
2070         /* If the migration is paused, kick it out of the pause */
2071         if (old_state == MIGRATION_STATUS_PRE_SWITCHOVER) {
2072             qemu_sem_post(&s->pause_sem);
2073         }
2074         migrate_set_state(&s->state, old_state, MIGRATION_STATUS_CANCELLING);
2075     } while (s->state != MIGRATION_STATUS_CANCELLING);
2076 
2077     /*
2078      * If we're unlucky the migration code might be stuck somewhere in a
2079      * send/write while the network has failed and is waiting to timeout;
2080      * if we've got shutdown(2) available then we can force it to quit.
2081      * The outgoing qemu file gets closed in migrate_fd_cleanup that is
2082      * called in a bh, so there is no race against this cancel.
2083      */
2084     if (s->state == MIGRATION_STATUS_CANCELLING && f) {
2085         qemu_file_shutdown(f);
2086     }
2087     if (s->state == MIGRATION_STATUS_CANCELLING && s->block_inactive) {
2088         Error *local_err = NULL;
2089 
2090         bdrv_activate_all(&local_err);
2091         if (local_err) {
2092             error_report_err(local_err);
2093         } else {
2094             s->block_inactive = false;
2095         }
2096     }
2097 }
2098 
2099 void add_migration_state_change_notifier(Notifier *notify)
2100 {
2101     notifier_list_add(&migration_state_notifiers, notify);
2102 }
2103 
2104 void remove_migration_state_change_notifier(Notifier *notify)
2105 {
2106     notifier_remove(notify);
2107 }
2108 
2109 bool migration_in_setup(MigrationState *s)
2110 {
2111     return s->state == MIGRATION_STATUS_SETUP;
2112 }
2113 
2114 bool migration_has_finished(MigrationState *s)
2115 {
2116     return s->state == MIGRATION_STATUS_COMPLETED;
2117 }
2118 
2119 bool migration_has_failed(MigrationState *s)
2120 {
2121     return (s->state == MIGRATION_STATUS_CANCELLED ||
2122             s->state == MIGRATION_STATUS_FAILED);
2123 }
2124 
2125 bool migration_in_postcopy(void)
2126 {
2127     MigrationState *s = migrate_get_current();
2128 
2129     switch (s->state) {
2130     case MIGRATION_STATUS_POSTCOPY_ACTIVE:
2131     case MIGRATION_STATUS_POSTCOPY_PAUSED:
2132     case MIGRATION_STATUS_POSTCOPY_RECOVER:
2133         return true;
2134     default:
2135         return false;
2136     }
2137 }
2138 
2139 bool migration_in_postcopy_after_devices(MigrationState *s)
2140 {
2141     return migration_in_postcopy() && s->postcopy_after_devices;
2142 }
2143 
2144 bool migration_in_incoming_postcopy(void)
2145 {
2146     PostcopyState ps = postcopy_state_get();
2147 
2148     return ps >= POSTCOPY_INCOMING_DISCARD && ps < POSTCOPY_INCOMING_END;
2149 }
2150 
2151 bool migration_incoming_postcopy_advised(void)
2152 {
2153     PostcopyState ps = postcopy_state_get();
2154 
2155     return ps >= POSTCOPY_INCOMING_ADVISE && ps < POSTCOPY_INCOMING_END;
2156 }
2157 
2158 bool migration_in_bg_snapshot(void)
2159 {
2160     MigrationState *s = migrate_get_current();
2161 
2162     return migrate_background_snapshot() &&
2163             migration_is_setup_or_active(s->state);
2164 }
2165 
2166 bool migration_is_idle(void)
2167 {
2168     MigrationState *s = current_migration;
2169 
2170     if (!s) {
2171         return true;
2172     }
2173 
2174     switch (s->state) {
2175     case MIGRATION_STATUS_NONE:
2176     case MIGRATION_STATUS_CANCELLED:
2177     case MIGRATION_STATUS_COMPLETED:
2178     case MIGRATION_STATUS_FAILED:
2179         return true;
2180     case MIGRATION_STATUS_SETUP:
2181     case MIGRATION_STATUS_CANCELLING:
2182     case MIGRATION_STATUS_ACTIVE:
2183     case MIGRATION_STATUS_POSTCOPY_ACTIVE:
2184     case MIGRATION_STATUS_COLO:
2185     case MIGRATION_STATUS_PRE_SWITCHOVER:
2186     case MIGRATION_STATUS_DEVICE:
2187     case MIGRATION_STATUS_WAIT_UNPLUG:
2188         return false;
2189     case MIGRATION_STATUS__MAX:
2190         g_assert_not_reached();
2191     }
2192 
2193     return false;
2194 }
2195 
2196 bool migration_is_active(MigrationState *s)
2197 {
2198     return (s->state == MIGRATION_STATUS_ACTIVE ||
2199             s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
2200 }
2201 
2202 void migrate_init(MigrationState *s)
2203 {
2204     /*
2205      * Reinitialise all migration state, except
2206      * parameters/capabilities that the user set, and
2207      * locks.
2208      */
2209     s->cleanup_bh = 0;
2210     s->vm_start_bh = 0;
2211     s->to_dst_file = NULL;
2212     s->state = MIGRATION_STATUS_NONE;
2213     s->rp_state.from_dst_file = NULL;
2214     s->rp_state.error = false;
2215     s->mbps = 0.0;
2216     s->pages_per_second = 0.0;
2217     s->downtime = 0;
2218     s->expected_downtime = 0;
2219     s->setup_time = 0;
2220     s->start_postcopy = false;
2221     s->postcopy_after_devices = false;
2222     s->migration_thread_running = false;
2223     error_free(s->error);
2224     s->error = NULL;
2225     s->hostname = NULL;
2226 
2227     migrate_set_state(&s->state, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP);
2228 
2229     s->start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
2230     s->total_time = 0;
2231     s->vm_was_running = false;
2232     s->iteration_initial_bytes = 0;
2233     s->threshold_size = 0;
2234 }
2235 
2236 int migrate_add_blocker_internal(Error *reason, Error **errp)
2237 {
2238     /* Snapshots are similar to migrations, so check RUN_STATE_SAVE_VM too. */
2239     if (runstate_check(RUN_STATE_SAVE_VM) || !migration_is_idle()) {
2240         error_propagate_prepend(errp, error_copy(reason),
2241                                 "disallowing migration blocker "
2242                                 "(migration/snapshot in progress) for: ");
2243         return -EBUSY;
2244     }
2245 
2246     migration_blockers = g_slist_prepend(migration_blockers, reason);
2247     return 0;
2248 }
2249 
2250 int migrate_add_blocker(Error *reason, Error **errp)
2251 {
2252     if (only_migratable) {
2253         error_propagate_prepend(errp, error_copy(reason),
2254                                 "disallowing migration blocker "
2255                                 "(--only-migratable) for: ");
2256         return -EACCES;
2257     }
2258 
2259     return migrate_add_blocker_internal(reason, errp);
2260 }
2261 
2262 void migrate_del_blocker(Error *reason)
2263 {
2264     migration_blockers = g_slist_remove(migration_blockers, reason);
2265 }
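
/*
 * Illustrative blocker usage from device code (hypothetical device):
 *
 *     static Error *mydev_mig_blocker;
 *
 *     error_setg(&mydev_mig_blocker, "mydev: migration not supported");
 *     if (migrate_add_blocker(mydev_mig_blocker, errp) < 0) {
 *         error_free(mydev_mig_blocker);
 *         mydev_mig_blocker = NULL;
 *         return;
 *     }
 *
 * paired with migrate_del_blocker(mydev_mig_blocker) plus an
 * error_free() once the device can migrate again.
 */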
2266 
2267 void qmp_migrate_incoming(const char *uri, Error **errp)
2268 {
2269     Error *local_err = NULL;
2270     static bool once = true;
2271 
2272     if (!once) {
2273         error_setg(errp, "The incoming migration has already been started");
2274         return;
2275     }
2276     if (!runstate_check(RUN_STATE_INMIGRATE)) {
2277         error_setg(errp, "'-incoming' was not specified on the command line");
2278         return;
2279     }
2280 
2281     if (!yank_register_instance(MIGRATION_YANK_INSTANCE, errp)) {
2282         return;
2283     }
2284 
2285     qemu_start_incoming_migration(uri, &local_err);
2286 
2287     if (local_err) {
2288         yank_unregister_instance(MIGRATION_YANK_INSTANCE);
2289         error_propagate(errp, local_err);
2290         return;
2291     }
2292 
2293     once = false;
2294 }
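
/*
 * qmp_migrate_incoming() pairs with "-incoming defer" on the
 * destination's command line; illustrative usage:
 *
 *     qemu-system-x86_64 ... -incoming defer
 *     { "execute": "migrate-incoming",
 *       "arguments": { "uri": "tcp:0:4444" } }
 *
 * The "once" latch above makes this a one-shot command per process.
 */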
2295 
2296 void qmp_migrate_recover(const char *uri, Error **errp)
2297 {
2298     MigrationIncomingState *mis = migration_incoming_get_current();
2299 
2300     /*
2301      * Don't even bother to use ERRP_GUARD() as it _must_ always be set by
2302      * callers (no one should ignore a recover failure); if one does, it's a
2303      * programming error.
2304      */
2305     assert(errp);
2306 
2307     if (mis->state != MIGRATION_STATUS_POSTCOPY_PAUSED) {
2308         error_setg(errp, "Migrate recover can only be run "
2309                    "when postcopy is paused.");
2310         return;
2311     }
2312 
2313     /* If there's an existing transport, release it */
2314     migration_incoming_transport_cleanup(mis);
2315 
2316     /*
2317      * Note that this call will never start a real migration; it will
2318      * only re-setup the migration stream and poke existing migration
2319      * to continue using that newly established channel.
2320      */
2321     qemu_start_incoming_migration(uri, errp);
2322 }
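
/*
 * Illustrative recovery sequence after a network failure during
 * postcopy (both sides sitting in postcopy-paused):
 *
 *     destination: { "execute": "migrate-recover",
 *                    "arguments": { "uri": "tcp:0:5555" } }
 *     source:      { "execute": "migrate",
 *                    "arguments": { "uri": "tcp:dst:5555",
 *                                   "resume": true } }
 */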
2323 
2324 void qmp_migrate_pause(Error **errp)
2325 {
2326     MigrationState *ms = migrate_get_current();
2327     MigrationIncomingState *mis = migration_incoming_get_current();
2328     int ret;
2329 
2330     if (ms->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
2331         /* Source side, during postcopy */
2332         qemu_mutex_lock(&ms->qemu_file_lock);
2333         ret = qemu_file_shutdown(ms->to_dst_file);
2334         qemu_mutex_unlock(&ms->qemu_file_lock);
2335         if (ret) {
2336             error_setg(errp, "Failed to pause source migration");
2337         }
2338         return;
2339     }
2340 
2341     if (mis->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
2342         ret = qemu_file_shutdown(mis->from_src_file);
2343         if (ret) {
2344             error_setg(errp, "Failed to pause destination migration");
2345         }
2346         return;
2347     }
2348 
2349     error_setg(errp, "migrate-pause is currently only supported "
2350                "during postcopy-active state");
2351 }
2352 
2353 bool migration_is_blocked(Error **errp)
2354 {
2355     if (qemu_savevm_state_blocked(errp)) {
2356         return true;
2357     }
2358 
2359     if (migration_blockers) {
2360         error_propagate(errp, error_copy(migration_blockers->data));
2361         return true;
2362     }
2363 
2364     return false;
2365 }
2366 
2367 /* Returns true if we should continue the migration, or false if an error was detected */
2368 static bool migrate_prepare(MigrationState *s, bool blk, bool blk_inc,
2369                             bool resume, Error **errp)
2370 {
2371     Error *local_err = NULL;
2372 
2373     if (resume) {
2374         if (s->state != MIGRATION_STATUS_POSTCOPY_PAUSED) {
2375             error_setg(errp, "Cannot resume if there is no "
2376                        "paused migration");
2377             return false;
2378         }
2379 
2380         /*
2381          * Postcopy recovery won't work well with release-ram
2382          * capability, since release-ram will drop the page buffer as
2383          * soon as the page is put into the send buffer.  So if a
2384          * network failure happens, any page buffers that have not yet
2385          * reached the destination VM but have already been sent from
2386          * the source VM will be lost forever.  Let's refuse to let a
2387          * client resume such a postcopy migration.
2388          * Luckily release-ram was designed to only be used when src
2389          * and destination VMs are on the same host, so it should be
2390          * fine.
2391          */
2392         if (migrate_release_ram()) {
2393             error_setg(errp, "Postcopy recovery cannot work "
2394                        "when release-ram capability is set");
2395             return false;
2396         }
2397 
2398         /* This is a resume, skip init status */
2399         return true;
2400     }
2401 
2402     if (migration_is_running(s->state)) {
2403         error_setg(errp, QERR_MIGRATION_ACTIVE);
2404         return false;
2405     }
2406 
2407     if (runstate_check(RUN_STATE_INMIGRATE)) {
2408         error_setg(errp, "Guest is waiting for an incoming migration");
2409         return false;
2410     }
2411 
2412     if (runstate_check(RUN_STATE_POSTMIGRATE)) {
2413         error_setg(errp, "Can't migrate the vm that was paused due to "
2414                    "previous migration");
2415         return false;
2416     }
2417 
2418     if (migration_is_blocked(errp)) {
2419         return false;
2420     }
2421 
2422     if (blk || blk_inc) {
2423         if (migrate_colo_enabled()) {
2424             error_setg(errp, "No disk migration is required in COLO mode");
2425             return false;
2426         }
2427         if (migrate_use_block() || migrate_use_block_incremental()) {
2428             error_setg(errp, "Command options are incompatible with "
2429                        "current migration capabilities");
2430             return false;
2431         }
2432         migrate_set_block_enabled(true, &local_err);
2433         if (local_err) {
2434             error_propagate(errp, local_err);
2435             return false;
2436         }
2437         s->must_remove_block_options = true;
2438     }
2439 
2440     if (blk_inc) {
2441         migrate_set_block_incremental(s, true);
2442     }
2443 
2444     migrate_init(s);
2445     /*
2446      * set ram_counters and compression_counters memory to zero for a
2447      * new migration
2448      */
2449     memset(&ram_counters, 0, sizeof(ram_counters));
2450     memset(&compression_counters, 0, sizeof(compression_counters));
2451 
2452     return true;
2453 }
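
/*
 * The blk/blk_inc flags correspond to the QMP "migrate" arguments
 * "blk" and "inc" (HMP "migrate -b" / "migrate -i"): they temporarily
 * force the block / block-incremental options on, and the
 * must_remove_block_options flag makes block_cleanup_parameters()
 * revert them once the migration ends.
 */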
2454 
2455 void qmp_migrate(const char *uri, bool has_blk, bool blk,
2456                  bool has_inc, bool inc, bool has_detach, bool detach,
2457                  bool has_resume, bool resume, Error **errp)
2458 {
2459     Error *local_err = NULL;
2460     MigrationState *s = migrate_get_current();
2461     const char *p = NULL;
2462 
2463     /* URI is not suitable for migration? */
2464     if (!migration_channels_and_uri_compatible(uri, errp)) {
2465         return;
2466     }
2467 
2468     if (!migrate_prepare(s, has_blk && blk, has_inc && inc,
2469                          has_resume && resume, errp)) {
2470         /* Error detected, put into errp */
2471         return;
2472     }
2473 
2474     if (!(has_resume && resume)) {
2475         if (!yank_register_instance(MIGRATION_YANK_INSTANCE, errp)) {
2476             return;
2477         }
2478     }
2479 
2480     if (strstart(uri, "tcp:", &p) ||
2481         strstart(uri, "unix:", NULL) ||
2482         strstart(uri, "vsock:", NULL)) {
2483         socket_start_outgoing_migration(s, p ? p : uri, &local_err);
2484 #ifdef CONFIG_RDMA
2485     } else if (strstart(uri, "rdma:", &p)) {
2486         rdma_start_outgoing_migration(s, p, &local_err);
2487 #endif
2488     } else if (strstart(uri, "exec:", &p)) {
2489         exec_start_outgoing_migration(s, p, &local_err);
2490     } else if (strstart(uri, "fd:", &p)) {
2491         fd_start_outgoing_migration(s, p, &local_err);
2492     } else {
2493         if (!(has_resume && resume)) {
2494             yank_unregister_instance(MIGRATION_YANK_INSTANCE);
2495         }
2496         error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "uri",
2497                    "a valid migration protocol");
2498         migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
2499                           MIGRATION_STATUS_FAILED);
2500         block_cleanup_parameters(s);
2501         return;
2502     }
2503 
2504     if (local_err) {
2505         if (!(has_resume && resume)) {
2506             yank_unregister_instance(MIGRATION_YANK_INSTANCE);
2507         }
2508         migrate_fd_error(s, local_err);
2509         error_propagate(errp, local_err);
2510         return;
2511     }
2512 }
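
/*
 * Outgoing URI schemes dispatched above: "tcp:", "unix:" and "vsock:"
 * (socket transport), "rdma:" (only with CONFIG_RDMA), "exec:" and
 * "fd:".  Illustrative QMP usage:
 *
 *     { "execute": "migrate",
 *       "arguments": { "uri": "unix:/tmp/migrate.sock" } }
 */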
2513 
2514 void qmp_migrate_cancel(Error **errp)
2515 {
2516     migration_cancel(NULL);
2517 }
2518 
2519 void qmp_migrate_continue(MigrationStatus state, Error **errp)
2520 {
2521     MigrationState *s = migrate_get_current();
2522     if (s->state != state) {
2523         error_setg(errp,  "Migration not in expected state: %s",
2524                    MigrationStatus_str(s->state));
2525         return;
2526     }
2527     qemu_sem_post(&s->pause_sem);
2528 }
2529 
2530 bool migrate_release_ram(void)
2531 {
2532     MigrationState *s;
2533 
2534     s = migrate_get_current();
2535 
2536     return s->capabilities[MIGRATION_CAPABILITY_RELEASE_RAM];
2537 }
2538 
2539 bool migrate_postcopy_ram(void)
2540 {
2541     MigrationState *s;
2542 
2543     s = migrate_get_current();
2544 
2545     return s->capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM];
2546 }
2547 
2548 bool migrate_postcopy(void)
2549 {
2550     return migrate_postcopy_ram() || migrate_dirty_bitmaps();
2551 }
2552 
2553 bool migrate_auto_converge(void)
2554 {
2555     MigrationState *s;
2556 
2557     s = migrate_get_current();
2558 
2559     return s->capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE];
2560 }
2561 
2562 bool migrate_zero_blocks(void)
2563 {
2564     MigrationState *s;
2565 
2566     s = migrate_get_current();
2567 
2568     return s->capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS];
2569 }
2570 
2571 bool migrate_postcopy_blocktime(void)
2572 {
2573     MigrationState *s;
2574 
2575     s = migrate_get_current();
2576 
2577     return s->capabilities[MIGRATION_CAPABILITY_POSTCOPY_BLOCKTIME];
2578 }
2579 
2580 bool migrate_use_compression(void)
2581 {
2582     MigrationState *s;
2583 
2584     s = migrate_get_current();
2585 
2586     return s->capabilities[MIGRATION_CAPABILITY_COMPRESS];
2587 }
2588 
2589 int migrate_compress_level(void)
2590 {
2591     MigrationState *s;
2592 
2593     s = migrate_get_current();
2594 
2595     return s->parameters.compress_level;
2596 }
2597 
2598 int migrate_compress_threads(void)
2599 {
2600     MigrationState *s;
2601 
2602     s = migrate_get_current();
2603 
2604     return s->parameters.compress_threads;
2605 }
2606 
2607 int migrate_compress_wait_thread(void)
2608 {
2609     MigrationState *s;
2610 
2611     s = migrate_get_current();
2612 
2613     return s->parameters.compress_wait_thread;
2614 }
2615 
2616 int migrate_decompress_threads(void)
2617 {
2618     MigrationState *s;
2619 
2620     s = migrate_get_current();
2621 
2622     return s->parameters.decompress_threads;
2623 }
2624 
2625 bool migrate_dirty_bitmaps(void)
2626 {
2627     MigrationState *s;
2628 
2629     s = migrate_get_current();
2630 
2631     return s->capabilities[MIGRATION_CAPABILITY_DIRTY_BITMAPS];
2632 }
2633 
2634 bool migrate_ignore_shared(void)
2635 {
2636     MigrationState *s;
2637 
2638     s = migrate_get_current();
2639 
2640     return s->capabilities[MIGRATION_CAPABILITY_X_IGNORE_SHARED];
2641 }
2642 
2643 bool migrate_validate_uuid(void)
2644 {
2645     MigrationState *s;
2646 
2647     s = migrate_get_current();
2648 
2649     return s->capabilities[MIGRATION_CAPABILITY_VALIDATE_UUID];
2650 }
2651 
2652 bool migrate_use_events(void)
2653 {
2654     MigrationState *s;
2655 
2656     s = migrate_get_current();
2657 
2658     return s->capabilities[MIGRATION_CAPABILITY_EVENTS];
2659 }
2660 
2661 bool migrate_use_multifd(void)
2662 {
2663     MigrationState *s;
2664 
2665     s = migrate_get_current();
2666 
2667     return s->capabilities[MIGRATION_CAPABILITY_MULTIFD];
2668 }
2669 
2670 bool migrate_pause_before_switchover(void)
2671 {
2672     MigrationState *s;
2673 
2674     s = migrate_get_current();
2675 
2676     return s->capabilities[MIGRATION_CAPABILITY_PAUSE_BEFORE_SWITCHOVER];
2677 }
2678 
2679 int migrate_multifd_channels(void)
2680 {
2681     MigrationState *s;
2682 
2683     s = migrate_get_current();
2684 
2685     return s->parameters.multifd_channels;
2686 }
2687 
2688 MultiFDCompression migrate_multifd_compression(void)
2689 {
2690     MigrationState *s;
2691 
2692     s = migrate_get_current();
2693 
2694     assert(s->parameters.multifd_compression < MULTIFD_COMPRESSION__MAX);
2695     return s->parameters.multifd_compression;
2696 }
2697 
2698 int migrate_multifd_zlib_level(void)
2699 {
2700     MigrationState *s;
2701 
2702     s = migrate_get_current();
2703 
2704     return s->parameters.multifd_zlib_level;
2705 }
2706 
2707 int migrate_multifd_zstd_level(void)
2708 {
2709     MigrationState *s;
2710 
2711     s = migrate_get_current();
2712 
2713     return s->parameters.multifd_zstd_level;
2714 }
2715 
2716 #ifdef CONFIG_LINUX
2717 bool migrate_use_zero_copy_send(void)
2718 {
2719     MigrationState *s;
2720 
2721     s = migrate_get_current();
2722 
2723     return s->capabilities[MIGRATION_CAPABILITY_ZERO_COPY_SEND];
2724 }
2725 #endif
2726 
2727 int migrate_use_tls(void)
2728 {
2729     MigrationState *s;
2730 
2731     s = migrate_get_current();
2732 
2733     return s->parameters.tls_creds && *s->parameters.tls_creds;
2734 }
2735 
2736 int migrate_use_xbzrle(void)
2737 {
2738     MigrationState *s;
2739 
2740     s = migrate_get_current();
2741 
2742     return s->capabilities[MIGRATION_CAPABILITY_XBZRLE];
2743 }
2744 
2745 uint64_t migrate_xbzrle_cache_size(void)
2746 {
2747     MigrationState *s;
2748 
2749     s = migrate_get_current();
2750 
2751     return s->parameters.xbzrle_cache_size;
2752 }
2753 
2754 static int64_t migrate_max_postcopy_bandwidth(void)
2755 {
2756     MigrationState *s;
2757 
2758     s = migrate_get_current();
2759 
2760     return s->parameters.max_postcopy_bandwidth;
2761 }
2762 
2763 bool migrate_use_block(void)
2764 {
2765     MigrationState *s;
2766 
2767     s = migrate_get_current();
2768 
2769     return s->capabilities[MIGRATION_CAPABILITY_BLOCK];
2770 }
2771 
2772 bool migrate_use_return_path(void)
2773 {
2774     MigrationState *s;
2775 
2776     s = migrate_get_current();
2777 
2778     return s->capabilities[MIGRATION_CAPABILITY_RETURN_PATH];
2779 }
2780 
2781 bool migrate_use_block_incremental(void)
2782 {
2783     MigrationState *s;
2784 
2785     s = migrate_get_current();
2786 
2787     return s->parameters.block_incremental;
2788 }
2789 
2790 bool migrate_background_snapshot(void)
2791 {
2792     MigrationState *s;
2793 
2794     s = migrate_get_current();
2795 
2796     return s->capabilities[MIGRATION_CAPABILITY_BACKGROUND_SNAPSHOT];
2797 }
2798 
2799 bool migrate_postcopy_preempt(void)
2800 {
2801     MigrationState *s;
2802 
2803     s = migrate_get_current();
2804 
2805     return s->capabilities[MIGRATION_CAPABILITY_POSTCOPY_PREEMPT];
2806 }
2807 
2808 /* migration thread support */
2809 /*
2810  * Something bad happened to the RP stream; mark an error.
2811  * The caller shall print or trace something to indicate why.
2812  */
2813 static void mark_source_rp_bad(MigrationState *s)
2814 {
2815     s->rp_state.error = true;
2816 }
2817 
2818 static struct rp_cmd_args {
2819     ssize_t     len; /* -1 = variable */
2820     const char *name;
2821 } rp_cmd_args[] = {
2822     [MIG_RP_MSG_INVALID]        = { .len = -1, .name = "INVALID" },
2823     [MIG_RP_MSG_SHUT]           = { .len =  4, .name = "SHUT" },
2824     [MIG_RP_MSG_PONG]           = { .len =  4, .name = "PONG" },
2825     [MIG_RP_MSG_REQ_PAGES]      = { .len = 12, .name = "REQ_PAGES" },
2826     [MIG_RP_MSG_REQ_PAGES_ID]   = { .len = -1, .name = "REQ_PAGES_ID" },
2827     [MIG_RP_MSG_RECV_BITMAP]    = { .len = -1, .name = "RECV_BITMAP" },
2828     [MIG_RP_MSG_RESUME_ACK]     = { .len =  4, .name = "RESUME_ACK" },
2829     [MIG_RP_MSG_MAX]            = { .len = -1, .name = "MAX" },
2830 };
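
/*
 * Wire framing of return-path messages, as parsed by
 * source_return_path_thread() below:
 *
 *     be16 header_type    (one of MIG_RP_MSG_*)
 *     be16 header_len     (payload size in bytes; checked against
 *                          rp_cmd_args[].len unless that is -1)
 *     u8   payload[header_len]
 *
 * e.g. REQ_PAGES carries a be64 start address plus a be32 length,
 * which is the fixed 12-byte payload listed above.
 */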
2831 
2832 /*
2833  * Process a request for pages received on the return path.
2834  * We're allowed to send more than requested (e.g. to round to our page size)
2835  * and we don't need to send pages that have already been sent.
2836  */
2837 static void migrate_handle_rp_req_pages(MigrationState *ms, const char* rbname,
2838                                        ram_addr_t start, size_t len)
2839 {
2840     long our_host_ps = qemu_real_host_page_size();
2841 
2842     trace_migrate_handle_rp_req_pages(rbname, start, len);
2843 
2844     /*
2845      * Since we currently insist on matching page sizes, just sanity check
2846      * we're being asked for whole host pages.
2847      */
2848     if (!QEMU_IS_ALIGNED(start, our_host_ps) ||
2849         !QEMU_IS_ALIGNED(len, our_host_ps)) {
2850         error_report("%s: Misaligned page request, start: " RAM_ADDR_FMT
2851                      " len: %zd", __func__, start, len);
2852         mark_source_rp_bad(ms);
2853         return;
2854     }
2855 
2856     if (ram_save_queue_pages(rbname, start, len)) {
2857         mark_source_rp_bad(ms);
2858     }
2859 }
2860 
2861 /* Return true to retry, false to quit */
2862 static bool postcopy_pause_return_path_thread(MigrationState *s)
2863 {
2864     trace_postcopy_pause_return_path();
2865 
2866     qemu_sem_wait(&s->postcopy_pause_rp_sem);
2867 
2868     trace_postcopy_pause_return_path_continued();
2869 
2870     return true;
2871 }
2872 
2873 static int migrate_handle_rp_recv_bitmap(MigrationState *s, char *block_name)
2874 {
2875     RAMBlock *block = qemu_ram_block_by_name(block_name);
2876 
2877     if (!block) {
2878         error_report("%s: invalid block name '%s'", __func__, block_name);
2879         return -EINVAL;
2880     }
2881 
2882     /* Fetch the received bitmap and refresh the dirty bitmap */
2883     return ram_dirty_bitmap_reload(s, block);
2884 }
2885 
2886 static int migrate_handle_rp_resume_ack(MigrationState *s, uint32_t value)
2887 {
2888     trace_source_return_path_thread_resume_ack(value);
2889 
2890     if (value != MIGRATION_RESUME_ACK_VALUE) {
2891         error_report("%s: illegal resume_ack value %"PRIu32,
2892                      __func__, value);
2893         return -1;
2894     }
2895 
2896     /* Now both sides are active. */
2897     migrate_set_state(&s->state, MIGRATION_STATUS_POSTCOPY_RECOVER,
2898                       MIGRATION_STATUS_POSTCOPY_ACTIVE);
2899 
2900     /* Notify the send thread that it's time to continue sending pages */
2901     qemu_sem_post(&s->rp_state.rp_sem);
2902 
2903     return 0;
2904 }
2905 
2906 /*
2907  * Release ms->rp_state.from_dst_file (and postcopy_qemufile_src if
2908  * existed) in a safe way.
2909  */
2910 static void migration_release_dst_files(MigrationState *ms)
2911 {
2912     QEMUFile *file;
2913 
2914     WITH_QEMU_LOCK_GUARD(&ms->qemu_file_lock) {
2915         /*
2916          * Reset the from_dst_file pointer first before releasing it, as we
2917          * can't block within the lock section
2918          */
2919         file = ms->rp_state.from_dst_file;
2920         ms->rp_state.from_dst_file = NULL;
2921     }
2922 
2923     /*
2924      * Do the same to the postcopy fast path socket too, if there is
2925      * one.  No locking needed because this qemufile should only be
2926      * managed by the return path thread.
2927      */
2928     if (ms->postcopy_qemufile_src) {
2929         migration_ioc_unregister_yank_from_file(ms->postcopy_qemufile_src);
2930         qemu_file_shutdown(ms->postcopy_qemufile_src);
2931         qemu_fclose(ms->postcopy_qemufile_src);
2932         ms->postcopy_qemufile_src = NULL;
2933     }
2934 
2935     qemu_fclose(file);
2936 }
2937 
2938 /*
2939  * Handles messages sent on the return path towards the source VM
2940  *
2941  */
2942 static void *source_return_path_thread(void *opaque)
2943 {
2944     MigrationState *ms = opaque;
2945     QEMUFile *rp = ms->rp_state.from_dst_file;
2946     uint16_t header_len, header_type;
2947     uint8_t buf[512];
2948     uint32_t tmp32, sibling_error;
2949     ram_addr_t start = 0; /* =0 to silence warning */
2950     size_t  len = 0, expected_len;
2951     int res;
2952 
2953     trace_source_return_path_thread_entry();
2954     rcu_register_thread();
2955 
2956 retry:
2957     while (!ms->rp_state.error && !qemu_file_get_error(rp) &&
2958            migration_is_setup_or_active(ms->state)) {
2959         trace_source_return_path_thread_loop_top();
2960         header_type = qemu_get_be16(rp);
2961         header_len = qemu_get_be16(rp);
2962 
2963         if (qemu_file_get_error(rp)) {
2964             mark_source_rp_bad(ms);
2965             goto out;
2966         }
2967 
2968         if (header_type >= MIG_RP_MSG_MAX ||
2969             header_type == MIG_RP_MSG_INVALID) {
2970             error_report("RP: Received invalid message 0x%04x length 0x%04x",
2971                          header_type, header_len);
2972             mark_source_rp_bad(ms);
2973             goto out;
2974         }
2975 
2976         if ((rp_cmd_args[header_type].len != -1 &&
2977             header_len != rp_cmd_args[header_type].len) ||
2978             header_len > sizeof(buf)) {
2979             error_report("RP: Received '%s' message (0x%04x) with "
2980                          "incorrect length %d expecting %zu",
2981                          rp_cmd_args[header_type].name, header_type, header_len,
2982                          (size_t)rp_cmd_args[header_type].len);
2983             mark_source_rp_bad(ms);
2984             goto out;
2985         }
2986 
2987         /* We know we've got a valid header by this point */
2988         res = qemu_get_buffer(rp, buf, header_len);
2989         if (res != header_len) {
2990             error_report("RP: Failed reading data for message 0x%04x"
2991                          " read %d expected %d",
2992                          header_type, res, header_len);
2993             mark_source_rp_bad(ms);
2994             goto out;
2995         }
2996 
2997         /* OK, we have the message and the data */
2998         switch (header_type) {
2999         case MIG_RP_MSG_SHUT:
3000             sibling_error = ldl_be_p(buf);
3001             trace_source_return_path_thread_shut(sibling_error);
3002             if (sibling_error) {
3003                 error_report("RP: Sibling indicated error %d", sibling_error);
3004                 mark_source_rp_bad(ms);
3005             }
3006              * We'll let the main thread deal with closing the RP;
3007              * We'll let the main thread deal with closing the RP
3008              * we could do a shutdown(2) on it, but we're the only user
3009              * anyway, so there's nothing gained.
3010              */
3011             goto out;
3012 
3013         case MIG_RP_MSG_PONG:
3014             tmp32 = ldl_be_p(buf);
3015             trace_source_return_path_thread_pong(tmp32);
3016             qemu_sem_post(&ms->rp_state.rp_pong_acks);
3017             break;
3018 
3019         case MIG_RP_MSG_REQ_PAGES:
3020             start = ldq_be_p(buf);
3021             len = ldl_be_p(buf + 8);
3022             migrate_handle_rp_req_pages(ms, NULL, start, len);
3023             break;
3024 
3025         case MIG_RP_MSG_REQ_PAGES_ID:
3026             expected_len = 12 + 1; /* header + termination */
3027 
3028             if (header_len >= expected_len) {
3029                 start = ldq_be_p(buf);
3030                 len = ldl_be_p(buf + 8);
3031                 /* Now we expect an idstr */
3032                 tmp32 = buf[12]; /* Length of the following idstr */
3033                 buf[13 + tmp32] = '\0';
3034                 expected_len += tmp32;
3035             }
3036             if (header_len != expected_len) {
3037                 error_report("RP: Req_Page_id with length %d expecting %zd",
3038                              header_len, expected_len);
3039                 mark_source_rp_bad(ms);
3040                 goto out;
3041             }
3042             migrate_handle_rp_req_pages(ms, (char *)&buf[13], start, len);
3043             break;
3044 
3045         case MIG_RP_MSG_RECV_BITMAP:
3046             if (header_len < 1) {
3047                 error_report("%s: missing block name", __func__);
3048                 mark_source_rp_bad(ms);
3049                 goto out;
3050             }
3051             /* Format: len (1B) + idstr (<255B). This ends the idstr. */
3052             buf[buf[0] + 1] = '\0';
3053             if (migrate_handle_rp_recv_bitmap(ms, (char *)(buf + 1))) {
3054                 mark_source_rp_bad(ms);
3055                 goto out;
3056             }
3057             break;
3058 
3059         case MIG_RP_MSG_RESUME_ACK:
3060             tmp32 = ldl_be_p(buf);
3061             if (migrate_handle_rp_resume_ack(ms, tmp32)) {
3062                 mark_source_rp_bad(ms);
3063                 goto out;
3064             }
3065             break;
3066 
3067         default:
3068             break;
3069         }
3070     }
3071 
3072 out:
3073     res = qemu_file_get_error(rp);
3074     if (res) {
3075         if (res && migration_in_postcopy()) {
3076             /*
3077              * Maybe there is something we can do: it looks like a
3078              * network down issue, and we pause for a recovery.
3079              */
3080             migration_release_dst_files(ms);
3081             rp = NULL;
3082             if (postcopy_pause_return_path_thread(ms)) {
3083                 /*
3084                  * Reload rp, reset the rest.  Referencing it is safe since
3085                  * it's reset only by us above, or when migration completes
3086                  */
3087                 rp = ms->rp_state.from_dst_file;
3088                 ms->rp_state.error = false;
3089                 goto retry;
3090             }
3091         }
3092 
3093         trace_source_return_path_thread_bad_end();
3094         mark_source_rp_bad(ms);
3095     }
3096 
3097     trace_source_return_path_thread_end();
3098     migration_release_dst_files(ms);
3099     rcu_unregister_thread();
3100     return NULL;
3101 }
3102 
3103 static int open_return_path_on_source(MigrationState *ms,
3104                                       bool create_thread)
3105 {
3106     ms->rp_state.from_dst_file = qemu_file_get_return_path(ms->to_dst_file);
3107     if (!ms->rp_state.from_dst_file) {
3108         return -1;
3109     }
3110 
3111     trace_open_return_path_on_source();
3112 
3113     if (!create_thread) {
3114         /* We're done */
3115         return 0;
3116     }
3117 
3118     qemu_thread_create(&ms->rp_state.rp_thread, "return path",
3119                        source_return_path_thread, ms, QEMU_THREAD_JOINABLE);
3120     ms->rp_state.rp_thread_created = true;
3121 
3122     trace_open_return_path_on_source_continue();
3123 
3124     return 0;
3125 }
3126 
3127 /* Returns 0 if the RP was ok, otherwise there was an error on the RP */
3128 static int await_return_path_close_on_source(MigrationState *ms)
3129 {
3130     /*
3131      * If this is a normal exit then the destination will send a SHUT and the
3132      * rp_thread will exit; however, if there's an error we need to cause
3133      * it to exit.
3134      */
3135     if (qemu_file_get_error(ms->to_dst_file) && ms->rp_state.from_dst_file) {
3136         /*
3137          * shutdown(2), if we have it, will cause it to unblock if it's stuck
3138          * waiting for the destination.
3139          */
3140         qemu_file_shutdown(ms->rp_state.from_dst_file);
3141         mark_source_rp_bad(ms);
3142     }
3143     trace_await_return_path_close_on_source_joining();
3144     qemu_thread_join(&ms->rp_state.rp_thread);
3145     ms->rp_state.rp_thread_created = false;
3146     trace_await_return_path_close_on_source_close();
3147     return ms->rp_state.error;
3148 }
3149 
3150 static inline void
3151 migration_wait_main_channel(MigrationState *ms)
3152 {
3153     /* Wait until one PONG message received */
3154     qemu_sem_wait(&ms->rp_state.rp_pong_acks);
3155 }
3156 
3157 /*
3158  * Switch from normal iteration to postcopy
3159  * Returns non-0 on error
3160  */
3161 static int postcopy_start(MigrationState *ms)
3162 {
3163     int ret;
3164     QIOChannelBuffer *bioc;
3165     QEMUFile *fb;
3166     int64_t time_at_stop = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
3167     int64_t bandwidth = migrate_max_postcopy_bandwidth();
3168     bool restart_block = false;
3169     int cur_state = MIGRATION_STATUS_ACTIVE;
3170 
3171     if (migrate_postcopy_preempt()) {
3172         migration_wait_main_channel(ms);
3173         if (postcopy_preempt_establish_channel(ms)) {
3174             migrate_set_state(&ms->state, ms->state, MIGRATION_STATUS_FAILED);
3175             return -1;
3176         }
3177     }
3178 
3179     if (!migrate_pause_before_switchover()) {
3180         migrate_set_state(&ms->state, MIGRATION_STATUS_ACTIVE,
3181                           MIGRATION_STATUS_POSTCOPY_ACTIVE);
3182     }
3183 
3184     trace_postcopy_start();
3185     qemu_mutex_lock_iothread();
3186     trace_postcopy_start_set_run();
3187 
3188     qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL);
3189     global_state_store();
3190     ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
3191     if (ret < 0) {
3192         goto fail;
3193     }
3194 
3195     ret = migration_maybe_pause(ms, &cur_state,
3196                                 MIGRATION_STATUS_POSTCOPY_ACTIVE);
3197     if (ret < 0) {
3198         goto fail;
3199     }
3200 
3201     ret = bdrv_inactivate_all();
3202     if (ret < 0) {
3203         goto fail;
3204     }
3205     restart_block = true;
3206 
3207     /*
3208      * Cause any non-postcopiable, but iterative devices to
3209      * send out their final data.
3210      */
3211     qemu_savevm_state_complete_precopy(ms->to_dst_file, true, false);
3212 
3213     /*
3214      * In "finish migrate" state and with the io-lock held, everything
3215      * should be quiet, but we've potentially still got dirty pages and
3216      * we need to tell the destination to throw away any pages it's
3217      * already received that are dirty
3218      */
3219     if (migrate_postcopy_ram()) {
3220         ram_postcopy_send_discard_bitmap(ms);
3221     }
3222 
3223     /*
3224      * send rest of state - note things that are doing postcopy
3225      * will notice we're in POSTCOPY_ACTIVE and not actually
3226      * wrap their state up here
3227      */
3228     /* 0 max-postcopy-bandwidth means unlimited */
3229     if (!bandwidth) {
3230         qemu_file_set_rate_limit(ms->to_dst_file, INT64_MAX);
3231     } else {
3232         qemu_file_set_rate_limit(ms->to_dst_file, bandwidth / XFER_LIMIT_RATIO);
3233     }
3234     if (migrate_postcopy_ram()) {
3235         /* Ping just for debugging, helps line traces up */
3236         qemu_savevm_send_ping(ms->to_dst_file, 2);
3237     }
3238 
3239     /*
3240      * While loading the device state we may trigger page transfer
3241      * requests and the fd must be free to process those, and thus
3242      * the destination must read the whole device state off the fd before
3243      * it starts processing it.  Unfortunately the ad-hoc migration format
3244      * doesn't allow the destination to know the size to read without fully
3245      * parsing it through each devices load-state code (especially the open
3246  * parsing it through each device's load-state code (especially the
3247  * open-coded devices that use get/put).
3248  * So we wrap the device state up in a package with a length at the start;
3249  * to do this we use a buffered QIOChannel to hold the whole device state.
3250     bioc = qio_channel_buffer_new(4096);
3251     qio_channel_set_name(QIO_CHANNEL(bioc), "migration-postcopy-buffer");
3252     fb = qemu_file_new_output(QIO_CHANNEL(bioc));
3253     object_unref(OBJECT(bioc));
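    /*
     * Note (added commentary): the QEMUFile takes its own reference on the
     * channel, so dropping ours here leaves 'fb' as the owner; 'bioc'
     * stays valid until qemu_fclose(fb).
     */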
3254 
3255     /*
3256      * Make sure the receiver can get incoming pages before we send the rest
3257      * of the state
3258      */
3259     qemu_savevm_send_postcopy_listen(fb);
3260 
3261     qemu_savevm_state_complete_precopy(fb, false, false);
3262     if (migrate_postcopy_ram()) {
3263         qemu_savevm_send_ping(fb, 3);
3264     }
3265 
3266     qemu_savevm_send_postcopy_run(fb);
3267 
3268     /* <><> end of stuff going into the package */
3269 
3270     /* Last point of recovery; as soon as we send the package the destination
3271      * can open devices and potentially start running.
3272      * Let's just check again that we've not got any errors.
3273      */
3274     ret = qemu_file_get_error(ms->to_dst_file);
3275     if (ret) {
3276         error_report("postcopy_start: Migration stream errored (pre package)");
3277         goto fail_closefb;
3278     }
3279 
3280     restart_block = false;
3281 
3282     /* Now send that blob */
3283     if (qemu_savevm_send_packaged(ms->to_dst_file, bioc->data, bioc->usage)) {
3284         goto fail_closefb;
3285     }
3286     qemu_fclose(fb);
3287 
3288     /* Notify anything that needs to act at the transition to postcopy,
3289      * after the device state has been sent; in particular spice needs
3290      * to trigger a transition now.
3291      */
3292     ms->postcopy_after_devices = true;
3293     notifier_list_notify(&migration_state_notifiers, ms);
3294 
3295     ms->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - time_at_stop;
3296 
3297     qemu_mutex_unlock_iothread();
3298 
3299     if (migrate_postcopy_ram()) {
3300         /*
3301          * Although this ping is just for debug, it could potentially be
3302          * used for getting a better measurement of downtime at the source.
3303          */
3304         qemu_savevm_send_ping(ms->to_dst_file, 4);
3305     }
3306 
3307     if (migrate_release_ram()) {
3308         ram_postcopy_migrated_memory_release(ms);
3309     }
3310 
3311     ret = qemu_file_get_error(ms->to_dst_file);
3312     if (ret) {
3313         error_report("postcopy_start: Migration stream errored");
3314         migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
3315                               MIGRATION_STATUS_FAILED);
3316     }
3317 
3318     trace_postcopy_preempt_enabled(migrate_postcopy_preempt());
3319 
3320     return ret;
3321 
3322 fail_closefb:
3323     qemu_fclose(fb);
3324 fail:
3325     migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
3326                           MIGRATION_STATUS_FAILED);
3327     if (restart_block) {
3328         /* A failure happened early enough that we know the destination hasn't
3329          * accessed block devices, so we're safe to recover.
3330          */
3331         Error *local_err = NULL;
3332 
3333         bdrv_activate_all(&local_err);
3334         if (local_err) {
3335             error_report_err(local_err);
3336         }
3337     }
3338     qemu_mutex_unlock_iothread();
3339     return -1;
3340 }
3341 
3342 /**
3343  * migration_maybe_pause: Pause if required by
3344  * migrate_pause_before_switchover(); called with the iothread locked
3345  * Returns: 0 on success
3346  */
3347 static int migration_maybe_pause(MigrationState *s,
3348                                  int *current_active_state,
3349                                  int new_state)
3350 {
3351     if (!migrate_pause_before_switchover()) {
3352         return 0;
3353     }
3354 
3355     /* Since leaving this state is not atomic with posting the semaphore
3356      * it's possible that someone could have issued multiple migrate_continue
3357      * and the semaphore is incorrectly positive at this point;
3358      * the docs say it's undefined to reinit a semaphore that's already
3359      * init'd, so use timedwait to eat up any existing posts.
3360      */
3361     while (qemu_sem_timedwait(&s->pause_sem, 1) == 0) {
3362         /* This block intentionally left blank */
3363     }
3364 
3365     /*
3366      * If the migration is cancelled when it is in the completion phase,
3367      * the migration state is set to MIGRATION_STATUS_CANCELLING.
3368      * So we don't need to wait a semaphore, otherwise we would always
3369      * wait for the 'pause_sem' semaphore.
3370      */
3371     if (s->state != MIGRATION_STATUS_CANCELLING) {
3372         qemu_mutex_unlock_iothread();
3373         migrate_set_state(&s->state, *current_active_state,
3374                           MIGRATION_STATUS_PRE_SWITCHOVER);
3375         qemu_sem_wait(&s->pause_sem);
3376         migrate_set_state(&s->state, MIGRATION_STATUS_PRE_SWITCHOVER,
3377                           new_state);
3378         *current_active_state = new_state;
3379         qemu_mutex_lock_iothread();
3380     }
3381 
3382     return s->state == new_state ? 0 : -EINVAL;
3383 }
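/*
 * Illustration (added commentary, not from the original source): with the
 * pause-before-switchover capability enabled, management sees the
 * "pre-switchover" state and resumes the switchover explicitly, e.g.:
 *
 *   (QMP) { "execute": "migrate-continue",
 *           "arguments": { "state": "pre-switchover" } }
 *
 * which posts 'pause_sem' and lets migration_maybe_pause() proceed to
 * new_state.
 */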
3384 
3385 /**
3386  * migration_completion: Used by migration_thread when there's not much left.
3387  *   The caller 'breaks' the loop when this returns.
3388  *
3389  * @s: Current migration state
3390  */
3391 static void migration_completion(MigrationState *s)
3392 {
3393     int ret;
3394     int current_active_state = s->state;
3395 
3396     if (s->state == MIGRATION_STATUS_ACTIVE) {
3397         qemu_mutex_lock_iothread();
3398         s->downtime_start = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
3399         qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL);
3400         s->vm_was_running = runstate_is_running();
3401         ret = global_state_store();
3402 
3403         if (!ret) {
3404             ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
3405             trace_migration_completion_vm_stop(ret);
3406             if (ret >= 0) {
3407                 ret = migration_maybe_pause(s, &current_active_state,
3408                                             MIGRATION_STATUS_DEVICE);
3409             }
3410             if (ret >= 0) {
3411                 s->block_inactive = !migrate_colo_enabled();
3412                 qemu_file_set_rate_limit(s->to_dst_file, INT64_MAX);
3413                 ret = qemu_savevm_state_complete_precopy(s->to_dst_file, false,
3414                                                          s->block_inactive);
3415             }
3416         }
3417         qemu_mutex_unlock_iothread();
3418 
3419         if (ret < 0) {
3420             goto fail;
3421         }
3422     } else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
3423         trace_migration_completion_postcopy_end();
3424 
3425         qemu_mutex_lock_iothread();
3426         qemu_savevm_state_complete_postcopy(s->to_dst_file);
3427         qemu_mutex_unlock_iothread();
3428 
3429         /*
3430          * Shut down the postcopy fast-path thread.  This is only needed
3431          * when the dest QEMU binary is old (7.1/7.2).  QEMU 8.0+ doesn't need
3432          * this.
3433          */
3434         if (migrate_postcopy_preempt() && s->preempt_pre_7_2) {
3435             postcopy_preempt_shutdown_file(s);
3436         }
3437 
3438         trace_migration_completion_postcopy_end_after_complete();
3439     } else {
3440         goto fail;
3441     }
3442 
3443     /*
3444      * If rp was opened we must clean up the thread before
3445      * cleaning everything else up (since if there are no failures
3446  * it will wait for the destination to send its status in
3447      * a SHUT command).
3448      */
3449     if (s->rp_state.rp_thread_created) {
3450         int rp_error;
3451         trace_migration_return_path_end_before();
3452         rp_error = await_return_path_close_on_source(s);
3453         trace_migration_return_path_end_after(rp_error);
3454         if (rp_error) {
3455             goto fail_invalidate;
3456         }
3457     }
3458 
3459     if (qemu_file_get_error(s->to_dst_file)) {
3460         trace_migration_completion_file_err();
3461         goto fail_invalidate;
3462     }
3463 
3464     if (migrate_colo_enabled() && s->state == MIGRATION_STATUS_ACTIVE) {
3465         /* COLO does not support postcopy */
3466         migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
3467                           MIGRATION_STATUS_COLO);
3468     } else {
3469         migrate_set_state(&s->state, current_active_state,
3470                           MIGRATION_STATUS_COMPLETED);
3471     }
3472 
3473     return;
3474 
3475 fail_invalidate:
3476     /* If not doing postcopy, vm_start() will be called: let's regain
3477      * control of the images.
3478      */
3479     if (s->state == MIGRATION_STATUS_ACTIVE ||
3480         s->state == MIGRATION_STATUS_DEVICE) {
3481         Error *local_err = NULL;
3482 
3483         qemu_mutex_lock_iothread();
3484         bdrv_activate_all(&local_err);
3485         if (local_err) {
3486             error_report_err(local_err);
3487             s->block_inactive = true;
3488         } else {
3489             s->block_inactive = false;
3490         }
3491         qemu_mutex_unlock_iothread();
3492     }
3493 
3494 fail:
3495     migrate_set_state(&s->state, current_active_state,
3496                       MIGRATION_STATUS_FAILED);
3497 }
3498 
3499 /**
3500  * bg_migration_completion: Used by bg_migration_thread after all the
3501  *   RAM has been saved. The caller 'breaks' the loop when this returns.
3502  *
3503  * @s: Current migration state
3504  */
3505 static void bg_migration_completion(MigrationState *s)
3506 {
3507     int current_active_state = s->state;
3508 
3509     /*
3510      * Stop tracking RAM writes - un-protect memory, un-register UFFD
3511      * memory ranges, flush kernel wait queues and wake up threads
3512      * waiting for write fault to be resolved.
3513      */
3514     ram_write_tracking_stop();
3515 
3516     if (s->state == MIGRATION_STATUS_ACTIVE) {
3517         /*
3518          * By this moment we have RAM content saved into the migration stream.
3519          * The next step is to flush the non-RAM content (device state)
3520          * right after the ram content. The device state has been stored into
3521          * the temporary buffer before RAM saving started.
3522          */
3523         qemu_put_buffer(s->to_dst_file, s->bioc->data, s->bioc->usage);
3524         qemu_fflush(s->to_dst_file);
3525     } else if (s->state == MIGRATION_STATUS_CANCELLING) {
3526         goto fail;
3527     }
3528 
3529     if (qemu_file_get_error(s->to_dst_file)) {
3530         trace_migration_completion_file_err();
3531         goto fail;
3532     }
3533 
3534     migrate_set_state(&s->state, current_active_state,
3535                       MIGRATION_STATUS_COMPLETED);
3536     return;
3537 
3538 fail:
3539     migrate_set_state(&s->state, current_active_state,
3540                       MIGRATION_STATUS_FAILED);
3541 }
3542 
3543 bool migrate_colo_enabled(void)
3544 {
3545     MigrationState *s = migrate_get_current();
3546     return s->capabilities[MIGRATION_CAPABILITY_X_COLO];
3547 }
3548 
3549 typedef enum MigThrError {
3550     /* No error detected */
3551     MIG_THR_ERR_NONE = 0,
3552     /* Detected error, but resumed successfully */
3553     MIG_THR_ERR_RECOVERED = 1,
3554     /* Detected fatal error, need to exit */
3555     MIG_THR_ERR_FATAL = 2,
3556 } MigThrError;
3557 
3558 static int postcopy_resume_handshake(MigrationState *s)
3559 {
3560     qemu_savevm_send_postcopy_resume(s->to_dst_file);
3561 
3562     while (s->state == MIGRATION_STATUS_POSTCOPY_RECOVER) {
3563         qemu_sem_wait(&s->rp_state.rp_sem);
3564     }
3565 
3566     if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
3567         return 0;
3568     }
3569 
3570     return -1;
3571 }
3572 
3573 /* Return zero if success, or <0 for error */
3574 static int postcopy_do_resume(MigrationState *s)
3575 {
3576     int ret;
3577 
3578     /*
3579      * Call all the resume_prepare() hooks, so that modules can be
3580      * ready for the migration resume.
3581      */
3582     ret = qemu_savevm_state_resume_prepare(s);
3583     if (ret) {
3584         error_report("%s: resume_prepare() failure detected: %d",
3585                      __func__, ret);
3586         return ret;
3587     }
3588 
3589     /*
3590      * If preempt is enabled, re-establish the preempt channel.  Note that
3591      * we do it after resume prepare to make sure the main channel will be
3592      * created before the preempt channel.  E.g. on a flaky network, the
3593      * dest QEMU may otherwise confuse the preempt and main channels due
3594      * to the order of connection setup.  This guarantees the correct order.
3595      */
3596     ret = postcopy_preempt_establish_channel(s);
3597     if (ret) {
3598         error_report("%s: postcopy_preempt_establish_channel(): %d",
3599                      __func__, ret);
3600         return ret;
3601     }
3602 
3603     /*
3604      * Last handshake with destination on the resume (destination will
3605      * switch to postcopy-active afterwards)
3606      */
3607     ret = postcopy_resume_handshake(s);
3608     if (ret) {
3609         error_report("%s: handshake failed: %d", __func__, ret);
3610         return ret;
3611     }
3612 
3613     return 0;
3614 }
3615 
3616 /*
3617  * We don't return until we are in a safe state to continue the current
3618  * postcopy migration.  Returns MIG_THR_ERR_RECOVERED if recovered, or
3619  * MIG_THR_ERR_FATAL if an unrecoverable failure happened.
3620  */
3621 static MigThrError postcopy_pause(MigrationState *s)
3622 {
3623     assert(s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
3624 
3625     while (true) {
3626         QEMUFile *file;
3627 
3628         /*
3629          * Current channel is possibly broken. Release it.  Note that this is
3630          * guaranteed even without lock because to_dst_file should only be
3631          * modified by the migration thread.  That also guarantees that the
3632          * unregister of yank is safe too without the lock.  It should be safe
3633          * even to be within the qemu_file_lock, but we didn't do that to avoid
3634          * taking more mutex (yank_lock) within qemu_file_lock.  TL;DR: we make
3635          * the qemu_file_lock critical section as small as possible.
3636          */
3637         assert(s->to_dst_file);
3638         migration_ioc_unregister_yank_from_file(s->to_dst_file);
3639         qemu_mutex_lock(&s->qemu_file_lock);
3640         file = s->to_dst_file;
3641         s->to_dst_file = NULL;
3642         qemu_mutex_unlock(&s->qemu_file_lock);
3643 
3644         qemu_file_shutdown(file);
3645         qemu_fclose(file);
3646 
3647         migrate_set_state(&s->state, s->state,
3648                           MIGRATION_STATUS_POSTCOPY_PAUSED);
3649 
3650         error_report("Detected IO failure for postcopy. "
3651                      "Migration paused.");
3652 
3653         /*
3654          * We wait here until things are fixed up. Then someone will set
3655          * the status back for us.
3656          */
3657         while (s->state == MIGRATION_STATUS_POSTCOPY_PAUSED) {
3658             qemu_sem_wait(&s->postcopy_pause_sem);
3659         }
3660 
3661         if (s->state == MIGRATION_STATUS_POSTCOPY_RECOVER) {
3662             /* Woken up by a recover procedure. Give it a shot */
3663 
3664             /*
3665              * Firstly, let's wake up the return path now, with a new
3666              * return path channel.
3667              */
3668             qemu_sem_post(&s->postcopy_pause_rp_sem);
3669 
3670             /* Do the resume logic */
3671             if (postcopy_do_resume(s) == 0) {
3672                 /* Let's continue! */
3673                 trace_postcopy_pause_continued();
3674                 return MIG_THR_ERR_RECOVERED;
3675             } else {
3676                 /*
3677                  * Something went wrong during the recovery, let's
3678                  * pause again. Pause is always better than throwing
3679                  * data away.
3680                  */
3681                 continue;
3682             }
3683         } else {
3684             /* This is not right... Time to quit. */
3685             return MIG_THR_ERR_FATAL;
3686         }
3687     }
3688 }
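/*
 * Illustration (added commentary, not from the original source): recovery
 * from POSTCOPY_PAUSED is user-driven, roughly:
 *
 *   destination: (QMP) { "execute": "migrate-recover",
 *                        "arguments": { "uri": "tcp:0:5556" } }
 *   source:      (HMP) migrate -r tcp:dest:5556
 *
 * Reconnecting moves the state to POSTCOPY_RECOVER, which wakes the loop
 * above via postcopy_pause_sem.
 */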
3689 
3690 static MigThrError migration_detect_error(MigrationState *s)
3691 {
3692     int ret;
3693     int state = s->state;
3694     Error *local_error = NULL;
3695 
3696     if (state == MIGRATION_STATUS_CANCELLING ||
3697         state == MIGRATION_STATUS_CANCELLED) {
3698         /* End the migration, but don't set the state to failed */
3699         return MIG_THR_ERR_FATAL;
3700     }
3701 
3702     /*
3703      * Try to detect any file errors.  Note that postcopy_qemufile_src will
3704      * be NULL when postcopy preempt is not enabled.
3705      */
3706     ret = qemu_file_get_error_obj_any(s->to_dst_file,
3707                                       s->postcopy_qemufile_src,
3708                                       &local_error);
3709     if (!ret) {
3710         /* Everything is fine */
3711         assert(!local_error);
3712         return MIG_THR_ERR_NONE;
3713     }
3714 
3715     if (local_error) {
3716         migrate_set_error(s, local_error);
3717         error_free(local_error);
3718     }
3719 
3720     if (state == MIGRATION_STATUS_POSTCOPY_ACTIVE && ret) {
3721         /*
3722          * For postcopy, we allow the network to be down for a
3723          * while. After that, it can be continued by a
3724          * recovery phase.
3725          */
3726         return postcopy_pause(s);
3727     } else {
3728         /*
3729          * For precopy (or postcopy with an error outside of IO), we
3730          * fail immediately.
3731          */
3732         migrate_set_state(&s->state, state, MIGRATION_STATUS_FAILED);
3733         trace_migration_thread_file_err();
3734 
3735         /* Time to stop the migration, now. */
3736         return MIG_THR_ERR_FATAL;
3737     }
3738 }
3739 
3740 /* How many bytes have we transferred since the beginning of the migration */
3741 static uint64_t migration_total_bytes(MigrationState *s)
3742 {
3743     return qemu_file_total_transferred(s->to_dst_file) +
3744         stat64_get(&ram_counters.multifd_bytes);
3745 }
3746 
3747 static void migration_calculate_complete(MigrationState *s)
3748 {
3749     uint64_t bytes = migration_total_bytes(s);
3750     int64_t end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
3751     int64_t transfer_time;
3752 
3753     s->total_time = end_time - s->start_time;
3754     if (!s->downtime) {
3755         /*
3756          * It's still not set, so this is a precopy migration.  For
3757          * postcopy, downtime is calculated during postcopy_start().
3758          */
3759         s->downtime = end_time - s->downtime_start;
3760     }
3761 
3762     transfer_time = s->total_time - s->setup_time;
3763     if (transfer_time) {
3764         s->mbps = ((double) bytes * 8.0) / transfer_time / 1000;
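        /*
         * Units check (added commentary): transfer_time is in ms, so e.g.
         * 1 GB moved in 10 s gives (8e9 bits) / (10000 ms) / 1000 = 800,
         * i.e. 800 Mbps as expected.
         */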
3765     }
3766 }
3767 
3768 static void update_iteration_initial_status(MigrationState *s)
3769 {
3770     /*
3771      * Update these three fields at the same time to avoid mismatched info
3772      * leading to wrong speed calculations.
3773      */
3774     s->iteration_start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
3775     s->iteration_initial_bytes = migration_total_bytes(s);
3776     s->iteration_initial_pages = ram_get_total_transferred_pages();
3777 }
3778 
3779 static void migration_update_counters(MigrationState *s,
3780                                       int64_t current_time)
3781 {
3782     uint64_t transferred, transferred_pages, time_spent;
3783     uint64_t current_bytes; /* bytes transferred since the beginning */
3784     double bandwidth;
3785 
3786     if (current_time < s->iteration_start_time + BUFFER_DELAY) {
3787         return;
3788     }
3789 
3790     current_bytes = migration_total_bytes(s);
3791     transferred = current_bytes - s->iteration_initial_bytes;
3792     time_spent = current_time - s->iteration_start_time;
3793     bandwidth = (double)transferred / time_spent;
3794     s->threshold_size = bandwidth * s->parameters.downtime_limit;
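    /*
     * Worked example (added commentary, illustrative numbers): 100 MB sent
     * in the last 1000 ms gives a bandwidth of 100000 bytes/ms; with a
     * downtime_limit of 300 ms, threshold_size becomes ~30 MB - the amount
     * we believe can be transferred within the allowed downtime.
     */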
3795 
3796     s->mbps = (((double) transferred * 8.0) /
3797                ((double) time_spent / 1000.0)) / 1000.0 / 1000.0;
3798 
3799     transferred_pages = ram_get_total_transferred_pages() -
3800                             s->iteration_initial_pages;
3801     s->pages_per_second = (double) transferred_pages /
3802                              (((double) time_spent / 1000.0));
3803 
3804     /*
3805      * If we haven't sent much of anything, we don't want to recalculate;
3806      * 10000 bytes is a small enough number for our purposes.
3807      */
3808     if (ram_counters.dirty_pages_rate && transferred > 10000) {
3809         s->expected_downtime = ram_counters.remaining / bandwidth;
3810     }
3811 
3812     qemu_file_reset_rate_limit(s->to_dst_file);
3813 
3814     update_iteration_initial_status(s);
3815 
3816     trace_migrate_transferred(transferred, time_spent,
3817                               bandwidth, s->threshold_size);
3818 }
3819 
3820 /* Migration thread iteration status */
3821 typedef enum {
3822     MIG_ITERATE_RESUME,         /* Resume current iteration */
3823     MIG_ITERATE_SKIP,           /* Skip current iteration */
3824     MIG_ITERATE_BREAK,          /* Break the loop */
3825 } MigIterateState;
3826 
3827 /*
3828  * Decide what the migration thread should do next: keep iterating,
3829  * skip the rest of this iteration, or break out of the loop.
3830  */
3831 static MigIterateState migration_iteration_run(MigrationState *s)
3832 {
3833     uint64_t must_precopy, can_postcopy;
3834     bool in_postcopy = s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE;
3835 
3836     qemu_savevm_state_pending_estimate(&must_precopy, &can_postcopy);
3837     uint64_t pending_size = must_precopy + can_postcopy;
3838 
3839     trace_migrate_pending_estimate(pending_size, must_precopy, can_postcopy);
3840 
3841     if (must_precopy <= s->threshold_size) {
3842         qemu_savevm_state_pending_exact(&must_precopy, &can_postcopy);
3843         pending_size = must_precopy + can_postcopy;
3844         trace_migrate_pending_exact(pending_size, must_precopy, can_postcopy);
3845     }
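    /*
     * Note (added commentary): the estimate above is cheap; the exact
     * query is only made once the estimate drops below threshold_size,
     * since it typically requires an expensive dirty-bitmap sync.
     */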
3846 
3847     if (!pending_size || pending_size < s->threshold_size) {
3848         trace_migration_thread_low_pending(pending_size);
3849         migration_completion(s);
3850         return MIG_ITERATE_BREAK;
3851     }
3852 
3853     /* Still a significant amount to transfer */
3854     if (!in_postcopy && must_precopy <= s->threshold_size &&
3855         qatomic_read(&s->start_postcopy)) {
3856         if (postcopy_start(s)) {
3857             error_report("%s: postcopy failed to start", __func__);
3858         }
3859         return MIG_ITERATE_SKIP;
3860     }
3861 
3862     /* Just another iteration step */
3863     qemu_savevm_state_iterate(s->to_dst_file, in_postcopy);
3864     return MIG_ITERATE_RESUME;
3865 }
3866 
3867 static void migration_iteration_finish(MigrationState *s)
3868 {
3869     /* If we enabled cpu throttling for auto-converge, turn it off. */
3870     cpu_throttle_stop();
3871 
3872     qemu_mutex_lock_iothread();
3873     switch (s->state) {
3874     case MIGRATION_STATUS_COMPLETED:
3875         migration_calculate_complete(s);
3876         runstate_set(RUN_STATE_POSTMIGRATE);
3877         break;
3878     case MIGRATION_STATUS_COLO:
3879         if (!migrate_colo_enabled()) {
3880             error_report("%s: critical error: calling COLO code without "
3881                          "COLO enabled", __func__);
3882         }
3883         migrate_start_colo_process(s);
3884         s->vm_was_running = true;
3885         /* Fallthrough */
3886     case MIGRATION_STATUS_FAILED:
3887     case MIGRATION_STATUS_CANCELLED:
3888     case MIGRATION_STATUS_CANCELLING:
3889         if (s->vm_was_running) {
3890             if (!runstate_check(RUN_STATE_SHUTDOWN)) {
3891                 vm_start();
3892             }
3893         } else {
3894             if (runstate_check(RUN_STATE_FINISH_MIGRATE)) {
3895                 runstate_set(RUN_STATE_POSTMIGRATE);
3896             }
3897         }
3898         break;
3899 
3900     default:
3901         /* Should not reach here, but if so, forgive the VM. */
3902         error_report("%s: Unknown ending state %d", __func__, s->state);
3903         break;
3904     }
3905     migrate_fd_cleanup_schedule(s);
3906     qemu_mutex_unlock_iothread();
3907 }
3908 
3909 static void bg_migration_iteration_finish(MigrationState *s)
3910 {
3911     qemu_mutex_lock_iothread();
3912     switch (s->state) {
3913     case MIGRATION_STATUS_COMPLETED:
3914         migration_calculate_complete(s);
3915         break;
3916 
3917     case MIGRATION_STATUS_ACTIVE:
3918     case MIGRATION_STATUS_FAILED:
3919     case MIGRATION_STATUS_CANCELLED:
3920     case MIGRATION_STATUS_CANCELLING:
3921         break;
3922 
3923     default:
3924         /* Should not reach here, but if so, forgive the VM. */
3925         error_report("%s: Unknown ending state %d", __func__, s->state);
3926         break;
3927     }
3928 
3929     migrate_fd_cleanup_schedule(s);
3930     qemu_mutex_unlock_iothread();
3931 }
3932 
3933 /*
3934  * Returns a MigIterateState telling the caller whether to keep
3935  * iterating or break out of the loop.
3936  */
3937 static MigIterateState bg_migration_iteration_run(MigrationState *s)
3938 {
3939     int res;
3940 
3941     res = qemu_savevm_state_iterate(s->to_dst_file, false);
3942     if (res > 0) {
3943         bg_migration_completion(s);
3944         return MIG_ITERATE_BREAK;
3945     }
3946 
3947     return MIG_ITERATE_RESUME;
3948 }
3949 
3950 void migration_make_urgent_request(void)
3951 {
3952     qemu_sem_post(&migrate_get_current()->rate_limit_sem);
3953 }
3954 
3955 void migration_consume_urgent_request(void)
3956 {
3957     qemu_sem_wait(&migrate_get_current()->rate_limit_sem);
3958 }
3959 
3960 /* Returns true if the rate limiting was broken by an urgent request */
3961 bool migration_rate_limit(void)
3962 {
3963     int64_t now = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
3964     MigrationState *s = migrate_get_current();
3965 
3966     bool urgent = false;
3967     migration_update_counters(s, now);
3968     if (qemu_file_rate_limit(s->to_dst_file)) {
3969 
3970         if (qemu_file_get_error(s->to_dst_file)) {
3971             return false;
3972         }
3973         /*
3974          * Wait for a delay to do rate limiting OR
3975          * something urgent to post the semaphore.
3976          */
3977         int ms = s->iteration_start_time + BUFFER_DELAY - now;
3978         trace_migration_rate_limit_pre(ms);
3979         if (qemu_sem_timedwait(&s->rate_limit_sem, ms) == 0) {
3980             /*
3981              * We were woken by one or more urgent things but
3982              * the timedwait will have consumed one of them.
3983              * The service routine for the urgent wake will decrement
3984              * the semaphore itself for each item it consumes, so post
3985              * back the one we just consumed.
3986              */
3987             qemu_sem_post(&s->rate_limit_sem);
3988             urgent = true;
3989         }
3990         trace_migration_rate_limit_post(urgent);
3991     }
3992     return urgent;
3993 }
3994 
3995 /*
3996  * If failover devices are present, wait until they are completely
3997  * unplugged.
3998  */
3999 
4000 static void qemu_savevm_wait_unplug(MigrationState *s, int old_state,
4001                                     int new_state)
4002 {
4003     if (qemu_savevm_state_guest_unplug_pending()) {
4004         migrate_set_state(&s->state, old_state, MIGRATION_STATUS_WAIT_UNPLUG);
4005 
4006         while (s->state == MIGRATION_STATUS_WAIT_UNPLUG &&
4007                qemu_savevm_state_guest_unplug_pending()) {
4008             qemu_sem_timedwait(&s->wait_unplug_sem, 250);
4009         }
4010         if (s->state != MIGRATION_STATUS_WAIT_UNPLUG) {
4011             int timeout = 120; /* 120 * 250ms == 30 seconds */
4012             /*
4013              * Migration has been cancelled, but as we have started an
4014              * unplug we must wait for it to finish to be able to plug
4015              * the card back.
4016              */
4017             while (timeout-- && qemu_savevm_state_guest_unplug_pending()) {
4018                 qemu_sem_timedwait(&s->wait_unplug_sem, 250);
4019             }
4020             if (qemu_savevm_state_guest_unplug_pending() &&
4021                 !qtest_enabled()) {
4022                 warn_report("migration: partially unplugged device on "
4023                             "failure");
4024             }
4025         }
4026 
4027         migrate_set_state(&s->state, MIGRATION_STATUS_WAIT_UNPLUG, new_state);
4028     } else {
4029         migrate_set_state(&s->state, old_state, new_state);
4030     }
4031 }
4032 
4033 /*
4034  * Master migration thread on the source VM.
4035  * It drives the migration and pumps the data down the outgoing channel.
4036  */
4037 static void *migration_thread(void *opaque)
4038 {
4039     MigrationState *s = opaque;
4040     MigrationThread *thread = NULL;
4041     int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
4042     MigThrError thr_error;
4043     bool urgent = false;
4044 
4045     thread = MigrationThreadAdd("live_migration", qemu_get_thread_id());
4046 
4047     rcu_register_thread();
4048 
4049     object_ref(OBJECT(s));
4050     update_iteration_initial_status(s);
4051 
4052     qemu_savevm_state_header(s->to_dst_file);
4053 
4054     /*
4055      * If we opened the return path, we need to make sure the destination
4056      * has it opened as well.
4057      */
4058     if (s->rp_state.rp_thread_created) {
4059         /* Now tell the dest that it should open its end so it can reply */
4060         qemu_savevm_send_open_return_path(s->to_dst_file);
4061 
4062         /* And do a ping that will make stuff easier to debug */
4063         qemu_savevm_send_ping(s->to_dst_file, 1);
4064     }
4065 
4066     if (migrate_postcopy()) {
4067         /*
4068          * Tell the destination that we *might* want to do postcopy later;
4069          * if the other end can't do postcopy it should fail now, nice and
4070          * early.
4071          */
4072         qemu_savevm_send_postcopy_advise(s->to_dst_file);
4073     }
4074 
4075     if (migrate_colo_enabled()) {
4076         /* Notify migration destination that we enable COLO */
4077         qemu_savevm_send_colo_enable(s->to_dst_file);
4078     }
4079 
4080     qemu_savevm_state_setup(s->to_dst_file);
4081 
4082     qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP,
4083                                MIGRATION_STATUS_ACTIVE);
4084 
4085     s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
4086 
4087     trace_migration_thread_setup_complete();
4088 
4089     while (migration_is_active(s)) {
4090         if (urgent || !qemu_file_rate_limit(s->to_dst_file)) {
4091             MigIterateState iter_state = migration_iteration_run(s);
4092             if (iter_state == MIG_ITERATE_SKIP) {
4093                 continue;
4094             } else if (iter_state == MIG_ITERATE_BREAK) {
4095                 break;
4096             }
4097         }
4098 
4099         /*
4100          * Try to detect any kind of failures, and see whether we
4101          * should stop the migration now.
4102          */
4103         thr_error = migration_detect_error(s);
4104         if (thr_error == MIG_THR_ERR_FATAL) {
4105             /* Stop migration */
4106             break;
4107         } else if (thr_error == MIG_THR_ERR_RECOVERED) {
4108             /*
4109              * Just recovered from a e.g. network failure, reset all
4110              * Just recovered from e.g. a network failure; reset all
4111              * the local variables. This is important to avoid breaking
4112              * the transferred_bytes and bandwidth calculations.
4113             update_iteration_initial_status(s);
4114         }
4115 
4116         urgent = migration_rate_limit();
4117     }
4118 
4119     trace_migration_thread_after_loop();
4120     migration_iteration_finish(s);
4121     object_unref(OBJECT(s));
4122     rcu_unregister_thread();
4123     MigrationThreadDel(thread);
4124     return NULL;
4125 }
4126 
4127 static void bg_migration_vm_start_bh(void *opaque)
4128 {
4129     MigrationState *s = opaque;
4130 
4131     qemu_bh_delete(s->vm_start_bh);
4132     s->vm_start_bh = NULL;
4133 
4134     vm_start();
4135     s->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - s->downtime_start;
4136 }
4137 
4138 /**
4139  * Background snapshot thread, based on the live migration code.
4140  * This is an alternative implementation of the live migration mechanism,
4141  * introduced specifically to support background snapshots.
4142  *
4143  * It takes advantage of the userfault_fd write-protection mechanism
4144  * introduced in the v5.7 kernel. Compared to the existing dirty-page-logging
4145  * migration, much less stream traffic is produced, resulting in smaller
4146  * snapshot images, simply because no duplicate pages can get into the stream.
4147  *
4148  * Another key point is that the generated vmstate stream reflects the machine
4149  * state 'frozen' at the beginning of snapshot creation, whereas with the
4150  * dirty-page-logging mechanism the saved snapshot is effectively the state
4151  * of the VM at the end of the process.
4152  */
4153 static void *bg_migration_thread(void *opaque)
4154 {
4155     MigrationState *s = opaque;
4156     int64_t setup_start;
4157     MigThrError thr_error;
4158     QEMUFile *fb;
4159     bool early_fail = true;
4160 
4161     rcu_register_thread();
4162     object_ref(OBJECT(s));
4163 
4164     qemu_file_set_rate_limit(s->to_dst_file, INT64_MAX);
4165 
4166     setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
4167     /*
4168      * We want to save the vmstate for the moment when migration has been
4169      * initiated, but also the RAM content while the VM is running.
4170      * The RAM content should appear first in the vmstate. So, we first
4171      * stash the non-RAM part of the vmstate to the temporary buffer,
4172      * then write RAM part of the vmstate to the migration stream
4173      * with vCPUs running and, finally, write stashed non-RAM part of
4174      * the vmstate from the buffer to the migration stream.
4175      */
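    /*
     * Resulting stream layout (added sketch):
     *
     *   | header | RAM sections (saved live) | buffered device state |
     *
     * The buffered tail is what bg_migration_completion() flushes from
     * s->bioc after RAM saving finishes.
     */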
4176     s->bioc = qio_channel_buffer_new(512 * 1024);
4177     qio_channel_set_name(QIO_CHANNEL(s->bioc), "vmstate-buffer");
4178     fb = qemu_file_new_output(QIO_CHANNEL(s->bioc));
4179     object_unref(OBJECT(s->bioc));
4180 
4181     update_iteration_initial_status(s);
4182 
4183     /*
4184      * Prepare for tracking memory writes with UFFD-WP - populate
4185      * RAM pages before protecting.
4186      */
4187 #ifdef __linux__
4188     ram_write_tracking_prepare();
4189 #endif
4190 
4191     qemu_savevm_state_header(s->to_dst_file);
4192     qemu_savevm_state_setup(s->to_dst_file);
4193 
4194     qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP,
4195                                MIGRATION_STATUS_ACTIVE);
4196 
4197     s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
4198 
4199     trace_migration_thread_setup_complete();
4200     s->downtime_start = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
4201 
4202     qemu_mutex_lock_iothread();
4203 
4204     /*
4205      * If the VM is currently in a suspended state, then, to make a valid
4206      * runstate transition in vm_stop_force_state(), we need to wake it up.
4207      */
4208     qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL);
4209     s->vm_was_running = runstate_is_running();
4210 
4211     if (global_state_store()) {
4212         goto fail;
4213     }
4214     /* Forcibly stop VM before saving state of vCPUs and devices */
4215     if (vm_stop_force_state(RUN_STATE_PAUSED)) {
4216         goto fail;
4217     }
4218     /*
4219      * Put vCPUs in sync with shadow context structures, then
4220      * save their state to channel-buffer along with devices.
4221      */
4222     cpu_synchronize_all_states();
4223     if (qemu_savevm_state_complete_precopy_non_iterable(fb, false, false)) {
4224         goto fail;
4225     }
4226     /*
4227      * Since we are going to get non-iterable state data directly
4228      * from s->bioc->data, an explicit flush is needed here.
4229      */
4230     qemu_fflush(fb);
4231 
4232     /* Now initialize UFFD context and start tracking RAM writes */
4233     if (ram_write_tracking_start()) {
4234         goto fail;
4235     }
4236     early_fail = false;
4237 
4238     /*
4239      * Start VM from BH handler to avoid write-fault lock here.
4240      * UFFD-WP protection for the whole RAM is already enabled so
4241      * calling VM state change notifiers from vm_start() would initiate
4242      * writes to virtio VQ memory, which is in the write-protected region.
4243      */
4244     s->vm_start_bh = qemu_bh_new(bg_migration_vm_start_bh, s);
4245     qemu_bh_schedule(s->vm_start_bh);
4246 
4247     qemu_mutex_unlock_iothread();
4248 
4249     while (migration_is_active(s)) {
4250         MigIterateState iter_state = bg_migration_iteration_run(s);
4251         if (iter_state == MIG_ITERATE_SKIP) {
4252             continue;
4253         } else if (iter_state == MIG_ITERATE_BREAK) {
4254             break;
4255         }
4256 
4257         /*
4258          * Try to detect any kind of failures, and see whether we
4259          * should stop the migration now.
4260          */
4261         thr_error = migration_detect_error(s);
4262         if (thr_error == MIG_THR_ERR_FATAL) {
4263             /* Stop migration */
4264             break;
4265         }
4266 
4267         migration_update_counters(s, qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
4268     }
4269 
4270     trace_migration_thread_after_loop();
4271 
4272 fail:
4273     if (early_fail) {
4274         migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
4275                 MIGRATION_STATUS_FAILED);
4276         qemu_mutex_unlock_iothread();
4277     }
4278 
4279     bg_migration_iteration_finish(s);
4280 
4281     qemu_fclose(fb);
4282     object_unref(OBJECT(s));
4283     rcu_unregister_thread();
4284 
4285     return NULL;
4286 }
4287 
4288 void migrate_fd_connect(MigrationState *s, Error *error_in)
4289 {
4290     Error *local_err = NULL;
4291     int64_t rate_limit;
4292     bool resume = s->state == MIGRATION_STATUS_POSTCOPY_PAUSED;
4293 
4294     /*
4295      * If there's a previous error, free it and prepare for another one.
4296      * Meanwhile, if migration completes successfully, there won't be an
4297      * error dumped when calling migrate_fd_cleanup().
4298      */
4299     migrate_error_free(s);
4300 
4301     s->expected_downtime = s->parameters.downtime_limit;
4302     if (resume) {
4303         assert(s->cleanup_bh);
4304     } else {
4305         assert(!s->cleanup_bh);
4306         s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup_bh, s);
4307     }
4308     if (error_in) {
4309         migrate_fd_error(s, error_in);
4310         if (resume) {
4311             /*
4312              * Don't do cleanup for resume if channel is invalid, but only dump
4313              * the error.  We wait for another channel connect from the user.
4314              * The error_report still gives the HMP user a hint about what failed.
4315              * It's normally done in migrate_fd_cleanup(), but call it here
4316              * explicitly.
4317              */
4318             error_report_err(error_copy(s->error));
4319         } else {
4320             migrate_fd_cleanup(s);
4321         }
4322         return;
4323     }
4324 
4325     if (resume) {
4326         /* This is a resumed migration */
4327         rate_limit = s->parameters.max_postcopy_bandwidth /
4328             XFER_LIMIT_RATIO;
4329     } else {
4330         /* This is a fresh new migration */
4331         rate_limit = s->parameters.max_bandwidth / XFER_LIMIT_RATIO;
4332 
4333         /* Notify before starting migration thread */
4334         notifier_list_notify(&migration_state_notifiers, s);
4335     }
4336 
4337     qemu_file_set_rate_limit(s->to_dst_file, rate_limit);
4338     qemu_file_set_blocking(s->to_dst_file, true);
4339 
4340     /*
4341      * Open the return path. For postcopy, it is used exclusively. For
4342      * precopy, QEMU uses the return path only if the user specified the
4343      * "return-path" capability.
4344      */
4345     if (migrate_postcopy_ram() || migrate_use_return_path()) {
4346         if (open_return_path_on_source(s, !resume)) {
4347             error_report("Unable to open return-path for postcopy");
4348             migrate_set_state(&s->state, s->state, MIGRATION_STATUS_FAILED);
4349             migrate_fd_cleanup(s);
4350             return;
4351         }
4352     }
4353 
4354     /*
4355      * This needs to be done before resuming a postcopy.  Note: for newer
4356      * QEMUs we will delay the channel creation until postcopy_start(), to
4357      * avoid out-of-order channel creation.
4358      */
4359     if (migrate_postcopy_preempt() && s->preempt_pre_7_2) {
4360         postcopy_preempt_setup(s);
4361     }
4362 
4363     if (resume) {
4364         /* Wake up the main migration thread to do the recovery */
4365         migrate_set_state(&s->state, MIGRATION_STATUS_POSTCOPY_PAUSED,
4366                           MIGRATION_STATUS_POSTCOPY_RECOVER);
4367         qemu_sem_post(&s->postcopy_pause_sem);
4368         return;
4369     }
4370 
4371     if (multifd_save_setup(&local_err) != 0) {
4372         error_report_err(local_err);
4373         migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
4374                           MIGRATION_STATUS_FAILED);
4375         migrate_fd_cleanup(s);
4376         return;
4377     }
4378 
4379     if (migrate_background_snapshot()) {
4380         qemu_thread_create(&s->thread, "bg_snapshot",
4381                 bg_migration_thread, s, QEMU_THREAD_JOINABLE);
4382     } else {
4383         qemu_thread_create(&s->thread, "live_migration",
4384                 migration_thread, s, QEMU_THREAD_JOINABLE);
4385     }
4386     s->migration_thread_running = true;
4387 }
4388 
4389 #define DEFINE_PROP_MIG_CAP(name, x)             \
4390     DEFINE_PROP_BOOL(name, MigrationState, capabilities[x], false)
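/*
 * Usage note (added commentary, hedged): because TYPE_MIGRATION derives
 * from TYPE_DEVICE (see the TypeInfo comment below), these properties can
 * be set from the command line with -global, e.g.:
 *
 *   qemu-system-x86_64 -global migration.multifd-channels=8
 */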
4391 
4392 static Property migration_properties[] = {
4393     DEFINE_PROP_BOOL("store-global-state", MigrationState,
4394                      store_global_state, true),
4395     DEFINE_PROP_BOOL("send-configuration", MigrationState,
4396                      send_configuration, true),
4397     DEFINE_PROP_BOOL("send-section-footer", MigrationState,
4398                      send_section_footer, true),
4399     DEFINE_PROP_BOOL("decompress-error-check", MigrationState,
4400                       decompress_error_check, true),
4401     DEFINE_PROP_UINT8("x-clear-bitmap-shift", MigrationState,
4402                       clear_bitmap_shift, CLEAR_BITMAP_SHIFT_DEFAULT),
4403     DEFINE_PROP_BOOL("x-preempt-pre-7-2", MigrationState,
4404                      preempt_pre_7_2, false),
4405 
4406     /* Migration parameters */
4407     DEFINE_PROP_UINT8("x-compress-level", MigrationState,
4408                       parameters.compress_level,
4409                       DEFAULT_MIGRATE_COMPRESS_LEVEL),
4410     DEFINE_PROP_UINT8("x-compress-threads", MigrationState,
4411                       parameters.compress_threads,
4412                       DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT),
4413     DEFINE_PROP_BOOL("x-compress-wait-thread", MigrationState,
4414                       parameters.compress_wait_thread, true),
4415     DEFINE_PROP_UINT8("x-decompress-threads", MigrationState,
4416                       parameters.decompress_threads,
4417                       DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT),
4418     DEFINE_PROP_UINT8("x-throttle-trigger-threshold", MigrationState,
4419                       parameters.throttle_trigger_threshold,
4420                       DEFAULT_MIGRATE_THROTTLE_TRIGGER_THRESHOLD),
4421     DEFINE_PROP_UINT8("x-cpu-throttle-initial", MigrationState,
4422                       parameters.cpu_throttle_initial,
4423                       DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL),
4424     DEFINE_PROP_UINT8("x-cpu-throttle-increment", MigrationState,
4425                       parameters.cpu_throttle_increment,
4426                       DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT),
4427     DEFINE_PROP_BOOL("x-cpu-throttle-tailslow", MigrationState,
4428                       parameters.cpu_throttle_tailslow, false),
4429     DEFINE_PROP_SIZE("x-max-bandwidth", MigrationState,
4430                       parameters.max_bandwidth, MAX_THROTTLE),
4431     DEFINE_PROP_UINT64("x-downtime-limit", MigrationState,
4432                       parameters.downtime_limit,
4433                       DEFAULT_MIGRATE_SET_DOWNTIME),
4434     DEFINE_PROP_UINT32("x-checkpoint-delay", MigrationState,
4435                       parameters.x_checkpoint_delay,
4436                       DEFAULT_MIGRATE_X_CHECKPOINT_DELAY),
4437     DEFINE_PROP_UINT8("multifd-channels", MigrationState,
4438                       parameters.multifd_channels,
4439                       DEFAULT_MIGRATE_MULTIFD_CHANNELS),
4440     DEFINE_PROP_MULTIFD_COMPRESSION("multifd-compression", MigrationState,
4441                       parameters.multifd_compression,
4442                       DEFAULT_MIGRATE_MULTIFD_COMPRESSION),
4443     DEFINE_PROP_UINT8("multifd-zlib-level", MigrationState,
4444                       parameters.multifd_zlib_level,
4445                       DEFAULT_MIGRATE_MULTIFD_ZLIB_LEVEL),
4446     DEFINE_PROP_UINT8("multifd-zstd-level", MigrationState,
4447                       parameters.multifd_zstd_level,
4448                       DEFAULT_MIGRATE_MULTIFD_ZSTD_LEVEL),
4449     DEFINE_PROP_SIZE("xbzrle-cache-size", MigrationState,
4450                       parameters.xbzrle_cache_size,
4451                       DEFAULT_MIGRATE_XBZRLE_CACHE_SIZE),
4452     DEFINE_PROP_SIZE("max-postcopy-bandwidth", MigrationState,
4453                       parameters.max_postcopy_bandwidth,
4454                       DEFAULT_MIGRATE_MAX_POSTCOPY_BANDWIDTH),
4455     DEFINE_PROP_UINT8("max-cpu-throttle", MigrationState,
4456                       parameters.max_cpu_throttle,
4457                       DEFAULT_MIGRATE_MAX_CPU_THROTTLE),
4458     DEFINE_PROP_SIZE("announce-initial", MigrationState,
4459                       parameters.announce_initial,
4460                       DEFAULT_MIGRATE_ANNOUNCE_INITIAL),
4461     DEFINE_PROP_SIZE("announce-max", MigrationState,
4462                       parameters.announce_max,
4463                       DEFAULT_MIGRATE_ANNOUNCE_MAX),
4464     DEFINE_PROP_SIZE("announce-rounds", MigrationState,
4465                       parameters.announce_rounds,
4466                       DEFAULT_MIGRATE_ANNOUNCE_ROUNDS),
4467     DEFINE_PROP_SIZE("announce-step", MigrationState,
4468                       parameters.announce_step,
4469                       DEFAULT_MIGRATE_ANNOUNCE_STEP),
4470     DEFINE_PROP_STRING("tls-creds", MigrationState, parameters.tls_creds),
4471     DEFINE_PROP_STRING("tls-hostname", MigrationState, parameters.tls_hostname),
4472     DEFINE_PROP_STRING("tls-authz", MigrationState, parameters.tls_authz),
4473 
4474     /* Migration capabilities */
4475     DEFINE_PROP_MIG_CAP("x-xbzrle", MIGRATION_CAPABILITY_XBZRLE),
4476     DEFINE_PROP_MIG_CAP("x-rdma-pin-all", MIGRATION_CAPABILITY_RDMA_PIN_ALL),
4477     DEFINE_PROP_MIG_CAP("x-auto-converge", MIGRATION_CAPABILITY_AUTO_CONVERGE),
4478     DEFINE_PROP_MIG_CAP("x-zero-blocks", MIGRATION_CAPABILITY_ZERO_BLOCKS),
4479     DEFINE_PROP_MIG_CAP("x-compress", MIGRATION_CAPABILITY_COMPRESS),
4480     DEFINE_PROP_MIG_CAP("x-events", MIGRATION_CAPABILITY_EVENTS),
4481     DEFINE_PROP_MIG_CAP("x-postcopy-ram", MIGRATION_CAPABILITY_POSTCOPY_RAM),
4482     DEFINE_PROP_MIG_CAP("x-postcopy-preempt",
4483                         MIGRATION_CAPABILITY_POSTCOPY_PREEMPT),
4484     DEFINE_PROP_MIG_CAP("x-colo", MIGRATION_CAPABILITY_X_COLO),
4485     DEFINE_PROP_MIG_CAP("x-release-ram", MIGRATION_CAPABILITY_RELEASE_RAM),
4486     DEFINE_PROP_MIG_CAP("x-block", MIGRATION_CAPABILITY_BLOCK),
4487     DEFINE_PROP_MIG_CAP("x-return-path", MIGRATION_CAPABILITY_RETURN_PATH),
4488     DEFINE_PROP_MIG_CAP("x-multifd", MIGRATION_CAPABILITY_MULTIFD),
4489     DEFINE_PROP_MIG_CAP("x-background-snapshot",
4490             MIGRATION_CAPABILITY_BACKGROUND_SNAPSHOT),
4491 #ifdef CONFIG_LINUX
4492     DEFINE_PROP_MIG_CAP("x-zero-copy-send",
4493             MIGRATION_CAPABILITY_ZERO_COPY_SEND),
4494 #endif
4495 
4496     DEFINE_PROP_END_OF_LIST(),
4497 };
4498 
4499 static void migration_class_init(ObjectClass *klass, void *data)
4500 {
4501     DeviceClass *dc = DEVICE_CLASS(klass);
4502 
4503     dc->user_creatable = false;
4504     device_class_set_props(dc, migration_properties);
4505 }
4506 
4507 static void migration_instance_finalize(Object *obj)
4508 {
4509     MigrationState *ms = MIGRATION_OBJ(obj);
4510 
4511     qemu_mutex_destroy(&ms->error_mutex);
4512     qemu_mutex_destroy(&ms->qemu_file_lock);
4513     qemu_sem_destroy(&ms->wait_unplug_sem);
4514     qemu_sem_destroy(&ms->rate_limit_sem);
4515     qemu_sem_destroy(&ms->pause_sem);
4516     qemu_sem_destroy(&ms->postcopy_pause_sem);
4517     qemu_sem_destroy(&ms->postcopy_pause_rp_sem);
4518     qemu_sem_destroy(&ms->rp_state.rp_sem);
4519     qemu_sem_destroy(&ms->rp_state.rp_pong_acks);
4520     qemu_sem_destroy(&ms->postcopy_qemufile_src_sem);
4521     error_free(ms->error);
4522 }
4523 
4524 static void migration_instance_init(Object *obj)
4525 {
4526     MigrationState *ms = MIGRATION_OBJ(obj);
4527     MigrationParameters *params = &ms->parameters;
4528 
4529     ms->state = MIGRATION_STATUS_NONE;
4530     ms->mbps = -1;
4531     ms->pages_per_second = -1;
4532     qemu_sem_init(&ms->pause_sem, 0);
4533     qemu_mutex_init(&ms->error_mutex);
4534 
4535     params->tls_hostname = g_strdup("");
4536     params->tls_creds = g_strdup("");
4537 
4538     /* Set has_* up only for parameter checks */
4539     params->has_compress_level = true;
4540     params->has_compress_threads = true;
4541     params->has_compress_wait_thread = true;
4542     params->has_decompress_threads = true;
4543     params->has_throttle_trigger_threshold = true;
4544     params->has_cpu_throttle_initial = true;
4545     params->has_cpu_throttle_increment = true;
4546     params->has_cpu_throttle_tailslow = true;
4547     params->has_max_bandwidth = true;
4548     params->has_downtime_limit = true;
4549     params->has_x_checkpoint_delay = true;
4550     params->has_block_incremental = true;
4551     params->has_multifd_channels = true;
4552     params->has_multifd_compression = true;
4553     params->has_multifd_zlib_level = true;
4554     params->has_multifd_zstd_level = true;
4555     params->has_xbzrle_cache_size = true;
4556     params->has_max_postcopy_bandwidth = true;
4557     params->has_max_cpu_throttle = true;
4558     params->has_announce_initial = true;
4559     params->has_announce_max = true;
4560     params->has_announce_rounds = true;
4561     params->has_announce_step = true;
4562 
4563     qemu_sem_init(&ms->postcopy_pause_sem, 0);
4564     qemu_sem_init(&ms->postcopy_pause_rp_sem, 0);
4565     qemu_sem_init(&ms->rp_state.rp_sem, 0);
4566     qemu_sem_init(&ms->rp_state.rp_pong_acks, 0);
4567     qemu_sem_init(&ms->rate_limit_sem, 0);
4568     qemu_sem_init(&ms->wait_unplug_sem, 0);
4569     qemu_sem_init(&ms->postcopy_qemufile_src_sem, 0);
4570     qemu_mutex_init(&ms->qemu_file_lock);
4571 }
4572 
4573 /*
4574  * Return true if the checks pass, false otherwise. An error will be
4575  * placed into errp if provided.
4576  */
4577 static bool migration_object_check(MigrationState *ms, Error **errp)
4578 {
4579     /* Assuming all off */
4580     bool old_caps[MIGRATION_CAPABILITY__MAX] = { 0 };
4581 
4582     if (!migrate_params_check(&ms->parameters, errp)) {
4583         return false;
4584     }
4585 
4586     return migrate_caps_check(old_caps, ms->capabilities, errp);
4587 }
4588 
4589 static const TypeInfo migration_type = {
4590     .name = TYPE_MIGRATION,
4591     /*
4592      * NOTE: TYPE_MIGRATION is not really a device, as the object is
4593      * not created using qdev_new(), it is not attached to the qdev
4594      * device tree, and it is never realized.
4595      *
4596      * TODO: Make this TYPE_OBJECT once QOM provides something like
4597      * TYPE_DEVICE's "-global" properties.
4598      */
4599     .parent = TYPE_DEVICE,
4600     .class_init = migration_class_init,
4601     .class_size = sizeof(MigrationClass),
4602     .instance_size = sizeof(MigrationState),
4603     .instance_init = migration_instance_init,
4604     .instance_finalize = migration_instance_finalize,
4605 };
4606 
4607 static void register_migration_types(void)
4608 {
4609     type_register_static(&migration_type);
4610 }
4611 
4612 type_init(register_migration_types);
4613