/* Source: /openbmc/qemu/migration/migration.c (revision eda7362af9595a9c6b1f1fefdd94b5ef711c250c) */
/*
 * QEMU live migration
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "migration/blocker.h"
#include "exec.h"
#include "fd.h"
#include "socket.h"
#include "sysemu/runstate.h"
#include "sysemu/sysemu.h"
#include "sysemu/cpu-throttle.h"
#include "rdma.h"
#include "ram.h"
#include "ram-compress.h"
#include "migration/global_state.h"
#include "migration/misc.h"
#include "migration.h"
#include "migration-stats.h"
#include "savevm.h"
#include "qemu-file.h"
#include "channel.h"
#include "migration/vmstate.h"
#include "block/block.h"
#include "qapi/error.h"
#include "qapi/clone-visitor.h"
#include "qapi/qapi-visit-migration.h"
#include "qapi/qapi-visit-sockets.h"
#include "qapi/qapi-commands-migration.h"
#include "qapi/qapi-events-migration.h"
#include "qapi/qmp/qerror.h"
#include "qapi/qmp/qnull.h"
#include "qemu/rcu.h"
#include "block.h"
#include "postcopy-ram.h"
#include "qemu/thread.h"
#include "trace.h"
#include "exec/target_page.h"
#include "io/channel-buffer.h"
#include "io/channel-tls.h"
#include "migration/colo.h"
#include "hw/boards.h"
#include "monitor/monitor.h"
#include "net/announce.h"
#include "qemu/queue.h"
#include "multifd.h"
#include "threadinfo.h"
#include "qemu/yank.h"
#include "sysemu/cpus.h"
#include "yank_functions.h"
#include "sysemu/qtest.h"
#include "options.h"

static NotifierList migration_state_notifiers =
    NOTIFIER_LIST_INITIALIZER(migration_state_notifiers);

/* Messages sent on the return path from destination to source */
enum mig_rp_message_type {
    MIG_RP_MSG_INVALID = 0,  /* Must be 0 */
    MIG_RP_MSG_SHUT,         /* sibling will not send any more RP messages */
    MIG_RP_MSG_PONG,         /* Response to a PING; data (seq: be32) */

    MIG_RP_MSG_REQ_PAGES_ID, /* data (start: be64, len: be32, id: string) */
    MIG_RP_MSG_REQ_PAGES,    /* data (start: be64, len: be32) */
    MIG_RP_MSG_RECV_BITMAP,  /* send recved_bitmap back to source */
    MIG_RP_MSG_RESUME_ACK,   /* tell source that we are ready to resume */
    MIG_RP_MSG_SWITCHOVER_ACK, /* Tell source it's OK to do switchover */

    MIG_RP_MSG_MAX
};
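
/*
 * Editor's note (illustrative, not part of the original source): as
 * implemented in migrate_send_rp_message() below, each return-path
 * message is framed on the wire as
 *
 *   +-------------+------------+---------------------+
 *   | type (be16) | len (be16) | payload (len bytes) |
 *   +-------------+------------+---------------------+
 *
 * e.g. a PONG carrying sequence number 7 goes out as
 * type=MIG_RP_MSG_PONG, len=4, payload=be32(7).
 */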

/*
 * When we add fault tolerance, we could have several migrations at once.
 * For now we don't need dynamic creation of migration objects.
 */

static MigrationState *current_migration;
static MigrationIncomingState *current_incoming;

static GSList *migration_blockers;

static bool migration_object_check(MigrationState *ms, Error **errp);
static int migration_maybe_pause(MigrationState *s,
                                 int *current_active_state,
                                 int new_state);
static void migrate_fd_cancel(MigrationState *s);

static bool migration_needs_multiple_sockets(void)
{
    return migrate_multifd() || migrate_postcopy_preempt();
}

static bool uri_supports_multi_channels(const char *uri)
{
    return strstart(uri, "tcp:", NULL) || strstart(uri, "unix:", NULL) ||
           strstart(uri, "vsock:", NULL);
}

static bool
migration_channels_and_uri_compatible(const char *uri, Error **errp)
{
    if (migration_needs_multiple_sockets() &&
        !uri_supports_multi_channels(uri)) {
        error_setg(errp, "Migration requires multi-channel URIs (e.g. tcp)");
        return false;
    }

    return true;
}
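
/*
 * Example (editor's note): with the multifd capability enabled, an
 * "exec:..." or "fd:..." URI is rejected here, while "tcp:host:port",
 * "unix:/path" and "vsock:cid:port" are accepted, since only the socket
 * transports can carry multiple channels.
 */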

static gint page_request_addr_cmp(gconstpointer ap, gconstpointer bp)
{
    uintptr_t a = (uintptr_t) ap, b = (uintptr_t) bp;

    return (a > b) - (a < b);
}
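
/*
 * Editor's note: this comparator gives the page_requested GTree a total
 * order over host addresses, e.g. comparing (void *)0x1000 with
 * (void *)0x2000 yields -1, and equal addresses yield 0.
 */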

void migration_object_init(void)
{
    /* This can only be called once. */
    assert(!current_migration);
    current_migration = MIGRATION_OBJ(object_new(TYPE_MIGRATION));

    /*
     * Initialize the incoming migration object as well, whether or not
     * we end up using it.
     */
    assert(!current_incoming);
    current_incoming = g_new0(MigrationIncomingState, 1);
    current_incoming->state = MIGRATION_STATUS_NONE;
    current_incoming->postcopy_remote_fds =
        g_array_new(FALSE, TRUE, sizeof(struct PostCopyFD));
    qemu_mutex_init(&current_incoming->rp_mutex);
    qemu_mutex_init(&current_incoming->postcopy_prio_thread_mutex);
    qemu_event_init(&current_incoming->main_thread_load_event, false);
    qemu_sem_init(&current_incoming->postcopy_pause_sem_dst, 0);
    qemu_sem_init(&current_incoming->postcopy_pause_sem_fault, 0);
    qemu_sem_init(&current_incoming->postcopy_pause_sem_fast_load, 0);
    qemu_sem_init(&current_incoming->postcopy_qemufile_dst_done, 0);

    qemu_mutex_init(&current_incoming->page_request_mutex);
    current_incoming->page_requested = g_tree_new(page_request_addr_cmp);

    migration_object_check(current_migration, &error_fatal);

    blk_mig_init();
    ram_mig_init();
    dirty_bitmap_mig_init();
}

void migration_cancel(const Error *error)
{
    if (error) {
        migrate_set_error(current_migration, error);
    }
    migrate_fd_cancel(current_migration);
}

void migration_shutdown(void)
{
    /*
     * When the QEMU main thread exits, the COLO thread may be waiting
     * on a semaphore, so wake it up before migration shutdown.
     */
    colo_shutdown();
    /*
     * Cancel the current migration - that will (eventually)
     * stop the migration using this structure
     */
    migration_cancel(NULL);
    object_unref(OBJECT(current_migration));

    /*
     * Cancel outgoing migration of dirty bitmaps. It should
     * at least unref used block nodes.
     */
    dirty_bitmap_mig_cancel_outgoing();

    /*
     * Cancel incoming migration of dirty bitmaps. Dirty bitmaps are
     * non-critical data, and their loss is never considered serious.
     */
    dirty_bitmap_mig_cancel_incoming();
}

/* For outgoing */
MigrationState *migrate_get_current(void)
{
    /* This can only be called after the object is created. */
    assert(current_migration);
    return current_migration;
}

MigrationIncomingState *migration_incoming_get_current(void)
{
    assert(current_incoming);
    return current_incoming;
}

void migration_incoming_transport_cleanup(MigrationIncomingState *mis)
{
    if (mis->socket_address_list) {
        qapi_free_SocketAddressList(mis->socket_address_list);
        mis->socket_address_list = NULL;
    }

    if (mis->transport_cleanup) {
        mis->transport_cleanup(mis->transport_data);
        mis->transport_data = mis->transport_cleanup = NULL;
    }
}

void migration_incoming_state_destroy(void)
{
    struct MigrationIncomingState *mis = migration_incoming_get_current();

    multifd_load_cleanup();
    compress_threads_load_cleanup();

    if (mis->to_src_file) {
        /* Tell source that we are done */
        migrate_send_rp_shut(mis, qemu_file_get_error(mis->from_src_file) != 0);
        qemu_fclose(mis->to_src_file);
        mis->to_src_file = NULL;
    }

    if (mis->from_src_file) {
        migration_ioc_unregister_yank_from_file(mis->from_src_file);
        qemu_fclose(mis->from_src_file);
        mis->from_src_file = NULL;
    }
    if (mis->postcopy_remote_fds) {
        g_array_free(mis->postcopy_remote_fds, TRUE);
        mis->postcopy_remote_fds = NULL;
    }

    migration_incoming_transport_cleanup(mis);
    qemu_event_reset(&mis->main_thread_load_event);

    if (mis->page_requested) {
        g_tree_destroy(mis->page_requested);
        mis->page_requested = NULL;
    }

    if (mis->postcopy_qemufile_dst) {
        migration_ioc_unregister_yank_from_file(mis->postcopy_qemufile_dst);
        qemu_fclose(mis->postcopy_qemufile_dst);
        mis->postcopy_qemufile_dst = NULL;
    }

    yank_unregister_instance(MIGRATION_YANK_INSTANCE);
}

static void migrate_generate_event(int new_state)
{
    if (migrate_events()) {
        qapi_event_send_migration(new_state);
    }
}

/*
 * Send a message on the return channel back to the source
 * of the migration.
 */
static int migrate_send_rp_message(MigrationIncomingState *mis,
                                   enum mig_rp_message_type message_type,
                                   uint16_t len, void *data)
{
    int ret = 0;

    trace_migrate_send_rp_message((int)message_type, len);
    QEMU_LOCK_GUARD(&mis->rp_mutex);

    /*
     * It's possible that the file handle got lost due to network
     * failures.
     */
    if (!mis->to_src_file) {
        ret = -EIO;
        return ret;
    }

    qemu_put_be16(mis->to_src_file, (unsigned int)message_type);
    qemu_put_be16(mis->to_src_file, len);
    qemu_put_buffer(mis->to_src_file, data, len);
    qemu_fflush(mis->to_src_file);

    /* It's possible that the QEMU file got an error during sending */
    ret = qemu_file_get_error(mis->to_src_file);

    return ret;
}

/*
 * Request one page from the source VM at the given start address.
 *   rb: the RAMBlock to request the page in
 *   start: address offset within the RAMBlock
 *   len: length in bytes required - must be a multiple of pagesize
 */
int migrate_send_rp_message_req_pages(MigrationIncomingState *mis,
                                      RAMBlock *rb, ram_addr_t start)
{
    uint8_t bufc[12 + 1 + 255]; /* start (8), len (4), rbname len (1) + up to 255 */
    size_t msglen = 12; /* start + len */
    size_t len = qemu_ram_pagesize(rb);
    enum mig_rp_message_type msg_type;
    const char *rbname;
    int rbname_len;

    *(uint64_t *)bufc = cpu_to_be64((uint64_t)start);
    *(uint32_t *)(bufc + 8) = cpu_to_be32((uint32_t)len);

    /*
     * We track the last RAMBlock that we requested a page from.  Note that
     * we don't need locking because this function will only be called
     * within the postcopy ram fault thread.
     */
    if (rb != mis->last_rb) {
        mis->last_rb = rb;

        rbname = qemu_ram_get_idstr(rb);
        rbname_len = strlen(rbname);

        assert(rbname_len < 256);

        bufc[msglen++] = rbname_len;
        memcpy(bufc + msglen, rbname, rbname_len);
        msglen += rbname_len;
        msg_type = MIG_RP_MSG_REQ_PAGES_ID;
    } else {
        msg_type = MIG_RP_MSG_REQ_PAGES;
    }

    return migrate_send_rp_message(mis, msg_type, msglen, bufc);
}
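
/*
 * Illustrative payload layouts (editor's note), matching the code above:
 *
 *   MIG_RP_MSG_REQ_PAGES:    start (be64) | len (be32)
 *   MIG_RP_MSG_REQ_PAGES_ID: start (be64) | len (be32) |
 *                            rbname_len (u8) | rbname (rbname_len bytes)
 *
 * The ID variant is only sent when the requested RAMBlock differs from
 * the one in the previous request (mis->last_rb).
 */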

int migrate_send_rp_req_pages(MigrationIncomingState *mis,
                              RAMBlock *rb, ram_addr_t start, uint64_t haddr)
{
    void *aligned = (void *)(uintptr_t)ROUND_DOWN(haddr, qemu_ram_pagesize(rb));
    bool received = false;

    WITH_QEMU_LOCK_GUARD(&mis->page_request_mutex) {
        received = ramblock_recv_bitmap_test_byte_offset(rb, start);
        if (!received && !g_tree_lookup(mis->page_requested, aligned)) {
            /*
             * The page has not been received, and it's not yet in the page
             * request list.  Queue it.  Set the value of element to 1, so that
             * things like g_tree_lookup() will return TRUE (1) when found.
             */
            g_tree_insert(mis->page_requested, aligned, (gpointer)1);
            mis->page_requested_count++;
            trace_postcopy_page_req_add(aligned, mis->page_requested_count);
        }
    }

    /*
     * If the page is there, skip sending the message.  We don't even need the
     * lock because as long as the page arrived, it'll be there forever.
     */
    if (received) {
        return 0;
    }

    return migrate_send_rp_message_req_pages(mis, rb, start);
}

static bool migration_colo_enabled;
bool migration_incoming_colo_enabled(void)
{
    return migration_colo_enabled;
}

void migration_incoming_disable_colo(void)
{
    ram_block_discard_disable(false);
    migration_colo_enabled = false;
}

int migration_incoming_enable_colo(void)
{
#ifndef CONFIG_REPLICATION
    error_report("ENABLE_COLO command came in the migration stream, but the "
                 "COLO module is not built in");
    return -ENOTSUP;
#endif

    if (!migrate_colo()) {
        error_report("ENABLE_COLO command came in the migration stream, but "
                     "the c-colo capability is not set");
        return -EINVAL;
    }

    if (ram_block_discard_disable(true)) {
        error_report("COLO: cannot disable RAM discard");
        return -EBUSY;
    }
    migration_colo_enabled = true;
    return 0;
}

void migrate_add_address(SocketAddress *address)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    QAPI_LIST_PREPEND(mis->socket_address_list,
                      QAPI_CLONE(SocketAddress, address));
}

static void qemu_start_incoming_migration(const char *uri, Error **errp)
{
    const char *p = NULL;

    /* Bail out if the URI is not compatible with the requested channels */
    if (!migration_channels_and_uri_compatible(uri, errp)) {
        return;
    }

    qapi_event_send_migration(MIGRATION_STATUS_SETUP);
    if (strstart(uri, "tcp:", &p) ||
        strstart(uri, "unix:", NULL) ||
        strstart(uri, "vsock:", NULL)) {
        socket_start_incoming_migration(p ? p : uri, errp);
#ifdef CONFIG_RDMA
    } else if (strstart(uri, "rdma:", &p)) {
        rdma_start_incoming_migration(p, errp);
#endif
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_incoming_migration(p, errp);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_incoming_migration(p, errp);
    } else {
        error_setg(errp, "unknown migration protocol: %s", uri);
    }
}
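
/*
 * Example URIs accepted above (editor's note; the exact suffix syntax is
 * parsed by the respective *_start_incoming_migration() helpers):
 *
 *   tcp:0.0.0.0:4444       listen on a TCP socket
 *   unix:/tmp/mig.sock     listen on a UNIX socket
 *   fd:42                  read from an already-open file descriptor
 *   exec:cat state.bin     pipe the stream through a command
 *   rdma:host:4444         RDMA transport (only with CONFIG_RDMA)
 */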

static void process_incoming_migration_bh(void *opaque)
{
    Error *local_err = NULL;
    MigrationIncomingState *mis = opaque;

    /* If the capability late_block_activate is set:
     * Only fire up the block code now if we're going to restart the
     * VM, else 'cont' will do it.
     * This causes file locking to happen, so we don't want it to happen
     * unless we really are starting the VM.
     */
    if (!migrate_late_block_activate() ||
         (autostart && (!global_state_received() ||
            global_state_get_runstate() == RUN_STATE_RUNNING))) {
        /* Make sure all file formats throw away their mutable metadata.
         * If we get an error here, just don't restart the VM yet. */
        bdrv_activate_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
            local_err = NULL;
            autostart = false;
        }
    }

    /*
     * This must happen after all error conditions are dealt with and
     * we're sure the VM is going to be running on this host.
     */
    qemu_announce_self(&mis->announce_timer, migrate_announce_params());

    multifd_load_shutdown();

    dirty_bitmap_mig_before_vm_start();

    if (!global_state_received() ||
        global_state_get_runstate() == RUN_STATE_RUNNING) {
        if (autostart) {
            vm_start();
        } else {
            runstate_set(RUN_STATE_PAUSED);
        }
    } else if (migration_incoming_colo_enabled()) {
        migration_incoming_disable_colo();
        vm_start();
    } else {
        runstate_set(global_state_get_runstate());
    }
    /*
     * This must happen after any state changes since as soon as an external
     * observer sees this event they might start to prod at the VM assuming
     * it's ready to use.
     */
    migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                      MIGRATION_STATUS_COMPLETED);
    qemu_bh_delete(mis->bh);
    migration_incoming_state_destroy();
}

static void coroutine_fn
process_incoming_migration_co(void *opaque)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyState ps;
    int ret;

    assert(mis->from_src_file);

    if (compress_threads_load_setup(mis->from_src_file)) {
        error_report("Failed to setup decompress threads");
        goto fail;
    }

    mis->largest_page_size = qemu_ram_pagesize_largest();
    postcopy_state_set(POSTCOPY_INCOMING_NONE);
    migrate_set_state(&mis->state, MIGRATION_STATUS_NONE,
                      MIGRATION_STATUS_ACTIVE);

    mis->loadvm_co = qemu_coroutine_self();
    ret = qemu_loadvm_state(mis->from_src_file);
    mis->loadvm_co = NULL;

    ps = postcopy_state_get();
    trace_process_incoming_migration_co_end(ret, ps);
    if (ps != POSTCOPY_INCOMING_NONE) {
        if (ps == POSTCOPY_INCOMING_ADVISE) {
            /*
             * Where a migration had postcopy enabled (and thus went to advise)
             * but managed to complete within the precopy period, we can use
             * the normal exit.
             */
            postcopy_ram_incoming_cleanup(mis);
        } else if (ret >= 0) {
            /*
             * Postcopy was started, cleanup should happen at the end of the
             * postcopy thread.
             */
            trace_process_incoming_migration_co_postcopy_end_main();
            return;
        }
        /* Otherwise something went wrong; fall through to the normal exit */
    }

    if (ret < 0) {
        error_report("load of migration failed: %s", strerror(-ret));
        goto fail;
    }

    if (colo_incoming_co() < 0) {
        goto fail;
    }

    mis->bh = qemu_bh_new(process_incoming_migration_bh, mis);
    qemu_bh_schedule(mis->bh);
    return;
fail:
    migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                      MIGRATION_STATUS_FAILED);
    qemu_fclose(mis->from_src_file);

    multifd_load_cleanup();
    compress_threads_load_cleanup();

    exit(EXIT_FAILURE);
}

/**
 * migration_incoming_setup: Setup incoming migration
 * @f: file for main migration channel
 * @errp: where to put errors
 *
 * Returns: %true on success, %false on error.
 */
static bool migration_incoming_setup(QEMUFile *f, Error **errp)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    if (!mis->from_src_file) {
        mis->from_src_file = f;
    }
    qemu_file_set_blocking(f, false);
    return true;
}

void migration_incoming_process(void)
{
    Coroutine *co = qemu_coroutine_create(process_incoming_migration_co, NULL);
    qemu_coroutine_enter(co);
}

/* Returns true if recovered from a paused migration, otherwise false */
static bool postcopy_try_recover(void)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    if (mis->state == MIGRATION_STATUS_POSTCOPY_PAUSED) {
        /* Resumed from a paused postcopy migration */

        /* This should be set already in migration_incoming_setup() */
        assert(mis->from_src_file);
        /* Postcopy has a standalone thread to do the VM load */
        qemu_file_set_blocking(mis->from_src_file, true);

        /* Re-configure the return path */
        mis->to_src_file = qemu_file_get_return_path(mis->from_src_file);

        migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_PAUSED,
                          MIGRATION_STATUS_POSTCOPY_RECOVER);

        /*
         * Here we only wake up the main loading thread (the other threads
         * keep waiting), so that we can receive commands from the source
         * and answer them if needed.  The other threads will be woken up
         * later, once we are sure the source is ready to reply to page
         * requests.
         */
        qemu_sem_post(&mis->postcopy_pause_sem_dst);
        return true;
    }

    return false;
}
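
/*
 * Editor's note: on a successful recovery the destination state machine
 * moves POSTCOPY_PAUSED -> POSTCOPY_RECOVER here; the source side is
 * expected to perform the matching transition once the new channel is
 * established.
 */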

void migration_fd_process_incoming(QEMUFile *f, Error **errp)
{
    if (!migration_incoming_setup(f, errp)) {
        return;
    }
    if (postcopy_try_recover()) {
        return;
    }
    migration_incoming_process();
}

/*
 * Returns true when we want to start a new incoming migration process,
 * false otherwise.
 */
static bool migration_should_start_incoming(bool main_channel)
{
    /* Multifd doesn't start unless all channels are established */
    if (migrate_multifd()) {
        return migration_has_all_channels();
    }

    /* The preempt channel only starts when the main channel is created */
    if (migrate_postcopy_preempt()) {
        return main_channel;
    }

    /*
     * For all other types of migration, we should only reach here when
     * it's the main channel that's being created, and we should always
     * proceed with this channel.
     */
    assert(main_channel);
    return true;
}

void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    Error *local_err = NULL;
    QEMUFile *f;
    bool default_channel = true;
    uint32_t channel_magic = 0;
    int ret = 0;

    if (migrate_multifd() && !migrate_postcopy_ram() &&
        qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_READ_MSG_PEEK)) {
        /*
         * With multiple channels, it is possible that we receive channels
         * out of order on the destination side, causing an incorrect
         * mapping of source channels on the destination side.  Check the
         * channel magic to decide the channel type.  Note this is best
         * effort: the postcopy preempt channel does not send any magic
         * number, so avoid it for postcopy live migration.  TLS live
         * migration already performs the TLS handshake while initializing
         * the main channel, so this issue cannot occur with TLS.
         */
        ret = migration_channel_read_peek(ioc, (void *)&channel_magic,
                                          sizeof(channel_magic), &local_err);

        if (ret != 0) {
            error_propagate(errp, local_err);
            return;
        }

        default_channel = (channel_magic == cpu_to_be32(QEMU_VM_FILE_MAGIC));
    } else {
        default_channel = !mis->from_src_file;
    }

    if (multifd_load_setup(errp) != 0) {
        error_setg(errp, "Failed to setup multifd channels");
        return;
    }

    if (default_channel) {
        f = qemu_file_new_input(ioc);

        if (!migration_incoming_setup(f, errp)) {
            return;
        }
    } else {
        /* Multiple connections */
        assert(migration_needs_multiple_sockets());
        if (migrate_multifd()) {
            multifd_recv_new_channel(ioc, &local_err);
        } else {
            assert(migrate_postcopy_preempt());
            f = qemu_file_new_input(ioc);
            postcopy_preempt_new_channel(mis, f);
        }
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }

    if (migration_should_start_incoming(default_channel)) {
        /* If it's a recovery, we're done */
        if (postcopy_try_recover()) {
            return;
        }
        migration_incoming_process();
    }
}

/**
 * @migration_has_all_channels: We have received all channels that we need
 *
 * Returns true when we have got connections to all the channels that
 * we need for migration.
 */
bool migration_has_all_channels(void)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    if (!mis->from_src_file) {
        return false;
    }

    if (migrate_multifd()) {
        return multifd_recv_all_channels_created();
    }

    if (migrate_postcopy_preempt()) {
        return mis->postcopy_qemufile_dst != NULL;
    }

    return true;
}

int migrate_send_rp_switchover_ack(MigrationIncomingState *mis)
{
    return migrate_send_rp_message(mis, MIG_RP_MSG_SWITCHOVER_ACK, 0, NULL);
}

/*
 * Send a 'SHUT' message on the return channel with the given value
 * to indicate that we've finished with the RP.  A non-zero value
 * indicates an error.
 */
void migrate_send_rp_shut(MigrationIncomingState *mis,
                          uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_SHUT, sizeof(buf), &buf);
}

/*
 * Send a 'PONG' message on the return channel with the given value
 * (normally in response to a 'PING')
 */
void migrate_send_rp_pong(MigrationIncomingState *mis,
                          uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_PONG, sizeof(buf), &buf);
}

void migrate_send_rp_recv_bitmap(MigrationIncomingState *mis,
                                 char *block_name)
{
    char buf[512];
    int len;
    int64_t res;

    /*
     * First, we send the header part. It contains only the len of
     * idstr, and the idstr itself.
     */
    len = strlen(block_name);
    buf[0] = len;
    memcpy(buf + 1, block_name, len);

    if (mis->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
        error_report("%s: MSG_RP_RECV_BITMAP only used for recovery",
                     __func__);
        return;
    }

    migrate_send_rp_message(mis, MIG_RP_MSG_RECV_BITMAP, len + 1, buf);

    /*
     * Next, we dump the received bitmap to the stream.
     *
     * TODO: currently we are safe since we are the only one that is
     * using the to_src_file handle (the fault thread is still paused),
     * so it would be OK even without taking the mutex.  Still, the
     * cleaner way is to take the lock before sending the message header
     * and release it after sending the bitmap.
     */
    qemu_mutex_lock(&mis->rp_mutex);
    res = ramblock_recv_bitmap_send(mis->to_src_file, block_name);
    qemu_mutex_unlock(&mis->rp_mutex);

    trace_migrate_send_rp_recv_bitmap(block_name, res);
}

void migrate_send_rp_resume_ack(MigrationIncomingState *mis, uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_RESUME_ACK, sizeof(buf), &buf);
}

/*
 * Return true if we're already in the middle of a migration
 * (i.e. any of the active or setup states)
 */
bool migration_is_setup_or_active(int state)
{
    switch (state) {
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
    case MIGRATION_STATUS_SETUP:
    case MIGRATION_STATUS_PRE_SWITCHOVER:
    case MIGRATION_STATUS_DEVICE:
    case MIGRATION_STATUS_WAIT_UNPLUG:
    case MIGRATION_STATUS_COLO:
        return true;

    default:
        return false;
    }
}

bool migration_is_running(int state)
{
    switch (state) {
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
    case MIGRATION_STATUS_SETUP:
    case MIGRATION_STATUS_PRE_SWITCHOVER:
    case MIGRATION_STATUS_DEVICE:
    case MIGRATION_STATUS_WAIT_UNPLUG:
    case MIGRATION_STATUS_CANCELLING:
        return true;

    default:
        return false;
    }
}

static bool migrate_show_downtime(MigrationState *s)
{
    return (s->state == MIGRATION_STATUS_COMPLETED) || migration_in_postcopy();
}

static void populate_time_info(MigrationInfo *info, MigrationState *s)
{
    info->has_status = true;
    info->has_setup_time = true;
    info->setup_time = s->setup_time;

    if (s->state == MIGRATION_STATUS_COMPLETED) {
        info->has_total_time = true;
        info->total_time = s->total_time;
    } else {
        info->has_total_time = true;
        info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) -
                           s->start_time;
    }

    if (migrate_show_downtime(s)) {
        info->has_downtime = true;
        info->downtime = s->downtime;
    } else {
        info->has_expected_downtime = true;
        info->expected_downtime = s->expected_downtime;
    }
}

static void populate_ram_info(MigrationInfo *info, MigrationState *s)
{
    size_t page_size = qemu_target_page_size();

    info->ram = g_malloc0(sizeof(*info->ram));
    info->ram->transferred = stat64_get(&mig_stats.transferred);
    info->ram->total = ram_bytes_total();
    info->ram->duplicate = stat64_get(&mig_stats.zero_pages);
    /* legacy value.  It is not used anymore */
    info->ram->skipped = 0;
    info->ram->normal = stat64_get(&mig_stats.normal_pages);
    info->ram->normal_bytes = info->ram->normal * page_size;
    info->ram->mbps = s->mbps;
    info->ram->dirty_sync_count =
        stat64_get(&mig_stats.dirty_sync_count);
    info->ram->dirty_sync_missed_zero_copy =
        stat64_get(&mig_stats.dirty_sync_missed_zero_copy);
    info->ram->postcopy_requests =
        stat64_get(&mig_stats.postcopy_requests);
    info->ram->page_size = page_size;
    info->ram->multifd_bytes = stat64_get(&mig_stats.multifd_bytes);
    info->ram->pages_per_second = s->pages_per_second;
    info->ram->precopy_bytes = stat64_get(&mig_stats.precopy_bytes);
    info->ram->downtime_bytes = stat64_get(&mig_stats.downtime_bytes);
    info->ram->postcopy_bytes = stat64_get(&mig_stats.postcopy_bytes);

    if (migrate_xbzrle()) {
        info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
        info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size();
        info->xbzrle_cache->bytes = xbzrle_counters.bytes;
        info->xbzrle_cache->pages = xbzrle_counters.pages;
        info->xbzrle_cache->cache_miss = xbzrle_counters.cache_miss;
        info->xbzrle_cache->cache_miss_rate = xbzrle_counters.cache_miss_rate;
        info->xbzrle_cache->encoding_rate = xbzrle_counters.encoding_rate;
        info->xbzrle_cache->overflow = xbzrle_counters.overflow;
    }

    if (migrate_compress()) {
        info->compression = g_malloc0(sizeof(*info->compression));
        info->compression->pages = compression_counters.pages;
        info->compression->busy = compression_counters.busy;
        info->compression->busy_rate = compression_counters.busy_rate;
        info->compression->compressed_size =
                                    compression_counters.compressed_size;
        info->compression->compression_rate =
                                    compression_counters.compression_rate;
    }

    if (cpu_throttle_active()) {
        info->has_cpu_throttle_percentage = true;
        info->cpu_throttle_percentage = cpu_throttle_get_percentage();
    }

    if (s->state != MIGRATION_STATUS_COMPLETED) {
        info->ram->remaining = ram_bytes_remaining();
        info->ram->dirty_pages_rate =
           stat64_get(&mig_stats.dirty_pages_rate);
    }
}

static void populate_disk_info(MigrationInfo *info)
{
    if (blk_mig_active()) {
        info->disk = g_malloc0(sizeof(*info->disk));
        info->disk->transferred = blk_mig_bytes_transferred();
        info->disk->remaining = blk_mig_bytes_remaining();
        info->disk->total = blk_mig_bytes_total();
    }
}

static void fill_source_migration_info(MigrationInfo *info)
{
    MigrationState *s = migrate_get_current();
    int state = qatomic_read(&s->state);
    GSList *cur_blocker = migration_blockers;

    info->blocked_reasons = NULL;

    /*
     * There are two types of reasons a migration might be blocked:
     * a) devices marked in VMState as non-migratable, and
     * b) explicit migration blockers.
     * We need to add both of them here.
     */
    qemu_savevm_non_migratable_list(&info->blocked_reasons);

    while (cur_blocker) {
        QAPI_LIST_PREPEND(info->blocked_reasons,
                          g_strdup(error_get_pretty(cur_blocker->data)));
        cur_blocker = g_slist_next(cur_blocker);
    }
    info->has_blocked_reasons = info->blocked_reasons != NULL;

    switch (state) {
    case MIGRATION_STATUS_NONE:
        /* no migration has ever happened */
        /* do not overwrite the destination migration status */
        return;
    case MIGRATION_STATUS_SETUP:
        info->has_status = true;
        info->has_total_time = false;
        break;
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_CANCELLING:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_PRE_SWITCHOVER:
    case MIGRATION_STATUS_DEVICE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
        /* TODO add some postcopy stats */
        populate_time_info(info, s);
        populate_ram_info(info, s);
        populate_disk_info(info);
        populate_vfio_info(info);
        break;
    case MIGRATION_STATUS_COLO:
        info->has_status = true;
        /* TODO: display COLO specific information (checkpoint info etc.) */
        break;
    case MIGRATION_STATUS_COMPLETED:
        populate_time_info(info, s);
        populate_ram_info(info, s);
        populate_vfio_info(info);
        break;
    case MIGRATION_STATUS_FAILED:
        info->has_status = true;
        if (s->error) {
            info->error_desc = g_strdup(error_get_pretty(s->error));
        }
        break;
    case MIGRATION_STATUS_CANCELLED:
        info->has_status = true;
        break;
    case MIGRATION_STATUS_WAIT_UNPLUG:
        info->has_status = true;
        break;
    }
    info->status = state;
}

static void fill_destination_migration_info(MigrationInfo *info)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    if (mis->socket_address_list) {
        info->has_socket_address = true;
        info->socket_address =
            QAPI_CLONE(SocketAddressList, mis->socket_address_list);
    }

    switch (mis->state) {
    case MIGRATION_STATUS_NONE:
        return;
    case MIGRATION_STATUS_SETUP:
    case MIGRATION_STATUS_CANCELLING:
    case MIGRATION_STATUS_CANCELLED:
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
    case MIGRATION_STATUS_FAILED:
    case MIGRATION_STATUS_COLO:
        info->has_status = true;
        break;
    case MIGRATION_STATUS_COMPLETED:
        info->has_status = true;
        fill_destination_postcopy_migration_info(info);
        break;
    }
    info->status = mis->state;
}

MigrationInfo *qmp_query_migrate(Error **errp)
{
    MigrationInfo *info = g_malloc0(sizeof(*info));

    fill_destination_migration_info(info);
    fill_source_migration_info(info);

    return info;
}

void qmp_migrate_start_postcopy(Error **errp)
{
    MigrationState *s = migrate_get_current();

    if (!migrate_postcopy()) {
        error_setg(errp, "Enable postcopy with migrate_set_capability before"
                         " the start of migration");
        return;
    }

    if (s->state == MIGRATION_STATUS_NONE) {
        error_setg(errp, "Postcopy must be started after migration has been"
                         " started");
        return;
    }
    /*
     * We don't raise an error if migration has finished, since that
     * would race with issuing this command.
     */
    qatomic_set(&s->start_postcopy, true);
}

/* shared migration helpers */

void migrate_set_state(int *state, int old_state, int new_state)
{
    assert(new_state < MIGRATION_STATUS__MAX);
    if (qatomic_cmpxchg(state, old_state, new_state) == old_state) {
        trace_migrate_set_state(MigrationStatus_str(new_state));
        migrate_generate_event(new_state);
    }
}
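
/*
 * Editor's note: the cmpxchg makes the transition conditional, so racing
 * callers cannot clobber each other.  For example,
 *
 *   migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
 *                     MIGRATION_STATUS_FAILED);
 *
 * only takes effect (and emits a MIGRATION event) if the state is still
 * SETUP at that point; otherwise it is a no-op.
 */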

static void migrate_fd_cleanup(MigrationState *s)
{
    qemu_bh_delete(s->cleanup_bh);
    s->cleanup_bh = NULL;

    g_free(s->hostname);
    s->hostname = NULL;
    json_writer_free(s->vmdesc);
    s->vmdesc = NULL;

    qemu_savevm_state_cleanup();

    if (s->to_dst_file) {
        QEMUFile *tmp;

        trace_migrate_fd_cleanup();
        qemu_mutex_unlock_iothread();
        if (s->migration_thread_running) {
            qemu_thread_join(&s->thread);
            s->migration_thread_running = false;
        }
        qemu_mutex_lock_iothread();

        multifd_save_cleanup();
        qemu_mutex_lock(&s->qemu_file_lock);
        tmp = s->to_dst_file;
        s->to_dst_file = NULL;
        qemu_mutex_unlock(&s->qemu_file_lock);
        /*
         * Close the file handle without the lock to make sure the
         * critical section won't block for long.
         */
        migration_ioc_unregister_yank_from_file(tmp);
        qemu_fclose(tmp);
    }

    if (s->postcopy_qemufile_src) {
        migration_ioc_unregister_yank_from_file(s->postcopy_qemufile_src);
        qemu_fclose(s->postcopy_qemufile_src);
        s->postcopy_qemufile_src = NULL;
    }

    assert(!migration_is_active(s));

    if (s->state == MIGRATION_STATUS_CANCELLING) {
        migrate_set_state(&s->state, MIGRATION_STATUS_CANCELLING,
                          MIGRATION_STATUS_CANCELLED);
    }

    if (s->error) {
        /* The error is used by 'info migrate', so we can't free it */
        error_report_err(error_copy(s->error));
    }
    notifier_list_notify(&migration_state_notifiers, s);
    block_cleanup_parameters();
    yank_unregister_instance(MIGRATION_YANK_INSTANCE);
}

static void migrate_fd_cleanup_schedule(MigrationState *s)
{
    /*
     * Take a ref on the state for the bh, because it may be called when
     * there are already no other refs.
     */
    object_ref(OBJECT(s));
    qemu_bh_schedule(s->cleanup_bh);
}

static void migrate_fd_cleanup_bh(void *opaque)
{
    MigrationState *s = opaque;
    migrate_fd_cleanup(s);
    object_unref(OBJECT(s));
}

void migrate_set_error(MigrationState *s, const Error *error)
{
    QEMU_LOCK_GUARD(&s->error_mutex);
    if (!s->error) {
        s->error = error_copy(error);
    }
}

static void migrate_error_free(MigrationState *s)
{
    QEMU_LOCK_GUARD(&s->error_mutex);
    if (s->error) {
        error_free(s->error);
        s->error = NULL;
    }
}

void migrate_fd_error(MigrationState *s, const Error *error)
{
    trace_migrate_fd_error(error_get_pretty(error));
    assert(s->to_dst_file == NULL);
    migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                      MIGRATION_STATUS_FAILED);
    migrate_set_error(s, error);
}

static void migrate_fd_cancel(MigrationState *s)
{
    int old_state;
    QEMUFile *f = migrate_get_current()->to_dst_file;
    trace_migrate_fd_cancel();

    WITH_QEMU_LOCK_GUARD(&s->qemu_file_lock) {
        if (s->rp_state.from_dst_file) {
            /* shut down the RP socket, causing the RP thread to shut down */
            qemu_file_shutdown(s->rp_state.from_dst_file);
        }
    }

    do {
        old_state = s->state;
        if (!migration_is_running(old_state)) {
            break;
        }
        /* If the migration is paused, kick it out of the pause */
        if (old_state == MIGRATION_STATUS_PRE_SWITCHOVER) {
            qemu_sem_post(&s->pause_sem);
        }
        migrate_set_state(&s->state, old_state, MIGRATION_STATUS_CANCELLING);
    } while (s->state != MIGRATION_STATUS_CANCELLING);

    /*
     * If we're unlucky the migration code might be stuck somewhere in a
     * send/write while the network has failed and is waiting to time out;
     * if we've got shutdown(2) available then we can force it to quit.
     * The outgoing qemu file gets closed in migrate_fd_cleanup, which is
     * called in a bh, so there is no race against this cancel.
     */
    if (s->state == MIGRATION_STATUS_CANCELLING && f) {
        qemu_file_shutdown(f);
    }
    if (s->state == MIGRATION_STATUS_CANCELLING && s->block_inactive) {
        Error *local_err = NULL;

        bdrv_activate_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
        } else {
            s->block_inactive = false;
        }
    }
}

void add_migration_state_change_notifier(Notifier *notify)
{
    notifier_list_add(&migration_state_notifiers, notify);
}

void remove_migration_state_change_notifier(Notifier *notify)
{
    notifier_remove(notify);
}

bool migration_in_setup(MigrationState *s)
{
    return s->state == MIGRATION_STATUS_SETUP;
}

bool migration_has_finished(MigrationState *s)
{
    return s->state == MIGRATION_STATUS_COMPLETED;
}

bool migration_has_failed(MigrationState *s)
{
    return (s->state == MIGRATION_STATUS_CANCELLED ||
            s->state == MIGRATION_STATUS_FAILED);
}

bool migration_in_postcopy(void)
{
    MigrationState *s = migrate_get_current();

    switch (s->state) {
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
        return true;
    default:
        return false;
    }
}

bool migration_in_postcopy_after_devices(MigrationState *s)
{
    return migration_in_postcopy() && s->postcopy_after_devices;
}

bool migration_in_incoming_postcopy(void)
{
    PostcopyState ps = postcopy_state_get();

    return ps >= POSTCOPY_INCOMING_DISCARD && ps < POSTCOPY_INCOMING_END;
}

bool migration_incoming_postcopy_advised(void)
{
    PostcopyState ps = postcopy_state_get();

    return ps >= POSTCOPY_INCOMING_ADVISE && ps < POSTCOPY_INCOMING_END;
}

bool migration_in_bg_snapshot(void)
{
    MigrationState *s = migrate_get_current();

    return migrate_background_snapshot() &&
            migration_is_setup_or_active(s->state);
}

bool migration_is_idle(void)
{
    MigrationState *s = current_migration;

    if (!s) {
        return true;
    }

    switch (s->state) {
    case MIGRATION_STATUS_NONE:
    case MIGRATION_STATUS_CANCELLED:
    case MIGRATION_STATUS_COMPLETED:
    case MIGRATION_STATUS_FAILED:
        return true;
    case MIGRATION_STATUS_SETUP:
    case MIGRATION_STATUS_CANCELLING:
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_COLO:
    case MIGRATION_STATUS_PRE_SWITCHOVER:
    case MIGRATION_STATUS_DEVICE:
    case MIGRATION_STATUS_WAIT_UNPLUG:
        return false;
    case MIGRATION_STATUS__MAX:
        g_assert_not_reached();
    }

    return false;
}

bool migration_is_active(MigrationState *s)
{
    return (s->state == MIGRATION_STATUS_ACTIVE ||
            s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
}

void migrate_init(MigrationState *s)
{
    /*
     * Reinitialise all migration state, except
     * parameters/capabilities that the user set, and
     * locks.
     */
    s->cleanup_bh = 0;
    s->vm_start_bh = 0;
    s->to_dst_file = NULL;
    s->state = MIGRATION_STATUS_NONE;
    s->rp_state.from_dst_file = NULL;
    s->rp_state.error = false;
    s->mbps = 0.0;
    s->pages_per_second = 0.0;
    s->downtime = 0;
    s->expected_downtime = 0;
    s->setup_time = 0;
    s->start_postcopy = false;
    s->postcopy_after_devices = false;
    s->migration_thread_running = false;
    error_free(s->error);
    s->error = NULL;
    s->hostname = NULL;

    migrate_set_state(&s->state, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP);

    s->start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    s->total_time = 0;
    s->vm_old_state = -1;
    s->iteration_initial_bytes = 0;
    s->threshold_size = 0;
    s->switchover_acked = false;
}

int migrate_add_blocker_internal(Error *reason, Error **errp)
{
    /* Snapshots are similar to migrations, so check RUN_STATE_SAVE_VM too. */
    if (runstate_check(RUN_STATE_SAVE_VM) || !migration_is_idle()) {
        error_propagate_prepend(errp, error_copy(reason),
                                "disallowing migration blocker "
                                "(migration/snapshot in progress) for: ");
        return -EBUSY;
    }

    migration_blockers = g_slist_prepend(migration_blockers, reason);
    return 0;
}

int migrate_add_blocker(Error *reason, Error **errp)
{
    if (only_migratable) {
        error_propagate_prepend(errp, error_copy(reason),
                                "disallowing migration blocker "
                                "(--only-migratable) for: ");
        return -EACCES;
    }

    return migrate_add_blocker_internal(reason, errp);
}

void migrate_del_blocker(Error *reason)
{
    migration_blockers = g_slist_remove(migration_blockers, reason);
}

void qmp_migrate_incoming(const char *uri, Error **errp)
{
    Error *local_err = NULL;
    static bool once = true;

    if (!once) {
        error_setg(errp, "The incoming migration has already been started");
        return;
    }
    if (!runstate_check(RUN_STATE_INMIGRATE)) {
        error_setg(errp, "'-incoming' was not specified on the command line");
        return;
    }

    if (!yank_register_instance(MIGRATION_YANK_INSTANCE, errp)) {
        return;
    }

    qemu_start_incoming_migration(uri, &local_err);

    if (local_err) {
        yank_unregister_instance(MIGRATION_YANK_INSTANCE);
        error_propagate(errp, local_err);
        return;
    }

    once = false;
}
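
/*
 * Example QMP usage (editor's note), after starting QEMU with
 * "-incoming defer":
 *
 *   { "execute": "migrate-incoming",
 *     "arguments": { "uri": "tcp:0.0.0.0:4444" } }
 */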

void qmp_migrate_recover(const char *uri, Error **errp)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    /*
     * Don't even bother to use ERRP_GUARD() as errp _must_ always be set
     * by callers (no one should ignore a recover failure); a NULL errp
     * here is a programming error.
     */
    assert(errp);

    if (mis->state != MIGRATION_STATUS_POSTCOPY_PAUSED) {
        error_setg(errp, "Migrate recover can only be run "
                   "when postcopy is paused.");
        return;
    }

    /* If there's an existing transport, release it */
    migration_incoming_transport_cleanup(mis);

    /*
     * Note that this call will never start a real migration; it will
     * only re-setup the migration stream and poke the existing migration
     * to continue using that newly established channel.
     */
    qemu_start_incoming_migration(uri, errp);
}

void qmp_migrate_pause(Error **errp)
{
    MigrationState *ms = migrate_get_current();
    MigrationIncomingState *mis = migration_incoming_get_current();
    int ret;

    if (ms->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
        /* Source side, during postcopy */
        qemu_mutex_lock(&ms->qemu_file_lock);
        ret = qemu_file_shutdown(ms->to_dst_file);
        qemu_mutex_unlock(&ms->qemu_file_lock);
        if (ret) {
            error_setg(errp, "Failed to pause source migration");
        }
        return;
    }

    if (mis->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
        ret = qemu_file_shutdown(mis->from_src_file);
        if (ret) {
            error_setg(errp, "Failed to pause destination migration");
        }
        return;
    }

    error_setg(errp, "migrate-pause is currently only supported "
               "during postcopy-active state");
}

bool migration_is_blocked(Error **errp)
{
    if (qemu_savevm_state_blocked(errp)) {
        return true;
    }

    if (migration_blockers) {
        error_propagate(errp, error_copy(migration_blockers->data));
        return true;
    }

    return false;
}

/* Returns true if we should continue the migration, or false on error */
static bool migrate_prepare(MigrationState *s, bool blk, bool blk_inc,
                            bool resume, Error **errp)
{
    Error *local_err = NULL;

    if (resume) {
        if (s->state != MIGRATION_STATUS_POSTCOPY_PAUSED) {
            error_setg(errp, "Cannot resume if there is no "
                       "paused migration");
            return false;
        }

        /*
         * Postcopy recovery won't work well with the release-ram
         * capability, since release-ram drops the page buffer as soon
         * as the page is put into the send buffer.  So if a network
         * failure happens, any page buffers that have not yet reached
         * the destination VM but have already been sent from the source
         * VM will be lost forever.  Let's refuse to let the client
         * resume such a postcopy migration.  Luckily release-ram was
         * designed to only be used when source and destination VMs are
         * on the same host, so it should be fine.
         */
        if (migrate_release_ram()) {
            error_setg(errp, "Postcopy recovery cannot work "
                       "when release-ram capability is set");
            return false;
        }

        /* This is a resume, skip init status */
        return true;
    }

    if (migration_is_running(s->state)) {
        error_setg(errp, QERR_MIGRATION_ACTIVE);
        return false;
    }

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        error_setg(errp, "Guest is waiting for an incoming migration");
        return false;
    }

    if (runstate_check(RUN_STATE_POSTMIGRATE)) {
        error_setg(errp, "Can't migrate a vm that was paused due to "
                   "a previous migration");
        return false;
    }

    if (migration_is_blocked(errp)) {
        return false;
    }

    if (blk || blk_inc) {
        if (migrate_colo()) {
            error_setg(errp, "No disk migration is required in COLO mode");
            return false;
        }
        if (migrate_block() || migrate_block_incremental()) {
            error_setg(errp, "Command options are incompatible with "
                       "current migration capabilities");
            return false;
        }
        if (!migrate_cap_set(MIGRATION_CAPABILITY_BLOCK, true, &local_err)) {
            error_propagate(errp, local_err);
            return false;
        }
        s->must_remove_block_options = true;
    }

    if (blk_inc) {
        migrate_set_block_incremental(true);
    }

    migrate_init(s);
    /* Zero mig_stats and compression_counters for this new migration */
    memset(&mig_stats, 0, sizeof(mig_stats));
    memset(&compression_counters, 0, sizeof(compression_counters));

    return true;
}
1634 
1635 void qmp_migrate(const char *uri, bool has_blk, bool blk,
1636                  bool has_inc, bool inc, bool has_detach, bool detach,
1637                  bool has_resume, bool resume, Error **errp)
1638 {
1639     Error *local_err = NULL;
1640     MigrationState *s = migrate_get_current();
1641     const char *p = NULL;
1642 
1643     /* URI is not suitable for migration? */
1644     if (!migration_channels_and_uri_compatible(uri, errp)) {
1645         return;
1646     }
1647 
1648     if (!migrate_prepare(s, has_blk && blk, has_inc && inc,
1649                          has_resume && resume, errp)) {
1650         /* Error detected, put into errp */
1651         return;
1652     }
1653 
1654     if (!(has_resume && resume)) {
1655         if (!yank_register_instance(MIGRATION_YANK_INSTANCE, errp)) {
1656             return;
1657         }
1658     }
1659 
1660     if (strstart(uri, "tcp:", &p) ||
1661         strstart(uri, "unix:", NULL) ||
1662         strstart(uri, "vsock:", NULL)) {
1663         socket_start_outgoing_migration(s, p ? p : uri, &local_err);
1664 #ifdef CONFIG_RDMA
1665     } else if (strstart(uri, "rdma:", &p)) {
1666         rdma_start_outgoing_migration(s, p, &local_err);
1667 #endif
1668     } else if (strstart(uri, "exec:", &p)) {
1669         exec_start_outgoing_migration(s, p, &local_err);
1670     } else if (strstart(uri, "fd:", &p)) {
1671         fd_start_outgoing_migration(s, p, &local_err);
1672     } else {
1673         if (!(has_resume && resume)) {
1674             yank_unregister_instance(MIGRATION_YANK_INSTANCE);
1675         }
1676         error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "uri",
1677                    "a valid migration protocol");
1678         migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
1679                           MIGRATION_STATUS_FAILED);
1680         block_cleanup_parameters();
1681         return;
1682     }
1683 
1684     if (local_err) {
1685         if (!(has_resume && resume)) {
1686             yank_unregister_instance(MIGRATION_YANK_INSTANCE);
1687         }
1688         migrate_fd_error(s, local_err);
1689         error_propagate(errp, local_err);
1690         return;
1691     }
1692 }
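
/*
 * For reference, the URI schemes dispatched above map onto QMP usage
 * like the following (values are illustrative examples only):
 *
 *   { "execute": "migrate", "arguments": { "uri": "tcp:10.0.0.2:4444" } }
 *   { "execute": "migrate", "arguments": { "uri": "unix:/tmp/mig.sock" } }
 *   { "execute": "migrate", "arguments": { "uri": "exec:cat > state.bin" } }
 *
 * A paused postcopy migration is resumed by re-issuing "migrate" with a
 * fresh uri and "resume": true.
 */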
1693 
1694 void qmp_migrate_cancel(Error **errp)
1695 {
1696     migration_cancel(NULL);
1697 }
1698 
1699 void qmp_migrate_continue(MigrationStatus state, Error **errp)
1700 {
1701     MigrationState *s = migrate_get_current();
1702     if (s->state != state) {
1703         error_setg(errp, "Migration not in expected state: %s",
1704                    MigrationStatus_str(s->state));
1705         return;
1706     }
1707     qemu_sem_post(&s->pause_sem);
1708 }
1709 
1710 /* migration thread support */
1711 /*
1712  * Something bad happened to the RP stream; mark an error.
1713  * The caller shall print or trace something to indicate why.
1714  */
1715 static void mark_source_rp_bad(MigrationState *s)
1716 {
1717     s->rp_state.error = true;
1718 }
1719 
1720 static struct rp_cmd_args {
1721     ssize_t     len; /* -1 = variable */
1722     const char *name;
1723 } rp_cmd_args[] = {
1724     [MIG_RP_MSG_INVALID]        = { .len = -1, .name = "INVALID" },
1725     [MIG_RP_MSG_SHUT]           = { .len =  4, .name = "SHUT" },
1726     [MIG_RP_MSG_PONG]           = { .len =  4, .name = "PONG" },
1727     [MIG_RP_MSG_REQ_PAGES]      = { .len = 12, .name = "REQ_PAGES" },
1728     [MIG_RP_MSG_REQ_PAGES_ID]   = { .len = -1, .name = "REQ_PAGES_ID" },
1729     [MIG_RP_MSG_RECV_BITMAP]    = { .len = -1, .name = "RECV_BITMAP" },
1730     [MIG_RP_MSG_RESUME_ACK]     = { .len =  4, .name = "RESUME_ACK" },
1731     [MIG_RP_MSG_SWITCHOVER_ACK] = { .len =  0, .name = "SWITCHOVER_ACK" },
1732     [MIG_RP_MSG_MAX]            = { .len = -1, .name = "MAX" },
1733 };
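
/*
 * Illustrative sketch only (hypothetical helper, not used anywhere in
 * this file): the wire format implied by the table above is a be16
 * message type followed by a be16 payload length and then the payload.
 * For example, REQ_PAGES carries a be64 start address plus a be32
 * length (12 bytes), and REQ_PAGES_ID appends a 1-byte idstr length
 * followed by the idstr itself.
 */
static inline void rp_msg_send_sketch(QEMUFile *f, uint16_t type,
                                      const uint8_t *data, uint16_t len)
{
    qemu_put_be16(f, type);         /* one of MIG_RP_MSG_* */
    qemu_put_be16(f, len);          /* payload length in bytes */
    qemu_put_buffer(f, data, len);  /* payload, laid out as per the table */
}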
1734 
1735 /*
1736  * Process a request for pages received on the return path.
1737  * We're allowed to send more than requested (e.g. to round to our page size)
1738  * and we don't need to send pages that have already been sent.
1739  */
1740 static void migrate_handle_rp_req_pages(MigrationState *ms, const char *rbname,
1741                                         ram_addr_t start, size_t len)
1742 {
1743     long our_host_ps = qemu_real_host_page_size();
1744 
1745     trace_migrate_handle_rp_req_pages(rbname, start, len);
1746 
1747     /*
1748      * Since we currently insist on matching page sizes, just sanity check
1749      * we're being asked for whole host pages.
1750      */
1751     if (!QEMU_IS_ALIGNED(start, our_host_ps) ||
1752         !QEMU_IS_ALIGNED(len, our_host_ps)) {
1753         error_report("%s: Misaligned page request, start: " RAM_ADDR_FMT
1754                      " len: %zd", __func__, start, len);
1755         mark_source_rp_bad(ms);
1756         return;
1757     }
1758 
1759     if (ram_save_queue_pages(rbname, start, len)) {
1760         mark_source_rp_bad(ms);
1761     }
1762 }
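
/*
 * Example for the alignment check above: with a 4 KiB host page size,
 * a request of start=0x4000, len=0x2000 is accepted, while
 * start=0x4800 (mid-page) would be rejected as misaligned.
 */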
1763 
1764 /* Return true to retry, false to quit */
1765 static bool postcopy_pause_return_path_thread(MigrationState *s)
1766 {
1767     trace_postcopy_pause_return_path();
1768 
1769     qemu_sem_wait(&s->postcopy_pause_rp_sem);
1770 
1771     trace_postcopy_pause_return_path_continued();
1772 
1773     return true;
1774 }
1775 
1776 static int migrate_handle_rp_recv_bitmap(MigrationState *s, char *block_name)
1777 {
1778     RAMBlock *block = qemu_ram_block_by_name(block_name);
1779 
1780     if (!block) {
1781         error_report("%s: invalid block name '%s'", __func__, block_name);
1782         return -EINVAL;
1783     }
1784 
1785     /* Fetch the received bitmap and refresh the dirty bitmap */
1786     return ram_dirty_bitmap_reload(s, block);
1787 }
1788 
1789 static int migrate_handle_rp_resume_ack(MigrationState *s, uint32_t value)
1790 {
1791     trace_source_return_path_thread_resume_ack(value);
1792 
1793     if (value != MIGRATION_RESUME_ACK_VALUE) {
1794         error_report("%s: illegal resume_ack value %"PRIu32,
1795                      __func__, value);
1796         return -1;
1797     }
1798 
1799     /* Now both sides are active. */
1800     migrate_set_state(&s->state, MIGRATION_STATUS_POSTCOPY_RECOVER,
1801                       MIGRATION_STATUS_POSTCOPY_ACTIVE);
1802 
1803     /* Notify the send thread that it's time to continue sending pages */
1804     qemu_sem_post(&s->rp_state.rp_sem);
1805 
1806     return 0;
1807 }
1808 
1809 /*
1810  * Release ms->rp_state.from_dst_file (and postcopy_qemufile_src, if
1811  * it exists) in a safe way.
1812  */
1813 static void migration_release_dst_files(MigrationState *ms)
1814 {
1815     QEMUFile *file;
1816 
1817     WITH_QEMU_LOCK_GUARD(&ms->qemu_file_lock) {
1818         /*
1819          * Reset the from_dst_file pointer before releasing it, as we
1820          * can't block within the lock section.
1821          */
1822         file = ms->rp_state.from_dst_file;
1823         ms->rp_state.from_dst_file = NULL;
1824     }
1825 
1826     /*
1827      * Do the same for the postcopy fast path socket, if there is one.
1828      * No locking is needed because this QEMUFile is only managed by
1829      * the return path thread.
1830      */
1831     if (ms->postcopy_qemufile_src) {
1832         migration_ioc_unregister_yank_from_file(ms->postcopy_qemufile_src);
1833         qemu_file_shutdown(ms->postcopy_qemufile_src);
1834         qemu_fclose(ms->postcopy_qemufile_src);
1835         ms->postcopy_qemufile_src = NULL;
1836     }
1837 
1838     qemu_fclose(file);
1839 }
1840 
1841 /*
1842  * Handle messages sent on the return path from the destination
1843  * towards the source VM.
1844  */
1845 static void *source_return_path_thread(void *opaque)
1846 {
1847     MigrationState *ms = opaque;
1848     QEMUFile *rp = ms->rp_state.from_dst_file;
1849     uint16_t header_len, header_type;
1850     uint8_t buf[512];
1851     uint32_t tmp32, sibling_error;
1852     ram_addr_t start = 0; /* =0 to silence warning */
1853     size_t  len = 0, expected_len;
1854     int res;
1855 
1856     trace_source_return_path_thread_entry();
1857     rcu_register_thread();
1858 
1859 retry:
1860     while (!ms->rp_state.error && !qemu_file_get_error(rp) &&
1861            migration_is_setup_or_active(ms->state)) {
1862         trace_source_return_path_thread_loop_top();
1863         header_type = qemu_get_be16(rp);
1864         header_len = qemu_get_be16(rp);
1865 
1866         if (qemu_file_get_error(rp)) {
1867             mark_source_rp_bad(ms);
1868             goto out;
1869         }
1870 
1871         if (header_type >= MIG_RP_MSG_MAX ||
1872             header_type == MIG_RP_MSG_INVALID) {
1873             error_report("RP: Received invalid message 0x%04x length 0x%04x",
1874                          header_type, header_len);
1875             mark_source_rp_bad(ms);
1876             goto out;
1877         }
1878 
1879         if ((rp_cmd_args[header_type].len != -1 &&
1880             header_len != rp_cmd_args[header_type].len) ||
1881             header_len > sizeof(buf)) {
1882             error_report("RP: Received '%s' message (0x%04x) with "
1883                          "incorrect length %d expecting %zu",
1884                          rp_cmd_args[header_type].name, header_type, header_len,
1885                          (size_t)rp_cmd_args[header_type].len);
1886             mark_source_rp_bad(ms);
1887             goto out;
1888         }
1889 
1890         /* We know we've got a valid header by this point */
1891         res = qemu_get_buffer(rp, buf, header_len);
1892         if (res != header_len) {
1893             error_report("RP: Failed reading data for message 0x%04x"
1894                          " read %d expected %d",
1895                          header_type, res, header_len);
1896             mark_source_rp_bad(ms);
1897             goto out;
1898         }
1899 
1900         /* OK, we have the message and the data */
1901         switch (header_type) {
1902         case MIG_RP_MSG_SHUT:
1903             sibling_error = ldl_be_p(buf);
1904             trace_source_return_path_thread_shut(sibling_error);
1905             if (sibling_error) {
1906                 error_report("RP: Sibling indicated error %d", sibling_error);
1907                 mark_source_rp_bad(ms);
1908             }
1909             /*
1910              * We'll let the main thread deal with closing the RP;
1911              * we could do a shutdown(2) on it, but we're the only user
1912              * anyway, so there's nothing gained.
1913              */
1914             goto out;
1915 
1916         case MIG_RP_MSG_PONG:
1917             tmp32 = ldl_be_p(buf);
1918             trace_source_return_path_thread_pong(tmp32);
1919             qemu_sem_post(&ms->rp_state.rp_pong_acks);
1920             break;
1921 
1922         case MIG_RP_MSG_REQ_PAGES:
1923             start = ldq_be_p(buf);
1924             len = ldl_be_p(buf + 8);
1925             migrate_handle_rp_req_pages(ms, NULL, start, len);
1926             break;
1927 
1928         case MIG_RP_MSG_REQ_PAGES_ID:
1929             expected_len = 12 + 1; /* be64 start + be32 len + idstr len byte */
1930 
1931             if (header_len >= expected_len) {
1932                 start = ldq_be_p(buf);
1933                 len = ldl_be_p(buf + 8);
1934                 /* Now we expect an idstr */
1935                 tmp32 = buf[12]; /* Length of the following idstr */
1936                 buf[13 + tmp32] = '\0';
1937                 expected_len += tmp32;
1938             }
1939             if (header_len != expected_len) {
1940                 error_report("RP: Req_Page_id with length %d expecting %zd",
1941                              header_len, expected_len);
1942                 mark_source_rp_bad(ms);
1943                 goto out;
1944             }
1945             migrate_handle_rp_req_pages(ms, (char *)&buf[13], start, len);
1946             break;
1947 
1948         case MIG_RP_MSG_RECV_BITMAP:
1949             if (header_len < 1) {
1950                 error_report("%s: missing block name", __func__);
1951                 mark_source_rp_bad(ms);
1952                 goto out;
1953             }
1954             /* Format: len (1B) + idstr (<255B). This ends the idstr. */
1955             buf[buf[0] + 1] = '\0';
1956             if (migrate_handle_rp_recv_bitmap(ms, (char *)(buf + 1))) {
1957                 mark_source_rp_bad(ms);
1958                 goto out;
1959             }
1960             break;
1961 
1962         case MIG_RP_MSG_RESUME_ACK:
1963             tmp32 = ldl_be_p(buf);
1964             if (migrate_handle_rp_resume_ack(ms, tmp32)) {
1965                 mark_source_rp_bad(ms);
1966                 goto out;
1967             }
1968             break;
1969 
1970         case MIG_RP_MSG_SWITCHOVER_ACK:
1971             ms->switchover_acked = true;
1972             trace_source_return_path_thread_switchover_acked();
1973             break;
1974 
1975         default:
1976             break;
1977         }
1978     }
1979 
1980 out:
1981     res = qemu_file_get_error(rp);
1982     if (res) {
1983         if (migration_in_postcopy()) {
1984             /*
1985              * Maybe there is something we can do: it looks like a
1986              * network down issue, and we pause for a recovery.
1987              */
1988             migration_release_dst_files(ms);
1989             rp = NULL;
1990             if (postcopy_pause_return_path_thread(ms)) {
1991                 /*
1992                  * Reload rp, reset the rest.  Referencing it is safe since
1993                  * it's reset only by us above, or when migration completes
1994                  */
1995                 rp = ms->rp_state.from_dst_file;
1996                 ms->rp_state.error = false;
1997                 goto retry;
1998             }
1999         }
2000 
2001         trace_source_return_path_thread_bad_end();
2002         mark_source_rp_bad(ms);
2003     }
2004 
2005     trace_source_return_path_thread_end();
2006     migration_release_dst_files(ms);
2007     rcu_unregister_thread();
2008     return NULL;
2009 }
2010 
2011 static int open_return_path_on_source(MigrationState *ms,
2012                                       bool create_thread)
2013 {
2014     ms->rp_state.from_dst_file = qemu_file_get_return_path(ms->to_dst_file);
2015     if (!ms->rp_state.from_dst_file) {
2016         return -1;
2017     }
2018 
2019     trace_open_return_path_on_source();
2020 
2021     if (!create_thread) {
2022         /* We're done */
2023         return 0;
2024     }
2025 
2026     qemu_thread_create(&ms->rp_state.rp_thread, "return path",
2027                        source_return_path_thread, ms, QEMU_THREAD_JOINABLE);
2028     ms->rp_state.rp_thread_created = true;
2029 
2030     trace_open_return_path_on_source_continue();
2031 
2032     return 0;
2033 }
2034 
2035 /* Returns 0 if the RP was ok, otherwise there was an error on the RP */
2036 static int await_return_path_close_on_source(MigrationState *ms)
2037 {
2038     /*
2039      * If this is a normal exit then the destination will send a SHUT and the
2040      * rp_thread will exit; however, if there's an error we need to cause
2041      * it to exit.
2042      */
2043     if (qemu_file_get_error(ms->to_dst_file) && ms->rp_state.from_dst_file) {
2044         /*
2045          * shutdown(2), if we have it, will cause it to unblock if it's stuck
2046          * waiting for the destination.
2047          */
2048         qemu_file_shutdown(ms->rp_state.from_dst_file);
2049         mark_source_rp_bad(ms);
2050     }
2051     trace_await_return_path_close_on_source_joining();
2052     qemu_thread_join(&ms->rp_state.rp_thread);
2053     ms->rp_state.rp_thread_created = false;
2054     trace_await_return_path_close_on_source_close();
2055     return ms->rp_state.error;
2056 }
2057 
2058 static inline void
2059 migration_wait_main_channel(MigrationState *ms)
2060 {
2061     /* Wait until one PONG message is received */
2062     qemu_sem_wait(&ms->rp_state.rp_pong_acks);
2063 }
2064 
2065 /*
2066  * Switch from normal iteration to postcopy
2067  * Returns non-0 on error
2068  */
2069 static int postcopy_start(MigrationState *ms)
2070 {
2071     int ret;
2072     QIOChannelBuffer *bioc;
2073     QEMUFile *fb;
2074     int64_t time_at_stop = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
2075     uint64_t bandwidth = migrate_max_postcopy_bandwidth();
2076     bool restart_block = false;
2077     int cur_state = MIGRATION_STATUS_ACTIVE;
2078 
2079     if (migrate_postcopy_preempt()) {
2080         migration_wait_main_channel(ms);
2081         if (postcopy_preempt_establish_channel(ms)) {
2082             migrate_set_state(&ms->state, ms->state, MIGRATION_STATUS_FAILED);
2083             return -1;
2084         }
2085     }
2086 
2087     if (!migrate_pause_before_switchover()) {
2088         migrate_set_state(&ms->state, MIGRATION_STATUS_ACTIVE,
2089                           MIGRATION_STATUS_POSTCOPY_ACTIVE);
2090     }
2091 
2092     trace_postcopy_start();
2093     qemu_mutex_lock_iothread();
2094     trace_postcopy_start_set_run();
2095 
2096     qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL);
2097     global_state_store();
2098     ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
2099     if (ret < 0) {
2100         goto fail;
2101     }
2102 
2103     ret = migration_maybe_pause(ms, &cur_state,
2104                                 MIGRATION_STATUS_POSTCOPY_ACTIVE);
2105     if (ret < 0) {
2106         goto fail;
2107     }
2108 
2109     ret = bdrv_inactivate_all();
2110     if (ret < 0) {
2111         goto fail;
2112     }
2113     restart_block = true;
2114 
2115     /*
2116      * Cause any non-postcopiable, but iterative devices to
2117      * send out their final data.
2118      */
2119     qemu_savevm_state_complete_precopy(ms->to_dst_file, true, false);
2120 
2121     /*
2122      * In the finish-migrate state, and with the io-lock held, everything
2123      * should be quiet, but we've potentially still got dirty pages and we
2124      * need to tell the destination to throw away any pages it has already
2125      * received that are dirty.
2126      */
2127     if (migrate_postcopy_ram()) {
2128         ram_postcopy_send_discard_bitmap(ms);
2129     }
2130 
2131     /*
2132      * Send the rest of the state - note that devices doing postcopy
2133      * will notice we're in POSTCOPY_ACTIVE and will not actually
2134      * wrap their state up here.
2135      */
2136     migration_rate_set(bandwidth);
2137     if (migrate_postcopy_ram()) {
2138         /* Ping just for debugging, helps line traces up */
2139         qemu_savevm_send_ping(ms->to_dst_file, 2);
2140     }
2141 
2142     /*
2143      * While loading the device state we may trigger page transfer
2144      * requests and the fd must be free to process those, and thus
2145      * the destination must read the whole device state off the fd before
2146      * it starts processing it.  Unfortunately the ad-hoc migration format
2147      * doesn't allow the destination to know the size to read without fully
2148      * parsing it through each device's load-state code (especially the
2149      * open-coded devices that use get/put).
2150      * So we wrap the device state up in a package with a length at the start;
2151      * to do this we use a buffered channel to hold the whole of the device state.
2152      */
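    /*
     * Sketch of the resulting stream framing (illustrative); the package
     * is emitted by qemu_savevm_send_packaged() further below as a
     * length-prefixed blob:
     *
     *   ... precopy sections ... | PACKAGED cmd | be32 length |
     *   { LISTEN | device state | (ping 3) | RUN }   <-- contents of bioc
     */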
2153     bioc = qio_channel_buffer_new(4096);
2154     qio_channel_set_name(QIO_CHANNEL(bioc), "migration-postcopy-buffer");
2155     fb = qemu_file_new_output(QIO_CHANNEL(bioc));
2156     object_unref(OBJECT(bioc));
2157 
2158     /*
2159      * Make sure the receiver can get incoming pages before we send the rest
2160      * of the state
2161      */
2162     qemu_savevm_send_postcopy_listen(fb);
2163 
2164     qemu_savevm_state_complete_precopy(fb, false, false);
2165     if (migrate_postcopy_ram()) {
2166         qemu_savevm_send_ping(fb, 3);
2167     }
2168 
2169     qemu_savevm_send_postcopy_run(fb);
2170 
2171     /* <><> end of stuff going into the package */
2172 
2173     /* Last point of recovery; as soon as we send the package the destination
2174      * can open devices and potentially start running.
2175      * Let's just check again that we've not got any errors.
2176      */
2177     ret = qemu_file_get_error(ms->to_dst_file);
2178     if (ret) {
2179         error_report("postcopy_start: Migration stream errored (pre package)");
2180         goto fail_closefb;
2181     }
2182 
2183     restart_block = false;
2184 
2185     /* Now send that blob */
2186     if (qemu_savevm_send_packaged(ms->to_dst_file, bioc->data, bioc->usage)) {
2187         goto fail_closefb;
2188     }
2189     qemu_fclose(fb);
2190 
2191     /* Send a notification to give anything that needs to happen at the
2192      * transition to postcopy, and after the device state, a chance to run;
2193      * in particular spice needs to trigger a transition now.
2194      */
2195     ms->postcopy_after_devices = true;
2196     notifier_list_notify(&migration_state_notifiers, ms);
2197 
2198     ms->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - time_at_stop;
2199 
2200     qemu_mutex_unlock_iothread();
2201 
2202     if (migrate_postcopy_ram()) {
2203         /*
2204          * Although this ping is just for debug, it could potentially be
2205          * used for getting a better measurement of downtime at the source.
2206          */
2207         qemu_savevm_send_ping(ms->to_dst_file, 4);
2208     }
2209 
2210     if (migrate_release_ram()) {
2211         ram_postcopy_migrated_memory_release(ms);
2212     }
2213 
2214     ret = qemu_file_get_error(ms->to_dst_file);
2215     if (ret) {
2216         error_report("postcopy_start: Migration stream errored");
2217         migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
2218                           MIGRATION_STATUS_FAILED);
2219     }
2220 
2221     trace_postcopy_preempt_enabled(migrate_postcopy_preempt());
2222 
2223     return ret;
2224 
2225 fail_closefb:
2226     qemu_fclose(fb);
2227 fail:
2228     migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
2229                       MIGRATION_STATUS_FAILED);
2230     if (restart_block) {
2231         /* A failure happened early enough that we know the destination hasn't
2232          * accessed block devices, so we're safe to recover.
2233          */
2234         Error *local_err = NULL;
2235 
2236         bdrv_activate_all(&local_err);
2237         if (local_err) {
2238             error_report_err(local_err);
2239         }
2240     }
2241     qemu_mutex_unlock_iothread();
2242     return -1;
2243 }
2244 
2245 /**
2246  * migration_maybe_pause: Pause if migrate_pause_before_switchover()
2247  * requires it.  Called with the iothread locked.
2248  * Returns: 0 on success
2249  */
2250 static int migration_maybe_pause(MigrationState *s,
2251                                  int *current_active_state,
2252                                  int new_state)
2253 {
2254     if (!migrate_pause_before_switchover()) {
2255         return 0;
2256     }
2257 
2258     /* Since leaving this state is not atomic with posting the semaphore,
2259      * it's possible that someone could have issued multiple migrate-continue
2260      * commands, leaving the semaphore incorrectly positive at this point;
2261      * the docs say it's undefined to reinit a semaphore that's already
2262      * init'd, so use timedwait to eat up any existing posts.
2263      */
2264     while (qemu_sem_timedwait(&s->pause_sem, 1) == 0) {
2265         /* This block intentionally left blank */
2266     }
2267 
2268     /*
2269      * If the migration is cancelled when it is in the completion phase,
2270      * the migration state is set to MIGRATION_STATUS_CANCELLING.
2271      * So we don't need to wait on the semaphore in that case; otherwise
2272      * we would wait on the 'pause_sem' semaphore forever.
2273      */
2274     if (s->state != MIGRATION_STATUS_CANCELLING) {
2275         qemu_mutex_unlock_iothread();
2276         migrate_set_state(&s->state, *current_active_state,
2277                           MIGRATION_STATUS_PRE_SWITCHOVER);
2278         qemu_sem_wait(&s->pause_sem);
2279         migrate_set_state(&s->state, MIGRATION_STATUS_PRE_SWITCHOVER,
2280                           new_state);
2281         *current_active_state = new_state;
2282         qemu_mutex_lock_iothread();
2283     }
2284 
2285     return s->state == new_state ? 0 : -EINVAL;
2286 }
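
/*
 * Illustrative QMP flow for the pause above (assumes the capability was
 * enabled beforehand; values are examples):
 *
 *   { "execute": "migrate-set-capabilities", "arguments": { "capabilities":
 *       [ { "capability": "pause-before-switchover", "state": true } ] } }
 *   ... migration stops the vCPUs and enters "pre-switchover" ...
 *   { "execute": "migrate-continue",
 *     "arguments": { "state": "pre-switchover" } }
 */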
2287 
2288 /**
2289  * migration_completion: Used by migration_thread when there's not much left.
2290  *   The caller 'breaks' the loop when this returns.
2291  *
2292  * @s: Current migration state
2293  */
2294 static void migration_completion(MigrationState *s)
2295 {
2296     int ret;
2297     int current_active_state = s->state;
2298 
2299     if (s->state == MIGRATION_STATUS_ACTIVE) {
2300         qemu_mutex_lock_iothread();
2301         s->downtime_start = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
2302         qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL);
2303 
2304         s->vm_old_state = runstate_get();
2305         global_state_store();
2306 
2307         ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
2308         trace_migration_completion_vm_stop(ret);
2309         if (ret >= 0) {
2310             ret = migration_maybe_pause(s, &current_active_state,
2311                                         MIGRATION_STATUS_DEVICE);
2312         }
2313         if (ret >= 0) {
2314             /*
2315              * Inactivate disks except in COLO, and track that we
2316              * have done so in order to remember to reactivate
2317              * them if migration fails or is cancelled.
2318              */
2319             s->block_inactive = !migrate_colo();
2320             migration_rate_set(RATE_LIMIT_DISABLED);
2321             ret = qemu_savevm_state_complete_precopy(s->to_dst_file, false,
2322                                                      s->block_inactive);
2323         }
2324 
2325         qemu_mutex_unlock_iothread();
2326 
2327         if (ret < 0) {
2328             goto fail;
2329         }
2330     } else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
2331         trace_migration_completion_postcopy_end();
2332 
2333         qemu_mutex_lock_iothread();
2334         qemu_savevm_state_complete_postcopy(s->to_dst_file);
2335         qemu_mutex_unlock_iothread();
2336 
2337         /*
2338          * Shutdown the postcopy fast path thread.  This is only needed
2339          * when dest QEMU binary is old (7.1/7.2).  QEMU 8.0+ doesn't need
2340          * this.
2341          */
2342         if (migrate_postcopy_preempt() && s->preempt_pre_7_2) {
2343             postcopy_preempt_shutdown_file(s);
2344         }
2345 
2346         trace_migration_completion_postcopy_end_after_complete();
2347     } else {
2348         goto fail;
2349     }
2350 
2351     /*
2352      * If rp was opened we must clean up the thread before
2353      * cleaning everything else up (since if there are no failures
2354      * it will wait for the destination to send its status in
2355      * a SHUT command).
2356      */
2357     if (s->rp_state.rp_thread_created) {
2358         int rp_error;
2359         trace_migration_return_path_end_before();
2360         rp_error = await_return_path_close_on_source(s);
2361         trace_migration_return_path_end_after(rp_error);
2362         if (rp_error) {
2363             goto fail;
2364         }
2365     }
2366 
2367     if (qemu_file_get_error(s->to_dst_file)) {
2368         trace_migration_completion_file_err();
2369         goto fail;
2370     }
2371 
2372     if (migrate_colo() && s->state == MIGRATION_STATUS_ACTIVE) {
2373         /* COLO does not support postcopy */
2374         migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
2375                           MIGRATION_STATUS_COLO);
2376     } else {
2377         migrate_set_state(&s->state, current_active_state,
2378                           MIGRATION_STATUS_COMPLETED);
2379     }
2380 
2381     return;
2382 
2383 fail:
2384     if (s->block_inactive && (s->state == MIGRATION_STATUS_ACTIVE ||
2385                               s->state == MIGRATION_STATUS_DEVICE)) {
2386         /*
2387          * If not doing postcopy, vm_start() will be called: let's
2388          * regain control on images.
2389          */
2390         Error *local_err = NULL;
2391 
2392         qemu_mutex_lock_iothread();
2393         bdrv_activate_all(&local_err);
2394         if (local_err) {
2395             error_report_err(local_err);
2396         } else {
2397             s->block_inactive = false;
2398         }
2399         qemu_mutex_unlock_iothread();
2400     }
2401 
2402     migrate_set_state(&s->state, current_active_state,
2403                       MIGRATION_STATUS_FAILED);
2404 }
2405 
2406 /**
2407  * bg_migration_completion: Used by bg_migration_thread after all the
2408  *   RAM has been saved. The caller 'breaks' the loop when this returns.
2409  *
2410  * @s: Current migration state
2411  */
2412 static void bg_migration_completion(MigrationState *s)
2413 {
2414     int current_active_state = s->state;
2415 
2416     if (s->state == MIGRATION_STATUS_ACTIVE) {
2417         /*
2418          * By this moment we have RAM content saved into the migration stream.
2419          * The next step is to flush the non-RAM content (device state)
2420          * right after the ram content. The device state has been stored into
2421          * the temporary buffer before RAM saving started.
2422          */
2423         qemu_put_buffer(s->to_dst_file, s->bioc->data, s->bioc->usage);
2424         qemu_fflush(s->to_dst_file);
2425     } else if (s->state == MIGRATION_STATUS_CANCELLING) {
2426         goto fail;
2427     }
2428 
2429     if (qemu_file_get_error(s->to_dst_file)) {
2430         trace_migration_completion_file_err();
2431         goto fail;
2432     }
2433 
2434     migrate_set_state(&s->state, current_active_state,
2435                       MIGRATION_STATUS_COMPLETED);
2436     return;
2437 
2438 fail:
2439     migrate_set_state(&s->state, current_active_state,
2440                       MIGRATION_STATUS_FAILED);
2441 }
2442 
2443 typedef enum MigThrError {
2444     /* No error detected */
2445     MIG_THR_ERR_NONE = 0,
2446     /* Detected error, but resumed successfully */
2447     MIG_THR_ERR_RECOVERED = 1,
2448     /* Detected fatal error, need to exit */
2449     MIG_THR_ERR_FATAL = 2,
2450 } MigThrError;
2451 
2452 static int postcopy_resume_handshake(MigrationState *s)
2453 {
2454     qemu_savevm_send_postcopy_resume(s->to_dst_file);
2455 
2456     while (s->state == MIGRATION_STATUS_POSTCOPY_RECOVER) {
2457         qemu_sem_wait(&s->rp_state.rp_sem);
2458     }
2459 
2460     if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
2461         return 0;
2462     }
2463 
2464     return -1;
2465 }
2466 
2467 /* Return zero if success, or <0 for error */
2468 static int postcopy_do_resume(MigrationState *s)
2469 {
2470     int ret;
2471 
2472     /*
2473      * Call all the resume_prepare() hooks, so that modules can be
2474      * ready for the migration resume.
2475      */
2476     ret = qemu_savevm_state_resume_prepare(s);
2477     if (ret) {
2478         error_report("%s: resume_prepare() failure detected: %d",
2479                      __func__, ret);
2480         return ret;
2481     }
2482 
2483     /*
2484      * If preempt is enabled, re-establish the preempt channel.  Note that
2485      * we do it after resume prepare to make sure the main channel will be
2486      * created before the preempt channel.  E.g. with weak network, the
2487      * dest QEMU may otherwise confuse the preempt and main channels due
2488      * to the order of connection setup.  This guarantees the correct order.
2489      */
2490     ret = postcopy_preempt_establish_channel(s);
2491     if (ret) {
2492         error_report("%s: postcopy_preempt_establish_channel(): %d",
2493                      __func__, ret);
2494         return ret;
2495     }
2496 
2497     /*
2498      * Last handshake with destination on the resume (destination will
2499      * switch to postcopy-active afterwards)
2500      */
2501     ret = postcopy_resume_handshake(s);
2502     if (ret) {
2503         error_report("%s: handshake failed: %d", __func__, ret);
2504         return ret;
2505     }
2506 
2507     return 0;
2508 }
2509 
2510 /*
2511  * We don't return until we are in a safe state to continue the current
2512  * postcopy migration.  Returns MIG_THR_ERR_RECOVERED if recovered, or
2513  * MIG_THR_ERR_FATAL if an unrecoverable failure happened.
2514  */
2515 static MigThrError postcopy_pause(MigrationState *s)
2516 {
2517     assert(s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
2518 
2519     while (true) {
2520         QEMUFile *file;
2521 
2522         /*
2523          * Current channel is possibly broken. Release it.  Note that this is
2524          * guaranteed even without lock because to_dst_file should only be
2525          * modified by the migration thread.  That also guarantees that the
2526          * unregister of yank is safe too without the lock.  It should be safe
2527          * even to be within the qemu_file_lock, but we didn't do that to avoid
2528          * taking more mutex (yank_lock) within qemu_file_lock.  TL;DR: we make
2529          * the qemu_file_lock critical section as small as possible.
2530          */
2531         assert(s->to_dst_file);
2532         migration_ioc_unregister_yank_from_file(s->to_dst_file);
2533         qemu_mutex_lock(&s->qemu_file_lock);
2534         file = s->to_dst_file;
2535         s->to_dst_file = NULL;
2536         qemu_mutex_unlock(&s->qemu_file_lock);
2537 
2538         qemu_file_shutdown(file);
2539         qemu_fclose(file);
2540 
2541         migrate_set_state(&s->state, s->state,
2542                           MIGRATION_STATUS_POSTCOPY_PAUSED);
2543 
2544         error_report("Detected IO failure for postcopy. "
2545                      "Migration paused.");
2546 
2547         /*
2548          * We wait until things are fixed up. Then someone will set
2549          * the status back for us.
2550          */
2551         while (s->state == MIGRATION_STATUS_POSTCOPY_PAUSED) {
2552             qemu_sem_wait(&s->postcopy_pause_sem);
2553         }
2554 
2555         if (s->state == MIGRATION_STATUS_POSTCOPY_RECOVER) {
2556             /* Woken up by a recover procedure. Give it a shot */
2557 
2558             /*
2559              * Firstly, let's wake up the return path now, with a new
2560              * return path channel.
2561              */
2562             qemu_sem_post(&s->postcopy_pause_rp_sem);
2563 
2564             /* Do the resume logic */
2565             if (postcopy_do_resume(s) == 0) {
2566                 /* Let's continue! */
2567                 trace_postcopy_pause_continued();
2568                 return MIG_THR_ERR_RECOVERED;
2569             } else {
2570                 /*
2571                  * Something went wrong during the recovery, let's
2572                  * pause again. Pause is always better than throwing
2573                  * data away.
2574                  */
2575                 continue;
2576             }
2577         } else {
2578             /* This is not right... Time to quit. */
2579             return MIG_THR_ERR_FATAL;
2580         }
2581     }
2582 }
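
/*
 * Illustrative recovery sequence (source-side QMP) that takes the loop
 * above out of POSTCOPY_PAUSED; the uri is an example only:
 *
 *   { "execute": "migrate",
 *     "arguments": { "uri": "tcp:10.0.0.2:4444", "resume": true } }
 *
 * This reconnects the channels, moves the state to POSTCOPY_RECOVER and
 * wakes up postcopy_pause_sem (see migrate_fd_connect() below).
 */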
2583 
2584 static MigThrError migration_detect_error(MigrationState *s)
2585 {
2586     int ret;
2587     int state = s->state;
2588     Error *local_error = NULL;
2589 
2590     if (state == MIGRATION_STATUS_CANCELLING ||
2591         state == MIGRATION_STATUS_CANCELLED) {
2592         /* End the migration, but don't set the state to failed */
2593         return MIG_THR_ERR_FATAL;
2594     }
2595 
2596     /*
2597      * Try to detect any file errors.  Note that postcopy_qemufile_src will
2598      * be NULL when postcopy preempt is not enabled.
2599      */
2600     ret = qemu_file_get_error_obj_any(s->to_dst_file,
2601                                       s->postcopy_qemufile_src,
2602                                       &local_error);
2603     if (!ret) {
2604         /* Everything is fine */
2605         assert(!local_error);
2606         return MIG_THR_ERR_NONE;
2607     }
2608 
2609     if (local_error) {
2610         migrate_set_error(s, local_error);
2611         error_free(local_error);
2612     }
2613 
2614     if (state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
2615         /*
2616          * For postcopy, we allow the network to be down for a
2617          * while. After that, it can be continued by a
2618          * recovery phase.
2619          */
2620         return postcopy_pause(s);
2621     } else {
2622         /*
2623          * For precopy (or postcopy with an error outside IO), we fail
2624          * immediately.
2625          */
2626         migrate_set_state(&s->state, state, MIGRATION_STATUS_FAILED);
2627         trace_migration_thread_file_err();
2628 
2629         /* Time to stop the migration, now. */
2630         return MIG_THR_ERR_FATAL;
2631     }
2632 }
2633 
2634 static void migration_calculate_complete(MigrationState *s)
2635 {
2636     uint64_t bytes = migration_transferred_bytes(s->to_dst_file);
2637     int64_t end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
2638     int64_t transfer_time;
2639 
2640     s->total_time = end_time - s->start_time;
2641     if (!s->downtime) {
2642         /*
2643          * It's still not set, so this is a precopy migration.  For
2644          * postcopy, downtime is calculated during postcopy_start().
2645          */
2646         s->downtime = end_time - s->downtime_start;
2647     }
2648 
2649     transfer_time = s->total_time - s->setup_time;
2650     if (transfer_time) {
2651         s->mbps = ((double) bytes * 8.0) / transfer_time / 1000;
2652     }
2653 }
2654 
2655 static void update_iteration_initial_status(MigrationState *s)
2656 {
2657     /*
2658      * Update these three fields at the same time to avoid mismatched
2659      * data leading to wrong speed calculations.
2660      */
2661     s->iteration_start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
2662     s->iteration_initial_bytes = migration_transferred_bytes(s->to_dst_file);
2663     s->iteration_initial_pages = ram_get_total_transferred_pages();
2664 }
2665 
2666 static void migration_update_counters(MigrationState *s,
2667                                       int64_t current_time)
2668 {
2669     uint64_t transferred, transferred_pages, time_spent;
2670     uint64_t current_bytes; /* bytes transferred since the beginning */
2671     double bandwidth;
2672 
2673     if (current_time < s->iteration_start_time + BUFFER_DELAY) {
2674         return;
2675     }
2676 
2677     current_bytes = migration_transferred_bytes(s->to_dst_file);
2678     transferred = current_bytes - s->iteration_initial_bytes;
2679     time_spent = current_time - s->iteration_start_time;
2680     bandwidth = (double)transferred / time_spent;
2681     s->threshold_size = bandwidth * migrate_downtime_limit();
2682 
2683     s->mbps = (((double) transferred * 8.0) /
2684                ((double) time_spent / 1000.0)) / 1000.0 / 1000.0;
2685 
2686     transferred_pages = ram_get_total_transferred_pages() -
2687                             s->iteration_initial_pages;
2688     s->pages_per_second = (double) transferred_pages /
2689                              (((double) time_spent / 1000.0));
2690 
2691     /*
2692      * If we haven't sent anything, we don't want to
2693      * recalculate. 10000 is a small enough number for our purposes.
2694      */
2695     if (stat64_get(&mig_stats.dirty_pages_rate) &&
2696         transferred > 10000) {
2697         s->expected_downtime =
2698             stat64_get(&mig_stats.dirty_bytes_last_sync) / bandwidth;
2699     }
2700 
2701     migration_rate_reset(s->to_dst_file);
2702 
2703     update_iteration_initial_status(s);
2704 
2705     trace_migrate_transferred(transferred, time_spent,
2706                               bandwidth, s->threshold_size);
2707 }
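
/*
 * Worked example for the math above (numbers are illustrative): if
 * 100 MiB were transferred in a 100 ms window, the bandwidth is
 * ~1 MiB/ms; with a downtime limit of 300 ms, threshold_size becomes
 * ~300 MiB, i.e. switchover is considered once the remaining precopy
 * data is estimated to fit into the allowed downtime.
 */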
2708 
2709 static bool migration_can_switchover(MigrationState *s)
2710 {
2711     if (!migrate_switchover_ack()) {
2712         return true;
2713     }
2714 
2715     /* No reason to wait for switchover ACK if VM is stopped */
2716     if (!runstate_is_running()) {
2717         return true;
2718     }
2719 
2720     return s->switchover_acked;
2721 }
2722 
2723 /* Migration thread iteration status */
2724 typedef enum {
2725     MIG_ITERATE_RESUME,         /* Resume current iteration */
2726     MIG_ITERATE_SKIP,           /* Skip current iteration */
2727     MIG_ITERATE_BREAK,          /* Break the loop */
2728 } MigIterateState;
2729 
2730 /*
2731  * Decide whether to resume, skip, or break out of the current
2732  * migration iteration (see MigIterateState).
2733  */
2734 static MigIterateState migration_iteration_run(MigrationState *s)
2735 {
2736     uint64_t must_precopy, can_postcopy;
2737     bool in_postcopy = s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE;
2738     bool can_switchover = migration_can_switchover(s);
2739 
2740     qemu_savevm_state_pending_estimate(&must_precopy, &can_postcopy);
2741     uint64_t pending_size = must_precopy + can_postcopy;
2742 
2743     trace_migrate_pending_estimate(pending_size, must_precopy, can_postcopy);
2744 
2745     if (must_precopy <= s->threshold_size) {
2746         qemu_savevm_state_pending_exact(&must_precopy, &can_postcopy);
2747         pending_size = must_precopy + can_postcopy;
2748         trace_migrate_pending_exact(pending_size, must_precopy, can_postcopy);
2749     }
2750 
2751     if ((!pending_size || pending_size < s->threshold_size) && can_switchover) {
2752         trace_migration_thread_low_pending(pending_size);
2753         migration_completion(s);
2754         return MIG_ITERATE_BREAK;
2755     }
2756 
2757     /* Still a significant amount to transfer */
2758     if (!in_postcopy && must_precopy <= s->threshold_size && can_switchover &&
2759         qatomic_read(&s->start_postcopy)) {
2760         if (postcopy_start(s)) {
2761             error_report("%s: postcopy failed to start", __func__);
2762         }
2763         return MIG_ITERATE_SKIP;
2764     }
2765 
2766     /* Just another iteration step */
2767     qemu_savevm_state_iterate(s->to_dst_file, in_postcopy);
2768     return MIG_ITERATE_RESUME;
2769 }
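
/*
 * For reference, the switchover to postcopy above is normally triggered
 * from the monitor once precopy has converged far enough (illustrative
 * QMP):
 *
 *   { "execute": "migrate-start-postcopy" }
 *
 * which sets s->start_postcopy and lets the next iteration call
 * postcopy_start().
 */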
2770 
2771 static void migration_iteration_finish(MigrationState *s)
2772 {
2773     /* If we enabled cpu throttling for auto-converge, turn it off. */
2774     cpu_throttle_stop();
2775 
2776     qemu_mutex_lock_iothread();
2777     switch (s->state) {
2778     case MIGRATION_STATUS_COMPLETED:
2779         migration_calculate_complete(s);
2780         runstate_set(RUN_STATE_POSTMIGRATE);
2781         break;
2782     case MIGRATION_STATUS_COLO:
2783         assert(migrate_colo());
2784         migrate_start_colo_process(s);
2785         s->vm_old_state = RUN_STATE_RUNNING;
2786         /* Fallthrough */
2787     case MIGRATION_STATUS_FAILED:
2788     case MIGRATION_STATUS_CANCELLED:
2789     case MIGRATION_STATUS_CANCELLING:
2790         if (s->vm_old_state == RUN_STATE_RUNNING) {
2791             if (!runstate_check(RUN_STATE_SHUTDOWN)) {
2792                 vm_start();
2793             }
2794         } else {
2795             if (runstate_check(RUN_STATE_FINISH_MIGRATE)) {
2796                 runstate_set(s->vm_old_state);
2797             }
2798         }
2799         break;
2800 
2801     default:
2802         /* Should not reach here, but if so, forgive the VM. */
2803         error_report("%s: Unknown ending state %d", __func__, s->state);
2804         break;
2805     }
2806     migrate_fd_cleanup_schedule(s);
2807     qemu_mutex_unlock_iothread();
2808 }
2809 
2810 static void bg_migration_iteration_finish(MigrationState *s)
2811 {
2812     /*
2813      * Stop tracking RAM writes - un-protect memory, un-register UFFD
2814      * memory ranges, flush kernel wait queues and wake up threads
2815      * waiting for write fault to be resolved.
2816      */
2817     ram_write_tracking_stop();
2818 
2819     qemu_mutex_lock_iothread();
2820     switch (s->state) {
2821     case MIGRATION_STATUS_COMPLETED:
2822         migration_calculate_complete(s);
2823         break;
2824 
2825     case MIGRATION_STATUS_ACTIVE:
2826     case MIGRATION_STATUS_FAILED:
2827     case MIGRATION_STATUS_CANCELLED:
2828     case MIGRATION_STATUS_CANCELLING:
2829         break;
2830 
2831     default:
2832         /* Should not reach here, but if so, forgive the VM. */
2833         error_report("%s: Unknown ending state %d", __func__, s->state);
2834         break;
2835     }
2836 
2837     migrate_fd_cleanup_schedule(s);
2838     qemu_mutex_unlock_iothread();
2839 }
2840 
2841 /*
2842  * Decide whether to resume or break out of the current background
2843  * migration iteration (see MigIterateState).
2844  */
2845 static MigIterateState bg_migration_iteration_run(MigrationState *s)
2846 {
2847     int res;
2848 
2849     res = qemu_savevm_state_iterate(s->to_dst_file, false);
2850     if (res > 0) {
2851         bg_migration_completion(s);
2852         return MIG_ITERATE_BREAK;
2853     }
2854 
2855     return MIG_ITERATE_RESUME;
2856 }
2857 
2858 void migration_make_urgent_request(void)
2859 {
2860     qemu_sem_post(&migrate_get_current()->rate_limit_sem);
2861 }
2862 
2863 void migration_consume_urgent_request(void)
2864 {
2865     qemu_sem_wait(&migrate_get_current()->rate_limit_sem);
2866 }
2867 
2868 /* Returns true if the rate limiting was broken by an urgent request */
2869 bool migration_rate_limit(void)
2870 {
2871     int64_t now = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
2872     MigrationState *s = migrate_get_current();
2873 
2874     bool urgent = false;
2875     migration_update_counters(s, now);
2876     if (migration_rate_exceeded(s->to_dst_file)) {
2877 
2878         if (qemu_file_get_error(s->to_dst_file)) {
2879             return false;
2880         }
2881         /*
2882          * Wait for a delay to do rate limiting OR
2883          * something urgent to post the semaphore.
2884          */
2885         int ms = s->iteration_start_time + BUFFER_DELAY - now;
2886         trace_migration_rate_limit_pre(ms);
2887         if (qemu_sem_timedwait(&s->rate_limit_sem, ms) == 0) {
2888             /*
2889              * We were woken by one or more urgent things but
2890              * the timedwait will have consumed one of them.
2891              * The service routine for the urgent wake will decrement
2892              * the semaphore itself for each item it consumes,
2893              * so post back the one we just consumed.
2894              */
2895             qemu_sem_post(&s->rate_limit_sem);
2896             urgent = true;
2897         }
2898         trace_migration_rate_limit_post(urgent);
2899     }
2900     return urgent;
2901 }
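
/*
 * Pairing sketch for the urgent-request protocol used above
 * (illustrative): a producer that needs data sent ahead of the rate
 * limit posts rate_limit_sem via migration_make_urgent_request(), which
 * wakes the qemu_sem_timedwait() in migration_rate_limit() early; the
 * service routine then balances each item it consumes with
 * migration_consume_urgent_request().
 */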
2902 
2903 /*
2904  * If failover devices are present, wait until they are completely
2905  * unplugged.
2906  */
2907 
2908 static void qemu_savevm_wait_unplug(MigrationState *s, int old_state,
2909                                     int new_state)
2910 {
2911     if (qemu_savevm_state_guest_unplug_pending()) {
2912         migrate_set_state(&s->state, old_state, MIGRATION_STATUS_WAIT_UNPLUG);
2913 
2914         while (s->state == MIGRATION_STATUS_WAIT_UNPLUG &&
2915                qemu_savevm_state_guest_unplug_pending()) {
2916             qemu_sem_timedwait(&s->wait_unplug_sem, 250);
2917         }
2918         if (s->state != MIGRATION_STATUS_WAIT_UNPLUG) {
2919             int timeout = 120; /* 30 seconds */
2920             /*
2921              * Migration has been cancelled, but as we have started
2922              * an unplug we must wait for it to complete so that the
2923              * card can be plugged back in.
2924              */
2925             while (timeout-- && qemu_savevm_state_guest_unplug_pending()) {
2926                 qemu_sem_timedwait(&s->wait_unplug_sem, 250);
2927             }
2928             if (qemu_savevm_state_guest_unplug_pending() &&
2929                 !qtest_enabled()) {
2930                 warn_report("migration: partially unplugged device on "
2931                             "failure");
2932             }
2933         }
2934 
2935         migrate_set_state(&s->state, MIGRATION_STATUS_WAIT_UNPLUG, new_state);
2936     } else {
2937         migrate_set_state(&s->state, old_state, new_state);
2938     }
2939 }
2940 
2941 /*
2942  * Master migration thread on the source VM.
2943  * It drives the migration and pumps the data down the outgoing channel.
2944  */
2945 static void *migration_thread(void *opaque)
2946 {
2947     MigrationState *s = opaque;
2948     MigrationThread *thread = NULL;
2949     int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
2950     MigThrError thr_error;
2951     bool urgent = false;
2952 
2953     thread = MigrationThreadAdd("live_migration", qemu_get_thread_id());
2954 
2955     rcu_register_thread();
2956 
2957     object_ref(OBJECT(s));
2958     update_iteration_initial_status(s);
2959 
2960     qemu_savevm_state_header(s->to_dst_file);
2961 
2962     /*
2963      * If we opened the return path, we need to make sure dst has it
2964      * opened as well.
2965      */
2966     if (s->rp_state.rp_thread_created) {
2967         /* Now tell the dest that it should open its end so it can reply */
2968         qemu_savevm_send_open_return_path(s->to_dst_file);
2969 
2970         /* And do a ping that will make stuff easier to debug */
2971         qemu_savevm_send_ping(s->to_dst_file, 1);
2972     }
2973 
2974     if (migrate_postcopy()) {
2975         /*
2976          * Tell the destination that we *might* want to do postcopy later;
2977          * if the other end can't do postcopy it should fail now, nice and
2978          * early.
2979          */
2980         qemu_savevm_send_postcopy_advise(s->to_dst_file);
2981     }
2982 
2983     if (migrate_colo()) {
2984         /* Notify migration destination that we enable COLO */
2985         qemu_savevm_send_colo_enable(s->to_dst_file);
2986     }
2987 
2988     qemu_savevm_state_setup(s->to_dst_file);
2989 
2990     qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP,
2991                                MIGRATION_STATUS_ACTIVE);
2992 
2993     s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
2994 
2995     trace_migration_thread_setup_complete();
2996 
2997     while (migration_is_active(s)) {
2998         if (urgent || !migration_rate_exceeded(s->to_dst_file)) {
2999             MigIterateState iter_state = migration_iteration_run(s);
3000             if (iter_state == MIG_ITERATE_SKIP) {
3001                 continue;
3002             } else if (iter_state == MIG_ITERATE_BREAK) {
3003                 break;
3004             }
3005         }
3006 
3007         /*
3008          * Try to detect any kind of failures, and see whether we
3009          * should stop the migration now.
3010          */
3011         thr_error = migration_detect_error(s);
3012         if (thr_error == MIG_THR_ERR_FATAL) {
3013             /* Stop migration */
3014             break;
3015         } else if (thr_error == MIG_THR_ERR_RECOVERED) {
3016             /*
3017              * Just recovered from an error (e.g. a network failure);
3018              * reset all the local variables. This is important to avoid
3019              * breaking the transferred_bytes and bandwidth calculations.
3020              */
3021             update_iteration_initial_status(s);
3022         }
3023 
3024         urgent = migration_rate_limit();
3025     }
3026 
3027     trace_migration_thread_after_loop();
3028     migration_iteration_finish(s);
3029     object_unref(OBJECT(s));
3030     rcu_unregister_thread();
3031     MigrationThreadDel(thread);
3032     return NULL;
3033 }
3034 
3035 static void bg_migration_vm_start_bh(void *opaque)
3036 {
3037     MigrationState *s = opaque;
3038 
3039     qemu_bh_delete(s->vm_start_bh);
3040     s->vm_start_bh = NULL;
3041 
3042     vm_start();
3043     s->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - s->downtime_start;
3044 }
3045 
3046 /**
3047  * Background snapshot thread, based on live migration code.
3048  * This is an alternative implementation of the live migration mechanism,
3049  * introduced specifically to support background snapshots.
3050  *
3051  * It takes advantage of the userfault_fd write protection mechanism
3052  * introduced in the v5.7 kernel. Compared to the existing dirty page
3053  * logging migration it produces much less stream traffic, and thus
3054  * smaller snapshot images, simply because no duplicate pages can get
3055  * into the stream.
3056  *
3057  * Another key point: the generated vmstate stream reflects the machine
3058  * state 'frozen' at the start of snapshot creation, whereas with dirty
3059  * page logging the saved snapshot effectively reflects the final VM state.
3060  */
3061 static void *bg_migration_thread(void *opaque)
3062 {
3063     MigrationState *s = opaque;
3064     int64_t setup_start;
3065     MigThrError thr_error;
3066     QEMUFile *fb;
3067     bool early_fail = true;
3068 
3069     rcu_register_thread();
3070     object_ref(OBJECT(s));
3071 
3072     migration_rate_set(RATE_LIMIT_DISABLED);
3073 
3074     setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
3075     /*
3076      * We want to save vmstate for the moment when migration has been
3077      * initiated but also we want to save RAM content while VM is running.
3078      * The RAM content should appear first in the vmstate. So, we first
3079      * stash the non-RAM part of the vmstate to the temporary buffer,
3080      * then write RAM part of the vmstate to the migration stream
3081      * with vCPUs running and, finally, write stashed non-RAM part of
3082      * the vmstate from the buffer to the migration stream.
3083      */
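    /*
     * Resulting stream layout, sketched:
     *
     *   | header | RAM sections (vCPUs running) | buffered non-RAM vmstate |
     *
     * where the last part is appended from s->bioc by
     * bg_migration_completion().
     */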
3084     s->bioc = qio_channel_buffer_new(512 * 1024);
3085     qio_channel_set_name(QIO_CHANNEL(s->bioc), "vmstate-buffer");
3086     fb = qemu_file_new_output(QIO_CHANNEL(s->bioc));
3087     object_unref(OBJECT(s->bioc));
3088 
3089     update_iteration_initial_status(s);
3090 
3091     /*
3092      * Prepare for tracking memory writes with UFFD-WP - populate
3093      * RAM pages before protecting.
3094      */
3095 #ifdef __linux__
3096     ram_write_tracking_prepare();
3097 #endif
3098 
3099     qemu_savevm_state_header(s->to_dst_file);
3100     qemu_savevm_state_setup(s->to_dst_file);
3101 
3102     qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP,
3103                                MIGRATION_STATUS_ACTIVE);
3104 
3105     s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
3106 
3107     trace_migration_thread_setup_complete();
3108     s->downtime_start = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
3109 
3110     qemu_mutex_lock_iothread();
3111 
3112     /*
3113      * If the VM is currently suspended, then, to make a valid runstate
3114      * transition in vm_stop_force_state(), we need to wake it up.
3115      */
3116     qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL);
3117     s->vm_old_state = runstate_get();
3118 
3119     global_state_store();
3120     /* Forcibly stop VM before saving state of vCPUs and devices */
3121     if (vm_stop_force_state(RUN_STATE_PAUSED)) {
3122         goto fail;
3123     }
3124     /*
3125      * Put vCPUs in sync with shadow context structures, then
3126      * save their state to the channel buffer along with devices.
3127      */
3128     cpu_synchronize_all_states();
3129     if (qemu_savevm_state_complete_precopy_non_iterable(fb, false, false)) {
3130         goto fail;
3131     }
3132     /*
3133      * Since we are going to get non-iterable state data directly
3134      * from s->bioc->data, explicit flush is needed here.
3135      * from s->bioc->data, an explicit flush is needed here.
3136     qemu_fflush(fb);
3137 
3138     /* Now initialize UFFD context and start tracking RAM writes */
3139     if (ram_write_tracking_start()) {
3140         goto fail;
3141     }
3142     early_fail = false;
3143 
3144     /*
3145      * Start VM from BH handler to avoid write-fault lock here.
3146      * UFFD-WP protection for the whole RAM is already enabled so
3147      * calling VM state change notifiers from vm_start() would initiate
3148      * writes to virtio VQs memory which is in write-protected region.
3149      * writes to virtio VQs memory, which is in the write-protected region.
3150     s->vm_start_bh = qemu_bh_new(bg_migration_vm_start_bh, s);
3151     qemu_bh_schedule(s->vm_start_bh);
3152 
3153     qemu_mutex_unlock_iothread();
3154 
3155     while (migration_is_active(s)) {
3156         MigIterateState iter_state = bg_migration_iteration_run(s);
3157         if (iter_state == MIG_ITERATE_SKIP) {
3158             continue;
3159         } else if (iter_state == MIG_ITERATE_BREAK) {
3160             break;
3161         }
3162 
3163         /*
3164          * Try to detect any kind of failures, and see whether we
3165          * should stop the migration now.
3166          */
3167         thr_error = migration_detect_error(s);
3168         if (thr_error == MIG_THR_ERR_FATAL) {
3169             /* Stop migration */
3170             break;
3171         }
3172 
3173         migration_update_counters(s, qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
3174     }
3175 
3176     trace_migration_thread_after_loop();
3177 
3178 fail:
3179     if (early_fail) {
3180         migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
3181                 MIGRATION_STATUS_FAILED);
3182         qemu_mutex_unlock_iothread();
3183     }
3184 
3185     bg_migration_iteration_finish(s);
3186 
3187     qemu_fclose(fb);
3188     object_unref(OBJECT(s));
3189     rcu_unregister_thread();
3190 
3191     return NULL;
3192 }
3193 
3194 void migrate_fd_connect(MigrationState *s, Error *error_in)
3195 {
3196     Error *local_err = NULL;
3197     uint64_t rate_limit;
3198     bool resume = s->state == MIGRATION_STATUS_POSTCOPY_PAUSED;
3199 
3200     /*
3201      * If there's a previous error, free it and prepare for another one.
3202      * This also ensures that if the migration completes successfully, no
3203      * stale error is dumped when migrate_fd_cleanup() is called.
3204      */
3205     migrate_error_free(s);
3206 
3207     s->expected_downtime = migrate_downtime_limit();
3208     if (resume) {
3209         assert(s->cleanup_bh);
3210     } else {
3211         assert(!s->cleanup_bh);
3212         s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup_bh, s);
3213     }
3214     if (error_in) {
3215         migrate_fd_error(s, error_in);
3216         if (resume) {
3217             /*
3218              * For a resume, don't do cleanup if the channel is invalid; only
3219              * dump the error.  We wait for another channel connect from the
3220              * user.  The error_report still gives the HMP user a hint on what
3221              * failed.  Reporting is normally done in migrate_fd_cleanup(), but
3222              * since we skip that path here, call error_report_err() explicitly.
3223              */
3224             error_report_err(error_copy(s->error));
3225         } else {
3226             migrate_fd_cleanup(s);
3227         }
3228         return;
3229     }
3230 
3231     if (resume) {
3232         /* This is a resumed migration */
3233         rate_limit = migrate_max_postcopy_bandwidth();
3234     } else {
3235         /* This is a fresh migration */
3236         rate_limit = migrate_max_bandwidth();
3237 
3238         /* Notify before starting migration thread */
3239         notifier_list_notify(&migration_state_notifiers, s);
3240     }
3241 
3242     migration_rate_set(rate_limit);
3243     qemu_file_set_blocking(s->to_dst_file, true);
3244 
3245     /*
3246      * Open the return path. For postcopy, it is always used. For
3247      * precopy, QEMU uses the return path only if the user specified
3248      * the "return-path" capability.
3249      */
3250     if (migrate_postcopy_ram() || migrate_return_path()) {
3251         if (open_return_path_on_source(s, !resume)) {
3252             error_report("Unable to open return-path for postcopy");
3253             migrate_set_state(&s->state, s->state, MIGRATION_STATUS_FAILED);
3254             migrate_fd_cleanup(s);
3255             return;
3256         }
3257     }
3258 
3259     /*
3260      * This needs to be done before resuming a postcopy.  Note: for newer
3261      * QEMUs we will delay the channel creation until postcopy_start(), to
3262      * avoid out-of-order channel creation.
3263      */
3264     if (migrate_postcopy_preempt() && s->preempt_pre_7_2) {
3265         postcopy_preempt_setup(s);
3266     }
3267 
3268     if (resume) {
3269         /* Wake up the main migration thread to do the recovery */
3270         migrate_set_state(&s->state, MIGRATION_STATUS_POSTCOPY_PAUSED,
3271                           MIGRATION_STATUS_POSTCOPY_RECOVER);
3272         qemu_sem_post(&s->postcopy_pause_sem);
3273         return;
3274     }
3275 
3276     if (multifd_save_setup(&local_err) != 0) {
3277         error_report_err(local_err);
3278         migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
3279                           MIGRATION_STATUS_FAILED);
3280         migrate_fd_cleanup(s);
3281         return;
3282     }
3283 
3284     if (migrate_background_snapshot()) {
3285         qemu_thread_create(&s->thread, "bg_snapshot",
3286                 bg_migration_thread, s, QEMU_THREAD_JOINABLE);
3287     } else {
3288         qemu_thread_create(&s->thread, "live_migration",
3289                 migration_thread, s, QEMU_THREAD_JOINABLE);
3290     }
3291     s->migration_thread_running = true;
3292 }
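
/*
 * Editor's sketch (hypothetical): how a caller typically hands a freshly
 * established channel to migrate_fd_connect().  The real logic lives in
 * migration_channel_connect() (channel.c); TLS upgrade and error paths
 * are omitted, and example_channel_ready() is an illustrative name.
 */
#if 0
static void example_channel_ready(MigrationState *s, QIOChannel *ioc,
                                  Error *error_in)
{
    if (!error_in) {
        QEMUFile *f = qemu_file_new_output(ioc);

        /* to_dst_file accesses are serialized by qemu_file_lock */
        qemu_mutex_lock(&s->qemu_file_lock);
        s->to_dst_file = f;
        qemu_mutex_unlock(&s->qemu_file_lock);
    }
    /* On error, migrate_fd_connect() dumps the error and cleans up */
    migrate_fd_connect(s, error_in);
}
#endif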
3293 
3294 static void migration_class_init(ObjectClass *klass, void *data)
3295 {
3296     DeviceClass *dc = DEVICE_CLASS(klass);
3297 
3298     dc->user_creatable = false;
3299     device_class_set_props(dc, migration_properties);
3300 }
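
/*
 * Editor's note: migration_properties itself is defined in options.c.  A
 * qdev property table generally looks like the sketch below (the entry
 * shown is believed to match one real line of that table, but treat it
 * as illustrative rather than authoritative):
 */
#if 0
static Property example_properties[] = {
    DEFINE_PROP_BOOL("store-global-state", MigrationState,
                     store_global_state, true),
    DEFINE_PROP_END_OF_LIST(),
};
#endif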
3301 
3302 static void migration_instance_finalize(Object *obj)
3303 {
3304     MigrationState *ms = MIGRATION_OBJ(obj);
3305 
3306     qemu_mutex_destroy(&ms->error_mutex);
3307     qemu_mutex_destroy(&ms->qemu_file_lock);
3308     qemu_sem_destroy(&ms->wait_unplug_sem);
3309     qemu_sem_destroy(&ms->rate_limit_sem);
3310     qemu_sem_destroy(&ms->pause_sem);
3311     qemu_sem_destroy(&ms->postcopy_pause_sem);
3312     qemu_sem_destroy(&ms->postcopy_pause_rp_sem);
3313     qemu_sem_destroy(&ms->rp_state.rp_sem);
3314     qemu_sem_destroy(&ms->rp_state.rp_pong_acks);
3315     qemu_sem_destroy(&ms->postcopy_qemufile_src_sem);
3316     error_free(ms->error);
3317 }
3318 
3319 static void migration_instance_init(Object *obj)
3320 {
3321     MigrationState *ms = MIGRATION_OBJ(obj);
3322 
3323     ms->state = MIGRATION_STATUS_NONE;
3324     ms->mbps = -1;
3325     ms->pages_per_second = -1;
3326     qemu_sem_init(&ms->pause_sem, 0);
3327     qemu_mutex_init(&ms->error_mutex);
3328 
3329     migrate_params_init(&ms->parameters);
3330 
3331     qemu_sem_init(&ms->postcopy_pause_sem, 0);
3332     qemu_sem_init(&ms->postcopy_pause_rp_sem, 0);
3333     qemu_sem_init(&ms->rp_state.rp_sem, 0);
3334     qemu_sem_init(&ms->rp_state.rp_pong_acks, 0);
3335     qemu_sem_init(&ms->rate_limit_sem, 0);
3336     qemu_sem_init(&ms->wait_unplug_sem, 0);
3337     qemu_sem_init(&ms->postcopy_qemufile_src_sem, 0);
3338     qemu_mutex_init(&ms->qemu_file_lock);
3339 }
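
/*
 * Editor's note: instance_init and instance_finalize must stay symmetric:
 * every qemu_sem_init()/qemu_mutex_init() above has a matching destroy in
 * migration_instance_finalize().  A minimal sketch of the pairing, with
 * illustrative names:
 */
#if 0
typedef struct ExampleState {
    QemuSemaphore sem;
    QemuMutex lock;
} ExampleState;

static void example_init(ExampleState *st)
{
    qemu_sem_init(&st->sem, 0);  /* paired with qemu_sem_destroy() */
    qemu_mutex_init(&st->lock);  /* paired with qemu_mutex_destroy() */
}

static void example_finalize(ExampleState *st)
{
    qemu_sem_destroy(&st->sem);
    qemu_mutex_destroy(&st->lock);
}
#endif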
3340 
3341 /*
3342  * Return true if the check passes, false otherwise. On failure, the
3343  * error is stored in errp if provided.
3344  */
3345 static bool migration_object_check(MigrationState *ms, Error **errp)
3346 {
3347     /* Assuming all off */
3348     bool old_caps[MIGRATION_CAPABILITY__MAX] = { 0 };
3349 
3350     if (!migrate_params_check(&ms->parameters, errp)) {
3351         return false;
3352     }
3353 
3354     return migrate_caps_check(old_caps, ms->capabilities, errp);
3355 }
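
/*
 * Editor's sketch (hypothetical): the Error **errp convention used by
 * migration_object_check().  On failure the callee fills *errp; the
 * caller then owns the error and must report or free it.  The caller
 * name below is illustrative.
 */
#if 0
static void example_check_caller(MigrationState *ms)
{
    Error *err = NULL;

    if (!migration_object_check(ms, &err)) {
        error_report_err(err);  /* reports and frees err */
    }
}
#endif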
3356 
3357 static const TypeInfo migration_type = {
3358     .name = TYPE_MIGRATION,
3359     /*
3360      * NOTE: TYPE_MIGRATION is not really a device, as the object is
3361      * not created using qdev_new(), it is not attached to the qdev
3362      * device tree, and it is never realized.
3363      *
3364      * TODO: Make this TYPE_OBJECT once QOM provides something like
3365      * TYPE_DEVICE's "-global" properties.
3366      */
3367     .parent = TYPE_DEVICE,
3368     .class_init = migration_class_init,
3369     .class_size = sizeof(MigrationClass),
3370     .instance_size = sizeof(MigrationState),
3371     .instance_init = migration_instance_init,
3372     .instance_finalize = migration_instance_finalize,
3373 };
3374 
3375 static void register_migration_types(void)
3376 {
3377     type_register_static(&migration_type);
3378 }
3379 
3380 type_init(register_migration_types);
3381