xref: /openbmc/qemu/migration/migration.c (revision 5e6f3db2)
1 /*
2  * QEMU live migration
3  *
4  * Copyright IBM, Corp. 2008
5  *
6  * Authors:
7  *  Anthony Liguori   <aliguori@us.ibm.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2.  See
10  * the COPYING file in the top-level directory.
11  *
12  * Contributions after 2012-01-13 are licensed under the terms of the
13  * GNU GPL, version 2 or (at your option) any later version.
14  */
15 
16 #include "qemu/osdep.h"
17 #include "qemu/cutils.h"
18 #include "qemu/error-report.h"
19 #include "qemu/main-loop.h"
20 #include "migration/blocker.h"
21 #include "exec.h"
22 #include "fd.h"
23 #include "socket.h"
24 #include "sysemu/runstate.h"
25 #include "sysemu/sysemu.h"
26 #include "sysemu/cpu-throttle.h"
27 #include "rdma.h"
28 #include "ram.h"
29 #include "ram-compress.h"
30 #include "migration/global_state.h"
31 #include "migration/misc.h"
32 #include "migration.h"
33 #include "migration-stats.h"
34 #include "savevm.h"
35 #include "qemu-file.h"
36 #include "channel.h"
37 #include "migration/vmstate.h"
38 #include "block/block.h"
39 #include "qapi/error.h"
40 #include "qapi/clone-visitor.h"
41 #include "qapi/qapi-visit-migration.h"
42 #include "qapi/qapi-visit-sockets.h"
43 #include "qapi/qapi-commands-migration.h"
44 #include "qapi/qapi-events-migration.h"
45 #include "qapi/qmp/qerror.h"
46 #include "qapi/qmp/qnull.h"
47 #include "qemu/rcu.h"
48 #include "block.h"
49 #include "postcopy-ram.h"
50 #include "qemu/thread.h"
51 #include "trace.h"
52 #include "exec/target_page.h"
53 #include "io/channel-buffer.h"
54 #include "io/channel-tls.h"
55 #include "migration/colo.h"
56 #include "hw/boards.h"
57 #include "monitor/monitor.h"
58 #include "net/announce.h"
59 #include "qemu/queue.h"
60 #include "multifd.h"
61 #include "threadinfo.h"
62 #include "qemu/yank.h"
63 #include "sysemu/cpus.h"
64 #include "yank_functions.h"
65 #include "sysemu/qtest.h"
66 #include "options.h"
67 #include "sysemu/dirtylimit.h"
68 
69 static NotifierList migration_state_notifiers =
70     NOTIFIER_LIST_INITIALIZER(migration_state_notifiers);
71 
72 /* Messages sent on the return path from destination to source */
73 enum mig_rp_message_type {
74     MIG_RP_MSG_INVALID = 0,  /* Must be 0 */
75     MIG_RP_MSG_SHUT,         /* sibling will not send any more RP messages */
76     MIG_RP_MSG_PONG,         /* Response to a PING; data (seq: be32) */
77 
78     MIG_RP_MSG_REQ_PAGES_ID, /* data (start: be64, len: be32, id: string) */
79     MIG_RP_MSG_REQ_PAGES,    /* data (start: be64, len: be32) */
80     MIG_RP_MSG_RECV_BITMAP,  /* send recved_bitmap back to source */
81     MIG_RP_MSG_RESUME_ACK,   /* tell source that we are ready to resume */
82     MIG_RP_MSG_SWITCHOVER_ACK, /* Tell source it's OK to do switchover */
83 
84     MIG_RP_MSG_MAX
85 };
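
/*
 * On the wire, each return-path message is framed as:
 *   be16 message type (one of the values above)
 *   be16 payload length in bytes
 *   payload bytes (per-message format as noted above)
 * See migrate_send_rp_message() below for the sending side.
 */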
86 
87 /* Once fault tolerance is added, we could have several
88    migrations at once.  For now there is no need for
89    dynamic creation of migration objects. */
90 
91 static MigrationState *current_migration;
92 static MigrationIncomingState *current_incoming;
93 
94 static GSList *migration_blockers;
95 
96 static bool migration_object_check(MigrationState *ms, Error **errp);
97 static int migration_maybe_pause(MigrationState *s,
98                                  int *current_active_state,
99                                  int new_state);
100 static void migrate_fd_cancel(MigrationState *s);
101 
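/* Multifd and postcopy preempt both require more than one migration channel. */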
102 static bool migration_needs_multiple_sockets(void)
103 {
104     return migrate_multifd() || migrate_postcopy_preempt();
105 }
106 
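/* Only socket-based URIs (tcp, unix, vsock) can carry multiple channels. */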
107 static bool uri_supports_multi_channels(const char *uri)
108 {
109     return strstart(uri, "tcp:", NULL) || strstart(uri, "unix:", NULL) ||
110            strstart(uri, "vsock:", NULL);
111 }
112 
113 static bool
114 migration_channels_and_uri_compatible(const char *uri, Error **errp)
115 {
116     if (migration_needs_multiple_sockets() &&
117         !uri_supports_multi_channels(uri)) {
118         error_setg(errp, "Migration requires multi-channel URIs (e.g. tcp)");
119         return false;
120     }
121 
122     return true;
123 }
124 
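/*
 * GCompareFunc for the page_requested tree; keys are the (page-aligned)
 * host addresses of requested pages, ordered numerically.
 */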
125 static gint page_request_addr_cmp(gconstpointer ap, gconstpointer bp)
126 {
127     uintptr_t a = (uintptr_t) ap, b = (uintptr_t) bp;
128 
129     return (a > b) - (a < b);
130 }
131 
132 void migration_object_init(void)
133 {
134     /* This can only be called once. */
135     assert(!current_migration);
136     current_migration = MIGRATION_OBJ(object_new(TYPE_MIGRATION));
137 
138     /*
139      * Initialize the incoming migration state as well, regardless of
140      * whether it will actually be used.
141      */
142     assert(!current_incoming);
143     current_incoming = g_new0(MigrationIncomingState, 1);
144     current_incoming->state = MIGRATION_STATUS_NONE;
145     current_incoming->postcopy_remote_fds =
146         g_array_new(FALSE, TRUE, sizeof(struct PostCopyFD));
147     qemu_mutex_init(&current_incoming->rp_mutex);
148     qemu_mutex_init(&current_incoming->postcopy_prio_thread_mutex);
149     qemu_event_init(&current_incoming->main_thread_load_event, false);
150     qemu_sem_init(&current_incoming->postcopy_pause_sem_dst, 0);
151     qemu_sem_init(&current_incoming->postcopy_pause_sem_fault, 0);
152     qemu_sem_init(&current_incoming->postcopy_pause_sem_fast_load, 0);
153     qemu_sem_init(&current_incoming->postcopy_qemufile_dst_done, 0);
154 
155     qemu_mutex_init(&current_incoming->page_request_mutex);
156     current_incoming->page_requested = g_tree_new(page_request_addr_cmp);
157 
158     migration_object_check(current_migration, &error_fatal);
159 
160     blk_mig_init();
161     ram_mig_init();
162     dirty_bitmap_mig_init();
163 }
164 
165 void migration_cancel(const Error *error)
166 {
167     if (error) {
168         migrate_set_error(current_migration, error);
169     }
170     if (migrate_dirty_limit()) {
171         qmp_cancel_vcpu_dirty_limit(false, -1, NULL);
172     }
173     migrate_fd_cancel(current_migration);
174 }
175 
176 void migration_shutdown(void)
177 {
178     /*
179      * When the QEMU main thread exits, the COLO thread may be
180      * waiting on a semaphore.  Wake up the COLO thread before
181      * shutting down migration.
182      */
183     colo_shutdown();
184     /*
185      * Cancel the current migration - that will (eventually)
186      * stop the migration using this structure
187      */
188     migration_cancel(NULL);
189     object_unref(OBJECT(current_migration));
190 
191     /*
192      * Cancel outgoing migration of dirty bitmaps. It should
193      * at least unref used block nodes.
194      */
195     dirty_bitmap_mig_cancel_outgoing();
196 
197     /*
198      * Cancel incoming migration of dirty bitmaps.  Dirty bitmaps are
199      * non-critical data, and their loss is never considered
200      * serious.
201      */
202     dirty_bitmap_mig_cancel_incoming();
203 }
204 
205 /* For outgoing */
206 MigrationState *migrate_get_current(void)
207 {
208     /* This can only be called after the object has been created. */
209     assert(current_migration);
210     return current_migration;
211 }
212 
213 MigrationIncomingState *migration_incoming_get_current(void)
214 {
215     assert(current_incoming);
216     return current_incoming;
217 }
218 
219 void migration_incoming_transport_cleanup(MigrationIncomingState *mis)
220 {
221     if (mis->socket_address_list) {
222         qapi_free_SocketAddressList(mis->socket_address_list);
223         mis->socket_address_list = NULL;
224     }
225 
226     if (mis->transport_cleanup) {
227         mis->transport_cleanup(mis->transport_data);
228         mis->transport_data = mis->transport_cleanup = NULL;
229     }
230 }
231 
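/*
 * Tear down the incoming migration state: notify the source (if a return
 * path is open), close the migration streams and release the per-incoming
 * resources.
 */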
232 void migration_incoming_state_destroy(void)
233 {
234     struct MigrationIncomingState *mis = migration_incoming_get_current();
235 
236     multifd_load_cleanup();
237     compress_threads_load_cleanup();
238 
239     if (mis->to_src_file) {
240         /* Tell source that we are done */
241         migrate_send_rp_shut(mis, qemu_file_get_error(mis->from_src_file) != 0);
242         qemu_fclose(mis->to_src_file);
243         mis->to_src_file = NULL;
244     }
245 
246     if (mis->from_src_file) {
247         migration_ioc_unregister_yank_from_file(mis->from_src_file);
248         qemu_fclose(mis->from_src_file);
249         mis->from_src_file = NULL;
250     }
251     if (mis->postcopy_remote_fds) {
252         g_array_free(mis->postcopy_remote_fds, TRUE);
253         mis->postcopy_remote_fds = NULL;
254     }
255 
256     migration_incoming_transport_cleanup(mis);
257     qemu_event_reset(&mis->main_thread_load_event);
258 
259     if (mis->page_requested) {
260         g_tree_destroy(mis->page_requested);
261         mis->page_requested = NULL;
262     }
263 
264     if (mis->postcopy_qemufile_dst) {
265         migration_ioc_unregister_yank_from_file(mis->postcopy_qemufile_dst);
266         qemu_fclose(mis->postcopy_qemufile_dst);
267         mis->postcopy_qemufile_dst = NULL;
268     }
269 
270     yank_unregister_instance(MIGRATION_YANK_INSTANCE);
271 }
272 
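/* Emit a QMP MIGRATION event for the new state, if events are enabled. */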
273 static void migrate_generate_event(int new_state)
274 {
275     if (migrate_events()) {
276         qapi_event_send_migration(new_state);
277     }
278 }
279 
280 /*
281  * Send a message on the return channel back to the source
282  * of the migration.
283  */
284 static int migrate_send_rp_message(MigrationIncomingState *mis,
285                                    enum mig_rp_message_type message_type,
286                                    uint16_t len, void *data)
287 {
288     int ret = 0;
289 
290     trace_migrate_send_rp_message((int)message_type, len);
291     QEMU_LOCK_GUARD(&mis->rp_mutex);
292 
293     /*
294      * It's possible that the file handle got lost due to network
295      * failures.
296      */
297     if (!mis->to_src_file) {
298         ret = -EIO;
299         return ret;
300     }
301 
302     qemu_put_be16(mis->to_src_file, (unsigned int)message_type);
303     qemu_put_be16(mis->to_src_file, len);
304     qemu_put_buffer(mis->to_src_file, data, len);
305     qemu_fflush(mis->to_src_file);
306 
307     /* It's possible that the QEMU file got an error during sending */
308     ret = qemu_file_get_error(mis->to_src_file);
309 
310     return ret;
311 }
312 
313 /* Request one page from the source VM at the given start address.
314  *   rb: the RAMBlock to request the page in
315  *   start: byte offset within the RAMBlock
316  * The requested length is the RAMBlock's page size.
317  */
318 int migrate_send_rp_message_req_pages(MigrationIncomingState *mis,
319                                       RAMBlock *rb, ram_addr_t start)
320 {
321     uint8_t bufc[12 + 1 + 255]; /* start (8), len (4), rbname len (1), rbname up to 255 */
322     size_t msglen = 12; /* start + len */
323     size_t len = qemu_ram_pagesize(rb);
324     enum mig_rp_message_type msg_type;
325     const char *rbname;
326     int rbname_len;
327 
328     *(uint64_t *)bufc = cpu_to_be64((uint64_t)start);
329     *(uint32_t *)(bufc + 8) = cpu_to_be32((uint32_t)len);
330 
331     /*
332      * We track the last RAMBlock that we requested a page from.  Note that
333      * we don't need locking because this function is only called from the
334      * postcopy ram fault thread.
335      */
336     if (rb != mis->last_rb) {
337         mis->last_rb = rb;
338 
339         rbname = qemu_ram_get_idstr(rb);
340         rbname_len = strlen(rbname);
341 
342         assert(rbname_len < 256);
343 
344         bufc[msglen++] = rbname_len;
345         memcpy(bufc + msglen, rbname, rbname_len);
346         msglen += rbname_len;
347         msg_type = MIG_RP_MSG_REQ_PAGES_ID;
348     } else {
349         msg_type = MIG_RP_MSG_REQ_PAGES;
350     }
351 
352     return migrate_send_rp_message(mis, msg_type, msglen, bufc);
353 }
354 
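/*
 * Like migrate_send_rp_message_req_pages(), but first checks whether the
 * page has already been received or is already queued, and only sends the
 * request when the page is genuinely still missing.
 */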
355 int migrate_send_rp_req_pages(MigrationIncomingState *mis,
356                               RAMBlock *rb, ram_addr_t start, uint64_t haddr)
357 {
358     void *aligned = (void *)(uintptr_t)ROUND_DOWN(haddr, qemu_ram_pagesize(rb));
359     bool received = false;
360 
361     WITH_QEMU_LOCK_GUARD(&mis->page_request_mutex) {
362         received = ramblock_recv_bitmap_test_byte_offset(rb, start);
363         if (!received && !g_tree_lookup(mis->page_requested, aligned)) {
364             /*
365              * The page has not been received, and it's not yet in the page
366              * request list.  Queue it.  Set the value of element to 1, so that
367              * things like g_tree_lookup() will return TRUE (1) when found.
368              */
369             g_tree_insert(mis->page_requested, aligned, (gpointer)1);
370             mis->page_requested_count++;
371             trace_postcopy_page_req_add(aligned, mis->page_requested_count);
372         }
373     }
374 
375     /*
376      * If the page is there, skip sending the message.  We don't even need
377      * the lock, because once the page has arrived, it'll be there forever.
378      */
379     if (received) {
380         return 0;
381     }
382 
383     return migrate_send_rp_message_req_pages(mis, rb, start);
384 }
385 
386 static bool migration_colo_enabled;
387 bool migration_incoming_colo_enabled(void)
388 {
389     return migration_colo_enabled;
390 }
391 
392 void migration_incoming_disable_colo(void)
393 {
394     ram_block_discard_disable(false);
395     migration_colo_enabled = false;
396 }
397 
398 int migration_incoming_enable_colo(void)
399 {
400 #ifndef CONFIG_REPLICATION
401     error_report("ENABLE_COLO command came in the migration stream, but the "
402                  "COLO module is not built in");
403     return -ENOTSUP;
404 #endif
405 
406     if (!migrate_colo()) {
407         error_report("ENABLE_COLO command came in the migration stream, but the "
408                      "x-colo capability is not set");
409         return -EINVAL;
410     }
411 
412     if (ram_block_discard_disable(true)) {
413         error_report("COLO: cannot disable RAM discard");
414         return -EBUSY;
415     }
416     migration_colo_enabled = true;
417     return 0;
418 }
419 
420 void migrate_add_address(SocketAddress *address)
421 {
422     MigrationIncomingState *mis = migration_incoming_get_current();
423 
424     QAPI_LIST_PREPEND(mis->socket_address_list,
425                       QAPI_CLONE(SocketAddress, address));
426 }
427 
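/*
 * Dispatch on the URI scheme and start listening for an incoming migration
 * on the matching transport (socket, rdma, exec or fd).
 */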
428 static void qemu_start_incoming_migration(const char *uri, Error **errp)
429 {
430     const char *p = NULL;
431 
432     /* Error out early if the URI is not compatible with the required channels */
433     if (!migration_channels_and_uri_compatible(uri, errp)) {
434         return;
435     }
436 
437     qapi_event_send_migration(MIGRATION_STATUS_SETUP);
438     if (strstart(uri, "tcp:", &p) ||
439         strstart(uri, "unix:", NULL) ||
440         strstart(uri, "vsock:", NULL)) {
441         socket_start_incoming_migration(p ? p : uri, errp);
442 #ifdef CONFIG_RDMA
443     } else if (strstart(uri, "rdma:", &p)) {
444         rdma_start_incoming_migration(p, errp);
445 #endif
446     } else if (strstart(uri, "exec:", &p)) {
447         exec_start_incoming_migration(p, errp);
448     } else if (strstart(uri, "fd:", &p)) {
449         fd_start_incoming_migration(p, errp);
450     } else {
451         error_setg(errp, "unknown migration protocol: %s", uri);
452     }
453 }
454 
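/*
 * Bottom half run on the main thread once the incoming migration stream has
 * been fully loaded: it re-activates block devices, announces the guest on
 * the network, starts (or pauses) the VM and marks the migration completed.
 */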
455 static void process_incoming_migration_bh(void *opaque)
456 {
457     Error *local_err = NULL;
458     MigrationIncomingState *mis = opaque;
459 
460     /* If the late_block_activate capability is set:
461      * Only fire up the block code now if we're going to restart the
462      * VM, else 'cont' will do it.
463      * This causes file locking to happen, so we don't want it to happen
464      * unless we really are starting the VM.
465      */
466     if (!migrate_late_block_activate() ||
467          (autostart && (!global_state_received() ||
468             global_state_get_runstate() == RUN_STATE_RUNNING))) {
469         /* Make sure all file formats throw away their mutable metadata.
470          * If we get an error here, just don't restart the VM yet. */
471         bdrv_activate_all(&local_err);
472         if (local_err) {
473             error_report_err(local_err);
474             local_err = NULL;
475             autostart = false;
476         }
477     }
478 
479     /*
480      * This must happen after all error conditions are dealt with and
481      * we're sure the VM is going to be running on this host.
482      */
483     qemu_announce_self(&mis->announce_timer, migrate_announce_params());
484 
485     multifd_load_shutdown();
486 
487     dirty_bitmap_mig_before_vm_start();
488 
489     if (!global_state_received() ||
490         global_state_get_runstate() == RUN_STATE_RUNNING) {
491         if (autostart) {
492             vm_start();
493         } else {
494             runstate_set(RUN_STATE_PAUSED);
495         }
496     } else if (migration_incoming_colo_enabled()) {
497         migration_incoming_disable_colo();
498         vm_start();
499     } else {
500         runstate_set(global_state_get_runstate());
501     }
502     /*
503      * This must happen after any state changes since as soon as an external
504      * observer sees this event they might start to prod at the VM assuming
505      * it's ready to use.
506      */
507     migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
508                       MIGRATION_STATUS_COMPLETED);
509     qemu_bh_delete(mis->bh);
510     migration_incoming_state_destroy();
511 }
512 
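/*
 * Coroutine that drives the incoming migration on the main channel: it loads
 * the migration stream and either hands off to the postcopy/COLO paths or
 * schedules the completion bottom half above.
 */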
513 static void coroutine_fn
514 process_incoming_migration_co(void *opaque)
515 {
516     MigrationIncomingState *mis = migration_incoming_get_current();
517     PostcopyState ps;
518     int ret;
519 
520     assert(mis->from_src_file);
521 
522     if (compress_threads_load_setup(mis->from_src_file)) {
523         error_report("Failed to setup decompress threads");
524         goto fail;
525     }
526 
527     mis->largest_page_size = qemu_ram_pagesize_largest();
528     postcopy_state_set(POSTCOPY_INCOMING_NONE);
529     migrate_set_state(&mis->state, MIGRATION_STATUS_NONE,
530                       MIGRATION_STATUS_ACTIVE);
531 
532     mis->loadvm_co = qemu_coroutine_self();
533     ret = qemu_loadvm_state(mis->from_src_file);
534     mis->loadvm_co = NULL;
535 
536     ps = postcopy_state_get();
537     trace_process_incoming_migration_co_end(ret, ps);
538     if (ps != POSTCOPY_INCOMING_NONE) {
539         if (ps == POSTCOPY_INCOMING_ADVISE) {
540             /*
541              * Where a migration had postcopy enabled (and thus went to advise)
542              * but managed to complete within the precopy period, we can use
543              * the normal exit.
544              */
545             postcopy_ram_incoming_cleanup(mis);
546         } else if (ret >= 0) {
547             /*
548              * Postcopy was started, cleanup should happen at the end of the
549              * postcopy thread.
550              */
551             trace_process_incoming_migration_co_postcopy_end_main();
552             return;
553         }
554         /* Else something went wrong; fall through to the normal exit path */
555     }
556 
557     if (ret < 0) {
558         error_report("load of migration failed: %s", strerror(-ret));
559         goto fail;
560     }
561 
562     if (colo_incoming_co() < 0) {
563         goto fail;
564     }
565 
566     mis->bh = qemu_bh_new(process_incoming_migration_bh, mis);
567     qemu_bh_schedule(mis->bh);
568     return;
569 fail:
570     migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
571                       MIGRATION_STATUS_FAILED);
572     qemu_fclose(mis->from_src_file);
573 
574     multifd_load_cleanup();
575     compress_threads_load_cleanup();
576 
577     exit(EXIT_FAILURE);
578 }
579 
580 /**
581  * migration_incoming_setup: Setup incoming migration
582  * @f: file for main migration channel
583  * @errp: where to put errors
584  *
585  * Returns: %true on success, %false on error.
586  */
587 static bool migration_incoming_setup(QEMUFile *f, Error **errp)
588 {
589     MigrationIncomingState *mis = migration_incoming_get_current();
590 
591     if (!mis->from_src_file) {
592         mis->from_src_file = f;
593     }
594     qemu_file_set_blocking(f, false);
595     return true;
596 }
597 
598 void migration_incoming_process(void)
599 {
600     Coroutine *co = qemu_coroutine_create(process_incoming_migration_co, NULL);
601     qemu_coroutine_enter(co);
602 }
603 
604 /* Returns true if recovered from a paused migration, otherwise false */
605 static bool postcopy_try_recover(void)
606 {
607     MigrationIncomingState *mis = migration_incoming_get_current();
608 
609     if (mis->state == MIGRATION_STATUS_POSTCOPY_PAUSED) {
610         /* Resumed from a paused postcopy migration */
611 
612         /* This should be set already in migration_incoming_setup() */
613         assert(mis->from_src_file);
614         /* Postcopy has a standalone thread to do the VM load */
615         qemu_file_set_blocking(mis->from_src_file, true);
616 
617         /* Re-configure the return path */
618         mis->to_src_file = qemu_file_get_return_path(mis->from_src_file);
619 
620         migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_PAUSED,
621                           MIGRATION_STATUS_POSTCOPY_RECOVER);
622 
623         /*
624          * Here, we only wake up the main loading thread (while the
625          * other threads keep waiting), so that we can receive
626          * commands from the source and answer them if needed.  The
627          * other threads will not be woken up until we are sure the
628          * source is ready to reply to page requests.
629          */
630         qemu_sem_post(&mis->postcopy_pause_sem_dst);
631         return true;
632     }
633 
634     return false;
635 }
636 
637 void migration_fd_process_incoming(QEMUFile *f, Error **errp)
638 {
639     if (!migration_incoming_setup(f, errp)) {
640         return;
641     }
642     if (postcopy_try_recover()) {
643         return;
644     }
645     migration_incoming_process();
646 }
647 
648 /*
649  * Returns true when we want to start a new incoming migration process,
650  * false otherwise.
651  */
652 static bool migration_should_start_incoming(bool main_channel)
653 {
654     /* Multifd doesn't start unless all channels are established */
655     if (migrate_multifd()) {
656         return migration_has_all_channels();
657     }
658 
659     /* Preempt channel only starts when the main channel is created */
660     if (migrate_postcopy_preempt()) {
661         return main_channel;
662     }
663 
664     /*
665      * For all other types of migration, we should only reach here when
666      * it's the main channel that's being created, and we should always
667      * proceed with this channel.
668      */
669     assert(main_channel);
670     return true;
671 }
672 
673 void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp)
674 {
675     MigrationIncomingState *mis = migration_incoming_get_current();
676     Error *local_err = NULL;
677     QEMUFile *f;
678     bool default_channel = true;
679     uint32_t channel_magic = 0;
680     int ret = 0;
681 
682     if (migrate_multifd() && !migrate_postcopy_ram() &&
683         qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_READ_MSG_PEEK)) {
684         /*
685          * With multiple channels, it is possible that channels arrive
686          * out of order on the destination, causing an incorrect mapping
687          * of source channels.  Check the channel MAGIC to decide the
688          * channel type.  Note this is best effort: the postcopy preempt
689          * channel does not send any magic number, so skip the check for
690          * postcopy live migration.  Also, TLS live migration already does
691          * the TLS handshake while initializing the main channel, so this
692          * issue cannot occur with TLS.
693          */
694         ret = migration_channel_read_peek(ioc, (void *)&channel_magic,
695                                           sizeof(channel_magic), &local_err);
696 
697         if (ret != 0) {
698             error_propagate(errp, local_err);
699             return;
700         }
701 
702         default_channel = (channel_magic == cpu_to_be32(QEMU_VM_FILE_MAGIC));
703     } else {
704         default_channel = !mis->from_src_file;
705     }
706 
707     if (multifd_load_setup(errp) != 0) {
708         error_setg(errp, "Failed to setup multifd channels");
709         return;
710     }
711 
712     if (default_channel) {
713         f = qemu_file_new_input(ioc);
714 
715         if (!migration_incoming_setup(f, errp)) {
716             return;
717         }
718     } else {
719         /* Multiple connections */
720         assert(migration_needs_multiple_sockets());
721         if (migrate_multifd()) {
722             multifd_recv_new_channel(ioc, &local_err);
723         } else {
724             assert(migrate_postcopy_preempt());
725             f = qemu_file_new_input(ioc);
726             postcopy_preempt_new_channel(mis, f);
727         }
728         if (local_err) {
729             error_propagate(errp, local_err);
730             return;
731         }
732     }
733 
734     if (migration_should_start_incoming(default_channel)) {
735         /* If it's a recovery, we're done */
736         if (postcopy_try_recover()) {
737             return;
738         }
739         migration_incoming_process();
740     }
741 }
742 
743 /**
744  * @migration_has_all_channels: We have received all channels that we need
745  *
746  * Returns true when we have connections to all the channels that
747  * we need for migration.
748  */
749 bool migration_has_all_channels(void)
750 {
751     MigrationIncomingState *mis = migration_incoming_get_current();
752 
753     if (!mis->from_src_file) {
754         return false;
755     }
756 
757     if (migrate_multifd()) {
758         return multifd_recv_all_channels_created();
759     }
760 
761     if (migrate_postcopy_preempt()) {
762         return mis->postcopy_qemufile_dst != NULL;
763     }
764 
765     return true;
766 }
767 
768 int migrate_send_rp_switchover_ack(MigrationIncomingState *mis)
769 {
770     return migrate_send_rp_message(mis, MIG_RP_MSG_SWITCHOVER_ACK, 0, NULL);
771 }
772 
773 /*
774  * Send a 'SHUT' message on the return channel with the given value
775  * to indicate that we've finished with the RP.  Non-0 value indicates
776  * error.
777  */
778 void migrate_send_rp_shut(MigrationIncomingState *mis,
779                           uint32_t value)
780 {
781     uint32_t buf;
782 
783     buf = cpu_to_be32(value);
784     migrate_send_rp_message(mis, MIG_RP_MSG_SHUT, sizeof(buf), &buf);
785 }
786 
787 /*
788  * Send a 'PONG' message on the return channel with the given value
789  * (normally in response to a 'PING')
790  */
791 void migrate_send_rp_pong(MigrationIncomingState *mis,
792                           uint32_t value)
793 {
794     uint32_t buf;
795 
796     buf = cpu_to_be32(value);
797     migrate_send_rp_message(mis, MIG_RP_MSG_PONG, sizeof(buf), &buf);
798 }
799 
800 void migrate_send_rp_recv_bitmap(MigrationIncomingState *mis,
801                                  char *block_name)
802 {
803     char buf[512];
804     int len;
805     int64_t res;
806 
807     /*
808      * First, we send the header part. It contains only the len of
809      * idstr, and the idstr itself.
810      */
811     len = strlen(block_name);
812     buf[0] = len;
813     memcpy(buf + 1, block_name, len);
814 
815     if (mis->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
816         error_report("%s: MSG_RP_RECV_BITMAP only used for recovery",
817                      __func__);
818         return;
819     }
820 
821     migrate_send_rp_message(mis, MIG_RP_MSG_RECV_BITMAP, len + 1, buf);
822 
823     /*
824      * Next, we dump the received bitmap to the stream.
825      *
826      * TODO: currently we are safe since we are the only user of the
827      * to_src_file handle (the fault thread is still paused), so it is
828      * OK even without taking the mutex.  However, the best approach is
829      * to take the lock before sending the message header, and release
830      * it after sending the bitmap.
831      */
832     qemu_mutex_lock(&mis->rp_mutex);
833     res = ramblock_recv_bitmap_send(mis->to_src_file, block_name);
834     qemu_mutex_unlock(&mis->rp_mutex);
835 
836     trace_migrate_send_rp_recv_bitmap(block_name, res);
837 }
838 
839 void migrate_send_rp_resume_ack(MigrationIncomingState *mis, uint32_t value)
840 {
841     uint32_t buf;
842 
843     buf = cpu_to_be32(value);
844     migrate_send_rp_message(mis, MIG_RP_MSG_RESUME_ACK, sizeof(buf), &buf);
845 }
846 
847 /*
848  * Return true if we're already in the middle of a migration
849  * (i.e. any of the active or setup states)
850  */
851 bool migration_is_setup_or_active(int state)
852 {
853     switch (state) {
854     case MIGRATION_STATUS_ACTIVE:
855     case MIGRATION_STATUS_POSTCOPY_ACTIVE:
856     case MIGRATION_STATUS_POSTCOPY_PAUSED:
857     case MIGRATION_STATUS_POSTCOPY_RECOVER:
858     case MIGRATION_STATUS_SETUP:
859     case MIGRATION_STATUS_PRE_SWITCHOVER:
860     case MIGRATION_STATUS_DEVICE:
861     case MIGRATION_STATUS_WAIT_UNPLUG:
862     case MIGRATION_STATUS_COLO:
863         return true;
864 
865     default:
866         return false;
867 
868     }
869 }
870 
871 bool migration_is_running(int state)
872 {
873     switch (state) {
874     case MIGRATION_STATUS_ACTIVE:
875     case MIGRATION_STATUS_POSTCOPY_ACTIVE:
876     case MIGRATION_STATUS_POSTCOPY_PAUSED:
877     case MIGRATION_STATUS_POSTCOPY_RECOVER:
878     case MIGRATION_STATUS_SETUP:
879     case MIGRATION_STATUS_PRE_SWITCHOVER:
880     case MIGRATION_STATUS_DEVICE:
881     case MIGRATION_STATUS_WAIT_UNPLUG:
882     case MIGRATION_STATUS_CANCELLING:
883         return true;
884 
885     default:
886         return false;
887 
888     }
889 }
890 
891 static bool migrate_show_downtime(MigrationState *s)
892 {
893     return (s->state == MIGRATION_STATUS_COMPLETED) || migration_in_postcopy();
894 }
895 
896 static void populate_time_info(MigrationInfo *info, MigrationState *s)
897 {
898     info->has_status = true;
899     info->has_setup_time = true;
900     info->setup_time = s->setup_time;
901 
902     if (s->state == MIGRATION_STATUS_COMPLETED) {
903         info->has_total_time = true;
904         info->total_time = s->total_time;
905     } else {
906         info->has_total_time = true;
907         info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) -
908                            s->start_time;
909     }
910 
911     if (migrate_show_downtime(s)) {
912         info->has_downtime = true;
913         info->downtime = s->downtime;
914     } else {
915         info->has_expected_downtime = true;
916         info->expected_downtime = s->expected_downtime;
917     }
918 }
919 
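/*
 * Fill in the RAM-related statistics of a MigrationInfo, plus the XBZRLE,
 * compression, CPU-throttle and dirty-limit details when those are active.
 */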
920 static void populate_ram_info(MigrationInfo *info, MigrationState *s)
921 {
922     size_t page_size = qemu_target_page_size();
923 
924     info->ram = g_malloc0(sizeof(*info->ram));
925     info->ram->transferred = stat64_get(&mig_stats.transferred);
926     info->ram->total = ram_bytes_total();
927     info->ram->duplicate = stat64_get(&mig_stats.zero_pages);
928     /* legacy value.  It is not used anymore */
929     info->ram->skipped = 0;
930     info->ram->normal = stat64_get(&mig_stats.normal_pages);
931     info->ram->normal_bytes = info->ram->normal * page_size;
932     info->ram->mbps = s->mbps;
933     info->ram->dirty_sync_count =
934         stat64_get(&mig_stats.dirty_sync_count);
935     info->ram->dirty_sync_missed_zero_copy =
936         stat64_get(&mig_stats.dirty_sync_missed_zero_copy);
937     info->ram->postcopy_requests =
938         stat64_get(&mig_stats.postcopy_requests);
939     info->ram->page_size = page_size;
940     info->ram->multifd_bytes = stat64_get(&mig_stats.multifd_bytes);
941     info->ram->pages_per_second = s->pages_per_second;
942     info->ram->precopy_bytes = stat64_get(&mig_stats.precopy_bytes);
943     info->ram->downtime_bytes = stat64_get(&mig_stats.downtime_bytes);
944     info->ram->postcopy_bytes = stat64_get(&mig_stats.postcopy_bytes);
945 
946     if (migrate_xbzrle()) {
947         info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
948         info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size();
949         info->xbzrle_cache->bytes = xbzrle_counters.bytes;
950         info->xbzrle_cache->pages = xbzrle_counters.pages;
951         info->xbzrle_cache->cache_miss = xbzrle_counters.cache_miss;
952         info->xbzrle_cache->cache_miss_rate = xbzrle_counters.cache_miss_rate;
953         info->xbzrle_cache->encoding_rate = xbzrle_counters.encoding_rate;
954         info->xbzrle_cache->overflow = xbzrle_counters.overflow;
955     }
956 
957     if (migrate_compress()) {
958         info->compression = g_malloc0(sizeof(*info->compression));
959         info->compression->pages = compression_counters.pages;
960         info->compression->busy = compression_counters.busy;
961         info->compression->busy_rate = compression_counters.busy_rate;
962         info->compression->compressed_size =
963                                     compression_counters.compressed_size;
964         info->compression->compression_rate =
965                                     compression_counters.compression_rate;
966     }
967 
968     if (cpu_throttle_active()) {
969         info->has_cpu_throttle_percentage = true;
970         info->cpu_throttle_percentage = cpu_throttle_get_percentage();
971     }
972 
973     if (s->state != MIGRATION_STATUS_COMPLETED) {
974         info->ram->remaining = ram_bytes_remaining();
975         info->ram->dirty_pages_rate =
976            stat64_get(&mig_stats.dirty_pages_rate);
977     }
978 
979     if (migrate_dirty_limit() && dirtylimit_in_service()) {
980         info->has_dirty_limit_throttle_time_per_round = true;
981         info->dirty_limit_throttle_time_per_round =
982                             dirtylimit_throttle_time_per_round();
983 
984         info->has_dirty_limit_ring_full_time = true;
985         info->dirty_limit_ring_full_time = dirtylimit_ring_full_time();
986     }
987 }
988 
989 static void populate_disk_info(MigrationInfo *info)
990 {
991     if (blk_mig_active()) {
992         info->disk = g_malloc0(sizeof(*info->disk));
993         info->disk->transferred = blk_mig_bytes_transferred();
994         info->disk->remaining = blk_mig_bytes_remaining();
995         info->disk->total = blk_mig_bytes_total();
996     }
997 }
998 
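/*
 * Fill in the source-side portion of a MigrationInfo: blocker reasons, the
 * current status and, when a migration is or was running, its statistics.
 */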
999 static void fill_source_migration_info(MigrationInfo *info)
1000 {
1001     MigrationState *s = migrate_get_current();
1002     int state = qatomic_read(&s->state);
1003     GSList *cur_blocker = migration_blockers;
1004 
1005     info->blocked_reasons = NULL;
1006 
1007     /*
1008      * There are two types of reasons a migration might be blocked:
1009      * a) devices marked in VMState as non-migratable, and
1010      * b) explicit migration blockers.
1011      * We need to add both of them here.
1012      */
1013     qemu_savevm_non_migratable_list(&info->blocked_reasons);
1014 
1015     while (cur_blocker) {
1016         QAPI_LIST_PREPEND(info->blocked_reasons,
1017                           g_strdup(error_get_pretty(cur_blocker->data)));
1018         cur_blocker = g_slist_next(cur_blocker);
1019     }
1020     info->has_blocked_reasons = info->blocked_reasons != NULL;
1021 
1022     switch (state) {
1023     case MIGRATION_STATUS_NONE:
1024         /* no migration has happened ever */
1025         /* do not overwrite destination migration status */
1026         return;
1027     case MIGRATION_STATUS_SETUP:
1028         info->has_status = true;
1029         info->has_total_time = false;
1030         break;
1031     case MIGRATION_STATUS_ACTIVE:
1032     case MIGRATION_STATUS_CANCELLING:
1033     case MIGRATION_STATUS_POSTCOPY_ACTIVE:
1034     case MIGRATION_STATUS_PRE_SWITCHOVER:
1035     case MIGRATION_STATUS_DEVICE:
1036     case MIGRATION_STATUS_POSTCOPY_PAUSED:
1037     case MIGRATION_STATUS_POSTCOPY_RECOVER:
1038         /* TODO add some postcopy stats */
1039         populate_time_info(info, s);
1040         populate_ram_info(info, s);
1041         populate_disk_info(info);
1042         migration_populate_vfio_info(info);
1043         break;
1044     case MIGRATION_STATUS_COLO:
1045         info->has_status = true;
1046         /* TODO: display COLO specific information (checkpoint info etc.) */
1047         break;
1048     case MIGRATION_STATUS_COMPLETED:
1049         populate_time_info(info, s);
1050         populate_ram_info(info, s);
1051         migration_populate_vfio_info(info);
1052         break;
1053     case MIGRATION_STATUS_FAILED:
1054         info->has_status = true;
1055         if (s->error) {
1056             info->error_desc = g_strdup(error_get_pretty(s->error));
1057         }
1058         break;
1059     case MIGRATION_STATUS_CANCELLED:
1060         info->has_status = true;
1061         break;
1062     case MIGRATION_STATUS_WAIT_UNPLUG:
1063         info->has_status = true;
1064         break;
1065     }
1066     info->status = state;
1067 }
1068 
1069 static void fill_destination_migration_info(MigrationInfo *info)
1070 {
1071     MigrationIncomingState *mis = migration_incoming_get_current();
1072 
1073     if (mis->socket_address_list) {
1074         info->has_socket_address = true;
1075         info->socket_address =
1076             QAPI_CLONE(SocketAddressList, mis->socket_address_list);
1077     }
1078 
1079     switch (mis->state) {
1080     case MIGRATION_STATUS_NONE:
1081         return;
1082     case MIGRATION_STATUS_SETUP:
1083     case MIGRATION_STATUS_CANCELLING:
1084     case MIGRATION_STATUS_CANCELLED:
1085     case MIGRATION_STATUS_ACTIVE:
1086     case MIGRATION_STATUS_POSTCOPY_ACTIVE:
1087     case MIGRATION_STATUS_POSTCOPY_PAUSED:
1088     case MIGRATION_STATUS_POSTCOPY_RECOVER:
1089     case MIGRATION_STATUS_FAILED:
1090     case MIGRATION_STATUS_COLO:
1091         info->has_status = true;
1092         break;
1093     case MIGRATION_STATUS_COMPLETED:
1094         info->has_status = true;
1095         fill_destination_postcopy_migration_info(info);
1096         break;
1097     }
1098     info->status = mis->state;
1099 }
1100 
1101 MigrationInfo *qmp_query_migrate(Error **errp)
1102 {
1103     MigrationInfo *info = g_malloc0(sizeof(*info));
1104 
1105     fill_destination_migration_info(info);
1106     fill_source_migration_info(info);
1107 
1108     return info;
1109 }
1110 
1111 void qmp_migrate_start_postcopy(Error **errp)
1112 {
1113     MigrationState *s = migrate_get_current();
1114 
1115     if (!migrate_postcopy()) {
1116         error_setg(errp, "Enable postcopy with migrate_set_capability before"
1117                          " the start of migration");
1118         return;
1119     }
1120 
1121     if (s->state == MIGRATION_STATUS_NONE) {
1122         error_setg(errp, "Postcopy must be started after migration has been"
1123                          " started");
1124         return;
1125     }
1126     /*
1127      * We don't error if the migration has already finished, since that
1128      * would be racy with issuing this command.
1129      */
1130     qatomic_set(&s->start_postcopy, true);
1131 }
1132 
1133 /* shared migration helpers */
1134 
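/*
 * Atomically transition *state from old_state to new_state; if the
 * transition happens, trace it and emit the corresponding QMP event.
 */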
1135 void migrate_set_state(int *state, int old_state, int new_state)
1136 {
1137     assert(new_state < MIGRATION_STATUS__MAX);
1138     if (qatomic_cmpxchg(state, old_state, new_state) == old_state) {
1139         trace_migrate_set_state(MigrationStatus_str(new_state));
1140         migrate_generate_event(new_state);
1141     }
1142 }
1143 
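/*
 * Clean up the source side after migration ends: join the migration thread,
 * close the outgoing streams, finish a CANCELLING -> CANCELLED transition
 * and notify state-change notifiers.  Runs on the main thread, normally via
 * the cleanup bottom half.
 */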
1144 static void migrate_fd_cleanup(MigrationState *s)
1145 {
1146     qemu_bh_delete(s->cleanup_bh);
1147     s->cleanup_bh = NULL;
1148 
1149     g_free(s->hostname);
1150     s->hostname = NULL;
1151     json_writer_free(s->vmdesc);
1152     s->vmdesc = NULL;
1153 
1154     qemu_savevm_state_cleanup();
1155 
1156     if (s->to_dst_file) {
1157         QEMUFile *tmp;
1158 
1159         trace_migrate_fd_cleanup();
1160         qemu_mutex_unlock_iothread();
1161         if (s->migration_thread_running) {
1162             qemu_thread_join(&s->thread);
1163             s->migration_thread_running = false;
1164         }
1165         qemu_mutex_lock_iothread();
1166 
1167         multifd_save_cleanup();
1168         qemu_mutex_lock(&s->qemu_file_lock);
1169         tmp = s->to_dst_file;
1170         s->to_dst_file = NULL;
1171         qemu_mutex_unlock(&s->qemu_file_lock);
1172         /*
1173          * Close the file handle without the lock to make sure the
1174          * critical section won't block for long.
1175          */
1176         migration_ioc_unregister_yank_from_file(tmp);
1177         qemu_fclose(tmp);
1178     }
1179 
1180     if (s->postcopy_qemufile_src) {
1181         migration_ioc_unregister_yank_from_file(s->postcopy_qemufile_src);
1182         qemu_fclose(s->postcopy_qemufile_src);
1183         s->postcopy_qemufile_src = NULL;
1184     }
1185 
1186     assert(!migration_is_active(s));
1187 
1188     if (s->state == MIGRATION_STATUS_CANCELLING) {
1189         migrate_set_state(&s->state, MIGRATION_STATUS_CANCELLING,
1190                           MIGRATION_STATUS_CANCELLED);
1191     }
1192 
1193     if (s->error) {
1194         /* It is used by 'info migrate', so we can't free it */
1195         error_report_err(error_copy(s->error));
1196     }
1197     notifier_list_notify(&migration_state_notifiers, s);
1198     block_cleanup_parameters();
1199     yank_unregister_instance(MIGRATION_YANK_INSTANCE);
1200 }
1201 
1202 static void migrate_fd_cleanup_schedule(MigrationState *s)
1203 {
1204     /*
1205      * Take a reference on the state for the BH, because it may run
1206      * when there are no other references left.
1207      */
1208     object_ref(OBJECT(s));
1209     qemu_bh_schedule(s->cleanup_bh);
1210 }
1211 
1212 static void migrate_fd_cleanup_bh(void *opaque)
1213 {
1214     MigrationState *s = opaque;
1215     migrate_fd_cleanup(s);
1216     object_unref(OBJECT(s));
1217 }
1218 
1219 void migrate_set_error(MigrationState *s, const Error *error)
1220 {
1221     QEMU_LOCK_GUARD(&s->error_mutex);
1222     if (!s->error) {
1223         s->error = error_copy(error);
1224     }
1225 }
1226 
1227 static void migrate_error_free(MigrationState *s)
1228 {
1229     QEMU_LOCK_GUARD(&s->error_mutex);
1230     if (s->error) {
1231         error_free(s->error);
1232         s->error = NULL;
1233     }
1234 }
1235 
1236 static void migrate_fd_error(MigrationState *s, const Error *error)
1237 {
1238     trace_migrate_fd_error(error_get_pretty(error));
1239     assert(s->to_dst_file == NULL);
1240     migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
1241                       MIGRATION_STATUS_FAILED);
1242     migrate_set_error(s, error);
1243 }
1244 
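/*
 * Cancel the ongoing outgoing migration: move the state machine to
 * CANCELLING and, if needed, force-shutdown the migration streams so that
 * any blocked sender quits.
 */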
1245 static void migrate_fd_cancel(MigrationState *s)
1246 {
1247     int old_state;
1248     QEMUFile *f = migrate_get_current()->to_dst_file;
1249     trace_migrate_fd_cancel();
1250 
1251     WITH_QEMU_LOCK_GUARD(&s->qemu_file_lock) {
1252         if (s->rp_state.from_dst_file) {
1253             /* Shut down the RP socket, causing the RP thread to exit */
1254             qemu_file_shutdown(s->rp_state.from_dst_file);
1255         }
1256     }
1257 
1258     do {
1259         old_state = s->state;
1260         if (!migration_is_running(old_state)) {
1261             break;
1262         }
1263         /* If the migration is paused, kick it out of the pause */
1264         if (old_state == MIGRATION_STATUS_PRE_SWITCHOVER) {
1265             qemu_sem_post(&s->pause_sem);
1266         }
1267         migrate_set_state(&s->state, old_state, MIGRATION_STATUS_CANCELLING);
1268     } while (s->state != MIGRATION_STATUS_CANCELLING);
1269 
1270     /*
1271      * If we're unlucky the migration code might be stuck somewhere in a
1272      * send/write while the network has failed and is waiting to timeout;
1273      * if we've got shutdown(2) available then we can force it to quit.
1274      * The outgoing QEMU file gets closed in migrate_fd_cleanup(), which
1275      * is called in a BH, so there is no race against this cancel.
1276      */
1277     if (s->state == MIGRATION_STATUS_CANCELLING && f) {
1278         qemu_file_shutdown(f);
1279     }
1280     if (s->state == MIGRATION_STATUS_CANCELLING && s->block_inactive) {
1281         Error *local_err = NULL;
1282 
1283         bdrv_activate_all(&local_err);
1284         if (local_err) {
1285             error_report_err(local_err);
1286         } else {
1287             s->block_inactive = false;
1288         }
1289     }
1290 }
1291 
1292 void add_migration_state_change_notifier(Notifier *notify)
1293 {
1294     notifier_list_add(&migration_state_notifiers, notify);
1295 }
1296 
1297 void remove_migration_state_change_notifier(Notifier *notify)
1298 {
1299     notifier_remove(notify);
1300 }
1301 
1302 bool migration_in_setup(MigrationState *s)
1303 {
1304     return s->state == MIGRATION_STATUS_SETUP;
1305 }
1306 
1307 bool migration_has_finished(MigrationState *s)
1308 {
1309     return s->state == MIGRATION_STATUS_COMPLETED;
1310 }
1311 
1312 bool migration_has_failed(MigrationState *s)
1313 {
1314     return (s->state == MIGRATION_STATUS_CANCELLED ||
1315             s->state == MIGRATION_STATUS_FAILED);
1316 }
1317 
1318 bool migration_in_postcopy(void)
1319 {
1320     MigrationState *s = migrate_get_current();
1321 
1322     switch (s->state) {
1323     case MIGRATION_STATUS_POSTCOPY_ACTIVE:
1324     case MIGRATION_STATUS_POSTCOPY_PAUSED:
1325     case MIGRATION_STATUS_POSTCOPY_RECOVER:
1326         return true;
1327     default:
1328         return false;
1329     }
1330 }
1331 
1332 bool migration_in_postcopy_after_devices(MigrationState *s)
1333 {
1334     return migration_in_postcopy() && s->postcopy_after_devices;
1335 }
1336 
1337 bool migration_in_incoming_postcopy(void)
1338 {
1339     PostcopyState ps = postcopy_state_get();
1340 
1341     return ps >= POSTCOPY_INCOMING_DISCARD && ps < POSTCOPY_INCOMING_END;
1342 }
1343 
1344 bool migration_incoming_postcopy_advised(void)
1345 {
1346     PostcopyState ps = postcopy_state_get();
1347 
1348     return ps >= POSTCOPY_INCOMING_ADVISE && ps < POSTCOPY_INCOMING_END;
1349 }
1350 
1351 bool migration_in_bg_snapshot(void)
1352 {
1353     MigrationState *s = migrate_get_current();
1354 
1355     return migrate_background_snapshot() &&
1356             migration_is_setup_or_active(s->state);
1357 }
1358 
1359 bool migration_is_idle(void)
1360 {
1361     MigrationState *s = current_migration;
1362 
1363     if (!s) {
1364         return true;
1365     }
1366 
1367     switch (s->state) {
1368     case MIGRATION_STATUS_NONE:
1369     case MIGRATION_STATUS_CANCELLED:
1370     case MIGRATION_STATUS_COMPLETED:
1371     case MIGRATION_STATUS_FAILED:
1372         return true;
1373     case MIGRATION_STATUS_SETUP:
1374     case MIGRATION_STATUS_CANCELLING:
1375     case MIGRATION_STATUS_ACTIVE:
1376     case MIGRATION_STATUS_POSTCOPY_ACTIVE:
1377     case MIGRATION_STATUS_COLO:
1378     case MIGRATION_STATUS_PRE_SWITCHOVER:
1379     case MIGRATION_STATUS_DEVICE:
1380     case MIGRATION_STATUS_WAIT_UNPLUG:
1381         return false;
1382     case MIGRATION_STATUS__MAX:
1383         g_assert_not_reached();
1384     }
1385 
1386     return false;
1387 }
1388 
1389 bool migration_is_active(MigrationState *s)
1390 {
1391     return (s->state == MIGRATION_STATUS_ACTIVE ||
1392             s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
1393 }
1394 
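/*
 * (Re)initialise the migration state for a new outgoing migration, keeping
 * user-set parameters/capabilities and locks, and move it to SETUP.
 */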
1395 int migrate_init(MigrationState *s, Error **errp)
1396 {
1397     int ret;
1398 
1399     ret = qemu_savevm_state_prepare(errp);
1400     if (ret) {
1401         return ret;
1402     }
1403 
1404     /*
1405      * Reinitialise all migration state, except
1406      * parameters/capabilities that the user set, and
1407      * locks.
1408      */
1409     s->cleanup_bh = 0;
1410     s->vm_start_bh = 0;
1411     s->to_dst_file = NULL;
1412     s->state = MIGRATION_STATUS_NONE;
1413     s->rp_state.from_dst_file = NULL;
1414     s->rp_state.error = false;
1415     s->mbps = 0.0;
1416     s->pages_per_second = 0.0;
1417     s->downtime = 0;
1418     s->expected_downtime = 0;
1419     s->setup_time = 0;
1420     s->start_postcopy = false;
1421     s->postcopy_after_devices = false;
1422     s->migration_thread_running = false;
1423     error_free(s->error);
1424     s->error = NULL;
1425     s->hostname = NULL;
1426 
1427     migrate_set_state(&s->state, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP);
1428 
1429     s->start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1430     s->total_time = 0;
1431     s->vm_old_state = -1;
1432     s->iteration_initial_bytes = 0;
1433     s->threshold_size = 0;
1434     s->switchover_acked = false;
1435     /*
1436      * Zero mig_stats and compression_counters for a
1437      * new migration.
1438      */
1439     memset(&mig_stats, 0, sizeof(mig_stats));
1440     memset(&compression_counters, 0, sizeof(compression_counters));
1441     migration_reset_vfio_bytes_transferred();
1442 
1443     return 0;
1444 }
1445 
1446 int migrate_add_blocker_internal(Error *reason, Error **errp)
1447 {
1448     /* Snapshots are similar to migrations, so check RUN_STATE_SAVE_VM too. */
1449     if (runstate_check(RUN_STATE_SAVE_VM) || !migration_is_idle()) {
1450         error_propagate_prepend(errp, error_copy(reason),
1451                                 "disallowing migration blocker "
1452                                 "(migration/snapshot in progress) for: ");
1453         return -EBUSY;
1454     }
1455 
1456     migration_blockers = g_slist_prepend(migration_blockers, reason);
1457     return 0;
1458 }
1459 
1460 int migrate_add_blocker(Error *reason, Error **errp)
1461 {
1462     if (only_migratable) {
1463         error_propagate_prepend(errp, error_copy(reason),
1464                                 "disallowing migration blocker "
1465                                 "(--only-migratable) for: ");
1466         return -EACCES;
1467     }
1468 
1469     return migrate_add_blocker_internal(reason, errp);
1470 }
1471 
1472 void migrate_del_blocker(Error *reason)
1473 {
1474     migration_blockers = g_slist_remove(migration_blockers, reason);
1475 }
1476 
1477 void qmp_migrate_incoming(const char *uri, Error **errp)
1478 {
1479     Error *local_err = NULL;
1480     static bool once = true;
1481 
1482     if (!once) {
1483         error_setg(errp, "The incoming migration has already been started");
1484         return;
1485     }
1486     if (!runstate_check(RUN_STATE_INMIGRATE)) {
1487         error_setg(errp, "'-incoming' was not specified on the command line");
1488         return;
1489     }
1490 
1491     if (!yank_register_instance(MIGRATION_YANK_INSTANCE, errp)) {
1492         return;
1493     }
1494 
1495     qemu_start_incoming_migration(uri, &local_err);
1496 
1497     if (local_err) {
1498         yank_unregister_instance(MIGRATION_YANK_INSTANCE);
1499         error_propagate(errp, local_err);
1500         return;
1501     }
1502 
1503     once = false;
1504 }
1505 
1506 void qmp_migrate_recover(const char *uri, Error **errp)
1507 {
1508     MigrationIncomingState *mis = migration_incoming_get_current();
1509 
1510     /*
1511      * Don't even bother to use ERRP_GUARD(): errp _must_ always be set by
1512      * callers (no one should ignore a recover failure); if one does, it's
1513      * a programming error.
1514      */
1515     assert(errp);
1516 
1517     if (mis->state != MIGRATION_STATUS_POSTCOPY_PAUSED) {
1518         error_setg(errp, "Migrate recover can only be run "
1519                    "when postcopy is paused.");
1520         return;
1521     }
1522 
1523     /* If there's an existing transport, release it */
1524     migration_incoming_transport_cleanup(mis);
1525 
1526     /*
1527      * Note that this call will never start a real migration; it will
1528      * only re-setup the migration stream and poke existing migration
1529      * to continue using that newly established channel.
1530      */
1531     qemu_start_incoming_migration(uri, errp);
1532 }
1533 
1534 void qmp_migrate_pause(Error **errp)
1535 {
1536     MigrationState *ms = migrate_get_current();
1537     MigrationIncomingState *mis = migration_incoming_get_current();
1538     int ret;
1539 
1540     if (ms->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
1541         /* Source side, during postcopy */
1542         qemu_mutex_lock(&ms->qemu_file_lock);
1543         ret = qemu_file_shutdown(ms->to_dst_file);
1544         qemu_mutex_unlock(&ms->qemu_file_lock);
1545         if (ret) {
1546             error_setg(errp, "Failed to pause source migration");
1547         }
1548         return;
1549     }
1550 
1551     if (mis->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
1552         ret = qemu_file_shutdown(mis->from_src_file);
1553         if (ret) {
1554             error_setg(errp, "Failed to pause destination migration");
1555         }
1556         return;
1557     }
1558 
1559     error_setg(errp, "migrate-pause is currently only supported "
1560                "during postcopy-active state");
1561 }
1562 
1563 bool migration_is_blocked(Error **errp)
1564 {
1565     if (qemu_savevm_state_blocked(errp)) {
1566         return true;
1567     }
1568 
1569     if (migration_blockers) {
1570         error_propagate(errp, error_copy(migration_blockers->data));
1571         return true;
1572     }
1573 
1574     return false;
1575 }
1576 
1577 /* Returns true if we should continue to migrate, or false if an error was detected */
1578 static bool migrate_prepare(MigrationState *s, bool blk, bool blk_inc,
1579                             bool resume, Error **errp)
1580 {
1581     Error *local_err = NULL;
1582 
1583     if (resume) {
1584         if (s->state != MIGRATION_STATUS_POSTCOPY_PAUSED) {
1585             error_setg(errp, "Cannot resume if there is no "
1586                        "paused migration");
1587             return false;
1588         }
1589 
1590         /*
1591          * Postcopy recovery won't work well with the release-ram
1592          * capability, since release-ram drops the page buffer as
1593          * soon as the page is put into the send buffer.  So if a
1594          * network failure happens, any page buffers that have not
1595          * yet reached the destination VM but have already been sent
1596          * from the source VM will be lost forever.  Refuse to let
1597          * the client resume such a postcopy migration.  Luckily,
1598          * release-ram was designed to only be used when source and
1599          * destination VMs are on the same host, so it should be
1600          * fine.
1601          */
1602         if (migrate_release_ram()) {
1603             error_setg(errp, "Postcopy recovery cannot work "
1604                        "when release-ram capability is set");
1605             return false;
1606         }
1607 
1608         /* This is a resume, skip init status */
1609         return true;
1610     }
1611 
1612     if (migration_is_running(s->state)) {
1613         error_setg(errp, QERR_MIGRATION_ACTIVE);
1614         return false;
1615     }
1616 
1617     if (runstate_check(RUN_STATE_INMIGRATE)) {
1618         error_setg(errp, "Guest is waiting for an incoming migration");
1619         return false;
1620     }
1621 
1622     if (runstate_check(RUN_STATE_POSTMIGRATE)) {
1623         error_setg(errp, "Can't migrate the vm that was paused due to "
1624                    "previous migration");
1625         return false;
1626     }
1627 
1628     if (migration_is_blocked(errp)) {
1629         return false;
1630     }
1631 
1632     if (blk || blk_inc) {
1633         if (migrate_colo()) {
1634             error_setg(errp, "No disk migration is required in COLO mode");
1635             return false;
1636         }
1637         if (migrate_block() || migrate_block_incremental()) {
1638             error_setg(errp, "Command options are incompatible with "
1639                        "current migration capabilities");
1640             return false;
1641         }
1642         if (!migrate_cap_set(MIGRATION_CAPABILITY_BLOCK, true, &local_err)) {
1643             error_propagate(errp, local_err);
1644             return false;
1645         }
1646         s->must_remove_block_options = true;
1647     }
1648 
1649     if (blk_inc) {
1650         migrate_set_block_incremental(true);
1651     }
1652 
1653     if (migrate_init(s, errp)) {
1654         return false;
1655     }
1656 
1657     return true;
1658 }
1659 
1660 void qmp_migrate(const char *uri, bool has_blk, bool blk,
1661                  bool has_inc, bool inc, bool has_detach, bool detach,
1662                  bool has_resume, bool resume, Error **errp)
1663 {
1664     bool resume_requested;
1665     Error *local_err = NULL;
1666     MigrationState *s = migrate_get_current();
1667     const char *p = NULL;
1668 
1669     /* Check whether the URI is suitable for migration */
1670     if (!migration_channels_and_uri_compatible(uri, errp)) {
1671         return;
1672     }
1673 
1674     resume_requested = has_resume && resume;
1675     if (!migrate_prepare(s, has_blk && blk, has_inc && inc,
1676                          resume_requested, errp)) {
1677         /* Error detected, put into errp */
1678         return;
1679     }
1680 
1681     if (!resume_requested) {
1682         if (!yank_register_instance(MIGRATION_YANK_INSTANCE, errp)) {
1683             return;
1684         }
1685     }
1686 
1687     if (strstart(uri, "tcp:", &p) ||
1688         strstart(uri, "unix:", NULL) ||
1689         strstart(uri, "vsock:", NULL)) {
1690         socket_start_outgoing_migration(s, p ? p : uri, &local_err);
1691 #ifdef CONFIG_RDMA
1692     } else if (strstart(uri, "rdma:", &p)) {
1693         rdma_start_outgoing_migration(s, p, &local_err);
1694 #endif
1695     } else if (strstart(uri, "exec:", &p)) {
1696         exec_start_outgoing_migration(s, p, &local_err);
1697     } else if (strstart(uri, "fd:", &p)) {
1698         fd_start_outgoing_migration(s, p, &local_err);
1699     } else {
1700         if (!resume_requested) {
1701             yank_unregister_instance(MIGRATION_YANK_INSTANCE);
1702         }
1703         error_setg(&local_err, QERR_INVALID_PARAMETER_VALUE, "uri",
1704                    "a valid migration protocol");
1705         migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
1706                           MIGRATION_STATUS_FAILED);
1707         block_cleanup_parameters();
1708         return;
1709     }
1710 
1711     if (local_err) {
1712         if (!resume_requested) {
1713             yank_unregister_instance(MIGRATION_YANK_INSTANCE);
1714         }
1715         migrate_fd_error(s, local_err);
1716         error_propagate(errp, local_err);
1717         return;
1718     }
1719 }
1720 
1721 void qmp_migrate_cancel(Error **errp)
1722 {
1723     migration_cancel(NULL);
1724 }
1725 
1726 void qmp_migrate_continue(MigrationStatus state, Error **errp)
1727 {
1728     MigrationState *s = migrate_get_current();
1729     if (s->state != state) {
1730         error_setg(errp,  "Migration not in expected state: %s",
1731                    MigrationStatus_str(s->state));
1732         return;
1733     }
1734     qemu_sem_post(&s->pause_sem);
1735 }
1736 
1737 /* migration thread support */
1738 /*
1739  * Something bad happened to the RP stream, mark an error
1740  * The caller shall print or trace something to indicate why
1741  */
1742 static void mark_source_rp_bad(MigrationState *s)
1743 {
1744     s->rp_state.error = true;
1745 }
1746 
1747 static struct rp_cmd_args {
1748     ssize_t     len; /* -1 = variable */
1749     const char *name;
1750 } rp_cmd_args[] = {
1751     [MIG_RP_MSG_INVALID]        = { .len = -1, .name = "INVALID" },
1752     [MIG_RP_MSG_SHUT]           = { .len =  4, .name = "SHUT" },
1753     [MIG_RP_MSG_PONG]           = { .len =  4, .name = "PONG" },
1754     [MIG_RP_MSG_REQ_PAGES]      = { .len = 12, .name = "REQ_PAGES" },
1755     [MIG_RP_MSG_REQ_PAGES_ID]   = { .len = -1, .name = "REQ_PAGES_ID" },
1756     [MIG_RP_MSG_RECV_BITMAP]    = { .len = -1, .name = "RECV_BITMAP" },
1757     [MIG_RP_MSG_RESUME_ACK]     = { .len =  4, .name = "RESUME_ACK" },
1758     [MIG_RP_MSG_SWITCHOVER_ACK] = { .len =  0, .name = "SWITCHOVER_ACK" },
1759     [MIG_RP_MSG_MAX]            = { .len = -1, .name = "MAX" },
1760 };
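/*
 * On the wire, each return path message read by source_return_path_thread()
 * consists of a be16 message type followed by a be16 payload length and then
 * the payload itself.  The expected payload sizes are listed above in
 * rp_cmd_args[]; a len of -1 means the payload is variable-sized.
 */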
1761 
1762 /*
1763  * Process a request for pages received on the return path.
1764  * We're allowed to send more than requested (e.g. to round to our page size)
1765  * and we don't need to send pages that have already been sent.
1766  */
1767 static void migrate_handle_rp_req_pages(MigrationState *ms, const char* rbname,
1768                                        ram_addr_t start, size_t len)
1769 {
1770     long our_host_ps = qemu_real_host_page_size();
1771 
1772     trace_migrate_handle_rp_req_pages(rbname, start, len);
1773 
1774     /*
1775      * Since we currently insist on matching page sizes, just sanity check
1776      * we're being asked for whole host pages.
1777      */
1778     if (!QEMU_IS_ALIGNED(start, our_host_ps) ||
1779         !QEMU_IS_ALIGNED(len, our_host_ps)) {
1780         error_report("%s: Misaligned page request, start: " RAM_ADDR_FMT
1781                      " len: %zd", __func__, start, len);
1782         mark_source_rp_bad(ms);
1783         return;
1784     }
1785 
1786     if (ram_save_queue_pages(rbname, start, len)) {
1787         mark_source_rp_bad(ms);
1788     }
1789 }
1790 
1791 /* Return true to retry, false to quit */
1792 static bool postcopy_pause_return_path_thread(MigrationState *s)
1793 {
1794     trace_postcopy_pause_return_path();
1795 
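    /*
     * postcopy_pause_rp_sem is posted by postcopy_pause() on the migration
     * thread once a recovery procedure has installed a new return path
     * channel, so blocking here is expected to be temporary.
     */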
1796     qemu_sem_wait(&s->postcopy_pause_rp_sem);
1797 
1798     trace_postcopy_pause_return_path_continued();
1799 
1800     return true;
1801 }
1802 
1803 static int migrate_handle_rp_recv_bitmap(MigrationState *s, char *block_name)
1804 {
1805     RAMBlock *block = qemu_ram_block_by_name(block_name);
1806 
1807     if (!block) {
1808         error_report("%s: invalid block name '%s'", __func__, block_name);
1809         return -EINVAL;
1810     }
1811 
1812     /* Fetch the received bitmap and refresh the dirty bitmap */
1813     return ram_dirty_bitmap_reload(s, block);
1814 }
1815 
1816 static int migrate_handle_rp_resume_ack(MigrationState *s, uint32_t value)
1817 {
1818     trace_source_return_path_thread_resume_ack(value);
1819 
1820     if (value != MIGRATION_RESUME_ACK_VALUE) {
1821         error_report("%s: illegal resume_ack value %"PRIu32,
1822                      __func__, value);
1823         return -1;
1824     }
1825 
1826     /* Now both sides are active. */
1827     migrate_set_state(&s->state, MIGRATION_STATUS_POSTCOPY_RECOVER,
1828                       MIGRATION_STATUS_POSTCOPY_ACTIVE);
1829 
1830     /* Notify the send thread that it's time to continue sending pages */
1831     qemu_sem_post(&s->rp_state.rp_sem);
1832 
1833     return 0;
1834 }
1835 
1836 /*
1837  * Release ms->rp_state.from_dst_file (and postcopy_qemufile_src, if
1838  * it exists) in a safe way.
1839  */
1840 static void migration_release_dst_files(MigrationState *ms)
1841 {
1842     QEMUFile *file;
1843 
1844     WITH_QEMU_LOCK_GUARD(&ms->qemu_file_lock) {
1845         /*
1846          * Reset the from_dst_file pointer before releasing it, as we
1847          * can't block within the lock section.
1848          */
1849         file = ms->rp_state.from_dst_file;
1850         ms->rp_state.from_dst_file = NULL;
1851     }
1852 
1853     /*
1854      * Do the same to the postcopy fast path socket too, if there is one.
1855      * No locking needed because this qemufile should only be managed by
1856      * the return path thread.
1857      */
1858     if (ms->postcopy_qemufile_src) {
1859         migration_ioc_unregister_yank_from_file(ms->postcopy_qemufile_src);
1860         qemu_file_shutdown(ms->postcopy_qemufile_src);
1861         qemu_fclose(ms->postcopy_qemufile_src);
1862         ms->postcopy_qemufile_src = NULL;
1863     }
1864 
1865     qemu_fclose(file);
1866 }
1867 
1868 /*
1869  * Handles messages sent on the return path towards the source VM
1870  *
1871  */
1872 static void *source_return_path_thread(void *opaque)
1873 {
1874     MigrationState *ms = opaque;
1875     QEMUFile *rp = ms->rp_state.from_dst_file;
1876     uint16_t header_len, header_type;
1877     uint8_t buf[512];
1878     uint32_t tmp32, sibling_error;
1879     ram_addr_t start = 0; /* =0 to silence warning */
1880     size_t  len = 0, expected_len;
1881     int res;
1882 
1883     trace_source_return_path_thread_entry();
1884     rcu_register_thread();
1885 
1886 retry:
1887     while (!ms->rp_state.error && !qemu_file_get_error(rp) &&
1888            migration_is_setup_or_active(ms->state)) {
1889         trace_source_return_path_thread_loop_top();
1890         header_type = qemu_get_be16(rp);
1891         header_len = qemu_get_be16(rp);
1892 
1893         if (qemu_file_get_error(rp)) {
1894             mark_source_rp_bad(ms);
1895             goto out;
1896         }
1897 
1898         if (header_type >= MIG_RP_MSG_MAX ||
1899             header_type == MIG_RP_MSG_INVALID) {
1900             error_report("RP: Received invalid message 0x%04x length 0x%04x",
1901                          header_type, header_len);
1902             mark_source_rp_bad(ms);
1903             goto out;
1904         }
1905 
1906         if ((rp_cmd_args[header_type].len != -1 &&
1907             header_len != rp_cmd_args[header_type].len) ||
1908             header_len > sizeof(buf)) {
1909             error_report("RP: Received '%s' message (0x%04x) with "
1910                          "incorrect length %d expecting %zu",
1911                          rp_cmd_args[header_type].name, header_type, header_len,
1912                          (size_t)rp_cmd_args[header_type].len);
1913             mark_source_rp_bad(ms);
1914             goto out;
1915         }
1916 
1917         /* We know we've got a valid header by this point */
1918         res = qemu_get_buffer(rp, buf, header_len);
1919         if (res != header_len) {
1920             error_report("RP: Failed reading data for message 0x%04x"
1921                          " read %d expected %d",
1922                          header_type, res, header_len);
1923             mark_source_rp_bad(ms);
1924             goto out;
1925         }
1926 
1927         /* OK, we have the message and the data */
1928         switch (header_type) {
1929         case MIG_RP_MSG_SHUT:
1930             sibling_error = ldl_be_p(buf);
1931             trace_source_return_path_thread_shut(sibling_error);
1932             if (sibling_error) {
1933                 error_report("RP: Sibling indicated error %d", sibling_error);
1934                 mark_source_rp_bad(ms);
1935             }
1936             /*
1937              * We'll let the main thread deal with closing the RP;
1938              * we could do a shutdown(2) on it, but we're the only user
1939              * anyway, so there's nothing gained.
1940              */
1941             goto out;
1942 
1943         case MIG_RP_MSG_PONG:
1944             tmp32 = ldl_be_p(buf);
1945             trace_source_return_path_thread_pong(tmp32);
1946             qemu_sem_post(&ms->rp_state.rp_pong_acks);
1947             break;
1948 
1949         case MIG_RP_MSG_REQ_PAGES:
1950             start = ldq_be_p(buf);
1951             len = ldl_be_p(buf + 8);
1952             migrate_handle_rp_req_pages(ms, NULL, start, len);
1953             break;
1954 
1955         case MIG_RP_MSG_REQ_PAGES_ID:
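            /* Payload: start (be64), len (be32), idstr length (1 byte), idstr */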
1956             expected_len = 12 + 1; /* header + termination */
1957 
1958             if (header_len >= expected_len) {
1959                 start = ldq_be_p(buf);
1960                 len = ldl_be_p(buf + 8);
1961                 /* Now we expect an idstr */
1962                 tmp32 = buf[12]; /* Length of the following idstr */
1963                 buf[13 + tmp32] = '\0';
1964                 expected_len += tmp32;
1965             }
1966             if (header_len != expected_len) {
1967                 error_report("RP: Req_Page_id with length %d expecting %zd",
1968                              header_len, expected_len);
1969                 mark_source_rp_bad(ms);
1970                 goto out;
1971             }
1972             migrate_handle_rp_req_pages(ms, (char *)&buf[13], start, len);
1973             break;
1974 
1975         case MIG_RP_MSG_RECV_BITMAP:
1976             if (header_len < 1) {
1977                 error_report("%s: missing block name", __func__);
1978                 mark_source_rp_bad(ms);
1979                 goto out;
1980             }
1981             /* Format: len (1B) + idstr (<255B). This ends the idstr. */
1982             buf[buf[0] + 1] = '\0';
1983             if (migrate_handle_rp_recv_bitmap(ms, (char *)(buf + 1))) {
1984                 mark_source_rp_bad(ms);
1985                 goto out;
1986             }
1987             break;
1988 
1989         case MIG_RP_MSG_RESUME_ACK:
1990             tmp32 = ldl_be_p(buf);
1991             if (migrate_handle_rp_resume_ack(ms, tmp32)) {
1992                 mark_source_rp_bad(ms);
1993                 goto out;
1994             }
1995             break;
1996 
1997         case MIG_RP_MSG_SWITCHOVER_ACK:
1998             ms->switchover_acked = true;
1999             trace_source_return_path_thread_switchover_acked();
2000             break;
2001 
2002         default:
2003             break;
2004         }
2005     }
2006 
2007 out:
2008     res = qemu_file_get_error(rp);
2009     if (res) {
2010         if (res && migration_in_postcopy()) {
2011             /*
2012              * Maybe there is something we can do: it looks like a
2013              * network down issue, and we pause for a recovery.
2014              */
2015             migration_release_dst_files(ms);
2016             rp = NULL;
2017             if (postcopy_pause_return_path_thread(ms)) {
2018                 /*
2019                  * Reload rp, reset the rest.  Referencing it is safe since
2020                  * it's reset only by us above, or when migration completes
2021                  */
2022                 rp = ms->rp_state.from_dst_file;
2023                 ms->rp_state.error = false;
2024                 goto retry;
2025             }
2026         }
2027 
2028         trace_source_return_path_thread_bad_end();
2029         mark_source_rp_bad(ms);
2030     }
2031 
2032     trace_source_return_path_thread_end();
2033     migration_release_dst_files(ms);
2034     rcu_unregister_thread();
2035     return NULL;
2036 }
2037 
2038 static int open_return_path_on_source(MigrationState *ms,
2039                                       bool create_thread)
2040 {
2041     ms->rp_state.from_dst_file = qemu_file_get_return_path(ms->to_dst_file);
2042     if (!ms->rp_state.from_dst_file) {
2043         return -1;
2044     }
2045 
2046     trace_open_return_path_on_source();
2047 
2048     if (!create_thread) {
2049         /* We're done */
2050         return 0;
2051     }
2052 
2053     qemu_thread_create(&ms->rp_state.rp_thread, "return path",
2054                        source_return_path_thread, ms, QEMU_THREAD_JOINABLE);
2055     ms->rp_state.rp_thread_created = true;
2056 
2057     trace_open_return_path_on_source_continue();
2058 
2059     return 0;
2060 }
2061 
2062 /* Returns 0 if the RP was ok, otherwise there was an error on the RP */
2063 static int await_return_path_close_on_source(MigrationState *ms)
2064 {
2065     /*
2066      * If this is a normal exit then the destination will send a SHUT and the
2067      * rp_thread will exit; however, if there's an error we need to cause
2068      * it to exit.
2069      */
2070     if (qemu_file_get_error(ms->to_dst_file) && ms->rp_state.from_dst_file) {
2071         /*
2072          * shutdown(2), if we have it, will cause it to unblock if it's stuck
2073          * waiting for the destination.
2074          */
2075         qemu_file_shutdown(ms->rp_state.from_dst_file);
2076         mark_source_rp_bad(ms);
2077     }
2078     trace_await_return_path_close_on_source_joining();
2079     qemu_thread_join(&ms->rp_state.rp_thread);
2080     ms->rp_state.rp_thread_created = false;
2081     trace_await_return_path_close_on_source_close();
2082     return ms->rp_state.error;
2083 }
2084 
2085 static inline void
2086 migration_wait_main_channel(MigrationState *ms)
2087 {
2088     /* Wait until one PONG message received */
2089     qemu_sem_wait(&ms->rp_state.rp_pong_acks);
2090 }
2091 
2092 /*
2093  * Switch from normal iteration to postcopy
2094  * Returns non-0 on error
2095  */
2096 static int postcopy_start(MigrationState *ms, Error **errp)
2097 {
2098     int ret;
2099     QIOChannelBuffer *bioc;
2100     QEMUFile *fb;
2101     int64_t time_at_stop = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
2102     uint64_t bandwidth = migrate_max_postcopy_bandwidth();
2103     bool restart_block = false;
2104     int cur_state = MIGRATION_STATUS_ACTIVE;
2105 
2106     if (migrate_postcopy_preempt()) {
2107         migration_wait_main_channel(ms);
2108         if (postcopy_preempt_establish_channel(ms)) {
2109             migrate_set_state(&ms->state, ms->state, MIGRATION_STATUS_FAILED);
2110             return -1;
2111         }
2112     }
2113 
2114     if (!migrate_pause_before_switchover()) {
2115         migrate_set_state(&ms->state, MIGRATION_STATUS_ACTIVE,
2116                           MIGRATION_STATUS_POSTCOPY_ACTIVE);
2117     }
2118 
2119     trace_postcopy_start();
2120     qemu_mutex_lock_iothread();
2121     trace_postcopy_start_set_run();
2122 
2123     qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL);
2124     global_state_store();
2125     ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
2126     if (ret < 0) {
2127         goto fail;
2128     }
2129 
2130     ret = migration_maybe_pause(ms, &cur_state,
2131                                 MIGRATION_STATUS_POSTCOPY_ACTIVE);
2132     if (ret < 0) {
2133         goto fail;
2134     }
2135 
2136     ret = bdrv_inactivate_all();
2137     if (ret < 0) {
2138         goto fail;
2139     }
2140     restart_block = true;
2141 
2142     /*
2143      * Cause any non-postcopiable but iterative devices to
2144      * send out their final data.
2145      */
2146     qemu_savevm_state_complete_precopy(ms->to_dst_file, true, false);
2147 
2148     /*
2149      * In the finish-migrate state, and with the io-lock held, everything
2150      * should be quiet, but we've potentially still got dirty pages and we
2151      * need to tell the destination to throw away any pages it has already
2152      * received that are dirty.
2153      */
2154     if (migrate_postcopy_ram()) {
2155         ram_postcopy_send_discard_bitmap(ms);
2156     }
2157 
2158     /*
2159      * Send the rest of the state - note that things that are doing
2160      * postcopy will notice we're in POSTCOPY_ACTIVE and not actually
2161      * wrap their state up here.
2162      */
2163     migration_rate_set(bandwidth);
2164     if (migrate_postcopy_ram()) {
2165         /* Ping just for debugging, helps line traces up */
2166         qemu_savevm_send_ping(ms->to_dst_file, 2);
2167     }
2168 
2169     /*
2170      * While loading the device state we may trigger page transfer
2171      * requests and the fd must be free to process those, and thus
2172      * the destination must read the whole device state off the fd before
2173      * it starts processing it.  Unfortunately the ad-hoc migration format
2174      * doesn't allow the destination to know the size to read without fully
2175      * parsing it through each device's load-state code (especially the
2176      * open-coded devices that use get/put).
2177      * So we wrap the device state up in a package with a length at the start;
2178      * to do this we use a qemu_buf to hold the whole of the device state.
2179      */
2180     bioc = qio_channel_buffer_new(4096);
2181     qio_channel_set_name(QIO_CHANNEL(bioc), "migration-postcopy-buffer");
2182     fb = qemu_file_new_output(QIO_CHANNEL(bioc));
2183     object_unref(OBJECT(bioc));
2184 
2185     /*
2186      * Make sure the receiver can get incoming pages before we send the rest
2187      * of the state
2188      */
2189     qemu_savevm_send_postcopy_listen(fb);
2190 
2191     qemu_savevm_state_complete_precopy(fb, false, false);
2192     if (migrate_postcopy_ram()) {
2193         qemu_savevm_send_ping(fb, 3);
2194     }
2195 
2196     qemu_savevm_send_postcopy_run(fb);
2197 
2198     /* <><> end of stuff going into the package */
2199 
2200     /* Last point of recovery; as soon as we send the package the destination
2201      * can open devices and potentially start running.
2202      * Let's just check again that we've not got any errors.
2203      */
2204     ret = qemu_file_get_error(ms->to_dst_file);
2205     if (ret) {
2206         error_setg(errp, "postcopy_start: Migration stream errored (pre package)");
2207         goto fail_closefb;
2208     }
2209 
2210     restart_block = false;
2211 
2212     /* Now send that blob */
2213     if (qemu_savevm_send_packaged(ms->to_dst_file, bioc->data, bioc->usage)) {
2214         goto fail_closefb;
2215     }
2216     qemu_fclose(fb);
2217 
2218     /* Send a notification to give a chance to anything that needs to happen
2219      * at the transition to postcopy and after the device state; in particular
2220      * spice needs to trigger a transition now.
2221      */
2222     ms->postcopy_after_devices = true;
2223     notifier_list_notify(&migration_state_notifiers, ms);
2224 
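    /*
     * Postcopy downtime is accounted from the timestamp taken at the top of
     * this function (before the VM was stopped) until now, after the
     * packaged device state has been sent.
     */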
2225     ms->downtime =  qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - time_at_stop;
2226 
2227     qemu_mutex_unlock_iothread();
2228 
2229     if (migrate_postcopy_ram()) {
2230         /*
2231          * Although this ping is just for debug, it could potentially be
2232          * used for getting a better measurement of downtime at the source.
2233          */
2234         qemu_savevm_send_ping(ms->to_dst_file, 4);
2235     }
2236 
2237     if (migrate_release_ram()) {
2238         ram_postcopy_migrated_memory_release(ms);
2239     }
2240 
2241     ret = qemu_file_get_error(ms->to_dst_file);
2242     if (ret) {
2243         error_setg(errp, "postcopy_start: Migration stream errored");
2244         migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
2245                               MIGRATION_STATUS_FAILED);
2246     }
2247 
2248     trace_postcopy_preempt_enabled(migrate_postcopy_preempt());
2249 
2250     return ret;
2251 
2252 fail_closefb:
2253     qemu_fclose(fb);
2254 fail:
2255     migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
2256                           MIGRATION_STATUS_FAILED);
2257     if (restart_block) {
2258         /* A failure happened early enough that we know the destination hasn't
2259          * accessed block devices, so we're safe to recover.
2260          */
2261         Error *local_err = NULL;
2262 
2263         bdrv_activate_all(&local_err);
2264         if (local_err) {
2265             error_report_err(local_err);
2266         }
2267     }
2268     qemu_mutex_unlock_iothread();
2269     return -1;
2270 }
2271 
2272 /**
2273  * migration_maybe_pause: Pause if required to by
2274  * migrate_pause_before_switchover; called with the iothread locked.
2275  * Returns: 0 on success
2276  */
2277 static int migration_maybe_pause(MigrationState *s,
2278                                  int *current_active_state,
2279                                  int new_state)
2280 {
2281     if (!migrate_pause_before_switchover()) {
2282         return 0;
2283     }
2284 
2285     /* Since leaving this state is not atomic with posting the semaphore,
2286      * it's possible that someone could have issued multiple migrate_continue
2287      * and the semaphore is incorrectly positive at this point;
2288      * the docs say it's undefined to reinit a semaphore that's already
2289      * init'd, so use timedwait to eat up any existing posts.
2290      */
2291     while (qemu_sem_timedwait(&s->pause_sem, 1) == 0) {
2292         /* This block intentionally left blank */
2293     }
2294 
2295     /*
2296      * If the migration is cancelled when it is in the completion phase,
2297      * the migration state is set to MIGRATION_STATUS_CANCELLING.
2298      * So in that case we don't need to wait on the semaphore; otherwise we
2299      * would wait forever on the 'pause_sem' semaphore.
2300      */
2301     if (s->state != MIGRATION_STATUS_CANCELLING) {
2302         qemu_mutex_unlock_iothread();
2303         migrate_set_state(&s->state, *current_active_state,
2304                           MIGRATION_STATUS_PRE_SWITCHOVER);
2305         qemu_sem_wait(&s->pause_sem);
2306         migrate_set_state(&s->state, MIGRATION_STATUS_PRE_SWITCHOVER,
2307                           new_state);
2308         *current_active_state = new_state;
2309         qemu_mutex_lock_iothread();
2310     }
2311 
2312     return s->state == new_state ? 0 : -EINVAL;
2313 }
2314 
2315 /**
2316  * migration_completion: Used by migration_thread when there's not much left.
2317  *   The caller 'breaks' the loop when this returns.
2318  *
2319  * @s: Current migration state
2320  */
2321 static void migration_completion(MigrationState *s)
2322 {
2323     int ret;
2324     int current_active_state = s->state;
2325 
2326     if (s->state == MIGRATION_STATUS_ACTIVE) {
2327         qemu_mutex_lock_iothread();
2328         s->downtime_start = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
2329         qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL);
2330 
2331         s->vm_old_state = runstate_get();
2332         global_state_store();
2333 
2334         ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
2335         trace_migration_completion_vm_stop(ret);
2336         if (ret >= 0) {
2337             ret = migration_maybe_pause(s, &current_active_state,
2338                                         MIGRATION_STATUS_DEVICE);
2339         }
2340         if (ret >= 0) {
2341             /*
2342              * Inactivate disks except in COLO, and track that we
2343              * have done so in order to remember to reactivate
2344              * them if migration fails or is cancelled.
2345              */
2346             s->block_inactive = !migrate_colo();
2347             migration_rate_set(RATE_LIMIT_DISABLED);
2348             ret = qemu_savevm_state_complete_precopy(s->to_dst_file, false,
2349                                                      s->block_inactive);
2350         }
2351 
2352         qemu_mutex_unlock_iothread();
2353 
2354         if (ret < 0) {
2355             goto fail;
2356         }
2357     } else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
2358         trace_migration_completion_postcopy_end();
2359 
2360         qemu_mutex_lock_iothread();
2361         qemu_savevm_state_complete_postcopy(s->to_dst_file);
2362         qemu_mutex_unlock_iothread();
2363 
2364         /*
2365          * Shutdown the postcopy fast path thread.  This is only needed
2366          * when dest QEMU binary is old (7.1/7.2).  QEMU 8.0+ doesn't need
2367          * this.
2368          */
2369         if (migrate_postcopy_preempt() && s->preempt_pre_7_2) {
2370             postcopy_preempt_shutdown_file(s);
2371         }
2372 
2373         trace_migration_completion_postcopy_end_after_complete();
2374     } else {
2375         goto fail;
2376     }
2377 
2378     /*
2379      * If rp was opened we must clean up the thread before
2380      * cleaning everything else up (since if there are no failures
2381      * it will wait for the destination to send its status in
2382      * a SHUT command).
2383      */
2384     if (s->rp_state.rp_thread_created) {
2385         int rp_error;
2386         trace_migration_return_path_end_before();
2387         rp_error = await_return_path_close_on_source(s);
2388         trace_migration_return_path_end_after(rp_error);
2389         if (rp_error) {
2390             goto fail;
2391         }
2392     }
2393 
2394     if (qemu_file_get_error(s->to_dst_file)) {
2395         trace_migration_completion_file_err();
2396         goto fail;
2397     }
2398 
2399     if (migrate_colo() && s->state == MIGRATION_STATUS_ACTIVE) {
2400         /* COLO does not support postcopy */
2401         migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
2402                           MIGRATION_STATUS_COLO);
2403     } else {
2404         migrate_set_state(&s->state, current_active_state,
2405                           MIGRATION_STATUS_COMPLETED);
2406     }
2407 
2408     return;
2409 
2410 fail:
2411     if (s->block_inactive && (s->state == MIGRATION_STATUS_ACTIVE ||
2412                               s->state == MIGRATION_STATUS_DEVICE)) {
2413         /*
2414          * If not doing postcopy, vm_start() will be called: let's
2415          * regain control of the images.
2416          */
2417         Error *local_err = NULL;
2418 
2419         qemu_mutex_lock_iothread();
2420         bdrv_activate_all(&local_err);
2421         if (local_err) {
2422             error_report_err(local_err);
2423         } else {
2424             s->block_inactive = false;
2425         }
2426         qemu_mutex_unlock_iothread();
2427     }
2428 
2429     migrate_set_state(&s->state, current_active_state,
2430                       MIGRATION_STATUS_FAILED);
2431 }
2432 
2433 /**
2434  * bg_migration_completion: Used by bg_migration_thread after all the
2435  *   RAM has been saved. The caller 'breaks' the loop when this returns.
2436  *
2437  * @s: Current migration state
2438  */
2439 static void bg_migration_completion(MigrationState *s)
2440 {
2441     int current_active_state = s->state;
2442 
2443     if (s->state == MIGRATION_STATUS_ACTIVE) {
2444         /*
2445          * By this moment we have RAM content saved into the migration stream.
2446          * The next step is to flush the non-RAM content (device state)
2447          * right after the ram content. The device state has been stored into
2448          * the temporary buffer before RAM saving started.
2449          */
2450         qemu_put_buffer(s->to_dst_file, s->bioc->data, s->bioc->usage);
2451         qemu_fflush(s->to_dst_file);
2452     } else if (s->state == MIGRATION_STATUS_CANCELLING) {
2453         goto fail;
2454     }
2455 
2456     if (qemu_file_get_error(s->to_dst_file)) {
2457         trace_migration_completion_file_err();
2458         goto fail;
2459     }
2460 
2461     migrate_set_state(&s->state, current_active_state,
2462                       MIGRATION_STATUS_COMPLETED);
2463     return;
2464 
2465 fail:
2466     migrate_set_state(&s->state, current_active_state,
2467                       MIGRATION_STATUS_FAILED);
2468 }
2469 
2470 typedef enum MigThrError {
2471     /* No error detected */
2472     MIG_THR_ERR_NONE = 0,
2473     /* Detected error, but resumed successfully */
2474     MIG_THR_ERR_RECOVERED = 1,
2475     /* Detected fatal error, need to exit */
2476     MIG_THR_ERR_FATAL = 2,
2477 } MigThrError;
2478 
2479 static int postcopy_resume_handshake(MigrationState *s)
2480 {
2481     qemu_savevm_send_postcopy_resume(s->to_dst_file);
2482 
2483     while (s->state == MIGRATION_STATUS_POSTCOPY_RECOVER) {
2484         qemu_sem_wait(&s->rp_state.rp_sem);
2485     }
2486 
2487     if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
2488         return 0;
2489     }
2490 
2491     return -1;
2492 }
2493 
2494 /* Return zero if success, or <0 for error */
2495 static int postcopy_do_resume(MigrationState *s)
2496 {
2497     int ret;
2498 
2499     /*
2500      * Call all the resume_prepare() hooks, so that modules can be
2501      * ready for the migration resume.
2502      */
2503     ret = qemu_savevm_state_resume_prepare(s);
2504     if (ret) {
2505         error_report("%s: resume_prepare() failure detected: %d",
2506                      __func__, ret);
2507         return ret;
2508     }
2509 
2510     /*
2511      * If preempt is enabled, re-establish the preempt channel.  Note that
2512      * we do it after resume prepare to make sure the main channel will be
2513      * created before the preempt channel.  E.g. with a weak network, the
2514      * dest QEMU may otherwise get confused about the order in which the
2515      * preempt and main channels connected.  This guarantees the correct order.
2516      */
2517     ret = postcopy_preempt_establish_channel(s);
2518     if (ret) {
2519         error_report("%s: postcopy_preempt_establish_channel(): %d",
2520                      __func__, ret);
2521         return ret;
2522     }
2523 
2524     /*
2525      * Last handshake with destination on the resume (destination will
2526      * switch to postcopy-active afterwards)
2527      */
2528     ret = postcopy_resume_handshake(s);
2529     if (ret) {
2530         error_report("%s: handshake failed: %d", __func__, ret);
2531         return ret;
2532     }
2533 
2534     return 0;
2535 }
2536 
2537 /*
2538  * We don't return until we are in a safe state to continue the current
2539  * postcopy migration.  Returns MIG_THR_ERR_RECOVERED if recovered, or
2540  * MIG_THR_ERR_FATAL if an unrecoverable failure happened.
2541  */
2542 static MigThrError postcopy_pause(MigrationState *s)
2543 {
2544     assert(s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
2545 
2546     while (true) {
2547         QEMUFile *file;
2548 
2549         /*
2550          * Current channel is possibly broken. Release it.  Note that this is
2551          * guaranteed even without the lock because to_dst_file should only be
2552          * modified by the migration thread.  That also guarantees that the
2553          * unregister of yank is safe too without the lock.  It should be safe
2554          * even to be within the qemu_file_lock, but we didn't do that to avoid
2555          * taking more mutex (yank_lock) within qemu_file_lock.  TL;DR: we make
2556          * the qemu_file_lock critical section as small as possible.
2557          */
2558         assert(s->to_dst_file);
2559         migration_ioc_unregister_yank_from_file(s->to_dst_file);
2560         qemu_mutex_lock(&s->qemu_file_lock);
2561         file = s->to_dst_file;
2562         s->to_dst_file = NULL;
2563         qemu_mutex_unlock(&s->qemu_file_lock);
2564 
2565         qemu_file_shutdown(file);
2566         qemu_fclose(file);
2567 
2568         migrate_set_state(&s->state, s->state,
2569                           MIGRATION_STATUS_POSTCOPY_PAUSED);
2570 
2571         error_report("Detected IO failure for postcopy. "
2572                      "Migration paused.");
2573 
2574         /*
2575          * We wait until things are fixed up. Then someone will set the
2576          * status back for us.
2577          */
2578         while (s->state == MIGRATION_STATUS_POSTCOPY_PAUSED) {
2579             qemu_sem_wait(&s->postcopy_pause_sem);
2580         }
2581 
2582         if (s->state == MIGRATION_STATUS_POSTCOPY_RECOVER) {
2583             /* Woken up by a recover procedure. Give it a shot */
2584 
2585             /*
2586              * Firstly, let's wake up the return path now, with a new
2587              * return path channel.
2588              */
2589             qemu_sem_post(&s->postcopy_pause_rp_sem);
2590 
2591             /* Do the resume logic */
2592             if (postcopy_do_resume(s) == 0) {
2593                 /* Let's continue! */
2594                 trace_postcopy_pause_continued();
2595                 return MIG_THR_ERR_RECOVERED;
2596             } else {
2597                 /*
2598                  * Something went wrong during the recovery; let's
2599                  * pause again. Pause is always better than throwing
2600                  * data away.
2601                  */
2602                 continue;
2603             }
2604         } else {
2605             /* This is not right... Time to quit. */
2606             return MIG_THR_ERR_FATAL;
2607         }
2608     }
2609 }
2610 
2611 static MigThrError migration_detect_error(MigrationState *s)
2612 {
2613     int ret;
2614     int state = s->state;
2615     Error *local_error = NULL;
2616 
2617     if (state == MIGRATION_STATUS_CANCELLING ||
2618         state == MIGRATION_STATUS_CANCELLED) {
2619         /* End the migration, but don't set the state to failed */
2620         return MIG_THR_ERR_FATAL;
2621     }
2622 
2623     /*
2624      * Try to detect any file errors.  Note that postcopy_qemufile_src will
2625      * be NULL when postcopy preempt is not enabled.
2626      */
2627     ret = qemu_file_get_error_obj_any(s->to_dst_file,
2628                                       s->postcopy_qemufile_src,
2629                                       &local_error);
2630     if (!ret) {
2631         /* Everything is fine */
2632         assert(!local_error);
2633         return MIG_THR_ERR_NONE;
2634     }
2635 
2636     if (local_error) {
2637         migrate_set_error(s, local_error);
2638         error_free(local_error);
2639     }
2640 
2641     if (state == MIGRATION_STATUS_POSTCOPY_ACTIVE && ret) {
2642         /*
2643          * For postcopy, we allow the network to be down for a
2644          * while. After that, it can be continued by a
2645          * recovery phase.
2646          */
2647         return postcopy_pause(s);
2648     } else {
2649         /*
2650          * For precopy (or postcopy with an error outside IO), we fail
2651          * immediately.
2652          */
2653         migrate_set_state(&s->state, state, MIGRATION_STATUS_FAILED);
2654         trace_migration_thread_file_err();
2655 
2656         /* Time to stop the migration, now. */
2657         return MIG_THR_ERR_FATAL;
2658     }
2659 }
2660 
2661 static void migration_calculate_complete(MigrationState *s)
2662 {
2663     uint64_t bytes = migration_transferred_bytes(s->to_dst_file);
2664     int64_t end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
2665     int64_t transfer_time;
2666 
2667     s->total_time = end_time - s->start_time;
2668     if (!s->downtime) {
2669         /*
2670          * It's still not set, so this is a precopy migration.  For
2671          * postcopy, downtime is calculated during postcopy_start().
2672          */
2673         s->downtime = end_time - s->downtime_start;
2674     }
2675 
2676     transfer_time = s->total_time - s->setup_time;
2677     if (transfer_time) {
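        /*
         * bytes * 8.0 gives bits; transfer_time is in milliseconds, so
         * bits/ms divided by 1000 yields megabits per second.
         */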
2678         s->mbps = ((double) bytes * 8.0) / transfer_time / 1000;
2679     }
2680 }
2681 
2682 static void update_iteration_initial_status(MigrationState *s)
2683 {
2684     /*
2685      * Update these three fields at the same time to avoid mismatched info
2686      * leading to a wrong speed calculation.
2687      */
2688     s->iteration_start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
2689     s->iteration_initial_bytes = migration_transferred_bytes(s->to_dst_file);
2690     s->iteration_initial_pages = ram_get_total_transferred_pages();
2691 }
2692 
2693 static void migration_update_counters(MigrationState *s,
2694                                       int64_t current_time)
2695 {
2696     uint64_t transferred, transferred_pages, time_spent;
2697     uint64_t current_bytes; /* bytes transferred since the beginning */
2698     double bandwidth;
2699 
2700     if (current_time < s->iteration_start_time + BUFFER_DELAY) {
2701         return;
2702     }
2703 
2704     current_bytes = migration_transferred_bytes(s->to_dst_file);
2705     transferred = current_bytes - s->iteration_initial_bytes;
2706     time_spent = current_time - s->iteration_start_time;
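    /*
     * bandwidth is measured in bytes per millisecond; multiplying it by the
     * downtime limit (in ms) gives the amount of data that could still be
     * transferred within the allowed downtime, which is used below as the
     * switchover threshold.
     */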
2707     bandwidth = (double)transferred / time_spent;
2708     s->threshold_size = bandwidth * migrate_downtime_limit();
2709 
2710     s->mbps = (((double) transferred * 8.0) /
2711                ((double) time_spent / 1000.0)) / 1000.0 / 1000.0;
2712 
2713     transferred_pages = ram_get_total_transferred_pages() -
2714                             s->iteration_initial_pages;
2715     s->pages_per_second = (double) transferred_pages /
2716                              (((double) time_spent / 1000.0));
2717 
2718     /*
2719      * if we haven't sent anything, we don't want to
2720      * recalculate. 10000 is a small enough number for our purposes
2721      */
2722     if (stat64_get(&mig_stats.dirty_pages_rate) &&
2723         transferred > 10000) {
2724         s->expected_downtime =
2725             stat64_get(&mig_stats.dirty_bytes_last_sync) / bandwidth;
2726     }
2727 
2728     migration_rate_reset(s->to_dst_file);
2729 
2730     update_iteration_initial_status(s);
2731 
2732     trace_migrate_transferred(transferred, time_spent,
2733                               bandwidth, s->threshold_size);
2734 }
2735 
2736 static bool migration_can_switchover(MigrationState *s)
2737 {
2738     if (!migrate_switchover_ack()) {
2739         return true;
2740     }
2741 
2742     /* No reason to wait for switchover ACK if VM is stopped */
2743     if (!runstate_is_running()) {
2744         return true;
2745     }
2746 
2747     return s->switchover_acked;
2748 }
2749 
2750 /* Migration thread iteration status */
2751 typedef enum {
2752     MIG_ITERATE_RESUME,         /* Resume current iteration */
2753     MIG_ITERATE_SKIP,           /* Skip current iteration */
2754     MIG_ITERATE_BREAK,          /* Break the loop */
2755 } MigIterateState;
2756 
2757 /*
2758  * Run a single migration iteration.  Returns a MigIterateState telling the
2759  * caller whether to resume the loop, skip this iteration, or break out.
2760  */
2761 static MigIterateState migration_iteration_run(MigrationState *s)
2762 {
2763     uint64_t must_precopy, can_postcopy;
2764     Error *local_err = NULL;
2765     bool in_postcopy = s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE;
2766     bool can_switchover = migration_can_switchover(s);
2767 
2768     qemu_savevm_state_pending_estimate(&must_precopy, &can_postcopy);
2769     uint64_t pending_size = must_precopy + can_postcopy;
2770 
2771     trace_migrate_pending_estimate(pending_size, must_precopy, can_postcopy);
2772 
2773     if (must_precopy <= s->threshold_size) {
2774         qemu_savevm_state_pending_exact(&must_precopy, &can_postcopy);
2775         pending_size = must_precopy + can_postcopy;
2776         trace_migrate_pending_exact(pending_size, must_precopy, can_postcopy);
2777     }
2778 
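    /*
     * Switch over once everything left can be sent within the downtime limit
     * (threshold_size is derived from the measured bandwidth in
     * migration_update_counters()) and, when the switchover-ack capability is
     * in use, once the destination has acknowledged.
     */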
2779     if ((!pending_size || pending_size < s->threshold_size) && can_switchover) {
2780         trace_migration_thread_low_pending(pending_size);
2781         migration_completion(s);
2782         return MIG_ITERATE_BREAK;
2783     }
2784 
2785     /* Still a significant amount to transfer */
2786     if (!in_postcopy && must_precopy <= s->threshold_size && can_switchover &&
2787         qatomic_read(&s->start_postcopy)) {
2788         if (postcopy_start(s, &local_err)) {
2789             migrate_set_error(s, local_err);
2790             error_report_err(local_err);
2791         }
2792         return MIG_ITERATE_SKIP;
2793     }
2794 
2795     /* Just another iteration step */
2796     qemu_savevm_state_iterate(s->to_dst_file, in_postcopy);
2797     return MIG_ITERATE_RESUME;
2798 }
2799 
2800 static void migration_iteration_finish(MigrationState *s)
2801 {
2802     /* If we enabled cpu throttling for auto-converge, turn it off. */
2803     cpu_throttle_stop();
2804 
2805     qemu_mutex_lock_iothread();
2806     switch (s->state) {
2807     case MIGRATION_STATUS_COMPLETED:
2808         migration_calculate_complete(s);
2809         runstate_set(RUN_STATE_POSTMIGRATE);
2810         break;
2811     case MIGRATION_STATUS_COLO:
2812         assert(migrate_colo());
2813         migrate_start_colo_process(s);
2814         s->vm_old_state = RUN_STATE_RUNNING;
2815         /* Fallthrough */
2816     case MIGRATION_STATUS_FAILED:
2817     case MIGRATION_STATUS_CANCELLED:
2818     case MIGRATION_STATUS_CANCELLING:
2819         if (s->vm_old_state == RUN_STATE_RUNNING) {
2820             if (!runstate_check(RUN_STATE_SHUTDOWN)) {
2821                 vm_start();
2822             }
2823         } else {
2824             if (runstate_check(RUN_STATE_FINISH_MIGRATE)) {
2825                 runstate_set(s->vm_old_state);
2826             }
2827         }
2828         break;
2829 
2830     default:
2831         /* Should not reach here, but if so, forgive the VM. */
2832         error_report("%s: Unknown ending state %d", __func__, s->state);
2833         break;
2834     }
2835     migrate_fd_cleanup_schedule(s);
2836     qemu_mutex_unlock_iothread();
2837 }
2838 
2839 static void bg_migration_iteration_finish(MigrationState *s)
2840 {
2841     /*
2842      * Stop tracking RAM writes - un-protect memory, un-register UFFD
2843      * memory ranges, flush kernel wait queues and wake up threads
2844      * waiting for write fault to be resolved.
2845      */
2846     ram_write_tracking_stop();
2847 
2848     qemu_mutex_lock_iothread();
2849     switch (s->state) {
2850     case MIGRATION_STATUS_COMPLETED:
2851         migration_calculate_complete(s);
2852         break;
2853 
2854     case MIGRATION_STATUS_ACTIVE:
2855     case MIGRATION_STATUS_FAILED:
2856     case MIGRATION_STATUS_CANCELLED:
2857     case MIGRATION_STATUS_CANCELLING:
2858         break;
2859 
2860     default:
2861         /* Should not reach here, but if so, forgive the VM. */
2862         error_report("%s: Unknown ending state %d", __func__, s->state);
2863         break;
2864     }
2865 
2866     migrate_fd_cleanup_schedule(s);
2867     qemu_mutex_unlock_iothread();
2868 }
2869 
2870 /*
2871  * Run a single background-snapshot iteration.  Returns a MigIterateState
2872  * telling the caller whether to resume the loop or break out.
2873  */
2874 static MigIterateState bg_migration_iteration_run(MigrationState *s)
2875 {
2876     int res;
2877 
2878     res = qemu_savevm_state_iterate(s->to_dst_file, false);
2879     if (res > 0) {
2880         bg_migration_completion(s);
2881         return MIG_ITERATE_BREAK;
2882     }
2883 
2884     return MIG_ITERATE_RESUME;
2885 }
2886 
2887 void migration_make_urgent_request(void)
2888 {
2889     qemu_sem_post(&migrate_get_current()->rate_limit_sem);
2890 }
2891 
2892 void migration_consume_urgent_request(void)
2893 {
2894     qemu_sem_wait(&migrate_get_current()->rate_limit_sem);
2895 }
2896 
2897 /* Returns true if the rate limiting was broken by an urgent request */
2898 bool migration_rate_limit(void)
2899 {
2900     int64_t now = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
2901     MigrationState *s = migrate_get_current();
2902 
2903     bool urgent = false;
2904     migration_update_counters(s, now);
2905     if (migration_rate_exceeded(s->to_dst_file)) {
2906 
2907         if (qemu_file_get_error(s->to_dst_file)) {
2908             return false;
2909         }
2910         /*
2911          * Wait for a delay to do rate limiting OR
2912          * something urgent to post the semaphore.
2913          */
2914         int ms = s->iteration_start_time + BUFFER_DELAY - now;
2915         trace_migration_rate_limit_pre(ms);
2916         if (qemu_sem_timedwait(&s->rate_limit_sem, ms) == 0) {
2917             /*
2918              * We were woken by one or more urgent things but
2919              * the timedwait will have consumed one of them.
2920              * The service routine for the urgent wake will dec
2921              * the semaphore itself for each item it consumes,
2922              * so add this one we just eat back.
2923              */
2924             qemu_sem_post(&s->rate_limit_sem);
2925             urgent = true;
2926         }
2927         trace_migration_rate_limit_post(urgent);
2928     }
2929     return urgent;
2930 }
2931 
2932 /*
2933  * If failover devices are present, wait until they are completely
2934  * unplugged.
2935  */
2936 
2937 static void qemu_savevm_wait_unplug(MigrationState *s, int old_state,
2938                                     int new_state)
2939 {
2940     if (qemu_savevm_state_guest_unplug_pending()) {
2941         migrate_set_state(&s->state, old_state, MIGRATION_STATUS_WAIT_UNPLUG);
2942 
2943         while (s->state == MIGRATION_STATUS_WAIT_UNPLUG &&
2944                qemu_savevm_state_guest_unplug_pending()) {
2945             qemu_sem_timedwait(&s->wait_unplug_sem, 250);
2946         }
2947         if (s->state != MIGRATION_STATUS_WAIT_UNPLUG) {
2948             int timeout = 120; /* 30 seconds */
2949             /*
2950              * The migration has been canceled, but as we have started an
2951              * unplug we must wait for it to finish in order to be able to
2952              * plug the card back.
2953              */
2954             while (timeout-- && qemu_savevm_state_guest_unplug_pending()) {
2955                 qemu_sem_timedwait(&s->wait_unplug_sem, 250);
2956             }
2957             if (qemu_savevm_state_guest_unplug_pending() &&
2958                 !qtest_enabled()) {
2959                 warn_report("migration: partially unplugged device on "
2960                             "failure");
2961             }
2962         }
2963 
2964         migrate_set_state(&s->state, MIGRATION_STATUS_WAIT_UNPLUG, new_state);
2965     } else {
2966         migrate_set_state(&s->state, old_state, new_state);
2967     }
2968 }
2969 
2970 /*
2971  * Master migration thread on the source VM.
2972  * It drives the migration and pumps the data down the outgoing channel.
2973  */
2974 static void *migration_thread(void *opaque)
2975 {
2976     MigrationState *s = opaque;
2977     MigrationThread *thread = NULL;
2978     int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
2979     MigThrError thr_error;
2980     bool urgent = false;
2981 
2982     thread = migration_threads_add("live_migration", qemu_get_thread_id());
2983 
2984     rcu_register_thread();
2985 
2986     object_ref(OBJECT(s));
2987     update_iteration_initial_status(s);
2988 
2989     qemu_savevm_state_header(s->to_dst_file);
2990 
2991     /*
2992      * If we opened the return path, we need to make sure dst has it
2993      * opened as well.
2994      */
2995     if (s->rp_state.rp_thread_created) {
2996         /* Now tell the dest that it should open its end so it can reply */
2997         qemu_savevm_send_open_return_path(s->to_dst_file);
2998 
2999         /* And do a ping that will make stuff easier to debug */
3000         qemu_savevm_send_ping(s->to_dst_file, 1);
3001     }
3002 
3003     if (migrate_postcopy()) {
3004         /*
3005          * Tell the destination that we *might* want to do postcopy later;
3006          * if the other end can't do postcopy it should fail now, nice and
3007          * early.
3008          */
3009         qemu_savevm_send_postcopy_advise(s->to_dst_file);
3010     }
3011 
3012     if (migrate_colo()) {
3013         /* Notify migration destination that we enable COLO */
3014         qemu_savevm_send_colo_enable(s->to_dst_file);
3015     }
3016 
3017     qemu_savevm_state_setup(s->to_dst_file);
3018 
3019     qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP,
3020                                MIGRATION_STATUS_ACTIVE);
3021 
3022     s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
3023 
3024     trace_migration_thread_setup_complete();
3025 
3026     while (migration_is_active(s)) {
3027         if (urgent || !migration_rate_exceeded(s->to_dst_file)) {
3028             MigIterateState iter_state = migration_iteration_run(s);
3029             if (iter_state == MIG_ITERATE_SKIP) {
3030                 continue;
3031             } else if (iter_state == MIG_ITERATE_BREAK) {
3032                 break;
3033             }
3034         }
3035 
3036         /*
3037          * Try to detect any kind of failures, and see whether we
3038          * should stop the migration now.
3039          */
3040         thr_error = migration_detect_error(s);
3041         if (thr_error == MIG_THR_ERR_FATAL) {
3042             /* Stop migration */
3043             break;
3044         } else if (thr_error == MIG_THR_ERR_RECOVERED) {
3045             /*
3046              * Just recovered from e.g. a network failure; reset all
3047              * the local variables. This is important to avoid
3048              * breaking the transferred_bytes and bandwidth calculation.
3049              */
3050             update_iteration_initial_status(s);
3051         }
3052 
3053         urgent = migration_rate_limit();
3054     }
3055 
3056     trace_migration_thread_after_loop();
3057     migration_iteration_finish(s);
3058     object_unref(OBJECT(s));
3059     rcu_unregister_thread();
3060     migration_threads_remove(thread);
3061     return NULL;
3062 }
3063 
3064 static void bg_migration_vm_start_bh(void *opaque)
3065 {
3066     MigrationState *s = opaque;
3067 
3068     qemu_bh_delete(s->vm_start_bh);
3069     s->vm_start_bh = NULL;
3070 
3071     vm_start();
3072     s->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - s->downtime_start;
3073 }
3074 
3075 /**
3076  * Background snapshot thread, based on live migration code.
3077  * This is an alternative implementation of the live migration mechanism,
3078  * introduced specifically to support background snapshots.
3079  *
3080  * It takes advantage of the userfault_fd write-protection mechanism introduced
3081  * in the v5.7 kernel. Compared to the existing dirty page logging migration,
3082  * much less stream traffic is produced, resulting in smaller snapshot images,
3083  * simply because no page duplicates can get into the stream.
3084  *
3085  * Another key point is that the generated vmstate stream reflects the machine
3086  * state 'frozen' at the beginning of snapshot creation, whereas with the dirty
3087  * page logging mechanism the saved snapshot is effectively the state of the VM
3088  * at the end of the process.
3089  */
3090 static void *bg_migration_thread(void *opaque)
3091 {
3092     MigrationState *s = opaque;
3093     int64_t setup_start;
3094     MigThrError thr_error;
3095     QEMUFile *fb;
3096     bool early_fail = true;
3097 
3098     rcu_register_thread();
3099     object_ref(OBJECT(s));
3100 
3101     migration_rate_set(RATE_LIMIT_DISABLED);
3102 
3103     setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
3104     /*
3105      * We want to save the vmstate for the moment when the migration was
3106      * initiated, but we also want to save RAM content while the VM is
3107      * running. The RAM content should appear first in the vmstate. So, we
3108      * first stash the non-RAM part of the vmstate in a temporary buffer,
3109      * then write the RAM part of the vmstate to the migration stream
3110      * with vCPUs running and, finally, write the stashed non-RAM part of
3111      * the vmstate from the buffer to the migration stream.
3112      */
3113     s->bioc = qio_channel_buffer_new(512 * 1024);
3114     qio_channel_set_name(QIO_CHANNEL(s->bioc), "vmstate-buffer");
3115     fb = qemu_file_new_output(QIO_CHANNEL(s->bioc));
3116     object_unref(OBJECT(s->bioc));
3117 
3118     update_iteration_initial_status(s);
3119 
3120     /*
3121      * Prepare for tracking memory writes with UFFD-WP - populate
3122      * RAM pages before protecting.
3123      */
3124 #ifdef __linux__
3125     ram_write_tracking_prepare();
3126 #endif
3127 
3128     qemu_savevm_state_header(s->to_dst_file);
3129     qemu_savevm_state_setup(s->to_dst_file);
3130 
3131     qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP,
3132                                MIGRATION_STATUS_ACTIVE);
3133 
3134     s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
3135 
3136     trace_migration_thread_setup_complete();
3137     s->downtime_start = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
3138 
3139     qemu_mutex_lock_iothread();
3140 
3141     /*
3142      * If the VM is currently in a suspended state, then, to make a valid
3143      * runstate transition in vm_stop_force_state(), we need to wake it up.
3144      */
3145     qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL);
3146     s->vm_old_state = runstate_get();
3147 
3148     global_state_store();
3149     /* Forcibly stop VM before saving state of vCPUs and devices */
3150     if (vm_stop_force_state(RUN_STATE_PAUSED)) {
3151         goto fail;
3152     }
3153     /*
3154      * Put vCPUs in sync with shadow context structures, then
3155      * save their state to channel-buffer along with devices.
3156      */
3157     cpu_synchronize_all_states();
3158     if (qemu_savevm_state_complete_precopy_non_iterable(fb, false, false)) {
3159         goto fail;
3160     }
3161     /*
3162      * Since we are going to get non-iterable state data directly
3163      * from s->bioc->data, an explicit flush is needed here.
3164      */
3165     qemu_fflush(fb);
3166 
3167     /* Now initialize UFFD context and start tracking RAM writes */
3168     if (ram_write_tracking_start()) {
3169         goto fail;
3170     }
3171     early_fail = false;
3172 
3173     /*
3174      * Start the VM from the BH handler to avoid a write-fault lock here.
3175      * UFFD-WP protection for the whole RAM is already enabled, so
3176      * calling VM state change notifiers from vm_start() would initiate
3177      * writes to virtio VQ memory which is in the write-protected region.
3178      */
3179     s->vm_start_bh = qemu_bh_new(bg_migration_vm_start_bh, s);
3180     qemu_bh_schedule(s->vm_start_bh);
3181 
3182     qemu_mutex_unlock_iothread();
3183 
3184     while (migration_is_active(s)) {
3185         MigIterateState iter_state = bg_migration_iteration_run(s);
3186         if (iter_state == MIG_ITERATE_SKIP) {
3187             continue;
3188         } else if (iter_state == MIG_ITERATE_BREAK) {
3189             break;
3190         }
3191 
3192         /*
3193          * Try to detect any kind of failure, and decide whether we
3194          * should stop the migration now.
3195          */
3196         thr_error = migration_detect_error(s);
3197         if (thr_error == MIG_THR_ERR_FATAL) {
3198             /* Stop migration */
3199             break;
3200         }
3201 
3202         migration_update_counters(s, qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
3203     }
3204 
3205     trace_migration_thread_after_loop();
3206 
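         /*
          * Early failures jump here with the iothread lock still held and the
          * state still ACTIVE, hence the early_fail block below; the normal
          * path falls through from the loop with the lock already released.
          */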
3207 fail:
3208     if (early_fail) {
3209         migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
3210                 MIGRATION_STATUS_FAILED);
3211         qemu_mutex_unlock_iothread();
3212     }
3213 
3214     bg_migration_iteration_finish(s);
3215 
3216     qemu_fclose(fb);
3217     object_unref(OBJECT(s));
3218     rcu_unregister_thread();
3219 
3220     return NULL;
3221 }
3222 
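     /*
      * Start (or resume) an outgoing migration on an already-connected
      * channel.  @error_in carries a channel setup error, in which case the
      * migration is failed (or, for a postcopy resume, only reported).
      * Otherwise this applies the bandwidth limit, opens the return path when
      * needed and either wakes up the paused migration thread (postcopy
      * recovery) or spawns a new migration thread.
      */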
3223 void migrate_fd_connect(MigrationState *s, Error *error_in)
3224 {
3225     Error *local_err = NULL;
3226     uint64_t rate_limit;
3227     bool resume = s->state == MIGRATION_STATUS_POSTCOPY_PAUSED;
3228 
3229     /*
3230      * If there's a previous error, free it and prepare for another one.
3231      * Meanwhile, if migration completes successfully, no stale error will
3232      * be dumped when calling migrate_fd_cleanup().
3233      */
3234     migrate_error_free(s);
3235 
3236     s->expected_downtime = migrate_downtime_limit();
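         /*
          * A resumed postcopy reuses the cleanup BH created when the
          * migration was originally started; a fresh migration creates it
          * here.
          */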
3237     if (resume) {
3238         assert(s->cleanup_bh);
3239     } else {
3240         assert(!s->cleanup_bh);
3241         s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup_bh, s);
3242     }
3243     if (error_in) {
3244         migrate_fd_error(s, error_in);
3245         if (resume) {
3246             /*
3247              * Don't do cleanup for a resume when the channel is invalid;
3248              * only dump the error and wait for the user to connect another
3249              * channel.  The error_report still gives the HMP user a hint on
3250              * what failed.  It's normally done in migrate_fd_cleanup(), but
3251              * call it here explicitly.
3252              */
3253             error_report_err(error_copy(s->error));
3254         } else {
3255             migrate_fd_cleanup(s);
3256         }
3257         return;
3258     }
3259 
3260     if (resume) {
3261         /* This is a resumed migration */
3262         rate_limit = migrate_max_postcopy_bandwidth();
3263     } else {
3264         /* This is a fresh new migration */
3265         rate_limit = migrate_max_bandwidth();
3266 
3267         /* Notify before starting migration thread */
3268         notifier_list_notify(&migration_state_notifiers, s);
3269     }
3270 
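         /*
          * Apply the chosen bandwidth limit and switch the outgoing file to
          * blocking mode before handing it over to the migration thread.
          */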
3271     migration_rate_set(rate_limit);
3272     qemu_file_set_blocking(s->to_dst_file, true);
3273 
3274     /*
3275      * Open the return path. For postcopy, it is used exclusively. For
3276      * precopy, QEMU uses the return path only if the user enabled the
3277      * "return-path" capability.
3278      */
3279     if (migrate_postcopy_ram() || migrate_return_path()) {
3280         if (open_return_path_on_source(s, !resume)) {
3281             error_setg(&local_err, "Unable to open return-path for postcopy");
3282             migrate_set_state(&s->state, s->state, MIGRATION_STATUS_FAILED);
3283             migrate_set_error(s, local_err);
3284             error_report_err(local_err);
3285             migrate_fd_cleanup(s);
3286             return;
3287         }
3288     }
3289 
3290     /*
3291      * This needs to be done before resuming a postcopy.  Note: for newer
3292      * QEMUs we delay the channel creation until postcopy_start(), to
3293      * avoid channels being created out of order.
3294      */
3295     if (migrate_postcopy_preempt() && s->preempt_pre_7_2) {
3296         postcopy_preempt_setup(s);
3297     }
3298 
3299     if (resume) {
3300         /* Wake up the main migration thread to do the recovery */
3301         migrate_set_state(&s->state, MIGRATION_STATUS_POSTCOPY_PAUSED,
3302                           MIGRATION_STATUS_POSTCOPY_RECOVER);
3303         qemu_sem_post(&s->postcopy_pause_sem);
3304         return;
3305     }
3306 
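         /*
          * Create the multifd send channels (effectively a no-op when the
          * multifd capability is disabled) before spawning the migration
          * thread.
          */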
3307     if (multifd_save_setup(&local_err) != 0) {
3308         migrate_set_error(s, local_err);
3309         error_report_err(local_err);
3310         migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
3311                           MIGRATION_STATUS_FAILED);
3312         migrate_fd_cleanup(s);
3313         return;
3314     }
3315 
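         /*
          * Finally spawn the worker thread: bg_migration_thread() when the
          * background-snapshot capability is set, the regular live migration
          * thread otherwise.
          */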
3316     if (migrate_background_snapshot()) {
3317         qemu_thread_create(&s->thread, "bg_snapshot",
3318                 bg_migration_thread, s, QEMU_THREAD_JOINABLE);
3319     } else {
3320         qemu_thread_create(&s->thread, "live_migration",
3321                 migration_thread, s, QEMU_THREAD_JOINABLE);
3322     }
3323     s->migration_thread_running = true;
3324 }
3325 
3326 static void migration_class_init(ObjectClass *klass, void *data)
3327 {
3328     DeviceClass *dc = DEVICE_CLASS(klass);
3329 
3330     dc->user_creatable = false;
3331     device_class_set_props(dc, migration_properties);
3332 }
3333 
3334 static void migration_instance_finalize(Object *obj)
3335 {
3336     MigrationState *ms = MIGRATION_OBJ(obj);
3337 
3338     qemu_mutex_destroy(&ms->error_mutex);
3339     qemu_mutex_destroy(&ms->qemu_file_lock);
3340     qemu_sem_destroy(&ms->wait_unplug_sem);
3341     qemu_sem_destroy(&ms->rate_limit_sem);
3342     qemu_sem_destroy(&ms->pause_sem);
3343     qemu_sem_destroy(&ms->postcopy_pause_sem);
3344     qemu_sem_destroy(&ms->postcopy_pause_rp_sem);
3345     qemu_sem_destroy(&ms->rp_state.rp_sem);
3346     qemu_sem_destroy(&ms->rp_state.rp_pong_acks);
3347     qemu_sem_destroy(&ms->postcopy_qemufile_src_sem);
3348     error_free(ms->error);
3349 }
3350 
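     /*
      * Default-initialize a fresh MigrationState: statistics start out as
      * "unknown" (-1) and the synchronization primitives created here are
      * destroyed again in migration_instance_finalize().
      */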
3351 static void migration_instance_init(Object *obj)
3352 {
3353     MigrationState *ms = MIGRATION_OBJ(obj);
3354 
3355     ms->state = MIGRATION_STATUS_NONE;
3356     ms->mbps = -1;
3357     ms->pages_per_second = -1;
3358     qemu_sem_init(&ms->pause_sem, 0);
3359     qemu_mutex_init(&ms->error_mutex);
3360 
3361     migrate_params_init(&ms->parameters);
3362 
3363     qemu_sem_init(&ms->postcopy_pause_sem, 0);
3364     qemu_sem_init(&ms->postcopy_pause_rp_sem, 0);
3365     qemu_sem_init(&ms->rp_state.rp_sem, 0);
3366     qemu_sem_init(&ms->rp_state.rp_pong_acks, 0);
3367     qemu_sem_init(&ms->rate_limit_sem, 0);
3368     qemu_sem_init(&ms->wait_unplug_sem, 0);
3369     qemu_sem_init(&ms->postcopy_qemufile_src_sem, 0);
3370     qemu_mutex_init(&ms->qemu_file_lock);
3371 }
3372 
3373 /*
3374  * Return true if the check passes, false otherwise. On failure the error
3375  * is put inside errp if provided.
3376  */
3377 static bool migration_object_check(MigrationState *ms, Error **errp)
3378 {
3379     /* Assuming all off */
3380     bool old_caps[MIGRATION_CAPABILITY__MAX] = { 0 };
3381 
3382     if (!migrate_params_check(&ms->parameters, errp)) {
3383         return false;
3384     }
3385 
3386     return migrate_caps_check(old_caps, ms->capabilities, errp);
3387 }
3388 
3389 static const TypeInfo migration_type = {
3390     .name = TYPE_MIGRATION,
3391     /*
3392      * NOTE: TYPE_MIGRATION is not really a device, as the object is
3393      * not created using qdev_new(), it is not attached to the qdev
3394      * device tree, and it is never realized.
3395      *
3396      * TODO: Make this TYPE_OBJECT once QOM provides something like
3397      * TYPE_DEVICE's "-global" properties.
3398      */
3399     .parent = TYPE_DEVICE,
3400     .class_init = migration_class_init,
3401     .class_size = sizeof(MigrationClass),
3402     .instance_size = sizeof(MigrationState),
3403     .instance_init = migration_instance_init,
3404     .instance_finalize = migration_instance_finalize,
3405 };
3406 
3407 static void register_migration_types(void)
3408 {
3409     type_register_static(&migration_type);
3410 }
3411 
3412 type_init(register_migration_types);
3413