/*
 * QEMU live migration
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_MIGRATION_H
#define QEMU_MIGRATION_H

#include "qemu-common.h"
#include "qemu/thread.h"
#include "exec/cpu-common.h"
#include "qemu/coroutine_int.h"
#include "hw/qdev.h"
#include "io/channel.h"

/* State for the incoming migration */
struct MigrationIncomingState {
    QEMUFile *from_src_file;

    /*
     * Free at the start of the main state load, set as the main thread finishes
     * loading state.
     */
    QemuEvent main_thread_load_event;

    size_t         largest_page_size;
    bool           have_fault_thread;
    QemuThread     fault_thread;
    QemuSemaphore  fault_thread_sem;

    bool           have_listen_thread;
    QemuThread     listen_thread;
    QemuSemaphore  listen_thread_sem;

    /* For the kernel to send us notifications */
    int       userfault_fd;
    /* To tell the fault_thread to quit */
    int       userfault_quit_fd;
    QEMUFile *to_src_file;
    QemuMutex rp_mutex;    /* We send replies from multiple threads */
    void     *postcopy_tmp_page;
    void     *postcopy_tmp_zero_page;

    QEMUBH *bh;

    int state;

    bool have_colo_incoming_thread;
    QemuThread colo_incoming_thread;
    /* The coroutine we should enter (back) after failover */
    Coroutine *migration_incoming_co;
    QemuSemaphore colo_incoming_sem;
};

MigrationIncomingState *migration_incoming_get_current(void);
void migration_incoming_state_destroy(void);
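
/*
 * Illustrative incoming-side usage (a sketch, not code from this tree; the
 * call site is an assumption): the per-incoming-migration singleton is
 * fetched with migration_incoming_get_current() and torn down once the
 * incoming stream has been fully processed, e.g.:
 *
 *     MigrationIncomingState *mis = migration_incoming_get_current();
 *     mis->from_src_file = f;    // attach the stream from the source
 *     // ... load device state ...
 *     migration_incoming_state_destroy();
 */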

#define TYPE_MIGRATION "migration"

#define MIGRATION_CLASS(klass) \
    OBJECT_CLASS_CHECK(MigrationClass, (klass), TYPE_MIGRATION)
#define MIGRATION_OBJ(obj) \
    OBJECT_CHECK(MigrationState, (obj), TYPE_MIGRATION)
#define MIGRATION_GET_CLASS(obj) \
    OBJECT_GET_CLASS(MigrationClass, (obj), TYPE_MIGRATION)
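
/*
 * Illustrative QOM usage (a sketch; the surrounding code is an assumption):
 * the cast macros check and convert a generic Object/ObjectClass pointer to
 * the migration types, e.g.:
 *
 *     Object *obj = object_resolve_path_type("", TYPE_MIGRATION, NULL);
 *     MigrationState *s = MIGRATION_OBJ(obj);
 *     MigrationClass *mc = MIGRATION_GET_CLASS(obj);
 */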

typedef struct MigrationClass {
    /*< private >*/
    DeviceClass parent_class;
} MigrationClass;

struct MigrationState
{
    /*< private >*/
    DeviceState parent_obj;

    /*< public >*/
    size_t bytes_xfer;
    size_t xfer_limit;
    QemuThread thread;
    QEMUBH *cleanup_bh;
    QEMUFile *to_dst_file;

    /* bytes already sent at the beginning of the current iteration */
    uint64_t iteration_initial_bytes;
    /* time at the start of current iteration */
    int64_t iteration_start_time;
    /*
     * The final stage happens when the remaining data is smaller than
     * this threshold; it's calculated from the requested downtime and
     * measured bandwidth
     */
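    /*
     * (Illustrative only, not from this header: with a measured bandwidth of
     * roughly 1 GB/s and a requested downtime limit of 300 ms, the threshold
     * would come out at around 300 MB of remaining data.)
     */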
    int64_t threshold_size;

    /* params from 'migrate-set-parameters' */
    MigrationParameters parameters;

    int state;

    /* State related to return path */
    struct {
        QEMUFile     *from_dst_file;
        QemuThread    rp_thread;
        bool          error;
    } rp_state;

    double mbps;
    /* Timestamp when recent migration starts (ms) */
    int64_t start_time;
    /* Total time used by latest migration (ms) */
    int64_t total_time;
    /* Timestamp (ms) when the VM was stopped to migrate the remaining state */
    int64_t downtime_start;
    int64_t downtime;
    int64_t expected_downtime;
    bool enabled_capabilities[MIGRATION_CAPABILITY__MAX];
    int64_t setup_time;
    /*
     * Whether the guest was running when we entered the completion stage.
     * If migration is interrupted for any reason, we need to keep the
     * guest running on the source.
     */
    bool vm_was_running;

    /* Flag set once the migration has been asked to enter postcopy */
    bool start_postcopy;
    /* Flag set after postcopy has sent the device state */
    bool postcopy_after_devices;

    /* Flag set once the migration thread is running (and needs joining) */
    bool migration_thread_running;

    /* Flag set once the migration thread called bdrv_inactivate_all */
    bool block_inactive;

    /* Migration is paused due to pause-before-switchover */
    QemuSemaphore pause_sem;

    /* Semaphore used to notify the COLO thread that failover has finished */
    QemuSemaphore colo_exit_sem;

    /* Semaphore used to notify the COLO thread to take a checkpoint */
    QemuSemaphore colo_checkpoint_sem;
    int64_t colo_checkpoint_time;
    QEMUTimer *colo_delay_timer;

    /*
     * The first error that has occurred.
     * We use the mutex to be able to return the first error message.
     */
    Error *error;
    /* Mutex to protect the error field above */
    QemuMutex error_mutex;

    /* Do we have to clean up -b/-i from old migrate parameters */
    /* This feature is deprecated and will be removed */
    bool must_remove_block_options;

    /*
     * Global switch on whether we need to store the global state
     * during migration.
     */
    bool store_global_state;

    /* Whether the VM only allows migratable devices */
    bool only_migratable;

    /* Whether we send QEMU_VM_CONFIGURATION during migration */
    bool send_configuration;
    /* Whether we send section footer during migration */
    bool send_section_footer;
};

void migrate_set_state(int *state, int old_state, int new_state);
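
/*
 * Illustrative transition (a sketch; the call site is an assumption and the
 * states come from the QAPI MigrationStatus enum): the state is only changed
 * if it still holds the expected old value, e.g.:
 *
 *     migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
 *                       MIGRATION_STATUS_COMPLETED);
 */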

void migration_fd_process_incoming(QEMUFile *f);
void migration_ioc_process_incoming(QIOChannel *ioc);

bool  migration_has_all_channels(void);

uint64_t migrate_max_downtime(void);

void migrate_set_error(MigrationState *s, const Error *error);
void migrate_fd_error(MigrationState *s, const Error *error);

void migrate_fd_connect(MigrationState *s, Error *error_in);

MigrationState *migrate_init(void);
bool migration_is_blocked(Error **errp);
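
/*
 * Illustrative caller-side check (a sketch; the call site and error-handling
 * policy are assumptions): migration_is_blocked() fills *errp with the reason
 * when something blocks migration, e.g.:
 *
 *     Error *local_err = NULL;
 *     if (migration_is_blocked(&local_err)) {
 *         error_report_err(local_err);
 *         return;
 *     }
 */
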
/* True if outgoing migration has entered postcopy phase */
bool migration_in_postcopy(void);
MigrationState *migrate_get_current(void);
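
/*
 * Illustrative outgoing-side access (a sketch, not code from this tree):
 * there is a single MigrationState object, fetched wherever the current
 * migration's state or parameters are needed, e.g.:
 *
 *     MigrationState *s = migrate_get_current();
 *     if (s->state == MIGRATION_STATUS_ACTIVE) {
 *         int64_t limit = s->parameters.downtime_limit;
 *         // ...
 *     }
 */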

bool migrate_postcopy(void);

bool migrate_release_ram(void);
bool migrate_postcopy_ram(void);
bool migrate_zero_blocks(void);

bool migrate_auto_converge(void);
bool migrate_use_multifd(void);
bool migrate_pause_before_switchover(void);
int migrate_multifd_channels(void);
int migrate_multifd_page_count(void);

int migrate_use_xbzrle(void);
int64_t migrate_xbzrle_cache_size(void);
bool migrate_colo_enabled(void);

bool migrate_use_block(void);
bool migrate_use_block_incremental(void);
bool migrate_use_return_path(void);

bool migrate_use_compression(void);
int migrate_compress_level(void);
int migrate_compress_threads(void);
int migrate_decompress_threads(void);
bool migrate_use_events(void);
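
/*
 * Illustrative use of the capability/parameter helpers (a sketch; the calling
 * code is an assumption): they are queried to pick a code path or to size a
 * resource, e.g.:
 *
 *     if (migrate_use_compression()) {
 *         int level = migrate_compress_level();
 *         int threads = migrate_compress_threads();
 *         // ... set up the compression threads ...
 *     }
 */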

/* Sending on the return path - generic and then for each message type */
void migrate_send_rp_shut(MigrationIncomingState *mis,
                          uint32_t value);
void migrate_send_rp_pong(MigrationIncomingState *mis,
                          uint32_t value);
void migrate_send_rp_req_pages(MigrationIncomingState *mis, const char* rbname,
                              ram_addr_t start, size_t len);
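
/*
 * Illustrative return-path request (a sketch; rbname, start and len are
 * placeholders for the faulting RAMBlock name, offset and length): during
 * postcopy the destination asks the source to send the pages backing a
 * faulting address, e.g.:
 *
 *     MigrationIncomingState *mis = migration_incoming_get_current();
 *     migrate_send_rp_req_pages(mis, rbname, start, len);
 */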

#endif