/*
 * QEMU migration capabilities
 *
 * Copyright (c) 2012-2023 Red Hat Inc
 *
 * Authors:
 *   Orit Wasserman <owasserm@redhat.com>
 *   Juan Quintela <quintela@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-migration.h"
#include "qapi/qmp/qerror.h"
#include "sysemu/runstate.h"
#include "migration.h"
#include "ram.h"
#include "options.h"

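/*
 * Capability accessors: each helper below returns whether the named
 * migration capability is currently enabled on the current
 * MigrationState.
 */
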
bool migrate_auto_converge(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE];
}

bool migrate_background_snapshot(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->capabilities[MIGRATION_CAPABILITY_BACKGROUND_SNAPSHOT];
}

bool migrate_block(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->capabilities[MIGRATION_CAPABILITY_BLOCK];
}

bool migrate_colo(void)
{
    MigrationState *s = migrate_get_current();
    return s->capabilities[MIGRATION_CAPABILITY_X_COLO];
}

bool migrate_compress(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->capabilities[MIGRATION_CAPABILITY_COMPRESS];
}

bool migrate_dirty_bitmaps(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->capabilities[MIGRATION_CAPABILITY_DIRTY_BITMAPS];
}

bool migrate_events(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->capabilities[MIGRATION_CAPABILITY_EVENTS];
}

bool migrate_ignore_shared(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->capabilities[MIGRATION_CAPABILITY_X_IGNORE_SHARED];
}

bool migrate_late_block_activate(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->capabilities[MIGRATION_CAPABILITY_LATE_BLOCK_ACTIVATE];
}

bool migrate_multifd(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->capabilities[MIGRATION_CAPABILITY_MULTIFD];
}

bool migrate_pause_before_switchover(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->capabilities[MIGRATION_CAPABILITY_PAUSE_BEFORE_SWITCHOVER];
}

bool migrate_postcopy_blocktime(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->capabilities[MIGRATION_CAPABILITY_POSTCOPY_BLOCKTIME];
}

bool migrate_postcopy_preempt(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->capabilities[MIGRATION_CAPABILITY_POSTCOPY_PREEMPT];
}

bool migrate_postcopy_ram(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM];
}

bool migrate_rdma_pin_all(void)
{
    MigrationState *s = migrate_get_current();

    return s->capabilities[MIGRATION_CAPABILITY_RDMA_PIN_ALL];
}

bool migrate_release_ram(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->capabilities[MIGRATION_CAPABILITY_RELEASE_RAM];
}

bool migrate_return_path(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->capabilities[MIGRATION_CAPABILITY_RETURN_PATH];
}

bool migrate_validate_uuid(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->capabilities[MIGRATION_CAPABILITY_VALIDATE_UUID];
}

bool migrate_xbzrle(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->capabilities[MIGRATION_CAPABILITY_XBZRLE];
}

bool migrate_zero_blocks(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS];
}

bool migrate_zero_copy_send(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->capabilities[MIGRATION_CAPABILITY_ZERO_COPY_SEND];
}
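
/*
 * Write-tracking support levels are ordered from least to most capable,
 * so the background-snapshot check in migrate_caps_check() can use
 * ordered comparisons against them.
 */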
typedef enum WriteTrackingSupport {
    WT_SUPPORT_UNKNOWN = 0,
    WT_SUPPORT_ABSENT,
    WT_SUPPORT_AVAILABLE,
    WT_SUPPORT_COMPATIBLE
} WriteTrackingSupport;

static
WriteTrackingSupport migrate_query_write_tracking(void)
{
    /* Check if kernel supports required UFFD features */
    if (!ram_write_tracking_available()) {
        return WT_SUPPORT_ABSENT;
    }
    /*
     * Check if current memory configuration is
     * compatible with required UFFD features.
     */
    if (!ram_write_tracking_compatible()) {
        return WT_SUPPORT_AVAILABLE;
    }

    return WT_SUPPORT_COMPATIBLE;
}

/* Migration capabilities set */
struct MigrateCapsSet {
    int size;                       /* Capability set size */
    MigrationCapability caps[];     /* Flexible array of capabilities */
};
typedef struct MigrateCapsSet MigrateCapsSet;

/* Define and initialize MigrateCapsSet */
#define INITIALIZE_MIGRATE_CAPS_SET(_name, ...)   \
    MigrateCapsSet _name = {    \
        .size = sizeof((int []) { __VA_ARGS__ }) / sizeof(int), \
        .caps = { __VA_ARGS__ } \
    }
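
/*
 * Illustrative expansion (CAP_A and CAP_B are hypothetical capabilities):
 *   static const INITIALIZE_MIGRATE_CAPS_SET(foo, CAP_A, CAP_B);
 * expands to
 *   static const MigrateCapsSet foo = { .size = 2, .caps = { CAP_A, CAP_B } };
 * The compound literal in the .size initializer simply counts the
 * variadic arguments.
 */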

/* Background-snapshot compatibility check list */
static const
INITIALIZE_MIGRATE_CAPS_SET(check_caps_background_snapshot,
    MIGRATION_CAPABILITY_POSTCOPY_RAM,
    MIGRATION_CAPABILITY_DIRTY_BITMAPS,
    MIGRATION_CAPABILITY_POSTCOPY_BLOCKTIME,
    MIGRATION_CAPABILITY_LATE_BLOCK_ACTIVATE,
    MIGRATION_CAPABILITY_RETURN_PATH,
    MIGRATION_CAPABILITY_MULTIFD,
    MIGRATION_CAPABILITY_PAUSE_BEFORE_SWITCHOVER,
    MIGRATION_CAPABILITY_AUTO_CONVERGE,
    MIGRATION_CAPABILITY_RELEASE_RAM,
    MIGRATION_CAPABILITY_RDMA_PIN_ALL,
    MIGRATION_CAPABILITY_COMPRESS,
    MIGRATION_CAPABILITY_XBZRLE,
    MIGRATION_CAPABILITY_X_COLO,
    MIGRATION_CAPABILITY_VALIDATE_UUID,
    MIGRATION_CAPABILITY_ZERO_COPY_SEND);

/**
 * @migrate_caps_check - check capability compatibility
 *
 * @old_caps: old capability list
 * @new_caps: new capability list
 * @errp: set *errp with the reason if the check fails
 *
 * Returns true if the check passed, otherwise false.
 */
bool migrate_caps_check(bool *old_caps, bool *new_caps, Error **errp)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

#ifndef CONFIG_LIVE_BLOCK_MIGRATION
    if (new_caps[MIGRATION_CAPABILITY_BLOCK]) {
        error_setg(errp, "QEMU compiled without old-style (blk/-b, inc/-i) "
                   "block migration");
        error_append_hint(errp, "Use drive_mirror+NBD instead.\n");
        return false;
    }
#endif

#ifndef CONFIG_REPLICATION
    if (new_caps[MIGRATION_CAPABILITY_X_COLO]) {
        error_setg(errp, "QEMU compiled without replication module"
                   " can't enable COLO");
        error_append_hint(errp, "Please enable replication before COLO.\n");
        return false;
    }
#endif

    if (new_caps[MIGRATION_CAPABILITY_POSTCOPY_RAM]) {
        /*
         * This check is reasonably expensive, so only run it when the
         * capability is being set for the first time; also, it is only
         * the destination that needs the special support.
         */
        if (!old_caps[MIGRATION_CAPABILITY_POSTCOPY_RAM] &&
            runstate_check(RUN_STATE_INMIGRATE) &&
            !postcopy_ram_supported_by_host(mis)) {
            /* postcopy_ram_supported_by_host will have emitted a more
             * detailed message
             */
            error_setg(errp, "Postcopy is not supported");
            return false;
        }

        if (new_caps[MIGRATION_CAPABILITY_X_IGNORE_SHARED]) {
            error_setg(errp, "Postcopy is not compatible with ignore-shared");
            return false;
        }
    }

    if (new_caps[MIGRATION_CAPABILITY_BACKGROUND_SNAPSHOT]) {
        WriteTrackingSupport wt_support;
        int idx;
        /*
         * Check if 'background-snapshot' capability is supported by
         * host kernel and compatible with guest memory configuration.
         */
        wt_support = migrate_query_write_tracking();
        if (wt_support < WT_SUPPORT_AVAILABLE) {
            error_setg(errp, "Background-snapshot is not supported by host kernel");
            return false;
        }
        if (wt_support < WT_SUPPORT_COMPATIBLE) {
            error_setg(errp, "Background-snapshot is not compatible "
                    "with guest memory configuration");
            return false;
        }

        /*
         * Check if there are any migration capabilities
         * incompatible with 'background-snapshot'.
         */
        for (idx = 0; idx < check_caps_background_snapshot.size; idx++) {
            int incomp_cap = check_caps_background_snapshot.caps[idx];
            if (new_caps[incomp_cap]) {
                error_setg(errp,
                        "Background-snapshot is not compatible with %s",
                        MigrationCapability_str(incomp_cap));
                return false;
            }
        }
    }

#ifdef CONFIG_LINUX
    if (new_caps[MIGRATION_CAPABILITY_ZERO_COPY_SEND] &&
        (!new_caps[MIGRATION_CAPABILITY_MULTIFD] ||
         new_caps[MIGRATION_CAPABILITY_COMPRESS] ||
         new_caps[MIGRATION_CAPABILITY_XBZRLE] ||
         migrate_multifd_compression() ||
         migrate_use_tls())) {
        error_setg(errp,
                   "Zero copy only available for non-compressed non-TLS multifd migration");
        return false;
    }
#else
    if (new_caps[MIGRATION_CAPABILITY_ZERO_COPY_SEND]) {
        error_setg(errp,
                   "Zero copy currently only available on Linux");
        return false;
    }
#endif

    if (new_caps[MIGRATION_CAPABILITY_POSTCOPY_PREEMPT]) {
        if (!new_caps[MIGRATION_CAPABILITY_POSTCOPY_RAM]) {
            error_setg(errp, "Postcopy preempt requires postcopy-ram");
            return false;
        }

        /*
         * Preempt mode requires urgent pages to be sent in a separate
         * channel; OTOH the compression logic will scatter pages across
         * different compression channels, which is not compatible with
         * the preempt assumptions on channel assignment.
         */
        if (new_caps[MIGRATION_CAPABILITY_COMPRESS]) {
            error_setg(errp, "Postcopy preempt not compatible with compress");
            return false;
        }
    }

    if (new_caps[MIGRATION_CAPABILITY_MULTIFD]) {
        if (new_caps[MIGRATION_CAPABILITY_COMPRESS]) {
            error_setg(errp, "Multifd is not compatible with compress");
            return false;
        }
    }

    return true;
}

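/*
 * Set a single capability to @value on the current MigrationState,
 * after validating the resulting set with migrate_caps_check().
 * Fails (returning false) while a migration is running.
 */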
bool migrate_cap_set(int cap, bool value, Error **errp)
{
    MigrationState *s = migrate_get_current();
    bool new_caps[MIGRATION_CAPABILITY__MAX];

    if (migration_is_running(s->state)) {
        error_setg(errp, QERR_MIGRATION_ACTIVE);
        return false;
    }

    memcpy(new_caps, s->capabilities, sizeof(new_caps));
    new_caps[cap] = value;

    if (!migrate_caps_check(s->capabilities, new_caps, errp)) {
        return false;
    }
    s->capabilities[cap] = value;
    return true;
}

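/*
 * QMP handlers for querying and setting migration capabilities.
 * Illustrative QMP exchange (see qapi/migration.json for the schema;
 * the capability names and values below are examples only):
 *
 *   -> { "execute": "migrate-set-capabilities",
 *        "arguments": { "capabilities": [
 *            { "capability": "xbzrle", "state": true } ] } }
 *   <- { "return": {} }
 *
 *   -> { "execute": "query-migrate-capabilities" }
 *   <- { "return": [ { "capability": "xbzrle", "state": true }, ... ] }
 */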
MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp)
{
    MigrationCapabilityStatusList *head = NULL, **tail = &head;
    MigrationCapabilityStatus *caps;
    MigrationState *s = migrate_get_current();
    int i;

    for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
#ifndef CONFIG_LIVE_BLOCK_MIGRATION
        if (i == MIGRATION_CAPABILITY_BLOCK) {
            continue;
        }
#endif
        caps = g_malloc0(sizeof(*caps));
        caps->capability = i;
        caps->state = s->capabilities[i];
        QAPI_LIST_APPEND(tail, caps);
    }

    return head;
}

void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
                                  Error **errp)
{
    MigrationState *s = migrate_get_current();
    MigrationCapabilityStatusList *cap;
    bool new_caps[MIGRATION_CAPABILITY__MAX];

    if (migration_is_running(s->state)) {
        error_setg(errp, QERR_MIGRATION_ACTIVE);
        return;
    }

    memcpy(new_caps, s->capabilities, sizeof(new_caps));
    for (cap = params; cap; cap = cap->next) {
        new_caps[cap->value->capability] = cap->value->state;
    }

    if (!migrate_caps_check(s->capabilities, new_caps, errp)) {
        return;
    }

    for (cap = params; cap; cap = cap->next) {
        s->capabilities[cap->value->capability] = cap->value->state;
    }
}

/* parameters */

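/*
 * Parameter accessors: thin wrappers returning the current value of
 * the corresponding member of MigrationState.parameters.
 */
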
bool migrate_block_incremental(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters.block_incremental;
}

int migrate_compress_level(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters.compress_level;
}

int migrate_compress_threads(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters.compress_threads;
}

int migrate_compress_wait_thread(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters.compress_wait_thread;
}

int migrate_decompress_threads(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters.decompress_threads;
}

int64_t migrate_max_postcopy_bandwidth(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters.max_postcopy_bandwidth;
}

int migrate_multifd_channels(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters.multifd_channels;
}

MultiFDCompression migrate_multifd_compression(void)
{
    MigrationState *s;

    s = migrate_get_current();

    assert(s->parameters.multifd_compression < MULTIFD_COMPRESSION__MAX);
    return s->parameters.multifd_compression;
}

int migrate_multifd_zlib_level(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters.multifd_zlib_level;
}

int migrate_multifd_zstd_level(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters.multifd_zstd_level;
}

uint8_t migrate_throttle_trigger_threshold(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters.throttle_trigger_threshold;
}

uint64_t migrate_xbzrle_cache_size(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters.xbzrle_cache_size;
}