xref: /openbmc/qemu/migration/options.c (revision 873f674c559e3162a6e6e92994301d400c5cc873)
1 /*
2  * QEMU migration capabilities
3  *
4  * Copyright (c) 2012-2023 Red Hat Inc
5  *
6  * Authors:
7  *   Orit Wasserman <owasserm@redhat.com>
8  *   Juan Quintela <quintela@redhat.com>
9  *
10  * This work is licensed under the terms of the GNU GPL, version 2 or later.
11  * See the COPYING file in the top-level directory.
12  */
13 
14 #include "qemu/osdep.h"
15 #include "qapi/error.h"
16 #include "qapi/qapi-commands-migration.h"
17 #include "qapi/qmp/qerror.h"
18 #include "sysemu/runstate.h"
19 #include "migration/misc.h"
20 #include "migration.h"
21 #include "ram.h"
22 #include "options.h"
23 
24 bool migrate_auto_converge(void)
25 {
26     MigrationState *s;
27 
28     s = migrate_get_current();
29 
30     return s->capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE];
31 }
32 
33 bool migrate_background_snapshot(void)
34 {
35     MigrationState *s;
36 
37     s = migrate_get_current();
38 
39     return s->capabilities[MIGRATION_CAPABILITY_BACKGROUND_SNAPSHOT];
40 }
41 
42 bool migrate_block(void)
43 {
44     MigrationState *s;
45 
46     s = migrate_get_current();
47 
48     return s->capabilities[MIGRATION_CAPABILITY_BLOCK];
49 }
50 
51 bool migrate_colo(void)
52 {
53     MigrationState *s = migrate_get_current();
54     return s->capabilities[MIGRATION_CAPABILITY_X_COLO];
55 }
56 
57 bool migrate_compress(void)
58 {
59     MigrationState *s;
60 
61     s = migrate_get_current();
62 
63     return s->capabilities[MIGRATION_CAPABILITY_COMPRESS];
64 }
65 
66 bool migrate_dirty_bitmaps(void)
67 {
68     MigrationState *s;
69 
70     s = migrate_get_current();
71 
72     return s->capabilities[MIGRATION_CAPABILITY_DIRTY_BITMAPS];
73 }
74 
75 bool migrate_events(void)
76 {
77     MigrationState *s;
78 
79     s = migrate_get_current();
80 
81     return s->capabilities[MIGRATION_CAPABILITY_EVENTS];
82 }
83 
84 bool migrate_ignore_shared(void)
85 {
86     MigrationState *s;
87 
88     s = migrate_get_current();
89 
90     return s->capabilities[MIGRATION_CAPABILITY_X_IGNORE_SHARED];
91 }
92 
93 bool migrate_late_block_activate(void)
94 {
95     MigrationState *s;
96 
97     s = migrate_get_current();
98 
99     return s->capabilities[MIGRATION_CAPABILITY_LATE_BLOCK_ACTIVATE];
100 }
101 
102 bool migrate_multifd(void)
103 {
104     MigrationState *s;
105 
106     s = migrate_get_current();
107 
108     return s->capabilities[MIGRATION_CAPABILITY_MULTIFD];
109 }
110 
111 bool migrate_pause_before_switchover(void)
112 {
113     MigrationState *s;
114 
115     s = migrate_get_current();
116 
117     return s->capabilities[MIGRATION_CAPABILITY_PAUSE_BEFORE_SWITCHOVER];
118 }
119 
120 bool migrate_postcopy_blocktime(void)
121 {
122     MigrationState *s;
123 
124     s = migrate_get_current();
125 
126     return s->capabilities[MIGRATION_CAPABILITY_POSTCOPY_BLOCKTIME];
127 }
128 
129 bool migrate_postcopy_preempt(void)
130 {
131     MigrationState *s;
132 
133     s = migrate_get_current();
134 
135     return s->capabilities[MIGRATION_CAPABILITY_POSTCOPY_PREEMPT];
136 }
137 
138 bool migrate_postcopy_ram(void)
139 {
140     MigrationState *s;
141 
142     s = migrate_get_current();
143 
144     return s->capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM];
145 }
146 
147 bool migrate_rdma_pin_all(void)
148 {
149     MigrationState *s = migrate_get_current();
150 
151     return s->capabilities[MIGRATION_CAPABILITY_RDMA_PIN_ALL];
152 }
153 
154 bool migrate_release_ram(void)
155 {
156     MigrationState *s;
157 
158     s = migrate_get_current();
159 
160     return s->capabilities[MIGRATION_CAPABILITY_RELEASE_RAM];
161 }
162 
163 bool migrate_return_path(void)
164 {
165     MigrationState *s;
166 
167     s = migrate_get_current();
168 
169     return s->capabilities[MIGRATION_CAPABILITY_RETURN_PATH];
170 }
171 
172 bool migrate_validate_uuid(void)
173 {
174     MigrationState *s;
175 
176     s = migrate_get_current();
177 
178     return s->capabilities[MIGRATION_CAPABILITY_VALIDATE_UUID];
179 }
180 
181 bool migrate_xbzrle(void)
182 {
183     MigrationState *s;
184 
185     s = migrate_get_current();
186 
187     return s->capabilities[MIGRATION_CAPABILITY_XBZRLE];
188 }
189 
190 bool migrate_zero_blocks(void)
191 {
192     MigrationState *s;
193 
194     s = migrate_get_current();
195 
196     return s->capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS];
197 }
198 
199 bool migrate_zero_copy_send(void)
200 {
201     MigrationState *s;
202 
203     s = migrate_get_current();
204 
205     return s->capabilities[MIGRATION_CAPABILITY_ZERO_COPY_SEND];
206 }
/*
 * Levels of host support for UFFD-based write tracking, as probed by
 * migrate_query_write_tracking() below.  Ordered so that "<" comparisons
 * express "less capable than".
 */
typedef enum WriteTrackingSupport {
    WT_SUPPORT_UNKNOWN = 0,
    WT_SUPPORT_ABSENT,      /* kernel lacks the required UFFD features */
    WT_SUPPORT_AVAILABLE,   /* kernel is capable, memory config is not */
    WT_SUPPORT_COMPATIBLE   /* kernel and memory config both support it */
} WriteTrackingSupport;
213 
214 static
215 WriteTrackingSupport migrate_query_write_tracking(void)
216 {
217     /* Check if kernel supports required UFFD features */
218     if (!ram_write_tracking_available()) {
219         return WT_SUPPORT_ABSENT;
220     }
221     /*
222      * Check if current memory configuration is
223      * compatible with required UFFD features.
224      */
225     if (!ram_write_tracking_compatible()) {
226         return WT_SUPPORT_AVAILABLE;
227     }
228 
229     return WT_SUPPORT_COMPATIBLE;
230 }
231 
/* Migration capabilities set: a counted list of MigrationCapability values. */
struct MigrateCapsSet {
    int size;                       /* Capability set size */
    MigrationCapability caps[];     /* Variadic array of capabilities */
};
typedef struct MigrateCapsSet MigrateCapsSet;

/*
 * Define and initialize MigrateCapsSet.  The element count is derived
 * from the argument list itself via a compound literal, so callers never
 * have to keep a length in sync by hand.
 */
#define INITIALIZE_MIGRATE_CAPS_SET(_name, ...)   \
    MigrateCapsSet _name = {    \
        .size = sizeof((int []) { __VA_ARGS__ }) / sizeof(int), \
        .caps = { __VA_ARGS__ } \
    }
245 
/*
 * Background-snapshot compatibility check list: capabilities that must
 * not be enabled together with 'background-snapshot' (enforced in
 * migrate_caps_check()).
 */
static const
INITIALIZE_MIGRATE_CAPS_SET(check_caps_background_snapshot,
    MIGRATION_CAPABILITY_POSTCOPY_RAM,
    MIGRATION_CAPABILITY_DIRTY_BITMAPS,
    MIGRATION_CAPABILITY_POSTCOPY_BLOCKTIME,
    MIGRATION_CAPABILITY_LATE_BLOCK_ACTIVATE,
    MIGRATION_CAPABILITY_RETURN_PATH,
    MIGRATION_CAPABILITY_MULTIFD,
    MIGRATION_CAPABILITY_PAUSE_BEFORE_SWITCHOVER,
    MIGRATION_CAPABILITY_AUTO_CONVERGE,
    MIGRATION_CAPABILITY_RELEASE_RAM,
    MIGRATION_CAPABILITY_RDMA_PIN_ALL,
    MIGRATION_CAPABILITY_COMPRESS,
    MIGRATION_CAPABILITY_XBZRLE,
    MIGRATION_CAPABILITY_X_COLO,
    MIGRATION_CAPABILITY_VALIDATE_UUID,
    MIGRATION_CAPABILITY_ZERO_COPY_SEND);
264 
/**
 * migrate_caps_check - check capability compatibility
 *
 * @old_caps: old capability list
 * @new_caps: new capability list
 * @errp: set *errp if the check failed, with reason
 *
 * Returns true if check passed, otherwise false.
 */
bool migrate_caps_check(bool *old_caps, bool *new_caps, Error **errp)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    /* Old-style block migration only exists when compiled in. */
#ifndef CONFIG_LIVE_BLOCK_MIGRATION
    if (new_caps[MIGRATION_CAPABILITY_BLOCK]) {
        error_setg(errp, "QEMU compiled without old-style (blk/-b, inc/-i) "
                   "block migration");
        error_append_hint(errp, "Use drive_mirror+NBD instead.\n");
        return false;
    }
#endif

    /* COLO depends on the replication module being compiled in. */
#ifndef CONFIG_REPLICATION
    if (new_caps[MIGRATION_CAPABILITY_X_COLO]) {
        error_setg(errp, "QEMU compiled without replication module"
                   " can't enable COLO");
        error_append_hint(errp, "Please enable replication before COLO.\n");
        return false;
    }
#endif

    if (new_caps[MIGRATION_CAPABILITY_POSTCOPY_RAM]) {
        /* This check is reasonably expensive, so only when it's being
         * set the first time, also it's only the destination that needs
         * special support.
         */
        if (!old_caps[MIGRATION_CAPABILITY_POSTCOPY_RAM] &&
            runstate_check(RUN_STATE_INMIGRATE) &&
            !postcopy_ram_supported_by_host(mis)) {
            /* postcopy_ram_supported_by_host will have emitted a more
             * detailed message
             */
            error_setg(errp, "Postcopy is not supported");
            return false;
        }

        if (new_caps[MIGRATION_CAPABILITY_X_IGNORE_SHARED]) {
            error_setg(errp, "Postcopy is not compatible with ignore-shared");
            return false;
        }
    }

    if (new_caps[MIGRATION_CAPABILITY_BACKGROUND_SNAPSHOT]) {
        WriteTrackingSupport wt_support;
        int idx;
        /*
         * Check if 'background-snapshot' capability is supported by
         * host kernel and compatible with guest memory configuration.
         */
        wt_support = migrate_query_write_tracking();
        if (wt_support < WT_SUPPORT_AVAILABLE) {
            error_setg(errp, "Background-snapshot is not supported by host kernel");
            return false;
        }
        if (wt_support < WT_SUPPORT_COMPATIBLE) {
            error_setg(errp, "Background-snapshot is not compatible "
                    "with guest memory configuration");
            return false;
        }

        /*
         * Check if there are any migration capabilities
         * incompatible with 'background-snapshot'.
         */
        for (idx = 0; idx < check_caps_background_snapshot.size; idx++) {
            int incomp_cap = check_caps_background_snapshot.caps[idx];
            if (new_caps[incomp_cap]) {
                error_setg(errp,
                        "Background-snapshot is not compatible with %s",
                        MigrationCapability_str(incomp_cap));
                return false;
            }
        }
    }

#ifdef CONFIG_LINUX
    /*
     * Zero-copy send is only permitted for plain (non-compressed,
     * non-TLS) multifd migration.
     */
    if (new_caps[MIGRATION_CAPABILITY_ZERO_COPY_SEND] &&
        (!new_caps[MIGRATION_CAPABILITY_MULTIFD] ||
         new_caps[MIGRATION_CAPABILITY_COMPRESS] ||
         new_caps[MIGRATION_CAPABILITY_XBZRLE] ||
         migrate_multifd_compression() ||
         migrate_use_tls())) {
        error_setg(errp,
                   "Zero copy only available for non-compressed non-TLS multifd migration");
        return false;
    }
#else
    if (new_caps[MIGRATION_CAPABILITY_ZERO_COPY_SEND]) {
        error_setg(errp,
                   "Zero copy currently only available on Linux");
        return false;
    }
#endif

    if (new_caps[MIGRATION_CAPABILITY_POSTCOPY_PREEMPT]) {
        if (!new_caps[MIGRATION_CAPABILITY_POSTCOPY_RAM]) {
            error_setg(errp, "Postcopy preempt requires postcopy-ram");
            return false;
        }

        /*
         * Preempt mode requires urgent pages to be sent in separate
         * channel, OTOH compression logic will disorder all pages into
         * different compression channels, which is not compatible with the
         * preempt assumptions on channel assignments.
         */
        if (new_caps[MIGRATION_CAPABILITY_COMPRESS]) {
            error_setg(errp, "Postcopy preempt not compatible with compress");
            return false;
        }
    }

    if (new_caps[MIGRATION_CAPABILITY_MULTIFD]) {
        if (new_caps[MIGRATION_CAPABILITY_COMPRESS]) {
            error_setg(errp, "Multifd is not compatible with compress");
            return false;
        }
    }

    return true;
}
396 
397 bool migrate_cap_set(int cap, bool value, Error **errp)
398 {
399     MigrationState *s = migrate_get_current();
400     bool new_caps[MIGRATION_CAPABILITY__MAX];
401 
402     if (migration_is_running(s->state)) {
403         error_setg(errp, QERR_MIGRATION_ACTIVE);
404         return false;
405     }
406 
407     memcpy(new_caps, s->capabilities, sizeof(new_caps));
408     new_caps[cap] = value;
409 
410     if (!migrate_caps_check(s->capabilities, new_caps, errp)) {
411         return false;
412     }
413     s->capabilities[cap] = value;
414     return true;
415 }
416 
417 MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp)
418 {
419     MigrationCapabilityStatusList *head = NULL, **tail = &head;
420     MigrationCapabilityStatus *caps;
421     MigrationState *s = migrate_get_current();
422     int i;
423 
424     for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
425 #ifndef CONFIG_LIVE_BLOCK_MIGRATION
426         if (i == MIGRATION_CAPABILITY_BLOCK) {
427             continue;
428         }
429 #endif
430         caps = g_malloc0(sizeof(*caps));
431         caps->capability = i;
432         caps->state = s->capabilities[i];
433         QAPI_LIST_APPEND(tail, caps);
434     }
435 
436     return head;
437 }
438 
439 void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
440                                   Error **errp)
441 {
442     MigrationState *s = migrate_get_current();
443     MigrationCapabilityStatusList *cap;
444     bool new_caps[MIGRATION_CAPABILITY__MAX];
445 
446     if (migration_is_running(s->state)) {
447         error_setg(errp, QERR_MIGRATION_ACTIVE);
448         return;
449     }
450 
451     memcpy(new_caps, s->capabilities, sizeof(new_caps));
452     for (cap = params; cap; cap = cap->next) {
453         new_caps[cap->value->capability] = cap->value->state;
454     }
455 
456     if (!migrate_caps_check(s->capabilities, new_caps, errp)) {
457         return;
458     }
459 
460     for (cap = params; cap; cap = cap->next) {
461         s->capabilities[cap->value->capability] = cap->value->state;
462     }
463 }
464 
465 /* parameters */
466 
467 bool migrate_block_incremental(void)
468 {
469     MigrationState *s;
470 
471     s = migrate_get_current();
472 
473     return s->parameters.block_incremental;
474 }
475 
476 uint32_t migrate_checkpoint_delay(void)
477 {
478     MigrationState *s;
479 
480     s = migrate_get_current();
481 
482     return s->parameters.x_checkpoint_delay;
483 }
484 
485 int migrate_compress_level(void)
486 {
487     MigrationState *s;
488 
489     s = migrate_get_current();
490 
491     return s->parameters.compress_level;
492 }
493 
494 int migrate_compress_threads(void)
495 {
496     MigrationState *s;
497 
498     s = migrate_get_current();
499 
500     return s->parameters.compress_threads;
501 }
502 
503 int migrate_compress_wait_thread(void)
504 {
505     MigrationState *s;
506 
507     s = migrate_get_current();
508 
509     return s->parameters.compress_wait_thread;
510 }
511 
512 uint8_t migrate_cpu_throttle_increment(void)
513 {
514     MigrationState *s;
515 
516     s = migrate_get_current();
517 
518     return s->parameters.cpu_throttle_increment;
519 }
520 
521 uint8_t migrate_cpu_throttle_initial(void)
522 {
523     MigrationState *s;
524 
525     s = migrate_get_current();
526 
527     return s->parameters.cpu_throttle_initial;
528 }
529 
530 bool migrate_cpu_throttle_tailslow(void)
531 {
532     MigrationState *s;
533 
534     s = migrate_get_current();
535 
536     return s->parameters.cpu_throttle_tailslow;
537 }
538 
539 int migrate_decompress_threads(void)
540 {
541     MigrationState *s;
542 
543     s = migrate_get_current();
544 
545     return s->parameters.decompress_threads;
546 }
547 
548 uint8_t migrate_max_cpu_throttle(void)
549 {
550     MigrationState *s;
551 
552     s = migrate_get_current();
553 
554     return s->parameters.max_cpu_throttle;
555 }
556 
557 int64_t migrate_max_postcopy_bandwidth(void)
558 {
559     MigrationState *s;
560 
561     s = migrate_get_current();
562 
563     return s->parameters.max_postcopy_bandwidth;
564 }
565 
566 int migrate_multifd_channels(void)
567 {
568     MigrationState *s;
569 
570     s = migrate_get_current();
571 
572     return s->parameters.multifd_channels;
573 }
574 
575 MultiFDCompression migrate_multifd_compression(void)
576 {
577     MigrationState *s;
578 
579     s = migrate_get_current();
580 
581     assert(s->parameters.multifd_compression < MULTIFD_COMPRESSION__MAX);
582     return s->parameters.multifd_compression;
583 }
584 
585 int migrate_multifd_zlib_level(void)
586 {
587     MigrationState *s;
588 
589     s = migrate_get_current();
590 
591     return s->parameters.multifd_zlib_level;
592 }
593 
594 int migrate_multifd_zstd_level(void)
595 {
596     MigrationState *s;
597 
598     s = migrate_get_current();
599 
600     return s->parameters.multifd_zstd_level;
601 }
602 
603 uint8_t migrate_throttle_trigger_threshold(void)
604 {
605     MigrationState *s;
606 
607     s = migrate_get_current();
608 
609     return s->parameters.throttle_trigger_threshold;
610 }
611 
612 uint64_t migrate_xbzrle_cache_size(void)
613 {
614     MigrationState *s;
615 
616     s = migrate_get_current();
617 
618     return s->parameters.xbzrle_cache_size;
619 }
620 
621 /* parameters helpers */
622 
/*
 * Return the announce-* migration parameters bundled into an
 * AnnounceParameters struct.  The returned pointer refers to a single
 * static instance that is overwritten on every call, so callers must
 * consume it before calling again and must not free it.
 */
AnnounceParameters *migrate_announce_params(void)
{
    static AnnounceParameters ap;

    MigrationState *s = migrate_get_current();

    ap.initial = s->parameters.announce_initial;
    ap.max = s->parameters.announce_max;
    ap.rounds = s->parameters.announce_rounds;
    ap.step = s->parameters.announce_step;

    return &ap;
}
636