xref: /openbmc/qemu/qapi/migration.json (revision 4d807857)
1# -*- Mode: Python -*-
2# vim: filetype=python
3#
4
5##
6# = Migration
7##
8
9{ 'include': 'common.json' }
10{ 'include': 'sockets.json' }
11
12##
13# @MigrationStats:
14#
15# Detailed migration status.
16#
17# @transferred: amount of bytes already transferred to the target VM
18#
19# @remaining: amount of bytes remaining to be transferred to the
20#     target VM
21#
22# @total: total amount of bytes involved in the migration process
23#
24# @duplicate: number of duplicate (zero) pages (since 1.2)
25#
26# @skipped: number of skipped zero pages (since 1.5)
27#
28# @normal: number of normal pages (since 1.2)
29#
30# @normal-bytes: number of normal bytes sent (since 1.2)
31#
32# @dirty-pages-rate: number of pages dirtied by second by the guest
33#     (since 1.3)
34#
35# @mbps: throughput in megabits/sec.  (since 1.6)
36#
37# @dirty-sync-count: number of times that dirty ram was synchronized
38#     (since 2.1)
39#
40# @postcopy-requests: The number of page requests received from the
41#     destination (since 2.7)
42#
43# @page-size: The number of bytes per page for the various page-based
44#     statistics (since 2.10)
45#
46# @multifd-bytes: The number of bytes sent through multifd (since 3.0)
47#
48# @pages-per-second: the number of memory pages transferred per second
49#     (Since 4.0)
50#
51# @precopy-bytes: The number of bytes sent in the pre-copy phase
52#     (since 7.0).
53#
54# @downtime-bytes: The number of bytes sent while the guest is paused
55#     (since 7.0).
56#
57# @postcopy-bytes: The number of bytes sent during the post-copy phase
58#     (since 7.0).
59#
60# @dirty-sync-missed-zero-copy: Number of times dirty RAM
61#     synchronization could not avoid copying dirty pages.  This is
62#     between 0 and @dirty-sync-count * @multifd-channels.  (since
63#     7.1)
64#
65# Since: 0.14
66##
67{ 'struct': 'MigrationStats',
68  'data': {'transferred': 'int', 'remaining': 'int', 'total': 'int',
69           'duplicate': 'int', 'skipped': 'int', 'normal': 'int',
70           'normal-bytes': 'int', 'dirty-pages-rate': 'int',
71           'mbps': 'number', 'dirty-sync-count': 'int',
72           'postcopy-requests': 'int', 'page-size': 'int',
73           'multifd-bytes': 'uint64', 'pages-per-second': 'uint64',
74           'precopy-bytes': 'uint64', 'downtime-bytes': 'uint64',
75           'postcopy-bytes': 'uint64',
76           'dirty-sync-missed-zero-copy': 'uint64' } }
77
78##
79# @XBZRLECacheStats:
80#
81# Detailed XBZRLE migration cache statistics
82#
83# @cache-size: XBZRLE cache size
84#
85# @bytes: amount of bytes already transferred to the target VM
86#
87# @pages: amount of pages transferred to the target VM
88#
89# @cache-miss: number of cache misses
90#
91# @cache-miss-rate: rate of cache misses (since 2.1)
92#
93# @encoding-rate: rate of encoded bytes (since 5.1)
94#
95# @overflow: number of overflows
96#
97# Since: 1.2
98##
99{ 'struct': 'XBZRLECacheStats',
100  'data': {'cache-size': 'size', 'bytes': 'int', 'pages': 'int',
101           'cache-miss': 'int', 'cache-miss-rate': 'number',
102           'encoding-rate': 'number', 'overflow': 'int' } }
103
104##
105# @CompressionStats:
106#
107# Detailed migration compression statistics
108#
109# @pages: amount of pages compressed and transferred to the target VM
110#
111# @busy: count of times that no free thread was available to compress
112#     data
113#
114# @busy-rate: rate of thread busy
115#
116# @compressed-size: amount of bytes after compression
117#
118# @compression-rate: rate of compressed size
119#
120# Since: 3.1
121##
122{ 'struct': 'CompressionStats',
123  'data': {'pages': 'int', 'busy': 'int', 'busy-rate': 'number',
124           'compressed-size': 'int', 'compression-rate': 'number' } }
125
126##
127# @MigrationStatus:
128#
129# An enumeration of migration status.
130#
131# @none: no migration has ever happened.
132#
133# @setup: migration process has been initiated.
134#
135# @cancelling: in the process of cancelling migration.
136#
137# @cancelled: cancelling migration is finished.
138#
139# @active: in the process of doing migration.
140#
141# @postcopy-active: like active, but now in postcopy mode.  (since
142#     2.5)
143#
144# @postcopy-paused: during postcopy but paused.  (since 3.0)
145#
146# @postcopy-recover: trying to recover from a paused postcopy.  (since
147#     3.0)
148#
149# @completed: migration is finished.
150#
151# @failed: some error occurred during migration process.
152#
153# @colo: VM is in the process of fault tolerance, VM can not get into
154#     this state unless colo capability is enabled for migration.
155#     (since 2.8)
156#
157# @pre-switchover: Paused before device serialisation.  (since 2.11)
158#
159# @device: During device serialisation when pause-before-switchover is
160#     enabled (since 2.11)
161#
162# @wait-unplug: wait for device unplug request by guest OS to be
163#     completed.  (since 4.2)
164#
165# Since: 2.3
166##
167{ 'enum': 'MigrationStatus',
168  'data': [ 'none', 'setup', 'cancelling', 'cancelled',
169            'active', 'postcopy-active', 'postcopy-paused',
170            'postcopy-recover', 'completed', 'failed', 'colo',
171            'pre-switchover', 'device', 'wait-unplug' ] }
172##
173# @VfioStats:
174#
175# Detailed VFIO devices migration statistics
176#
177# @transferred: amount of bytes transferred to the target VM by VFIO
178#     devices
179#
180# Since: 5.2
181##
182{ 'struct': 'VfioStats',
183  'data': {'transferred': 'int' } }
184
185##
186# @MigrationInfo:
187#
188# Information about current migration process.
189#
190# @status: @MigrationStatus describing the current migration status.
191#     If this field is not returned, no migration process has been
192#     initiated
193#
194# @ram: @MigrationStats containing detailed migration status, only
195#     returned if status is 'active' or 'completed' (since 1.2)
196#
197# @disk: @MigrationStats containing detailed disk migration status,
198#     only returned if status is 'active' and it is a block migration
199#
200# @xbzrle-cache: @XBZRLECacheStats containing detailed XBZRLE
201#     migration statistics, only returned if XBZRLE feature is on and
202#     status is 'active' or 'completed' (since 1.2)
203#
204# @total-time: total amount of milliseconds since migration started.
205#     If migration has ended, it returns the total migration time.
206#     (since 1.2)
207#
208# @downtime: only present when migration finishes correctly; total
209#     downtime in milliseconds for the guest.  (since 1.3)
210#
211# @expected-downtime: only present while migration is active;
212#     expected downtime in milliseconds for the guest in the last
213#     walk of the dirty bitmap.  (since 1.3)
214#
215# @setup-time: amount of setup time in milliseconds *before* the
216#     iterations begin but *after* the QMP command is issued.  This is
217#     designed to provide an accounting of any activities (such as
218#     RDMA pinning) which may be expensive, but do not actually occur
219#     during the iterative migration rounds themselves.  (since 1.6)
220#
221# @cpu-throttle-percentage: percentage of time guest cpus are being
222#     throttled during auto-converge.  This is only present when
223#     auto-converge has started throttling guest cpus.  (Since 2.7)
224#
225# @error-desc: the human readable error description string, when
226#     @status is 'failed'.  Clients should not attempt to parse the
227#     error strings.  (Since 2.7)
228#
229# @postcopy-blocktime: total time when all vCPU were blocked during
230#     postcopy live migration.  This is only present when the
231#     postcopy-blocktime migration capability is enabled.  (Since 3.0)
232#
233# @postcopy-vcpu-blocktime: list of the postcopy blocktime per vCPU.
234#     This is only present when the postcopy-blocktime migration
235#     capability is enabled.  (Since 3.0)
236#
237# @compression: migration compression statistics, only returned if
238#     compression feature is on and status is 'active' or 'completed'
239#     (Since 3.1)
240#
241# @socket-address: Only used for tcp, to know what the real port is
242#     (Since 4.0)
243#
244# @vfio: @VfioStats containing detailed VFIO devices migration
245#     statistics, only returned if VFIO device is present, migration
246#     is supported by all VFIO devices and status is 'active' or
247#     'completed' (since 5.2)
248#
249# @blocked-reasons: A list of reasons an outgoing migration is
250#     blocked.  Present and non-empty when migration is blocked.
251#     (since 6.0)
252#
253# Since: 0.14
254##
255{ 'struct': 'MigrationInfo',
256  'data': {'*status': 'MigrationStatus', '*ram': 'MigrationStats',
257           '*disk': 'MigrationStats',
258           '*vfio': 'VfioStats',
259           '*xbzrle-cache': 'XBZRLECacheStats',
260           '*total-time': 'int',
261           '*expected-downtime': 'int',
262           '*downtime': 'int',
263           '*setup-time': 'int',
264           '*cpu-throttle-percentage': 'int',
265           '*error-desc': 'str',
266           '*blocked-reasons': ['str'],
267           '*postcopy-blocktime': 'uint32',
268           '*postcopy-vcpu-blocktime': ['uint32'],
269           '*compression': 'CompressionStats',
270           '*socket-address': ['SocketAddress'] } }
271
272##
273# @query-migrate:
274#
275# Returns information about current migration process.  If migration
276# is active there will be another json-object with RAM migration
277# status and if block migration is active another one with block
278# migration status.
279#
280# Returns: @MigrationInfo
281#
282# Since: 0.14
283#
284# Examples:
285#
286# 1. Before the first migration
287#
288# -> { "execute": "query-migrate" }
289# <- { "return": {} }
290#
291# 2. Migration is done and has succeeded
292#
293# -> { "execute": "query-migrate" }
294# <- { "return": {
295#         "status": "completed",
296#         "total-time":12345,
297#         "setup-time":12345,
298#         "downtime":12345,
299#         "ram":{
300#           "transferred":123,
301#           "remaining":123,
302#           "total":246,
303#           "duplicate":123,
304#           "normal":123,
305#           "normal-bytes":123456,
306#           "dirty-sync-count":15
307#         }
308#      }
309#    }
310#
311# 3. Migration is done and has failed
312#
313# -> { "execute": "query-migrate" }
314# <- { "return": { "status": "failed" } }
315#
316# 4. Migration is being performed and is not a block migration:
317#
318# -> { "execute": "query-migrate" }
319# <- {
320#       "return":{
321#          "status":"active",
322#          "total-time":12345,
323#          "setup-time":12345,
324#          "expected-downtime":12345,
325#          "ram":{
326#             "transferred":123,
327#             "remaining":123,
328#             "total":246,
329#             "duplicate":123,
330#             "normal":123,
331#             "normal-bytes":123456,
332#             "dirty-sync-count":15
333#          }
334#       }
335#    }
336#
337# 5. Migration is being performed and is a block migration:
338#
339# -> { "execute": "query-migrate" }
340# <- {
341#       "return":{
342#          "status":"active",
343#          "total-time":12345,
344#          "setup-time":12345,
345#          "expected-downtime":12345,
346#          "ram":{
347#             "total":1057024,
348#             "remaining":1053304,
349#             "transferred":3720,
350#             "duplicate":123,
351#             "normal":123,
352#             "normal-bytes":123456,
353#             "dirty-sync-count":15
354#          },
355#          "disk":{
356#             "total":20971520,
357#             "remaining":20880384,
358#             "transferred":91136
359#          }
360#       }
361#    }
362#
363# 6. Migration is being performed and XBZRLE is active:
364#
365# -> { "execute": "query-migrate" }
366# <- {
367#       "return":{
368#          "status":"active",
369#          "total-time":12345,
370#          "setup-time":12345,
371#          "expected-downtime":12345,
372#          "ram":{
373#             "total":1057024,
374#             "remaining":1053304,
375#             "transferred":3720,
376#             "duplicate":10,
377#             "normal":3333,
378#             "normal-bytes":3412992,
379#             "dirty-sync-count":15
380#          },
381#          "xbzrle-cache":{
382#             "cache-size":67108864,
383#             "bytes":20971520,
384#             "pages":2444343,
385#             "cache-miss":2244,
386#             "cache-miss-rate":0.123,
387#             "encoding-rate":80.1,
388#             "overflow":34434
389#          }
390#       }
391#    }
392##
393{ 'command': 'query-migrate', 'returns': 'MigrationInfo' }
394
395##
396# @MigrationCapability:
397#
398# Migration capabilities enumeration
399#
400# @xbzrle: Migration supports xbzrle (Xor Based Zero Run Length
401#     Encoding). This feature allows us to minimize migration traffic
402#     for certain work loads, by sending compressed difference of the
403#     pages
404#
405# @rdma-pin-all: Controls whether or not the entire VM memory
406#     footprint is mlock()'d on demand or all at once.  Refer to
407#     docs/rdma.txt for usage.  Disabled by default.  (since 2.0)
408#
409# @zero-blocks: During storage migration encode blocks of zeroes
410#     efficiently.  This essentially saves 1MB of zeroes per block on
411#     the wire.  Enabling requires source and target VM to support
412#     this feature.  To enable it is sufficient to enable the
413#     capability on the source VM. The feature is disabled by default.
414#     (since 1.6)
415#
416# @compress: Use multiple compression threads to accelerate live
417#     migration.  This feature can help to reduce the migration
418#     traffic, by sending compressed pages.  Please note that if
419#     compress and xbzrle are both on, compress only takes effect in
420#     the ram bulk stage, after that, it will be disabled and only
421#     xbzrle takes effect, this can help to minimize migration
422#     traffic.  The feature is disabled by default.  (since 2.4)
423#
424# @events: generate events for each migration state change (since
425#     2.4)
426#
427# @auto-converge: If enabled, QEMU will automatically throttle down
428#     the guest to speed up convergence of RAM migration.  (since 1.6)
429#
430# @postcopy-ram: Start executing on the migration target before all of
431#     RAM has been migrated, pulling the remaining pages along as
432#     needed.  The capacity must have the same setting on both source
433#     and target or migration will not even start.  NOTE: If the
434#     migration fails during postcopy the VM will fail.  (since 2.6)
435#
436# @x-colo: If enabled, migration will never end, and the state of the
437#     VM on the primary side will be migrated continuously to the VM
438#     on secondary side, this process is called COarse-Grain LOck
439#     Stepping (COLO) for Non-stop Service.  (since 2.8)
440#
441# @release-ram: if enabled, qemu will free the migrated ram pages on
442#     the source during postcopy-ram migration.  (since 2.9)
443#
444# @block: If enabled, QEMU will also migrate the contents of all block
445#     devices.  Default is disabled.  A possible alternative uses
446#     mirror jobs to a builtin NBD server on the destination, which
447#     offers more flexibility.  (Since 2.10)
448#
449# @return-path: If enabled, migration will use the return path even
450#     for precopy.  (since 2.10)
451#
452# @pause-before-switchover: Pause outgoing migration before
453#     serialising device state and before disabling block IO (since
454#     2.11)
455#
456# @multifd: Use more than one fd for migration (since 4.0)
457#
458# @dirty-bitmaps: If enabled, QEMU will migrate named dirty bitmaps.
459#     (since 2.12)
460#
461# @postcopy-blocktime: Calculate downtime for postcopy live migration
462#     (since 3.0)
463#
464# @late-block-activate: If enabled, the destination will not activate
465#     block devices (and thus take locks) immediately at the end of
466#     migration.  (since 3.0)
467#
468# @x-ignore-shared: If enabled, QEMU will not migrate shared memory
469#     that is accessible on the destination machine.  (since 4.0)
470#
471# @validate-uuid: Send the UUID of the source to allow the destination
472#     to ensure it is the same.  (since 4.2)
473#
474# @background-snapshot: If enabled, the migration stream will be a
475#     snapshot of the VM exactly at the point when the migration
476#     procedure starts.  The VM RAM is saved with running VM. (since
477#     6.0)
478#
479# @zero-copy-send: Controls behavior on sending memory pages on
480#     migration.  When true, enables a zero-copy mechanism for sending
481#     memory pages, if host supports it.  Requires that QEMU be
482#     permitted to use locked memory for guest RAM pages.  (since 7.1)
483#
484# @postcopy-preempt: If enabled, the migration process will allow
485#     postcopy requests to preempt precopy stream, so postcopy
486#     requests will be handled faster.  This is a performance feature
487#     and should not affect the correctness of postcopy migration.
488#     (since 7.1)
489#
490# @switchover-ack: If enabled, migration will not stop the source VM
491#     and complete the migration until an ACK is received from the
492#     destination that it's OK to do so.  Exactly when this ACK is
493#     sent depends on the migrated devices that use this feature.
494#     For example, a device can use it to make sure some of its data
495#     is sent and loaded in the destination before doing switchover.
496#     This can reduce downtime if devices that support this capability
497#     are present.  'return-path' capability must be enabled to use
498#     it.  (since 8.1)
499#
500# Features:
501#
502# @unstable: Members @x-colo and @x-ignore-shared are experimental.
503#
504# Since: 1.2
505##
506{ 'enum': 'MigrationCapability',
507  'data': ['xbzrle', 'rdma-pin-all', 'auto-converge', 'zero-blocks',
508           'compress', 'events', 'postcopy-ram',
509           { 'name': 'x-colo', 'features': [ 'unstable' ] },
510           'release-ram',
511           'block', 'return-path', 'pause-before-switchover', 'multifd',
512           'dirty-bitmaps', 'postcopy-blocktime', 'late-block-activate',
513           { 'name': 'x-ignore-shared', 'features': [ 'unstable' ] },
514           'validate-uuid', 'background-snapshot',
515           'zero-copy-send', 'postcopy-preempt', 'switchover-ack'] }
516
517##
518# @MigrationCapabilityStatus:
519#
520# Migration capability information
521#
522# @capability: capability enum
523#
524# @state: capability state bool
525#
526# Since: 1.2
527##
528{ 'struct': 'MigrationCapabilityStatus',
529  'data': { 'capability': 'MigrationCapability', 'state': 'bool' } }
530
531##
532# @migrate-set-capabilities:
533#
534# Enable/Disable the following migration capabilities (like xbzrle)
535#
536# @capabilities: json array of capability modifications to make
537#
538# Since: 1.2
539#
540# Example:
541#
542# -> { "execute": "migrate-set-capabilities" , "arguments":
543#      { "capabilities": [ { "capability": "xbzrle", "state": true } ] } }
544# <- { "return": {} }
545##
546{ 'command': 'migrate-set-capabilities',
547  'data': { 'capabilities': ['MigrationCapabilityStatus'] } }
548
549##
550# @query-migrate-capabilities:
551#
552# Returns information about the current migration capabilities status
553#
554# Returns: a list of @MigrationCapabilityStatus
555#
556# Since: 1.2
557#
558# Example:
559#
560# -> { "execute": "query-migrate-capabilities" }
561# <- { "return": [
562#       {"state": false, "capability": "xbzrle"},
563#       {"state": false, "capability": "rdma-pin-all"},
564#       {"state": false, "capability": "auto-converge"},
565#       {"state": false, "capability": "zero-blocks"},
566#       {"state": false, "capability": "compress"},
567#       {"state": true, "capability": "events"},
568#       {"state": false, "capability": "postcopy-ram"},
569#       {"state": false, "capability": "x-colo"}
570#    ]}
571##
572{ 'command': 'query-migrate-capabilities', 'returns': ['MigrationCapabilityStatus'] }
573
574##
575# @MultiFDCompression:
576#
577# An enumeration of multifd compression methods.
578#
579# @none: no compression.
580#
581# @zlib: use zlib compression method.
582#
583# @zstd: use zstd compression method.
584#
585# Since: 5.0
586##
587{ 'enum': 'MultiFDCompression',
588  'data': [ 'none', 'zlib',
589            { 'name': 'zstd', 'if': 'CONFIG_ZSTD' } ] }
590
591##
592# @BitmapMigrationBitmapAliasTransform:
593#
594# @persistent: If present, the bitmap will be made persistent or
595#     transient depending on this parameter.
596#
597# Since: 6.0
598##
599{ 'struct': 'BitmapMigrationBitmapAliasTransform',
600  'data': {
601      '*persistent': 'bool'
602  } }
603
604##
605# @BitmapMigrationBitmapAlias:
606#
607# @name: The name of the bitmap.
608#
609# @alias: An alias name for migration (for example the bitmap name on
610#     the opposite site).
611#
612# @transform: Allows the modification of the migrated bitmap.  (since
613#     6.0)
614#
615# Since: 5.2
616##
617{ 'struct': 'BitmapMigrationBitmapAlias',
618  'data': {
619      'name': 'str',
620      'alias': 'str',
621      '*transform': 'BitmapMigrationBitmapAliasTransform'
622  } }
623
624##
625# @BitmapMigrationNodeAlias:
626#
627# Maps a block node name and the bitmaps it has to aliases for dirty
628# bitmap migration.
629#
630# @node-name: A block node name.
631#
632# @alias: An alias block node name for migration (for example the node
633#     name on the opposite site).
634#
635# @bitmaps: Mappings for the bitmaps on this node.
636#
637# Since: 5.2
638##
639{ 'struct': 'BitmapMigrationNodeAlias',
640  'data': {
641      'node-name': 'str',
642      'alias': 'str',
643      'bitmaps': [ 'BitmapMigrationBitmapAlias' ]
644  } }
645
646##
647# @MigrationParameter:
648#
649# Migration parameters enumeration
650#
651# @announce-initial: Initial delay (in milliseconds) before sending
652#     the first announce (Since 4.0)
653#
654# @announce-max: Maximum delay (in milliseconds) between packets in
655#     the announcement (Since 4.0)
656#
657# @announce-rounds: Number of self-announce packets sent after
658#     migration (Since 4.0)
659#
660# @announce-step: Increase in delay (in milliseconds) between
661#     subsequent packets in the announcement (Since 4.0)
662#
663# @compress-level: Set the compression level to be used in live
664#     migration, the compression level is an integer between 0 and 9,
665#     where 0 means no compression, 1 means the best compression
666#     speed, and 9 means best compression ratio which will consume
667#     more CPU.
668#
669# @compress-threads: Set compression thread count to be used in live
670#     migration, the compression thread count is an integer between 1
671#     and 255.
672#
673# @compress-wait-thread: Controls behavior when all compression
674#     threads are currently busy.  If true (default), wait for a free
675#     compression thread to become available; otherwise, send the page
676#     uncompressed.  (Since 3.1)
677#
678# @decompress-threads: Set decompression thread count to be used in
679#     live migration, the decompression thread count is an integer
680#     between 1 and 255. Usually, decompression is at least 4 times as
681#     fast as compression, so set the decompress-threads to the number
682#     about 1/4 of compress-threads is adequate.
683#
684# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
685#     bytes_xfer_period to trigger throttling.  It is expressed as
686#     percentage.  The default value is 50. (Since 5.0)
687#
688# @cpu-throttle-initial: Initial percentage of time guest cpus are
689#     throttled when migration auto-converge is activated.  The
690#     default value is 20. (Since 2.7)
691#
692# @cpu-throttle-increment: throttle percentage increase each time
693#     auto-converge detects that migration is not making progress.
694#     The default value is 10. (Since 2.7)
695#
696# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage.
697#     At the tail stage of throttling, the Guest is very sensitive to
698#     CPU percentage while the @cpu-throttle-increment is excessive
699#     usually at tail stage.  If this parameter is true, we will
700#     compute the ideal CPU percentage used by the Guest, which may
701#     exactly make the dirty rate match the dirty rate threshold.
702#     Then we will choose a smaller throttle increment between the one
703#     specified by @cpu-throttle-increment and the one generated by
704#     ideal CPU percentage.  Therefore, it is compatible to
705#     traditional throttling, meanwhile the throttle increment won't
706#     be excessive at tail stage.  The default value is false.  (Since
707#     5.1)
708#
709# @tls-creds: ID of the 'tls-creds' object that provides credentials
710#     for establishing a TLS connection over the migration data
711#     channel.  On the outgoing side of the migration, the credentials
712#     must be for a 'client' endpoint, while for the incoming side the
713#     credentials must be for a 'server' endpoint.  Setting this will
714#     enable TLS for all migrations.  The default is unset, resulting
715#     in unsecured migration at the QEMU level.  (Since 2.7)
716#
717# @tls-hostname: hostname of the target host for the migration.  This
718#     is required when using x509 based TLS credentials and the
719#     migration URI does not already include a hostname.  For example
720#     if using fd: or exec: based migration, the hostname must be
721#     provided so that the server's x509 certificate identity can be
722#     validated.  (Since 2.7)
723#
724# @tls-authz: ID of the 'authz' object subclass that provides access
725#     control checking of the TLS x509 certificate distinguished name.
726#     This object is only resolved at time of use, so can be deleted
727#     and recreated on the fly while the migration server is active.
728#     If missing, it will default to denying access (Since 4.0)
729#
730# @max-bandwidth: to set maximum speed for migration.  maximum speed
731#     in bytes per second.  (Since 2.8)
732#
733# @downtime-limit: set maximum tolerated downtime for migration.
734#     maximum downtime in milliseconds (Since 2.8)
735#
736# @x-checkpoint-delay: The delay time (in ms) between two COLO
737#     checkpoints in periodic mode.  (Since 2.8)
738#
739# @block-incremental: Affects how much storage is migrated when the
740#     block migration capability is enabled.  When false, the entire
741#     storage backing chain is migrated into a flattened image at the
742#     destination; when true, only the active qcow2 layer is migrated
743#     and the destination must already have access to the same backing
744#     chain as was used on the source.  (since 2.10)
745#
746# @multifd-channels: Number of channels used to migrate data in
747#     parallel.  This is the same as the number of sockets used for
748#     migration.  The default value is 2 (since 4.0)
749#
750# @xbzrle-cache-size: cache size to be used by XBZRLE migration.  It
751#     needs to be a multiple of the target page size and a power of 2
752#     (Since 2.11)
753#
754# @max-postcopy-bandwidth: Background transfer bandwidth during
755#     postcopy.  Defaults to 0 (unlimited).  In bytes per second.
756#     (Since 3.0)
757#
758# @max-cpu-throttle: maximum cpu throttle percentage.  Defaults to 99.
759#     (Since 3.1)
760#
761# @multifd-compression: Which compression method to use.  Defaults to
762#     none.  (Since 5.0)
763#
764# @multifd-zlib-level: Set the compression level to be used in live
765#     migration, the compression level is an integer between 0 and 9,
766#     where 0 means no compression, 1 means the best compression
767#     speed, and 9 means best compression ratio which will consume
768#     more CPU. Defaults to 1. (Since 5.0)
769#
770# @multifd-zstd-level: Set the compression level to be used in live
771#     migration, the compression level is an integer between 0 and 20,
772#     where 0 means no compression, 1 means the best compression
773#     speed, and 20 means best compression ratio which will consume
774#     more CPU. Defaults to 1. (Since 5.0)
775#
776# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
777#     aliases for the purpose of dirty bitmap migration.  Such aliases
778#     may for example be the corresponding names on the opposite site.
779#     The mapping must be one-to-one, but not necessarily complete: On
780#     the source, unmapped bitmaps and all bitmaps on unmapped nodes
781#     will be ignored.  On the destination, encountering an unmapped
782#     alias in the incoming migration stream will result in a report,
783#     and all further bitmap migration data will then be discarded.
784#     Note that the destination does not know about bitmaps it does
785#     not receive, so there is no limitation or requirement regarding
786#     the number of bitmaps received, or how they are named, or on
787#     which nodes they are placed.  By default (when this parameter
788#     has never been set), bitmap names are mapped to themselves.
789#     Nodes are mapped to their block device name if there is one, and
790#     to their node name otherwise.  (Since 5.2)
791#
792# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of
793#     dirty limit during live migration.  Should be in the range 1 to
794#     1000ms, defaults to 1000ms.  (Since 8.1)
795#
796# Features:
797#
798# @unstable: Members @x-checkpoint-delay and
799#     @x-vcpu-dirty-limit-period are experimental.
800#
801# Since: 2.4
802##
803{ 'enum': 'MigrationParameter',
804  'data': ['announce-initial', 'announce-max',
805           'announce-rounds', 'announce-step',
806           'compress-level', 'compress-threads', 'decompress-threads',
807           'compress-wait-thread', 'throttle-trigger-threshold',
808           'cpu-throttle-initial', 'cpu-throttle-increment',
809           'cpu-throttle-tailslow',
810           'tls-creds', 'tls-hostname', 'tls-authz', 'max-bandwidth',
811           'downtime-limit',
812           { 'name': 'x-checkpoint-delay', 'features': [ 'unstable' ] },
813           'block-incremental',
814           'multifd-channels',
815           'xbzrle-cache-size', 'max-postcopy-bandwidth',
816           'max-cpu-throttle', 'multifd-compression',
817           'multifd-zlib-level', 'multifd-zstd-level',
818           'block-bitmap-mapping',
819           { 'name': 'x-vcpu-dirty-limit-period', 'features': ['unstable'] } ] }
820
821##
822# @MigrateSetParameters:
823#
824# @announce-initial: Initial delay (in milliseconds) before sending
825#     the first announce (Since 4.0)
826#
827# @announce-max: Maximum delay (in milliseconds) between packets in
828#     the announcement (Since 4.0)
829#
830# @announce-rounds: Number of self-announce packets sent after
831#     migration (Since 4.0)
832#
833# @announce-step: Increase in delay (in milliseconds) between
834#     subsequent packets in the announcement (Since 4.0)
835#
836# @compress-level: compression level
837#
838# @compress-threads: compression thread count
839#
840# @compress-wait-thread: Controls behavior when all compression
841#     threads are currently busy.  If true (default), wait for a free
842#     compression thread to become available; otherwise, send the page
843#     uncompressed.  (Since 3.1)
844#
845# @decompress-threads: decompression thread count
846#
847# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
848#     bytes_xfer_period to trigger throttling.  It is expressed as
849#     percentage.  The default value is 50. (Since 5.0)
850#
851# @cpu-throttle-initial: Initial percentage of time guest cpus are
852#     throttled when migration auto-converge is activated.  The
853#     default value is 20. (Since 2.7)
854#
855# @cpu-throttle-increment: throttle percentage increase each time
856#     auto-converge detects that migration is not making progress.
857#     The default value is 10. (Since 2.7)
858#
# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage.
#     At the tail stage of throttling, the Guest is very sensitive to
#     CPU percentage while the @cpu-throttle-increment is excessive
862#     usually at tail stage.  If this parameter is true, we will
863#     compute the ideal CPU percentage used by the Guest, which may
864#     exactly make the dirty rate match the dirty rate threshold.
865#     Then we will choose a smaller throttle increment between the one
866#     specified by @cpu-throttle-increment and the one generated by
#     ideal CPU percentage.  Therefore, it is compatible with
868#     traditional throttling, meanwhile the throttle increment won't
869#     be excessive at tail stage.  The default value is false.  (Since
870#     5.1)
871#
872# @tls-creds: ID of the 'tls-creds' object that provides credentials
873#     for establishing a TLS connection over the migration data
874#     channel.  On the outgoing side of the migration, the credentials
875#     must be for a 'client' endpoint, while for the incoming side the
876#     credentials must be for a 'server' endpoint.  Setting this to a
877#     non-empty string enables TLS for all migrations.  An empty
878#     string means that QEMU will use plain text mode for migration,
879#     rather than TLS (Since 2.9) Previously (since 2.7), this was
880#     reported by omitting tls-creds instead.
881#
882# @tls-hostname: hostname of the target host for the migration.  This
883#     is required when using x509 based TLS credentials and the
884#     migration URI does not already include a hostname.  For example
885#     if using fd: or exec: based migration, the hostname must be
886#     provided so that the server's x509 certificate identity can be
887#     validated.  (Since 2.7) An empty string means that QEMU will use
888#     the hostname associated with the migration URI, if any.  (Since
889#     2.9) Previously (since 2.7), this was reported by omitting
890#     tls-hostname instead.
891#
892# @max-bandwidth: to set maximum speed for migration.  maximum speed
893#     in bytes per second.  (Since 2.8)
894#
895# @downtime-limit: set maximum tolerated downtime for migration.
896#     maximum downtime in milliseconds (Since 2.8)
897#
898# @x-checkpoint-delay: the delay time between two COLO checkpoints.
899#     (Since 2.8)
900#
901# @block-incremental: Affects how much storage is migrated when the
902#     block migration capability is enabled.  When false, the entire
903#     storage backing chain is migrated into a flattened image at the
904#     destination; when true, only the active qcow2 layer is migrated
905#     and the destination must already have access to the same backing
906#     chain as was used on the source.  (since 2.10)
907#
908# @multifd-channels: Number of channels used to migrate data in
#     parallel.  This is the same number as the number of sockets
910#     used for migration.  The default value is 2 (since 4.0)
911#
912# @xbzrle-cache-size: cache size to be used by XBZRLE migration.  It
913#     needs to be a multiple of the target page size and a power of 2
914#     (Since 2.11)
915#
916# @max-postcopy-bandwidth: Background transfer bandwidth during
917#     postcopy.  Defaults to 0 (unlimited).  In bytes per second.
918#     (Since 3.0)
919#
920# @max-cpu-throttle: maximum cpu throttle percentage.  The default
921#     value is 99. (Since 3.1)
922#
923# @multifd-compression: Which compression method to use.  Defaults to
924#     none.  (Since 5.0)
925#
926# @multifd-zlib-level: Set the compression level to be used in live
927#     migration, the compression level is an integer between 0 and 9,
928#     where 0 means no compression, 1 means the best compression
929#     speed, and 9 means best compression ratio which will consume
930#     more CPU. Defaults to 1. (Since 5.0)
931#
932# @multifd-zstd-level: Set the compression level to be used in live
933#     migration, the compression level is an integer between 0 and 20,
934#     where 0 means no compression, 1 means the best compression
935#     speed, and 20 means best compression ratio which will consume
936#     more CPU. Defaults to 1. (Since 5.0)
937#
938# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
939#     aliases for the purpose of dirty bitmap migration.  Such aliases
940#     may for example be the corresponding names on the opposite site.
941#     The mapping must be one-to-one, but not necessarily complete: On
942#     the source, unmapped bitmaps and all bitmaps on unmapped nodes
943#     will be ignored.  On the destination, encountering an unmapped
944#     alias in the incoming migration stream will result in a report,
945#     and all further bitmap migration data will then be discarded.
946#     Note that the destination does not know about bitmaps it does
947#     not receive, so there is no limitation or requirement regarding
948#     the number of bitmaps received, or how they are named, or on
949#     which nodes they are placed.  By default (when this parameter
950#     has never been set), bitmap names are mapped to themselves.
951#     Nodes are mapped to their block device name if there is one, and
952#     to their node name otherwise.  (Since 5.2)
953#
# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty
#     limit during live migration.  Should be in the range 1 to
#     1000ms.  Defaults to 1000ms.  (Since 8.1)
957#
958# Features:
959#
960# @unstable: Members @x-checkpoint-delay and @x-vcpu-dirty-limit-period
961#            are experimental.
962#
963# TODO: either fuse back into MigrationParameters, or make
964#     MigrationParameters members mandatory
965#
966# Since: 2.4
967##
968{ 'struct': 'MigrateSetParameters',
969  'data': { '*announce-initial': 'size',
970            '*announce-max': 'size',
971            '*announce-rounds': 'size',
972            '*announce-step': 'size',
973            '*compress-level': 'uint8',
974            '*compress-threads': 'uint8',
975            '*compress-wait-thread': 'bool',
976            '*decompress-threads': 'uint8',
977            '*throttle-trigger-threshold': 'uint8',
978            '*cpu-throttle-initial': 'uint8',
979            '*cpu-throttle-increment': 'uint8',
980            '*cpu-throttle-tailslow': 'bool',
981            '*tls-creds': 'StrOrNull',
982            '*tls-hostname': 'StrOrNull',
983            '*tls-authz': 'StrOrNull',
984            '*max-bandwidth': 'size',
985            '*downtime-limit': 'uint64',
986            '*x-checkpoint-delay': { 'type': 'uint32',
987                                     'features': [ 'unstable' ] },
988            '*block-incremental': 'bool',
989            '*multifd-channels': 'uint8',
990            '*xbzrle-cache-size': 'size',
991            '*max-postcopy-bandwidth': 'size',
992            '*max-cpu-throttle': 'uint8',
993            '*multifd-compression': 'MultiFDCompression',
994            '*multifd-zlib-level': 'uint8',
995            '*multifd-zstd-level': 'uint8',
996            '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ],
997            '*x-vcpu-dirty-limit-period': { 'type': 'uint64',
998                                            'features': [ 'unstable' ] } } }
999
1000##
1001# @migrate-set-parameters:
1002#
1003# Set various migration parameters.
1004#
1005# Since: 2.4
1006#
1007# Example:
1008#
1009# -> { "execute": "migrate-set-parameters" ,
1010#      "arguments": { "compress-level": 1 } }
1011# <- { "return": {} }
1012##
1013{ 'command': 'migrate-set-parameters', 'boxed': true,
1014  'data': 'MigrateSetParameters' }
1015
1016##
1017# @MigrationParameters:
1018#
1019# The optional members aren't actually optional.
1020#
1021# @announce-initial: Initial delay (in milliseconds) before sending
1022#     the first announce (Since 4.0)
1023#
1024# @announce-max: Maximum delay (in milliseconds) between packets in
1025#     the announcement (Since 4.0)
1026#
1027# @announce-rounds: Number of self-announce packets sent after
1028#     migration (Since 4.0)
1029#
1030# @announce-step: Increase in delay (in milliseconds) between
1031#     subsequent packets in the announcement (Since 4.0)
1032#
1033# @compress-level: compression level
1034#
1035# @compress-threads: compression thread count
1036#
1037# @compress-wait-thread: Controls behavior when all compression
1038#     threads are currently busy.  If true (default), wait for a free
1039#     compression thread to become available; otherwise, send the page
1040#     uncompressed.  (Since 3.1)
1041#
1042# @decompress-threads: decompression thread count
1043#
1044# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
1045#     bytes_xfer_period to trigger throttling.  It is expressed as
#     a percentage.  The default value is 50. (Since 5.0)
1047#
1048# @cpu-throttle-initial: Initial percentage of time guest cpus are
1049#     throttled when migration auto-converge is activated.  (Since
1050#     2.7)
1051#
1052# @cpu-throttle-increment: throttle percentage increase each time
1053#     auto-converge detects that migration is not making progress.
1054#     (Since 2.7)
1055#
# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage.
#     At the tail stage of throttling, the Guest is very sensitive to
#     CPU percentage while the @cpu-throttle-increment is excessive
1059#     usually at tail stage.  If this parameter is true, we will
1060#     compute the ideal CPU percentage used by the Guest, which may
1061#     exactly make the dirty rate match the dirty rate threshold.
1062#     Then we will choose a smaller throttle increment between the one
1063#     specified by @cpu-throttle-increment and the one generated by
#     ideal CPU percentage.  Therefore, it is compatible with
1065#     traditional throttling, meanwhile the throttle increment won't
1066#     be excessive at tail stage.  The default value is false.  (Since
1067#     5.1)
1068#
1069# @tls-creds: ID of the 'tls-creds' object that provides credentials
1070#     for establishing a TLS connection over the migration data
1071#     channel.  On the outgoing side of the migration, the credentials
1072#     must be for a 'client' endpoint, while for the incoming side the
1073#     credentials must be for a 'server' endpoint.  An empty string
1074#     means that QEMU will use plain text mode for migration, rather
1075#     than TLS (Since 2.7) Note: 2.8 reports this by omitting
1076#     tls-creds instead.
1077#
1078# @tls-hostname: hostname of the target host for the migration.  This
1079#     is required when using x509 based TLS credentials and the
1080#     migration URI does not already include a hostname.  For example
1081#     if using fd: or exec: based migration, the hostname must be
1082#     provided so that the server's x509 certificate identity can be
1083#     validated.  (Since 2.7) An empty string means that QEMU will use
1084#     the hostname associated with the migration URI, if any.  (Since
1085#     2.9) Note: 2.8 reports this by omitting tls-hostname instead.
1086#
1087# @tls-authz: ID of the 'authz' object subclass that provides access
1088#     control checking of the TLS x509 certificate distinguished name.
1089#     (Since 4.0)
1090#
1091# @max-bandwidth: to set maximum speed for migration.  maximum speed
1092#     in bytes per second.  (Since 2.8)
1093#
1094# @downtime-limit: set maximum tolerated downtime for migration.
1095#     maximum downtime in milliseconds (Since 2.8)
1096#
1097# @x-checkpoint-delay: the delay time between two COLO checkpoints.
1098#     (Since 2.8)
1099#
1100# @block-incremental: Affects how much storage is migrated when the
1101#     block migration capability is enabled.  When false, the entire
1102#     storage backing chain is migrated into a flattened image at the
1103#     destination; when true, only the active qcow2 layer is migrated
1104#     and the destination must already have access to the same backing
1105#     chain as was used on the source.  (since 2.10)
1106#
1107# @multifd-channels: Number of channels used to migrate data in
#     parallel.  This is the same number as the number of sockets
1109#     used for migration.  The default value is 2 (since 4.0)
1110#
1111# @xbzrle-cache-size: cache size to be used by XBZRLE migration.  It
1112#     needs to be a multiple of the target page size and a power of 2
1113#     (Since 2.11)
1114#
1115# @max-postcopy-bandwidth: Background transfer bandwidth during
1116#     postcopy.  Defaults to 0 (unlimited).  In bytes per second.
1117#     (Since 3.0)
1118#
1119# @max-cpu-throttle: maximum cpu throttle percentage.  Defaults to 99.
1120#     (Since 3.1)
1121#
1122# @multifd-compression: Which compression method to use.  Defaults to
1123#     none.  (Since 5.0)
1124#
1125# @multifd-zlib-level: Set the compression level to be used in live
1126#     migration, the compression level is an integer between 0 and 9,
1127#     where 0 means no compression, 1 means the best compression
1128#     speed, and 9 means best compression ratio which will consume
1129#     more CPU. Defaults to 1. (Since 5.0)
1130#
1131# @multifd-zstd-level: Set the compression level to be used in live
1132#     migration, the compression level is an integer between 0 and 20,
1133#     where 0 means no compression, 1 means the best compression
1134#     speed, and 20 means best compression ratio which will consume
1135#     more CPU. Defaults to 1. (Since 5.0)
1136#
1137# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
1138#     aliases for the purpose of dirty bitmap migration.  Such aliases
1139#     may for example be the corresponding names on the opposite site.
1140#     The mapping must be one-to-one, but not necessarily complete: On
1141#     the source, unmapped bitmaps and all bitmaps on unmapped nodes
1142#     will be ignored.  On the destination, encountering an unmapped
1143#     alias in the incoming migration stream will result in a report,
1144#     and all further bitmap migration data will then be discarded.
1145#     Note that the destination does not know about bitmaps it does
1146#     not receive, so there is no limitation or requirement regarding
1147#     the number of bitmaps received, or how they are named, or on
1148#     which nodes they are placed.  By default (when this parameter
1149#     has never been set), bitmap names are mapped to themselves.
1150#     Nodes are mapped to their block device name if there is one, and
1151#     to their node name otherwise.  (Since 5.2)
1152#
# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty
#     limit during live migration.  Should be in the range 1 to
#     1000ms.  Defaults to 1000ms.  (Since 8.1)
1156#
1157# Features:
1158#
1159# @unstable: Members @x-checkpoint-delay and @x-vcpu-dirty-limit-period
1160#            are experimental.
1161#
1162# Since: 2.4
1163##
1164{ 'struct': 'MigrationParameters',
1165  'data': { '*announce-initial': 'size',
1166            '*announce-max': 'size',
1167            '*announce-rounds': 'size',
1168            '*announce-step': 'size',
1169            '*compress-level': 'uint8',
1170            '*compress-threads': 'uint8',
1171            '*compress-wait-thread': 'bool',
1172            '*decompress-threads': 'uint8',
1173            '*throttle-trigger-threshold': 'uint8',
1174            '*cpu-throttle-initial': 'uint8',
1175            '*cpu-throttle-increment': 'uint8',
1176            '*cpu-throttle-tailslow': 'bool',
1177            '*tls-creds': 'str',
1178            '*tls-hostname': 'str',
1179            '*tls-authz': 'str',
1180            '*max-bandwidth': 'size',
1181            '*downtime-limit': 'uint64',
1182            '*x-checkpoint-delay': { 'type': 'uint32',
1183                                     'features': [ 'unstable' ] },
1184            '*block-incremental': 'bool',
1185            '*multifd-channels': 'uint8',
1186            '*xbzrle-cache-size': 'size',
1187            '*max-postcopy-bandwidth': 'size',
1188            '*max-cpu-throttle': 'uint8',
1189            '*multifd-compression': 'MultiFDCompression',
1190            '*multifd-zlib-level': 'uint8',
1191            '*multifd-zstd-level': 'uint8',
1192            '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ],
1193            '*x-vcpu-dirty-limit-period': { 'type': 'uint64',
1194                                            'features': [ 'unstable' ] } } }
1195
1196##
1197# @query-migrate-parameters:
1198#
1199# Returns information about the current migration parameters
1200#
1201# Returns: @MigrationParameters
1202#
1203# Since: 2.4
1204#
1205# Example:
1206#
1207# -> { "execute": "query-migrate-parameters" }
1208# <- { "return": {
1209#          "decompress-threads": 2,
1210#          "cpu-throttle-increment": 10,
1211#          "compress-threads": 8,
1212#          "compress-level": 1,
1213#          "cpu-throttle-initial": 20,
1214#          "max-bandwidth": 33554432,
1215#          "downtime-limit": 300
1216#       }
1217#    }
1218##
1219{ 'command': 'query-migrate-parameters',
1220  'returns': 'MigrationParameters' }
1221
1222##
1223# @migrate-start-postcopy:
1224#
1225# Followup to a migration command to switch the migration to postcopy
1226# mode.  The postcopy-ram capability must be set on both source and
1227# destination before the original migration command.
1228#
1229# Since: 2.5
1230#
1231# Example:
1232#
1233# -> { "execute": "migrate-start-postcopy" }
1234# <- { "return": {} }
1235##
1236{ 'command': 'migrate-start-postcopy' }
1237
1238##
1239# @MIGRATION:
1240#
1241# Emitted when a migration event happens
1242#
1243# @status: @MigrationStatus describing the current migration status.
1244#
1245# Since: 2.4
1246#
1247# Example:
1248#
1249# <- {"timestamp": {"seconds": 1432121972, "microseconds": 744001},
1250#     "event": "MIGRATION",
1251#     "data": {"status": "completed"} }
1252##
1253{ 'event': 'MIGRATION',
1254  'data': {'status': 'MigrationStatus'}}
1255
1256##
1257# @MIGRATION_PASS:
1258#
1259# Emitted from the source side of a migration at the start of each
1260# pass (when it syncs the dirty bitmap)
1261#
1262# @pass: An incrementing count (starting at 1 on the first pass)
1263#
1264# Since: 2.6
1265#
1266# Example:
1267#
1268# <- { "timestamp": {"seconds": 1449669631, "microseconds": 239225},
1269#       "event": "MIGRATION_PASS", "data": {"pass": 2} }
1270##
1271{ 'event': 'MIGRATION_PASS',
1272  'data': { 'pass': 'int' } }
1273
1274##
1275# @COLOMessage:
1276#
1277# The message transmission between Primary side and Secondary side.
1278#
1279# @checkpoint-ready: Secondary VM (SVM) is ready for checkpointing
1280#
1281# @checkpoint-request: Primary VM (PVM) tells SVM to prepare for
1282#     checkpointing
1283#
1284# @checkpoint-reply: SVM gets PVM's checkpoint request
1285#
1286# @vmstate-send: VM's state will be sent by PVM.
1287#
1288# @vmstate-size: The total size of VMstate.
1289#
1290# @vmstate-received: VM's state has been received by SVM.
1291#
1292# @vmstate-loaded: VM's state has been loaded by SVM.
1293#
1294# Since: 2.8
1295##
1296{ 'enum': 'COLOMessage',
1297  'data': [ 'checkpoint-ready', 'checkpoint-request', 'checkpoint-reply',
1298            'vmstate-send', 'vmstate-size', 'vmstate-received',
1299            'vmstate-loaded' ] }
1300
1301##
1302# @COLOMode:
1303#
1304# The COLO current mode.
1305#
1306# @none: COLO is disabled.
1307#
1308# @primary: COLO node in primary side.
1309#
# @secondary: COLO node in secondary side.
1311#
1312# Since: 2.8
1313##
1314{ 'enum': 'COLOMode',
1315  'data': [ 'none', 'primary', 'secondary'] }
1316
1317##
1318# @FailoverStatus:
1319#
1320# An enumeration of COLO failover status
1321#
1322# @none: no failover has ever happened
1323#
1324# @require: got failover requirement but not handled
1325#
1326# @active: in the process of doing failover
1327#
1328# @completed: finish the process of failover
1329#
1330# @relaunch: restart the failover process, from 'none' -> 'completed'
1331#     (Since 2.9)
1332#
1333# Since: 2.8
1334##
1335{ 'enum': 'FailoverStatus',
1336  'data': [ 'none', 'require', 'active', 'completed', 'relaunch' ] }
1337
1338##
1339# @COLO_EXIT:
1340#
1341# Emitted when VM finishes COLO mode due to some errors happening or
1342# at the request of users.
1343#
1344# @mode: report COLO mode when COLO exited.
1345#
1346# @reason: describes the reason for the COLO exit.
1347#
1348# Since: 3.1
1349#
1350# Example:
1351#
1352# <- { "timestamp": {"seconds": 2032141960, "microseconds": 417172},
1353#      "event": "COLO_EXIT", "data": {"mode": "primary", "reason": "request" } }
1354##
1355{ 'event': 'COLO_EXIT',
1356  'data': {'mode': 'COLOMode', 'reason': 'COLOExitReason' } }
1357
1358##
1359# @COLOExitReason:
1360#
1361# The reason for a COLO exit.
1362#
1363# @none: failover has never happened.  This state does not occur in
1364#     the COLO_EXIT event, and is only visible in the result of
1365#     query-colo-status.
1366#
1367# @request: COLO exit is due to an external request.
1368#
1369# @error: COLO exit is due to an internal error.
1370#
1371# @processing: COLO is currently handling a failover (since 4.0).
1372#
1373# Since: 3.1
1374##
1375{ 'enum': 'COLOExitReason',
1376  'data': [ 'none', 'request', 'error' , 'processing' ] }
1377
1378##
1379# @x-colo-lost-heartbeat:
1380#
1381# Tell qemu that heartbeat is lost, request it to do takeover
1382# procedures.  If this command is sent to the PVM, the Primary side
1383# will exit COLO mode.  If sent to the Secondary, the Secondary side
1384# will run failover work, then takes over server operation to become
1385# the service VM.
1386#
1387# Features:
1388#
1389# @unstable: This command is experimental.
1390#
1391# Since: 2.8
1392#
1393# Example:
1394#
1395# -> { "execute": "x-colo-lost-heartbeat" }
1396# <- { "return": {} }
1397##
1398{ 'command': 'x-colo-lost-heartbeat',
1399  'features': [ 'unstable' ],
1400  'if': 'CONFIG_REPLICATION' }
1401
1402##
1403# @migrate_cancel:
1404#
1405# Cancel the current executing migration process.
1406#
1407# Returns: nothing on success
1408#
1409# Notes: This command succeeds even if there is no migration process
1410#     running.
1411#
1412# Since: 0.14
1413#
1414# Example:
1415#
1416# -> { "execute": "migrate_cancel" }
1417# <- { "return": {} }
1418##
1419{ 'command': 'migrate_cancel' }
1420
1421##
1422# @migrate-continue:
1423#
1424# Continue migration when it's in a paused state.
1425#
1426# @state: The state the migration is currently expected to be in
1427#
1428# Returns: nothing on success
1429#
1430# Since: 2.11
1431#
1432# Example:
1433#
1434# -> { "execute": "migrate-continue" , "arguments":
1435#      { "state": "pre-switchover" } }
1436# <- { "return": {} }
1437##
1438{ 'command': 'migrate-continue', 'data': {'state': 'MigrationStatus'} }
1439
1440##
1441# @migrate:
1442#
1443# Migrates the current running guest to another Virtual Machine.
1444#
1445# @uri: the Uniform Resource Identifier of the destination VM
1446#
1447# @blk: do block migration (full disk copy)
1448#
1449# @inc: incremental disk copy migration
1450#
1451# @detach: this argument exists only for compatibility reasons and is
1452#     ignored by QEMU
1453#
1454# @resume: resume one paused migration, default "off". (since 3.0)
1455#
1456# Returns: nothing on success
1457#
1458# Since: 0.14
1459#
1460# Notes:
1461#
1462# 1. The 'query-migrate' command should be used to check migration's
1463#    progress and final result (this information is provided by the
1464#    'status' member)
1465#
1466# 2. All boolean arguments default to false
1467#
1468# 3. The user Monitor's "detach" argument is invalid in QMP and should
1469#    not be used
1470#
1471# Example:
1472#
1473# -> { "execute": "migrate", "arguments": { "uri": "tcp:0:4446" } }
1474# <- { "return": {} }
1475##
1476{ 'command': 'migrate',
1477  'data': {'uri': 'str', '*blk': 'bool', '*inc': 'bool',
1478           '*detach': 'bool', '*resume': 'bool' } }
1479
1480##
1481# @migrate-incoming:
1482#
1483# Start an incoming migration, the qemu must have been started with
1484# -incoming defer
1485#
1486# @uri: The Uniform Resource Identifier identifying the source or
1487#     address to listen on
1488#
1489# Returns: nothing on success
1490#
1491# Since: 2.3
1492#
1493# Notes:
1494#
1495# 1. It's a bad idea to use a string for the uri, but it needs
1496#    to stay compatible with -incoming and the format of the uri
1497#    is already exposed above libvirt.
1498#
1499# 2. QEMU must be started with -incoming defer to allow
1500#    migrate-incoming to be used.
1501#
1502# 3. The uri format is the same as for -incoming
1503#
1504# Example:
1505#
1506# -> { "execute": "migrate-incoming",
1507#      "arguments": { "uri": "tcp::4446" } }
1508# <- { "return": {} }
1509##
1510{ 'command': 'migrate-incoming', 'data': {'uri': 'str' } }
1511
1512##
1513# @xen-save-devices-state:
1514#
1515# Save the state of all devices to file.  The RAM and the block
1516# devices of the VM are not saved by this command.
1517#
1518# @filename: the file to save the state of the devices to as binary
1519#     data.  See xen-save-devices-state.txt for a description of the
1520#     binary format.
1521#
1522# @live: Optional argument to ask QEMU to treat this command as part
1523#     of a live migration.  Default to true.  (since 2.11)
1524#
1525# Returns: Nothing on success
1526#
1527# Since: 1.1
1528#
1529# Example:
1530#
1531# -> { "execute": "xen-save-devices-state",
1532#      "arguments": { "filename": "/tmp/save" } }
1533# <- { "return": {} }
1534##
1535{ 'command': 'xen-save-devices-state',
1536  'data': {'filename': 'str', '*live':'bool' } }
1537
1538##
1539# @xen-set-global-dirty-log:
1540#
1541# Enable or disable the global dirty log mode.
1542#
1543# @enable: true to enable, false to disable.
1544#
1545# Returns: nothing
1546#
1547# Since: 1.3
1548#
1549# Example:
1550#
1551# -> { "execute": "xen-set-global-dirty-log",
1552#      "arguments": { "enable": true } }
1553# <- { "return": {} }
1554##
1555{ 'command': 'xen-set-global-dirty-log', 'data': { 'enable': 'bool' } }
1556
1557##
1558# @xen-load-devices-state:
1559#
1560# Load the state of all devices from file.  The RAM and the block
1561# devices of the VM are not loaded by this command.
1562#
1563# @filename: the file to load the state of the devices from as binary
1564#     data.  See xen-save-devices-state.txt for a description of the
1565#     binary format.
1566#
1567# Since: 2.7
1568#
1569# Example:
1570#
1571# -> { "execute": "xen-load-devices-state",
1572#      "arguments": { "filename": "/tmp/resume" } }
1573# <- { "return": {} }
1574##
1575{ 'command': 'xen-load-devices-state', 'data': {'filename': 'str'} }
1576
1577##
1578# @xen-set-replication:
1579#
1580# Enable or disable replication.
1581#
1582# @enable: true to enable, false to disable.
1583#
1584# @primary: true for primary or false for secondary.
1585#
1586# @failover: true to do failover, false to stop.  but cannot be
1587#     specified if 'enable' is true.  default value is false.
1588#
1589# Returns: nothing.
1590#
1591# Example:
1592#
1593# -> { "execute": "xen-set-replication",
1594#      "arguments": {"enable": true, "primary": false} }
1595# <- { "return": {} }
1596#
1597# Since: 2.9
1598##
1599{ 'command': 'xen-set-replication',
1600  'data': { 'enable': 'bool', 'primary': 'bool', '*failover': 'bool' },
1601  'if': 'CONFIG_REPLICATION' }
1602
1603##
1604# @ReplicationStatus:
1605#
1606# The result format for 'query-xen-replication-status'.
1607#
1608# @error: true if an error happened, false if replication is normal.
1609#
1610# @desc: the human readable error description string, when @error is
1611#     'true'.
1612#
1613# Since: 2.9
1614##
1615{ 'struct': 'ReplicationStatus',
1616  'data': { 'error': 'bool', '*desc': 'str' },
1617  'if': 'CONFIG_REPLICATION' }
1618
1619##
1620# @query-xen-replication-status:
1621#
1622# Query replication status while the vm is running.
1623#
1624# Returns: A @ReplicationStatus object showing the status.
1625#
1626# Example:
1627#
1628# -> { "execute": "query-xen-replication-status" }
1629# <- { "return": { "error": false } }
1630#
1631# Since: 2.9
1632##
1633{ 'command': 'query-xen-replication-status',
1634  'returns': 'ReplicationStatus',
1635  'if': 'CONFIG_REPLICATION' }
1636
1637##
1638# @xen-colo-do-checkpoint:
1639#
1640# Xen uses this command to notify replication to trigger a checkpoint.
1641#
1642# Returns: nothing.
1643#
1644# Example:
1645#
1646# -> { "execute": "xen-colo-do-checkpoint" }
1647# <- { "return": {} }
1648#
1649# Since: 2.9
1650##
1651{ 'command': 'xen-colo-do-checkpoint',
1652  'if': 'CONFIG_REPLICATION' }
1653
1654##
1655# @COLOStatus:
1656#
1657# The result format for 'query-colo-status'.
1658#
1659# @mode: COLO running mode.  If COLO is running, this field will
1660#     return 'primary' or 'secondary'.
1661#
1662# @last-mode: COLO last running mode.  If COLO is running, this field
1663#     will return same like mode field, after failover we can use this
1664#     field to get last colo mode.  (since 4.0)
1665#
1666# @reason: describes the reason for the COLO exit.
1667#
1668# Since: 3.1
1669##
1670{ 'struct': 'COLOStatus',
1671  'data': { 'mode': 'COLOMode', 'last-mode': 'COLOMode',
1672            'reason': 'COLOExitReason' },
1673  'if': 'CONFIG_REPLICATION' }
1674
1675##
1676# @query-colo-status:
1677#
1678# Query COLO status while the vm is running.
1679#
1680# Returns: A @COLOStatus object showing the status.
1681#
1682# Example:
1683#
1684# -> { "execute": "query-colo-status" }
1685# <- { "return": { "mode": "primary", "last-mode": "none", "reason": "request" } }
1686#
1687# Since: 3.1
1688##
1689{ 'command': 'query-colo-status',
1690  'returns': 'COLOStatus',
1691  'if': 'CONFIG_REPLICATION' }
1692
1693##
1694# @migrate-recover:
1695#
1696# Provide a recovery migration stream URI.
1697#
1698# @uri: the URI to be used for the recovery of migration stream.
1699#
1700# Returns: nothing.
1701#
1702# Example:
1703#
1704# -> { "execute": "migrate-recover",
1705#      "arguments": { "uri": "tcp:192.168.1.200:12345" } }
1706# <- { "return": {} }
1707#
1708# Since: 3.0
1709##
1710{ 'command': 'migrate-recover',
1711  'data': { 'uri': 'str' },
1712  'allow-oob': true }
1713
1714##
1715# @migrate-pause:
1716#
1717# Pause a migration.  Currently it only supports postcopy.
1718#
1719# Returns: nothing.
1720#
1721# Example:
1722#
1723# -> { "execute": "migrate-pause" }
1724# <- { "return": {} }
1725#
1726# Since: 3.0
1727##
1728{ 'command': 'migrate-pause', 'allow-oob': true }
1729
1730##
1731# @UNPLUG_PRIMARY:
1732#
1733# Emitted from source side of a migration when migration state is
1734# WAIT_UNPLUG. Device was unplugged by guest operating system.  Device
1735# resources in QEMU are kept on standby to be able to re-plug it in
1736# case of migration failure.
1737#
1738# @device-id: QEMU device id of the unplugged device
1739#
1740# Since: 4.2
1741#
1742# Example:
1743#
1744# <- { "event": "UNPLUG_PRIMARY",
1745#      "data": { "device-id": "hostdev0" },
1746#      "timestamp": { "seconds": 1265044230, "microseconds": 450486 } }
1747##
1748{ 'event': 'UNPLUG_PRIMARY',
1749  'data': { 'device-id': 'str' } }
1750
1751##
1752# @DirtyRateVcpu:
1753#
1754# Dirty rate of vcpu.
1755#
1756# @id: vcpu index.
1757#
1758# @dirty-rate: dirty rate.
1759#
1760# Since: 6.2
1761##
1762{ 'struct': 'DirtyRateVcpu',
1763  'data': { 'id': 'int', 'dirty-rate': 'int64' } }
1764
1765##
1766# @DirtyRateStatus:
1767#
1768# Dirty page rate measurement status.
1769#
1770# @unstarted: measuring thread has not been started yet
1771#
1772# @measuring: measuring thread is running
1773#
1774# @measured: dirty page rate is measured and the results are available
1775#
1776# Since: 5.2
1777##
1778{ 'enum': 'DirtyRateStatus',
1779  'data': [ 'unstarted', 'measuring', 'measured'] }
1780
1781##
1782# @DirtyRateMeasureMode:
1783#
1784# Method used to measure dirty page rate.  Differences between
1785# available methods are explained in @calc-dirty-rate.
1786#
1787# @page-sampling: use page sampling
1788#
1789# @dirty-ring: use dirty ring
1790#
1791# @dirty-bitmap: use dirty bitmap
1792#
1793# Since: 6.2
1794##
1795{ 'enum': 'DirtyRateMeasureMode',
1796  'data': ['page-sampling', 'dirty-ring', 'dirty-bitmap'] }
1797
1798##
1799# @DirtyRateInfo:
1800#
1801# Information about measured dirty page rate.
1802#
1803# @dirty-rate: an estimate of the dirty page rate of the VM in units
1804#     of MiB/s.  Value is present only when @status is 'measured'.
1805#
1806# @status: current status of dirty page rate measurements
1807#
1808# @start-time: start time in units of second for calculation
1809#
1810# @calc-time: time period for which dirty page rate was measured
1811#     (in seconds)
1812#
1813# @sample-pages: number of sampled pages per GiB of guest memory.
1814#     Valid only in page-sampling mode (Since 6.1)
1815#
1816# @mode: mode that was used to measure dirty page rate (Since 6.2)
1817#
1818# @vcpu-dirty-rate: dirty rate for each vCPU if dirty-ring mode was
1819#     specified (Since 6.2)
1820#
1821# Since: 5.2
1822##
1823{ 'struct': 'DirtyRateInfo',
1824  'data': {'*dirty-rate': 'int64',
1825           'status': 'DirtyRateStatus',
1826           'start-time': 'int64',
1827           'calc-time': 'int64',
1828           'sample-pages': 'uint64',
1829           'mode': 'DirtyRateMeasureMode',
1830           '*vcpu-dirty-rate': [ 'DirtyRateVcpu' ] } }
1831
1832##
1833# @calc-dirty-rate:
1834#
1835# Start measuring dirty page rate of the VM.  Results can be retrieved
1836# with @query-dirty-rate after measurements are completed.
1837#
1838# Dirty page rate is the number of pages changed in a given time
1839# period expressed in MiB/s.  The following methods of calculation are
1840# available:
1841#
1842# 1. In page sampling mode, a random subset of pages are selected and
1843#    hashed twice: once at the beginning of measurement time period,
1844#    and once again at the end.  If two hashes for some page are
1845#    different, the page is counted as changed.  Since this method
1846#    relies on sampling and hashing, calculated dirty page rate is
1847#    only an estimate of its true value.  Increasing @sample-pages
1848#    improves estimation quality at the cost of higher computational
1849#    overhead.
1850#
# 2. Dirty bitmap mode captures writes to memory (for example by
#    temporarily revoking write access to all pages and counting page
#    faults).  Information about modified pages is collected into a
1854#    bitmap, where each bit corresponds to one guest page.  This mode
1855#    requires that KVM accelerator property "dirty-ring-size" is *not*
1856#    set.
1857#
1858# 3. Dirty ring mode is similar to dirty bitmap mode, but the
1859#    information about modified pages is collected into ring buffer.
1860#    This mode tracks page modification per each vCPU separately.  It
1861#    requires that KVM accelerator property "dirty-ring-size" is set.
1862#
1863# @calc-time: time period in units of second for which dirty page rate
1864#     is calculated.  Note that larger @calc-time values will
1865#     typically result in smaller dirty page rates because page
1866#     dirtying is a one-time event.  Once some page is counted as
1867#     dirty during @calc-time period, further writes to this page will
1868#     not increase dirty page rate anymore.
1869#
1870# @sample-pages: number of sampled pages per each GiB of guest memory.
1871#     Default value is 512.  For 4KiB guest pages this corresponds to
1872#     sampling ratio of 0.2%.  This argument is used only in page
1873#     sampling mode.  (Since 6.1)
1874#
1875# @mode: mechanism for tracking dirty pages.  Default value is
1876#     'page-sampling'.  Others are 'dirty-bitmap' and 'dirty-ring'.
1877#     (Since 6.1)
1878#
1879# Since: 5.2
1880#
1881# Example:
1882#
# -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 1,
#                                                 "sample-pages": 512} }
1885# <- { "return": {} }
1886##
1887{ 'command': 'calc-dirty-rate', 'data': {'calc-time': 'int64',
1888                                         '*sample-pages': 'int',
1889                                         '*mode': 'DirtyRateMeasureMode'} }
1890
1891##
1892# @query-dirty-rate:
1893#
1894# Query results of the most recent invocation of @calc-dirty-rate.
1895#
1896# Since: 5.2
1897#
1898# Examples:
1899#
1900# 1. Measurement is in progress:
1901#
1902# <- {"status": "measuring", "sample-pages": 512,
1903#     "mode": "page-sampling", "start-time": 3665220, "calc-time": 10}
1904#
1905# 2. Measurement has been completed:
1906#
1907# <- {"status": "measured", "sample-pages": 512, "dirty-rate": 108,
1908#     "mode": "page-sampling", "start-time": 3665220, "calc-time": 10}
1909##
1910{ 'command': 'query-dirty-rate', 'returns': 'DirtyRateInfo' }
1911
1912##
1913# @DirtyLimitInfo:
1914#
1915# Dirty page rate limit information of a virtual CPU.
1916#
1917# @cpu-index: index of a virtual CPU.
1918#
1919# @limit-rate: upper limit of dirty page rate (MB/s) for a virtual
1920#     CPU, 0 means unlimited.
1921#
1922# @current-rate: current dirty page rate (MB/s) for a virtual CPU.
1923#
1924# Since: 7.1
1925##
1926{ 'struct': 'DirtyLimitInfo',
1927  'data': { 'cpu-index': 'int',
1928            'limit-rate': 'uint64',
1929            'current-rate': 'uint64' } }
1930
1931##
1932# @set-vcpu-dirty-limit:
1933#
1934# Set the upper limit of dirty page rate for virtual CPUs.
1935#
1936# Requires KVM with accelerator property "dirty-ring-size" set.  A
1937# virtual CPU's dirty page rate is a measure of its memory load.  To
1938# observe dirty page rates, use @calc-dirty-rate.
1939#
1940# @cpu-index: index of a virtual CPU, default is all.
1941#
1942# @dirty-rate: upper limit of dirty page rate (MB/s) for virtual CPUs.
1943#
1944# Since: 7.1
1945#
1946# Example:
1947#
# -> {"execute": "set-vcpu-dirty-limit",
#     "arguments": { "dirty-rate": 200,
#                    "cpu-index": 1 } }
1951# <- { "return": {} }
1952##
1953{ 'command': 'set-vcpu-dirty-limit',
1954  'data': { '*cpu-index': 'int',
1955            'dirty-rate': 'uint64' } }
1956
1957##
1958# @cancel-vcpu-dirty-limit:
1959#
1960# Cancel the upper limit of dirty page rate for virtual CPUs.
1961#
1962# Cancel the dirty page limit for the vCPU which has been set with
1963# set-vcpu-dirty-limit command.  Note that this command requires
1964# support from dirty ring, same as the "set-vcpu-dirty-limit".
1965#
1966# @cpu-index: index of a virtual CPU, default is all.
1967#
1968# Since: 7.1
1969#
1970# Example:
1971#
# -> {"execute": "cancel-vcpu-dirty-limit",
#     "arguments": { "cpu-index": 1 } }
1974# <- { "return": {} }
1975##
1976{ 'command': 'cancel-vcpu-dirty-limit',
1977  'data': { '*cpu-index': 'int'} }
1978
1979##
1980# @query-vcpu-dirty-limit:
1981#
1982# Returns information about virtual CPU dirty page rate limits, if
1983# any.
1984#
1985# Since: 7.1
1986#
1987# Example:
1988#
1989# -> {"execute": "query-vcpu-dirty-limit"}
1990# <- {"return": [
1991#        { "limit-rate": 60, "current-rate": 3, "cpu-index": 0},
1992#        { "limit-rate": 60, "current-rate": 3, "cpu-index": 1}]}
1993##
1994{ 'command': 'query-vcpu-dirty-limit',
1995  'returns': [ 'DirtyLimitInfo' ] }
1996
1997##
1998# @MigrationThreadInfo:
1999#
# Information about migration threads
2001#
2002# @name: the name of migration thread
2003#
2004# @thread-id: ID of the underlying host thread
2005#
2006# Since: 7.2
2007##
2008{ 'struct': 'MigrationThreadInfo',
2009  'data': {'name': 'str',
2010           'thread-id': 'int'} }
2011
2012##
2013# @query-migrationthreads:
2014#
# Returns information about the migration threads.
#
# Returns: information about migration threads
2020#
2021# Since: 7.2
2022##
2023{ 'command': 'query-migrationthreads',
2024  'returns': ['MigrationThreadInfo'] }
2025
2026##
2027# @snapshot-save:
2028#
2029# Save a VM snapshot
2030#
2031# @job-id: identifier for the newly created job
2032#
2033# @tag: name of the snapshot to create
2034#
2035# @vmstate: block device node name to save vmstate to
2036#
2037# @devices: list of block device node names to save a snapshot to
2038#
2039# Applications should not assume that the snapshot save is complete
2040# when this command returns.  The job commands / events must be used
2041# to determine completion and to fetch details of any errors that
2042# arise.
2043#
2044# Note that execution of the guest CPUs may be stopped during the time
2045# it takes to save the snapshot.  A future version of QEMU may ensure
2046# CPUs are executing continuously.
2047#
2048# It is strongly recommended that @devices contain all writable block
2049# device nodes if a consistent snapshot is required.
2050#
2051# If @tag already exists, an error will be reported
2052#
2053# Returns: nothing
2054#
2055# Example:
2056#
2057# -> { "execute": "snapshot-save",
2058#      "arguments": {
2059#         "job-id": "snapsave0",
2060#         "tag": "my-snap",
2061#         "vmstate": "disk0",
2062#         "devices": ["disk0", "disk1"]
2063#      }
2064#    }
2065# <- { "return": { } }
2066# <- {"event": "JOB_STATUS_CHANGE",
2067#     "timestamp": {"seconds": 1432121972, "microseconds": 744001},
2068#     "data": {"status": "created", "id": "snapsave0"}}
2069# <- {"event": "JOB_STATUS_CHANGE",
2070#     "timestamp": {"seconds": 1432122172, "microseconds": 744001},
2071#     "data": {"status": "running", "id": "snapsave0"}}
2072# <- {"event": "STOP",
2073#     "timestamp": {"seconds": 1432122372, "microseconds": 744001} }
2074# <- {"event": "RESUME",
2075#     "timestamp": {"seconds": 1432122572, "microseconds": 744001} }
2076# <- {"event": "JOB_STATUS_CHANGE",
2077#     "timestamp": {"seconds": 1432122772, "microseconds": 744001},
2078#     "data": {"status": "waiting", "id": "snapsave0"}}
2079# <- {"event": "JOB_STATUS_CHANGE",
2080#     "timestamp": {"seconds": 1432122972, "microseconds": 744001},
2081#     "data": {"status": "pending", "id": "snapsave0"}}
2082# <- {"event": "JOB_STATUS_CHANGE",
2083#     "timestamp": {"seconds": 1432123172, "microseconds": 744001},
2084#     "data": {"status": "concluded", "id": "snapsave0"}}
2085# -> {"execute": "query-jobs"}
2086# <- {"return": [{"current-progress": 1,
2087#                 "status": "concluded",
2088#                 "total-progress": 1,
2089#                 "type": "snapshot-save",
2090#                 "id": "snapsave0"}]}
2091#
2092# Since: 6.0
2093##
2094{ 'command': 'snapshot-save',
2095  'data': { 'job-id': 'str',
2096            'tag': 'str',
2097            'vmstate': 'str',
2098            'devices': ['str'] } }
2099
2100##
2101# @snapshot-load:
2102#
2103# Load a VM snapshot
2104#
2105# @job-id: identifier for the newly created job
2106#
2107# @tag: name of the snapshot to load.
2108#
2109# @vmstate: block device node name to load vmstate from
2110#
2111# @devices: list of block device node names to load a snapshot from
2112#
2113# Applications should not assume that the snapshot load is complete
2114# when this command returns.  The job commands / events must be used
2115# to determine completion and to fetch details of any errors that
2116# arise.
2117#
2118# Note that execution of the guest CPUs will be stopped during the
2119# time it takes to load the snapshot.
2120#
2121# It is strongly recommended that @devices contain all writable block
2122# device nodes that can have changed since the original @snapshot-save
2123# command execution.
2124#
2125# Returns: nothing
2126#
2127# Example:
2128#
2129# -> { "execute": "snapshot-load",
2130#      "arguments": {
2131#         "job-id": "snapload0",
2132#         "tag": "my-snap",
2133#         "vmstate": "disk0",
2134#         "devices": ["disk0", "disk1"]
2135#      }
2136#    }
2137# <- { "return": { } }
2138# <- {"event": "JOB_STATUS_CHANGE",
2139#     "timestamp": {"seconds": 1472124172, "microseconds": 744001},
2140#     "data": {"status": "created", "id": "snapload0"}}
2141# <- {"event": "JOB_STATUS_CHANGE",
2142#     "timestamp": {"seconds": 1472125172, "microseconds": 744001},
2143#     "data": {"status": "running", "id": "snapload0"}}
2144# <- {"event": "STOP",
2145#     "timestamp": {"seconds": 1472125472, "microseconds": 744001} }
2146# <- {"event": "RESUME",
2147#     "timestamp": {"seconds": 1472125872, "microseconds": 744001} }
2148# <- {"event": "JOB_STATUS_CHANGE",
2149#     "timestamp": {"seconds": 1472126172, "microseconds": 744001},
2150#     "data": {"status": "waiting", "id": "snapload0"}}
2151# <- {"event": "JOB_STATUS_CHANGE",
2152#     "timestamp": {"seconds": 1472127172, "microseconds": 744001},
2153#     "data": {"status": "pending", "id": "snapload0"}}
2154# <- {"event": "JOB_STATUS_CHANGE",
2155#     "timestamp": {"seconds": 1472128172, "microseconds": 744001},
2156#     "data": {"status": "concluded", "id": "snapload0"}}
2157# -> {"execute": "query-jobs"}
2158# <- {"return": [{"current-progress": 1,
2159#                 "status": "concluded",
2160#                 "total-progress": 1,
2161#                 "type": "snapshot-load",
2162#                 "id": "snapload0"}]}
2163#
2164# Since: 6.0
2165##
2166{ 'command': 'snapshot-load',
2167  'data': { 'job-id': 'str',
2168            'tag': 'str',
2169            'vmstate': 'str',
2170            'devices': ['str'] } }
2171
2172##
2173# @snapshot-delete:
2174#
2175# Delete a VM snapshot
2176#
2177# @job-id: identifier for the newly created job
2178#
2179# @tag: name of the snapshot to delete.
2180#
2181# @devices: list of block device node names to delete a snapshot from
2182#
2183# Applications should not assume that the snapshot delete is complete
2184# when this command returns.  The job commands / events must be used
2185# to determine completion and to fetch details of any errors that
2186# arise.
2187#
2188# Returns: nothing
2189#
2190# Example:
2191#
2192# -> { "execute": "snapshot-delete",
2193#      "arguments": {
2194#         "job-id": "snapdelete0",
2195#         "tag": "my-snap",
2196#         "devices": ["disk0", "disk1"]
2197#      }
2198#    }
2199# <- { "return": { } }
2200# <- {"event": "JOB_STATUS_CHANGE",
2201#     "timestamp": {"seconds": 1442124172, "microseconds": 744001},
2202#     "data": {"status": "created", "id": "snapdelete0"}}
2203# <- {"event": "JOB_STATUS_CHANGE",
2204#     "timestamp": {"seconds": 1442125172, "microseconds": 744001},
2205#     "data": {"status": "running", "id": "snapdelete0"}}
2206# <- {"event": "JOB_STATUS_CHANGE",
2207#     "timestamp": {"seconds": 1442126172, "microseconds": 744001},
2208#     "data": {"status": "waiting", "id": "snapdelete0"}}
2209# <- {"event": "JOB_STATUS_CHANGE",
2210#     "timestamp": {"seconds": 1442127172, "microseconds": 744001},
2211#     "data": {"status": "pending", "id": "snapdelete0"}}
2212# <- {"event": "JOB_STATUS_CHANGE",
2213#     "timestamp": {"seconds": 1442128172, "microseconds": 744001},
2214#     "data": {"status": "concluded", "id": "snapdelete0"}}
2215# -> {"execute": "query-jobs"}
2216# <- {"return": [{"current-progress": 1,
2217#                 "status": "concluded",
2218#                 "total-progress": 1,
2219#                 "type": "snapshot-delete",
2220#                 "id": "snapdelete0"}]}
2221#
2222# Since: 6.0
2223##
2224{ 'command': 'snapshot-delete',
2225  'data': { 'job-id': 'str',
2226            'tag': 'str',
2227            'devices': ['str'] } }
2228