1# -*- Mode: Python -*-
2# vim: filetype=python
3#
4
5##
6# = Migration
7##
8
9{ 'include': 'common.json' }
10{ 'include': 'sockets.json' }
11
12##
13# @MigrationStats:
14#
15# Detailed migration status.
16#
17# @transferred: amount of bytes already transferred to the target VM
18#
19# @remaining: amount of bytes remaining to be transferred to the
20#     target VM
21#
22# @total: total amount of bytes involved in the migration process
23#
24# @duplicate: number of duplicate (zero) pages (since 1.2)
25#
26# @skipped: number of skipped zero pages (since 1.5)
27#
28# @normal: number of normal pages (since 1.2)
29#
30# @normal-bytes: number of normal bytes sent (since 1.2)
31#
32# @dirty-pages-rate: number of pages dirtied per second by the guest
33#     (since 1.3)
34#
35# @mbps: throughput in megabits/sec.  (since 1.6)
36#
37# @dirty-sync-count: number of times that dirty ram was synchronized
38#     (since 2.1)
39#
40# @postcopy-requests: The number of page requests received from the
41#     destination (since 2.7)
42#
43# @page-size: The number of bytes per page for the various page-based
44#     statistics (since 2.10)
45#
46# @multifd-bytes: The number of bytes sent through multifd (since 3.0)
47#
48# @pages-per-second: the number of memory pages transferred per second
49#     (Since 4.0)
50#
51# @precopy-bytes: The number of bytes sent in the pre-copy phase
52#     (since 7.0).
53#
54# @downtime-bytes: The number of bytes sent while the guest is paused
55#     (since 7.0).
56#
57# @postcopy-bytes: The number of bytes sent during the post-copy phase
58#     (since 7.0).
59#
60# @dirty-sync-missed-zero-copy: Number of times dirty RAM
61#     synchronization could not avoid copying dirty pages.  This is
62#     between 0 and @dirty-sync-count * @multifd-channels.  (since
63#     7.1)
64#
65# Since: 0.14
66##
67{ 'struct': 'MigrationStats',
68  'data': {'transferred': 'int', 'remaining': 'int', 'total': 'int' ,
69           'duplicate': 'int', 'skipped': 'int', 'normal': 'int',
70           'normal-bytes': 'int', 'dirty-pages-rate' : 'int',
71           'mbps' : 'number', 'dirty-sync-count' : 'int',
72           'postcopy-requests' : 'int', 'page-size' : 'int',
73           'multifd-bytes' : 'uint64', 'pages-per-second' : 'uint64',
74           'precopy-bytes' : 'uint64', 'downtime-bytes' : 'uint64',
75           'postcopy-bytes' : 'uint64',
76           'dirty-sync-missed-zero-copy' : 'uint64' } }
77
78##
79# @XBZRLECacheStats:
80#
81# Detailed XBZRLE migration cache statistics
82#
83# @cache-size: XBZRLE cache size
84#
85# @bytes: amount of bytes already transferred to the target VM
86#
87# @pages: amount of pages transferred to the target VM
88#
89# @cache-miss: number of cache misses
90#
91# @cache-miss-rate: rate of cache misses (since 2.1)
92#
93# @encoding-rate: rate of encoded bytes (since 5.1)
94#
95# @overflow: number of overflows
96#
97# Since: 1.2
98##
99{ 'struct': 'XBZRLECacheStats',
100  'data': {'cache-size': 'size', 'bytes': 'int', 'pages': 'int',
101           'cache-miss': 'int', 'cache-miss-rate': 'number',
102           'encoding-rate': 'number', 'overflow': 'int' } }
103
104##
105# @CompressionStats:
106#
107# Detailed migration compression statistics
108#
109# @pages: amount of pages compressed and transferred to the target VM
110#
111# @busy: count of times that no free thread was available to compress
112#     data
113#
114# @busy-rate: rate at which no free thread was available
115#
116# @compressed-size: amount of bytes after compression
117#
118# @compression-rate: rate of compressed size
119#
120# Since: 3.1
121##
122{ 'struct': 'CompressionStats',
123  'data': {'pages': 'int', 'busy': 'int', 'busy-rate': 'number',
124           'compressed-size': 'int', 'compression-rate': 'number' } }
125
126##
127# @MigrationStatus:
128#
129# An enumeration of migration status.
130#
131# @none: no migration has ever happened.
132#
133# @setup: migration process has been initiated.
134#
135# @cancelling: in the process of cancelling migration.
136#
137# @cancelled: cancelling migration is finished.
138#
139# @active: in the process of doing migration.
140#
141# @postcopy-active: like active, but now in postcopy mode.  (since
142#     2.5)
143#
144# @postcopy-paused: during postcopy but paused.  (since 3.0)
145#
146# @postcopy-recover: trying to recover from a paused postcopy.  (since
147#     3.0)
148#
149# @completed: migration is finished.
150#
151# @failed: some error occurred during migration process.
152#
153# @colo: VM is in the process of fault tolerance; the VM cannot enter
154#     this state unless the colo capability is enabled for migration.
155#     (since 2.8)
156#
157# @pre-switchover: Paused before device serialisation.  (since 2.11)
158#
159# @device: During device serialisation when pause-before-switchover is
160#     enabled (since 2.11)
161#
162# @wait-unplug: wait for device unplug request by guest OS to be
163#     completed.  (since 4.2)
164#
165# Since: 2.3
166##
167{ 'enum': 'MigrationStatus',
168  'data': [ 'none', 'setup', 'cancelling', 'cancelled',
169            'active', 'postcopy-active', 'postcopy-paused',
170            'postcopy-recover', 'completed', 'failed', 'colo',
171            'pre-switchover', 'device', 'wait-unplug' ] }
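
# For illustration only (timestamps arbitrary): with the events and
# pause-before-switchover capabilities enabled, the source monitor might
# see the following sequence around the switchover point:
#
# <- { "timestamp": {"seconds": 1432121972, "microseconds": 744001},
#      "event": "MIGRATION", "data": {"status": "pre-switchover"} }
# -> { "execute": "migrate-continue", "arguments": { "state": "pre-switchover" } }
# <- { "return": {} }
# <- { "timestamp": {"seconds": 1432121973, "microseconds": 12345},
#      "event": "MIGRATION", "data": {"status": "device"} }
# <- { "timestamp": {"seconds": 1432121974, "microseconds": 67890},
#      "event": "MIGRATION", "data": {"status": "completed"} }
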
172##
173# @VfioStats:
174#
175# Detailed VFIO devices migration statistics
176#
177# @transferred: amount of bytes transferred to the target VM by VFIO
178#     devices
179#
180# Since: 5.2
181##
182{ 'struct': 'VfioStats',
183  'data': {'transferred': 'int' } }
184
185##
186# @MigrationInfo:
187#
188# Information about current migration process.
189#
190# @status: @MigrationStatus describing the current migration status.
191#     If this field is not returned, no migration process has been
192#     initiated
193#
194# @ram: @MigrationStats containing detailed migration status, only
195#     returned if status is 'active' or 'completed' (since 1.2)
196#
197# @disk: @MigrationStats containing detailed disk migration status,
198#     only returned if status is 'active' and it is a block migration
199#
200# @xbzrle-cache: @XBZRLECacheStats containing detailed XBZRLE
201#     migration statistics, only returned if XBZRLE feature is on and
202#     status is 'active' or 'completed' (since 1.2)
203#
204# @total-time: total amount of milliseconds since migration started.
205#     If migration has ended, it returns the total migration time.
206#     (since 1.2)
207#
208# @downtime: only present when migration finishes correctly; total
209#     downtime in milliseconds for the guest.  (since 1.3)
210#
211# @expected-downtime: only present while migration is active; expected
212#     downtime in milliseconds for the guest, based on the last walk of
213#     the dirty bitmap.  (since 1.3)
214#
215# @setup-time: amount of setup time in milliseconds *before* the
216#     iterations begin but *after* the QMP command is issued.  This is
217#     designed to provide an accounting of any activities (such as
218#     RDMA pinning) which may be expensive, but do not actually occur
219#     during the iterative migration rounds themselves.  (since 1.6)
220#
221# @cpu-throttle-percentage: percentage of time guest cpus are being
222#     throttled during auto-converge.  This is only present when
223#     auto-converge has started throttling guest cpus.  (Since 2.7)
224#
225# @error-desc: the human readable error description string, when
226#     @status is 'failed'. Clients should not attempt to parse the
227#     error strings.  (Since 2.7)
228#
229# @postcopy-blocktime: total time when all vCPUs were blocked during
230#     postcopy live migration.  This is only present when the
231#     postcopy-blocktime migration capability is enabled.  (Since 3.0)
232#
233# @postcopy-vcpu-blocktime: list of the postcopy blocktime per vCPU.
234#     This is only present when the postcopy-blocktime migration
235#     capability is enabled.  (Since 3.0)
236#
237# @compression: migration compression statistics, only returned if
238#     compression feature is on and status is 'active' or 'completed'
239#     (Since 3.1)
240#
241# @socket-address: Only used for tcp, to know what the real port is
242#     (Since 4.0)
243#
244# @vfio: @VfioStats containing detailed VFIO devices migration
245#     statistics, only returned if VFIO device is present, migration
246#     is supported by all VFIO devices and status is 'active' or
247#     'completed' (since 5.2)
248#
249# @blocked-reasons: A list of reasons an outgoing migration is
250#     blocked.  Present and non-empty when migration is blocked.
251#     (since 6.0)
252#
253# Since: 0.14
254##
255{ 'struct': 'MigrationInfo',
256  'data': {'*status': 'MigrationStatus', '*ram': 'MigrationStats',
257           '*disk': 'MigrationStats',
258           '*vfio': 'VfioStats',
259           '*xbzrle-cache': 'XBZRLECacheStats',
260           '*total-time': 'int',
261           '*expected-downtime': 'int',
262           '*downtime': 'int',
263           '*setup-time': 'int',
264           '*cpu-throttle-percentage': 'int',
265           '*error-desc': 'str',
266           '*blocked-reasons': ['str'],
267           '*postcopy-blocktime' : 'uint32',
268           '*postcopy-vcpu-blocktime': ['uint32'],
269           '*compression': 'CompressionStats',
270           '*socket-address': ['SocketAddress'] } }
271
272##
273# @query-migrate:
274#
275# Returns information about the current migration process.  If
276# migration is active, there will be another json-object with RAM
277# migration status, and if block migration is active, another one
278# with block migration status.
279#
280# Returns: @MigrationInfo
281#
282# Since: 0.14
283#
284# Examples:
285#
286# 1. Before the first migration
287#
288# -> { "execute": "query-migrate" }
289# <- { "return": {} }
290#
291# 2. Migration is done and has succeeded
292#
293# -> { "execute": "query-migrate" }
294# <- { "return": {
295#         "status": "completed",
296#         "total-time":12345,
297#         "setup-time":12345,
298#         "downtime":12345,
299#         "ram":{
300#           "transferred":123,
301#           "remaining":123,
302#           "total":246,
303#           "duplicate":123,
304#           "normal":123,
305#           "normal-bytes":123456,
306#           "dirty-sync-count":15
307#         }
308#      }
309#    }
310#
311# 3. Migration is done and has failed
312#
313# -> { "execute": "query-migrate" }
314# <- { "return": { "status": "failed" } }
315#
316# 4. Migration is being performed and is not a block migration:
317#
318# -> { "execute": "query-migrate" }
319# <- {
320#       "return":{
321#          "status":"active",
322#          "total-time":12345,
323#          "setup-time":12345,
324#          "expected-downtime":12345,
325#          "ram":{
326#             "transferred":123,
327#             "remaining":123,
328#             "total":246,
329#             "duplicate":123,
330#             "normal":123,
331#             "normal-bytes":123456,
332#             "dirty-sync-count":15
333#          }
334#       }
335#    }
336#
337# 5. Migration is being performed and is a block migration:
338#
339# -> { "execute": "query-migrate" }
340# <- {
341#       "return":{
342#          "status":"active",
343#          "total-time":12345,
344#          "setup-time":12345,
345#          "expected-downtime":12345,
346#          "ram":{
347#             "total":1057024,
348#             "remaining":1053304,
349#             "transferred":3720,
350#             "duplicate":123,
351#             "normal":123,
352#             "normal-bytes":123456,
353#             "dirty-sync-count":15
354#          },
355#          "disk":{
356#             "total":20971520,
357#             "remaining":20880384,
358#             "transferred":91136
359#          }
360#       }
361#    }
362#
363# 6. Migration is being performed and XBZRLE is active:
364#
365# -> { "execute": "query-migrate" }
366# <- {
367#       "return":{
368#          "status":"active",
369#          "total-time":12345,
370#          "setup-time":12345,
371#          "expected-downtime":12345,
372#          "ram":{
373#             "total":1057024,
374#             "remaining":1053304,
375#             "transferred":3720,
376#             "duplicate":10,
377#             "normal":3333,
378#             "normal-bytes":3412992,
379#             "dirty-sync-count":15
380#          },
381#          "xbzrle-cache":{
382#             "cache-size":67108864,
383#             "bytes":20971520,
384#             "pages":2444343,
385#             "cache-miss":2244,
386#             "cache-miss-rate":0.123,
387#             "encoding-rate":80.1,
388#             "overflow":34434
389#          }
390#       }
391#    }
392##
393{ 'command': 'query-migrate', 'returns': 'MigrationInfo' }
394
395##
396# @MigrationCapability:
397#
398# Migration capabilities enumeration
399#
400# @xbzrle: Migration supports xbzrle (Xor Based Zero Run Length
401#     Encoding). This feature allows us to minimize migration traffic
402#     for certain workloads, by sending compressed differences of the
403#     pages
404#
405# @rdma-pin-all: Controls whether or not the entire VM memory
406#     footprint is mlock()'d on demand or all at once.  Refer to
407#     docs/rdma.txt for usage.  Disabled by default.  (since 2.0)
408#
409# @zero-blocks: During storage migration encode blocks of zeroes
410#     efficiently.  This essentially saves 1MB of zeroes per block on
411#     the wire.  Enabling requires both source and target VM to support
412#     this feature.  To enable it, it is sufficient to enable the
413#     capability on the source VM.  The feature is disabled by default.
414#     (since 1.6)
415#
416# @compress: Use multiple compression threads to accelerate live
417#     migration.  This feature can help to reduce the migration
418#     traffic, by sending compressed pages.  Please note that if
419#     compress and xbzrle are both on, compress only takes effect in
420#     the ram bulk stage; after that, it will be disabled and only
421#     xbzrle takes effect.  This can help to minimize migration
422#     traffic.  The feature is disabled by default.  (since 2.4)
423#
424# @events: generate events for each migration state change (since
425#     2.4)
426#
427# @auto-converge: If enabled, QEMU will automatically throttle down
428#     the guest to speed up convergence of RAM migration.  (since 1.6)
429#
430# @postcopy-ram: Start executing on the migration target before all of
431#     RAM has been migrated, pulling the remaining pages along as
432#     needed.  The capability must have the same setting on both source
433#     and target or migration will not even start.  NOTE: If the
434#     migration fails during postcopy, the VM will fail.  (since 2.6)
435#
436# @x-colo: If enabled, migration will never end, and the state of the
437#     VM on the primary side will be migrated continuously to the VM
438#     on the secondary side; this process is called COarse-Grain LOck
439#     Stepping (COLO) for Non-stop Service.  (since 2.8)
440#
441# @release-ram: if enabled, qemu will free the migrated ram pages on
442#     the source during postcopy-ram migration.  (since 2.9)
443#
444# @block: If enabled, QEMU will also migrate the contents of all block
445#     devices.  Default is disabled.  A possible alternative uses
446#     mirror jobs to a builtin NBD server on the destination, which
447#     offers more flexibility.  (Since 2.10)
448#
449# @return-path: If enabled, migration will use the return path even
450#     for precopy.  (since 2.10)
451#
452# @pause-before-switchover: Pause outgoing migration before
453#     serialising device state and before disabling block IO (since
454#     2.11)
455#
456# @multifd: Use more than one fd for migration (since 4.0)
457#
458# @dirty-bitmaps: If enabled, QEMU will migrate named dirty bitmaps.
459#     (since 2.12)
460#
461# @postcopy-blocktime: Calculate downtime for postcopy live migration
462#     (since 3.0)
463#
464# @late-block-activate: If enabled, the destination will not activate
465#     block devices (and thus take locks) immediately at the end of
466#     migration.  (since 3.0)
467#
468# @x-ignore-shared: If enabled, QEMU will not migrate shared memory
469#     that is accessible on the destination machine.  (since 4.0)
470#
471# @validate-uuid: Send the UUID of the source to allow the destination
472#     to ensure it is the same.  (since 4.2)
473#
474# @background-snapshot: If enabled, the migration stream will be a
475#     snapshot of the VM exactly at the point when the migration
476#     procedure starts.  The VM RAM is saved while the VM is running.
477#     (since 6.0)
478#
479# @zero-copy-send: Controls behavior on sending memory pages on
480#     migration.  When true, enables a zero-copy mechanism for sending
481#     memory pages, if host supports it.  Requires that QEMU be
482#     permitted to use locked memory for guest RAM pages.  (since 7.1)
483#
484# @postcopy-preempt: If enabled, the migration process will allow
485#     postcopy requests to preempt precopy stream, so postcopy
486#     requests will be handled faster.  This is a performance feature
487#     and should not affect the correctness of postcopy migration.
488#     (since 7.1)
489#
490# @switchover-ack: If enabled, migration will not stop the source VM
491#     and complete the migration until an ACK is received from the
492#     destination that it's OK to do so.  Exactly when this ACK is
493#     sent depends on the migrated devices that use this feature.
494#     For example, a device can use it to make sure some of its data
495#     is sent and loaded in the destination before doing switchover.
496#     This can reduce downtime if devices that support this capability
497#     are present.  'return-path' capability must be enabled to use
498#     it.  (since 8.1)
499#
500# Features:
501#
502# @unstable: Members @x-colo and @x-ignore-shared are experimental.
503#
504# Since: 1.2
505##
506{ 'enum': 'MigrationCapability',
507  'data': ['xbzrle', 'rdma-pin-all', 'auto-converge', 'zero-blocks',
508           'compress', 'events', 'postcopy-ram',
509           { 'name': 'x-colo', 'features': [ 'unstable' ] },
510           'release-ram',
511           'block', 'return-path', 'pause-before-switchover', 'multifd',
512           'dirty-bitmaps', 'postcopy-blocktime', 'late-block-activate',
513           { 'name': 'x-ignore-shared', 'features': [ 'unstable' ] },
514           'validate-uuid', 'background-snapshot',
515           'zero-copy-send', 'postcopy-preempt', 'switchover-ack'] }
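
# For illustration only: a minimal sketch of enabling the postcopy-ram
# capability.  It must be set on both the source and the destination
# before migration starts; the same command is issued on each side:
#
# -> { "execute": "migrate-set-capabilities", "arguments":
#      { "capabilities": [ { "capability": "postcopy-ram", "state": true } ] } }
# <- { "return": {} }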
516
517##
518# @MigrationCapabilityStatus:
519#
520# Migration capability information
521#
522# @capability: capability enum
523#
524# @state: capability state bool
525#
526# Since: 1.2
527##
528{ 'struct': 'MigrationCapabilityStatus',
529  'data': { 'capability' : 'MigrationCapability', 'state' : 'bool' } }
530
531##
532# @migrate-set-capabilities:
533#
534# Enable/Disable the following migration capabilities (like xbzrle)
535#
536# @capabilities: json array of capability modifications to make
537#
538# Since: 1.2
539#
540# Example:
541#
542# -> { "execute": "migrate-set-capabilities" , "arguments":
543#      { "capabilities": [ { "capability": "xbzrle", "state": true } ] } }
544# <- { "return": {} }
545##
546{ 'command': 'migrate-set-capabilities',
547  'data': { 'capabilities': ['MigrationCapabilityStatus'] } }
548
549##
550# @query-migrate-capabilities:
551#
552# Returns information about the current migration capabilities status
553#
554# Returns: a list of @MigrationCapabilityStatus
555#
556# Since: 1.2
557#
558# Example:
559#
560# -> { "execute": "query-migrate-capabilities" }
561# <- { "return": [
562#       {"state": false, "capability": "xbzrle"},
563#       {"state": false, "capability": "rdma-pin-all"},
564#       {"state": false, "capability": "auto-converge"},
565#       {"state": false, "capability": "zero-blocks"},
566#       {"state": false, "capability": "compress"},
567#       {"state": true, "capability": "events"},
568#       {"state": false, "capability": "postcopy-ram"},
569#       {"state": false, "capability": "x-colo"}
570#    ]}
571##
572{ 'command': 'query-migrate-capabilities', 'returns': ['MigrationCapabilityStatus'] }
573
574##
575# @MultiFDCompression:
576#
577# An enumeration of multifd compression methods.
578#
579# @none: no compression.
580#
581# @zlib: use zlib compression method.
582#
583# @zstd: use zstd compression method.
584#
585# Since: 5.0
586##
587{ 'enum': 'MultiFDCompression',
588  'data': [ 'none', 'zlib',
589            { 'name': 'zstd', 'if': 'CONFIG_ZSTD' } ] }
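
# For illustration only: a sketch of selecting multifd with zlib
# compression (channel count and method are arbitrary choices here);
# the multifd capability is assumed to be enabled first:
#
# -> { "execute": "migrate-set-capabilities", "arguments":
#      { "capabilities": [ { "capability": "multifd", "state": true } ] } }
# <- { "return": {} }
# -> { "execute": "migrate-set-parameters", "arguments":
#      { "multifd-channels": 4, "multifd-compression": "zlib" } }
# <- { "return": {} }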
590
591##
592# @BitmapMigrationBitmapAliasTransform:
593#
594# @persistent: If present, the bitmap will be made persistent or
595#     transient depending on this parameter.
596#
597# Since: 6.0
598##
599{ 'struct': 'BitmapMigrationBitmapAliasTransform',
600  'data': {
601      '*persistent': 'bool'
602  } }
603
604##
605# @BitmapMigrationBitmapAlias:
606#
607# @name: The name of the bitmap.
608#
609# @alias: An alias name for migration (for example the bitmap name on
610#     the opposite site).
611#
612# @transform: Allows the modification of the migrated bitmap.  (since
613#     6.0)
614#
615# Since: 5.2
616##
617{ 'struct': 'BitmapMigrationBitmapAlias',
618  'data': {
619      'name': 'str',
620      'alias': 'str',
621      '*transform': 'BitmapMigrationBitmapAliasTransform'
622  } }
623
624##
625# @BitmapMigrationNodeAlias:
626#
627# Maps a block node name and the bitmaps it has to aliases for dirty
628# bitmap migration.
629#
630# @node-name: A block node name.
631#
632# @alias: An alias block node name for migration (for example the node
633#     name on the opposite site).
634#
635# @bitmaps: Mappings for the bitmaps on this node.
636#
637# Since: 5.2
638##
639{ 'struct': 'BitmapMigrationNodeAlias',
640  'data': {
641      'node-name': 'str',
642      'alias': 'str',
643      'bitmaps': [ 'BitmapMigrationBitmapAlias' ]
644  } }
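
# For illustration only: a sketch of a block-bitmap-mapping entry built
# from this struct, using hypothetical node and bitmap names; the
# dirty-bitmaps capability is assumed to be enabled:
#
# -> { "execute": "migrate-set-parameters", "arguments":
#      { "block-bitmap-mapping": [
#          { "node-name": "node0", "alias": "node-alias0",
#            "bitmaps": [ { "name": "bitmap0", "alias": "bitmap-alias0" } ] } ] } }
# <- { "return": {} }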
645
646##
647# @MigrationParameter:
648#
649# Migration parameters enumeration
650#
651# @announce-initial: Initial delay (in milliseconds) before sending
652#     the first announce (Since 4.0)
653#
654# @announce-max: Maximum delay (in milliseconds) between packets in
655#     the announcement (Since 4.0)
656#
657# @announce-rounds: Number of self-announce packets sent after
658#     migration (Since 4.0)
659#
660# @announce-step: Increase in delay (in milliseconds) between
661#     subsequent packets in the announcement (Since 4.0)
662#
663# @compress-level: Set the compression level to be used in live
664#     migration; the compression level is an integer between 0 and 9,
665#     where 0 means no compression, 1 means the best compression
666#     speed, and 9 means best compression ratio which will consume
667#     more CPU.
668#
669# @compress-threads: Set compression thread count to be used in live
670#     migration, the compression thread count is an integer between 1
671#     and 255.
672#
673# @compress-wait-thread: Controls behavior when all compression
674#     threads are currently busy.  If true (default), wait for a free
675#     compression thread to become available; otherwise, send the page
676#     uncompressed.  (Since 3.1)
677#
678# @decompress-threads: Set decompression thread count to be used in
679#     live migration; the decompression thread count is an integer
680#     between 1 and 255.  Usually, decompression is at least 4 times as
681#     fast as compression, so setting decompress-threads to about 1/4
682#     of compress-threads is adequate.
683#
684# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
685#     bytes_xfer_period to trigger throttling.  It is expressed as
686#     a percentage.  The default value is 50. (Since 5.0)
687#
688# @cpu-throttle-initial: Initial percentage of time guest cpus are
689#     throttled when migration auto-converge is activated.  The
690#     default value is 20. (Since 2.7)
691#
692# @cpu-throttle-increment: throttle percentage increase each time
693#     auto-converge detects that migration is not making progress.
694#     The default value is 10. (Since 2.7)
695#
696# @cpu-throttle-tailslow: Make CPU throttling slower at the tail
697#     stage.  At the tail stage of throttling, the Guest is very
698#     sensitive to CPU percentage while the @cpu-throttle-increment is
699#     usually excessive at that stage.  If this parameter is true, we
700#     will compute the ideal CPU percentage used by the Guest, which
701#     may exactly make the dirty rate match the dirty rate threshold.
702#     Then we will choose the smaller throttle increment of the one
703#     specified by @cpu-throttle-increment and the one generated from
704#     the ideal CPU percentage.  Therefore, it is compatible with
705#     traditional throttling, while the throttle increment won't be
706#     excessive at the tail stage.  The default value is false.  (Since
707#     5.1)
708#
709# @tls-creds: ID of the 'tls-creds' object that provides credentials
710#     for establishing a TLS connection over the migration data
711#     channel.  On the outgoing side of the migration, the credentials
712#     must be for a 'client' endpoint, while for the incoming side the
713#     credentials must be for a 'server' endpoint.  Setting this will
714#     enable TLS for all migrations.  The default is unset, resulting
715#     in unsecured migration at the QEMU level.  (Since 2.7)
716#
717# @tls-hostname: hostname of the target host for the migration.  This
718#     is required when using x509 based TLS credentials and the
719#     migration URI does not already include a hostname.  For example
720#     if using fd: or exec: based migration, the hostname must be
721#     provided so that the server's x509 certificate identity can be
722#     validated.  (Since 2.7)
723#
724# @tls-authz: ID of the 'authz' object subclass that provides access
725#     control checking of the TLS x509 certificate distinguished name.
726#     This object is only resolved at time of use, so can be deleted
727#     and recreated on the fly while the migration server is active.
728#     If missing, it will default to denying access (Since 4.0)
729#
730# @max-bandwidth: maximum speed for migration, in bytes per second.
731#     (Since 2.8)
732#
733# @downtime-limit: maximum tolerated downtime for migration, in
734#     milliseconds.  (Since 2.8)
735#
736# @x-checkpoint-delay: The delay time (in ms) between two COLO
737#     checkpoints in periodic mode.  (Since 2.8)
738#
739# @block-incremental: Affects how much storage is migrated when the
740#     block migration capability is enabled.  When false, the entire
741#     storage backing chain is migrated into a flattened image at the
742#     destination; when true, only the active qcow2 layer is migrated
743#     and the destination must already have access to the same backing
744#     chain as was used on the source.  (since 2.10)
745#
746# @multifd-channels: Number of channels used to migrate data in
747#     parallel.  This is the same as the number of sockets used for
748#     migration.  The default value is 2 (since 4.0)
749#
750# @xbzrle-cache-size: cache size to be used by XBZRLE migration.  It
751#     needs to be a multiple of the target page size and a power of 2
752#     (Since 2.11)
753#
754# @max-postcopy-bandwidth: Background transfer bandwidth during
755#     postcopy.  Defaults to 0 (unlimited).  In bytes per second.
756#     (Since 3.0)
757#
758# @max-cpu-throttle: maximum cpu throttle percentage.  Defaults to 99.
759#     (Since 3.1)
760#
761# @multifd-compression: Which compression method to use.  Defaults to
762#     none.  (Since 5.0)
763#
764# @multifd-zlib-level: Set the compression level to be used in live
765#     migration; the compression level is an integer between 0 and 9,
766#     where 0 means no compression, 1 means the best compression
767#     speed, and 9 means best compression ratio which will consume
768#     more CPU. Defaults to 1. (Since 5.0)
769#
770# @multifd-zstd-level: Set the compression level to be used in live
771#     migration; the compression level is an integer between 0 and 20,
772#     where 0 means no compression, 1 means the best compression
773#     speed, and 20 means best compression ratio which will consume
774#     more CPU. Defaults to 1. (Since 5.0)
775#
776# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
777#     aliases for the purpose of dirty bitmap migration.  Such aliases
778#     may for example be the corresponding names on the opposite site.
779#     The mapping must be one-to-one, but not necessarily complete: On
780#     the source, unmapped bitmaps and all bitmaps on unmapped nodes
781#     will be ignored.  On the destination, encountering an unmapped
782#     alias in the incoming migration stream will result in a report,
783#     and all further bitmap migration data will then be discarded.
784#     Note that the destination does not know about bitmaps it does
785#     not receive, so there is no limitation or requirement regarding
786#     the number of bitmaps received, or how they are named, or on
787#     which nodes they are placed.  By default (when this parameter
788#     has never been set), bitmap names are mapped to themselves.
789#     Nodes are mapped to their block device name if there is one, and
790#     to their node name otherwise.  (Since 5.2)
791#
792# Features:
793#
794# @unstable: Member @x-checkpoint-delay is experimental.
795#
796# Since: 2.4
797##
798{ 'enum': 'MigrationParameter',
799  'data': ['announce-initial', 'announce-max',
800           'announce-rounds', 'announce-step',
801           'compress-level', 'compress-threads', 'decompress-threads',
802           'compress-wait-thread', 'throttle-trigger-threshold',
803           'cpu-throttle-initial', 'cpu-throttle-increment',
804           'cpu-throttle-tailslow',
805           'tls-creds', 'tls-hostname', 'tls-authz', 'max-bandwidth',
806           'downtime-limit',
807           { 'name': 'x-checkpoint-delay', 'features': [ 'unstable' ] },
808           'block-incremental',
809           'multifd-channels',
810           'xbzrle-cache-size', 'max-postcopy-bandwidth',
811           'max-cpu-throttle', 'multifd-compression',
812           'multifd-zlib-level' ,'multifd-zstd-level',
813           'block-bitmap-mapping' ] }
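
# For illustration only: a sketch of enabling TLS for the outgoing
# migration, assuming a previously created 'tls-creds' object with the
# hypothetical id "tls0" and an arbitrary destination hostname:
#
# -> { "execute": "migrate-set-parameters",
#      "arguments": { "tls-creds": "tls0",
#                     "tls-hostname": "dest.example.com" } }
# <- { "return": {} }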
814
815##
816# @MigrateSetParameters:
817#
818# @announce-initial: Initial delay (in milliseconds) before sending
819#     the first announce (Since 4.0)
820#
821# @announce-max: Maximum delay (in milliseconds) between packets in
822#     the announcement (Since 4.0)
823#
824# @announce-rounds: Number of self-announce packets sent after
825#     migration (Since 4.0)
826#
827# @announce-step: Increase in delay (in milliseconds) between
828#     subsequent packets in the announcement (Since 4.0)
829#
830# @compress-level: compression level
831#
832# @compress-threads: compression thread count
833#
834# @compress-wait-thread: Controls behavior when all compression
835#     threads are currently busy.  If true (default), wait for a free
836#     compression thread to become available; otherwise, send the page
837#     uncompressed.  (Since 3.1)
838#
839# @decompress-threads: decompression thread count
840#
841# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
842#     bytes_xfer_period to trigger throttling.  It is expressed as
843#     a percentage.  The default value is 50. (Since 5.0)
844#
845# @cpu-throttle-initial: Initial percentage of time guest cpus are
846#     throttled when migration auto-converge is activated.  The
847#     default value is 20. (Since 2.7)
848#
849# @cpu-throttle-increment: throttle percentage increase each time
850#     auto-converge detects that migration is not making progress.
851#     The default value is 10. (Since 2.7)
852#
853# @cpu-throttle-tailslow: Make CPU throttling slower at the tail
854#     stage.  At the tail stage of throttling, the Guest is very
855#     sensitive to CPU percentage while the @cpu-throttle-increment is
856#     usually excessive at that stage.  If this parameter is true, we
857#     will compute the ideal CPU percentage used by the Guest, which
858#     may exactly make the dirty rate match the dirty rate threshold.
859#     Then we will choose the smaller throttle increment of the one
860#     specified by @cpu-throttle-increment and the one generated from
861#     the ideal CPU percentage.  Therefore, it is compatible with
862#     traditional throttling, while the throttle increment won't be
863#     excessive at the tail stage.  The default value is false.  (Since
864#     5.1)
865#
866# @tls-creds: ID of the 'tls-creds' object that provides credentials
867#     for establishing a TLS connection over the migration data
868#     channel.  On the outgoing side of the migration, the credentials
869#     must be for a 'client' endpoint, while for the incoming side the
870#     credentials must be for a 'server' endpoint.  Setting this to a
871#     non-empty string enables TLS for all migrations.  An empty
872#     string means that QEMU will use plain text mode for migration,
873#     rather than TLS (Since 2.9) Previously (since 2.7), this was
874#     reported by omitting tls-creds instead.
875#
876# @tls-hostname: hostname of the target host for the migration.  This
877#     is required when using x509 based TLS credentials and the
878#     migration URI does not already include a hostname.  For example
879#     if using fd: or exec: based migration, the hostname must be
880#     provided so that the server's x509 certificate identity can be
881#     validated.  (Since 2.7) An empty string means that QEMU will use
882#     the hostname associated with the migration URI, if any.  (Since
883#     2.9) Previously (since 2.7), this was reported by omitting
884#     tls-hostname instead.
885#
886# @max-bandwidth: maximum speed for migration, in bytes per second.
887#     (Since 2.8)
888#
889# @downtime-limit: maximum tolerated downtime for migration, in
890#     milliseconds.  (Since 2.8)
891#
892# @x-checkpoint-delay: the delay time between two COLO checkpoints.
893#     (Since 2.8)
894#
895# @block-incremental: Affects how much storage is migrated when the
896#     block migration capability is enabled.  When false, the entire
897#     storage backing chain is migrated into a flattened image at the
898#     destination; when true, only the active qcow2 layer is migrated
899#     and the destination must already have access to the same backing
900#     chain as was used on the source.  (since 2.10)
901#
902# @multifd-channels: Number of channels used to migrate data in
903#     parallel.  This is the same as the number of sockets used for
904#     migration.  The default value is 2 (since 4.0)
905#
906# @xbzrle-cache-size: cache size to be used by XBZRLE migration.  It
907#     needs to be a multiple of the target page size and a power of 2
908#     (Since 2.11)
909#
910# @max-postcopy-bandwidth: Background transfer bandwidth during
911#     postcopy.  Defaults to 0 (unlimited).  In bytes per second.
912#     (Since 3.0)
913#
914# @max-cpu-throttle: maximum cpu throttle percentage.  The default
915#     value is 99. (Since 3.1)
916#
917# @multifd-compression: Which compression method to use.  Defaults to
918#     none.  (Since 5.0)
919#
920# @multifd-zlib-level: Set the compression level to be used in live
921#     migration; the compression level is an integer between 0 and 9,
922#     where 0 means no compression, 1 means the best compression
923#     speed, and 9 means best compression ratio which will consume
924#     more CPU. Defaults to 1. (Since 5.0)
925#
926# @multifd-zstd-level: Set the compression level to be used in live
927#     migration; the compression level is an integer between 0 and 20,
928#     where 0 means no compression, 1 means the best compression
929#     speed, and 20 means best compression ratio which will consume
930#     more CPU. Defaults to 1. (Since 5.0)
931#
932# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
933#     aliases for the purpose of dirty bitmap migration.  Such aliases
934#     may for example be the corresponding names on the opposite site.
935#     The mapping must be one-to-one, but not necessarily complete: On
936#     the source, unmapped bitmaps and all bitmaps on unmapped nodes
937#     will be ignored.  On the destination, encountering an unmapped
938#     alias in the incoming migration stream will result in a report,
939#     and all further bitmap migration data will then be discarded.
940#     Note that the destination does not know about bitmaps it does
941#     not receive, so there is no limitation or requirement regarding
942#     the number of bitmaps received, or how they are named, or on
943#     which nodes they are placed.  By default (when this parameter
944#     has never been set), bitmap names are mapped to themselves.
945#     Nodes are mapped to their block device name if there is one, and
946#     to their node name otherwise.  (Since 5.2)
947#
948# Features:
949#
950# @unstable: Member @x-checkpoint-delay is experimental.
951#
952# TODO: either fuse back into MigrationParameters, or make
953#     MigrationParameters members mandatory
954#
955# Since: 2.4
956##
957{ 'struct': 'MigrateSetParameters',
958  'data': { '*announce-initial': 'size',
959            '*announce-max': 'size',
960            '*announce-rounds': 'size',
961            '*announce-step': 'size',
962            '*compress-level': 'uint8',
963            '*compress-threads': 'uint8',
964            '*compress-wait-thread': 'bool',
965            '*decompress-threads': 'uint8',
966            '*throttle-trigger-threshold': 'uint8',
967            '*cpu-throttle-initial': 'uint8',
968            '*cpu-throttle-increment': 'uint8',
969            '*cpu-throttle-tailslow': 'bool',
970            '*tls-creds': 'StrOrNull',
971            '*tls-hostname': 'StrOrNull',
972            '*tls-authz': 'StrOrNull',
973            '*max-bandwidth': 'size',
974            '*downtime-limit': 'uint64',
975            '*x-checkpoint-delay': { 'type': 'uint32',
976                                     'features': [ 'unstable' ] },
977            '*block-incremental': 'bool',
978            '*multifd-channels': 'uint8',
979            '*xbzrle-cache-size': 'size',
980            '*max-postcopy-bandwidth': 'size',
981            '*max-cpu-throttle': 'uint8',
982            '*multifd-compression': 'MultiFDCompression',
983            '*multifd-zlib-level': 'uint8',
984            '*multifd-zstd-level': 'uint8',
985            '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ] } }
986
987##
988# @migrate-set-parameters:
989#
990# Set various migration parameters.
991#
992# Since: 2.4
993#
994# Example:
995#
996# -> { "execute": "migrate-set-parameters" ,
997#      "arguments": { "compress-level": 1 } }
998# <- { "return": {} }
999##
1000{ 'command': 'migrate-set-parameters', 'boxed': true,
1001  'data': 'MigrateSetParameters' }
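
# For illustration only: a sketch of tuning several parameters at once
# (values are arbitrary; max-bandwidth is in bytes per second,
# downtime-limit is in milliseconds, and xbzrle-cache-size must be a
# power of 2 multiple of the target page size):
#
# -> { "execute": "migrate-set-parameters",
#      "arguments": { "max-bandwidth": 134217728,
#                     "downtime-limit": 500,
#                     "xbzrle-cache-size": 268435456 } }
# <- { "return": {} }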
1002
1003##
1004# @MigrationParameters:
1005#
1006# The optional members aren't actually optional.
1007#
1008# @announce-initial: Initial delay (in milliseconds) before sending
1009#     the first announce (Since 4.0)
1010#
1011# @announce-max: Maximum delay (in milliseconds) between packets in
1012#     the announcement (Since 4.0)
1013#
1014# @announce-rounds: Number of self-announce packets sent after
1015#     migration (Since 4.0)
1016#
1017# @announce-step: Increase in delay (in milliseconds) between
1018#     subsequent packets in the announcement (Since 4.0)
1019#
1020# @compress-level: compression level
1021#
1022# @compress-threads: compression thread count
1023#
1024# @compress-wait-thread: Controls behavior when all compression
1025#     threads are currently busy.  If true (default), wait for a free
1026#     compression thread to become available; otherwise, send the page
1027#     uncompressed.  (Since 3.1)
1028#
1029# @decompress-threads: decompression thread count
1030#
1031# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
1032#     bytes_xfer_period to trigger throttling.  It is expressed as
1033#     a percentage.  The default value is 50. (Since 5.0)
1034#
1035# @cpu-throttle-initial: Initial percentage of time guest cpus are
1036#     throttled when migration auto-converge is activated.  (Since
1037#     2.7)
1038#
1039# @cpu-throttle-increment: throttle percentage increase each time
1040#     auto-converge detects that migration is not making progress.
1041#     (Since 2.7)
1042#
1043# @cpu-throttle-tailslow: Make CPU throttling slower at the tail
1044#     stage.  At the tail stage of throttling, the Guest is very
1045#     sensitive to CPU percentage while the @cpu-throttle-increment is
1046#     usually excessive at that stage.  If this parameter is true, we
1047#     will compute the ideal CPU percentage used by the Guest, which
1048#     may exactly make the dirty rate match the dirty rate threshold.
1049#     Then we will choose the smaller throttle increment of the one
1050#     specified by @cpu-throttle-increment and the one generated from
1051#     the ideal CPU percentage.  Therefore, it is compatible with
1052#     traditional throttling, while the throttle increment won't be
1053#     excessive at the tail stage.  The default value is false.  (Since
1054#     5.1)
1055#
1056# @tls-creds: ID of the 'tls-creds' object that provides credentials
1057#     for establishing a TLS connection over the migration data
1058#     channel.  On the outgoing side of the migration, the credentials
1059#     must be for a 'client' endpoint, while for the incoming side the
1060#     credentials must be for a 'server' endpoint.  An empty string
1061#     means that QEMU will use plain text mode for migration, rather
1062#     than TLS (Since 2.7) Note: 2.8 reports this by omitting
1063#     tls-creds instead.
1064#
1065# @tls-hostname: hostname of the target host for the migration.  This
1066#     is required when using x509 based TLS credentials and the
1067#     migration URI does not already include a hostname.  For example
1068#     if using fd: or exec: based migration, the hostname must be
1069#     provided so that the server's x509 certificate identity can be
1070#     validated.  (Since 2.7) An empty string means that QEMU will use
1071#     the hostname associated with the migration URI, if any.  (Since
1072#     2.9) Note: 2.8 reports this by omitting tls-hostname instead.
1073#
1074# @tls-authz: ID of the 'authz' object subclass that provides access
1075#     control checking of the TLS x509 certificate distinguished name.
1076#     (Since 4.0)
1077#
1078# @max-bandwidth: maximum speed for migration, in bytes per second.
1079#     (Since 2.8)
1080#
1081# @downtime-limit: maximum tolerated downtime for migration, in
1082#     milliseconds.  (Since 2.8)
1083#
1084# @x-checkpoint-delay: the delay time between two COLO checkpoints.
1085#     (Since 2.8)
1086#
1087# @block-incremental: Affects how much storage is migrated when the
1088#     block migration capability is enabled.  When false, the entire
1089#     storage backing chain is migrated into a flattened image at the
1090#     destination; when true, only the active qcow2 layer is migrated
1091#     and the destination must already have access to the same backing
1092#     chain as was used on the source.  (since 2.10)
1093#
1094# @multifd-channels: Number of channels used to migrate data in
1095#     parallel.  This is the same as the number of sockets used for
1096#     migration.  The default value is 2 (since 4.0)
1097#
1098# @xbzrle-cache-size: cache size to be used by XBZRLE migration.  It
1099#     needs to be a multiple of the target page size and a power of 2
1100#     (Since 2.11)
1101#
1102# @max-postcopy-bandwidth: Background transfer bandwidth during
1103#     postcopy.  Defaults to 0 (unlimited).  In bytes per second.
1104#     (Since 3.0)
1105#
1106# @max-cpu-throttle: maximum cpu throttle percentage.  Defaults to 99.
1107#     (Since 3.1)
1108#
1109# @multifd-compression: Which compression method to use.  Defaults to
1110#     none.  (Since 5.0)
1111#
1112# @multifd-zlib-level: Set the compression level to be used in live
1113#     migration; the compression level is an integer between 0 and 9,
1114#     where 0 means no compression, 1 means the best compression
1115#     speed, and 9 means best compression ratio which will consume
1116#     more CPU. Defaults to 1. (Since 5.0)
1117#
1118# @multifd-zstd-level: Set the compression level to be used in live
1119#     migration; the compression level is an integer between 0 and 20,
1120#     where 0 means no compression, 1 means the best compression
1121#     speed, and 20 means best compression ratio which will consume
1122#     more CPU. Defaults to 1. (Since 5.0)
1123#
1124# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
1125#     aliases for the purpose of dirty bitmap migration.  Such aliases
1126#     may for example be the corresponding names on the opposite site.
1127#     The mapping must be one-to-one, but not necessarily complete: On
1128#     the source, unmapped bitmaps and all bitmaps on unmapped nodes
1129#     will be ignored.  On the destination, encountering an unmapped
1130#     alias in the incoming migration stream will result in a report,
1131#     and all further bitmap migration data will then be discarded.
1132#     Note that the destination does not know about bitmaps it does
1133#     not receive, so there is no limitation or requirement regarding
1134#     the number of bitmaps received, or how they are named, or on
1135#     which nodes they are placed.  By default (when this parameter
1136#     has never been set), bitmap names are mapped to themselves.
1137#     Nodes are mapped to their block device name if there is one, and
1138#     to their node name otherwise.  (Since 5.2)
1139#
1140# Features:
1141#
1142# @unstable: Member @x-checkpoint-delay is experimental.
1143#
1144# Since: 2.4
1145##
1146{ 'struct': 'MigrationParameters',
1147  'data': { '*announce-initial': 'size',
1148            '*announce-max': 'size',
1149            '*announce-rounds': 'size',
1150            '*announce-step': 'size',
1151            '*compress-level': 'uint8',
1152            '*compress-threads': 'uint8',
1153            '*compress-wait-thread': 'bool',
1154            '*decompress-threads': 'uint8',
1155            '*throttle-trigger-threshold': 'uint8',
1156            '*cpu-throttle-initial': 'uint8',
1157            '*cpu-throttle-increment': 'uint8',
1158            '*cpu-throttle-tailslow': 'bool',
1159            '*tls-creds': 'str',
1160            '*tls-hostname': 'str',
1161            '*tls-authz': 'str',
1162            '*max-bandwidth': 'size',
1163            '*downtime-limit': 'uint64',
1164            '*x-checkpoint-delay': { 'type': 'uint32',
1165                                     'features': [ 'unstable' ] },
1166            '*block-incremental': 'bool',
1167            '*multifd-channels': 'uint8',
1168            '*xbzrle-cache-size': 'size',
1169            '*max-postcopy-bandwidth': 'size',
1170            '*max-cpu-throttle': 'uint8',
1171            '*multifd-compression': 'MultiFDCompression',
1172            '*multifd-zlib-level': 'uint8',
1173            '*multifd-zstd-level': 'uint8',
1174            '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ] } }
1175
1176##
1177# @query-migrate-parameters:
1178#
1179# Returns information about the current migration parameters
1180#
1181# Returns: @MigrationParameters
1182#
1183# Since: 2.4
1184#
1185# Example:
1186#
1187# -> { "execute": "query-migrate-parameters" }
1188# <- { "return": {
1189#          "decompress-threads": 2,
1190#          "cpu-throttle-increment": 10,
1191#          "compress-threads": 8,
1192#          "compress-level": 1,
1193#          "cpu-throttle-initial": 20,
1194#          "max-bandwidth": 33554432,
1195#          "downtime-limit": 300
1196#       }
1197#    }
1198##
1199{ 'command': 'query-migrate-parameters',
1200  'returns': 'MigrationParameters' }
1201
1202##
1203# @migrate-start-postcopy:
1204#
1205# Follow-up to a migration command to switch the migration to postcopy
1206# mode.  The postcopy-ram capability must be set on both source and
1207# destination before the original migration command.
1208#
1209# Since: 2.5
1210#
1211# Example:
1212#
1213# -> { "execute": "migrate-start-postcopy" }
1214# <- { "return": {} }
1215##
1216{ 'command': 'migrate-start-postcopy' }
1217
1218##
1219# @MIGRATION:
1220#
1221# Emitted when a migration event happens
1222#
1223# @status: @MigrationStatus describing the current migration status.
1224#
1225# Since: 2.4
1226#
1227# Example:
1228#
1229# <- {"timestamp": {"seconds": 1432121972, "microseconds": 744001},
1230#     "event": "MIGRATION",
1231#     "data": {"status": "completed"} }
1232##
1233{ 'event': 'MIGRATION',
1234  'data': {'status': 'MigrationStatus'}}
1235
1236##
1237# @MIGRATION_PASS:
1238#
1239# Emitted from the source side of a migration at the start of each
1240# pass (when it syncs the dirty bitmap)
1241#
1242# @pass: An incrementing count (starting at 1 on the first pass)
1243#
1244# Since: 2.6
1245#
1246# Example:
1247#
1248# <- { "timestamp": {"seconds": 1449669631, "microseconds": 239225},
1249#       "event": "MIGRATION_PASS", "data": {"pass": 2} }
1250##
1251{ 'event': 'MIGRATION_PASS',
1252  'data': { 'pass': 'int' } }
1253
1254##
1255# @COLOMessage:
1256#
1257# The messages transmitted between the Primary and Secondary sides.
1258#
1259# @checkpoint-ready: Secondary VM (SVM) is ready for checkpointing
1260#
1261# @checkpoint-request: Primary VM (PVM) tells SVM to prepare for
1262#     checkpointing
1263#
1264# @checkpoint-reply: SVM gets PVM's checkpoint request
1265#
1266# @vmstate-send: VM's state will be sent by PVM.
1267#
1268# @vmstate-size: The total size of VMstate.
1269#
1270# @vmstate-received: VM's state has been received by SVM.
1271#
1272# @vmstate-loaded: VM's state has been loaded by SVM.
1273#
1274# Since: 2.8
1275##
1276{ 'enum': 'COLOMessage',
1277  'data': [ 'checkpoint-ready', 'checkpoint-request', 'checkpoint-reply',
1278            'vmstate-send', 'vmstate-size', 'vmstate-received',
1279            'vmstate-loaded' ] }
1280
1281##
1282# @COLOMode:
1283#
1284# The COLO current mode.
1285#
1286# @none: COLO is disabled.
1287#
1288# @primary: COLO node on the primary side.
1289#
1290# @secondary: COLO node on the secondary side.
1291#
1292# Since: 2.8
1293##
1294{ 'enum': 'COLOMode',
1295  'data': [ 'none', 'primary', 'secondary'] }
1296
1297##
1298# @FailoverStatus:
1299#
1300# An enumeration of COLO failover status
1301#
1302# @none: no failover has ever happened
1303#
1304# @require: a failover request has been received but not yet handled
1305#
1306# @active: in the process of doing failover
1307#
1308# @completed: the failover process has finished
1309#
1310# @relaunch: restart the failover process, from 'none' -> 'completed'
1311#     (Since 2.9)
1312#
1313# Since: 2.8
1314##
1315{ 'enum': 'FailoverStatus',
1316  'data': [ 'none', 'require', 'active', 'completed', 'relaunch' ] }
1317
1318##
1319# @COLO_EXIT:
1320#
1321# Emitted when the VM finishes COLO mode, due to some error or at
1322# the request of users.
1323#
1324# @mode: report COLO mode when COLO exited.
1325#
1326# @reason: describes the reason for the COLO exit.
1327#
1328# Since: 3.1
1329#
1330# Example:
1331#
1332# <- { "timestamp": {"seconds": 2032141960, "microseconds": 417172},
1333#      "event": "COLO_EXIT", "data": {"mode": "primary", "reason": "request" } }
1334##
1335{ 'event': 'COLO_EXIT',
1336  'data': {'mode': 'COLOMode', 'reason': 'COLOExitReason' } }
1337
1338##
1339# @COLOExitReason:
1340#
1341# The reason for a COLO exit.
1342#
1343# @none: failover has never happened.  This state does not occur in
1344#     the COLO_EXIT event, and is only visible in the result of
1345#     query-colo-status.
1346#
1347# @request: COLO exit is due to an external request.
1348#
1349# @error: COLO exit is due to an internal error.
1350#
1351# @processing: COLO is currently handling a failover (since 4.0).
1352#
1353# Since: 3.1
1354##
1355{ 'enum': 'COLOExitReason',
1356  'data': [ 'none', 'request', 'error' , 'processing' ] }
1357
1358##
1359# @x-colo-lost-heartbeat:
1360#
1361# Tell QEMU that the heartbeat is lost, and request it to do takeover
1362# procedures.  If this command is sent to the PVM, the Primary side
1363# will exit COLO mode.  If sent to the Secondary, the Secondary side
1364# will run failover work, then take over server operation to become
1365# the service VM.
1366#
1367# Features:
1368#
1369# @unstable: This command is experimental.
1370#
1371# Since: 2.8
1372#
1373# Example:
1374#
1375# -> { "execute": "x-colo-lost-heartbeat" }
1376# <- { "return": {} }
1377##
1378{ 'command': 'x-colo-lost-heartbeat',
1379  'features': [ 'unstable' ],
1380  'if': 'CONFIG_REPLICATION' }
1381
1382##
1383# @migrate_cancel:
1384#
1385# Cancel the currently executing migration process.
1386#
1387# Returns: nothing on success
1388#
1389# Notes: This command succeeds even if there is no migration process
1390#     running.
1391#
1392# Since: 0.14
1393#
1394# Example:
1395#
1396# -> { "execute": "migrate_cancel" }
1397# <- { "return": {} }
1398##
1399{ 'command': 'migrate_cancel' }
1400
1401##
1402# @migrate-continue:
1403#
1404# Continue migration when it's in a paused state.
1405#
1406# @state: The state the migration is currently expected to be in
1407#
1408# Returns: nothing on success
1409#
1410# Since: 2.11
1411#
1412# Example:
1413#
1414# -> { "execute": "migrate-continue" , "arguments":
1415#      { "state": "pre-switchover" } }
1416# <- { "return": {} }
1417##
1418{ 'command': 'migrate-continue', 'data': {'state': 'MigrationStatus'} }
1419
1420##
1421# @migrate:
1422#
1423# Migrates the currently running guest to another Virtual Machine.
1424#
1425# @uri: the Uniform Resource Identifier of the destination VM
1426#
1427# @blk: do block migration (full disk copy)
1428#
1429# @inc: incremental disk copy migration
1430#
1431# @detach: this argument exists only for compatibility reasons and is
1432#     ignored by QEMU
1433#
1434# @resume: resume one paused migration, default "off". (since 3.0)
1435#
1436# Returns: nothing on success
1437#
1438# Since: 0.14
1439#
1440# Notes:
1441#
1442# 1. The 'query-migrate' command should be used to check migration's
1443#    progress and final result (this information is provided by the
1444#    'status' member)
1445#
1446# 2. All boolean arguments default to false
1447#
1448# 3. The user Monitor's "detach" argument is invalid in QMP and should
1449#    not be used
1450#
1451# Example:
1452#
1453# -> { "execute": "migrate", "arguments": { "uri": "tcp:0:4446" } }
1454# <- { "return": {} }
1455##
1456{ 'command': 'migrate',
1457  'data': {'uri': 'str', '*blk': 'bool', '*inc': 'bool',
1458           '*detach': 'bool', '*resume': 'bool' } }
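
# For illustration only: a sketch of resuming a postcopy migration that
# has entered the 'postcopy-paused' state, assuming the destination is
# again listening on the hypothetical address used below:
#
# -> { "execute": "migrate",
#      "arguments": { "uri": "tcp:192.168.1.2:4446", "resume": true } }
# <- { "return": {} }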
1459
1460##
1461# @migrate-incoming:
1462#
1463# Start an incoming migration.  QEMU must have been started with
1464# -incoming defer.
1465#
1466# @uri: The Uniform Resource Identifier identifying the source or
1467#     address to listen on
1468#
1469# Returns: nothing on success
1470#
1471# Since: 2.3
1472#
1473# Notes:
1474#
1475# 1. It's a bad idea to use a string for the uri, but it needs
1476#    to stay compatible with -incoming and the format of the uri
1477#    is already exposed above libvirt.
1478#
1479# 2. QEMU must be started with -incoming defer to allow
1480#    migrate-incoming to be used.
1481#
1482# 3. The uri format is the same as for -incoming
1483#
1484# Example:
1485#
1486# -> { "execute": "migrate-incoming",
1487#      "arguments": { "uri": "tcp::4446" } }
1488# <- { "return": {} }
1489##
1490{ 'command': 'migrate-incoming', 'data': {'uri': 'str' } }
1491
1492##
1493# @xen-save-devices-state:
1494#
1495# Save the state of all devices to file.  The RAM and the block
1496# devices of the VM are not saved by this command.
1497#
1498# @filename: the file to save the state of the devices to as binary
1499#     data.  See xen-save-devices-state.txt for a description of the
1500#     binary format.
1501#
1502# @live: Optional argument to ask QEMU to treat this command as part
1503#     of a live migration.  Defaults to true.  (since 2.11)
1504#
1505# Returns: Nothing on success
1506#
1507# Since: 1.1
1508#
1509# Example:
1510#
1511# -> { "execute": "xen-save-devices-state",
1512#      "arguments": { "filename": "/tmp/save" } }
1513# <- { "return": {} }
1514##
1515{ 'command': 'xen-save-devices-state',
1516  'data': {'filename': 'str', '*live':'bool' } }
1517
1518##
1519# @xen-set-global-dirty-log:
1520#
1521# Enable or disable the global dirty log mode.
1522#
1523# @enable: true to enable, false to disable.
1524#
1525# Returns: nothing
1526#
1527# Since: 1.3
1528#
1529# Example:
1530#
1531# -> { "execute": "xen-set-global-dirty-log",
1532#      "arguments": { "enable": true } }
1533# <- { "return": {} }
1534##
1535{ 'command': 'xen-set-global-dirty-log', 'data': { 'enable': 'bool' } }
1536
1537##
1538# @xen-load-devices-state:
1539#
1540# Load the state of all devices from file.  The RAM and the block
1541# devices of the VM are not loaded by this command.
1542#
1543# @filename: the file to load the state of the devices from as binary
1544#     data.  See xen-save-devices-state.txt for a description of the
1545#     binary format.
1546#
1547# Since: 2.7
1548#
1549# Example:
1550#
1551# -> { "execute": "xen-load-devices-state",
1552#      "arguments": { "filename": "/tmp/resume" } }
1553# <- { "return": {} }
1554##
1555{ 'command': 'xen-load-devices-state', 'data': {'filename': 'str'} }
1556
1557##
1558# @xen-set-replication:
1559#
1560# Enable or disable replication.
1561#
1562# @enable: true to enable, false to disable.
1563#
1564# @primary: true for primary or false for secondary.
1565#
1566# @failover: true to do failover, false to stop.  It cannot be
1567#     specified if @enable is true.  The default value is false.
1568#
1569# Returns: nothing.
1570#
1571# Example:
1572#
1573# -> { "execute": "xen-set-replication",
1574#      "arguments": {"enable": true, "primary": false} }
1575# <- { "return": {} }
1576#
1577# Since: 2.9
1578##
1579{ 'command': 'xen-set-replication',
1580  'data': { 'enable': 'bool', 'primary': 'bool', '*failover' : 'bool' },
1581  'if': 'CONFIG_REPLICATION' }
1582
1583##
1584# @ReplicationStatus:
1585#
1586# The result format for 'query-xen-replication-status'.
1587#
1588# @error: true if an error happened, false if replication is normal.
1589#
1590# @desc: the human readable error description string, when @error is
1591#     'true'.
1592#
1593# Since: 2.9
1594##
1595{ 'struct': 'ReplicationStatus',
1596  'data': { 'error': 'bool', '*desc': 'str' },
1597  'if': 'CONFIG_REPLICATION' }
1598
1599##
1600# @query-xen-replication-status:
1601#
1602# Query replication status while the vm is running.
1603#
1604# Returns: A @ReplicationStatus object showing the status.
1605#
1606# Example:
1607#
1608# -> { "execute": "query-xen-replication-status" }
1609# <- { "return": { "error": false } }
1610#
1611# Since: 2.9
1612##
1613{ 'command': 'query-xen-replication-status',
1614  'returns': 'ReplicationStatus',
1615  'if': 'CONFIG_REPLICATION' }
1616
1617##
1618# @xen-colo-do-checkpoint:
1619#
1620# Xen uses this command to notify replication to trigger a checkpoint.
1621#
1622# Returns: nothing.
1623#
1624# Example:
1625#
1626# -> { "execute": "xen-colo-do-checkpoint" }
1627# <- { "return": {} }
1628#
1629# Since: 2.9
1630##
1631{ 'command': 'xen-colo-do-checkpoint',
1632  'if': 'CONFIG_REPLICATION' }
1633
1634##
1635# @COLOStatus:
1636#
1637# The result format for 'query-colo-status'.
1638#
1639# @mode: COLO running mode.  If COLO is running, this field will
1640#     return 'primary' or 'secondary'.
1641#
1642# @last-mode: COLO last running mode.  If COLO is running, this field
1643#     returns the same value as @mode; after a failover, this field can
1644#     be used to get the last COLO mode.  (since 4.0)
1645#
1646# @reason: describes the reason for the COLO exit.
1647#
1648# Since: 3.1
1649##
1650{ 'struct': 'COLOStatus',
1651  'data': { 'mode': 'COLOMode', 'last-mode': 'COLOMode',
1652            'reason': 'COLOExitReason' },
1653  'if': 'CONFIG_REPLICATION' }
1654
1655##
1656# @query-colo-status:
1657#
1658# Query COLO status while the vm is running.
1659#
1660# Returns: A @COLOStatus object showing the status.
1661#
1662# Example:
1663#
1664# -> { "execute": "query-colo-status" }
1665# <- { "return": { "mode": "primary", "last-mode": "none", "reason": "request" } }
1666#
1667# Since: 3.1
1668##
1669{ 'command': 'query-colo-status',
1670  'returns': 'COLOStatus',
1671  'if': 'CONFIG_REPLICATION' }
1672
1673##
1674# @migrate-recover:
1675#
1676# Provide a recovery migration stream URI.
1677#
1678# @uri: the URI to be used for the recovery of the migration stream.
1679#
1680# Returns: nothing.
1681#
1682# Example:
1683#
1684# -> { "execute": "migrate-recover",
1685#      "arguments": { "uri": "tcp:192.168.1.200:12345" } }
1686# <- { "return": {} }
1687#
1688# Since: 3.0
1689##
1690{ 'command': 'migrate-recover',
1691  'data': { 'uri': 'str' },
1692  'allow-oob': true }
1693
1694##
1695# @migrate-pause:
1696#
1697# Pause a migration.  Currently it only supports postcopy.
1698#
1699# Returns: nothing.
1700#
1701# Example:
1702#
1703# -> { "execute": "migrate-pause" }
1704# <- { "return": {} }
1705#
1706# Since: 3.0
1707##
1708{ 'command': 'migrate-pause', 'allow-oob': true }
1709
1710##
1711# @UNPLUG_PRIMARY:
1712#
1713# Emitted from the source side of a migration when the migration state
1714# is WAIT_UNPLUG.  The device was unplugged by the guest operating
1715# system.  Device resources in QEMU are kept on standby to be able to
1716# re-plug it in case of migration failure.
1717#
1718# @device-id: QEMU device id of the unplugged device
1719#
1720# Since: 4.2
1721#
1722# Example:
1723#
1724# <- { "event": "UNPLUG_PRIMARY",
1725#      "data": { "device-id": "hostdev0" },
1726#      "timestamp": { "seconds": 1265044230, "microseconds": 450486 } }
1727##
1728{ 'event': 'UNPLUG_PRIMARY',
1729  'data': { 'device-id': 'str' } }
1730
1731##
1732# @DirtyRateVcpu:
1733#
1734# Dirty rate of vcpu.
1735#
1736# @id: vcpu index.
1737#
1738# @dirty-rate: dirty rate.
1739#
1740# Since: 6.2
1741##
1742{ 'struct': 'DirtyRateVcpu',
1743  'data': { 'id': 'int', 'dirty-rate': 'int64' } }
1744
1745##
1746# @DirtyRateStatus:
1747#
1748# An enumeration of dirtyrate status.
1749#
1750# @unstarted: the dirtyrate thread has not been started.
1751#
1752# @measuring: the dirtyrate thread is measuring.
1753#
1754# @measured: the dirtyrate thread has measured and results are
1755#     available.
1756#
1757# Since: 5.2
1758##
1759{ 'enum': 'DirtyRateStatus',
1760  'data': [ 'unstarted', 'measuring', 'measured'] }
1761
1762##
1763# @DirtyRateMeasureMode:
1764#
1765# An enumeration of modes for measuring the dirty rate.
1766#
1767# @page-sampling: calculate the dirty rate by sampling pages.
1768#
1769# @dirty-ring: calculate the dirty rate via the dirty ring.
1770#
1771# @dirty-bitmap: calculate the dirty rate via the dirty bitmap.
1772#
1773# Since: 6.2
1774##
1775{ 'enum': 'DirtyRateMeasureMode',
1776  'data': ['page-sampling', 'dirty-ring', 'dirty-bitmap'] }
1777
1778##
1779# @DirtyRateInfo:
1780#
1781# Information about the current dirty page rate of the VM.
1782#
1783# @dirty-rate: an estimate of the dirty page rate of the VM in units
1784#     of MB/s, present only when estimating the rate has completed.
1785#
1786# @status: the current status of the dirty rate measurement; one of
1787#     'unstarted', 'measuring' or 'measured'
1788#
1789# @start-time: start time of the calculation, in seconds
1790#
1791# @calc-time: time period over which dirty pages are sampled, in seconds
1792#
1793# @sample-pages: number of sampled pages per GB of guest memory; the
1794#     default value is 512 (since 6.1)
1795#
1796# @mode: the method used to calculate the dirty rate, such as
1797#     'page-sampling' or 'dirty-ring' (Since 6.2)
1798#
1799# @vcpu-dirty-rate: dirty rate for each vCPU if dirty-ring mode is
1800#     specified (Since 6.2)
1801#
1802# Since: 5.2
1803##
1804{ 'struct': 'DirtyRateInfo',
1805  'data': {'*dirty-rate': 'int64',
1806           'status': 'DirtyRateStatus',
1807           'start-time': 'int64',
1808           'calc-time': 'int64',
1809           'sample-pages': 'uint64',
1810           'mode': 'DirtyRateMeasureMode',
1811           '*vcpu-dirty-rate': [ 'DirtyRateVcpu' ] } }
1812
1813##
1814# @calc-dirty-rate:
1815#
1816# Start calculating the dirty page rate for the VM.
1817#
1818# @calc-time: time period over which dirty pages are sampled, in seconds
1819#
1820# @sample-pages: number of sampled pages per GB of guest memory; the
1821#     default value is 512 (since 6.1)
1822#
1823# @mode: the method used to calculate the dirty rate, such as
1824#     'page-sampling' or 'dirty-ring' (Since 6.1)
1825#
1826# Since: 5.2
1827#
1828# Example:
1829#
1830# -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 1,
1831#                                                 "sample-pages": 512} }
1832# <- { "return": {} }
1833##
1834{ 'command': 'calc-dirty-rate', 'data': {'calc-time': 'int64',
1835                                         '*sample-pages': 'int',
1836                                         '*mode': 'DirtyRateMeasureMode'} }
1837
1838##
1839# @query-dirty-rate:
1840#
1841# Query the dirty page rate, in units of MB/s, for the VM.
1842#
1843# Since: 5.2
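#
# Example:
#
# The returned values below are illustrative; the fields present
# depend on the measurement mode and on whether a measurement has
# completed.
#
# -> {"execute": "query-dirty-rate"}
# <- {"return": {"status": "measured", "sample-pages": 512,
#                "dirty-rate": 108, "start-time": 1325, "calc-time": 1,
#                "mode": "page-sampling"}}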
1844##
1845{ 'command': 'query-dirty-rate', 'returns': 'DirtyRateInfo' }
1846
1847##
1848# @DirtyLimitInfo:
1849#
1850# Dirty page rate limit information of a virtual CPU.
1851#
1852# @cpu-index: index of a virtual CPU.
1853#
1854# @limit-rate: upper limit of dirty page rate (MB/s) for a virtual
1855#     CPU, 0 means unlimited.
1856#
1857# @current-rate: current dirty page rate (MB/s) for a virtual CPU.
1858#
1859# Since: 7.1
1860##
1861{ 'struct': 'DirtyLimitInfo',
1862  'data': { 'cpu-index': 'int',
1863            'limit-rate': 'uint64',
1864            'current-rate': 'uint64' } }
1865
1866##
1867# @set-vcpu-dirty-limit:
1868#
1869# Set the upper limit of dirty page rate for virtual CPUs.
1870#
1871# Requires KVM with accelerator property "dirty-ring-size" set.  A
1872# virtual CPU's dirty page rate is a measure of its memory load.  To
1873# observe dirty page rates, use @calc-dirty-rate.
1874#
1875# @cpu-index: index of a virtual CPU, default is all.
1876#
1877# @dirty-rate: upper limit of dirty page rate (MB/s) for virtual CPUs.
1878#
1879# Since: 7.1
1880#
1881# Example:
1882#
1883# -> {"execute": "set-vcpu-dirty-limit"}
1884#     "arguments": { "dirty-rate": 200,
1885#                    "cpu-index": 1 } }
1886# <- { "return": {} }
1887##
1888{ 'command': 'set-vcpu-dirty-limit',
1889  'data': { '*cpu-index': 'int',
1890            'dirty-rate': 'uint64' } }
1891
1892##
1893# @cancel-vcpu-dirty-limit:
1894#
1895# Cancel the upper limit of dirty page rate for virtual CPUs.
1896#
1897# Cancel the dirty page limit for the vCPU which has been set with the
1898# set-vcpu-dirty-limit command.  Note that this command requires
1899# support from the dirty ring, the same as "set-vcpu-dirty-limit".
1900#
1901# @cpu-index: index of a virtual CPU, default is all.
1902#
1903# Since: 7.1
1904#
1905# Example:
1906#
1907# -> {"execute": "cancel-vcpu-dirty-limit"},
1908#     "arguments": { "cpu-index": 1 } }
1909# <- { "return": {} }
1910##
1911{ 'command': 'cancel-vcpu-dirty-limit',
1912  'data': { '*cpu-index': 'int'} }
1913
1914##
1915# @query-vcpu-dirty-limit:
1916#
1917# Returns information about virtual CPU dirty page rate limits, if
1918# any.
1919#
1920# Since: 7.1
1921#
1922# Example:
1923#
1924# -> {"execute": "query-vcpu-dirty-limit"}
1925# <- {"return": [
1926#        { "limit-rate": 60, "current-rate": 3, "cpu-index": 0},
1927#        { "limit-rate": 60, "current-rate": 3, "cpu-index": 1}]}
1928##
1929{ 'command': 'query-vcpu-dirty-limit',
1930  'returns': [ 'DirtyLimitInfo' ] }
1931
1932##
1933# @MigrationThreadInfo:
1934#
1935# Information about migration threads
1936#
1937# @name: the name of the migration thread
1938#
1939# @thread-id: ID of the underlying host thread
1940#
1941# Since: 7.2
1942##
1943{ 'struct': 'MigrationThreadInfo',
1944  'data': {'name': 'str',
1945           'thread-id': 'int'} }
1946
1947##
1948# @query-migrationthreads:
1949#
1950# Returns information about the migration threads.
1953#
1954# Returns: information about migration threads
1955#
1956# Since: 7.2
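#
# Example:
#
# An illustrative exchange; the thread names and IDs depend on the
# host and on the migration in progress:
#
# -> {"execute": "query-migrationthreads"}
# <- {"return": [{"name": "live_migration", "thread-id": 12345}]}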
1957##
1958{ 'command': 'query-migrationthreads',
1959  'returns': ['MigrationThreadInfo'] }
1960
1961##
1962# @snapshot-save:
1963#
1964# Save a VM snapshot
1965#
1966# @job-id: identifier for the newly created job
1967#
1968# @tag: name of the snapshot to create
1969#
1970# @vmstate: block device node name to save vmstate to
1971#
1972# @devices: list of block device node names to save a snapshot to
1973#
1974# Applications should not assume that the snapshot save is complete
1975# when this command returns.  The job commands / events must be used
1976# to determine completion and to fetch details of any errors that
1977# arise.
1978#
1979# Note that execution of the guest CPUs may be stopped during the time
1980# it takes to save the snapshot.  A future version of QEMU may ensure
1981# CPUs are executing continuously.
1982#
1983# It is strongly recommended that @devices contain all writable block
1984# device nodes if a consistent snapshot is required.
1985#
1986# If @tag already exists, an error will be reported.
1987#
1988# Returns: nothing
1989#
1990# Example:
1991#
1992# -> { "execute": "snapshot-save",
1993#      "arguments": {
1994#         "job-id": "snapsave0",
1995#         "tag": "my-snap",
1996#         "vmstate": "disk0",
1997#         "devices": ["disk0", "disk1"]
1998#      }
1999#    }
2000# <- { "return": { } }
2001# <- {"event": "JOB_STATUS_CHANGE",
2002#     "timestamp": {"seconds": 1432121972, "microseconds": 744001},
2003#     "data": {"status": "created", "id": "snapsave0"}}
2004# <- {"event": "JOB_STATUS_CHANGE",
2005#     "timestamp": {"seconds": 1432122172, "microseconds": 744001},
2006#     "data": {"status": "running", "id": "snapsave0"}}
2007# <- {"event": "STOP",
2008#     "timestamp": {"seconds": 1432122372, "microseconds": 744001} }
2009# <- {"event": "RESUME",
2010#     "timestamp": {"seconds": 1432122572, "microseconds": 744001} }
2011# <- {"event": "JOB_STATUS_CHANGE",
2012#     "timestamp": {"seconds": 1432122772, "microseconds": 744001},
2013#     "data": {"status": "waiting", "id": "snapsave0"}}
2014# <- {"event": "JOB_STATUS_CHANGE",
2015#     "timestamp": {"seconds": 1432122972, "microseconds": 744001},
2016#     "data": {"status": "pending", "id": "snapsave0"}}
2017# <- {"event": "JOB_STATUS_CHANGE",
2018#     "timestamp": {"seconds": 1432123172, "microseconds": 744001},
2019#     "data": {"status": "concluded", "id": "snapsave0"}}
2020# -> {"execute": "query-jobs"}
2021# <- {"return": [{"current-progress": 1,
2022#                 "status": "concluded",
2023#                 "total-progress": 1,
2024#                 "type": "snapshot-save",
2025#                 "id": "snapsave0"}]}
2026#
2027# Since: 6.0
2028##
2029{ 'command': 'snapshot-save',
2030  'data': { 'job-id': 'str',
2031            'tag': 'str',
2032            'vmstate': 'str',
2033            'devices': ['str'] } }
2034
2035##
2036# @snapshot-load:
2037#
2038# Load a VM snapshot
2039#
2040# @job-id: identifier for the newly created job
2041#
2042# @tag: name of the snapshot to load.
2043#
2044# @vmstate: block device node name to load vmstate from
2045#
2046# @devices: list of block device node names to load a snapshot from
2047#
2048# Applications should not assume that the snapshot load is complete
2049# when this command returns.  The job commands / events must be used
2050# to determine completion and to fetch details of any errors that
2051# arise.
2052#
2053# Note that execution of the guest CPUs will be stopped during the
2054# time it takes to load the snapshot.
2055#
2056# It is strongly recommended that @devices contain all writable block
2057# device nodes that can have changed since the original @snapshot-save
2058# command execution.
2059#
2060# Returns: nothing
2061#
2062# Example:
2063#
2064# -> { "execute": "snapshot-load",
2065#      "arguments": {
2066#         "job-id": "snapload0",
2067#         "tag": "my-snap",
2068#         "vmstate": "disk0",
2069#         "devices": ["disk0", "disk1"]
2070#      }
2071#    }
2072# <- { "return": { } }
2073# <- {"event": "JOB_STATUS_CHANGE",
2074#     "timestamp": {"seconds": 1472124172, "microseconds": 744001},
2075#     "data": {"status": "created", "id": "snapload0"}}
2076# <- {"event": "JOB_STATUS_CHANGE",
2077#     "timestamp": {"seconds": 1472125172, "microseconds": 744001},
2078#     "data": {"status": "running", "id": "snapload0"}}
2079# <- {"event": "STOP",
2080#     "timestamp": {"seconds": 1472125472, "microseconds": 744001} }
2081# <- {"event": "RESUME",
2082#     "timestamp": {"seconds": 1472125872, "microseconds": 744001} }
2083# <- {"event": "JOB_STATUS_CHANGE",
2084#     "timestamp": {"seconds": 1472126172, "microseconds": 744001},
2085#     "data": {"status": "waiting", "id": "snapload0"}}
2086# <- {"event": "JOB_STATUS_CHANGE",
2087#     "timestamp": {"seconds": 1472127172, "microseconds": 744001},
2088#     "data": {"status": "pending", "id": "snapload0"}}
2089# <- {"event": "JOB_STATUS_CHANGE",
2090#     "timestamp": {"seconds": 1472128172, "microseconds": 744001},
2091#     "data": {"status": "concluded", "id": "snapload0"}}
2092# -> {"execute": "query-jobs"}
2093# <- {"return": [{"current-progress": 1,
2094#                 "status": "concluded",
2095#                 "total-progress": 1,
2096#                 "type": "snapshot-load",
2097#                 "id": "snapload0"}]}
2098#
2099# Since: 6.0
2100##
2101{ 'command': 'snapshot-load',
2102  'data': { 'job-id': 'str',
2103            'tag': 'str',
2104            'vmstate': 'str',
2105            'devices': ['str'] } }
2106
2107##
2108# @snapshot-delete:
2109#
2110# Delete a VM snapshot
2111#
2112# @job-id: identifier for the newly created job
2113#
2114# @tag: name of the snapshot to delete.
2115#
2116# @devices: list of block device node names to delete a snapshot from
2117#
2118# Applications should not assume that the snapshot delete is complete
2119# when this command returns.  The job commands / events must be used
2120# to determine completion and to fetch details of any errors that
2121# arise.
2122#
2123# Returns: nothing
2124#
2125# Example:
2126#
2127# -> { "execute": "snapshot-delete",
2128#      "arguments": {
2129#         "job-id": "snapdelete0",
2130#         "tag": "my-snap",
2131#         "devices": ["disk0", "disk1"]
2132#      }
2133#    }
2134# <- { "return": { } }
2135# <- {"event": "JOB_STATUS_CHANGE",
2136#     "timestamp": {"seconds": 1442124172, "microseconds": 744001},
2137#     "data": {"status": "created", "id": "snapdelete0"}}
2138# <- {"event": "JOB_STATUS_CHANGE",
2139#     "timestamp": {"seconds": 1442125172, "microseconds": 744001},
2140#     "data": {"status": "running", "id": "snapdelete0"}}
2141# <- {"event": "JOB_STATUS_CHANGE",
2142#     "timestamp": {"seconds": 1442126172, "microseconds": 744001},
2143#     "data": {"status": "waiting", "id": "snapdelete0"}}
2144# <- {"event": "JOB_STATUS_CHANGE",
2145#     "timestamp": {"seconds": 1442127172, "microseconds": 744001},
2146#     "data": {"status": "pending", "id": "snapdelete0"}}
2147# <- {"event": "JOB_STATUS_CHANGE",
2148#     "timestamp": {"seconds": 1442128172, "microseconds": 744001},
2149#     "data": {"status": "concluded", "id": "snapdelete0"}}
2150# -> {"execute": "query-jobs"}
2151# <- {"return": [{"current-progress": 1,
2152#                 "status": "concluded",
2153#                 "total-progress": 1,
2154#                 "type": "snapshot-delete",
2155#                 "id": "snapdelete0"}]}
2156#
2157# Since: 6.0
2158##
2159{ 'command': 'snapshot-delete',
2160  'data': { 'job-id': 'str',
2161            'tag': 'str',
2162            'devices': ['str'] } }
2163