# xref: /openbmc/qemu/qapi/migration.json (revision 15699cf5)
1# -*- Mode: Python -*-
2# vim: filetype=python
3#
4
5##
6# = Migration
7##
8
9{ 'include': 'common.json' }
10{ 'include': 'sockets.json' }
11
12##
13# @MigrationStats:
14#
15# Detailed migration status.
16#
17# @transferred: amount of bytes already transferred to the target VM
18#
19# @remaining: amount of bytes remaining to be transferred to the
20#     target VM
21#
22# @total: total amount of bytes involved in the migration process
23#
24# @duplicate: number of duplicate (zero) pages (since 1.2)
25#
26# @skipped: number of skipped zero pages (since 1.5)
27#
28# @normal: number of normal pages (since 1.2)
29#
30# @normal-bytes: number of normal bytes sent (since 1.2)
31#
32# @dirty-pages-rate: number of pages dirtied by second by the guest
33#     (since 1.3)
34#
35# @mbps: throughput in megabits/sec.  (since 1.6)
36#
37# @dirty-sync-count: number of times that dirty ram was synchronized
38#     (since 2.1)
39#
40# @postcopy-requests: The number of page requests received from the
41#     destination (since 2.7)
42#
43# @page-size: The number of bytes per page for the various page-based
44#     statistics (since 2.10)
45#
46# @multifd-bytes: The number of bytes sent through multifd (since 3.0)
47#
48# @pages-per-second: the number of memory pages transferred per second
49#     (Since 4.0)
50#
51# @precopy-bytes: The number of bytes sent in the pre-copy phase
52#     (since 7.0).
53#
54# @downtime-bytes: The number of bytes sent while the guest is paused
55#     (since 7.0).
56#
57# @postcopy-bytes: The number of bytes sent during the post-copy phase
58#     (since 7.0).
59#
60# @dirty-sync-missed-zero-copy: Number of times dirty RAM
61#     synchronization could not avoid copying dirty pages.  This is
62#     between 0 and @dirty-sync-count * @multifd-channels.  (since
63#     7.1)
64#
65# Since: 0.14
66##
{ 'struct': 'MigrationStats',
  'data': {'transferred': 'int', 'remaining': 'int', 'total': 'int',
           'duplicate': 'int', 'skipped': 'int', 'normal': 'int',
           'normal-bytes': 'int', 'dirty-pages-rate': 'int',
           'mbps': 'number', 'dirty-sync-count': 'int',
           'postcopy-requests': 'int', 'page-size': 'int',
           'multifd-bytes': 'uint64', 'pages-per-second': 'uint64',
           'precopy-bytes': 'uint64', 'downtime-bytes': 'uint64',
           'postcopy-bytes': 'uint64',
           'dirty-sync-missed-zero-copy': 'uint64' } }
77
78##
79# @XBZRLECacheStats:
80#
81# Detailed XBZRLE migration cache statistics
82#
83# @cache-size: XBZRLE cache size
84#
85# @bytes: amount of bytes already transferred to the target VM
86#
87# @pages: amount of pages transferred to the target VM
88#
# @cache-miss: number of cache misses
90#
91# @cache-miss-rate: rate of cache miss (since 2.1)
92#
93# @encoding-rate: rate of encoded bytes (since 5.1)
94#
95# @overflow: number of overflows
96#
97# Since: 1.2
98##
{ 'struct': 'XBZRLECacheStats',
  'data': { 'cache-size': 'size',
            'bytes': 'int',
            'pages': 'int',
            'cache-miss': 'int',
            'cache-miss-rate': 'number',
            'encoding-rate': 'number',
            'overflow': 'int' } }
103
104##
105# @CompressionStats:
106#
107# Detailed migration compression statistics
108#
109# @pages: amount of pages compressed and transferred to the target VM
110#
111# @busy: count of times that no free thread was available to compress
112#     data
113#
114# @busy-rate: rate of thread busy
115#
116# @compressed-size: amount of bytes after compression
117#
118# @compression-rate: rate of compressed size
119#
120# Since: 3.1
121##
{ 'struct': 'CompressionStats',
  'data': { 'pages': 'int',
            'busy': 'int',
            'busy-rate': 'number',
            'compressed-size': 'int',
            'compression-rate': 'number' } }
125
126##
127# @MigrationStatus:
128#
129# An enumeration of migration status.
130#
131# @none: no migration has ever happened.
132#
133# @setup: migration process has been initiated.
134#
135# @cancelling: in the process of cancelling migration.
136#
137# @cancelled: cancelling migration is finished.
138#
139# @active: in the process of doing migration.
140#
141# @postcopy-active: like active, but now in postcopy mode.  (since
142#     2.5)
143#
144# @postcopy-paused: during postcopy but paused.  (since 3.0)
145#
146# @postcopy-recover: trying to recover from a paused postcopy.  (since
147#     3.0)
148#
149# @completed: migration is finished.
150#
151# @failed: some error occurred during migration process.
152#
153# @colo: VM is in the process of fault tolerance, VM can not get into
154#     this state unless colo capability is enabled for migration.
155#     (since 2.8)
156#
157# @pre-switchover: Paused before device serialisation.  (since 2.11)
158#
159# @device: During device serialisation when pause-before-switchover is
160#     enabled (since 2.11)
161#
162# @wait-unplug: wait for device unplug request by guest OS to be
163#     completed.  (since 4.2)
164#
165# Since: 2.3
166##
{ 'enum': 'MigrationStatus',
  'data': [ 'none', 'setup', 'cancelling', 'cancelled',
            'active', 'postcopy-active', 'postcopy-paused',
            'postcopy-recover', 'completed', 'failed', 'colo',
            'pre-switchover', 'device', 'wait-unplug' ] }

172##
173# @VfioStats:
174#
175# Detailed VFIO devices migration statistics
176#
177# @transferred: amount of bytes transferred to the target VM by VFIO
178#     devices
179#
180# Since: 5.2
181##
{ 'struct': 'VfioStats',
  'data': { 'transferred': 'int' } }
184
185##
186# @MigrationInfo:
187#
188# Information about current migration process.
189#
190# @status: @MigrationStatus describing the current migration status.
191#     If this field is not returned, no migration process has been
192#     initiated
193#
194# @ram: @MigrationStats containing detailed migration status, only
#     returned if status is 'active' or 'completed' (since 1.2)
196#
197# @disk: @MigrationStats containing detailed disk migration status,
198#     only returned if status is 'active' and it is a block migration
199#
200# @xbzrle-cache: @XBZRLECacheStats containing detailed XBZRLE
201#     migration statistics, only returned if XBZRLE feature is on and
202#     status is 'active' or 'completed' (since 1.2)
203#
204# @total-time: total amount of milliseconds since migration started.
205#     If migration has ended, it returns the total migration time.
206#     (since 1.2)
207#
# @downtime: only present when migration finishes correctly, total
#     downtime in milliseconds for the guest.  (since 1.3)
210#
# @expected-downtime: only present while migration is active, expected
#     downtime in milliseconds for the guest in last walk of the dirty
#     bitmap.  (since 1.3)
214#
215# @setup-time: amount of setup time in milliseconds *before* the
216#     iterations begin but *after* the QMP command is issued.  This is
217#     designed to provide an accounting of any activities (such as
218#     RDMA pinning) which may be expensive, but do not actually occur
219#     during the iterative migration rounds themselves.  (since 1.6)
220#
221# @cpu-throttle-percentage: percentage of time guest cpus are being
222#     throttled during auto-converge.  This is only present when
223#     auto-converge has started throttling guest cpus.  (Since 2.7)
224#
225# @error-desc: the human readable error description string, when
226#     @status is 'failed'. Clients should not attempt to parse the
227#     error strings.  (Since 2.7)
228#
229# @postcopy-blocktime: total time when all vCPU were blocked during
230#     postcopy live migration.  This is only present when the
231#     postcopy-blocktime migration capability is enabled.  (Since 3.0)
232#
233# @postcopy-vcpu-blocktime: list of the postcopy blocktime per vCPU.
234#     This is only present when the postcopy-blocktime migration
235#     capability is enabled.  (Since 3.0)
236#
237# @compression: migration compression statistics, only returned if
238#     compression feature is on and status is 'active' or 'completed'
239#     (Since 3.1)
240#
241# @socket-address: Only used for tcp, to know what the real port is
242#     (Since 4.0)
243#
244# @vfio: @VfioStats containing detailed VFIO devices migration
245#     statistics, only returned if VFIO device is present, migration
246#     is supported by all VFIO devices and status is 'active' or
247#     'completed' (since 5.2)
248#
249# @blocked-reasons: A list of reasons an outgoing migration is
250#     blocked.  Present and non-empty when migration is blocked.
251#     (since 6.0)
252#
# @dirty-limit-throttle-time-per-round: Maximum throttle time (in
#     microseconds) of virtual CPUs each dirty ring full round, which
#     shows how MigrationCapability dirty-limit affects the guest
#     during live migration.  (since 8.1)
#
# @dirty-limit-ring-full-time: Estimated average dirty ring full time
#     (in microseconds) each dirty ring full round.  The value equals
#     the dirty ring memory size divided by the average dirty page
#     rate of the virtual CPU, which can be used to observe the
#     average memory load of the virtual CPU indirectly.  Note that
#     zero means the guest doesn't dirty memory.  (since 8.1)
264#
265# Since: 0.14
266##
{ 'struct': 'MigrationInfo',
  'data': { '*status': 'MigrationStatus',
            '*ram': 'MigrationStats',
            '*disk': 'MigrationStats',
            '*vfio': 'VfioStats',
            '*xbzrle-cache': 'XBZRLECacheStats',
            '*total-time': 'int',
            '*expected-downtime': 'int',
            '*downtime': 'int',
            '*setup-time': 'int',
            '*cpu-throttle-percentage': 'int',
            '*error-desc': 'str',
            '*blocked-reasons': [ 'str' ],
            '*postcopy-blocktime': 'uint32',
            '*postcopy-vcpu-blocktime': [ 'uint32' ],
            '*compression': 'CompressionStats',
            '*socket-address': [ 'SocketAddress' ],
            '*dirty-limit-throttle-time-per-round': 'uint64',
            '*dirty-limit-ring-full-time': 'uint64' } }
285
286##
287# @query-migrate:
288#
289# Returns information about current migration process.  If migration
290# is active there will be another json-object with RAM migration
291# status and if block migration is active another one with block
292# migration status.
293#
294# Returns: @MigrationInfo
295#
296# Since: 0.14
297#
298# Examples:
299#
300# 1. Before the first migration
301#
302# -> { "execute": "query-migrate" }
303# <- { "return": {} }
304#
305# 2. Migration is done and has succeeded
306#
307# -> { "execute": "query-migrate" }
308# <- { "return": {
309#         "status": "completed",
310#         "total-time":12345,
311#         "setup-time":12345,
312#         "downtime":12345,
313#         "ram":{
314#           "transferred":123,
315#           "remaining":123,
316#           "total":246,
317#           "duplicate":123,
318#           "normal":123,
319#           "normal-bytes":123456,
320#           "dirty-sync-count":15
321#         }
322#      }
323#    }
324#
325# 3. Migration is done and has failed
326#
327# -> { "execute": "query-migrate" }
328# <- { "return": { "status": "failed" } }
329#
330# 4. Migration is being performed and is not a block migration:
331#
332# -> { "execute": "query-migrate" }
333# <- {
334#       "return":{
335#          "status":"active",
336#          "total-time":12345,
337#          "setup-time":12345,
338#          "expected-downtime":12345,
339#          "ram":{
340#             "transferred":123,
341#             "remaining":123,
342#             "total":246,
343#             "duplicate":123,
344#             "normal":123,
345#             "normal-bytes":123456,
346#             "dirty-sync-count":15
347#          }
348#       }
349#    }
350#
351# 5. Migration is being performed and is a block migration:
352#
353# -> { "execute": "query-migrate" }
354# <- {
355#       "return":{
356#          "status":"active",
357#          "total-time":12345,
358#          "setup-time":12345,
359#          "expected-downtime":12345,
360#          "ram":{
361#             "total":1057024,
362#             "remaining":1053304,
363#             "transferred":3720,
364#             "duplicate":123,
365#             "normal":123,
366#             "normal-bytes":123456,
367#             "dirty-sync-count":15
368#          },
369#          "disk":{
370#             "total":20971520,
371#             "remaining":20880384,
372#             "transferred":91136
373#          }
374#       }
375#    }
376#
377# 6. Migration is being performed and XBZRLE is active:
378#
379# -> { "execute": "query-migrate" }
380# <- {
381#       "return":{
382#          "status":"active",
383#          "total-time":12345,
384#          "setup-time":12345,
385#          "expected-downtime":12345,
386#          "ram":{
387#             "total":1057024,
388#             "remaining":1053304,
389#             "transferred":3720,
390#             "duplicate":10,
391#             "normal":3333,
392#             "normal-bytes":3412992,
393#             "dirty-sync-count":15
394#          },
395#          "xbzrle-cache":{
396#             "cache-size":67108864,
397#             "bytes":20971520,
398#             "pages":2444343,
399#             "cache-miss":2244,
400#             "cache-miss-rate":0.123,
401#             "encoding-rate":80.1,
402#             "overflow":34434
403#          }
404#       }
405#    }
406##
{ 'command': 'query-migrate',
  'returns': 'MigrationInfo' }
408
409##
410# @MigrationCapability:
411#
412# Migration capabilities enumeration
413#
414# @xbzrle: Migration supports xbzrle (Xor Based Zero Run Length
415#     Encoding). This feature allows us to minimize migration traffic
416#     for certain work loads, by sending compressed difference of the
417#     pages
418#
419# @rdma-pin-all: Controls whether or not the entire VM memory
420#     footprint is mlock()'d on demand or all at once.  Refer to
421#     docs/rdma.txt for usage.  Disabled by default.  (since 2.0)
422#
423# @zero-blocks: During storage migration encode blocks of zeroes
424#     efficiently.  This essentially saves 1MB of zeroes per block on
425#     the wire.  Enabling requires source and target VM to support
426#     this feature.  To enable it is sufficient to enable the
427#     capability on the source VM. The feature is disabled by default.
428#     (since 1.6)
429#
430# @compress: Use multiple compression threads to accelerate live
431#     migration.  This feature can help to reduce the migration
432#     traffic, by sending compressed pages.  Please note that if
433#     compress and xbzrle are both on, compress only takes effect in
434#     the ram bulk stage, after that, it will be disabled and only
435#     xbzrle takes effect, this can help to minimize migration
#     traffic.  The feature is disabled by default.  (since 2.4)
437#
# @events: generate events for each migration state change (since
#     2.4)
440#
441# @auto-converge: If enabled, QEMU will automatically throttle down
442#     the guest to speed up convergence of RAM migration.  (since 1.6)
443#
444# @postcopy-ram: Start executing on the migration target before all of
445#     RAM has been migrated, pulling the remaining pages along as
446#     needed.  The capacity must have the same setting on both source
447#     and target or migration will not even start.  NOTE: If the
448#     migration fails during postcopy the VM will fail.  (since 2.6)
449#
450# @x-colo: If enabled, migration will never end, and the state of the
451#     VM on the primary side will be migrated continuously to the VM
452#     on secondary side, this process is called COarse-Grain LOck
453#     Stepping (COLO) for Non-stop Service.  (since 2.8)
454#
455# @release-ram: if enabled, qemu will free the migrated ram pages on
456#     the source during postcopy-ram migration.  (since 2.9)
457#
458# @block: If enabled, QEMU will also migrate the contents of all block
459#     devices.  Default is disabled.  A possible alternative uses
460#     mirror jobs to a builtin NBD server on the destination, which
461#     offers more flexibility.  (Since 2.10)
462#
463# @return-path: If enabled, migration will use the return path even
464#     for precopy.  (since 2.10)
465#
466# @pause-before-switchover: Pause outgoing migration before
467#     serialising device state and before disabling block IO (since
468#     2.11)
469#
470# @multifd: Use more than one fd for migration (since 4.0)
471#
472# @dirty-bitmaps: If enabled, QEMU will migrate named dirty bitmaps.
473#     (since 2.12)
474#
475# @postcopy-blocktime: Calculate downtime for postcopy live migration
476#     (since 3.0)
477#
478# @late-block-activate: If enabled, the destination will not activate
479#     block devices (and thus take locks) immediately at the end of
480#     migration.  (since 3.0)
481#
# @x-ignore-shared: If enabled, QEMU will not migrate shared memory
#     that is accessible on the destination machine.  (since 4.0)
484#
485# @validate-uuid: Send the UUID of the source to allow the destination
486#     to ensure it is the same.  (since 4.2)
487#
488# @background-snapshot: If enabled, the migration stream will be a
489#     snapshot of the VM exactly at the point when the migration
490#     procedure starts.  The VM RAM is saved with running VM. (since
491#     6.0)
492#
493# @zero-copy-send: Controls behavior on sending memory pages on
494#     migration.  When true, enables a zero-copy mechanism for sending
495#     memory pages, if host supports it.  Requires that QEMU be
496#     permitted to use locked memory for guest RAM pages.  (since 7.1)
497#
498# @postcopy-preempt: If enabled, the migration process will allow
499#     postcopy requests to preempt precopy stream, so postcopy
500#     requests will be handled faster.  This is a performance feature
501#     and should not affect the correctness of postcopy migration.
502#     (since 7.1)
503#
504# @switchover-ack: If enabled, migration will not stop the source VM
505#     and complete the migration until an ACK is received from the
506#     destination that it's OK to do so.  Exactly when this ACK is
507#     sent depends on the migrated devices that use this feature.
508#     For example, a device can use it to make sure some of its data
509#     is sent and loaded in the destination before doing switchover.
510#     This can reduce downtime if devices that support this capability
511#     are present.  'return-path' capability must be enabled to use
512#     it.  (since 8.1)
513#
# @dirty-limit: If enabled, migration will use the dirty-limit
#     algorithm to throttle down the guest instead of the
#     auto-converge algorithm.  The throttle algorithm only works
#     when the vCPU's dirty rate is greater than 'vcpu-dirty-limit';
#     read processes in the guest OS aren't penalized any more, so
#     this algorithm can improve vCPU performance during live
#     migration.  This is an optional performance feature and should
#     not affect the correctness of the existing auto-converge
#     algorithm.  (since 8.1)
523#
524# Features:
525#
526# @unstable: Members @x-colo and @x-ignore-shared are experimental.
527#
528# Since: 1.2
529##
{ 'enum': 'MigrationCapability',
  'data': [ 'xbzrle',
            'rdma-pin-all',
            'auto-converge',
            'zero-blocks',
            'compress',
            'events',
            'postcopy-ram',
            { 'name': 'x-colo', 'features': [ 'unstable' ] },
            'release-ram',
            'block',
            'return-path',
            'pause-before-switchover',
            'multifd',
            'dirty-bitmaps',
            'postcopy-blocktime',
            'late-block-activate',
            { 'name': 'x-ignore-shared', 'features': [ 'unstable' ] },
            'validate-uuid',
            'background-snapshot',
            'zero-copy-send',
            'postcopy-preempt',
            'switchover-ack',
            'dirty-limit' ] }
541
542##
543# @MigrationCapabilityStatus:
544#
545# Migration capability information
546#
547# @capability: capability enum
548#
549# @state: capability state bool
550#
551# Since: 1.2
552##
{ 'struct': 'MigrationCapabilityStatus',
  'data': { 'capability': 'MigrationCapability',
            'state': 'bool' } }
555
556##
557# @migrate-set-capabilities:
558#
559# Enable/Disable the following migration capabilities (like xbzrle)
560#
561# @capabilities: json array of capability modifications to make
562#
563# Since: 1.2
564#
565# Example:
566#
567# -> { "execute": "migrate-set-capabilities" , "arguments":
568#      { "capabilities": [ { "capability": "xbzrle", "state": true } ] } }
569# <- { "return": {} }
570##
{ 'command': 'migrate-set-capabilities',
  'data': { 'capabilities': [ 'MigrationCapabilityStatus' ] } }
573
574##
575# @query-migrate-capabilities:
576#
577# Returns information about the current migration capabilities status
578#
# Returns: a list of @MigrationCapabilityStatus
580#
581# Since: 1.2
582#
583# Example:
584#
585# -> { "execute": "query-migrate-capabilities" }
586# <- { "return": [
587#       {"state": false, "capability": "xbzrle"},
588#       {"state": false, "capability": "rdma-pin-all"},
589#       {"state": false, "capability": "auto-converge"},
590#       {"state": false, "capability": "zero-blocks"},
591#       {"state": false, "capability": "compress"},
592#       {"state": true, "capability": "events"},
593#       {"state": false, "capability": "postcopy-ram"},
594#       {"state": false, "capability": "x-colo"}
595#    ]}
596##
{ 'command': 'query-migrate-capabilities',
  'returns': ['MigrationCapabilityStatus'] }
598
599##
600# @MultiFDCompression:
601#
602# An enumeration of multifd compression methods.
603#
604# @none: no compression.
605#
606# @zlib: use zlib compression method.
607#
608# @zstd: use zstd compression method.
609#
610# Since: 5.0
611##
{ 'enum': 'MultiFDCompression',
  'data': [ 'none',
            'zlib',
            { 'name': 'zstd', 'if': 'CONFIG_ZSTD' } ] }
615
616##
617# @BitmapMigrationBitmapAliasTransform:
618#
619# @persistent: If present, the bitmap will be made persistent or
620#     transient depending on this parameter.
621#
622# Since: 6.0
623##
{ 'struct': 'BitmapMigrationBitmapAliasTransform',
  'data': { '*persistent': 'bool' } }
628
629##
630# @BitmapMigrationBitmapAlias:
631#
632# @name: The name of the bitmap.
633#
634# @alias: An alias name for migration (for example the bitmap name on
635#     the opposite site).
636#
637# @transform: Allows the modification of the migrated bitmap.  (since
638#     6.0)
639#
640# Since: 5.2
641##
{ 'struct': 'BitmapMigrationBitmapAlias',
  'data': { 'name': 'str',
            'alias': 'str',
            '*transform': 'BitmapMigrationBitmapAliasTransform' } }
648
649##
650# @BitmapMigrationNodeAlias:
651#
652# Maps a block node name and the bitmaps it has to aliases for dirty
653# bitmap migration.
654#
655# @node-name: A block node name.
656#
657# @alias: An alias block node name for migration (for example the node
658#     name on the opposite site).
659#
660# @bitmaps: Mappings for the bitmaps on this node.
661#
662# Since: 5.2
663##
{ 'struct': 'BitmapMigrationNodeAlias',
  'data': { 'node-name': 'str',
            'alias': 'str',
            'bitmaps': [ 'BitmapMigrationBitmapAlias' ] } }
670
671##
672# @MigrationParameter:
673#
674# Migration parameters enumeration
675#
676# @announce-initial: Initial delay (in milliseconds) before sending
677#     the first announce (Since 4.0)
678#
679# @announce-max: Maximum delay (in milliseconds) between packets in
680#     the announcement (Since 4.0)
681#
682# @announce-rounds: Number of self-announce packets sent after
683#     migration (Since 4.0)
684#
685# @announce-step: Increase in delay (in milliseconds) between
686#     subsequent packets in the announcement (Since 4.0)
687#
688# @compress-level: Set the compression level to be used in live
689#     migration, the compression level is an integer between 0 and 9,
690#     where 0 means no compression, 1 means the best compression
691#     speed, and 9 means best compression ratio which will consume
692#     more CPU.
693#
694# @compress-threads: Set compression thread count to be used in live
695#     migration, the compression thread count is an integer between 1
696#     and 255.
697#
698# @compress-wait-thread: Controls behavior when all compression
699#     threads are currently busy.  If true (default), wait for a free
700#     compression thread to become available; otherwise, send the page
701#     uncompressed.  (Since 3.1)
702#
703# @decompress-threads: Set decompression thread count to be used in
704#     live migration, the decompression thread count is an integer
705#     between 1 and 255. Usually, decompression is at least 4 times as
706#     fast as compression, so set the decompress-threads to the number
707#     about 1/4 of compress-threads is adequate.
708#
709# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
710#     bytes_xfer_period to trigger throttling.  It is expressed as
711#     percentage.  The default value is 50. (Since 5.0)
712#
713# @cpu-throttle-initial: Initial percentage of time guest cpus are
714#     throttled when migration auto-converge is activated.  The
715#     default value is 20. (Since 2.7)
716#
717# @cpu-throttle-increment: throttle percentage increase each time
718#     auto-converge detects that migration is not making progress.
719#     The default value is 10. (Since 2.7)
720#
721# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage At
722#     the tail stage of throttling, the Guest is very sensitive to CPU
723#     percentage while the @cpu-throttle -increment is excessive
724#     usually at tail stage.  If this parameter is true, we will
725#     compute the ideal CPU percentage used by the Guest, which may
726#     exactly make the dirty rate match the dirty rate threshold.
727#     Then we will choose a smaller throttle increment between the one
728#     specified by @cpu-throttle-increment and the one generated by
729#     ideal CPU percentage.  Therefore, it is compatible to
730#     traditional throttling, meanwhile the throttle increment won't
731#     be excessive at tail stage.  The default value is false.  (Since
732#     5.1)
733#
734# @tls-creds: ID of the 'tls-creds' object that provides credentials
735#     for establishing a TLS connection over the migration data
736#     channel.  On the outgoing side of the migration, the credentials
737#     must be for a 'client' endpoint, while for the incoming side the
738#     credentials must be for a 'server' endpoint.  Setting this will
739#     enable TLS for all migrations.  The default is unset, resulting
740#     in unsecured migration at the QEMU level.  (Since 2.7)
741#
742# @tls-hostname: hostname of the target host for the migration.  This
743#     is required when using x509 based TLS credentials and the
744#     migration URI does not already include a hostname.  For example
745#     if using fd: or exec: based migration, the hostname must be
746#     provided so that the server's x509 certificate identity can be
747#     validated.  (Since 2.7)
748#
749# @tls-authz: ID of the 'authz' object subclass that provides access
750#     control checking of the TLS x509 certificate distinguished name.
751#     This object is only resolved at time of use, so can be deleted
752#     and recreated on the fly while the migration server is active.
753#     If missing, it will default to denying access (Since 4.0)
754#
755# @max-bandwidth: to set maximum speed for migration.  maximum speed
756#     in bytes per second.  (Since 2.8)
757#
758# @downtime-limit: set maximum tolerated downtime for migration.
759#     maximum downtime in milliseconds (Since 2.8)
760#
761# @x-checkpoint-delay: The delay time (in ms) between two COLO
762#     checkpoints in periodic mode.  (Since 2.8)
763#
764# @block-incremental: Affects how much storage is migrated when the
765#     block migration capability is enabled.  When false, the entire
766#     storage backing chain is migrated into a flattened image at the
767#     destination; when true, only the active qcow2 layer is migrated
768#     and the destination must already have access to the same backing
769#     chain as was used on the source.  (since 2.10)
770#
771# @multifd-channels: Number of channels used to migrate data in
772#     parallel.  This is the same number that the number of sockets
773#     used for migration.  The default value is 2 (since 4.0)
774#
775# @xbzrle-cache-size: cache size to be used by XBZRLE migration.  It
776#     needs to be a multiple of the target page size and a power of 2
777#     (Since 2.11)
778#
779# @max-postcopy-bandwidth: Background transfer bandwidth during
780#     postcopy.  Defaults to 0 (unlimited).  In bytes per second.
781#     (Since 3.0)
782#
783# @max-cpu-throttle: maximum cpu throttle percentage.  Defaults to 99.
784#     (Since 3.1)
785#
786# @multifd-compression: Which compression method to use.  Defaults to
787#     none.  (Since 5.0)
788#
789# @multifd-zlib-level: Set the compression level to be used in live
790#     migration, the compression level is an integer between 0 and 9,
791#     where 0 means no compression, 1 means the best compression
792#     speed, and 9 means best compression ratio which will consume
793#     more CPU. Defaults to 1. (Since 5.0)
794#
795# @multifd-zstd-level: Set the compression level to be used in live
796#     migration, the compression level is an integer between 0 and 20,
797#     where 0 means no compression, 1 means the best compression
798#     speed, and 20 means best compression ratio which will consume
799#     more CPU. Defaults to 1. (Since 5.0)
800#
801# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
802#     aliases for the purpose of dirty bitmap migration.  Such aliases
803#     may for example be the corresponding names on the opposite site.
804#     The mapping must be one-to-one, but not necessarily complete: On
805#     the source, unmapped bitmaps and all bitmaps on unmapped nodes
806#     will be ignored.  On the destination, encountering an unmapped
807#     alias in the incoming migration stream will result in a report,
808#     and all further bitmap migration data will then be discarded.
809#     Note that the destination does not know about bitmaps it does
810#     not receive, so there is no limitation or requirement regarding
811#     the number of bitmaps received, or how they are named, or on
812#     which nodes they are placed.  By default (when this parameter
813#     has never been set), bitmap names are mapped to themselves.
814#     Nodes are mapped to their block device name if there is one, and
815#     to their node name otherwise.  (Since 5.2)
816#
# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of
#     dirty limit during live migration.  Should be in the range 1 to
#     1000ms.  Defaults to 1000ms.  (Since 8.1)
#
# @vcpu-dirty-limit: Dirty rate limit (MB/s) during live migration.
#     Defaults to 1.  (Since 8.1)
823#
824# Features:
825#
# @unstable: Members @x-checkpoint-delay and
#     @x-vcpu-dirty-limit-period are experimental.
828#
829# Since: 2.4
830##
{ 'enum': 'MigrationParameter',
  'data': [ 'announce-initial',
            'announce-max',
            'announce-rounds',
            'announce-step',
            'compress-level',
            'compress-threads',
            'decompress-threads',
            'compress-wait-thread',
            'throttle-trigger-threshold',
            'cpu-throttle-initial',
            'cpu-throttle-increment',
            'cpu-throttle-tailslow',
            'tls-creds',
            'tls-hostname',
            'tls-authz',
            'max-bandwidth',
            'downtime-limit',
            { 'name': 'x-checkpoint-delay', 'features': [ 'unstable' ] },
            'block-incremental',
            'multifd-channels',
            'xbzrle-cache-size',
            'max-postcopy-bandwidth',
            'max-cpu-throttle',
            'multifd-compression',
            'multifd-zlib-level',
            'multifd-zstd-level',
            'block-bitmap-mapping',
            { 'name': 'x-vcpu-dirty-limit-period', 'features': ['unstable'] },
            'vcpu-dirty-limit' ] }
849
850##
851# @MigrateSetParameters:
852#
853# @announce-initial: Initial delay (in milliseconds) before sending
854#     the first announce (Since 4.0)
855#
856# @announce-max: Maximum delay (in milliseconds) between packets in
857#     the announcement (Since 4.0)
858#
859# @announce-rounds: Number of self-announce packets sent after
860#     migration (Since 4.0)
861#
862# @announce-step: Increase in delay (in milliseconds) between
863#     subsequent packets in the announcement (Since 4.0)
864#
865# @compress-level: compression level
866#
867# @compress-threads: compression thread count
868#
869# @compress-wait-thread: Controls behavior when all compression
870#     threads are currently busy.  If true (default), wait for a free
871#     compression thread to become available; otherwise, send the page
872#     uncompressed.  (Since 3.1)
873#
874# @decompress-threads: decompression thread count
875#
876# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
877#     bytes_xfer_period to trigger throttling.  It is expressed as
878#     percentage.  The default value is 50. (Since 5.0)
879#
880# @cpu-throttle-initial: Initial percentage of time guest cpus are
881#     throttled when migration auto-converge is activated.  The
882#     default value is 20. (Since 2.7)
883#
884# @cpu-throttle-increment: throttle percentage increase each time
885#     auto-converge detects that migration is not making progress.
886#     The default value is 10. (Since 2.7)
887#
888# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage At
889#     the tail stage of throttling, the Guest is very sensitive to CPU
890#     percentage while the @cpu-throttle-increment is excessive
891#     usually at tail stage.  If this parameter is true, we will
892#     compute the ideal CPU percentage used by the Guest, which may
893#     exactly make the dirty rate match the dirty rate threshold.
894#     Then we will choose a smaller throttle increment between the one
895#     specified by @cpu-throttle-increment and the one generated by
896#     ideal CPU percentage.  Therefore, it is compatible with
897#     traditional throttling, meanwhile the throttle increment won't
898#     be excessive at tail stage.  The default value is false.  (Since
899#     5.1)
900#
901# @tls-creds: ID of the 'tls-creds' object that provides credentials
902#     for establishing a TLS connection over the migration data
903#     channel.  On the outgoing side of the migration, the credentials
904#     must be for a 'client' endpoint, while for the incoming side the
905#     credentials must be for a 'server' endpoint.  Setting this to a
906#     non-empty string enables TLS for all migrations.  An empty
907#     string means that QEMU will use plain text mode for migration,
908#     rather than TLS (Since 2.9) Previously (since 2.7), this was
909#     reported by omitting tls-creds instead.
910#
911# @tls-hostname: hostname of the target host for the migration.  This
912#     is required when using x509 based TLS credentials and the
913#     migration URI does not already include a hostname.  For example
914#     if using fd: or exec: based migration, the hostname must be
915#     provided so that the server's x509 certificate identity can be
916#     validated.  (Since 2.7) An empty string means that QEMU will use
917#     the hostname associated with the migration URI, if any.  (Since
918#     2.9) Previously (since 2.7), this was reported by omitting
919#     tls-hostname instead.
920#
921# @max-bandwidth: to set maximum speed for migration.  maximum speed
922#     in bytes per second.  (Since 2.8)
923#
924# @downtime-limit: set maximum tolerated downtime for migration.
925#     maximum downtime in milliseconds (Since 2.8)
926#
927# @x-checkpoint-delay: the delay time between two COLO checkpoints.
928#     (Since 2.8)
929#
930# @block-incremental: Affects how much storage is migrated when the
931#     block migration capability is enabled.  When false, the entire
932#     storage backing chain is migrated into a flattened image at the
933#     destination; when true, only the active qcow2 layer is migrated
934#     and the destination must already have access to the same backing
935#     chain as was used on the source.  (since 2.10)
936#
937# @multifd-channels: Number of channels used to migrate data in
938#     parallel.  This is the same number as the number of sockets
939#     used for migration.  The default value is 2 (since 4.0)
940#
941# @xbzrle-cache-size: cache size to be used by XBZRLE migration.  It
942#     needs to be a multiple of the target page size and a power of 2
943#     (Since 2.11)
944#
945# @max-postcopy-bandwidth: Background transfer bandwidth during
946#     postcopy.  Defaults to 0 (unlimited).  In bytes per second.
947#     (Since 3.0)
948#
949# @max-cpu-throttle: maximum cpu throttle percentage.  The default
950#     value is 99. (Since 3.1)
951#
952# @multifd-compression: Which compression method to use.  Defaults to
953#     none.  (Since 5.0)
954#
955# @multifd-zlib-level: Set the compression level to be used in live
956#     migration, the compression level is an integer between 0 and 9,
957#     where 0 means no compression, 1 means the best compression
958#     speed, and 9 means best compression ratio which will consume
959#     more CPU. Defaults to 1. (Since 5.0)
960#
961# @multifd-zstd-level: Set the compression level to be used in live
962#     migration, the compression level is an integer between 0 and 20,
963#     where 0 means no compression, 1 means the best compression
964#     speed, and 20 means best compression ratio which will consume
965#     more CPU. Defaults to 1. (Since 5.0)
966#
967# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
968#     aliases for the purpose of dirty bitmap migration.  Such aliases
969#     may for example be the corresponding names on the opposite side.
970#     The mapping must be one-to-one, but not necessarily complete: On
971#     the source, unmapped bitmaps and all bitmaps on unmapped nodes
972#     will be ignored.  On the destination, encountering an unmapped
973#     alias in the incoming migration stream will result in a report,
974#     and all further bitmap migration data will then be discarded.
975#     Note that the destination does not know about bitmaps it does
976#     not receive, so there is no limitation or requirement regarding
977#     the number of bitmaps received, or how they are named, or on
978#     which nodes they are placed.  By default (when this parameter
979#     has never been set), bitmap names are mapped to themselves.
980#     Nodes are mapped to their block device name if there is one, and
981#     to their node name otherwise.  (Since 5.2)
982#
983# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty limit during
984#                             live migration. Should be in the range 1 to 1000ms,
985#                             defaults to 1000ms. (Since 8.1)
986#
987# @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration.
988#                    Defaults to 1. (Since 8.1)
989#
990# Features:
991#
992# @unstable: Members @x-checkpoint-delay and @x-vcpu-dirty-limit-period
993#            are experimental.
994#
995# TODO: either fuse back into MigrationParameters, or make
996#     MigrationParameters members mandatory
997#
998# Since: 2.4
999##
1000{ 'struct': 'MigrateSetParameters',
1001  'data': { '*announce-initial': 'size',
1002            '*announce-max': 'size',
1003            '*announce-rounds': 'size',
1004            '*announce-step': 'size',
1005            '*compress-level': 'uint8',
1006            '*compress-threads': 'uint8',
1007            '*compress-wait-thread': 'bool',
1008            '*decompress-threads': 'uint8',
1009            '*throttle-trigger-threshold': 'uint8',
1010            '*cpu-throttle-initial': 'uint8',
1011            '*cpu-throttle-increment': 'uint8',
1012            '*cpu-throttle-tailslow': 'bool',
1013            '*tls-creds': 'StrOrNull',
1014            '*tls-hostname': 'StrOrNull',
1015            '*tls-authz': 'StrOrNull',
1016            '*max-bandwidth': 'size',
1017            '*downtime-limit': 'uint64',
1018            '*x-checkpoint-delay': { 'type': 'uint32',
1019                                     'features': [ 'unstable' ] },
1020            '*block-incremental': 'bool',
1021            '*multifd-channels': 'uint8',
1022            '*xbzrle-cache-size': 'size',
1023            '*max-postcopy-bandwidth': 'size',
1024            '*max-cpu-throttle': 'uint8',
1025            '*multifd-compression': 'MultiFDCompression',
1026            '*multifd-zlib-level': 'uint8',
1027            '*multifd-zstd-level': 'uint8',
1028            '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ],
1029            '*x-vcpu-dirty-limit-period': { 'type': 'uint64',
1030                                            'features': [ 'unstable' ] },
1031            '*vcpu-dirty-limit': 'uint64'} }
1032
1033##
1034# @migrate-set-parameters:
1035#
1036# Set various migration parameters.
1037#
1038# Since: 2.4
1039#
1040# Example:
1041#
1042# -> { "execute": "migrate-set-parameters" ,
1043#      "arguments": { "compress-level": 1 } }
1044# <- { "return": {} }
1045##
1046{ 'command': 'migrate-set-parameters', 'boxed': true,
1047  'data': 'MigrateSetParameters' }
1048
1049##
1050# @MigrationParameters:
1051#
1052# The optional members aren't actually optional.
1053#
1054# @announce-initial: Initial delay (in milliseconds) before sending
1055#     the first announce (Since 4.0)
1056#
1057# @announce-max: Maximum delay (in milliseconds) between packets in
1058#     the announcement (Since 4.0)
1059#
1060# @announce-rounds: Number of self-announce packets sent after
1061#     migration (Since 4.0)
1062#
1063# @announce-step: Increase in delay (in milliseconds) between
1064#     subsequent packets in the announcement (Since 4.0)
1065#
1066# @compress-level: compression level
1067#
1068# @compress-threads: compression thread count
1069#
1070# @compress-wait-thread: Controls behavior when all compression
1071#     threads are currently busy.  If true (default), wait for a free
1072#     compression thread to become available; otherwise, send the page
1073#     uncompressed.  (Since 3.1)
1074#
1075# @decompress-threads: decompression thread count
1076#
1077# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
1078#     bytes_xfer_period to trigger throttling.  It is expressed as
1079#     percentage.  The default value is 50. (Since 5.0)
1080#
1081# @cpu-throttle-initial: Initial percentage of time guest cpus are
1082#     throttled when migration auto-converge is activated.  (Since
1083#     2.7)
1084#
1085# @cpu-throttle-increment: throttle percentage increase each time
1086#     auto-converge detects that migration is not making progress.
1087#     (Since 2.7)
1088#
1089# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage At
1090#     the tail stage of throttling, the Guest is very sensitive to CPU
1091#     percentage while the @cpu-throttle-increment is excessive
1092#     usually at tail stage.  If this parameter is true, we will
1093#     compute the ideal CPU percentage used by the Guest, which may
1094#     exactly make the dirty rate match the dirty rate threshold.
1095#     Then we will choose a smaller throttle increment between the one
1096#     specified by @cpu-throttle-increment and the one generated by
1097#     ideal CPU percentage.  Therefore, it is compatible with
1098#     traditional throttling, meanwhile the throttle increment won't
1099#     be excessive at tail stage.  The default value is false.  (Since
1100#     5.1)
1101#
1102# @tls-creds: ID of the 'tls-creds' object that provides credentials
1103#     for establishing a TLS connection over the migration data
1104#     channel.  On the outgoing side of the migration, the credentials
1105#     must be for a 'client' endpoint, while for the incoming side the
1106#     credentials must be for a 'server' endpoint.  An empty string
1107#     means that QEMU will use plain text mode for migration, rather
1108#     than TLS (Since 2.7) Note: 2.8 reports this by omitting
1109#     tls-creds instead.
1110#
1111# @tls-hostname: hostname of the target host for the migration.  This
1112#     is required when using x509 based TLS credentials and the
1113#     migration URI does not already include a hostname.  For example
1114#     if using fd: or exec: based migration, the hostname must be
1115#     provided so that the server's x509 certificate identity can be
1116#     validated.  (Since 2.7) An empty string means that QEMU will use
1117#     the hostname associated with the migration URI, if any.  (Since
1118#     2.9) Note: 2.8 reports this by omitting tls-hostname instead.
1119#
1120# @tls-authz: ID of the 'authz' object subclass that provides access
1121#     control checking of the TLS x509 certificate distinguished name.
1122#     (Since 4.0)
1123#
1124# @max-bandwidth: to set maximum speed for migration.  maximum speed
1125#     in bytes per second.  (Since 2.8)
1126#
1127# @downtime-limit: set maximum tolerated downtime for migration.
1128#     maximum downtime in milliseconds (Since 2.8)
1129#
1130# @x-checkpoint-delay: the delay time between two COLO checkpoints.
1131#     (Since 2.8)
1132#
1133# @block-incremental: Affects how much storage is migrated when the
1134#     block migration capability is enabled.  When false, the entire
1135#     storage backing chain is migrated into a flattened image at the
1136#     destination; when true, only the active qcow2 layer is migrated
1137#     and the destination must already have access to the same backing
1138#     chain as was used on the source.  (since 2.10)
1139#
1140# @multifd-channels: Number of channels used to migrate data in
1141#     parallel.  This is the same number as the number of sockets
1142#     used for migration.  The default value is 2 (since 4.0)
1143#
1144# @xbzrle-cache-size: cache size to be used by XBZRLE migration.  It
1145#     needs to be a multiple of the target page size and a power of 2
1146#     (Since 2.11)
1147#
1148# @max-postcopy-bandwidth: Background transfer bandwidth during
1149#     postcopy.  Defaults to 0 (unlimited).  In bytes per second.
1150#     (Since 3.0)
1151#
1152# @max-cpu-throttle: maximum cpu throttle percentage.  Defaults to 99.
1153#     (Since 3.1)
1154#
1155# @multifd-compression: Which compression method to use.  Defaults to
1156#     none.  (Since 5.0)
1157#
1158# @multifd-zlib-level: Set the compression level to be used in live
1159#     migration, the compression level is an integer between 0 and 9,
1160#     where 0 means no compression, 1 means the best compression
1161#     speed, and 9 means best compression ratio which will consume
1162#     more CPU. Defaults to 1. (Since 5.0)
1163#
1164# @multifd-zstd-level: Set the compression level to be used in live
1165#     migration, the compression level is an integer between 0 and 20,
1166#     where 0 means no compression, 1 means the best compression
1167#     speed, and 20 means best compression ratio which will consume
1168#     more CPU. Defaults to 1. (Since 5.0)
1169#
1170# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
1171#     aliases for the purpose of dirty bitmap migration.  Such aliases
1172#     may for example be the corresponding names on the opposite side.
1173#     The mapping must be one-to-one, but not necessarily complete: On
1174#     the source, unmapped bitmaps and all bitmaps on unmapped nodes
1175#     will be ignored.  On the destination, encountering an unmapped
1176#     alias in the incoming migration stream will result in a report,
1177#     and all further bitmap migration data will then be discarded.
1178#     Note that the destination does not know about bitmaps it does
1179#     not receive, so there is no limitation or requirement regarding
1180#     the number of bitmaps received, or how they are named, or on
1181#     which nodes they are placed.  By default (when this parameter
1182#     has never been set), bitmap names are mapped to themselves.
1183#     Nodes are mapped to their block device name if there is one, and
1184#     to their node name otherwise.  (Since 5.2)
1185#
1186# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty limit during
1187#                             live migration. Should be in the range 1 to 1000ms,
1188#                             defaults to 1000ms. (Since 8.1)
1189#
1190# @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration.
1191#                    Defaults to 1. (Since 8.1)
1192#
1193# Features:
1194#
1195# @unstable: Members @x-checkpoint-delay and @x-vcpu-dirty-limit-period
1196#            are experimental.
1197#
1198# Since: 2.4
1199##
1200{ 'struct': 'MigrationParameters',
1201  'data': { '*announce-initial': 'size',
1202            '*announce-max': 'size',
1203            '*announce-rounds': 'size',
1204            '*announce-step': 'size',
1205            '*compress-level': 'uint8',
1206            '*compress-threads': 'uint8',
1207            '*compress-wait-thread': 'bool',
1208            '*decompress-threads': 'uint8',
1209            '*throttle-trigger-threshold': 'uint8',
1210            '*cpu-throttle-initial': 'uint8',
1211            '*cpu-throttle-increment': 'uint8',
1212            '*cpu-throttle-tailslow': 'bool',
1213            '*tls-creds': 'str',
1214            '*tls-hostname': 'str',
1215            '*tls-authz': 'str',
1216            '*max-bandwidth': 'size',
1217            '*downtime-limit': 'uint64',
1218            '*x-checkpoint-delay': { 'type': 'uint32',
1219                                     'features': [ 'unstable' ] },
1220            '*block-incremental': 'bool',
1221            '*multifd-channels': 'uint8',
1222            '*xbzrle-cache-size': 'size',
1223            '*max-postcopy-bandwidth': 'size',
1224            '*max-cpu-throttle': 'uint8',
1225            '*multifd-compression': 'MultiFDCompression',
1226            '*multifd-zlib-level': 'uint8',
1227            '*multifd-zstd-level': 'uint8',
1228            '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ],
1229            '*x-vcpu-dirty-limit-period': { 'type': 'uint64',
1230                                            'features': [ 'unstable' ] },
1231            '*vcpu-dirty-limit': 'uint64'} }
1232
1233##
1234# @query-migrate-parameters:
1235#
1236# Returns information about the current migration parameters
1237#
1238# Returns: @MigrationParameters
1239#
1240# Since: 2.4
1241#
1242# Example:
1243#
1244# -> { "execute": "query-migrate-parameters" }
1245# <- { "return": {
1246#          "decompress-threads": 2,
1247#          "cpu-throttle-increment": 10,
1248#          "compress-threads": 8,
1249#          "compress-level": 1,
1250#          "cpu-throttle-initial": 20,
1251#          "max-bandwidth": 33554432,
1252#          "downtime-limit": 300
1253#       }
1254#    }
1255##
1256{ 'command': 'query-migrate-parameters',
1257  'returns': 'MigrationParameters' }
1258
1259##
1260# @migrate-start-postcopy:
1261#
1262# Followup to a migration command to switch the migration to postcopy
1263# mode.  The postcopy-ram capability must be set on both source and
1264# destination before the original migration command.
1265#
1266# Since: 2.5
1267#
1268# Example:
1269#
1270# -> { "execute": "migrate-start-postcopy" }
1271# <- { "return": {} }
1272##
1273{ 'command': 'migrate-start-postcopy' }
1274
1275##
1276# @MIGRATION:
1277#
1278# Emitted when a migration event happens
1279#
1280# @status: @MigrationStatus describing the current migration status.
1281#
1282# Since: 2.4
1283#
1284# Example:
1285#
1286# <- {"timestamp": {"seconds": 1432121972, "microseconds": 744001},
1287#     "event": "MIGRATION",
1288#     "data": {"status": "completed"} }
1289##
1290{ 'event': 'MIGRATION',
1291  'data': {'status': 'MigrationStatus'}}
1292
1293##
1294# @MIGRATION_PASS:
1295#
1296# Emitted from the source side of a migration at the start of each
1297# pass (when it syncs the dirty bitmap)
1298#
1299# @pass: An incrementing count (starting at 1 on the first pass)
1300#
1301# Since: 2.6
1302#
1303# Example:
1304#
1305# <- { "timestamp": {"seconds": 1449669631, "microseconds": 239225},
1306#       "event": "MIGRATION_PASS", "data": {"pass": 2} }
1307##
1308{ 'event': 'MIGRATION_PASS',
1309  'data': { 'pass': 'int' } }
1310
1311##
1312# @COLOMessage:
1313#
1314# The message transmission between Primary side and Secondary side.
1315#
1316# @checkpoint-ready: Secondary VM (SVM) is ready for checkpointing
1317#
1318# @checkpoint-request: Primary VM (PVM) tells SVM to prepare for
1319#     checkpointing
1320#
1321# @checkpoint-reply: SVM gets PVM's checkpoint request
1322#
1323# @vmstate-send: VM's state will be sent by PVM.
1324#
1325# @vmstate-size: The total size of VMstate.
1326#
1327# @vmstate-received: VM's state has been received by SVM.
1328#
1329# @vmstate-loaded: VM's state has been loaded by SVM.
1330#
1331# Since: 2.8
1332##
1333{ 'enum': 'COLOMessage',
1334  'data': [ 'checkpoint-ready', 'checkpoint-request', 'checkpoint-reply',
1335            'vmstate-send', 'vmstate-size', 'vmstate-received',
1336            'vmstate-loaded' ] }
1337
1338##
1339# @COLOMode:
1340#
1341# The COLO current mode.
1342#
1343# @none: COLO is disabled.
1344#
1345# @primary: COLO node in primary side.
1346#
1347# @secondary: COLO node in secondary side.
1348#
1349# Since: 2.8
1350##
1351{ 'enum': 'COLOMode',
1352  'data': [ 'none', 'primary', 'secondary'] }
1353
1354##
1355# @FailoverStatus:
1356#
1357# An enumeration of COLO failover status
1358#
1359# @none: no failover has ever happened
1360#
1361# @require: got failover requirement but not handled
1362#
1363# @active: in the process of doing failover
1364#
1365# @completed: finish the process of failover
1366#
1367# @relaunch: restart the failover process, from 'none' -> 'completed'
1368#     (Since 2.9)
1369#
1370# Since: 2.8
1371##
1372{ 'enum': 'FailoverStatus',
1373  'data': [ 'none', 'require', 'active', 'completed', 'relaunch' ] }
1374
1375##
1376# @COLO_EXIT:
1377#
1378# Emitted when VM finishes COLO mode due to some errors happening or
1379# at the request of users.
1380#
1381# @mode: report COLO mode when COLO exited.
1382#
1383# @reason: describes the reason for the COLO exit.
1384#
1385# Since: 3.1
1386#
1387# Example:
1388#
1389# <- { "timestamp": {"seconds": 2032141960, "microseconds": 417172},
1390#      "event": "COLO_EXIT", "data": {"mode": "primary", "reason": "request" } }
1391##
1392{ 'event': 'COLO_EXIT',
1393  'data': {'mode': 'COLOMode', 'reason': 'COLOExitReason' } }
1394
1395##
1396# @COLOExitReason:
1397#
1398# The reason for a COLO exit.
1399#
1400# @none: failover has never happened.  This state does not occur in
1401#     the COLO_EXIT event, and is only visible in the result of
1402#     query-colo-status.
1403#
1404# @request: COLO exit is due to an external request.
1405#
1406# @error: COLO exit is due to an internal error.
1407#
1408# @processing: COLO is currently handling a failover (since 4.0).
1409#
1410# Since: 3.1
1411##
1412{ 'enum': 'COLOExitReason',
1413  'data': [ 'none', 'request', 'error' , 'processing' ] }
1414
1415##
1416# @x-colo-lost-heartbeat:
1417#
1418# Tell qemu that heartbeat is lost, request it to do takeover
1419# procedures.  If this command is sent to the PVM, the Primary side
1420# will exit COLO mode.  If sent to the Secondary, the Secondary side
1421# will run failover work, then take over server operation to become
1422# the service VM.
1423#
1424# Features:
1425#
1426# @unstable: This command is experimental.
1427#
1428# Since: 2.8
1429#
1430# Example:
1431#
1432# -> { "execute": "x-colo-lost-heartbeat" }
1433# <- { "return": {} }
1434##
1435{ 'command': 'x-colo-lost-heartbeat',
1436  'features': [ 'unstable' ],
1437  'if': 'CONFIG_REPLICATION' }
1438
1439##
1440# @migrate_cancel:
1441#
1442# Cancel the current executing migration process.
1443#
1444# Returns: nothing on success
1445#
1446# Notes: This command succeeds even if there is no migration process
1447#     running.
1448#
1449# Since: 0.14
1450#
1451# Example:
1452#
1453# -> { "execute": "migrate_cancel" }
1454# <- { "return": {} }
1455##
1456{ 'command': 'migrate_cancel' }
1457
1458##
1459# @migrate-continue:
1460#
1461# Continue migration when it's in a paused state.
1462#
1463# @state: The state the migration is currently expected to be in
1464#
1465# Returns: nothing on success
1466#
1467# Since: 2.11
1468#
1469# Example:
1470#
1471# -> { "execute": "migrate-continue" , "arguments":
1472#      { "state": "pre-switchover" } }
1473# <- { "return": {} }
1474##
1475{ 'command': 'migrate-continue', 'data': {'state': 'MigrationStatus'} }
1476
1477##
1478# @migrate:
1479#
1480# Migrates the current running guest to another Virtual Machine.
1481#
1482# @uri: the Uniform Resource Identifier of the destination VM
1483#
1484# @blk: do block migration (full disk copy)
1485#
1486# @inc: incremental disk copy migration
1487#
1488# @detach: this argument exists only for compatibility reasons and is
1489#     ignored by QEMU
1490#
1491# @resume: resume one paused migration, default "off". (since 3.0)
1492#
1493# Returns: nothing on success
1494#
1495# Since: 0.14
1496#
1497# Notes:
1498#
1499# 1. The 'query-migrate' command should be used to check migration's
1500#    progress and final result (this information is provided by the
1501#    'status' member)
1502#
1503# 2. All boolean arguments default to false
1504#
1505# 3. The user Monitor's "detach" argument is invalid in QMP and should
1506#    not be used
1507#
1508# Example:
1509#
1510# -> { "execute": "migrate", "arguments": { "uri": "tcp:0:4446" } }
1511# <- { "return": {} }
1512##
1513{ 'command': 'migrate',
1514  'data': {'uri': 'str', '*blk': 'bool', '*inc': 'bool',
1515           '*detach': 'bool', '*resume': 'bool' } }
1516
1517##
1518# @migrate-incoming:
1519#
1520# Start an incoming migration, the qemu must have been started with
1521# -incoming defer
1522#
1523# @uri: The Uniform Resource Identifier identifying the source or
1524#     address to listen on
1525#
1526# Returns: nothing on success
1527#
1528# Since: 2.3
1529#
1530# Notes:
1531#
1532# 1. It's a bad idea to use a string for the uri, but it needs
1533#    to stay compatible with -incoming and the format of the uri
1534#    is already exposed above libvirt.
1535#
1536# 2. QEMU must be started with -incoming defer to allow
1537#    migrate-incoming to be used.
1538#
1539# 3. The uri format is the same as for -incoming
1540#
1541# Example:
1542#
1543# -> { "execute": "migrate-incoming",
1544#      "arguments": { "uri": "tcp::4446" } }
1545# <- { "return": {} }
1546##
1547{ 'command': 'migrate-incoming', 'data': {'uri': 'str' } }
1548
1549##
1550# @xen-save-devices-state:
1551#
1552# Save the state of all devices to file.  The RAM and the block
1553# devices of the VM are not saved by this command.
1554#
1555# @filename: the file to save the state of the devices to as binary
1556#     data.  See xen-save-devices-state.txt for a description of the
1557#     binary format.
1558#
1559# @live: Optional argument to ask QEMU to treat this command as part
1560#     of a live migration.  Default to true.  (since 2.11)
1561#
1562# Returns: Nothing on success
1563#
1564# Since: 1.1
1565#
1566# Example:
1567#
1568# -> { "execute": "xen-save-devices-state",
1569#      "arguments": { "filename": "/tmp/save" } }
1570# <- { "return": {} }
1571##
1572{ 'command': 'xen-save-devices-state',
1573  'data': {'filename': 'str', '*live':'bool' } }
1574
1575##
1576# @xen-set-global-dirty-log:
1577#
1578# Enable or disable the global dirty log mode.
1579#
1580# @enable: true to enable, false to disable.
1581#
1582# Returns: nothing
1583#
1584# Since: 1.3
1585#
1586# Example:
1587#
1588# -> { "execute": "xen-set-global-dirty-log",
1589#      "arguments": { "enable": true } }
1590# <- { "return": {} }
1591##
1592{ 'command': 'xen-set-global-dirty-log', 'data': { 'enable': 'bool' } }
1593
1594##
1595# @xen-load-devices-state:
1596#
1597# Load the state of all devices from file.  The RAM and the block
1598# devices of the VM are not loaded by this command.
1599#
1600# @filename: the file to load the state of the devices from as binary
1601#     data.  See xen-save-devices-state.txt for a description of the
1602#     binary format.
1603#
1604# Since: 2.7
1605#
1606# Example:
1607#
1608# -> { "execute": "xen-load-devices-state",
1609#      "arguments": { "filename": "/tmp/resume" } }
1610# <- { "return": {} }
1611##
1612{ 'command': 'xen-load-devices-state', 'data': {'filename': 'str'} }
1613
1614##
1615# @xen-set-replication:
1616#
1617# Enable or disable replication.
1618#
1619# @enable: true to enable, false to disable.
1620#
1621# @primary: true for primary or false for secondary.
1622#
1623# @failover: true to do failover, false to stop.  It cannot be
1624#     specified if 'enable' is true.  Default value is false.
1625#
1626# Returns: nothing.
1627#
1628# Example:
1629#
1630# -> { "execute": "xen-set-replication",
1631#      "arguments": {"enable": true, "primary": false} }
1632# <- { "return": {} }
1633#
1634# Since: 2.9
1635##
1636{ 'command': 'xen-set-replication',
1637  'data': { 'enable': 'bool', 'primary': 'bool', '*failover': 'bool' },
1638  'if': 'CONFIG_REPLICATION' }
1639
1640##
1641# @ReplicationStatus:
1642#
1643# The result format for 'query-xen-replication-status'.
1644#
1645# @error: true if an error happened, false if replication is normal.
1646#
1647# @desc: the human readable error description string, when @error is
1648#     'true'.
1649#
1650# Since: 2.9
1651##
1652{ 'struct': 'ReplicationStatus',
1653  'data': { 'error': 'bool', '*desc': 'str' },
1654  'if': 'CONFIG_REPLICATION' }
1655
1656##
1657# @query-xen-replication-status:
1658#
1659# Query replication status while the vm is running.
1660#
1661# Returns: A @ReplicationStatus object showing the status.
1662#
1663# Example:
1664#
1665# -> { "execute": "query-xen-replication-status" }
1666# <- { "return": { "error": false } }
1667#
1668# Since: 2.9
1669##
1670{ 'command': 'query-xen-replication-status',
1671  'returns': 'ReplicationStatus',
1672  'if': 'CONFIG_REPLICATION' }
1673
1674##
1675# @xen-colo-do-checkpoint:
1676#
1677# Xen uses this command to notify replication to trigger a checkpoint.
1678#
1679# Returns: nothing.
1680#
1681# Example:
1682#
1683# -> { "execute": "xen-colo-do-checkpoint" }
1684# <- { "return": {} }
1685#
1686# Since: 2.9
1687##
1688{ 'command': 'xen-colo-do-checkpoint',
1689  'if': 'CONFIG_REPLICATION' }
1690
1691##
1692# @COLOStatus:
1693#
1694# The result format for 'query-colo-status'.
1695#
1696# @mode: COLO running mode.  If COLO is running, this field will
1697#     return 'primary' or 'secondary'.
1698#
1699# @last-mode: COLO last running mode.  If COLO is running, this field
1700#     will return the same value as the @mode field; after failover we
1701#     can use this field to get the last COLO mode.  (since 4.0)
1702#
1703# @reason: describes the reason for the COLO exit.
1704#
1705# Since: 3.1
1706##
1707{ 'struct': 'COLOStatus',
1708  'data': { 'mode': 'COLOMode', 'last-mode': 'COLOMode',
1709            'reason': 'COLOExitReason' },
1710  'if': 'CONFIG_REPLICATION' }
1711
1712##
1713# @query-colo-status:
1714#
1715# Query COLO status while the vm is running.
1716#
1717# Returns: A @COLOStatus object showing the status.
1718#
1719# Example:
1720#
1721# -> { "execute": "query-colo-status" }
1722# <- { "return": { "mode": "primary", "last-mode": "none", "reason": "request" } }
1723#
1724# Since: 3.1
1725##
1726{ 'command': 'query-colo-status',
1727  'returns': 'COLOStatus',
1728  'if': 'CONFIG_REPLICATION' }
1729
1730##
1731# @migrate-recover:
1732#
1733# Provide a recovery migration stream URI.
1734#
1735# @uri: the URI to be used for the recovery of migration stream.
1736#
1737# Returns: nothing.
1738#
1739# Example:
1740#
1741# -> { "execute": "migrate-recover",
1742#      "arguments": { "uri": "tcp:192.168.1.200:12345" } }
1743# <- { "return": {} }
1744#
1745# Since: 3.0
1746##
1747{ 'command': 'migrate-recover',
1748  'data': { 'uri': 'str' },
1749  'allow-oob': true }
1750
1751##
1752# @migrate-pause:
1753#
1754# Pause a migration.  Currently it only supports postcopy.
1755#
1756# Returns: nothing.
1757#
1758# Example:
1759#
1760# -> { "execute": "migrate-pause" }
1761# <- { "return": {} }
1762#
1763# Since: 3.0
1764##
1765{ 'command': 'migrate-pause', 'allow-oob': true }
1766
1767##
1768# @UNPLUG_PRIMARY:
1769#
1770# Emitted from source side of a migration when migration state is
1771# WAIT_UNPLUG. Device was unplugged by guest operating system.  Device
1772# resources in QEMU are kept on standby to be able to re-plug it in
1773# case of migration failure.
1774#
1775# @device-id: QEMU device id of the unplugged device
1776#
1777# Since: 4.2
1778#
1779# Example:
1780#
1781# <- { "event": "UNPLUG_PRIMARY",
1782#      "data": { "device-id": "hostdev0" },
1783#      "timestamp": { "seconds": 1265044230, "microseconds": 450486 } }
1784##
1785{ 'event': 'UNPLUG_PRIMARY',
1786  'data': { 'device-id': 'str' } }
1787
1788##
1789# @DirtyRateVcpu:
1790#
1791# Dirty rate of vcpu.
1792#
1793# @id: vcpu index.
1794#
1795# @dirty-rate: dirty rate.
1796#
1797# Since: 6.2
1798##
1799{ 'struct': 'DirtyRateVcpu',
1800  'data': { 'id': 'int', 'dirty-rate': 'int64' } }
1801
1802##
1803# @DirtyRateStatus:
1804#
1805# Dirty page rate measurement status.
1806#
1807# @unstarted: measuring thread has not been started yet
1808#
1809# @measuring: measuring thread is running
1810#
1811# @measured: dirty page rate is measured and the results are available
1812#
1813# Since: 5.2
1814##
1815{ 'enum': 'DirtyRateStatus',
1816  'data': [ 'unstarted', 'measuring', 'measured'] }
1817
1818##
1819# @DirtyRateMeasureMode:
1820#
1821# Method used to measure dirty page rate.  Differences between
1822# available methods are explained in @calc-dirty-rate.
1823#
1824# @page-sampling: use page sampling
1825#
1826# @dirty-ring: use dirty ring
1827#
1828# @dirty-bitmap: use dirty bitmap
1829#
1830# Since: 6.2
1831##
1832{ 'enum': 'DirtyRateMeasureMode',
1833  'data': ['page-sampling', 'dirty-ring', 'dirty-bitmap'] }
1834
1835##
1836# @DirtyRateInfo:
1837#
1838# Information about measured dirty page rate.
1839#
1840# @dirty-rate: an estimate of the dirty page rate of the VM in units
1841#     of MiB/s.  Value is present only when @status is 'measured'.
1842#
1843# @status: current status of dirty page rate measurements
1844#
1845# @start-time: start time in units of second for calculation
1846#
1847# @calc-time: time period for which dirty page rate was measured
1848#     (in seconds)
1849#
1850# @sample-pages: number of sampled pages per GiB of guest memory.
1851#     Valid only in page-sampling mode (Since 6.1)
1852#
1853# @mode: mode that was used to measure dirty page rate (Since 6.2)
1854#
1855# @vcpu-dirty-rate: dirty rate for each vCPU if dirty-ring mode was
1856#     specified (Since 6.2)
1857#
1858# Since: 5.2
1859##
1860{ 'struct': 'DirtyRateInfo',
1861  'data': {'*dirty-rate': 'int64',
1862           'status': 'DirtyRateStatus',
1863           'start-time': 'int64',
1864           'calc-time': 'int64',
1865           'sample-pages': 'uint64',
1866           'mode': 'DirtyRateMeasureMode',
1867           '*vcpu-dirty-rate': [ 'DirtyRateVcpu' ] } }
1868
1869##
1870# @calc-dirty-rate:
1871#
1872# Start measuring dirty page rate of the VM.  Results can be retrieved
1873# with @query-dirty-rate after measurements are completed.
1874#
1875# Dirty page rate is the number of pages changed in a given time
1876# period expressed in MiB/s.  The following methods of calculation are
1877# available:
1878#
1879# 1. In page sampling mode, a random subset of pages are selected and
1880#    hashed twice: once at the beginning of measurement time period,
1881#    and once again at the end.  If two hashes for some page are
1882#    different, the page is counted as changed.  Since this method
1883#    relies on sampling and hashing, calculated dirty page rate is
1884#    only an estimate of its true value.  Increasing @sample-pages
1885#    improves estimation quality at the cost of higher computational
1886#    overhead.
1887#
# 2. Dirty bitmap mode captures writes to memory (for example by
#    temporarily revoking write access to all pages) and counts page
#    faults.  Information about modified pages is collected into a
1891#    bitmap, where each bit corresponds to one guest page.  This mode
1892#    requires that KVM accelerator property "dirty-ring-size" is *not*
1893#    set.
1894#
1895# 3. Dirty ring mode is similar to dirty bitmap mode, but the
1896#    information about modified pages is collected into ring buffer.
1897#    This mode tracks page modification per each vCPU separately.  It
1898#    requires that KVM accelerator property "dirty-ring-size" is set.
1899#
1900# @calc-time: time period in units of second for which dirty page rate
1901#     is calculated.  Note that larger @calc-time values will
1902#     typically result in smaller dirty page rates because page
1903#     dirtying is a one-time event.  Once some page is counted as
1904#     dirty during @calc-time period, further writes to this page will
1905#     not increase dirty page rate anymore.
1906#
1907# @sample-pages: number of sampled pages per each GiB of guest memory.
1908#     Default value is 512.  For 4KiB guest pages this corresponds to
1909#     sampling ratio of 0.2%.  This argument is used only in page
1910#     sampling mode.  (Since 6.1)
1911#
1912# @mode: mechanism for tracking dirty pages.  Default value is
1913#     'page-sampling'.  Others are 'dirty-bitmap' and 'dirty-ring'.
1914#     (Since 6.1)
1915#
1916# Since: 5.2
1917#
1918# Example:
1919#
# -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 1,
#                                                 "sample-pages": 512} }
1922# <- { "return": {} }
1923##
1924{ 'command': 'calc-dirty-rate', 'data': {'calc-time': 'int64',
1925                                         '*sample-pages': 'int',
1926                                         '*mode': 'DirtyRateMeasureMode'} }
1927
1928##
1929# @query-dirty-rate:
1930#
1931# Query results of the most recent invocation of @calc-dirty-rate.
1932#
1933# Since: 5.2
1934#
1935# Examples:
1936#
1937# 1. Measurement is in progress:
1938#
1939# <- {"status": "measuring", "sample-pages": 512,
1940#     "mode": "page-sampling", "start-time": 3665220, "calc-time": 10}
1941#
1942# 2. Measurement has been completed:
1943#
1944# <- {"status": "measured", "sample-pages": 512, "dirty-rate": 108,
1945#     "mode": "page-sampling", "start-time": 3665220, "calc-time": 10}
1946##
1947{ 'command': 'query-dirty-rate', 'returns': 'DirtyRateInfo' }
1948
1949##
1950# @DirtyLimitInfo:
1951#
1952# Dirty page rate limit information of a virtual CPU.
1953#
1954# @cpu-index: index of a virtual CPU.
1955#
1956# @limit-rate: upper limit of dirty page rate (MB/s) for a virtual
1957#     CPU, 0 means unlimited.
1958#
1959# @current-rate: current dirty page rate (MB/s) for a virtual CPU.
1960#
1961# Since: 7.1
1962##
1963{ 'struct': 'DirtyLimitInfo',
1964  'data': { 'cpu-index': 'int',
1965            'limit-rate': 'uint64',
1966            'current-rate': 'uint64' } }
1967
1968##
1969# @set-vcpu-dirty-limit:
1970#
1971# Set the upper limit of dirty page rate for virtual CPUs.
1972#
1973# Requires KVM with accelerator property "dirty-ring-size" set.  A
1974# virtual CPU's dirty page rate is a measure of its memory load.  To
1975# observe dirty page rates, use @calc-dirty-rate.
1976#
1977# @cpu-index: index of a virtual CPU, default is all.
1978#
1979# @dirty-rate: upper limit of dirty page rate (MB/s) for virtual CPUs.
1980#
1981# Since: 7.1
1982#
1983# Example:
1984#
# -> {"execute": "set-vcpu-dirty-limit",
1986#     "arguments": { "dirty-rate": 200,
1987#                    "cpu-index": 1 } }
1988# <- { "return": {} }
1989##
1990{ 'command': 'set-vcpu-dirty-limit',
1991  'data': { '*cpu-index': 'int',
1992            'dirty-rate': 'uint64' } }
1993
1994##
1995# @cancel-vcpu-dirty-limit:
1996#
1997# Cancel the upper limit of dirty page rate for virtual CPUs.
1998#
1999# Cancel the dirty page limit for the vCPU which has been set with
2000# set-vcpu-dirty-limit command.  Note that this command requires
2001# support from dirty ring, same as the "set-vcpu-dirty-limit".
2002#
2003# @cpu-index: index of a virtual CPU, default is all.
2004#
2005# Since: 7.1
2006#
2007# Example:
2008#
# -> {"execute": "cancel-vcpu-dirty-limit",
2010#     "arguments": { "cpu-index": 1 } }
2011# <- { "return": {} }
2012##
2013{ 'command': 'cancel-vcpu-dirty-limit',
2014  'data': { '*cpu-index': 'int'} }
2015
2016##
2017# @query-vcpu-dirty-limit:
2018#
2019# Returns information about virtual CPU dirty page rate limits, if
2020# any.
2021#
2022# Since: 7.1
2023#
2024# Example:
2025#
2026# -> {"execute": "query-vcpu-dirty-limit"}
2027# <- {"return": [
2028#        { "limit-rate": 60, "current-rate": 3, "cpu-index": 0},
2029#        { "limit-rate": 60, "current-rate": 3, "cpu-index": 1}]}
2030##
2031{ 'command': 'query-vcpu-dirty-limit',
2032  'returns': [ 'DirtyLimitInfo' ] }
2033
2034##
2035# @MigrationThreadInfo:
2036#
# Information about migration threads
2038#
2039# @name: the name of migration thread
2040#
2041# @thread-id: ID of the underlying host thread
2042#
2043# Since: 7.2
2044##
2045{ 'struct': 'MigrationThreadInfo',
2046  'data': {'name': 'str',
2047           'thread-id': 'int'} }
2048
2049##
2050# @query-migrationthreads:
2051#
# Returns information about the running migration threads.
#
# Returns: a list of @MigrationThreadInfo, one entry per migration
#     thread (thread name and host thread ID)
2057#
2058# Since: 7.2
2059##
2060{ 'command': 'query-migrationthreads',
2061  'returns': ['MigrationThreadInfo'] }
2062
2063##
2064# @snapshot-save:
2065#
2066# Save a VM snapshot
2067#
2068# @job-id: identifier for the newly created job
2069#
2070# @tag: name of the snapshot to create
2071#
2072# @vmstate: block device node name to save vmstate to
2073#
2074# @devices: list of block device node names to save a snapshot to
2075#
2076# Applications should not assume that the snapshot save is complete
2077# when this command returns.  The job commands / events must be used
2078# to determine completion and to fetch details of any errors that
2079# arise.
2080#
2081# Note that execution of the guest CPUs may be stopped during the time
2082# it takes to save the snapshot.  A future version of QEMU may ensure
2083# CPUs are executing continuously.
2084#
2085# It is strongly recommended that @devices contain all writable block
2086# device nodes if a consistent snapshot is required.
2087#
2088# If @tag already exists, an error will be reported
2089#
2090# Returns: nothing
2091#
2092# Example:
2093#
2094# -> { "execute": "snapshot-save",
2095#      "arguments": {
2096#         "job-id": "snapsave0",
2097#         "tag": "my-snap",
2098#         "vmstate": "disk0",
2099#         "devices": ["disk0", "disk1"]
2100#      }
2101#    }
2102# <- { "return": { } }
2103# <- {"event": "JOB_STATUS_CHANGE",
2104#     "timestamp": {"seconds": 1432121972, "microseconds": 744001},
2105#     "data": {"status": "created", "id": "snapsave0"}}
2106# <- {"event": "JOB_STATUS_CHANGE",
2107#     "timestamp": {"seconds": 1432122172, "microseconds": 744001},
2108#     "data": {"status": "running", "id": "snapsave0"}}
2109# <- {"event": "STOP",
2110#     "timestamp": {"seconds": 1432122372, "microseconds": 744001} }
2111# <- {"event": "RESUME",
2112#     "timestamp": {"seconds": 1432122572, "microseconds": 744001} }
2113# <- {"event": "JOB_STATUS_CHANGE",
2114#     "timestamp": {"seconds": 1432122772, "microseconds": 744001},
2115#     "data": {"status": "waiting", "id": "snapsave0"}}
2116# <- {"event": "JOB_STATUS_CHANGE",
2117#     "timestamp": {"seconds": 1432122972, "microseconds": 744001},
2118#     "data": {"status": "pending", "id": "snapsave0"}}
2119# <- {"event": "JOB_STATUS_CHANGE",
2120#     "timestamp": {"seconds": 1432123172, "microseconds": 744001},
2121#     "data": {"status": "concluded", "id": "snapsave0"}}
2122# -> {"execute": "query-jobs"}
2123# <- {"return": [{"current-progress": 1,
2124#                 "status": "concluded",
2125#                 "total-progress": 1,
2126#                 "type": "snapshot-save",
2127#                 "id": "snapsave0"}]}
2128#
2129# Since: 6.0
2130##
2131{ 'command': 'snapshot-save',
2132  'data': { 'job-id': 'str',
2133            'tag': 'str',
2134            'vmstate': 'str',
2135            'devices': ['str'] } }
2136
2137##
2138# @snapshot-load:
2139#
2140# Load a VM snapshot
2141#
2142# @job-id: identifier for the newly created job
2143#
2144# @tag: name of the snapshot to load.
2145#
2146# @vmstate: block device node name to load vmstate from
2147#
2148# @devices: list of block device node names to load a snapshot from
2149#
2150# Applications should not assume that the snapshot load is complete
2151# when this command returns.  The job commands / events must be used
2152# to determine completion and to fetch details of any errors that
2153# arise.
2154#
2155# Note that execution of the guest CPUs will be stopped during the
2156# time it takes to load the snapshot.
2157#
2158# It is strongly recommended that @devices contain all writable block
2159# device nodes that can have changed since the original @snapshot-save
2160# command execution.
2161#
2162# Returns: nothing
2163#
2164# Example:
2165#
2166# -> { "execute": "snapshot-load",
2167#      "arguments": {
2168#         "job-id": "snapload0",
2169#         "tag": "my-snap",
2170#         "vmstate": "disk0",
2171#         "devices": ["disk0", "disk1"]
2172#      }
2173#    }
2174# <- { "return": { } }
2175# <- {"event": "JOB_STATUS_CHANGE",
2176#     "timestamp": {"seconds": 1472124172, "microseconds": 744001},
2177#     "data": {"status": "created", "id": "snapload0"}}
2178# <- {"event": "JOB_STATUS_CHANGE",
2179#     "timestamp": {"seconds": 1472125172, "microseconds": 744001},
2180#     "data": {"status": "running", "id": "snapload0"}}
2181# <- {"event": "STOP",
2182#     "timestamp": {"seconds": 1472125472, "microseconds": 744001} }
2183# <- {"event": "RESUME",
2184#     "timestamp": {"seconds": 1472125872, "microseconds": 744001} }
2185# <- {"event": "JOB_STATUS_CHANGE",
2186#     "timestamp": {"seconds": 1472126172, "microseconds": 744001},
2187#     "data": {"status": "waiting", "id": "snapload0"}}
2188# <- {"event": "JOB_STATUS_CHANGE",
2189#     "timestamp": {"seconds": 1472127172, "microseconds": 744001},
2190#     "data": {"status": "pending", "id": "snapload0"}}
2191# <- {"event": "JOB_STATUS_CHANGE",
2192#     "timestamp": {"seconds": 1472128172, "microseconds": 744001},
2193#     "data": {"status": "concluded", "id": "snapload0"}}
2194# -> {"execute": "query-jobs"}
2195# <- {"return": [{"current-progress": 1,
2196#                 "status": "concluded",
2197#                 "total-progress": 1,
2198#                 "type": "snapshot-load",
2199#                 "id": "snapload0"}]}
2200#
2201# Since: 6.0
2202##
2203{ 'command': 'snapshot-load',
2204  'data': { 'job-id': 'str',
2205            'tag': 'str',
2206            'vmstate': 'str',
2207            'devices': ['str'] } }
2208
2209##
2210# @snapshot-delete:
2211#
2212# Delete a VM snapshot
2213#
2214# @job-id: identifier for the newly created job
2215#
2216# @tag: name of the snapshot to delete.
2217#
2218# @devices: list of block device node names to delete a snapshot from
2219#
2220# Applications should not assume that the snapshot delete is complete
2221# when this command returns.  The job commands / events must be used
2222# to determine completion and to fetch details of any errors that
2223# arise.
2224#
2225# Returns: nothing
2226#
2227# Example:
2228#
2229# -> { "execute": "snapshot-delete",
2230#      "arguments": {
2231#         "job-id": "snapdelete0",
2232#         "tag": "my-snap",
2233#         "devices": ["disk0", "disk1"]
2234#      }
2235#    }
2236# <- { "return": { } }
2237# <- {"event": "JOB_STATUS_CHANGE",
2238#     "timestamp": {"seconds": 1442124172, "microseconds": 744001},
2239#     "data": {"status": "created", "id": "snapdelete0"}}
2240# <- {"event": "JOB_STATUS_CHANGE",
2241#     "timestamp": {"seconds": 1442125172, "microseconds": 744001},
2242#     "data": {"status": "running", "id": "snapdelete0"}}
2243# <- {"event": "JOB_STATUS_CHANGE",
2244#     "timestamp": {"seconds": 1442126172, "microseconds": 744001},
2245#     "data": {"status": "waiting", "id": "snapdelete0"}}
2246# <- {"event": "JOB_STATUS_CHANGE",
2247#     "timestamp": {"seconds": 1442127172, "microseconds": 744001},
2248#     "data": {"status": "pending", "id": "snapdelete0"}}
2249# <- {"event": "JOB_STATUS_CHANGE",
2250#     "timestamp": {"seconds": 1442128172, "microseconds": 744001},
2251#     "data": {"status": "concluded", "id": "snapdelete0"}}
2252# -> {"execute": "query-jobs"}
2253# <- {"return": [{"current-progress": 1,
2254#                 "status": "concluded",
2255#                 "total-progress": 1,
2256#                 "type": "snapshot-delete",
2257#                 "id": "snapdelete0"}]}
2258#
2259# Since: 6.0
2260##
2261{ 'command': 'snapshot-delete',
2262  'data': { 'job-id': 'str',
2263            'tag': 'str',
2264            'devices': ['str'] } }
2265