xref: /openbmc/qemu/qapi/migration.json (revision 66db46ca)
1# -*- Mode: Python -*-
2# vim: filetype=python
3#
4
5##
6# = Migration
7##
8
9{ 'include': 'common.json' }
10{ 'include': 'sockets.json' }
11
12##
13# @MigrationStats:
14#
15# Detailed migration status.
16#
17# @transferred: amount of bytes already transferred to the target VM
18#
19# @remaining: amount of bytes remaining to be transferred to the
20#     target VM
21#
22# @total: total amount of bytes involved in the migration process
23#
24# @duplicate: number of duplicate (zero) pages (since 1.2)
25#
26# @skipped: number of skipped zero pages. Always zero, only provided for
27#     compatibility (since 1.5)
28#
29# @normal: number of normal pages (since 1.2)
30#
31# @normal-bytes: number of normal bytes sent (since 1.2)
32#
33# @dirty-pages-rate: number of pages dirtied per second by the guest
34#     (since 1.3)
35#
36# @mbps: throughput in megabits/sec.  (since 1.6)
37#
38# @dirty-sync-count: number of times that dirty RAM was synchronized
39#     (since 2.1)
40#
41# @postcopy-requests: The number of page requests received from the
42#     destination (since 2.7)
43#
44# @page-size: The number of bytes per page for the various page-based
45#     statistics (since 2.10)
46#
47# @multifd-bytes: The number of bytes sent through multifd (since 3.0)
48#
49# @pages-per-second: the number of memory pages transferred per second
50#     (Since 4.0)
51#
52# @precopy-bytes: The number of bytes sent in the pre-copy phase
53#     (since 7.0).
54#
55# @downtime-bytes: The number of bytes sent while the guest is paused
56#     (since 7.0).
57#
58# @postcopy-bytes: The number of bytes sent during the post-copy phase
59#     (since 7.0).
60#
61# @dirty-sync-missed-zero-copy: Number of times dirty RAM
62#     synchronization could not avoid copying dirty pages.  This is
63#     between 0 and @dirty-sync-count * @multifd-channels.  (since
64#     7.1)
65#
66# Features:
67#
68# @deprecated: Member @skipped is always zero since 1.5.3
69#
70# Since: 0.14
71#
72##
73{ 'struct': 'MigrationStats',
74  'data': {'transferred': 'int', 'remaining': 'int', 'total': 'int' ,
75           'duplicate': 'int',
76           'skipped': { 'type': 'int', 'features': [ 'deprecated' ] },
77           'normal': 'int',
78           'normal-bytes': 'int', 'dirty-pages-rate': 'int',
79           'mbps': 'number', 'dirty-sync-count': 'int',
80           'postcopy-requests': 'int', 'page-size': 'int',
81           'multifd-bytes': 'uint64', 'pages-per-second': 'uint64',
82           'precopy-bytes': 'uint64', 'downtime-bytes': 'uint64',
83           'postcopy-bytes': 'uint64',
84           'dirty-sync-missed-zero-copy': 'uint64' } }
85
86##
87# @XBZRLECacheStats:
88#
89# Detailed XBZRLE migration cache statistics
90#
91# @cache-size: XBZRLE cache size
92#
93# @bytes: amount of bytes already transferred to the target VM
94#
95# @pages: number of pages transferred to the target VM
96#
97# @cache-miss: number of cache misses
98#
99# @cache-miss-rate: rate of cache misses (since 2.1)
100#
101# @encoding-rate: rate of encoded bytes (since 5.1)
102#
103# @overflow: number of overflows
104#
105# Since: 1.2
106##
107{ 'struct': 'XBZRLECacheStats',
108  'data': {'cache-size': 'size', 'bytes': 'int', 'pages': 'int',
109           'cache-miss': 'int', 'cache-miss-rate': 'number',
110           'encoding-rate': 'number', 'overflow': 'int' } }
111
112##
113# @CompressionStats:
114#
115# Detailed migration compression statistics
116#
117# @pages: number of pages compressed and transferred to the target VM
118#
119# @busy: count of times that no free thread was available to compress
120#     data
121#
122# @busy-rate: rate at which the compression threads were busy
123#
124# @compressed-size: amount of bytes after compression
125#
126# @compression-rate: rate of compressed size
127#
128# Since: 3.1
129##
130{ 'struct': 'CompressionStats',
131  'data': {'pages': 'int', 'busy': 'int', 'busy-rate': 'number',
132           'compressed-size': 'int', 'compression-rate': 'number' } }
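
# Illustrative sketch only (not one of the upstream examples, and the
# numbers are made up): when the compress capability is on, these
# statistics appear as the "compression" member of query-migrate's
# result, e.g.:
#
# <- { "return": {
#         "status": "active",
#         "compression": {
#            "pages": 7680,
#            "busy": 3,
#            "busy-rate": 0.0004,
#            "compressed-size": 1258291,
#            "compression-rate": 3.2
#         }
#      }
#    }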
133
134##
135# @MigrationStatus:
136#
137# An enumeration of migration status.
138#
139# @none: no migration has ever happened.
140#
141# @setup: migration process has been initiated.
142#
143# @cancelling: in the process of cancelling migration.
144#
145# @cancelled: cancelling migration is finished.
146#
147# @active: in the process of doing migration.
148#
149# @postcopy-active: like active, but now in postcopy mode.  (since
150#     2.5)
151#
152# @postcopy-paused: during postcopy but paused.  (since 3.0)
153#
154# @postcopy-recover: trying to recover from a paused postcopy.  (since
155#     3.0)
156#
157# @completed: migration is finished.
158#
159# @failed: some error occurred during migration process.
160#
161# @colo: VM is in the process of fault tolerance; the VM cannot get
162#     into this state unless the colo capability is enabled for
163#     migration.  (since 2.8)
164#
165# @pre-switchover: Paused before device serialisation.  (since 2.11)
166#
167# @device: During device serialisation when pause-before-switchover is
168#     enabled (since 2.11)
169#
170# @wait-unplug: wait for device unplug request by guest OS to be
171#     completed.  (since 4.2)
172#
173# Since: 2.3
174##
175{ 'enum': 'MigrationStatus',
176  'data': [ 'none', 'setup', 'cancelling', 'cancelled',
177            'active', 'postcopy-active', 'postcopy-paused',
178            'postcopy-recover', 'completed', 'failed', 'colo',
179            'pre-switchover', 'device', 'wait-unplug' ] }
180##
181# @VfioStats:
182#
183# Detailed VFIO devices migration statistics
184#
185# @transferred: amount of bytes transferred to the target VM by VFIO
186#     devices
187#
188# Since: 5.2
189##
190{ 'struct': 'VfioStats',
191  'data': {'transferred': 'int' } }
192
193##
194# @MigrationInfo:
195#
196# Information about current migration process.
197#
198# @status: @MigrationStatus describing the current migration status.
199#     If this field is not returned, no migration process has been
200#     initiated
201#
202# @ram: @MigrationStats containing detailed migration status, only
203#     returned if status is 'active' or 'completed' (since 1.2)
204#
205# @disk: @MigrationStats containing detailed disk migration status,
206#     only returned if status is 'active' and it is a block migration
207#
208# @xbzrle-cache: @XBZRLECacheStats containing detailed XBZRLE
209#     migration statistics, only returned if XBZRLE feature is on and
210#     status is 'active' or 'completed' (since 1.2)
211#
212# @total-time: total amount of milliseconds since migration started.
213#     If migration has ended, it returns the total migration time.
214#     (since 1.2)
215#
216# @downtime: only present when migration finishes correctly; total
217#     downtime in milliseconds for the guest.  (since 1.3)
218#
219# @expected-downtime: only present while migration is active; expected
220#     downtime in milliseconds for the guest in the last walk of the
221#     dirty bitmap.  (since 1.3)
222#
223# @setup-time: amount of setup time in milliseconds *before* the
224#     iterations begin but *after* the QMP command is issued.  This is
225#     designed to provide an accounting of any activities (such as
226#     RDMA pinning) which may be expensive, but do not actually occur
227#     during the iterative migration rounds themselves.  (since 1.6)
228#
229# @cpu-throttle-percentage: percentage of time guest cpus are being
230#     throttled during auto-converge.  This is only present when
231#     auto-converge has started throttling guest cpus.  (Since 2.7)
232#
233# @error-desc: the human-readable error description string.  Clients
234#     should not attempt to parse the error strings.  (Since 2.7)
235#
236# @postcopy-blocktime: total time when all vCPUs were blocked during
237#     postcopy live migration.  This is only present when the
238#     postcopy-blocktime migration capability is enabled.  (Since 3.0)
239#
240# @postcopy-vcpu-blocktime: list of the postcopy blocktime per vCPU.
241#     This is only present when the postcopy-blocktime migration
242#     capability is enabled.  (Since 3.0)
243#
244# @compression: migration compression statistics, only returned if
245#     compression feature is on and status is 'active' or 'completed'
246#     (Since 3.1)
247#
248# @socket-address: Only used for tcp migration, to report the real
249#     port in use (Since 4.0)
250#
251# @vfio: @VfioStats containing detailed VFIO devices migration
252#     statistics, only returned if VFIO device is present, migration
253#     is supported by all VFIO devices and status is 'active' or
254#     'completed' (since 5.2)
255#
256# @blocked-reasons: A list of reasons an outgoing migration is
257#     blocked.  Present and non-empty when migration is blocked.
258#     (since 6.0)
259#
260# @dirty-limit-throttle-time-per-round: Maximum throttle time
261#     (in microseconds) of virtual CPUs in each dirty ring full round,
262#     which shows how MigrationCapability dirty-limit affects the
263#     guest during live migration.  (Since 8.1)
264#
265# @dirty-limit-ring-full-time: Estimated average dirty ring full time
266#     (in microseconds) for each dirty ring full round.  The value
267#     equals the dirty ring memory size divided by the average dirty
268#     page rate of the virtual CPU, which can be used to observe the
269#     average memory load of the virtual CPU indirectly.  Note that
270#     zero means the guest doesn't dirty memory.  (Since 8.1)
271#
272# Features:
273#
274# @deprecated: Member @disk is deprecated because block migration is.
275#
276# Since: 0.14
277##
278{ 'struct': 'MigrationInfo',
279  'data': {'*status': 'MigrationStatus', '*ram': 'MigrationStats',
280           '*disk': { 'type': 'MigrationStats', 'features': [ 'deprecated' ] },
281           '*vfio': 'VfioStats',
282           '*xbzrle-cache': 'XBZRLECacheStats',
283           '*total-time': 'int',
284           '*expected-downtime': 'int',
285           '*downtime': 'int',
286           '*setup-time': 'int',
287           '*cpu-throttle-percentage': 'int',
288           '*error-desc': 'str',
289           '*blocked-reasons': ['str'],
290           '*postcopy-blocktime': 'uint32',
291           '*postcopy-vcpu-blocktime': ['uint32'],
292           '*compression': 'CompressionStats',
293           '*socket-address': ['SocketAddress'],
294           '*dirty-limit-throttle-time-per-round': 'uint64',
295           '*dirty-limit-ring-full-time': 'uint64'} }
296
297##
298# @query-migrate:
299#
300# Returns information about the current migration process.  If
301# migration is active, there will be another json-object with RAM
302# migration status, and if block migration is active, another one
303# with block migration status.
304#
305# Returns: @MigrationInfo
306#
307# Since: 0.14
308#
309# Examples:
310#
311# 1. Before the first migration
312#
313# -> { "execute": "query-migrate" }
314# <- { "return": {} }
315#
316# 2. Migration is done and has succeeded
317#
318# -> { "execute": "query-migrate" }
319# <- { "return": {
320#         "status": "completed",
321#         "total-time":12345,
322#         "setup-time":12345,
323#         "downtime":12345,
324#         "ram":{
325#           "transferred":123,
326#           "remaining":123,
327#           "total":246,
328#           "duplicate":123,
329#           "normal":123,
330#           "normal-bytes":123456,
331#           "dirty-sync-count":15
332#         }
333#      }
334#    }
335#
336# 3. Migration is done and has failed
337#
338# -> { "execute": "query-migrate" }
339# <- { "return": { "status": "failed" } }
340#
341# 4. Migration is being performed and is not a block migration:
342#
343# -> { "execute": "query-migrate" }
344# <- {
345#       "return":{
346#          "status":"active",
347#          "total-time":12345,
348#          "setup-time":12345,
349#          "expected-downtime":12345,
350#          "ram":{
351#             "transferred":123,
352#             "remaining":123,
353#             "total":246,
354#             "duplicate":123,
355#             "normal":123,
356#             "normal-bytes":123456,
357#             "dirty-sync-count":15
358#          }
359#       }
360#    }
361#
362# 5. Migration is being performed and is a block migration:
363#
364# -> { "execute": "query-migrate" }
365# <- {
366#       "return":{
367#          "status":"active",
368#          "total-time":12345,
369#          "setup-time":12345,
370#          "expected-downtime":12345,
371#          "ram":{
372#             "total":1057024,
373#             "remaining":1053304,
374#             "transferred":3720,
375#             "duplicate":123,
376#             "normal":123,
377#             "normal-bytes":123456,
378#             "dirty-sync-count":15
379#          },
380#          "disk":{
381#             "total":20971520,
382#             "remaining":20880384,
383#             "transferred":91136
384#          }
385#       }
386#    }
387#
388# 6. Migration is being performed and XBZRLE is active:
389#
390# -> { "execute": "query-migrate" }
391# <- {
392#       "return":{
393#          "status":"active",
394#          "total-time":12345,
395#          "setup-time":12345,
396#          "expected-downtime":12345,
397#          "ram":{
398#             "total":1057024,
399#             "remaining":1053304,
400#             "transferred":3720,
401#             "duplicate":10,
402#             "normal":3333,
403#             "normal-bytes":3412992,
404#             "dirty-sync-count":15
405#          },
406#          "xbzrle-cache":{
407#             "cache-size":67108864,
408#             "bytes":20971520,
409#             "pages":2444343,
410#             "cache-miss":2244,
411#             "cache-miss-rate":0.123,
412#             "encoding-rate":80.1,
413#             "overflow":34434
414#          }
415#       }
416#    }
417##
418{ 'command': 'query-migrate', 'returns': 'MigrationInfo' }
419
420##
421# @MigrationCapability:
422#
423# Migration capabilities enumeration
424#
425# @xbzrle: Migration supports xbzrle (Xor Based Zero Run Length
426#     Encoding).  This feature allows us to minimize migration traffic
427#     for certain workloads, by sending a compressed difference of the
428#     pages.
429#
430# @rdma-pin-all: Controls whether the entire VM memory footprint is
431#     mlock()'d on demand or all at once.  Refer to docs/rdma.txt for
432#     usage.  Disabled by default.  (since 2.0)
433#
434# @zero-blocks: During storage migration encode blocks of zeroes
435#     efficiently.  This essentially saves 1MB of zeroes per block on
436#     the wire.  Enabling requires source and target VM to support
437#     this feature.  To enable it, it is sufficient to enable the
438#     capability on the source VM.  The feature is disabled by default.
439#     (since 1.6)
440#
441# @compress: Use multiple compression threads to accelerate live
442#     migration.  This feature can help to reduce the migration
443#     traffic, by sending compressed pages.  Please note that if
444#     compress and xbzrle are both on, compress only takes effect in
445#     the ram bulk stage; after that, it will be disabled and only
446#     xbzrle takes effect, which can help to minimize migration
447#     traffic.  The feature is disabled by default.  (since 2.4)
448#
449# @events: generate events for each migration state change (since 2.4)
450#
451# @auto-converge: If enabled, QEMU will automatically throttle down
452#     the guest to speed up convergence of RAM migration.  (since 1.6)
453#
454# @postcopy-ram: Start executing on the migration target before all of
455#     RAM has been migrated, pulling the remaining pages along as
456#     needed.  The capability must have the same setting on both source
457#     and target or migration will not even start.  NOTE: If the
458#     migration fails during postcopy the VM will fail.  (since 2.6)
459#
460# @x-colo: If enabled, migration will never end, and the state of the
461#     VM on the primary side will be migrated continuously to the VM
462#     on the secondary side; this process is called COarse-Grain LOck
463#     Stepping (COLO) for Non-stop Service.  (since 2.8)
464#
465# @release-ram: if enabled, qemu will free the migrated ram pages on
466#     the source during postcopy-ram migration.  (since 2.9)
467#
468# @block: If enabled, QEMU will also migrate the contents of all block
469#     devices.  Default is disabled.  A possible alternative uses
470#     mirror jobs to a builtin NBD server on the destination, which
471#     offers more flexibility.  (Since 2.10)
472#
473# @return-path: If enabled, migration will use the return path even
474#     for precopy.  (since 2.10)
475#
476# @pause-before-switchover: Pause outgoing migration before
477#     serialising device state and before disabling block IO (since
478#     2.11)
479#
480# @multifd: Use more than one fd for migration (since 4.0)
481#
482# @dirty-bitmaps: If enabled, QEMU will migrate named dirty bitmaps.
483#     (since 2.12)
484#
485# @postcopy-blocktime: Calculate downtime for postcopy live migration
486#     (since 3.0)
487#
488# @late-block-activate: If enabled, the destination will not activate
489#     block devices (and thus take locks) immediately at the end of
490#     migration.  (since 3.0)
491#
492# @x-ignore-shared: If enabled, QEMU will not migrate shared memory
493#     that is accessible on the destination machine.  (since 4.0)
494#
495# @validate-uuid: Send the UUID of the source to allow the destination
496#     to ensure it is the same.  (since 4.2)
497#
498# @background-snapshot: If enabled, the migration stream will be a
499#     snapshot of the VM exactly at the point when the migration
500#     procedure starts.  The VM RAM is saved while the VM is running.
501#     (since 6.0)
502#
503# @zero-copy-send: Controls behavior on sending memory pages on
504#     migration.  When true, enables a zero-copy mechanism for sending
505#     memory pages, if host supports it.  Requires that QEMU be
506#     permitted to use locked memory for guest RAM pages.  (since 7.1)
507#
508# @postcopy-preempt: If enabled, the migration process will allow
509#     postcopy requests to preempt the precopy stream, so postcopy
510#     requests will be handled faster.  This is a performance feature
511#     and should not affect the correctness of postcopy migration.
512#     (since 7.1)
513#
514# @switchover-ack: If enabled, migration will not stop the source VM
515#     and complete the migration until an ACK is received from the
516#     destination that it's OK to do so.  Exactly when this ACK is
517#     sent depends on the migrated devices that use this feature.  For
518#     example, a device can use it to make sure some of its data is
519#     sent and loaded in the destination before doing switchover.
520#     This can reduce downtime if devices that support this capability
521#     are present.  'return-path' capability must be enabled to use
522#     it.  (since 8.1)
523#
524# @dirty-limit: If enabled, migration will throttle vCPUs as needed to
525#     keep their dirty page rate within @vcpu-dirty-limit.  This can
526#     improve responsiveness of large guests during live migration,
527#     and can result in more stable read performance.  Requires KVM
528#     with accelerator property "dirty-ring-size" set.  (Since 8.1)
529#
530# Features:
531#
532# @deprecated: Member @block is deprecated.  Use blockdev-mirror with
533#     NBD instead.
534#
535# @unstable: Members @x-colo and @x-ignore-shared are experimental.
536#
537# Since: 1.2
538##
539{ 'enum': 'MigrationCapability',
540  'data': ['xbzrle', 'rdma-pin-all', 'auto-converge', 'zero-blocks',
541           'compress', 'events', 'postcopy-ram',
542           { 'name': 'x-colo', 'features': [ 'unstable' ] },
543           'release-ram',
544           { 'name': 'block', 'features': [ 'deprecated' ] },
545           'return-path', 'pause-before-switchover', 'multifd',
546           'dirty-bitmaps', 'postcopy-blocktime', 'late-block-activate',
547           { 'name': 'x-ignore-shared', 'features': [ 'unstable' ] },
548           'validate-uuid', 'background-snapshot',
549           'zero-copy-send', 'postcopy-preempt', 'switchover-ack',
550           'dirty-limit'] }
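
# As an illustration only (not part of the upstream documentation):
# these enum values are what goes into the "capability" member of
# MigrationCapabilityStatus.  Assuming both capabilities are built in
# and set consistently on each side, enabling postcopy-ram and events
# might look like:
#
# -> { "execute": "migrate-set-capabilities", "arguments":
#      { "capabilities": [ { "capability": "postcopy-ram", "state": true },
#                          { "capability": "events", "state": true } ] } }
# <- { "return": {} }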
551
552##
553# @MigrationCapabilityStatus:
554#
555# Migration capability information
556#
557# @capability: capability enum
558#
559# @state: capability state bool
560#
561# Since: 1.2
562##
563{ 'struct': 'MigrationCapabilityStatus',
564  'data': { 'capability': 'MigrationCapability', 'state': 'bool' } }
565
566##
567# @migrate-set-capabilities:
568#
569# Enable/Disable the following migration capabilities (like xbzrle)
570#
571# @capabilities: json array of capability modifications to make
572#
573# Since: 1.2
574#
575# Example:
576#
577# -> { "execute": "migrate-set-capabilities" , "arguments":
578#      { "capabilities": [ { "capability": "xbzrle", "state": true } ] } }
579# <- { "return": {} }
580##
581{ 'command': 'migrate-set-capabilities',
582  'data': { 'capabilities': ['MigrationCapabilityStatus'] } }
583
584##
585# @query-migrate-capabilities:
586#
587# Returns information about the current migration capabilities status
588#
589# Returns: a list of @MigrationCapabilityStatus
590#
591# Since: 1.2
592#
593# Example:
594#
595# -> { "execute": "query-migrate-capabilities" }
596# <- { "return": [
597#       {"state": false, "capability": "xbzrle"},
598#       {"state": false, "capability": "rdma-pin-all"},
599#       {"state": false, "capability": "auto-converge"},
600#       {"state": false, "capability": "zero-blocks"},
601#       {"state": false, "capability": "compress"},
602#       {"state": true, "capability": "events"},
603#       {"state": false, "capability": "postcopy-ram"},
604#       {"state": false, "capability": "x-colo"}
605#    ]}
606##
607{ 'command': 'query-migrate-capabilities', 'returns': ['MigrationCapabilityStatus'] }
608
609##
610# @MultiFDCompression:
611#
612# An enumeration of multifd compression methods.
613#
614# @none: no compression.
615#
616# @zlib: use zlib compression method.
617#
618# @zstd: use zstd compression method.
619#
620# Since: 5.0
621##
622{ 'enum': 'MultiFDCompression',
623  'data': [ 'none', 'zlib',
624            { 'name': 'zstd', 'if': 'CONFIG_ZSTD' } ] }
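
# Illustrative sketch only (not upstream documentation): one of these
# methods is selected through the multifd-compression parameter and
# only takes effect once the multifd capability is enabled; 'zstd' is
# only accepted when QEMU was built with CONFIG_ZSTD.
#
# -> { "execute": "migrate-set-capabilities", "arguments":
#      { "capabilities": [ { "capability": "multifd", "state": true } ] } }
# <- { "return": {} }
# -> { "execute": "migrate-set-parameters",
#      "arguments": { "multifd-compression": "zstd" } }
# <- { "return": {} }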
625
626##
627# @BitmapMigrationBitmapAliasTransform:
628#
629# @persistent: If present, the bitmap will be made persistent or
630#     transient depending on this parameter.
631#
632# Since: 6.0
633##
634{ 'struct': 'BitmapMigrationBitmapAliasTransform',
635  'data': {
636      '*persistent': 'bool'
637  } }
638
639##
640# @BitmapMigrationBitmapAlias:
641#
642# @name: The name of the bitmap.
643#
644# @alias: An alias name for migration (for example the bitmap name on
645#     the opposite site).
646#
647# @transform: Allows the modification of the migrated bitmap.  (since
648#     6.0)
649#
650# Since: 5.2
651##
652{ 'struct': 'BitmapMigrationBitmapAlias',
653  'data': {
654      'name': 'str',
655      'alias': 'str',
656      '*transform': 'BitmapMigrationBitmapAliasTransform'
657  } }
658
659##
660# @BitmapMigrationNodeAlias:
661#
662# Maps a block node name and the bitmaps it has to aliases for dirty
663# bitmap migration.
664#
665# @node-name: A block node name.
666#
667# @alias: An alias block node name for migration (for example the node
668#     name on the opposite site).
669#
670# @bitmaps: Mappings for the bitmaps on this node.
671#
672# Since: 5.2
673##
674{ 'struct': 'BitmapMigrationNodeAlias',
675  'data': {
676      'node-name': 'str',
677      'alias': 'str',
678      'bitmaps': [ 'BitmapMigrationBitmapAlias' ]
679  } }
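
# Illustrative sketch only (the node and bitmap names below are
# hypothetical): these structs form the value of the
# block-bitmap-mapping migration parameter, mapping a local node and
# its bitmaps to the aliases used in the migration stream:
#
# -> { "execute": "migrate-set-parameters",
#      "arguments": { "block-bitmap-mapping": [
#          { "node-name": "node0", "alias": "drive0",
#            "bitmaps": [ { "name": "bitmap0", "alias": "bmap-alias0",
#                           "transform": { "persistent": true } } ] } ] } }
# <- { "return": {} }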
680
681##
682# @MigrationParameter:
683#
684# Migration parameters enumeration
685#
686# @announce-initial: Initial delay (in milliseconds) before sending
687#     the first announce (Since 4.0)
688#
689# @announce-max: Maximum delay (in milliseconds) between packets in
690#     the announcement (Since 4.0)
691#
692# @announce-rounds: Number of self-announce packets sent after
693#     migration (Since 4.0)
694#
695# @announce-step: Increase in delay (in milliseconds) between
696#     subsequent packets in the announcement (Since 4.0)
697#
698# @compress-level: Set the compression level to be used in live
699#     migration.  The compression level is an integer between 0 and 9,
700#     where 0 means no compression, 1 means the best compression
701#     speed, and 9 means the best compression ratio, which will consume
702#     more CPU.
703#
704# @compress-threads: Set the compression thread count to be used in
705#     live migration.  The compression thread count is an integer
706#     between 1 and 255.
707#
708# @compress-wait-thread: Controls behavior when all compression
709#     threads are currently busy.  If true (default), wait for a free
710#     compression thread to become available; otherwise, send the page
711#     uncompressed.  (Since 3.1)
712#
713# @decompress-threads: Set the decompression thread count to be used
714#     in live migration.  The decompression thread count is an integer
715#     between 1 and 255.  Usually, decompression is at least 4 times as
716#     fast as compression, so setting decompress-threads to about 1/4
717#     of compress-threads is adequate.
718#
719# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
720#     bytes_xfer_period to trigger throttling.  It is expressed as
721#     a percentage.  The default value is 50.  (Since 5.0)
722#
723# @cpu-throttle-initial: Initial percentage of time guest cpus are
724#     throttled when migration auto-converge is activated.  The
725#     default value is 20. (Since 2.7)
726#
727# @cpu-throttle-increment: throttle percentage increase each time
728#     auto-converge detects that migration is not making progress.
729#     The default value is 10. (Since 2.7)
730#
731# @cpu-throttle-tailslow: Make CPU throttling slower at the tail
732#     stage.  At the tail stage of throttling, the guest is very
733#     sensitive to CPU percentage while the @cpu-throttle-increment is
734#     usually excessive at that stage.  If this parameter is true, we
735#     will compute the ideal CPU percentage used by the guest, which
736#     may exactly make the dirty rate match the dirty rate threshold.
737#     Then we will choose a smaller throttle increment between the one
738#     specified by @cpu-throttle-increment and the one generated by the
739#     ideal CPU percentage.  Therefore, it is compatible with
740#     traditional throttling, while the throttle increment won't be
741#     excessive at the tail stage.  The default value is false.  (Since
742#     5.1)
743#
744# @tls-creds: ID of the 'tls-creds' object that provides credentials
745#     for establishing a TLS connection over the migration data
746#     channel.  On the outgoing side of the migration, the credentials
747#     must be for a 'client' endpoint, while for the incoming side the
748#     credentials must be for a 'server' endpoint.  Setting this will
749#     enable TLS for all migrations.  The default is unset, resulting
750#     in unsecured migration at the QEMU level.  (Since 2.7)
751#
752# @tls-hostname: hostname of the target host for the migration.  This
753#     is required when using x509 based TLS credentials and the
754#     migration URI does not already include a hostname.  For example
755#     if using fd: or exec: based migration, the hostname must be
756#     provided so that the server's x509 certificate identity can be
757#     validated.  (Since 2.7)
758#
759# @tls-authz: ID of the 'authz' object subclass that provides access
760#     control checking of the TLS x509 certificate distinguished name.
761#     This object is only resolved at time of use, so can be deleted
762#     and recreated on the fly while the migration server is active.
763#     If missing, it will default to denying access (Since 4.0)
764#
765# @max-bandwidth: maximum speed for migration, in bytes per second.
766#     (Since 2.8)
767#
768# @avail-switchover-bandwidth: the available bandwidth that migration
769#     can use during the switchover phase.  NOTE!  This does not limit
770#     the bandwidth during switchover, but is only used in calculations
771#     when deciding whether to switch over.  By default, this value is zero,
772#     which means QEMU will estimate the bandwidth automatically.  This can
773#     be set when the estimated value is not accurate, and the user is
774#     able to guarantee that such bandwidth is available when switching over.
775#     When specified correctly, this can make the switchover decision much
776#     more accurate.  (Since 8.2)
777#
778# @downtime-limit: maximum tolerated downtime for migration, in
779#     milliseconds.  (Since 2.8)
780#
781# @x-checkpoint-delay: The delay time (in ms) between two COLO
782#     checkpoints in periodic mode.  (Since 2.8)
783#
784# @block-incremental: Affects how much storage is migrated when the
785#     block migration capability is enabled.  When false, the entire
786#     storage backing chain is migrated into a flattened image at the
787#     destination; when true, only the active qcow2 layer is migrated
788#     and the destination must already have access to the same backing
789#     chain as was used on the source.  (since 2.10)
790#
791# @multifd-channels: Number of channels used to migrate data in
792#     parallel.  This is the same as the number of sockets used for
793#     migration.  The default value is 2 (since 4.0)
794#
795# @xbzrle-cache-size: cache size to be used by XBZRLE migration.  It
796#     needs to be a multiple of the target page size and a power of 2
797#     (Since 2.11)
798#
799# @max-postcopy-bandwidth: Background transfer bandwidth during
800#     postcopy.  Defaults to 0 (unlimited).  In bytes per second.
801#     (Since 3.0)
802#
803# @max-cpu-throttle: maximum cpu throttle percentage.  Defaults to 99.
804#     (Since 3.1)
805#
806# @multifd-compression: Which compression method to use.  Defaults to
807#     none.  (Since 5.0)
808#
809# @multifd-zlib-level: Set the compression level to be used in live
810#     migration.  The compression level is an integer between 0 and 9,
811#     where 0 means no compression, 1 means the best compression
812#     speed, and 9 means the best compression ratio, which will consume
813#     more CPU.  Defaults to 1.  (Since 5.0)
814#
815# @multifd-zstd-level: Set the compression level to be used in live
816#     migration.  The compression level is an integer between 0 and 20,
817#     where 0 means no compression, 1 means the best compression
818#     speed, and 20 means the best compression ratio, which will consume
819#     more CPU.  Defaults to 1.  (Since 5.0)
820#
821# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
822#     aliases for the purpose of dirty bitmap migration.  Such aliases
823#     may for example be the corresponding names on the opposite site.
824#     The mapping must be one-to-one, but not necessarily complete: On
825#     the source, unmapped bitmaps and all bitmaps on unmapped nodes
826#     will be ignored.  On the destination, encountering an unmapped
827#     alias in the incoming migration stream will result in a report,
828#     and all further bitmap migration data will then be discarded.
829#     Note that the destination does not know about bitmaps it does
830#     not receive, so there is no limitation or requirement regarding
831#     the number of bitmaps received, or how they are named, or on
832#     which nodes they are placed.  By default (when this parameter
833#     has never been set), bitmap names are mapped to themselves.
834#     Nodes are mapped to their block device name if there is one, and
835#     to their node name otherwise.  (Since 5.2)
836#
837# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty
838#     limit during live migration.  Should be in the range 1 to 1000ms.
839#     Defaults to 1000ms.  (Since 8.1)
840#
841# @vcpu-dirty-limit: Dirty page rate limit (MB/s) during live
842#     migration.  Defaults to 1.  (Since 8.1)
843#
844# Features:
845#
846# @deprecated: Member @block-incremental is deprecated.  Use
847#     blockdev-mirror with NBD instead.
848#
849# @unstable: Members @x-checkpoint-delay and @x-vcpu-dirty-limit-period
850#     are experimental.
851#
852# Since: 2.4
853##
854{ 'enum': 'MigrationParameter',
855  'data': ['announce-initial', 'announce-max',
856           'announce-rounds', 'announce-step',
857           'compress-level', 'compress-threads', 'decompress-threads',
858           'compress-wait-thread', 'throttle-trigger-threshold',
859           'cpu-throttle-initial', 'cpu-throttle-increment',
860           'cpu-throttle-tailslow',
861           'tls-creds', 'tls-hostname', 'tls-authz', 'max-bandwidth',
862           'avail-switchover-bandwidth', 'downtime-limit',
863           { 'name': 'x-checkpoint-delay', 'features': [ 'unstable' ] },
864           { 'name': 'block-incremental', 'features': [ 'deprecated' ] },
865           'multifd-channels',
866           'xbzrle-cache-size', 'max-postcopy-bandwidth',
867           'max-cpu-throttle', 'multifd-compression',
868           'multifd-zlib-level', 'multifd-zstd-level',
869           'block-bitmap-mapping',
870           { 'name': 'x-vcpu-dirty-limit-period', 'features': ['unstable'] },
871           'vcpu-dirty-limit'] }
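
# As a hedged illustration (not upstream documentation): the
# dirty-limit related parameters above are used together with the
# dirty-limit capability, e.g. to throttle vCPUs to roughly 10 MB/s of
# dirtied memory (this assumes KVM with the "dirty-ring-size"
# accelerator property set):
#
# -> { "execute": "migrate-set-capabilities", "arguments":
#      { "capabilities": [ { "capability": "dirty-limit", "state": true } ] } }
# <- { "return": {} }
# -> { "execute": "migrate-set-parameters",
#      "arguments": { "vcpu-dirty-limit": 10 } }
# <- { "return": {} }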
872
873##
874# @MigrateSetParameters:
875#
876# @announce-initial: Initial delay (in milliseconds) before sending
877#     the first announce (Since 4.0)
878#
879# @announce-max: Maximum delay (in milliseconds) between packets in
880#     the announcement (Since 4.0)
881#
882# @announce-rounds: Number of self-announce packets sent after
883#     migration (Since 4.0)
884#
885# @announce-step: Increase in delay (in milliseconds) between
886#     subsequent packets in the announcement (Since 4.0)
887#
888# @compress-level: compression level
889#
890# @compress-threads: compression thread count
891#
892# @compress-wait-thread: Controls behavior when all compression
893#     threads are currently busy.  If true (default), wait for a free
894#     compression thread to become available; otherwise, send the page
895#     uncompressed.  (Since 3.1)
896#
897# @decompress-threads: decompression thread count
898#
899# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
900#     bytes_xfer_period to trigger throttling.  It is expressed as
901#     a percentage.  The default value is 50.  (Since 5.0)
902#
903# @cpu-throttle-initial: Initial percentage of time guest cpus are
904#     throttled when migration auto-converge is activated.  The
905#     default value is 20. (Since 2.7)
906#
907# @cpu-throttle-increment: throttle percentage increase each time
908#     auto-converge detects that migration is not making progress.
909#     The default value is 10. (Since 2.7)
910#
911# @cpu-throttle-tailslow: Make CPU throttling slower at the tail
912#     stage.  At the tail stage of throttling, the guest is very
913#     sensitive to CPU percentage while the @cpu-throttle-increment is
914#     usually excessive at that stage.  If this parameter is true, we
915#     will compute the ideal CPU percentage used by the guest, which
916#     may exactly make the dirty rate match the dirty rate threshold.
917#     Then we will choose a smaller throttle increment between the one
918#     specified by @cpu-throttle-increment and the one generated by the
919#     ideal CPU percentage.  Therefore, it is compatible with
920#     traditional throttling, while the throttle increment won't be
921#     excessive at the tail stage.  The default value is false.  (Since
922#     5.1)
923#
924# @tls-creds: ID of the 'tls-creds' object that provides credentials
925#     for establishing a TLS connection over the migration data
926#     channel.  On the outgoing side of the migration, the credentials
927#     must be for a 'client' endpoint, while for the incoming side the
928#     credentials must be for a 'server' endpoint.  Setting this to a
929#     non-empty string enables TLS for all migrations.  An empty
930#     string means that QEMU will use plain text mode for migration,
931#     rather than TLS (Since 2.9) Previously (since 2.7), this was
932#     reported by omitting tls-creds instead.
933#
934# @tls-hostname: hostname of the target host for the migration.  This
935#     is required when using x509 based TLS credentials and the
936#     migration URI does not already include a hostname.  For example
937#     if using fd: or exec: based migration, the hostname must be
938#     provided so that the server's x509 certificate identity can be
939#     validated.  (Since 2.7) An empty string means that QEMU will use
940#     the hostname associated with the migration URI, if any.  (Since
941#     2.9) Previously (since 2.7), this was reported by omitting
942#     tls-hostname instead.
943#
944# @max-bandwidth: maximum speed for migration, in bytes per second.
945#     (Since 2.8)
946#
947# @avail-switchover-bandwidth: the available bandwidth that migration
948#     can use during the switchover phase.  NOTE!  This does not limit
949#     the bandwidth during switchover, but is only used in calculations
950#     when deciding whether to switch over.  By default, this value is zero,
951#     which means QEMU will estimate the bandwidth automatically.  This can
952#     be set when the estimated value is not accurate, and the user is
953#     able to guarantee that such bandwidth is available when switching over.
954#     When specified correctly, this can make the switchover decision much
955#     more accurate.  (Since 8.2)
956#
957# @downtime-limit: maximum tolerated downtime for migration, in
958#     milliseconds.  (Since 2.8)
959#
960# @x-checkpoint-delay: the delay time between two COLO checkpoints.
961#     (Since 2.8)
962#
963# @block-incremental: Affects how much storage is migrated when the
964#     block migration capability is enabled.  When false, the entire
965#     storage backing chain is migrated into a flattened image at the
966#     destination; when true, only the active qcow2 layer is migrated
967#     and the destination must already have access to the same backing
968#     chain as was used on the source.  (since 2.10)
969#
970# @multifd-channels: Number of channels used to migrate data in
971#     parallel.  This is the same as the number of sockets used for
972#     migration.  The default value is 2 (since 4.0)
973#
974# @xbzrle-cache-size: cache size to be used by XBZRLE migration.  It
975#     needs to be a multiple of the target page size and a power of 2
976#     (Since 2.11)
977#
978# @max-postcopy-bandwidth: Background transfer bandwidth during
979#     postcopy.  Defaults to 0 (unlimited).  In bytes per second.
980#     (Since 3.0)
981#
982# @max-cpu-throttle: maximum cpu throttle percentage.  The default
983#     value is 99. (Since 3.1)
984#
985# @multifd-compression: Which compression method to use.  Defaults to
986#     none.  (Since 5.0)
987#
988# @multifd-zlib-level: Set the compression level to be used in live
989#     migration.  The compression level is an integer between 0 and 9,
990#     where 0 means no compression, 1 means the best compression
991#     speed, and 9 means the best compression ratio, which will consume
992#     more CPU.  Defaults to 1.  (Since 5.0)
993#
994# @multifd-zstd-level: Set the compression level to be used in live
995#     migration.  The compression level is an integer between 0 and 20,
996#     where 0 means no compression, 1 means the best compression
997#     speed, and 20 means the best compression ratio, which will consume
998#     more CPU.  Defaults to 1.  (Since 5.0)
999#
1000# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
1001#     aliases for the purpose of dirty bitmap migration.  Such aliases
1002#     may for example be the corresponding names on the opposite site.
1003#     The mapping must be one-to-one, but not necessarily complete: On
1004#     the source, unmapped bitmaps and all bitmaps on unmapped nodes
1005#     will be ignored.  On the destination, encountering an unmapped
1006#     alias in the incoming migration stream will result in a report,
1007#     and all further bitmap migration data will then be discarded.
1008#     Note that the destination does not know about bitmaps it does
1009#     not receive, so there is no limitation or requirement regarding
1010#     the number of bitmaps received, or how they are named, or on
1011#     which nodes they are placed.  By default (when this parameter
1012#     has never been set), bitmap names are mapped to themselves.
1013#     Nodes are mapped to their block device name if there is one, and
1014#     to their node name otherwise.  (Since 5.2)
1015#
1016# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty
1017#     limit during live migration.  Should be in the range 1 to 1000ms.
1018#     Defaults to 1000ms.  (Since 8.1)
1019#
1020# @vcpu-dirty-limit: Dirty page rate limit (MB/s) during live
1021#     migration.  Defaults to 1.  (Since 8.1)
1022#
1023# Features:
1024#
1025# @deprecated: Member @block-incremental is deprecated.  Use
1026#     blockdev-mirror with NBD instead.
1027#
1028# @unstable: Members @x-checkpoint-delay and @x-vcpu-dirty-limit-period
1029#     are experimental.
1030#
1031# TODO: either fuse back into MigrationParameters, or make
1032#     MigrationParameters members mandatory
1033#
1034# Since: 2.4
1035##
1036{ 'struct': 'MigrateSetParameters',
1037  'data': { '*announce-initial': 'size',
1038            '*announce-max': 'size',
1039            '*announce-rounds': 'size',
1040            '*announce-step': 'size',
1041            '*compress-level': 'uint8',
1042            '*compress-threads': 'uint8',
1043            '*compress-wait-thread': 'bool',
1044            '*decompress-threads': 'uint8',
1045            '*throttle-trigger-threshold': 'uint8',
1046            '*cpu-throttle-initial': 'uint8',
1047            '*cpu-throttle-increment': 'uint8',
1048            '*cpu-throttle-tailslow': 'bool',
1049            '*tls-creds': 'StrOrNull',
1050            '*tls-hostname': 'StrOrNull',
1051            '*tls-authz': 'StrOrNull',
1052            '*max-bandwidth': 'size',
1053            '*avail-switchover-bandwidth': 'size',
1054            '*downtime-limit': 'uint64',
1055            '*x-checkpoint-delay': { 'type': 'uint32',
1056                                     'features': [ 'unstable' ] },
1057            '*block-incremental': { 'type': 'bool',
1058                                    'features': [ 'deprecated' ] },
1059            '*multifd-channels': 'uint8',
1060            '*xbzrle-cache-size': 'size',
1061            '*max-postcopy-bandwidth': 'size',
1062            '*max-cpu-throttle': 'uint8',
1063            '*multifd-compression': 'MultiFDCompression',
1064            '*multifd-zlib-level': 'uint8',
1065            '*multifd-zstd-level': 'uint8',
1066            '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ],
1067            '*x-vcpu-dirty-limit-period': { 'type': 'uint64',
1068                                            'features': [ 'unstable' ] },
1069            '*vcpu-dirty-limit': 'uint64'} }
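
# Illustrative sketch only (the object ID "tls0" and the hostname are
# hypothetical): because @tls-creds and @tls-hostname are StrOrNull
# here, TLS can be switched on by naming a tls-creds object and
# switched back off by passing an empty string:
#
# -> { "execute": "migrate-set-parameters",
#      "arguments": { "tls-creds": "tls0",
#                     "tls-hostname": "dest.example.org" } }
# <- { "return": {} }
# -> { "execute": "migrate-set-parameters",
#      "arguments": { "tls-creds": "" } }
# <- { "return": {} }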
1070
1071##
1072# @migrate-set-parameters:
1073#
1074# Set various migration parameters.
1075#
1076# Since: 2.4
1077#
1078# Example:
1079#
1080# -> { "execute": "migrate-set-parameters" ,
1081#      "arguments": { "compress-level": 1 } }
1082# <- { "return": {} }
1083##
1084{ 'command': 'migrate-set-parameters', 'boxed': true,
1085  'data': 'MigrateSetParameters' }
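
# A further illustrative sketch (values are made up): capping the
# migration at 100 MiB/s with at most 500 ms of tolerated downtime:
#
# -> { "execute": "migrate-set-parameters",
#      "arguments": { "max-bandwidth": 104857600, "downtime-limit": 500 } }
# <- { "return": {} }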
1086
1087##
1088# @MigrationParameters:
1089#
1090# The optional members aren't actually optional.
1091#
1092# @announce-initial: Initial delay (in milliseconds) before sending
1093#     the first announce (Since 4.0)
1094#
1095# @announce-max: Maximum delay (in milliseconds) between packets in
1096#     the announcement (Since 4.0)
1097#
1098# @announce-rounds: Number of self-announce packets sent after
1099#     migration (Since 4.0)
1100#
1101# @announce-step: Increase in delay (in milliseconds) between
1102#     subsequent packets in the announcement (Since 4.0)
1103#
1104# @compress-level: compression level
1105#
1106# @compress-threads: compression thread count
1107#
1108# @compress-wait-thread: Controls behavior when all compression
1109#     threads are currently busy.  If true (default), wait for a free
1110#     compression thread to become available; otherwise, send the page
1111#     uncompressed.  (Since 3.1)
1112#
1113# @decompress-threads: decompression thread count
1114#
1115# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
1116#     bytes_xfer_period to trigger throttling.  It is expressed as
1117#     a percentage.  The default value is 50.  (Since 5.0)
1118#
1119# @cpu-throttle-initial: Initial percentage of time guest cpus are
1120#     throttled when migration auto-converge is activated.  (Since
1121#     2.7)
1122#
1123# @cpu-throttle-increment: throttle percentage increase each time
1124#     auto-converge detects that migration is not making progress.
1125#     (Since 2.7)
1126#
1127# @cpu-throttle-tailslow: Make CPU throttling slower at the tail
1128#     stage.  At the tail stage of throttling, the guest is very
1129#     sensitive to CPU percentage while the @cpu-throttle-increment is
1130#     usually excessive at that stage.  If this parameter is true, we
1131#     will compute the ideal CPU percentage used by the guest, which
1132#     may exactly make the dirty rate match the dirty rate threshold.
1133#     Then we will choose a smaller throttle increment between the one
1134#     specified by @cpu-throttle-increment and the one generated by the
1135#     ideal CPU percentage.  Therefore, it is compatible with
1136#     traditional throttling, while the throttle increment won't be
1137#     excessive at the tail stage.  The default value is false.  (Since
1138#     5.1)
1139#
1140# @tls-creds: ID of the 'tls-creds' object that provides credentials
1141#     for establishing a TLS connection over the migration data
1142#     channel.  On the outgoing side of the migration, the credentials
1143#     must be for a 'client' endpoint, while for the incoming side the
1144#     credentials must be for a 'server' endpoint.  An empty string
1145#     means that QEMU will use plain text mode for migration, rather
1146#     than TLS (Since 2.7) Note: 2.8 reports this by omitting
1147#     tls-creds instead.
1148#
1149# @tls-hostname: hostname of the target host for the migration.  This
1150#     is required when using x509 based TLS credentials and the
1151#     migration URI does not already include a hostname.  For example
1152#     if using fd: or exec: based migration, the hostname must be
1153#     provided so that the server's x509 certificate identity can be
1154#     validated.  (Since 2.7) An empty string means that QEMU will use
1155#     the hostname associated with the migration URI, if any.  (Since
1156#     2.9) Note: 2.8 reports this by omitting tls-hostname instead.
1157#
1158# @tls-authz: ID of the 'authz' object subclass that provides access
1159#     control checking of the TLS x509 certificate distinguished name.
1160#     (Since 4.0)
1161#
1162# @max-bandwidth: maximum speed for migration, in bytes per second.
1163#     (Since 2.8)
1164#
1165# @avail-switchover-bandwidth: the available bandwidth that migration
1166#     can use during the switchover phase.  NOTE!  This does not limit
1167#     the bandwidth during switchover, but is only used in calculations
1168#     when deciding whether to switch over.  By default, this value is zero,
1169#     which means QEMU will estimate the bandwidth automatically.  This can
1170#     be set when the estimated value is not accurate, and the user is
1171#     able to guarantee that such bandwidth is available when switching over.
1172#     When specified correctly, this can make the switchover decision much
1173#     more accurate.  (Since 8.2)
1174#
1175# @downtime-limit: maximum tolerated downtime for migration, in
1176#     milliseconds.  (Since 2.8)
1177#
1178# @x-checkpoint-delay: the delay time between two COLO checkpoints.
1179#     (Since 2.8)
1180#
1181# @block-incremental: Affects how much storage is migrated when the
1182#     block migration capability is enabled.  When false, the entire
1183#     storage backing chain is migrated into a flattened image at the
1184#     destination; when true, only the active qcow2 layer is migrated
1185#     and the destination must already have access to the same backing
1186#     chain as was used on the source.  (since 2.10)
1187#
1188# @multifd-channels: Number of channels used to migrate data in
1189#     parallel.  This is the same as the number of sockets used for
1190#     migration.  The default value is 2 (since 4.0)
1191#
1192# @xbzrle-cache-size: cache size to be used by XBZRLE migration.  It
1193#     needs to be a multiple of the target page size and a power of 2
1194#     (Since 2.11)
1195#
1196# @max-postcopy-bandwidth: Background transfer bandwidth during
1197#     postcopy.  Defaults to 0 (unlimited).  In bytes per second.
1198#     (Since 3.0)
1199#
1200# @max-cpu-throttle: maximum cpu throttle percentage.  Defaults to 99.
1201#     (Since 3.1)
1202#
1203# @multifd-compression: Which compression method to use.  Defaults to
1204#     none.  (Since 5.0)
1205#
1206# @multifd-zlib-level: Set the compression level to be used in live
1207#     migration.  The compression level is an integer between 0 and 9,
1208#     where 0 means no compression, 1 means the best compression
1209#     speed, and 9 means the best compression ratio, which will consume
1210#     more CPU.  Defaults to 1.  (Since 5.0)
1211#
1212# @multifd-zstd-level: Set the compression level to be used in live
1213#     migration.  The compression level is an integer between 0 and 20,
1214#     where 0 means no compression, 1 means the best compression
1215#     speed, and 20 means the best compression ratio, which will consume
1216#     more CPU.  Defaults to 1.  (Since 5.0)
1217#
1218# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
1219#     aliases for the purpose of dirty bitmap migration.  Such aliases
1220#     may for example be the corresponding names on the opposite site.
1221#     The mapping must be one-to-one, but not necessarily complete: On
1222#     the source, unmapped bitmaps and all bitmaps on unmapped nodes
1223#     will be ignored.  On the destination, encountering an unmapped
1224#     alias in the incoming migration stream will result in a report,
1225#     and all further bitmap migration data will then be discarded.
1226#     Note that the destination does not know about bitmaps it does
1227#     not receive, so there is no limitation or requirement regarding
1228#     the number of bitmaps received, or how they are named, or on
1229#     which nodes they are placed.  By default (when this parameter
1230#     has never been set), bitmap names are mapped to themselves.
1231#     Nodes are mapped to their block device name if there is one, and
1232#     to their node name otherwise.  (Since 5.2)
1233#
1234# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty
1235#     limit during live migration.  Should be in the range 1 to 1000ms.
1236#     Defaults to 1000ms.  (Since 8.1)
1237#
1238# @vcpu-dirty-limit: Dirty page rate limit (MB/s) during live
1239#     migration.  Defaults to 1.  (Since 8.1)
1240#
1241# Features:
1242#
1243# @deprecated: Member @block-incremental is deprecated.  Use
1244#     blockdev-mirror with NBD instead.
1245#
1246# @unstable: Members @x-checkpoint-delay and @x-vcpu-dirty-limit-period
1247#     are experimental.
1248#
1249# Since: 2.4
1250##
1251{ 'struct': 'MigrationParameters',
1252  'data': { '*announce-initial': 'size',
1253            '*announce-max': 'size',
1254            '*announce-rounds': 'size',
1255            '*announce-step': 'size',
1256            '*compress-level': 'uint8',
1257            '*compress-threads': 'uint8',
1258            '*compress-wait-thread': 'bool',
1259            '*decompress-threads': 'uint8',
1260            '*throttle-trigger-threshold': 'uint8',
1261            '*cpu-throttle-initial': 'uint8',
1262            '*cpu-throttle-increment': 'uint8',
1263            '*cpu-throttle-tailslow': 'bool',
1264            '*tls-creds': 'str',
1265            '*tls-hostname': 'str',
1266            '*tls-authz': 'str',
1267            '*max-bandwidth': 'size',
1268            '*avail-switchover-bandwidth': 'size',
1269            '*downtime-limit': 'uint64',
1270            '*x-checkpoint-delay': { 'type': 'uint32',
1271                                     'features': [ 'unstable' ] },
1272            '*block-incremental': { 'type': 'bool',
1273                                    'features': [ 'deprecated' ] },
1274            '*multifd-channels': 'uint8',
1275            '*xbzrle-cache-size': 'size',
1276            '*max-postcopy-bandwidth': 'size',
1277            '*max-cpu-throttle': 'uint8',
1278            '*multifd-compression': 'MultiFDCompression',
1279            '*multifd-zlib-level': 'uint8',
1280            '*multifd-zstd-level': 'uint8',
1281            '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ],
1282            '*x-vcpu-dirty-limit-period': { 'type': 'uint64',
1283                                            'features': [ 'unstable' ] },
1284            '*vcpu-dirty-limit': 'uint64'} }
1285
1286##
1287# @query-migrate-parameters:
1288#
1289# Returns information about the current migration parameters
1290#
1291# Returns: @MigrationParameters
1292#
1293# Since: 2.4
1294#
1295# Example:
1296#
1297# -> { "execute": "query-migrate-parameters" }
1298# <- { "return": {
1299#          "decompress-threads": 2,
1300#          "cpu-throttle-increment": 10,
1301#          "compress-threads": 8,
1302#          "compress-level": 1,
1303#          "cpu-throttle-initial": 20,
1304#          "max-bandwidth": 33554432,
1305#          "downtime-limit": 300
1306#       }
1307#    }
1308##
1309{ 'command': 'query-migrate-parameters',
1310  'returns': 'MigrationParameters' }
1311
1312##
1313# @migrate-start-postcopy:
1314#
1315# Followup to a migration command to switch the migration to postcopy
1316# mode.  The postcopy-ram capability must be set on both source and
1317# destination before the original migration command.
1318#
1319# Since: 2.5
1320#
1321# Example:
1322#
1323# -> { "execute": "migrate-start-postcopy" }
1324# <- { "return": {} }
1325##
1326{ 'command': 'migrate-start-postcopy' }
1327
1328##
1329# @MIGRATION:
1330#
1331# Emitted when a migration event happens
1332#
1333# @status: @MigrationStatus describing the current migration status.
1334#
1335# Since: 2.4
1336#
1337# Example:
1338#
1339# <- {"timestamp": {"seconds": 1432121972, "microseconds": 744001},
1340#     "event": "MIGRATION",
1341#     "data": {"status": "completed"} }
1342##
1343{ 'event': 'MIGRATION',
1344  'data': {'status': 'MigrationStatus'}}
1345
1346##
1347# @MIGRATION_PASS:
1348#
1349# Emitted from the source side of a migration at the start of each
1350# pass (when it syncs the dirty bitmap)
1351#
1352# @pass: An incrementing count (starting at 1 on the first pass)
1353#
1354# Since: 2.6
1355#
1356# Example:
1357#
1358# <- { "timestamp": {"seconds": 1449669631, "microseconds": 239225},
1359#       "event": "MIGRATION_PASS", "data": {"pass": 2} }
1360##
1361{ 'event': 'MIGRATION_PASS',
1362  'data': { 'pass': 'int' } }
1363
1364##
1365# @COLOMessage:
1366#
1367# Messages transmitted between the Primary side and the Secondary side.
1368#
1369# @checkpoint-ready: Secondary VM (SVM) is ready for checkpointing
1370#
1371# @checkpoint-request: Primary VM (PVM) tells SVM to prepare for
1372#     checkpointing
1373#
1374# @checkpoint-reply: SVM gets PVM's checkpoint request
1375#
1376# @vmstate-send: VM's state will be sent by PVM.
1377#
1378# @vmstate-size: The total size of VMstate.
1379#
1380# @vmstate-received: VM's state has been received by SVM.
1381#
1382# @vmstate-loaded: VM's state has been loaded by SVM.
1383#
1384# Since: 2.8
1385##
1386{ 'enum': 'COLOMessage',
1387  'data': [ 'checkpoint-ready', 'checkpoint-request', 'checkpoint-reply',
1388            'vmstate-send', 'vmstate-size', 'vmstate-received',
1389            'vmstate-loaded' ] }
1390
1391##
1392# @COLOMode:
1393#
1394# The COLO current mode.
1395#
1396# @none: COLO is disabled.
1397#
1398# @primary: COLO node on the primary side.
1399#
1400# @secondary: COLO node on the secondary side.
1401#
1402# Since: 2.8
1403##
1404{ 'enum': 'COLOMode',
1405  'data': [ 'none', 'primary', 'secondary'] }
1406
1407##
1408# @FailoverStatus:
1409#
1410# An enumeration of COLO failover status
1411#
1412# @none: no failover has ever happened
1413#
1414# @require: a failover request has been received but not yet handled
1415#
1416# @active: failover is in progress
1417#
1418# @completed: the failover process has finished
1419#
1420# @relaunch: restart the failover process, from 'none' -> 'completed'
1421#     (Since 2.9)
1422#
1423# Since: 2.8
1424##
1425{ 'enum': 'FailoverStatus',
1426  'data': [ 'none', 'require', 'active', 'completed', 'relaunch' ] }
1427
1428##
1429# @COLO_EXIT:
1430#
1431# Emitted when the VM leaves COLO mode, either because of an error or
1432# at the user's request.
1433#
1434# @mode: the COLO mode at the time of exit.
1435#
1436# @reason: describes the reason for the COLO exit.
1437#
1438# Since: 3.1
1439#
1440# Example:
1441#
1442# <- { "timestamp": {"seconds": 2032141960, "microseconds": 417172},
1443#      "event": "COLO_EXIT", "data": {"mode": "primary", "reason": "request" } }
1444##
1445{ 'event': 'COLO_EXIT',
1446  'data': {'mode': 'COLOMode', 'reason': 'COLOExitReason' } }
1447
1448##
1449# @COLOExitReason:
1450#
1451# The reason for a COLO exit.
1452#
1453# @none: failover has never happened.  This state does not occur in
1454#     the COLO_EXIT event, and is only visible in the result of
1455#     query-colo-status.
1456#
1457# @request: COLO exit is due to an external request.
1458#
1459# @error: COLO exit is due to an internal error.
1460#
1461# @processing: COLO is currently handling a failover (since 4.0).
1462#
1463# Since: 3.1
1464##
1465{ 'enum': 'COLOExitReason',
1466  'data': [ 'none', 'request', 'error' , 'processing' ] }
1467
1468##
1469# @x-colo-lost-heartbeat:
1470#
1471# Tell qemu that the heartbeat has been lost, and request it to perform
1472# takeover procedures.  If this command is sent to the PVM, the Primary
1473# side will exit COLO mode.  If sent to the Secondary, the Secondary
1474# side will run its failover work and then take over server operation
1475# to become the service VM.
1476#
1477# Features:
1478#
1479# @unstable: This command is experimental.
1480#
1481# Since: 2.8
1482#
1483# Example:
1484#
1485# -> { "execute": "x-colo-lost-heartbeat" }
1486# <- { "return": {} }
1487##
1488{ 'command': 'x-colo-lost-heartbeat',
1489  'features': [ 'unstable' ],
1490  'if': 'CONFIG_REPLICATION' }
1491
1492##
1493# @migrate_cancel:
1494#
1495# Cancel the currently executing migration process.
1496#
1497# Returns: nothing on success
1498#
1499# Notes: This command succeeds even if there is no migration process
1500#     running.
1501#
1502# Since: 0.14
1503#
1504# Example:
1505#
1506# -> { "execute": "migrate_cancel" }
1507# <- { "return": {} }
1508##
1509{ 'command': 'migrate_cancel' }
1510
1511##
1512# @migrate-continue:
1513#
1514# Continue migration when it's in a paused state.
1515#
1516# @state: The state the migration is currently expected to be in
1517#
1518# Returns: nothing on success
1519#
1520# Since: 2.11
1521#
1522# Example:
1523#
1524# -> { "execute": "migrate-continue" , "arguments":
1525#      { "state": "pre-switchover" } }
1526# <- { "return": {} }
1527##
1528{ 'command': 'migrate-continue', 'data': {'state': 'MigrationStatus'} }
1529
1530##
1531# @migrate:
1532#
1533# Migrates the currently running guest to another Virtual Machine.
1534#
1535# @uri: the Uniform Resource Identifier of the destination VM
1536#
1537# @blk: do block migration (full disk copy)
1538#
1539# @inc: incremental disk copy migration
1540#
1541# @detach: this argument exists only for compatibility reasons and is
1542#     ignored by QEMU
1543#
1544# @resume: resume a paused migration, default "off".  (since 3.0)
1545#
1546# Features:
1547#
1548# @deprecated: Members @inc and @blk are deprecated.  Use
1549#     blockdev-mirror with NBD instead.
1550#
1551# Returns: nothing on success
1552#
1553# Since: 0.14
1554#
1555# Notes:
1556#
1557# 1. The 'query-migrate' command should be used to check migration's
1558#    progress and final result (this information is provided by the
1559#    'status' member)
1560#
1561# 2. All boolean arguments default to false
1562#
1563# 3. The user Monitor's "detach" argument is invalid in QMP and should
1564#    not be used
1565#
1566# Example:
1567#
1568# -> { "execute": "migrate", "arguments": { "uri": "tcp:0:4446" } }
1569# <- { "return": {} }
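#
# A sketch of resuming a paused postcopy migration, assuming a
# recovery URI has already been set up on the destination with
# migrate-recover (the address shown is illustrative):
#
# -> { "execute": "migrate",
#      "arguments": { "uri": "tcp:192.168.1.200:12345", "resume": true } }
# <- { "return": {} }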
1570##
1571{ 'command': 'migrate',
1572  'data': {'uri': 'str',
1573           '*blk': { 'type': 'bool', 'features': [ 'deprecated' ] },
1574           '*inc': { 'type': 'bool', 'features': [ 'deprecated' ] },
1575           '*detach': 'bool', '*resume': 'bool' } }
1576
1577##
1578# @migrate-incoming:
1579#
1580# Start an incoming migration.  QEMU must have been started with
1581# -incoming defer.
1582#
1583# @uri: The Uniform Resource Identifier identifying the source or
1584#     address to listen on
1585#
1586# Returns: nothing on success
1587#
1588# Since: 2.3
1589#
1590# Notes:
1591#
1592# 1. It's a bad idea to use a string for the uri, but it needs
1593#    to stay compatible with -incoming, and the format of the uri is
1594#    already exposed above libvirt.
1595#
1596# 2. QEMU must be started with -incoming defer to allow
1597#    migrate-incoming to be used.
1598#
1599# 3. The uri format is the same as for -incoming
1600#
1601# Example:
1602#
1603# -> { "execute": "migrate-incoming",
1604#      "arguments": { "uri": "tcp::4446" } }
1605# <- { "return": {} }
1606##
1607{ 'command': 'migrate-incoming', 'data': {'uri': 'str' } }
1608
1609##
1610# @xen-save-devices-state:
1611#
1612# Save the state of all devices to file.  The RAM and the block
1613# devices of the VM are not saved by this command.
1614#
1615# @filename: the file to save the state of the devices to as binary
1616#     data.  See xen-save-devices-state.txt for a description of the
1617#     binary format.
1618#
1619# @live: Optional argument to ask QEMU to treat this command as part
1620#     of a live migration.  Defaults to true.  (since 2.11)
1621#
1622# Returns: nothing on success
1623#
1624# Since: 1.1
1625#
1626# Example:
1627#
1628# -> { "execute": "xen-save-devices-state",
1629#      "arguments": { "filename": "/tmp/save" } }
1630# <- { "return": {} }
1631##
1632{ 'command': 'xen-save-devices-state',
1633  'data': {'filename': 'str', '*live':'bool' } }
1634
1635##
1636# @xen-set-global-dirty-log:
1637#
1638# Enable or disable the global dirty log mode.
1639#
1640# @enable: true to enable, false to disable.
1641#
1642# Returns: nothing
1643#
1644# Since: 1.3
1645#
1646# Example:
1647#
1648# -> { "execute": "xen-set-global-dirty-log",
1649#      "arguments": { "enable": true } }
1650# <- { "return": {} }
1651##
1652{ 'command': 'xen-set-global-dirty-log', 'data': { 'enable': 'bool' } }
1653
1654##
1655# @xen-load-devices-state:
1656#
1657# Load the state of all devices from file.  The RAM and the block
1658# devices of the VM are not loaded by this command.
1659#
1660# @filename: the file to load the state of the devices from as binary
1661#     data.  See xen-save-devices-state.txt for a description of the
1662#     binary format.
1663#
1664# Since: 2.7
1665#
1666# Example:
1667#
1668# -> { "execute": "xen-load-devices-state",
1669#      "arguments": { "filename": "/tmp/resume" } }
1670# <- { "return": {} }
1671##
1672{ 'command': 'xen-load-devices-state', 'data': {'filename': 'str'} }
1673
1674##
1675# @xen-set-replication:
1676#
1677# Enable or disable replication.
1678#
1679# @enable: true to enable, false to disable.
1680#
1681# @primary: true for primary or false for secondary.
1682#
1683# @failover: true to do failover, false to stop.  Cannot be specified
1684#     if @enable is true.  Default value is false.
1685#
1686# Returns: nothing.
1687#
1688# Example:
1689#
1690# -> { "execute": "xen-set-replication",
1691#      "arguments": {"enable": true, "primary": false} }
1692# <- { "return": {} }
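#
# A hypothetical follow-up on the secondary side that stops
# replication and requests failover (illustrative only):
#
# -> { "execute": "xen-set-replication",
#      "arguments": {"enable": false, "primary": false, "failover": true} }
# <- { "return": {} }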
1693#
1694# Since: 2.9
1695##
1696{ 'command': 'xen-set-replication',
1697  'data': { 'enable': 'bool', 'primary': 'bool', '*failover': 'bool' },
1698  'if': 'CONFIG_REPLICATION' }
1699
1700##
1701# @ReplicationStatus:
1702#
1703# The result format for 'query-xen-replication-status'.
1704#
1705# @error: true if an error happened, false if replication is normal.
1706#
1707# @desc: the human readable error description string, when @error is
1708#     'true'.
1709#
1710# Since: 2.9
1711##
1712{ 'struct': 'ReplicationStatus',
1713  'data': { 'error': 'bool', '*desc': 'str' },
1714  'if': 'CONFIG_REPLICATION' }
1715
1716##
1717# @query-xen-replication-status:
1718#
1719# Query replication status while the vm is running.
1720#
1721# Returns: A @ReplicationStatus object showing the status.
1722#
1723# Example:
1724#
1725# -> { "execute": "query-xen-replication-status" }
1726# <- { "return": { "error": false } }
1727#
1728# Since: 2.9
1729##
1730{ 'command': 'query-xen-replication-status',
1731  'returns': 'ReplicationStatus',
1732  'if': 'CONFIG_REPLICATION' }
1733
1734##
1735# @xen-colo-do-checkpoint:
1736#
1737# Xen uses this command to notify replication to trigger a checkpoint.
1738#
1739# Returns: nothing.
1740#
1741# Example:
1742#
1743# -> { "execute": "xen-colo-do-checkpoint" }
1744# <- { "return": {} }
1745#
1746# Since: 2.9
1747##
1748{ 'command': 'xen-colo-do-checkpoint',
1749  'if': 'CONFIG_REPLICATION' }
1750
1751##
1752# @COLOStatus:
1753#
1754# The result format for 'query-colo-status'.
1755#
1756# @mode: COLO running mode.  If COLO is running, this field will
1757#     return 'primary' or 'secondary'.
1758#
1759# @last-mode: COLO last running mode.  If COLO is running, this field
1760#     returns the same value as @mode; after a failover, it can be used
1761#     to retrieve the last COLO mode.  (since 4.0)
1762#
1763# @reason: describes the reason for the COLO exit.
1764#
1765# Since: 3.1
1766##
1767{ 'struct': 'COLOStatus',
1768  'data': { 'mode': 'COLOMode', 'last-mode': 'COLOMode',
1769            'reason': 'COLOExitReason' },
1770  'if': 'CONFIG_REPLICATION' }
1771
1772##
1773# @query-colo-status:
1774#
1775# Query COLO status while the vm is running.
1776#
1777# Returns: A @COLOStatus object showing the status.
1778#
1779# Example:
1780#
1781# -> { "execute": "query-colo-status" }
1782# <- { "return": { "mode": "primary", "last-mode": "none", "reason": "request" } }
1783#
1784# Since: 3.1
1785##
1786{ 'command': 'query-colo-status',
1787  'returns': 'COLOStatus',
1788  'if': 'CONFIG_REPLICATION' }
1789
1790##
1791# @migrate-recover:
1792#
1793# Provide a recovery migration stream URI.
1794#
1795# @uri: the URI to be used for the recovery of the migration stream.
1796#
1797# Returns: nothing.
1798#
1799# Example:
1800#
1801# -> { "execute": "migrate-recover",
1802#      "arguments": { "uri": "tcp:192.168.1.200:12345" } }
1803# <- { "return": {} }
1804#
1805# Since: 3.0
1806##
1807{ 'command': 'migrate-recover',
1808  'data': { 'uri': 'str' },
1809  'allow-oob': true }
1810
1811##
1812# @migrate-pause:
1813#
1814# Pause a migration.  Currently it only supports postcopy.
1815#
1816# Returns: nothing.
1817#
1818# Example:
1819#
1820# -> { "execute": "migrate-pause" }
1821# <- { "return": {} }
1822#
1823# Since: 3.0
1824##
1825{ 'command': 'migrate-pause', 'allow-oob': true }
1826
1827##
1828# @UNPLUG_PRIMARY:
1829#
1830# Emitted from the source side of a migration when the migration state
1831# is WAIT_UNPLUG and the device has been unplugged by the guest
1832# operating system.  Device resources in QEMU are kept on standby so
1833# that the device can be re-plugged in case of migration failure.
1834#
1835# @device-id: QEMU device id of the unplugged device
1836#
1837# Since: 4.2
1838#
1839# Example:
1840#
1841# <- { "event": "UNPLUG_PRIMARY",
1842#      "data": { "device-id": "hostdev0" },
1843#      "timestamp": { "seconds": 1265044230, "microseconds": 450486 } }
1844##
1845{ 'event': 'UNPLUG_PRIMARY',
1846  'data': { 'device-id': 'str' } }
1847
1848##
1849# @DirtyRateVcpu:
1850#
1851# Dirty rate of vcpu.
1852#
1853# @id: vcpu index.
1854#
1855# @dirty-rate: dirty rate.
1856#
1857# Since: 6.2
1858##
1859{ 'struct': 'DirtyRateVcpu',
1860  'data': { 'id': 'int', 'dirty-rate': 'int64' } }
1861
1862##
1863# @DirtyRateStatus:
1864#
1865# Dirty page rate measurement status.
1866#
1867# @unstarted: measuring thread has not been started yet
1868#
1869# @measuring: measuring thread is running
1870#
1871# @measured: dirty page rate is measured and the results are available
1872#
1873# Since: 5.2
1874##
1875{ 'enum': 'DirtyRateStatus',
1876  'data': [ 'unstarted', 'measuring', 'measured'] }
1877
1878##
1879# @DirtyRateMeasureMode:
1880#
1881# Method used to measure dirty page rate.  Differences between
1882# available methods are explained in @calc-dirty-rate.
1883#
1884# @page-sampling: use page sampling
1885#
1886# @dirty-ring: use dirty ring
1887#
1888# @dirty-bitmap: use dirty bitmap
1889#
1890# Since: 6.2
1891##
1892{ 'enum': 'DirtyRateMeasureMode',
1893  'data': ['page-sampling', 'dirty-ring', 'dirty-bitmap'] }
1894
1895##
1896# @TimeUnit:
1897#
1898# Specifies the unit in which a time-related value is expressed.
1899#
1900# @second: value is in seconds
1901#
1902# @millisecond: value is in milliseconds
1903#
1904# Since: 8.2
1905#
1906##
1907{ 'enum': 'TimeUnit',
1908  'data': ['second', 'millisecond'] }
1909
1910##
1911# @DirtyRateInfo:
1912#
1913# Information about measured dirty page rate.
1914#
1915# @dirty-rate: an estimate of the dirty page rate of the VM in units
1916#     of MiB/s.  Value is present only when @status is 'measured'.
1917#
1918# @status: current status of dirty page rate measurements
1919#
1920# @start-time: start time of the calculation, in units of seconds
1921#
1922# @calc-time: time period for which dirty page rate was measured,
1923#     expressed and rounded down to @calc-time-unit.
1924#
1925# @calc-time-unit: time unit of @calc-time  (Since 8.2)
1926#
1927# @sample-pages: number of sampled pages per GiB of guest memory.
1928#     Valid only in page-sampling mode (Since 6.1)
1929#
1930# @mode: mode that was used to measure dirty page rate (Since 6.2)
1931#
1932# @vcpu-dirty-rate: dirty rate for each vCPU if dirty-ring mode was
1933#     specified (Since 6.2)
1934#
1935# Since: 5.2
1936##
1937{ 'struct': 'DirtyRateInfo',
1938  'data': {'*dirty-rate': 'int64',
1939           'status': 'DirtyRateStatus',
1940           'start-time': 'int64',
1941           'calc-time': 'int64',
1942           'calc-time-unit': 'TimeUnit',
1943           'sample-pages': 'uint64',
1944           'mode': 'DirtyRateMeasureMode',
1945           '*vcpu-dirty-rate': [ 'DirtyRateVcpu' ] } }
1946
1947##
1948# @calc-dirty-rate:
1949#
1950# Start measuring dirty page rate of the VM.  Results can be retrieved
1951# with @query-dirty-rate after measurements are completed.
1952#
1953# Dirty page rate is the number of pages changed in a given time
1954# period expressed in MiB/s.  The following methods of calculation are
1955# available:
1956#
1957# 1. In page sampling mode, a random subset of pages is selected and
1958#    hashed twice: once at the beginning of the measurement time period,
1959#    and once again at the end.  If two hashes for some page are
1960#    different, the page is counted as changed.  Since this method
1961#    relies on sampling and hashing, calculated dirty page rate is
1962#    only an estimate of its true value.  Increasing @sample-pages
1963#    improves estimation quality at the cost of higher computational
1964#    overhead.
1965#
1966# 2. Dirty bitmap mode captures writes to memory (for example by
1967#    temporarily revoking write access to all pages) and counts page
1968#    faults.  Information about modified pages is collected into a
1969#    bitmap, where each bit corresponds to one guest page.  This mode
1970#    requires that KVM accelerator property "dirty-ring-size" is *not*
1971#    set.
1972#
1973# 3. Dirty ring mode is similar to dirty bitmap mode, but the
1974#    information about modified pages is collected into a ring buffer.
1975#    This mode tracks page modifications for each vCPU separately.  It
1976#    requires that KVM accelerator property "dirty-ring-size" is set (see below).
1977#
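# As a sketch, dirty ring can typically be enabled on the QEMU command
# line with an option such as "-accel kvm,dirty-ring-size=4096" (the
# size value here is only illustrative).
#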
1978# @calc-time: time period for which dirty page rate is calculated.
1979#     By default it is specified in seconds, but the unit can be set
1980#     explicitly with @calc-time-unit.  Note that larger @calc-time
1981#     values will typically result in smaller dirty page rates because
1982#     page dirtying is a one-time event.  Once some page is counted
1983#     as dirty during @calc-time period, further writes to this page
1984#     will not increase dirty page rate anymore.
1985#
1986# @calc-time-unit: time unit in which @calc-time is specified.
1987#     By default it is seconds. (Since 8.2)
1988#
1989# @sample-pages: number of sampled pages per GiB of guest memory.
1990#     Default value is 512.  For 4KiB guest pages this corresponds to
1991#     a sampling ratio of 0.2%.  This argument is used only in page
1992#     sampling mode.  (Since 6.1)
1993#
1994# @mode: mechanism for tracking dirty pages.  Default value is
1995#     'page-sampling'.  Others are 'dirty-bitmap' and 'dirty-ring'.
1996#     (Since 6.1)
1997#
1998# Since: 5.2
1999#
2000# Example:
2001#
2002# -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 1,
2003#                                                 "sample-pages": 512} }
2004# <- { "return": {} }
2005#
2006# Measure dirty rate using dirty bitmap for 500 milliseconds:
2007#
2008# -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 500,
2009#     "calc-time-unit": "millisecond", "mode": "dirty-bitmap"} }
2010#
2011# <- { "return": {} }
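#
# A sketch of measuring with dirty ring mode for 10 seconds (the
# parameter values are illustrative):
#
# -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 10,
#     "mode": "dirty-ring"} }
# <- { "return": {} }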
2012##
2013{ 'command': 'calc-dirty-rate', 'data': {'calc-time': 'int64',
2014                                         '*calc-time-unit': 'TimeUnit',
2015                                         '*sample-pages': 'int',
2016                                         '*mode': 'DirtyRateMeasureMode'} }
2017
2018##
2019# @query-dirty-rate:
2020#
2021# Query results of the most recent invocation of @calc-dirty-rate.
2022#
2023# @calc-time-unit: time unit in which to report calculation time.
2024#     By default it is reported in seconds. (Since 8.2)
2025#
2026# Since: 5.2
2027#
2028# Examples:
2029#
2030# 1. Measurement is in progress:
2031#
2032# <- {"return": {"status": "measuring", "sample-pages": 512,
2033#     "mode": "page-sampling", "start-time": 1693900454, "calc-time": 10,
2034#     "calc-time-unit": "second"} }
2035#
2036# 2. Measurement has been completed:
2037#
2038# <- {"return": {"status": "measured", "sample-pages": 512, "dirty-rate": 108,
2039#     "mode": "page-sampling", "start-time": 1693900454, "calc-time": 10,
2040#     "calc-time-unit": "second"} }
2041##
2042{ 'command': 'query-dirty-rate', 'data': {'*calc-time-unit': 'TimeUnit' },
2043                                 'returns': 'DirtyRateInfo' }
2044
2045##
2046# @DirtyLimitInfo:
2047#
2048# Dirty page rate limit information of a virtual CPU.
2049#
2050# @cpu-index: index of a virtual CPU.
2051#
2052# @limit-rate: upper limit of dirty page rate (MB/s) for a virtual
2053#     CPU, 0 means unlimited.
2054#
2055# @current-rate: current dirty page rate (MB/s) for a virtual CPU.
2056#
2057# Since: 7.1
2058##
2059{ 'struct': 'DirtyLimitInfo',
2060  'data': { 'cpu-index': 'int',
2061            'limit-rate': 'uint64',
2062            'current-rate': 'uint64' } }
2063
2064##
2065# @set-vcpu-dirty-limit:
2066#
2067# Set the upper limit of dirty page rate for virtual CPUs.
2068#
2069# Requires KVM with accelerator property "dirty-ring-size" set.  A
2070# virtual CPU's dirty page rate is a measure of its memory load.  To
2071# observe dirty page rates, use @calc-dirty-rate.
2072#
2073# @cpu-index: index of a virtual CPU, default is all.
2074#
2075# @dirty-rate: upper limit of dirty page rate (MB/s) for virtual CPUs.
2076#
2077# Since: 7.1
2078#
2079# Example:
2080#
2081# -> {"execute": "set-vcpu-dirty-limit",
2082#     "arguments": { "dirty-rate": 200,
2083#                    "cpu-index": 1 } }
2084# <- { "return": {} }
2085##
2086{ 'command': 'set-vcpu-dirty-limit',
2087  'data': { '*cpu-index': 'int',
2088            'dirty-rate': 'uint64' } }
2089
2090##
2091# @cancel-vcpu-dirty-limit:
2092#
2093# Cancel the upper limit of dirty page rate for virtual CPUs.
2094#
2095# Cancel the dirty page rate limit that was previously set with the
2096# set-vcpu-dirty-limit command.  Note that this command requires dirty
2097# ring support, the same as "set-vcpu-dirty-limit".
2098#
2099# @cpu-index: index of a virtual CPU, default is all.
2100#
2101# Since: 7.1
2102#
2103# Example:
2104#
2105# -> {"execute": "cancel-vcpu-dirty-limit",
2106#     "arguments": { "cpu-index": 1 } }
2107# <- { "return": {} }
2108##
2109{ 'command': 'cancel-vcpu-dirty-limit',
2110  'data': { '*cpu-index': 'int'} }
2111
2112##
2113# @query-vcpu-dirty-limit:
2114#
2115# Returns information about virtual CPU dirty page rate limits, if
2116# any.
2117#
2118# Since: 7.1
2119#
2120# Example:
2121#
2122# -> {"execute": "query-vcpu-dirty-limit"}
2123# <- {"return": [
2124#        { "limit-rate": 60, "current-rate": 3, "cpu-index": 0},
2125#        { "limit-rate": 60, "current-rate": 3, "cpu-index": 1}]}
2126##
2127{ 'command': 'query-vcpu-dirty-limit',
2128  'returns': [ 'DirtyLimitInfo' ] }
2129
2130##
2131# @MigrationThreadInfo:
2132#
2133# Information about migration threads
2134#
2135# @name: the name of the migration thread
2136#
2137# @thread-id: ID of the underlying host thread
2138#
2139# Since: 7.2
2140##
2141{ 'struct': 'MigrationThreadInfo',
2142  'data': {'name': 'str',
2143           'thread-id': 'int'} }
2144
2145##
2146# @query-migrationthreads:
2147#
2148# Returns information about the migration threads.
2149#
2150# Returns: a list of @MigrationThreadInfo, one per migration thread
2153#
2154# Since: 7.2
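#
# Example (illustrative; the thread name and thread ID shown are
# hypothetical):
#
# -> { "execute": "query-migrationthreads" }
# <- { "return": [ { "name": "live_migration", "thread-id": 166724 } ] }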
2155##
2156{ 'command': 'query-migrationthreads',
2157  'returns': ['MigrationThreadInfo'] }
2158
2159##
2160# @snapshot-save:
2161#
2162# Save a VM snapshot
2163#
2164# @job-id: identifier for the newly created job
2165#
2166# @tag: name of the snapshot to create
2167#
2168# @vmstate: block device node name to save vmstate to
2169#
2170# @devices: list of block device node names to save a snapshot to
2171#
2172# Applications should not assume that the snapshot save is complete
2173# when this command returns.  The job commands / events must be used
2174# to determine completion and to fetch details of any errors that
2175# arise.
2176#
2177# Note that execution of the guest CPUs may be stopped during the time
2178# it takes to save the snapshot.  A future version of QEMU may ensure
2179# CPUs are executing continuously.
2180#
2181# It is strongly recommended that @devices contain all writable block
2182# device nodes if a consistent snapshot is required.
2183#
2184# If @tag already exists, an error will be reported
2185#
2186# Returns: nothing
2187#
2188# Example:
2189#
2190# -> { "execute": "snapshot-save",
2191#      "arguments": {
2192#         "job-id": "snapsave0",
2193#         "tag": "my-snap",
2194#         "vmstate": "disk0",
2195#         "devices": ["disk0", "disk1"]
2196#      }
2197#    }
2198# <- { "return": { } }
2199# <- {"event": "JOB_STATUS_CHANGE",
2200#     "timestamp": {"seconds": 1432121972, "microseconds": 744001},
2201#     "data": {"status": "created", "id": "snapsave0"}}
2202# <- {"event": "JOB_STATUS_CHANGE",
2203#     "timestamp": {"seconds": 1432122172, "microseconds": 744001},
2204#     "data": {"status": "running", "id": "snapsave0"}}
2205# <- {"event": "STOP",
2206#     "timestamp": {"seconds": 1432122372, "microseconds": 744001} }
2207# <- {"event": "RESUME",
2208#     "timestamp": {"seconds": 1432122572, "microseconds": 744001} }
2209# <- {"event": "JOB_STATUS_CHANGE",
2210#     "timestamp": {"seconds": 1432122772, "microseconds": 744001},
2211#     "data": {"status": "waiting", "id": "snapsave0"}}
2212# <- {"event": "JOB_STATUS_CHANGE",
2213#     "timestamp": {"seconds": 1432122972, "microseconds": 744001},
2214#     "data": {"status": "pending", "id": "snapsave0"}}
2215# <- {"event": "JOB_STATUS_CHANGE",
2216#     "timestamp": {"seconds": 1432123172, "microseconds": 744001},
2217#     "data": {"status": "concluded", "id": "snapsave0"}}
2218# -> {"execute": "query-jobs"}
2219# <- {"return": [{"current-progress": 1,
2220#                 "status": "concluded",
2221#                 "total-progress": 1,
2222#                 "type": "snapshot-save",
2223#                 "id": "snapsave0"}]}
2224#
2225# Since: 6.0
2226##
2227{ 'command': 'snapshot-save',
2228  'data': { 'job-id': 'str',
2229            'tag': 'str',
2230            'vmstate': 'str',
2231            'devices': ['str'] } }
2232
2233##
2234# @snapshot-load:
2235#
2236# Load a VM snapshot
2237#
2238# @job-id: identifier for the newly created job
2239#
2240# @tag: name of the snapshot to load.
2241#
2242# @vmstate: block device node name to load vmstate from
2243#
2244# @devices: list of block device node names to load a snapshot from
2245#
2246# Applications should not assume that the snapshot load is complete
2247# when this command returns.  The job commands / events must be used
2248# to determine completion and to fetch details of any errors that
2249# arise.
2250#
2251# Note that execution of the guest CPUs will be stopped during the
2252# time it takes to load the snapshot.
2253#
2254# It is strongly recommended that @devices contain all writable block
2255# device nodes that can have changed since the original @snapshot-save
2256# command execution.
2257#
2258# Returns: nothing
2259#
2260# Example:
2261#
2262# -> { "execute": "snapshot-load",
2263#      "arguments": {
2264#         "job-id": "snapload0",
2265#         "tag": "my-snap",
2266#         "vmstate": "disk0",
2267#         "devices": ["disk0", "disk1"]
2268#      }
2269#    }
2270# <- { "return": { } }
2271# <- {"event": "JOB_STATUS_CHANGE",
2272#     "timestamp": {"seconds": 1472124172, "microseconds": 744001},
2273#     "data": {"status": "created", "id": "snapload0"}}
2274# <- {"event": "JOB_STATUS_CHANGE",
2275#     "timestamp": {"seconds": 1472125172, "microseconds": 744001},
2276#     "data": {"status": "running", "id": "snapload0"}}
2277# <- {"event": "STOP",
2278#     "timestamp": {"seconds": 1472125472, "microseconds": 744001} }
2279# <- {"event": "RESUME",
2280#     "timestamp": {"seconds": 1472125872, "microseconds": 744001} }
2281# <- {"event": "JOB_STATUS_CHANGE",
2282#     "timestamp": {"seconds": 1472126172, "microseconds": 744001},
2283#     "data": {"status": "waiting", "id": "snapload0"}}
2284# <- {"event": "JOB_STATUS_CHANGE",
2285#     "timestamp": {"seconds": 1472127172, "microseconds": 744001},
2286#     "data": {"status": "pending", "id": "snapload0"}}
2287# <- {"event": "JOB_STATUS_CHANGE",
2288#     "timestamp": {"seconds": 1472128172, "microseconds": 744001},
2289#     "data": {"status": "concluded", "id": "snapload0"}}
2290# -> {"execute": "query-jobs"}
2291# <- {"return": [{"current-progress": 1,
2292#                 "status": "concluded",
2293#                 "total-progress": 1,
2294#                 "type": "snapshot-load",
2295#                 "id": "snapload0"}]}
2296#
2297# Since: 6.0
2298##
2299{ 'command': 'snapshot-load',
2300  'data': { 'job-id': 'str',
2301            'tag': 'str',
2302            'vmstate': 'str',
2303            'devices': ['str'] } }
2304
2305##
2306# @snapshot-delete:
2307#
2308# Delete a VM snapshot
2309#
2310# @job-id: identifier for the newly created job
2311#
2312# @tag: name of the snapshot to delete.
2313#
2314# @devices: list of block device node names to delete a snapshot from
2315#
2316# Applications should not assume that the snapshot delete is complete
2317# when this command returns.  The job commands / events must be used
2318# to determine completion and to fetch details of any errors that
2319# arise.
2320#
2321# Returns: nothing
2322#
2323# Example:
2324#
2325# -> { "execute": "snapshot-delete",
2326#      "arguments": {
2327#         "job-id": "snapdelete0",
2328#         "tag": "my-snap",
2329#         "devices": ["disk0", "disk1"]
2330#      }
2331#    }
2332# <- { "return": { } }
2333# <- {"event": "JOB_STATUS_CHANGE",
2334#     "timestamp": {"seconds": 1442124172, "microseconds": 744001},
2335#     "data": {"status": "created", "id": "snapdelete0"}}
2336# <- {"event": "JOB_STATUS_CHANGE",
2337#     "timestamp": {"seconds": 1442125172, "microseconds": 744001},
2338#     "data": {"status": "running", "id": "snapdelete0"}}
2339# <- {"event": "JOB_STATUS_CHANGE",
2340#     "timestamp": {"seconds": 1442126172, "microseconds": 744001},
2341#     "data": {"status": "waiting", "id": "snapdelete0"}}
2342# <- {"event": "JOB_STATUS_CHANGE",
2343#     "timestamp": {"seconds": 1442127172, "microseconds": 744001},
2344#     "data": {"status": "pending", "id": "snapdelete0"}}
2345# <- {"event": "JOB_STATUS_CHANGE",
2346#     "timestamp": {"seconds": 1442128172, "microseconds": 744001},
2347#     "data": {"status": "concluded", "id": "snapdelete0"}}
2348# -> {"execute": "query-jobs"}
2349# <- {"return": [{"current-progress": 1,
2350#                 "status": "concluded",
2351#                 "total-progress": 1,
2352#                 "type": "snapshot-delete",
2353#                 "id": "snapdelete0"}]}
2354#
2355# Since: 6.0
2356##
2357{ 'command': 'snapshot-delete',
2358  'data': { 'job-id': 'str',
2359            'tag': 'str',
2360            'devices': ['str'] } }
2361