xref: /openbmc/qemu/qapi/migration.json (revision 8846b5bf)
1# -*- Mode: Python -*-
2# vim: filetype=python
3#
4
5##
6# = Migration
7##
8
9{ 'include': 'common.json' }
10{ 'include': 'sockets.json' }
11
12##
13# @MigrationStats:
14#
15# Detailed migration status.
16#
17# @transferred: amount of bytes already transferred to the target VM
18#
19# @remaining: amount of bytes remaining to be transferred to the
20#     target VM
21#
22# @total: total amount of bytes involved in the migration process
23#
24# @duplicate: number of duplicate (zero) pages (since 1.2)
25#
26# @skipped: number of skipped zero pages. Always zero, only provided for
27#     compatibility (since 1.5)
28#
29# @normal: number of normal pages (since 1.2)
30#
31# @normal-bytes: number of normal bytes sent (since 1.2)
32#
# @dirty-pages-rate: number of pages dirtied per second by the guest
#     (since 1.3)
35#
36# @mbps: throughput in megabits/sec.  (since 1.6)
37#
38# @dirty-sync-count: number of times that dirty ram was synchronized
39#     (since 2.1)
40#
41# @postcopy-requests: The number of page requests received from the
42#     destination (since 2.7)
43#
44# @page-size: The number of bytes per page for the various page-based
45#     statistics (since 2.10)
46#
47# @multifd-bytes: The number of bytes sent through multifd (since 3.0)
48#
49# @pages-per-second: the number of memory pages transferred per second
50#     (Since 4.0)
51#
52# @precopy-bytes: The number of bytes sent in the pre-copy phase
53#     (since 7.0).
54#
55# @downtime-bytes: The number of bytes sent while the guest is paused
56#     (since 7.0).
57#
58# @postcopy-bytes: The number of bytes sent during the post-copy phase
59#     (since 7.0).
60#
61# @dirty-sync-missed-zero-copy: Number of times dirty RAM
62#     synchronization could not avoid copying dirty pages.  This is
63#     between 0 and @dirty-sync-count * @multifd-channels.  (since
64#     7.1)
65#
66# Features:
67#
68# @deprecated: Member @skipped is always zero since 1.5.3
69#
70# Since: 0.14
71#
72##
# Fix: stray space before the comma after 'total': 'int' (whitespace defect
# in the member list; member names, types and order are unchanged).
{ 'struct': 'MigrationStats',
  'data': {'transferred': 'int', 'remaining': 'int', 'total': 'int',
           'duplicate': 'int',
           'skipped': { 'type': 'int', 'features': [ 'deprecated' ] },
           'normal': 'int',
           'normal-bytes': 'int', 'dirty-pages-rate': 'int',
           'mbps': 'number', 'dirty-sync-count': 'int',
           'postcopy-requests': 'int', 'page-size': 'int',
           'multifd-bytes': 'uint64', 'pages-per-second': 'uint64',
           'precopy-bytes': 'uint64', 'downtime-bytes': 'uint64',
           'postcopy-bytes': 'uint64',
           'dirty-sync-missed-zero-copy': 'uint64' } }
85
86##
87# @XBZRLECacheStats:
88#
89# Detailed XBZRLE migration cache statistics
90#
91# @cache-size: XBZRLE cache size
92#
93# @bytes: amount of bytes already transferred to the target VM
94#
95# @pages: amount of pages transferred to the target VM
96#
# @cache-miss: number of cache misses
98#
99# @cache-miss-rate: rate of cache miss (since 2.1)
100#
101# @encoding-rate: rate of encoded bytes (since 5.1)
102#
103# @overflow: number of overflows
104#
105# Since: 1.2
106##
# Same members, same order, same types — reformatted one member per line.
{ 'struct': 'XBZRLECacheStats',
  'data': { 'cache-size': 'size',
            'bytes': 'int',
            'pages': 'int',
            'cache-miss': 'int',
            'cache-miss-rate': 'number',
            'encoding-rate': 'number',
            'overflow': 'int' } }
111
112##
113# @CompressionStats:
114#
115# Detailed migration compression statistics
116#
117# @pages: amount of pages compressed and transferred to the target VM
118#
119# @busy: count of times that no free thread was available to compress
120#     data
121#
122# @busy-rate: rate of thread busy
123#
124# @compressed-size: amount of bytes after compression
125#
126# @compression-rate: rate of compressed size
127#
128# Since: 3.1
129##
# Same members, same order, same types — reformatted one member per line.
{ 'struct': 'CompressionStats',
  'data': { 'pages': 'int',
            'busy': 'int',
            'busy-rate': 'number',
            'compressed-size': 'int',
            'compression-rate': 'number' } }
133
134##
135# @MigrationStatus:
136#
137# An enumeration of migration status.
138#
139# @none: no migration has ever happened.
140#
141# @setup: migration process has been initiated.
142#
143# @cancelling: in the process of cancelling migration.
144#
145# @cancelled: cancelling migration is finished.
146#
147# @active: in the process of doing migration.
148#
149# @postcopy-active: like active, but now in postcopy mode.  (since
150#     2.5)
151#
152# @postcopy-paused: during postcopy but paused.  (since 3.0)
153#
154# @postcopy-recover: trying to recover from a paused postcopy.  (since
155#     3.0)
156#
157# @completed: migration is finished.
158#
159# @failed: some error occurred during migration process.
160#
161# @colo: VM is in the process of fault tolerance, VM can not get into
162#     this state unless colo capability is enabled for migration.
163#     (since 2.8)
164#
165# @pre-switchover: Paused before device serialisation.  (since 2.11)
166#
167# @device: During device serialisation when pause-before-switchover is
168#     enabled (since 2.11)
169#
170# @wait-unplug: wait for device unplug request by guest OS to be
171#     completed.  (since 4.2)
172#
173# Since: 2.3
174##
# Fix: the next '##' doc-comment block started on the line immediately after
# this enum; a blank separator line is restored so the following doc block
# begins a fresh expression, matching the file's convention everywhere else.
# Enum values and their order are unchanged.
{ 'enum': 'MigrationStatus',
  'data': [ 'none', 'setup', 'cancelling', 'cancelled',
            'active', 'postcopy-active', 'postcopy-paused',
            'postcopy-recover', 'completed', 'failed', 'colo',
            'pre-switchover', 'device', 'wait-unplug' ] }

180##
181# @VfioStats:
182#
183# Detailed VFIO devices migration statistics
184#
185# @transferred: amount of bytes transferred to the target VM by VFIO
186#     devices
187#
188# Since: 5.2
189##
# Same single member — brace spacing made symmetric.
{ 'struct': 'VfioStats',
  'data': { 'transferred': 'int' } }
192
193##
194# @MigrationInfo:
195#
196# Information about current migration process.
197#
198# @status: @MigrationStatus describing the current migration status.
199#     If this field is not returned, no migration process has been
200#     initiated
201#
# @ram: @MigrationStats containing detailed migration status, only
#     returned if status is 'active' or 'completed' (since 1.2)
204#
205# @disk: @MigrationStats containing detailed disk migration status,
206#     only returned if status is 'active' and it is a block migration
207#
208# @xbzrle-cache: @XBZRLECacheStats containing detailed XBZRLE
209#     migration statistics, only returned if XBZRLE feature is on and
210#     status is 'active' or 'completed' (since 1.2)
211#
212# @total-time: total amount of milliseconds since migration started.
213#     If migration has ended, it returns the total migration time.
214#     (since 1.2)
215#
# @downtime: only present when migration finishes correctly; total
#     downtime in milliseconds for the guest.  (since 1.3)
218#
# @expected-downtime: only present while migration is active; expected
#     downtime in milliseconds for the guest in last walk of the dirty
#     bitmap.  (since 1.3)
222#
223# @setup-time: amount of setup time in milliseconds *before* the
224#     iterations begin but *after* the QMP command is issued.  This is
225#     designed to provide an accounting of any activities (such as
226#     RDMA pinning) which may be expensive, but do not actually occur
227#     during the iterative migration rounds themselves.  (since 1.6)
228#
229# @cpu-throttle-percentage: percentage of time guest cpus are being
230#     throttled during auto-converge.  This is only present when
231#     auto-converge has started throttling guest cpus.  (Since 2.7)
232#
233# @error-desc: the human readable error description string. Clients
234#     should not attempt to parse the error strings.  (Since 2.7)
235#
236# @postcopy-blocktime: total time when all vCPU were blocked during
237#     postcopy live migration.  This is only present when the
238#     postcopy-blocktime migration capability is enabled.  (Since 3.0)
239#
240# @postcopy-vcpu-blocktime: list of the postcopy blocktime per vCPU.
241#     This is only present when the postcopy-blocktime migration
242#     capability is enabled.  (Since 3.0)
243#
244# @compression: migration compression statistics, only returned if
245#     compression feature is on and status is 'active' or 'completed'
246#     (Since 3.1)
247#
248# @socket-address: Only used for tcp, to know what the real port is
249#     (Since 4.0)
250#
251# @vfio: @VfioStats containing detailed VFIO devices migration
252#     statistics, only returned if VFIO device is present, migration
253#     is supported by all VFIO devices and status is 'active' or
254#     'completed' (since 5.2)
255#
256# @blocked-reasons: A list of reasons an outgoing migration is
257#     blocked.  Present and non-empty when migration is blocked.
258#     (since 6.0)
259#
260# @dirty-limit-throttle-time-per-round: Maximum throttle time
261#     (in microseconds) of virtual CPUs each dirty ring full round,
262#     which shows how MigrationCapability dirty-limit affects the
263#     guest during live migration.  (Since 8.1)
264#
265# @dirty-limit-ring-full-time: Estimated average dirty ring full time
266#     (in microseconds) for each dirty ring full round.  The value
267#     equals the dirty ring memory size divided by the average dirty
268#     page rate of the virtual CPU, which can be used to observe the
269#     average memory load of the virtual CPU indirectly.  Note that
270#     zero means guest doesn't dirty memory.  (Since 8.1)
271#
272# Since: 0.14
273##
# Same optional members, same order, same types — reformatted one member per
# line with consistent spacing inside the braces.
{ 'struct': 'MigrationInfo',
  'data': { '*status': 'MigrationStatus',
            '*ram': 'MigrationStats',
            '*disk': 'MigrationStats',
            '*vfio': 'VfioStats',
            '*xbzrle-cache': 'XBZRLECacheStats',
            '*total-time': 'int',
            '*expected-downtime': 'int',
            '*downtime': 'int',
            '*setup-time': 'int',
            '*cpu-throttle-percentage': 'int',
            '*error-desc': 'str',
            '*blocked-reasons': ['str'],
            '*postcopy-blocktime': 'uint32',
            '*postcopy-vcpu-blocktime': ['uint32'],
            '*compression': 'CompressionStats',
            '*socket-address': ['SocketAddress'],
            '*dirty-limit-throttle-time-per-round': 'uint64',
            '*dirty-limit-ring-full-time': 'uint64' } }
292
293##
294# @query-migrate:
295#
296# Returns information about current migration process.  If migration
297# is active there will be another json-object with RAM migration
298# status and if block migration is active another one with block
299# migration status.
300#
301# Returns: @MigrationInfo
302#
303# Since: 0.14
304#
305# Examples:
306#
307# 1. Before the first migration
308#
309# -> { "execute": "query-migrate" }
310# <- { "return": {} }
311#
312# 2. Migration is done and has succeeded
313#
314# -> { "execute": "query-migrate" }
315# <- { "return": {
316#         "status": "completed",
317#         "total-time":12345,
318#         "setup-time":12345,
319#         "downtime":12345,
320#         "ram":{
321#           "transferred":123,
322#           "remaining":123,
323#           "total":246,
324#           "duplicate":123,
325#           "normal":123,
326#           "normal-bytes":123456,
327#           "dirty-sync-count":15
328#         }
329#      }
330#    }
331#
332# 3. Migration is done and has failed
333#
334# -> { "execute": "query-migrate" }
335# <- { "return": { "status": "failed" } }
336#
337# 4. Migration is being performed and is not a block migration:
338#
339# -> { "execute": "query-migrate" }
340# <- {
341#       "return":{
342#          "status":"active",
343#          "total-time":12345,
344#          "setup-time":12345,
345#          "expected-downtime":12345,
346#          "ram":{
347#             "transferred":123,
348#             "remaining":123,
349#             "total":246,
350#             "duplicate":123,
351#             "normal":123,
352#             "normal-bytes":123456,
353#             "dirty-sync-count":15
354#          }
355#       }
356#    }
357#
358# 5. Migration is being performed and is a block migration:
359#
360# -> { "execute": "query-migrate" }
361# <- {
362#       "return":{
363#          "status":"active",
364#          "total-time":12345,
365#          "setup-time":12345,
366#          "expected-downtime":12345,
367#          "ram":{
368#             "total":1057024,
369#             "remaining":1053304,
370#             "transferred":3720,
371#             "duplicate":123,
372#             "normal":123,
373#             "normal-bytes":123456,
374#             "dirty-sync-count":15
375#          },
376#          "disk":{
377#             "total":20971520,
378#             "remaining":20880384,
379#             "transferred":91136
380#          }
381#       }
382#    }
383#
384# 6. Migration is being performed and XBZRLE is active:
385#
386# -> { "execute": "query-migrate" }
387# <- {
388#       "return":{
389#          "status":"active",
390#          "total-time":12345,
391#          "setup-time":12345,
392#          "expected-downtime":12345,
393#          "ram":{
394#             "total":1057024,
395#             "remaining":1053304,
396#             "transferred":3720,
397#             "duplicate":10,
398#             "normal":3333,
399#             "normal-bytes":3412992,
400#             "dirty-sync-count":15
401#          },
402#          "xbzrle-cache":{
403#             "cache-size":67108864,
404#             "bytes":20971520,
405#             "pages":2444343,
406#             "cache-miss":2244,
407#             "cache-miss-rate":0.123,
408#             "encoding-rate":80.1,
409#             "overflow":34434
410#          }
411#       }
412#    }
413##
# Same command, same return type — split over two lines.
{ 'command': 'query-migrate',
  'returns': 'MigrationInfo' }
415
416##
417# @MigrationCapability:
418#
419# Migration capabilities enumeration
420#
421# @xbzrle: Migration supports xbzrle (Xor Based Zero Run Length
422#     Encoding). This feature allows us to minimize migration traffic
423#     for certain work loads, by sending compressed difference of the
424#     pages
425#
426# @rdma-pin-all: Controls whether or not the entire VM memory
427#     footprint is mlock()'d on demand or all at once.  Refer to
428#     docs/rdma.txt for usage.  Disabled by default.  (since 2.0)
429#
430# @zero-blocks: During storage migration encode blocks of zeroes
431#     efficiently.  This essentially saves 1MB of zeroes per block on
432#     the wire.  Enabling requires source and target VM to support
433#     this feature.  To enable it is sufficient to enable the
434#     capability on the source VM. The feature is disabled by default.
435#     (since 1.6)
436#
437# @compress: Use multiple compression threads to accelerate live
438#     migration.  This feature can help to reduce the migration
439#     traffic, by sending compressed pages.  Please note that if
440#     compress and xbzrle are both on, compress only takes effect in
441#     the ram bulk stage, after that, it will be disabled and only
442#     xbzrle takes effect, this can help to minimize migration
443#     traffic.  The feature is disabled by default.  (since 2.4)
444#
445# @events: generate events for each migration state change (since 2.4)
446#
447# @auto-converge: If enabled, QEMU will automatically throttle down
448#     the guest to speed up convergence of RAM migration.  (since 1.6)
449#
450# @postcopy-ram: Start executing on the migration target before all of
451#     RAM has been migrated, pulling the remaining pages along as
452#     needed.  The capacity must have the same setting on both source
453#     and target or migration will not even start.  NOTE: If the
454#     migration fails during postcopy the VM will fail.  (since 2.6)
455#
456# @x-colo: If enabled, migration will never end, and the state of the
457#     VM on the primary side will be migrated continuously to the VM
458#     on secondary side, this process is called COarse-Grain LOck
459#     Stepping (COLO) for Non-stop Service.  (since 2.8)
460#
461# @release-ram: if enabled, qemu will free the migrated ram pages on
462#     the source during postcopy-ram migration.  (since 2.9)
463#
464# @block: If enabled, QEMU will also migrate the contents of all block
465#     devices.  Default is disabled.  A possible alternative uses
466#     mirror jobs to a builtin NBD server on the destination, which
467#     offers more flexibility.  (Since 2.10)
468#
469# @return-path: If enabled, migration will use the return path even
470#     for precopy.  (since 2.10)
471#
472# @pause-before-switchover: Pause outgoing migration before
473#     serialising device state and before disabling block IO (since
474#     2.11)
475#
476# @multifd: Use more than one fd for migration (since 4.0)
477#
478# @dirty-bitmaps: If enabled, QEMU will migrate named dirty bitmaps.
479#     (since 2.12)
480#
481# @postcopy-blocktime: Calculate downtime for postcopy live migration
482#     (since 3.0)
483#
484# @late-block-activate: If enabled, the destination will not activate
485#     block devices (and thus take locks) immediately at the end of
486#     migration.  (since 3.0)
487#
488# @x-ignore-shared: If enabled, QEMU will not migrate shared memory
489#     that is accessible on the destination machine.  (since 4.0)
490#
491# @validate-uuid: Send the UUID of the source to allow the destination
492#     to ensure it is the same.  (since 4.2)
493#
494# @background-snapshot: If enabled, the migration stream will be a
495#     snapshot of the VM exactly at the point when the migration
496#     procedure starts.  The VM RAM is saved with running VM. (since
497#     6.0)
498#
499# @zero-copy-send: Controls behavior on sending memory pages on
500#     migration.  When true, enables a zero-copy mechanism for sending
501#     memory pages, if host supports it.  Requires that QEMU be
502#     permitted to use locked memory for guest RAM pages.  (since 7.1)
503#
504# @postcopy-preempt: If enabled, the migration process will allow
505#     postcopy requests to preempt precopy stream, so postcopy
506#     requests will be handled faster.  This is a performance feature
507#     and should not affect the correctness of postcopy migration.
508#     (since 7.1)
509#
510# @switchover-ack: If enabled, migration will not stop the source VM
511#     and complete the migration until an ACK is received from the
512#     destination that it's OK to do so.  Exactly when this ACK is
513#     sent depends on the migrated devices that use this feature.  For
514#     example, a device can use it to make sure some of its data is
515#     sent and loaded in the destination before doing switchover.
516#     This can reduce downtime if devices that support this capability
517#     are present.  'return-path' capability must be enabled to use
518#     it.  (since 8.1)
519#
520# @dirty-limit: If enabled, migration will throttle vCPUs as needed to
521#     keep their dirty page rate within @vcpu-dirty-limit.  This can
522#     improve responsiveness of large guests during live migration,
523#     and can result in more stable read performance.  Requires KVM
524#     with accelerator property "dirty-ring-size" set.  (Since 8.1)
525#
526# Features:
527#
528# @unstable: Members @x-colo and @x-ignore-shared are experimental.
529#
530# Since: 1.2
531##
# Same values in the same order (order is ABI-relevant for the generated
# enum) — reformatted one value per line.
{ 'enum': 'MigrationCapability',
  'data': [ 'xbzrle',
            'rdma-pin-all',
            'auto-converge',
            'zero-blocks',
            'compress',
            'events',
            'postcopy-ram',
            { 'name': 'x-colo', 'features': [ 'unstable' ] },
            'release-ram',
            'block',
            'return-path',
            'pause-before-switchover',
            'multifd',
            'dirty-bitmaps',
            'postcopy-blocktime',
            'late-block-activate',
            { 'name': 'x-ignore-shared', 'features': [ 'unstable' ] },
            'validate-uuid',
            'background-snapshot',
            'zero-copy-send',
            'postcopy-preempt',
            'switchover-ack',
            'dirty-limit' ] }
543
544##
545# @MigrationCapabilityStatus:
546#
547# Migration capability information
548#
549# @capability: capability enum
550#
551# @state: capability state bool
552#
553# Since: 1.2
554##
# Same two members — reformatted one member per line.
{ 'struct': 'MigrationCapabilityStatus',
  'data': { 'capability': 'MigrationCapability',
            'state': 'bool' } }
557
558##
559# @migrate-set-capabilities:
560#
561# Enable/Disable the following migration capabilities (like xbzrle)
562#
563# @capabilities: json array of capability modifications to make
564#
565# Since: 1.2
566#
567# Example:
568#
569# -> { "execute": "migrate-set-capabilities" , "arguments":
570#      { "capabilities": [ { "capability": "xbzrle", "state": true } ] } }
571# <- { "return": {} }
572##
# Same command, same argument — bracket spacing made symmetric.
{ 'command': 'migrate-set-capabilities',
  'data': { 'capabilities': [ 'MigrationCapabilityStatus' ] } }
575
576##
577# @query-migrate-capabilities:
578#
579# Returns information about the current migration capabilities status
580#
# Returns: a list of @MigrationCapabilityStatus
582#
583# Since: 1.2
584#
585# Example:
586#
587# -> { "execute": "query-migrate-capabilities" }
588# <- { "return": [
589#       {"state": false, "capability": "xbzrle"},
590#       {"state": false, "capability": "rdma-pin-all"},
591#       {"state": false, "capability": "auto-converge"},
592#       {"state": false, "capability": "zero-blocks"},
593#       {"state": false, "capability": "compress"},
594#       {"state": true, "capability": "events"},
595#       {"state": false, "capability": "postcopy-ram"},
596#       {"state": false, "capability": "x-colo"}
597#    ]}
598##
# Fix: irregular run of spaces after 'returns': and an over-long single line;
# split over two lines with normal spacing.  Command name and return type are
# unchanged.
{ 'command': 'query-migrate-capabilities',
  'returns': ['MigrationCapabilityStatus'] }
600
601##
602# @MultiFDCompression:
603#
604# An enumeration of multifd compression methods.
605#
606# @none: no compression.
607#
608# @zlib: use zlib compression method.
609#
610# @zstd: use zstd compression method.
611#
612# Since: 5.0
613##
# Same values in the same order, 'zstd' still gated on CONFIG_ZSTD —
# reformatted one value per line.
{ 'enum': 'MultiFDCompression',
  'data': [ 'none',
            'zlib',
            { 'name': 'zstd', 'if': 'CONFIG_ZSTD' } ] }
617
618##
619# @BitmapMigrationBitmapAliasTransform:
620#
621# @persistent: If present, the bitmap will be made persistent or
622#     transient depending on this parameter.
623#
624# Since: 6.0
625##
# Same single optional member — collapsed to the compact two-line form.
{ 'struct': 'BitmapMigrationBitmapAliasTransform',
  'data': { '*persistent': 'bool' } }
630
631##
632# @BitmapMigrationBitmapAlias:
633#
634# @name: The name of the bitmap.
635#
636# @alias: An alias name for migration (for example the bitmap name on
637#     the opposite site).
638#
639# @transform: Allows the modification of the migrated bitmap.  (since
640#     6.0)
641#
642# Since: 5.2
643##
# Same members, same order — reformatted to the hanging-indent style used by
# the other structs in this file.
{ 'struct': 'BitmapMigrationBitmapAlias',
  'data': { 'name': 'str',
            'alias': 'str',
            '*transform': 'BitmapMigrationBitmapAliasTransform' } }
650
651##
652# @BitmapMigrationNodeAlias:
653#
654# Maps a block node name and the bitmaps it has to aliases for dirty
655# bitmap migration.
656#
657# @node-name: A block node name.
658#
659# @alias: An alias block node name for migration (for example the node
660#     name on the opposite site).
661#
662# @bitmaps: Mappings for the bitmaps on this node.
663#
664# Since: 5.2
665##
# Same members, same order — reformatted to the hanging-indent style used by
# the other structs in this file.
{ 'struct': 'BitmapMigrationNodeAlias',
  'data': { 'node-name': 'str',
            'alias': 'str',
            'bitmaps': [ 'BitmapMigrationBitmapAlias' ] } }
672
673##
674# @MigrationParameter:
675#
676# Migration parameters enumeration
677#
678# @announce-initial: Initial delay (in milliseconds) before sending
679#     the first announce (Since 4.0)
680#
681# @announce-max: Maximum delay (in milliseconds) between packets in
682#     the announcement (Since 4.0)
683#
684# @announce-rounds: Number of self-announce packets sent after
685#     migration (Since 4.0)
686#
687# @announce-step: Increase in delay (in milliseconds) between
688#     subsequent packets in the announcement (Since 4.0)
689#
690# @compress-level: Set the compression level to be used in live
691#     migration, the compression level is an integer between 0 and 9,
692#     where 0 means no compression, 1 means the best compression
693#     speed, and 9 means best compression ratio which will consume
694#     more CPU.
695#
696# @compress-threads: Set compression thread count to be used in live
697#     migration, the compression thread count is an integer between 1
698#     and 255.
699#
700# @compress-wait-thread: Controls behavior when all compression
701#     threads are currently busy.  If true (default), wait for a free
702#     compression thread to become available; otherwise, send the page
703#     uncompressed.  (Since 3.1)
704#
705# @decompress-threads: Set decompression thread count to be used in
706#     live migration, the decompression thread count is an integer
707#     between 1 and 255. Usually, decompression is at least 4 times as
708#     fast as compression, so set the decompress-threads to the number
709#     about 1/4 of compress-threads is adequate.
710#
711# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
712#     bytes_xfer_period to trigger throttling.  It is expressed as
713#     percentage.  The default value is 50. (Since 5.0)
714#
715# @cpu-throttle-initial: Initial percentage of time guest cpus are
716#     throttled when migration auto-converge is activated.  The
717#     default value is 20. (Since 2.7)
718#
719# @cpu-throttle-increment: throttle percentage increase each time
720#     auto-converge detects that migration is not making progress.
721#     The default value is 10. (Since 2.7)
722#
# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage.  At
#     the tail stage of throttling, the Guest is very sensitive to CPU
#     percentage while the @cpu-throttle-increment is excessive
726#     usually at tail stage.  If this parameter is true, we will
727#     compute the ideal CPU percentage used by the Guest, which may
728#     exactly make the dirty rate match the dirty rate threshold.
729#     Then we will choose a smaller throttle increment between the one
730#     specified by @cpu-throttle-increment and the one generated by
731#     ideal CPU percentage.  Therefore, it is compatible to
732#     traditional throttling, meanwhile the throttle increment won't
733#     be excessive at tail stage.  The default value is false.  (Since
734#     5.1)
735#
736# @tls-creds: ID of the 'tls-creds' object that provides credentials
737#     for establishing a TLS connection over the migration data
738#     channel.  On the outgoing side of the migration, the credentials
739#     must be for a 'client' endpoint, while for the incoming side the
740#     credentials must be for a 'server' endpoint.  Setting this will
741#     enable TLS for all migrations.  The default is unset, resulting
742#     in unsecured migration at the QEMU level.  (Since 2.7)
743#
744# @tls-hostname: hostname of the target host for the migration.  This
745#     is required when using x509 based TLS credentials and the
746#     migration URI does not already include a hostname.  For example
747#     if using fd: or exec: based migration, the hostname must be
748#     provided so that the server's x509 certificate identity can be
749#     validated.  (Since 2.7)
750#
751# @tls-authz: ID of the 'authz' object subclass that provides access
752#     control checking of the TLS x509 certificate distinguished name.
753#     This object is only resolved at time of use, so can be deleted
754#     and recreated on the fly while the migration server is active.
755#     If missing, it will default to denying access (Since 4.0)
756#
757# @max-bandwidth: to set maximum speed for migration.  maximum speed
758#     in bytes per second.  (Since 2.8)
759#
760# @avail-switchover-bandwidth: to set the available bandwidth that
761#     migration can use during switchover phase.  NOTE!  This does not
762#     limit the bandwidth during switchover, but only for calculations when
763#     making decisions to switchover.  By default, this value is zero,
764#     which means QEMU will estimate the bandwidth automatically.  This can
765#     be set when the estimated value is not accurate, while the user is
766#     able to guarantee such bandwidth is available when switching over.
767#     When specified correctly, this can make the switchover decision much
768#     more accurate.  (Since 8.2)
769#
770# @downtime-limit: set maximum tolerated downtime for migration.
771#     maximum downtime in milliseconds (Since 2.8)
772#
773# @x-checkpoint-delay: The delay time (in ms) between two COLO
774#     checkpoints in periodic mode.  (Since 2.8)
775#
776# @block-incremental: Affects how much storage is migrated when the
777#     block migration capability is enabled.  When false, the entire
778#     storage backing chain is migrated into a flattened image at the
779#     destination; when true, only the active qcow2 layer is migrated
780#     and the destination must already have access to the same backing
781#     chain as was used on the source.  (since 2.10)
782#
# @multifd-channels: Number of channels used to migrate data in
#     parallel.  This is the same as the number of sockets used for
#     migration.  The default value is 2 (since 4.0)
786#
787# @xbzrle-cache-size: cache size to be used by XBZRLE migration.  It
788#     needs to be a multiple of the target page size and a power of 2
789#     (Since 2.11)
790#
791# @max-postcopy-bandwidth: Background transfer bandwidth during
792#     postcopy.  Defaults to 0 (unlimited).  In bytes per second.
793#     (Since 3.0)
794#
795# @max-cpu-throttle: maximum cpu throttle percentage.  Defaults to 99.
796#     (Since 3.1)
797#
798# @multifd-compression: Which compression method to use.  Defaults to
799#     none.  (Since 5.0)
800#
801# @multifd-zlib-level: Set the compression level to be used in live
802#     migration, the compression level is an integer between 0 and 9,
803#     where 0 means no compression, 1 means the best compression
804#     speed, and 9 means best compression ratio which will consume
805#     more CPU. Defaults to 1. (Since 5.0)
806#
807# @multifd-zstd-level: Set the compression level to be used in live
808#     migration, the compression level is an integer between 0 and 20,
809#     where 0 means no compression, 1 means the best compression
810#     speed, and 20 means best compression ratio which will consume
811#     more CPU. Defaults to 1. (Since 5.0)
812#
813# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
814#     aliases for the purpose of dirty bitmap migration.  Such aliases
815#     may for example be the corresponding names on the opposite site.
816#     The mapping must be one-to-one, but not necessarily complete: On
817#     the source, unmapped bitmaps and all bitmaps on unmapped nodes
818#     will be ignored.  On the destination, encountering an unmapped
819#     alias in the incoming migration stream will result in a report,
820#     and all further bitmap migration data will then be discarded.
821#     Note that the destination does not know about bitmaps it does
822#     not receive, so there is no limitation or requirement regarding
823#     the number of bitmaps received, or how they are named, or on
824#     which nodes they are placed.  By default (when this parameter
825#     has never been set), bitmap names are mapped to themselves.
826#     Nodes are mapped to their block device name if there is one, and
827#     to their node name otherwise.  (Since 5.2)
828#
829# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty
830#     limit during live migration.  Should be in the range 1 to 1000ms.
831#     Defaults to 1000ms.  (Since 8.1)
832#
833# @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration.
834#     Defaults to 1.  (Since 8.1)
835#
836# Features:
837#
838# @unstable: Members @x-checkpoint-delay and @x-vcpu-dirty-limit-period
839#     are experimental.
840#
841# Since: 2.4
842##
# Same values in exactly the same order (order is ABI-relevant for the
# generated enum) — reformatted one value per line.
{ 'enum': 'MigrationParameter',
  'data': [ 'announce-initial',
            'announce-max',
            'announce-rounds',
            'announce-step',
            'compress-level',
            'compress-threads',
            'decompress-threads',
            'compress-wait-thread',
            'throttle-trigger-threshold',
            'cpu-throttle-initial',
            'cpu-throttle-increment',
            'cpu-throttle-tailslow',
            'tls-creds',
            'tls-hostname',
            'tls-authz',
            'max-bandwidth',
            'avail-switchover-bandwidth',
            'downtime-limit',
            { 'name': 'x-checkpoint-delay', 'features': [ 'unstable' ] },
            'block-incremental',
            'multifd-channels',
            'xbzrle-cache-size',
            'max-postcopy-bandwidth',
            'max-cpu-throttle',
            'multifd-compression',
            'multifd-zlib-level',
            'multifd-zstd-level',
            'block-bitmap-mapping',
            { 'name': 'x-vcpu-dirty-limit-period', 'features': [ 'unstable' ] },
            'vcpu-dirty-limit' ] }
861
862##
863# @MigrateSetParameters:
864#
865# @announce-initial: Initial delay (in milliseconds) before sending
866#     the first announce (Since 4.0)
867#
868# @announce-max: Maximum delay (in milliseconds) between packets in
869#     the announcement (Since 4.0)
870#
871# @announce-rounds: Number of self-announce packets sent after
872#     migration (Since 4.0)
873#
874# @announce-step: Increase in delay (in milliseconds) between
875#     subsequent packets in the announcement (Since 4.0)
876#
877# @compress-level: compression level
878#
879# @compress-threads: compression thread count
880#
881# @compress-wait-thread: Controls behavior when all compression
882#     threads are currently busy.  If true (default), wait for a free
883#     compression thread to become available; otherwise, send the page
884#     uncompressed.  (Since 3.1)
885#
886# @decompress-threads: decompression thread count
887#
888# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
889#     bytes_xfer_period to trigger throttling.  It is expressed as
890#     percentage.  The default value is 50. (Since 5.0)
891#
892# @cpu-throttle-initial: Initial percentage of time guest cpus are
893#     throttled when migration auto-converge is activated.  The
894#     default value is 20. (Since 2.7)
895#
896# @cpu-throttle-increment: throttle percentage increase each time
897#     auto-converge detects that migration is not making progress.
898#     The default value is 10. (Since 2.7)
899#
# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage.
#     At the tail stage of throttling, the Guest is very sensitive to
#     CPU percentage while the @cpu-throttle-increment is excessive
903#     usually at tail stage.  If this parameter is true, we will
904#     compute the ideal CPU percentage used by the Guest, which may
905#     exactly make the dirty rate match the dirty rate threshold.
906#     Then we will choose a smaller throttle increment between the one
907#     specified by @cpu-throttle-increment and the one generated by
#     ideal CPU percentage.  Therefore, it is compatible with
#     traditional throttling, meanwhile the throttle increment won't
910#     be excessive at tail stage.  The default value is false.  (Since
911#     5.1)
912#
913# @tls-creds: ID of the 'tls-creds' object that provides credentials
914#     for establishing a TLS connection over the migration data
915#     channel.  On the outgoing side of the migration, the credentials
916#     must be for a 'client' endpoint, while for the incoming side the
917#     credentials must be for a 'server' endpoint.  Setting this to a
918#     non-empty string enables TLS for all migrations.  An empty
919#     string means that QEMU will use plain text mode for migration,
920#     rather than TLS (Since 2.9) Previously (since 2.7), this was
921#     reported by omitting tls-creds instead.
922#
923# @tls-hostname: hostname of the target host for the migration.  This
924#     is required when using x509 based TLS credentials and the
925#     migration URI does not already include a hostname.  For example
926#     if using fd: or exec: based migration, the hostname must be
927#     provided so that the server's x509 certificate identity can be
928#     validated.  (Since 2.7) An empty string means that QEMU will use
929#     the hostname associated with the migration URI, if any.  (Since
930#     2.9) Previously (since 2.7), this was reported by omitting
931#     tls-hostname instead.
932#
933# @max-bandwidth: to set maximum speed for migration.  maximum speed
934#     in bytes per second.  (Since 2.8)
935#
936# @avail-switchover-bandwidth: to set the available bandwidth that
937#     migration can use during switchover phase.  NOTE!  This does not
938#     limit the bandwidth during switchover, but only for calculations when
939#     making decisions to switchover.  By default, this value is zero,
940#     which means QEMU will estimate the bandwidth automatically.  This can
941#     be set when the estimated value is not accurate, while the user is
942#     able to guarantee such bandwidth is available when switching over.
943#     When specified correctly, this can make the switchover decision much
944#     more accurate.  (Since 8.2)
945#
946# @downtime-limit: set maximum tolerated downtime for migration.
947#     maximum downtime in milliseconds (Since 2.8)
948#
949# @x-checkpoint-delay: the delay time between two COLO checkpoints.
950#     (Since 2.8)
951#
952# @block-incremental: Affects how much storage is migrated when the
953#     block migration capability is enabled.  When false, the entire
954#     storage backing chain is migrated into a flattened image at the
955#     destination; when true, only the active qcow2 layer is migrated
956#     and the destination must already have access to the same backing
957#     chain as was used on the source.  (since 2.10)
958#
# @multifd-channels: Number of channels used to migrate data in
#     parallel.  This is the same number as the number of sockets
#     used for migration.  The default value is 2 (since 4.0)
962#
963# @xbzrle-cache-size: cache size to be used by XBZRLE migration.  It
964#     needs to be a multiple of the target page size and a power of 2
965#     (Since 2.11)
966#
967# @max-postcopy-bandwidth: Background transfer bandwidth during
968#     postcopy.  Defaults to 0 (unlimited).  In bytes per second.
969#     (Since 3.0)
970#
971# @max-cpu-throttle: maximum cpu throttle percentage.  The default
972#     value is 99. (Since 3.1)
973#
974# @multifd-compression: Which compression method to use.  Defaults to
975#     none.  (Since 5.0)
976#
977# @multifd-zlib-level: Set the compression level to be used in live
978#     migration, the compression level is an integer between 0 and 9,
979#     where 0 means no compression, 1 means the best compression
980#     speed, and 9 means best compression ratio which will consume
981#     more CPU. Defaults to 1. (Since 5.0)
982#
983# @multifd-zstd-level: Set the compression level to be used in live
984#     migration, the compression level is an integer between 0 and 20,
985#     where 0 means no compression, 1 means the best compression
986#     speed, and 20 means best compression ratio which will consume
987#     more CPU. Defaults to 1. (Since 5.0)
988#
989# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
990#     aliases for the purpose of dirty bitmap migration.  Such aliases
991#     may for example be the corresponding names on the opposite site.
992#     The mapping must be one-to-one, but not necessarily complete: On
993#     the source, unmapped bitmaps and all bitmaps on unmapped nodes
994#     will be ignored.  On the destination, encountering an unmapped
995#     alias in the incoming migration stream will result in a report,
996#     and all further bitmap migration data will then be discarded.
997#     Note that the destination does not know about bitmaps it does
998#     not receive, so there is no limitation or requirement regarding
999#     the number of bitmaps received, or how they are named, or on
1000#     which nodes they are placed.  By default (when this parameter
1001#     has never been set), bitmap names are mapped to themselves.
1002#     Nodes are mapped to their block device name if there is one, and
1003#     to their node name otherwise.  (Since 5.2)
1004#
1005# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty
1006#     limit during live migration.  Should be in the range 1 to 1000ms.
1007#     Defaults to 1000ms.  (Since 8.1)
1008#
1009# @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration.
1010#     Defaults to 1.  (Since 8.1)
1011#
1012# Features:
1013#
1014# @unstable: Members @x-checkpoint-delay and @x-vcpu-dirty-limit-period
1015#     are experimental.
1016#
1017# TODO: either fuse back into MigrationParameters, or make
1018#     MigrationParameters members mandatory
1019#
1020# Since: 2.4
1021##
1022{ 'struct': 'MigrateSetParameters',
1023  'data': { '*announce-initial': 'size',
1024            '*announce-max': 'size',
1025            '*announce-rounds': 'size',
1026            '*announce-step': 'size',
1027            '*compress-level': 'uint8',
1028            '*compress-threads': 'uint8',
1029            '*compress-wait-thread': 'bool',
1030            '*decompress-threads': 'uint8',
1031            '*throttle-trigger-threshold': 'uint8',
1032            '*cpu-throttle-initial': 'uint8',
1033            '*cpu-throttle-increment': 'uint8',
1034            '*cpu-throttle-tailslow': 'bool',
1035            '*tls-creds': 'StrOrNull',
1036            '*tls-hostname': 'StrOrNull',
1037            '*tls-authz': 'StrOrNull',
1038            '*max-bandwidth': 'size',
1039            '*avail-switchover-bandwidth': 'size',
1040            '*downtime-limit': 'uint64',
1041            '*x-checkpoint-delay': { 'type': 'uint32',
1042                                     'features': [ 'unstable' ] },
1043            '*block-incremental': 'bool',
1044            '*multifd-channels': 'uint8',
1045            '*xbzrle-cache-size': 'size',
1046            '*max-postcopy-bandwidth': 'size',
1047            '*max-cpu-throttle': 'uint8',
1048            '*multifd-compression': 'MultiFDCompression',
1049            '*multifd-zlib-level': 'uint8',
1050            '*multifd-zstd-level': 'uint8',
1051            '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ],
1052            '*x-vcpu-dirty-limit-period': { 'type': 'uint64',
1053                                            'features': [ 'unstable' ] },
1054            '*vcpu-dirty-limit': 'uint64'} }
1055
1056##
1057# @migrate-set-parameters:
1058#
1059# Set various migration parameters.
1060#
1061# Since: 2.4
1062#
1063# Example:
1064#
1065# -> { "execute": "migrate-set-parameters" ,
1066#      "arguments": { "compress-level": 1 } }
1067# <- { "return": {} }
1068##
1069{ 'command': 'migrate-set-parameters', 'boxed': true,
1070  'data': 'MigrateSetParameters' }
1071
1072##
1073# @MigrationParameters:
1074#
1075# The optional members aren't actually optional.
1076#
1077# @announce-initial: Initial delay (in milliseconds) before sending
1078#     the first announce (Since 4.0)
1079#
1080# @announce-max: Maximum delay (in milliseconds) between packets in
1081#     the announcement (Since 4.0)
1082#
1083# @announce-rounds: Number of self-announce packets sent after
1084#     migration (Since 4.0)
1085#
1086# @announce-step: Increase in delay (in milliseconds) between
1087#     subsequent packets in the announcement (Since 4.0)
1088#
1089# @compress-level: compression level
1090#
1091# @compress-threads: compression thread count
1092#
1093# @compress-wait-thread: Controls behavior when all compression
1094#     threads are currently busy.  If true (default), wait for a free
1095#     compression thread to become available; otherwise, send the page
1096#     uncompressed.  (Since 3.1)
1097#
1098# @decompress-threads: decompression thread count
1099#
1100# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
1101#     bytes_xfer_period to trigger throttling.  It is expressed as
1102#     percentage.  The default value is 50. (Since 5.0)
1103#
1104# @cpu-throttle-initial: Initial percentage of time guest cpus are
1105#     throttled when migration auto-converge is activated.  (Since
1106#     2.7)
1107#
1108# @cpu-throttle-increment: throttle percentage increase each time
1109#     auto-converge detects that migration is not making progress.
1110#     (Since 2.7)
1111#
# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage.
#     At the tail stage of throttling, the Guest is very sensitive to
#     CPU percentage while the @cpu-throttle-increment is excessive
1115#     usually at tail stage.  If this parameter is true, we will
1116#     compute the ideal CPU percentage used by the Guest, which may
1117#     exactly make the dirty rate match the dirty rate threshold.
1118#     Then we will choose a smaller throttle increment between the one
1119#     specified by @cpu-throttle-increment and the one generated by
#     ideal CPU percentage.  Therefore, it is compatible with
#     traditional throttling, meanwhile the throttle increment won't
1122#     be excessive at tail stage.  The default value is false.  (Since
1123#     5.1)
1124#
1125# @tls-creds: ID of the 'tls-creds' object that provides credentials
1126#     for establishing a TLS connection over the migration data
1127#     channel.  On the outgoing side of the migration, the credentials
1128#     must be for a 'client' endpoint, while for the incoming side the
1129#     credentials must be for a 'server' endpoint.  An empty string
1130#     means that QEMU will use plain text mode for migration, rather
1131#     than TLS (Since 2.7) Note: 2.8 reports this by omitting
1132#     tls-creds instead.
1133#
1134# @tls-hostname: hostname of the target host for the migration.  This
1135#     is required when using x509 based TLS credentials and the
1136#     migration URI does not already include a hostname.  For example
1137#     if using fd: or exec: based migration, the hostname must be
1138#     provided so that the server's x509 certificate identity can be
1139#     validated.  (Since 2.7) An empty string means that QEMU will use
1140#     the hostname associated with the migration URI, if any.  (Since
1141#     2.9) Note: 2.8 reports this by omitting tls-hostname instead.
1142#
1143# @tls-authz: ID of the 'authz' object subclass that provides access
1144#     control checking of the TLS x509 certificate distinguished name.
1145#     (Since 4.0)
1146#
1147# @max-bandwidth: to set maximum speed for migration.  maximum speed
1148#     in bytes per second.  (Since 2.8)
1149#
1150# @avail-switchover-bandwidth: to set the available bandwidth that
1151#     migration can use during switchover phase.  NOTE!  This does not
1152#     limit the bandwidth during switchover, but only for calculations when
1153#     making decisions to switchover.  By default, this value is zero,
1154#     which means QEMU will estimate the bandwidth automatically.  This can
1155#     be set when the estimated value is not accurate, while the user is
1156#     able to guarantee such bandwidth is available when switching over.
1157#     When specified correctly, this can make the switchover decision much
1158#     more accurate.  (Since 8.2)
1159#
1160# @downtime-limit: set maximum tolerated downtime for migration.
1161#     maximum downtime in milliseconds (Since 2.8)
1162#
1163# @x-checkpoint-delay: the delay time between two COLO checkpoints.
1164#     (Since 2.8)
1165#
1166# @block-incremental: Affects how much storage is migrated when the
1167#     block migration capability is enabled.  When false, the entire
1168#     storage backing chain is migrated into a flattened image at the
1169#     destination; when true, only the active qcow2 layer is migrated
1170#     and the destination must already have access to the same backing
1171#     chain as was used on the source.  (since 2.10)
1172#
# @multifd-channels: Number of channels used to migrate data in
#     parallel.  This is the same number as the number of sockets
#     used for migration.  The default value is 2 (since 4.0)
1176#
1177# @xbzrle-cache-size: cache size to be used by XBZRLE migration.  It
1178#     needs to be a multiple of the target page size and a power of 2
1179#     (Since 2.11)
1180#
1181# @max-postcopy-bandwidth: Background transfer bandwidth during
1182#     postcopy.  Defaults to 0 (unlimited).  In bytes per second.
1183#     (Since 3.0)
1184#
1185# @max-cpu-throttle: maximum cpu throttle percentage.  Defaults to 99.
1186#     (Since 3.1)
1187#
1188# @multifd-compression: Which compression method to use.  Defaults to
1189#     none.  (Since 5.0)
1190#
1191# @multifd-zlib-level: Set the compression level to be used in live
1192#     migration, the compression level is an integer between 0 and 9,
1193#     where 0 means no compression, 1 means the best compression
1194#     speed, and 9 means best compression ratio which will consume
1195#     more CPU. Defaults to 1. (Since 5.0)
1196#
1197# @multifd-zstd-level: Set the compression level to be used in live
1198#     migration, the compression level is an integer between 0 and 20,
1199#     where 0 means no compression, 1 means the best compression
1200#     speed, and 20 means best compression ratio which will consume
1201#     more CPU. Defaults to 1. (Since 5.0)
1202#
1203# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
1204#     aliases for the purpose of dirty bitmap migration.  Such aliases
1205#     may for example be the corresponding names on the opposite site.
1206#     The mapping must be one-to-one, but not necessarily complete: On
1207#     the source, unmapped bitmaps and all bitmaps on unmapped nodes
1208#     will be ignored.  On the destination, encountering an unmapped
1209#     alias in the incoming migration stream will result in a report,
1210#     and all further bitmap migration data will then be discarded.
1211#     Note that the destination does not know about bitmaps it does
1212#     not receive, so there is no limitation or requirement regarding
1213#     the number of bitmaps received, or how they are named, or on
1214#     which nodes they are placed.  By default (when this parameter
1215#     has never been set), bitmap names are mapped to themselves.
1216#     Nodes are mapped to their block device name if there is one, and
1217#     to their node name otherwise.  (Since 5.2)
1218#
1219# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty
1220#     limit during live migration.  Should be in the range 1 to 1000ms.
1221#     Defaults to 1000ms.  (Since 8.1)
1222#
1223# @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration.
1224#     Defaults to 1.  (Since 8.1)
1225#
1226# Features:
1227#
1228# @unstable: Members @x-checkpoint-delay and @x-vcpu-dirty-limit-period
1229#     are experimental.
1230#
1231# Since: 2.4
1232##
1233{ 'struct': 'MigrationParameters',
1234  'data': { '*announce-initial': 'size',
1235            '*announce-max': 'size',
1236            '*announce-rounds': 'size',
1237            '*announce-step': 'size',
1238            '*compress-level': 'uint8',
1239            '*compress-threads': 'uint8',
1240            '*compress-wait-thread': 'bool',
1241            '*decompress-threads': 'uint8',
1242            '*throttle-trigger-threshold': 'uint8',
1243            '*cpu-throttle-initial': 'uint8',
1244            '*cpu-throttle-increment': 'uint8',
1245            '*cpu-throttle-tailslow': 'bool',
1246            '*tls-creds': 'str',
1247            '*tls-hostname': 'str',
1248            '*tls-authz': 'str',
1249            '*max-bandwidth': 'size',
1250            '*avail-switchover-bandwidth': 'size',
1251            '*downtime-limit': 'uint64',
1252            '*x-checkpoint-delay': { 'type': 'uint32',
1253                                     'features': [ 'unstable' ] },
1254            '*block-incremental': 'bool',
1255            '*multifd-channels': 'uint8',
1256            '*xbzrle-cache-size': 'size',
1257            '*max-postcopy-bandwidth': 'size',
1258            '*max-cpu-throttle': 'uint8',
1259            '*multifd-compression': 'MultiFDCompression',
1260            '*multifd-zlib-level': 'uint8',
1261            '*multifd-zstd-level': 'uint8',
1262            '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ],
1263            '*x-vcpu-dirty-limit-period': { 'type': 'uint64',
1264                                            'features': [ 'unstable' ] },
1265            '*vcpu-dirty-limit': 'uint64'} }
1266
1267##
1268# @query-migrate-parameters:
1269#
1270# Returns information about the current migration parameters
1271#
1272# Returns: @MigrationParameters
1273#
1274# Since: 2.4
1275#
1276# Example:
1277#
1278# -> { "execute": "query-migrate-parameters" }
1279# <- { "return": {
1280#          "decompress-threads": 2,
1281#          "cpu-throttle-increment": 10,
1282#          "compress-threads": 8,
1283#          "compress-level": 1,
1284#          "cpu-throttle-initial": 20,
1285#          "max-bandwidth": 33554432,
1286#          "downtime-limit": 300
1287#       }
1288#    }
1289##
1290{ 'command': 'query-migrate-parameters',
1291  'returns': 'MigrationParameters' }
1292
1293##
1294# @migrate-start-postcopy:
1295#
1296# Followup to a migration command to switch the migration to postcopy
1297# mode.  The postcopy-ram capability must be set on both source and
1298# destination before the original migration command.
1299#
1300# Since: 2.5
1301#
1302# Example:
1303#
1304# -> { "execute": "migrate-start-postcopy" }
1305# <- { "return": {} }
1306##
1307{ 'command': 'migrate-start-postcopy' }
1308
1309##
1310# @MIGRATION:
1311#
1312# Emitted when a migration event happens
1313#
1314# @status: @MigrationStatus describing the current migration status.
1315#
1316# Since: 2.4
1317#
1318# Example:
1319#
1320# <- {"timestamp": {"seconds": 1432121972, "microseconds": 744001},
1321#     "event": "MIGRATION",
1322#     "data": {"status": "completed"} }
1323##
1324{ 'event': 'MIGRATION',
1325  'data': {'status': 'MigrationStatus'}}
1326
1327##
1328# @MIGRATION_PASS:
1329#
1330# Emitted from the source side of a migration at the start of each
1331# pass (when it syncs the dirty bitmap)
1332#
1333# @pass: An incrementing count (starting at 1 on the first pass)
1334#
1335# Since: 2.6
1336#
1337# Example:
1338#
1339# <- { "timestamp": {"seconds": 1449669631, "microseconds": 239225},
1340#       "event": "MIGRATION_PASS", "data": {"pass": 2} }
1341##
1342{ 'event': 'MIGRATION_PASS',
1343  'data': { 'pass': 'int' } }
1344
1345##
1346# @COLOMessage:
1347#
1348# The message transmission between Primary side and Secondary side.
1349#
1350# @checkpoint-ready: Secondary VM (SVM) is ready for checkpointing
1351#
1352# @checkpoint-request: Primary VM (PVM) tells SVM to prepare for
1353#     checkpointing
1354#
1355# @checkpoint-reply: SVM gets PVM's checkpoint request
1356#
1357# @vmstate-send: VM's state will be sent by PVM.
1358#
1359# @vmstate-size: The total size of VMstate.
1360#
1361# @vmstate-received: VM's state has been received by SVM.
1362#
1363# @vmstate-loaded: VM's state has been loaded by SVM.
1364#
1365# Since: 2.8
1366##
1367{ 'enum': 'COLOMessage',
1368  'data': [ 'checkpoint-ready', 'checkpoint-request', 'checkpoint-reply',
1369            'vmstate-send', 'vmstate-size', 'vmstate-received',
1370            'vmstate-loaded' ] }
1371
1372##
1373# @COLOMode:
1374#
1375# The COLO current mode.
1376#
1377# @none: COLO is disabled.
1378#
1379# @primary: COLO node in primary side.
1380#
# @secondary: COLO node in secondary side.
1382#
1383# Since: 2.8
1384##
1385{ 'enum': 'COLOMode',
1386  'data': [ 'none', 'primary', 'secondary'] }
1387
1388##
1389# @FailoverStatus:
1390#
1391# An enumeration of COLO failover status
1392#
1393# @none: no failover has ever happened
1394#
1395# @require: got failover requirement but not handled
1396#
1397# @active: in the process of doing failover
1398#
1399# @completed: finish the process of failover
1400#
1401# @relaunch: restart the failover process, from 'none' -> 'completed'
1402#     (Since 2.9)
1403#
1404# Since: 2.8
1405##
1406{ 'enum': 'FailoverStatus',
1407  'data': [ 'none', 'require', 'active', 'completed', 'relaunch' ] }
1408
1409##
1410# @COLO_EXIT:
1411#
1412# Emitted when VM finishes COLO mode due to some errors happening or
1413# at the request of users.
1414#
1415# @mode: report COLO mode when COLO exited.
1416#
1417# @reason: describes the reason for the COLO exit.
1418#
1419# Since: 3.1
1420#
1421# Example:
1422#
1423# <- { "timestamp": {"seconds": 2032141960, "microseconds": 417172},
1424#      "event": "COLO_EXIT", "data": {"mode": "primary", "reason": "request" } }
1425##
1426{ 'event': 'COLO_EXIT',
1427  'data': {'mode': 'COLOMode', 'reason': 'COLOExitReason' } }
1428
1429##
1430# @COLOExitReason:
1431#
1432# The reason for a COLO exit.
1433#
1434# @none: failover has never happened.  This state does not occur in
1435#     the COLO_EXIT event, and is only visible in the result of
1436#     query-colo-status.
1437#
1438# @request: COLO exit is due to an external request.
1439#
1440# @error: COLO exit is due to an internal error.
1441#
1442# @processing: COLO is currently handling a failover (since 4.0).
1443#
1444# Since: 3.1
1445##
1446{ 'enum': 'COLOExitReason',
1447  'data': [ 'none', 'request', 'error' , 'processing' ] }
1448
1449##
1450# @x-colo-lost-heartbeat:
1451#
1452# Tell qemu that heartbeat is lost, request it to do takeover
1453# procedures.  If this command is sent to the PVM, the Primary side
1454# will exit COLO mode.  If sent to the Secondary, the Secondary side
# will run failover work, then take over server operation to become
1456# the service VM.
1457#
1458# Features:
1459#
1460# @unstable: This command is experimental.
1461#
1462# Since: 2.8
1463#
1464# Example:
1465#
1466# -> { "execute": "x-colo-lost-heartbeat" }
1467# <- { "return": {} }
1468##
1469{ 'command': 'x-colo-lost-heartbeat',
1470  'features': [ 'unstable' ],
1471  'if': 'CONFIG_REPLICATION' }
1472
1473##
1474# @migrate_cancel:
1475#
1476# Cancel the current executing migration process.
1477#
1478# Returns: nothing on success
1479#
1480# Notes: This command succeeds even if there is no migration process
1481#     running.
1482#
1483# Since: 0.14
1484#
1485# Example:
1486#
1487# -> { "execute": "migrate_cancel" }
1488# <- { "return": {} }
1489##
1490{ 'command': 'migrate_cancel' }
1491
1492##
1493# @migrate-continue:
1494#
1495# Continue migration when it's in a paused state.
1496#
1497# @state: The state the migration is currently expected to be in
1498#
1499# Returns: nothing on success
1500#
1501# Since: 2.11
1502#
1503# Example:
1504#
1505# -> { "execute": "migrate-continue" , "arguments":
1506#      { "state": "pre-switchover" } }
1507# <- { "return": {} }
1508##
1509{ 'command': 'migrate-continue', 'data': {'state': 'MigrationStatus'} }
1510
1511##
1512# @migrate:
1513#
1514# Migrates the current running guest to another Virtual Machine.
1515#
1516# @uri: the Uniform Resource Identifier of the destination VM
1517#
1518# @blk: do block migration (full disk copy)
1519#
1520# @inc: incremental disk copy migration
1521#
1522# @detach: this argument exists only for compatibility reasons and is
1523#     ignored by QEMU
1524#
1525# @resume: resume one paused migration, default "off". (since 3.0)
1526#
1527# Features:
1528#
1529# @deprecated: Members @inc and @blk are deprecated.  Use
1530#     blockdev-mirror with NBD instead.
1531#
1532# Returns: nothing on success
1533#
1534# Since: 0.14
1535#
1536# Notes:
1537#
1538# 1. The 'query-migrate' command should be used to check migration's
1539#    progress and final result (this information is provided by the
1540#    'status' member)
1541#
1542# 2. All boolean arguments default to false
1543#
1544# 3. The user Monitor's "detach" argument is invalid in QMP and should
1545#    not be used
1546#
1547# Example:
1548#
1549# -> { "execute": "migrate", "arguments": { "uri": "tcp:0:4446" } }
1550# <- { "return": {} }
1551##
1552{ 'command': 'migrate',
1553  'data': {'uri': 'str',
1554           '*blk': { 'type': 'bool', 'features': [ 'deprecated' ] },
1555           '*inc': { 'type': 'bool', 'features': [ 'deprecated' ] },
1556           '*detach': 'bool', '*resume': 'bool' } }
1557
1558##
1559# @migrate-incoming:
1560#
1561# Start an incoming migration, the qemu must have been started with
1562# -incoming defer
1563#
1564# @uri: The Uniform Resource Identifier identifying the source or
1565#     address to listen on
1566#
1567# Returns: nothing on success
1568#
1569# Since: 2.3
1570#
1571# Notes:
1572#
1573# 1. It's a bad idea to use a string for the uri, but it needs
1574#    to stay compatible with -incoming and the format of the uri
1575#    is already exposed above libvirt.
1576#
1577# 2. QEMU must be started with -incoming defer to allow
1578#    migrate-incoming to be used.
1579#
1580# 3. The uri format is the same as for -incoming
1581#
1582# Example:
1583#
1584# -> { "execute": "migrate-incoming",
1585#      "arguments": { "uri": "tcp::4446" } }
1586# <- { "return": {} }
1587##
1588{ 'command': 'migrate-incoming', 'data': {'uri': 'str' } }
1589
1590##
1591# @xen-save-devices-state:
1592#
1593# Save the state of all devices to file.  The RAM and the block
1594# devices of the VM are not saved by this command.
1595#
1596# @filename: the file to save the state of the devices to as binary
1597#     data.  See xen-save-devices-state.txt for a description of the
1598#     binary format.
1599#
1600# @live: Optional argument to ask QEMU to treat this command as part
1601#     of a live migration.  Default to true.  (since 2.11)
1602#
# Returns: nothing on success
1604#
1605# Since: 1.1
1606#
1607# Example:
1608#
1609# -> { "execute": "xen-save-devices-state",
1610#      "arguments": { "filename": "/tmp/save" } }
1611# <- { "return": {} }
1612##
1613{ 'command': 'xen-save-devices-state',
1614  'data': {'filename': 'str', '*live':'bool' } }
1615
1616##
1617# @xen-set-global-dirty-log:
1618#
1619# Enable or disable the global dirty log mode.
1620#
1621# @enable: true to enable, false to disable.
1622#
1623# Returns: nothing
1624#
1625# Since: 1.3
1626#
1627# Example:
1628#
1629# -> { "execute": "xen-set-global-dirty-log",
1630#      "arguments": { "enable": true } }
1631# <- { "return": {} }
1632##
1633{ 'command': 'xen-set-global-dirty-log', 'data': { 'enable': 'bool' } }
1634
1635##
1636# @xen-load-devices-state:
1637#
1638# Load the state of all devices from file.  The RAM and the block
1639# devices of the VM are not loaded by this command.
1640#
1641# @filename: the file to load the state of the devices from as binary
1642#     data.  See xen-save-devices-state.txt for a description of the
1643#     binary format.
1644#
1645# Since: 2.7
1646#
1647# Example:
1648#
1649# -> { "execute": "xen-load-devices-state",
1650#      "arguments": { "filename": "/tmp/resume" } }
1651# <- { "return": {} }
1652##
1653{ 'command': 'xen-load-devices-state', 'data': {'filename': 'str'} }
1654
1655##
1656# @xen-set-replication:
1657#
1658# Enable or disable replication.
1659#
1660# @enable: true to enable, false to disable.
1661#
1662# @primary: true for primary or false for secondary.
1663#
# @failover: true to do failover, false to stop.  But it cannot be
#     specified if 'enable' is true.  Default value is false.
1666#
1667# Returns: nothing.
1668#
1669# Example:
1670#
1671# -> { "execute": "xen-set-replication",
1672#      "arguments": {"enable": true, "primary": false} }
1673# <- { "return": {} }
1674#
1675# Since: 2.9
1676##
1677{ 'command': 'xen-set-replication',
1678  'data': { 'enable': 'bool', 'primary': 'bool', '*failover': 'bool' },
1679  'if': 'CONFIG_REPLICATION' }
1680
1681##
1682# @ReplicationStatus:
1683#
1684# The result format for 'query-xen-replication-status'.
1685#
1686# @error: true if an error happened, false if replication is normal.
1687#
1688# @desc: the human readable error description string, when @error is
1689#     'true'.
1690#
1691# Since: 2.9
1692##
1693{ 'struct': 'ReplicationStatus',
1694  'data': { 'error': 'bool', '*desc': 'str' },
1695  'if': 'CONFIG_REPLICATION' }
1696
1697##
1698# @query-xen-replication-status:
1699#
1700# Query replication status while the vm is running.
1701#
1702# Returns: A @ReplicationStatus object showing the status.
1703#
1704# Example:
1705#
1706# -> { "execute": "query-xen-replication-status" }
1707# <- { "return": { "error": false } }
1708#
1709# Since: 2.9
1710##
1711{ 'command': 'query-xen-replication-status',
1712  'returns': 'ReplicationStatus',
1713  'if': 'CONFIG_REPLICATION' }
1714
1715##
1716# @xen-colo-do-checkpoint:
1717#
1718# Xen uses this command to notify replication to trigger a checkpoint.
1719#
1720# Returns: nothing.
1721#
1722# Example:
1723#
1724# -> { "execute": "xen-colo-do-checkpoint" }
1725# <- { "return": {} }
1726#
1727# Since: 2.9
1728##
1729{ 'command': 'xen-colo-do-checkpoint',
1730  'if': 'CONFIG_REPLICATION' }
1731
1732##
1733# @COLOStatus:
1734#
1735# The result format for 'query-colo-status'.
1736#
1737# @mode: COLO running mode.  If COLO is running, this field will
1738#     return 'primary' or 'secondary'.
1739#
# @last-mode: COLO last running mode.  If COLO is running, this field
#     will return the same as the @mode field.  After failover, this
#     field can be used to get the last COLO mode.  (since 4.0)
1743#
1744# @reason: describes the reason for the COLO exit.
1745#
1746# Since: 3.1
1747##
1748{ 'struct': 'COLOStatus',
1749  'data': { 'mode': 'COLOMode', 'last-mode': 'COLOMode',
1750            'reason': 'COLOExitReason' },
1751  'if': 'CONFIG_REPLICATION' }
1752
1753##
1754# @query-colo-status:
1755#
1756# Query COLO status while the vm is running.
1757#
1758# Returns: A @COLOStatus object showing the status.
1759#
1760# Example:
1761#
1762# -> { "execute": "query-colo-status" }
1763# <- { "return": { "mode": "primary", "last-mode": "none", "reason": "request" } }
1764#
1765# Since: 3.1
1766##
1767{ 'command': 'query-colo-status',
1768  'returns': 'COLOStatus',
1769  'if': 'CONFIG_REPLICATION' }
1770
1771##
1772# @migrate-recover:
1773#
1774# Provide a recovery migration stream URI.
1775#
1776# @uri: the URI to be used for the recovery of migration stream.
1777#
1778# Returns: nothing.
1779#
1780# Example:
1781#
1782# -> { "execute": "migrate-recover",
1783#      "arguments": { "uri": "tcp:192.168.1.200:12345" } }
1784# <- { "return": {} }
1785#
1786# Since: 3.0
1787##
1788{ 'command': 'migrate-recover',
1789  'data': { 'uri': 'str' },
1790  'allow-oob': true }
1791
1792##
1793# @migrate-pause:
1794#
1795# Pause a migration.  Currently it only supports postcopy.
1796#
1797# Returns: nothing.
1798#
1799# Example:
1800#
1801# -> { "execute": "migrate-pause" }
1802# <- { "return": {} }
1803#
1804# Since: 3.0
1805##
1806{ 'command': 'migrate-pause', 'allow-oob': true }
1807
1808##
1809# @UNPLUG_PRIMARY:
1810#
1811# Emitted from source side of a migration when migration state is
1812# WAIT_UNPLUG. Device was unplugged by guest operating system.  Device
1813# resources in QEMU are kept on standby to be able to re-plug it in
1814# case of migration failure.
1815#
1816# @device-id: QEMU device id of the unplugged device
1817#
1818# Since: 4.2
1819#
1820# Example:
1821#
1822# <- { "event": "UNPLUG_PRIMARY",
1823#      "data": { "device-id": "hostdev0" },
1824#      "timestamp": { "seconds": 1265044230, "microseconds": 450486 } }
1825##
1826{ 'event': 'UNPLUG_PRIMARY',
1827  'data': { 'device-id': 'str' } }
1828
1829##
1830# @DirtyRateVcpu:
1831#
1832# Dirty rate of vcpu.
1833#
1834# @id: vcpu index.
1835#
1836# @dirty-rate: dirty rate.
1837#
1838# Since: 6.2
1839##
1840{ 'struct': 'DirtyRateVcpu',
1841  'data': { 'id': 'int', 'dirty-rate': 'int64' } }
1842
1843##
1844# @DirtyRateStatus:
1845#
1846# Dirty page rate measurement status.
1847#
1848# @unstarted: measuring thread has not been started yet
1849#
1850# @measuring: measuring thread is running
1851#
1852# @measured: dirty page rate is measured and the results are available
1853#
1854# Since: 5.2
1855##
1856{ 'enum': 'DirtyRateStatus',
1857  'data': [ 'unstarted', 'measuring', 'measured'] }
1858
1859##
1860# @DirtyRateMeasureMode:
1861#
1862# Method used to measure dirty page rate.  Differences between
1863# available methods are explained in @calc-dirty-rate.
1864#
1865# @page-sampling: use page sampling
1866#
1867# @dirty-ring: use dirty ring
1868#
1869# @dirty-bitmap: use dirty bitmap
1870#
1871# Since: 6.2
1872##
1873{ 'enum': 'DirtyRateMeasureMode',
1874  'data': ['page-sampling', 'dirty-ring', 'dirty-bitmap'] }
1875
1876##
1877# @TimeUnit:
1878#
1879# Specifies unit in which time-related value is specified.
1880#
1881# @second: value is in seconds
1882#
1883# @millisecond: value is in milliseconds
1884#
# Since: 8.2
##
1888{ 'enum': 'TimeUnit',
1889  'data': ['second', 'millisecond'] }
1890
1891##
1892# @DirtyRateInfo:
1893#
1894# Information about measured dirty page rate.
1895#
1896# @dirty-rate: an estimate of the dirty page rate of the VM in units
1897#     of MiB/s.  Value is present only when @status is 'measured'.
1898#
1899# @status: current status of dirty page rate measurements
1900#
1901# @start-time: start time in units of second for calculation
1902#
1903# @calc-time: time period for which dirty page rate was measured,
1904#     expressed and rounded down to @calc-time-unit.
1905#
1906# @calc-time-unit: time unit of @calc-time  (Since 8.2)
1907#
1908# @sample-pages: number of sampled pages per GiB of guest memory.
1909#     Valid only in page-sampling mode (Since 6.1)
1910#
1911# @mode: mode that was used to measure dirty page rate (Since 6.2)
1912#
1913# @vcpu-dirty-rate: dirty rate for each vCPU if dirty-ring mode was
1914#     specified (Since 6.2)
1915#
1916# Since: 5.2
1917##
1918{ 'struct': 'DirtyRateInfo',
1919  'data': {'*dirty-rate': 'int64',
1920           'status': 'DirtyRateStatus',
1921           'start-time': 'int64',
1922           'calc-time': 'int64',
1923           'calc-time-unit': 'TimeUnit',
1924           'sample-pages': 'uint64',
1925           'mode': 'DirtyRateMeasureMode',
1926           '*vcpu-dirty-rate': [ 'DirtyRateVcpu' ] } }
1927
1928##
1929# @calc-dirty-rate:
1930#
1931# Start measuring dirty page rate of the VM.  Results can be retrieved
1932# with @query-dirty-rate after measurements are completed.
1933#
1934# Dirty page rate is the number of pages changed in a given time
1935# period expressed in MiB/s.  The following methods of calculation are
1936# available:
1937#
1938# 1. In page sampling mode, a random subset of pages are selected and
1939#    hashed twice: once at the beginning of measurement time period,
1940#    and once again at the end.  If two hashes for some page are
1941#    different, the page is counted as changed.  Since this method
1942#    relies on sampling and hashing, calculated dirty page rate is
1943#    only an estimate of its true value.  Increasing @sample-pages
1944#    improves estimation quality at the cost of higher computational
1945#    overhead.
1946#
1947# 2. Dirty bitmap mode captures writes to memory (for example by
1948#    temporarily revoking write access to all pages) and counting page
1949#    faults.  Information about modified pages is collected into a
1950#    bitmap, where each bit corresponds to one guest page.  This mode
1951#    requires that KVM accelerator property "dirty-ring-size" is *not*
1952#    set.
1953#
1954# 3. Dirty ring mode is similar to dirty bitmap mode, but the
1955#    information about modified pages is collected into ring buffer.
1956#    This mode tracks page modification per each vCPU separately.  It
1957#    requires that KVM accelerator property "dirty-ring-size" is set.
1958#
1959# @calc-time: time period for which dirty page rate is calculated.
1960#     By default it is specified in seconds, but the unit can be set
1961#     explicitly with @calc-time-unit.  Note that larger @calc-time
1962#     values will typically result in smaller dirty page rates because
1963#     page dirtying is a one-time event.  Once some page is counted
1964#     as dirty during @calc-time period, further writes to this page
1965#     will not increase dirty page rate anymore.
1966#
1967# @calc-time-unit: time unit in which @calc-time is specified.
1968#     By default it is seconds. (Since 8.2)
1969#
1970# @sample-pages: number of sampled pages per each GiB of guest memory.
1971#     Default value is 512.  For 4KiB guest pages this corresponds to
1972#     sampling ratio of 0.2%.  This argument is used only in page
1973#     sampling mode.  (Since 6.1)
1974#
1975# @mode: mechanism for tracking dirty pages.  Default value is
1976#     'page-sampling'.  Others are 'dirty-bitmap' and 'dirty-ring'.
1977#     (Since 6.1)
1978#
1979# Since: 5.2
1980#
1981# Example:
1982#
1983# -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 1,
1984#                                                 'sample-pages': 512} }
1985# <- { "return": {} }
1986#
1987# Measure dirty rate using dirty bitmap for 500 milliseconds:
1988#
1989# -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 500,
1990#     "calc-time-unit": "millisecond", "mode": "dirty-bitmap"} }
1991#
1992# <- { "return": {} }
1993##
1994{ 'command': 'calc-dirty-rate', 'data': {'calc-time': 'int64',
1995                                         '*calc-time-unit': 'TimeUnit',
1996                                         '*sample-pages': 'int',
1997                                         '*mode': 'DirtyRateMeasureMode'} }
1998
1999##
2000# @query-dirty-rate:
2001#
2002# Query results of the most recent invocation of @calc-dirty-rate.
2003#
2004# @calc-time-unit: time unit in which to report calculation time.
2005#     By default it is reported in seconds. (Since 8.2)
2006#
2007# Since: 5.2
2008#
2009# Examples:
2010#
# 1. Measurement is in progress:
#
# <- {"return": {"status": "measuring", "sample-pages": 512,
#     "mode": "page-sampling", "start-time": 1693900454, "calc-time": 10,
#     "calc-time-unit": "second"} }
#
# 2. Measurement has been completed:
#
# <- {"return": {"status": "measured", "sample-pages": 512, "dirty-rate": 108,
#     "mode": "page-sampling", "start-time": 1693900454, "calc-time": 10,
#     "calc-time-unit": "second"} }
2022##
2023{ 'command': 'query-dirty-rate', 'data': {'*calc-time-unit': 'TimeUnit' },
2024                                 'returns': 'DirtyRateInfo' }
2025
2026##
2027# @DirtyLimitInfo:
2028#
2029# Dirty page rate limit information of a virtual CPU.
2030#
2031# @cpu-index: index of a virtual CPU.
2032#
2033# @limit-rate: upper limit of dirty page rate (MB/s) for a virtual
2034#     CPU, 0 means unlimited.
2035#
2036# @current-rate: current dirty page rate (MB/s) for a virtual CPU.
2037#
2038# Since: 7.1
2039##
2040{ 'struct': 'DirtyLimitInfo',
2041  'data': { 'cpu-index': 'int',
2042            'limit-rate': 'uint64',
2043            'current-rate': 'uint64' } }
2044
2045##
2046# @set-vcpu-dirty-limit:
2047#
2048# Set the upper limit of dirty page rate for virtual CPUs.
2049#
2050# Requires KVM with accelerator property "dirty-ring-size" set.  A
2051# virtual CPU's dirty page rate is a measure of its memory load.  To
2052# observe dirty page rates, use @calc-dirty-rate.
2053#
2054# @cpu-index: index of a virtual CPU, default is all.
2055#
2056# @dirty-rate: upper limit of dirty page rate (MB/s) for virtual CPUs.
2057#
2058# Since: 7.1
2059#
2060# Example:
2061#
# -> {"execute": "set-vcpu-dirty-limit",
2063#     "arguments": { "dirty-rate": 200,
2064#                    "cpu-index": 1 } }
2065# <- { "return": {} }
2066##
2067{ 'command': 'set-vcpu-dirty-limit',
2068  'data': { '*cpu-index': 'int',
2069            'dirty-rate': 'uint64' } }
2070
2071##
2072# @cancel-vcpu-dirty-limit:
2073#
2074# Cancel the upper limit of dirty page rate for virtual CPUs.
2075#
2076# Cancel the dirty page limit for the vCPU which has been set with
2077# set-vcpu-dirty-limit command.  Note that this command requires
2078# support from dirty ring, same as the "set-vcpu-dirty-limit".
2079#
2080# @cpu-index: index of a virtual CPU, default is all.
2081#
2082# Since: 7.1
2083#
2084# Example:
2085#
# -> {"execute": "cancel-vcpu-dirty-limit",
2087#     "arguments": { "cpu-index": 1 } }
2088# <- { "return": {} }
2089##
2090{ 'command': 'cancel-vcpu-dirty-limit',
2091  'data': { '*cpu-index': 'int'} }
2092
2093##
2094# @query-vcpu-dirty-limit:
2095#
2096# Returns information about virtual CPU dirty page rate limits, if
2097# any.
2098#
2099# Since: 7.1
2100#
2101# Example:
2102#
2103# -> {"execute": "query-vcpu-dirty-limit"}
2104# <- {"return": [
2105#        { "limit-rate": 60, "current-rate": 3, "cpu-index": 0},
2106#        { "limit-rate": 60, "current-rate": 3, "cpu-index": 1}]}
2107##
2108{ 'command': 'query-vcpu-dirty-limit',
2109  'returns': [ 'DirtyLimitInfo' ] }
2110
2111##
2112# @MigrationThreadInfo:
2113#
# Information about migration threads
2115#
2116# @name: the name of migration thread
2117#
2118# @thread-id: ID of the underlying host thread
2119#
2120# Since: 7.2
2121##
2122{ 'struct': 'MigrationThreadInfo',
2123  'data': {'name': 'str',
2124           'thread-id': 'int'} }
2125
2126##
2127# @query-migrationthreads:
2128#
# Returns information about migration threads
2130#
2131# data: migration thread name
2132#
2133# Returns: information about migration threads
2134#
2135# Since: 7.2
2136##
2137{ 'command': 'query-migrationthreads',
2138  'returns': ['MigrationThreadInfo'] }
2139
2140##
2141# @snapshot-save:
2142#
2143# Save a VM snapshot
2144#
2145# @job-id: identifier for the newly created job
2146#
2147# @tag: name of the snapshot to create
2148#
2149# @vmstate: block device node name to save vmstate to
2150#
2151# @devices: list of block device node names to save a snapshot to
2152#
2153# Applications should not assume that the snapshot save is complete
2154# when this command returns.  The job commands / events must be used
2155# to determine completion and to fetch details of any errors that
2156# arise.
2157#
2158# Note that execution of the guest CPUs may be stopped during the time
2159# it takes to save the snapshot.  A future version of QEMU may ensure
2160# CPUs are executing continuously.
2161#
2162# It is strongly recommended that @devices contain all writable block
2163# device nodes if a consistent snapshot is required.
2164#
2165# If @tag already exists, an error will be reported
2166#
2167# Returns: nothing
2168#
2169# Example:
2170#
2171# -> { "execute": "snapshot-save",
2172#      "arguments": {
2173#         "job-id": "snapsave0",
2174#         "tag": "my-snap",
2175#         "vmstate": "disk0",
2176#         "devices": ["disk0", "disk1"]
2177#      }
2178#    }
2179# <- { "return": { } }
2180# <- {"event": "JOB_STATUS_CHANGE",
2181#     "timestamp": {"seconds": 1432121972, "microseconds": 744001},
2182#     "data": {"status": "created", "id": "snapsave0"}}
2183# <- {"event": "JOB_STATUS_CHANGE",
2184#     "timestamp": {"seconds": 1432122172, "microseconds": 744001},
2185#     "data": {"status": "running", "id": "snapsave0"}}
2186# <- {"event": "STOP",
2187#     "timestamp": {"seconds": 1432122372, "microseconds": 744001} }
2188# <- {"event": "RESUME",
2189#     "timestamp": {"seconds": 1432122572, "microseconds": 744001} }
2190# <- {"event": "JOB_STATUS_CHANGE",
2191#     "timestamp": {"seconds": 1432122772, "microseconds": 744001},
2192#     "data": {"status": "waiting", "id": "snapsave0"}}
2193# <- {"event": "JOB_STATUS_CHANGE",
2194#     "timestamp": {"seconds": 1432122972, "microseconds": 744001},
2195#     "data": {"status": "pending", "id": "snapsave0"}}
2196# <- {"event": "JOB_STATUS_CHANGE",
2197#     "timestamp": {"seconds": 1432123172, "microseconds": 744001},
2198#     "data": {"status": "concluded", "id": "snapsave0"}}
2199# -> {"execute": "query-jobs"}
2200# <- {"return": [{"current-progress": 1,
2201#                 "status": "concluded",
2202#                 "total-progress": 1,
2203#                 "type": "snapshot-save",
2204#                 "id": "snapsave0"}]}
2205#
2206# Since: 6.0
2207##
2208{ 'command': 'snapshot-save',
2209  'data': { 'job-id': 'str',
2210            'tag': 'str',
2211            'vmstate': 'str',
2212            'devices': ['str'] } }
2213
2214##
2215# @snapshot-load:
2216#
2217# Load a VM snapshot
2218#
2219# @job-id: identifier for the newly created job
2220#
2221# @tag: name of the snapshot to load.
2222#
2223# @vmstate: block device node name to load vmstate from
2224#
2225# @devices: list of block device node names to load a snapshot from
2226#
2227# Applications should not assume that the snapshot load is complete
2228# when this command returns.  The job commands / events must be used
2229# to determine completion and to fetch details of any errors that
2230# arise.
2231#
2232# Note that execution of the guest CPUs will be stopped during the
2233# time it takes to load the snapshot.
2234#
2235# It is strongly recommended that @devices contain all writable block
2236# device nodes that can have changed since the original @snapshot-save
2237# command execution.
2238#
2239# Returns: nothing
2240#
2241# Example:
2242#
2243# -> { "execute": "snapshot-load",
2244#      "arguments": {
2245#         "job-id": "snapload0",
2246#         "tag": "my-snap",
2247#         "vmstate": "disk0",
2248#         "devices": ["disk0", "disk1"]
2249#      }
2250#    }
2251# <- { "return": { } }
2252# <- {"event": "JOB_STATUS_CHANGE",
2253#     "timestamp": {"seconds": 1472124172, "microseconds": 744001},
2254#     "data": {"status": "created", "id": "snapload0"}}
2255# <- {"event": "JOB_STATUS_CHANGE",
2256#     "timestamp": {"seconds": 1472125172, "microseconds": 744001},
2257#     "data": {"status": "running", "id": "snapload0"}}
2258# <- {"event": "STOP",
2259#     "timestamp": {"seconds": 1472125472, "microseconds": 744001} }
2260# <- {"event": "RESUME",
2261#     "timestamp": {"seconds": 1472125872, "microseconds": 744001} }
2262# <- {"event": "JOB_STATUS_CHANGE",
2263#     "timestamp": {"seconds": 1472126172, "microseconds": 744001},
2264#     "data": {"status": "waiting", "id": "snapload0"}}
2265# <- {"event": "JOB_STATUS_CHANGE",
2266#     "timestamp": {"seconds": 1472127172, "microseconds": 744001},
2267#     "data": {"status": "pending", "id": "snapload0"}}
2268# <- {"event": "JOB_STATUS_CHANGE",
2269#     "timestamp": {"seconds": 1472128172, "microseconds": 744001},
2270#     "data": {"status": "concluded", "id": "snapload0"}}
2271# -> {"execute": "query-jobs"}
2272# <- {"return": [{"current-progress": 1,
2273#                 "status": "concluded",
2274#                 "total-progress": 1,
2275#                 "type": "snapshot-load",
2276#                 "id": "snapload0"}]}
2277#
2278# Since: 6.0
2279##
2280{ 'command': 'snapshot-load',
2281  'data': { 'job-id': 'str',
2282            'tag': 'str',
2283            'vmstate': 'str',
2284            'devices': ['str'] } }
2285
2286##
2287# @snapshot-delete:
2288#
2289# Delete a VM snapshot
2290#
2291# @job-id: identifier for the newly created job
2292#
2293# @tag: name of the snapshot to delete.
2294#
2295# @devices: list of block device node names to delete a snapshot from
2296#
2297# Applications should not assume that the snapshot delete is complete
2298# when this command returns.  The job commands / events must be used
2299# to determine completion and to fetch details of any errors that
2300# arise.
2301#
2302# Returns: nothing
2303#
2304# Example:
2305#
2306# -> { "execute": "snapshot-delete",
2307#      "arguments": {
2308#         "job-id": "snapdelete0",
2309#         "tag": "my-snap",
2310#         "devices": ["disk0", "disk1"]
2311#      }
2312#    }
2313# <- { "return": { } }
2314# <- {"event": "JOB_STATUS_CHANGE",
2315#     "timestamp": {"seconds": 1442124172, "microseconds": 744001},
2316#     "data": {"status": "created", "id": "snapdelete0"}}
2317# <- {"event": "JOB_STATUS_CHANGE",
2318#     "timestamp": {"seconds": 1442125172, "microseconds": 744001},
2319#     "data": {"status": "running", "id": "snapdelete0"}}
2320# <- {"event": "JOB_STATUS_CHANGE",
2321#     "timestamp": {"seconds": 1442126172, "microseconds": 744001},
2322#     "data": {"status": "waiting", "id": "snapdelete0"}}
2323# <- {"event": "JOB_STATUS_CHANGE",
2324#     "timestamp": {"seconds": 1442127172, "microseconds": 744001},
2325#     "data": {"status": "pending", "id": "snapdelete0"}}
2326# <- {"event": "JOB_STATUS_CHANGE",
2327#     "timestamp": {"seconds": 1442128172, "microseconds": 744001},
2328#     "data": {"status": "concluded", "id": "snapdelete0"}}
2329# -> {"execute": "query-jobs"}
2330# <- {"return": [{"current-progress": 1,
2331#                 "status": "concluded",
2332#                 "total-progress": 1,
2333#                 "type": "snapshot-delete",
2334#                 "id": "snapdelete0"}]}
2335#
2336# Since: 6.0
2337##
2338{ 'command': 'snapshot-delete',
2339  'data': { 'job-id': 'str',
2340            'tag': 'str',
2341            'devices': ['str'] } }
2342