xref: /openbmc/qemu/qapi/migration.json (revision 154fd4d1)
1# -*- Mode: Python -*-
2# vim: filetype=python
3#
4
5##
6# = Migration
7##
8
9{ 'include': 'common.json' }
10{ 'include': 'sockets.json' }
11
12##
13# @MigrationStats:
14#
15# Detailed migration status.
16#
17# @transferred: number of bytes already transferred to the target VM
18#
19# @remaining: number of bytes remaining to be transferred to the
20#     target VM
21#
22# @total: total number of bytes involved in the migration process
23#
24# @duplicate: number of duplicate (zero) pages (since 1.2)
25#
26# @normal: number of normal pages (since 1.2)
27#
28# @normal-bytes: number of normal bytes sent (since 1.2)
29#
30# @dirty-pages-rate: number of pages dirtied per second by the guest
31#     (since 1.3)
32#
33# @mbps: throughput in megabits/sec.  (since 1.6)
34#
35# @dirty-sync-count: number of times that dirty ram was synchronized
36#     (since 2.1)
37#
38# @postcopy-requests: The number of page requests received from the
39#     destination (since 2.7)
40#
41# @page-size: The number of bytes per page for the various page-based
42#     statistics (since 2.10)
43#
44# @multifd-bytes: The number of bytes sent through multifd (since 3.0)
45#
46# @pages-per-second: the number of memory pages transferred per second
47#     (Since 4.0)
48#
49# @precopy-bytes: The number of bytes sent in the pre-copy phase
50#     (since 7.0).
51#
52# @downtime-bytes: The number of bytes sent while the guest is paused
53#     (since 7.0).
54#
55# @postcopy-bytes: The number of bytes sent during the post-copy phase
56#     (since 7.0).
57#
58# @dirty-sync-missed-zero-copy: Number of times dirty RAM
59#     synchronization could not avoid copying dirty pages.  This is
60#     between 0 and @dirty-sync-count * @multifd-channels.  (since
61#     7.1)
62#
63# Since: 0.14
64##
65{ 'struct': 'MigrationStats',
66  'data': {'transferred': 'int', 'remaining': 'int', 'total': 'int' ,
67           'duplicate': 'int',
68           'normal': 'int',
69           'normal-bytes': 'int', 'dirty-pages-rate': 'int',
70           'mbps': 'number', 'dirty-sync-count': 'int',
71           'postcopy-requests': 'int', 'page-size': 'int',
72           'multifd-bytes': 'uint64', 'pages-per-second': 'uint64',
73           'precopy-bytes': 'uint64', 'downtime-bytes': 'uint64',
74           'postcopy-bytes': 'uint64',
75           'dirty-sync-missed-zero-copy': 'uint64' } }
76
77##
78# @XBZRLECacheStats:
79#
80# Detailed XBZRLE migration cache statistics
81#
82# @cache-size: XBZRLE cache size
83#
84# @bytes: number of bytes already transferred to the target VM
85#
86# @pages: number of pages transferred to the target VM
87#
88# @cache-miss: number of cache misses
89#
90# @cache-miss-rate: rate of cache misses (since 2.1)
91#
92# @encoding-rate: rate of encoded bytes (since 5.1)
93#
94# @overflow: number of overflows
95#
96# Since: 1.2
97##
98{ 'struct': 'XBZRLECacheStats',
99  'data': {'cache-size': 'size', 'bytes': 'int', 'pages': 'int',
100           'cache-miss': 'int', 'cache-miss-rate': 'number',
101           'encoding-rate': 'number', 'overflow': 'int' } }
102
103##
104# @CompressionStats:
105#
106# Detailed migration compression statistics
107#
108# @pages: number of pages compressed and transferred to the target VM
109#
110# @busy: count of times that no free thread was available to compress
111#     data
112#
113# @busy-rate: rate of thread busy
114#
115# @compressed-size: number of bytes after compression
116#
117# @compression-rate: rate of compressed size
118#
119# Since: 3.1
120##
121{ 'struct': 'CompressionStats',
122  'data': {'pages': 'int', 'busy': 'int', 'busy-rate': 'number',
123           'compressed-size': 'int', 'compression-rate': 'number' } }
124
125##
126# @MigrationStatus:
127#
128# An enumeration of migration status.
129#
130# @none: no migration has ever happened.
131#
132# @setup: migration process has been initiated.
133#
134# @cancelling: in the process of cancelling migration.
135#
136# @cancelled: cancelling migration is finished.
137#
138# @active: in the process of doing migration.
139#
140# @postcopy-active: like active, but now in postcopy mode.  (since
141#     2.5)
142#
143# @postcopy-paused: during postcopy but paused.  (since 3.0)
144#
145# @postcopy-recover-setup: setup phase for a postcopy recovery
146#     process, preparing for a recovery phase to start.  (since 9.1)
147#
148# @postcopy-recover: trying to recover from a paused postcopy.  (since
149#     3.0)
150#
151# @completed: migration is finished.
152#
153# @failed: some error occurred during migration process.
154#
155# @colo: VM is in the process of fault tolerance; the VM cannot enter
156#     this state unless the colo capability is enabled for migration.
157#     (since 2.8)
158#
159# @pre-switchover: Paused before device serialisation.  (since 2.11)
160#
161# @device: During device serialisation when pause-before-switchover is
162#     enabled (since 2.11)
163#
164# @wait-unplug: wait for device unplug request by guest OS to be
165#     completed.  (since 4.2)
166#
167# Since: 2.3
168##
169{ 'enum': 'MigrationStatus',
170  'data': [ 'none', 'setup', 'cancelling', 'cancelled',
171            'active', 'postcopy-active', 'postcopy-paused',
172            'postcopy-recover-setup',
173            'postcopy-recover', 'completed', 'failed', 'colo',
174            'pre-switchover', 'device', 'wait-unplug' ] }
175##
176# @VfioStats:
177#
178# Detailed VFIO devices migration statistics
179#
180# @transferred: number of bytes transferred to the target VM by VFIO
181#     devices
182#
183# Since: 5.2
184##
185{ 'struct': 'VfioStats',
186  'data': {'transferred': 'int' } }
187
188##
189# @MigrationInfo:
190#
191# Information about current migration process.
192#
193# @status: @MigrationStatus describing the current migration status.
194#     If this field is not returned, no migration process has been
195#     initiated
196#
197# @ram: @MigrationStats containing detailed migration status, only
198#     returned if status is 'active' or 'completed' (since 1.2)
199#
200# @xbzrle-cache: @XBZRLECacheStats containing detailed XBZRLE
201#     migration statistics, only returned if XBZRLE feature is on and
202#     status is 'active' or 'completed' (since 1.2)
203#
204# @total-time: total time in milliseconds since migration started.
205#     If migration has ended, it returns the total migration time.
206#     (since 1.2)
207#
208# @downtime: only present when migration finishes correctly; total
209#     downtime in milliseconds for the guest.  (since 1.3)
210#
211# @expected-downtime: only present while migration is active; expected
212#     downtime in milliseconds for the guest, based on the last walk
213#     of the dirty bitmap.  (since 1.3)
214#
215# @setup-time: amount of setup time in milliseconds *before* the
216#     iterations begin but *after* the QMP command is issued.  This is
217#     designed to provide an accounting of any activities (such as
218#     RDMA pinning) which may be expensive, but do not actually occur
219#     during the iterative migration rounds themselves.  (since 1.6)
220#
221# @cpu-throttle-percentage: percentage of time guest cpus are being
222#     throttled during auto-converge.  This is only present when
223#     auto-converge has started throttling guest cpus.  (Since 2.7)
224#
225# @error-desc: the human readable error description string.  Clients
226#     should not attempt to parse the error strings.  (Since 2.7)
227#
228# @postcopy-blocktime: total time when all vCPUs were blocked during
229#     postcopy live migration.  This is only present when the
230#     postcopy-blocktime migration capability is enabled.  (Since 3.0)
231#
232# @postcopy-vcpu-blocktime: list of the postcopy blocktime per vCPU.
233#     This is only present when the postcopy-blocktime migration
234#     capability is enabled.  (Since 3.0)
235#
236# @socket-address: Only present for TCP migration, so the real port
237#     in use can be determined.  (Since 4.0)
238#
239# @vfio: @VfioStats containing detailed VFIO devices migration
240#     statistics, only returned if VFIO device is present, migration
241#     is supported by all VFIO devices and status is 'active' or
242#     'completed' (since 5.2)
243#
244# @blocked-reasons: A list of reasons an outgoing migration is
245#     blocked.  Present and non-empty when migration is blocked.
246#     (since 6.0)
247#
248# @dirty-limit-throttle-time-per-round: Maximum throttle time (in
249#     microseconds) of virtual CPUs in each dirty ring full round, which
250#     shows how MigrationCapability dirty-limit affects the guest
251#     during live migration.  (Since 8.1)
252#
253# @dirty-limit-ring-full-time: Estimated average dirty ring full time
254#     (in microseconds) for each dirty ring full round.  The value
255#     equals the dirty ring memory size divided by the average dirty
256#     page rate of the virtual CPU, which can be used to observe the
257#     average memory load of the virtual CPU indirectly.  Note that
258#     zero means guest doesn't dirty memory.  (Since 8.1)
259#
260# Since: 0.14
261##
262{ 'struct': 'MigrationInfo',
263  'data': {'*status': 'MigrationStatus', '*ram': 'MigrationStats',
264           '*vfio': 'VfioStats',
265           '*xbzrle-cache': 'XBZRLECacheStats',
266           '*total-time': 'int',
267           '*expected-downtime': 'int',
268           '*downtime': 'int',
269           '*setup-time': 'int',
270           '*cpu-throttle-percentage': 'int',
271           '*error-desc': 'str',
272           '*blocked-reasons': ['str'],
273           '*postcopy-blocktime': 'uint32',
274           '*postcopy-vcpu-blocktime': ['uint32'],
275           '*socket-address': ['SocketAddress'],
276           '*dirty-limit-throttle-time-per-round': 'uint64',
277           '*dirty-limit-ring-full-time': 'uint64'} }
278
279##
280# @query-migrate:
281#
282# Returns information about the current migration process.  If migration
283# is active there will be another json-object with RAM migration
284# status.
285#
286# Returns: @MigrationInfo
287#
288# Since: 0.14
289#
290# .. qmp-example::
291#    :title: Before the first migration
292#
293#     -> { "execute": "query-migrate" }
294#     <- { "return": {} }
295#
296# .. qmp-example::
297#    :title: Migration is done and has succeeded
298#
299#     -> { "execute": "query-migrate" }
300#     <- { "return": {
301#             "status": "completed",
302#             "total-time":12345,
303#             "setup-time":12345,
304#             "downtime":12345,
305#             "ram":{
306#               "transferred":123,
307#               "remaining":123,
308#               "total":246,
309#               "duplicate":123,
310#               "normal":123,
311#               "normal-bytes":123456,
312#               "dirty-sync-count":15
313#             }
314#          }
315#        }
316#
317# .. qmp-example::
318#    :title: Migration is done and has failed
319#
320#     -> { "execute": "query-migrate" }
321#     <- { "return": { "status": "failed" } }
322#
323# .. qmp-example::
324#    :title: Migration is being performed
325#
326#     -> { "execute": "query-migrate" }
327#     <- {
328#           "return":{
329#              "status":"active",
330#              "total-time":12345,
331#              "setup-time":12345,
332#              "expected-downtime":12345,
333#              "ram":{
334#                 "transferred":123,
335#                 "remaining":123,
336#                 "total":246,
337#                 "duplicate":123,
338#                 "normal":123,
339#                 "normal-bytes":123456,
340#                 "dirty-sync-count":15
341#              }
342#           }
343#        }
344#
345# .. qmp-example::
346#    :title: Migration is being performed and XBZRLE is active
347#
348#     -> { "execute": "query-migrate" }
349#     <- {
350#           "return":{
351#              "status":"active",
352#              "total-time":12345,
353#              "setup-time":12345,
354#              "expected-downtime":12345,
355#              "ram":{
356#                 "total":1057024,
357#                 "remaining":1053304,
358#                 "transferred":3720,
359#                 "duplicate":10,
360#                 "normal":3333,
361#                 "normal-bytes":3412992,
362#                 "dirty-sync-count":15
363#              },
364#              "xbzrle-cache":{
365#                 "cache-size":67108864,
366#                 "bytes":20971520,
367#                 "pages":2444343,
368#                 "cache-miss":2244,
369#                 "cache-miss-rate":0.123,
370#                 "encoding-rate":80.1,
371#                 "overflow":34434
372#              }
373#           }
374#        }
375##
376{ 'command': 'query-migrate', 'returns': 'MigrationInfo' }
377
378##
379# @MigrationCapability:
380#
381# Migration capabilities enumeration
382#
383# @xbzrle: Migration supports xbzrle (Xor Based Zero Run Length
384#     Encoding).  This feature allows us to minimize migration traffic
385#     for certain workloads, by sending compressed differences of the
386#     pages
387#
388# @rdma-pin-all: Controls whether the entire VM memory
389#     footprint is mlock()'d on demand or all at once.  Refer to
390#     docs/rdma.txt for usage.  Disabled by default.  (since 2.0)
391#
392# @zero-blocks: During storage migration encode blocks of zeroes
393#     efficiently.  This essentially saves 1MB of zeroes per block on
394#     the wire.  Enabling requires source and target VM to support
395#     this feature.  To enable it is sufficient to enable the
396#     capability on the source VM.  The feature is disabled by
397#     default.  (since 1.6)
398#
399# @events: generate events for each migration state change (since 2.4)
400#
401# @auto-converge: If enabled, QEMU will automatically throttle down
402#     the guest to speed up convergence of RAM migration.  (since 1.6)
403#
404# @postcopy-ram: Start executing on the migration target before all of
405#     RAM has been migrated, pulling the remaining pages along as
406#     needed.  The capability must have the same setting on both source
407#     and target or migration will not even start.  NOTE: If the
408#     migration fails during postcopy the VM will fail.  (since 2.6)
409#
410# @x-colo: If enabled, migration will never end, and the state of the
411#     VM on the primary side will be migrated continuously to the VM
412#     on the secondary side.  This process is called COarse-Grain LOck
413#     Stepping (COLO) for Non-stop Service.  (since 2.8)
414#
415# @release-ram: if enabled, QEMU will free the migrated RAM pages on
416#     the source during postcopy-ram migration.  (since 2.9)
417#
418# @return-path: If enabled, migration will use the return path even
419#     for precopy.  (since 2.10)
420#
421# @pause-before-switchover: Pause outgoing migration before
422#     serialising device state and before disabling block IO (since
423#     2.11)
424#
425# @multifd: Use more than one fd for migration (since 4.0)
426#
427# @dirty-bitmaps: If enabled, QEMU will migrate named dirty bitmaps.
428#     (since 2.12)
429#
430# @postcopy-blocktime: Calculate downtime for postcopy live migration
431#     (since 3.0)
432#
433# @late-block-activate: If enabled, the destination will not activate
434#     block devices (and thus take locks) immediately at the end of
435#     migration.  (since 3.0)
436#
437# @x-ignore-shared: If enabled, QEMU will not migrate shared memory
438#     that is accessible on the destination machine.  (since 4.0)
439#
440# @validate-uuid: Send the UUID of the source to allow the destination
441#     to ensure it is the same.  (since 4.2)
442#
443# @background-snapshot: If enabled, the migration stream will be a
444#     snapshot of the VM exactly at the point when the migration
445#     procedure starts.  The VM RAM is saved while the VM keeps running.
446#     (since 6.0)
447#
448# @zero-copy-send: Controls behavior on sending memory pages on
449#     migration.  When true, enables a zero-copy mechanism for sending
450#     memory pages, if host supports it.  Requires that QEMU be
451#     permitted to use locked memory for guest RAM pages.  (since 7.1)
452#
453# @postcopy-preempt: If enabled, the migration process will allow
454#     postcopy requests to preempt precopy stream, so postcopy
455#     requests will be handled faster.  This is a performance feature
456#     and should not affect the correctness of postcopy migration.
457#     (since 7.1)
458#
459# @switchover-ack: If enabled, migration will not stop the source VM
460#     and complete the migration until an ACK is received from the
461#     destination that it's OK to do so.  Exactly when this ACK is
462#     sent depends on the migrated devices that use this feature.  For
463#     example, a device can use it to make sure some of its data is
464#     sent and loaded in the destination before doing switchover.
465#     This can reduce downtime if devices that support this capability
466#     are present.  'return-path' capability must be enabled to use
467#     it.  (since 8.1)
468#
469# @dirty-limit: If enabled, migration will throttle vCPUs as needed to
470#     keep their dirty page rate within @vcpu-dirty-limit.  This can
471#     improve responsiveness of large guests during live migration,
472#     and can result in more stable read performance.  Requires KVM
473#     with accelerator property "dirty-ring-size" set.  (Since 8.1)
474#
475# @mapped-ram: Migrate using fixed offsets in the migration file for
476#     each RAM page.  Requires a migration URI that supports seeking,
477#     such as a file.  (since 9.0)
478#
479# Features:
480#
481# @unstable: Members @x-colo and @x-ignore-shared are experimental.
482#
483# Since: 1.2
484##
485{ 'enum': 'MigrationCapability',
486  'data': ['xbzrle', 'rdma-pin-all', 'auto-converge', 'zero-blocks',
487           'events', 'postcopy-ram',
488           { 'name': 'x-colo', 'features': [ 'unstable' ] },
489           'release-ram',
490           'return-path', 'pause-before-switchover', 'multifd',
491           'dirty-bitmaps', 'postcopy-blocktime', 'late-block-activate',
492           { 'name': 'x-ignore-shared', 'features': [ 'unstable' ] },
493           'validate-uuid', 'background-snapshot',
494           'zero-copy-send', 'postcopy-preempt', 'switchover-ack',
495           'dirty-limit', 'mapped-ram'] }
496
497##
498# @MigrationCapabilityStatus:
499#
500# Migration capability information
501#
502# @capability: capability enum
503#
504# @state: capability state bool
505#
506# Since: 1.2
507##
508{ 'struct': 'MigrationCapabilityStatus',
509  'data': { 'capability': 'MigrationCapability', 'state': 'bool' } }
510
511##
512# @migrate-set-capabilities:
513#
514# Enable/Disable the following migration capabilities (like xbzrle)
515#
516# @capabilities: json array of capability modifications to make
517#
518# Since: 1.2
519#
520# .. qmp-example::
521#
522#     -> { "execute": "migrate-set-capabilities" , "arguments":
523#          { "capabilities": [ { "capability": "xbzrle", "state": true } ] } }
524#     <- { "return": {} }
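#
# The same pattern applies to any other capability; as an
# illustrative sketch, enabling @mapped-ram would look like this:
#
# .. qmp-example::
#
#     -> { "execute": "migrate-set-capabilities" , "arguments":
#          { "capabilities": [ { "capability": "mapped-ram", "state": true } ] } }
#     <- { "return": {} }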
525##
526{ 'command': 'migrate-set-capabilities',
527  'data': { 'capabilities': ['MigrationCapabilityStatus'] } }
528
529##
530# @query-migrate-capabilities:
531#
532# Returns information about the current migration capabilities status
533#
534# Returns: a list of @MigrationCapabilityStatus
535#
536# Since: 1.2
537#
538# .. qmp-example::
539#
540#     -> { "execute": "query-migrate-capabilities" }
541#     <- { "return": [
542#           {"state": false, "capability": "xbzrle"},
543#           {"state": false, "capability": "rdma-pin-all"},
544#           {"state": false, "capability": "auto-converge"},
545#           {"state": false, "capability": "zero-blocks"},
546#           {"state": true, "capability": "events"},
547#           {"state": false, "capability": "postcopy-ram"},
548#           {"state": false, "capability": "x-colo"}
549#        ]}
550##
551{ 'command': 'query-migrate-capabilities', 'returns': ['MigrationCapabilityStatus'] }
552
553##
554# @MultiFDCompression:
555#
556# An enumeration of multifd compression methods.
557#
558# @none: no compression.
559#
560# @zlib: use zlib compression method.
561#
562# @zstd: use zstd compression method.
563#
564# @qpl: use qpl compression method.  Query Processing Library (qpl) is
565#     based on the deflate compression algorithm and uses the Intel
566#     In-Memory Analytics Accelerator (IAA) for accelerated compression
567#     and decompression.  (Since 9.1)
568#
569# @uadk: use UADK library compression method.  (Since 9.1)
570#
571# Since: 5.0
572##
573{ 'enum': 'MultiFDCompression',
574  'prefix': 'MULTIFD_COMPRESSION',
575  'data': [ 'none', 'zlib',
576            { 'name': 'zstd', 'if': 'CONFIG_ZSTD' },
577            { 'name': 'qpl', 'if': 'CONFIG_QPL' },
578            { 'name': 'uadk', 'if': 'CONFIG_UADK' } ] }
579
580##
581# @MigMode:
582#
583# @normal: the original form of migration.  (since 8.2)
584#
585# @cpr-reboot: The migrate command stops the VM and saves state to the
586#     URI.  After quitting QEMU, the user resumes by running QEMU
587#     -incoming.
588#
589#     This mode allows the user to quit QEMU, optionally update and
590#     reboot the OS, and restart QEMU.  If the user reboots, the URI
591#     must persist across the reboot, such as by using a file.
592#
593#     Unlike normal mode, the use of certain local storage options
594#     does not block the migration, but the user must not modify the
595#     contents of guest block devices between the quit and restart.
596#
597#     This mode supports VFIO devices provided the user first puts the
598#     guest in the suspended runstate, such as by issuing
599#     guest-suspend-ram to the QEMU guest agent.
600#
601#     Best performance is achieved when the memory backend is shared
602#     and the @x-ignore-shared migration capability is set, but this
603#     is not required.  Further, if the user reboots before restarting
604#     such a configuration, the shared memory must persist across the
605#     reboot, such as by backing it with a dax device.
606#
607#     @cpr-reboot may not be used with postcopy, background-snapshot,
608#     or COLO.
609#
610#     (since 8.2)
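#
#     As an illustrative sketch (the file name below is an assumed
#     example, not a requirement), a cpr-reboot migration could be
#     driven like this:
#
#     .. qmp-example::
#
#         -> { "execute": "migrate-set-parameters",
#              "arguments": { "mode": "cpr-reboot" } }
#         <- { "return": {} }
#         -> { "execute": "migrate",
#              "arguments": { "uri": "file:/var/tmp/vm.cpr" } }
#         <- { "return": {} }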
611##
612{ 'enum': 'MigMode',
613  'data': [ 'normal', 'cpr-reboot' ] }
614
615##
616# @ZeroPageDetection:
617#
618# @none: Do not perform zero page checking.
619#
620# @legacy: Perform zero page checking in main migration thread.
621#
622# @multifd: Perform zero page checking in multifd sender thread if
623#     multifd migration is enabled, else in the main migration thread
624#     as for @legacy.
625#
626# Since: 9.0
627##
628{ 'enum': 'ZeroPageDetection',
629  'data': [ 'none', 'legacy', 'multifd' ] }
630
631##
632# @BitmapMigrationBitmapAliasTransform:
633#
634# @persistent: If present, the bitmap will be made persistent or
635#     transient depending on this parameter.
636#
637# Since: 6.0
638##
639{ 'struct': 'BitmapMigrationBitmapAliasTransform',
640  'data': {
641      '*persistent': 'bool'
642  } }
643
644##
645# @BitmapMigrationBitmapAlias:
646#
647# @name: The name of the bitmap.
648#
649# @alias: An alias name for migration (for example the bitmap name on
650#     the opposite side).
651#
652# @transform: Allows the modification of the migrated bitmap.  (since
653#     6.0)
654#
655# Since: 5.2
656##
657{ 'struct': 'BitmapMigrationBitmapAlias',
658  'data': {
659      'name': 'str',
660      'alias': 'str',
661      '*transform': 'BitmapMigrationBitmapAliasTransform'
662  } }
663
664##
665# @BitmapMigrationNodeAlias:
666#
667# Maps a block node name and the bitmaps it has to aliases for dirty
668# bitmap migration.
669#
670# @node-name: A block node name.
671#
672# @alias: An alias block node name for migration (for example the node
673#     name on the opposite side).
674#
675# @bitmaps: Mappings for the bitmaps on this node.
676#
677# Since: 5.2
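#
# As an illustrative sketch (node, alias and bitmap names are assumed
# examples), such a mapping is passed to @migrate-set-parameters as:
#
# .. qmp-example::
#
#     -> { "execute": "migrate-set-parameters",
#          "arguments": {
#              "block-bitmap-mapping": [
#                  { "node-name": "node0",
#                    "alias": "node-alias0",
#                    "bitmaps": [
#                        { "name": "bitmap0",
#                          "alias": "bitmap-alias0" } ] } ] } }
#     <- { "return": {} }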
678##
679{ 'struct': 'BitmapMigrationNodeAlias',
680  'data': {
681      'node-name': 'str',
682      'alias': 'str',
683      'bitmaps': [ 'BitmapMigrationBitmapAlias' ]
684  } }
685
686##
687# @MigrationParameter:
688#
689# Migration parameters enumeration
690#
691# @announce-initial: Initial delay (in milliseconds) before sending
692#     the first announce (Since 4.0)
693#
694# @announce-max: Maximum delay (in milliseconds) between packets in
695#     the announcement (Since 4.0)
696#
697# @announce-rounds: Number of self-announce packets sent after
698#     migration (Since 4.0)
699#
700# @announce-step: Increase in delay (in milliseconds) between
701#     subsequent packets in the announcement (Since 4.0)
702#
703# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
704#     bytes_xfer_period to trigger throttling.  It is expressed as
705#     a percentage.  The default value is 50.  (Since 5.0)
706#
707# @cpu-throttle-initial: Initial percentage of time guest cpus are
708#     throttled when migration auto-converge is activated.  The
709#     default value is 20.  (Since 2.7)
710#
711# @cpu-throttle-increment: throttle percentage increase each time
712#     auto-converge detects that migration is not making progress.
713#     The default value is 10.  (Since 2.7)
714#
715# @cpu-throttle-tailslow: Make CPU throttling slower at the tail
716#     stage.  At the tail stage of throttling, the guest is very
717#     sensitive to CPU percentage while the @cpu-throttle-increment
718#     is usually excessive.  If this parameter is true, the ideal CPU
719#     percentage for the guest is computed, which may make the dirty
720#     rate exactly match the dirty rate threshold.  The smaller of
721#     the increment specified by @cpu-throttle-increment and the one
722#     derived from the ideal CPU percentage is then used.  This stays
723#     compatible with traditional throttling while keeping the
724#     throttle increment from becoming excessive at the tail stage.
725#     The default value is false.  (Since 5.1)
727#
728# @tls-creds: ID of the 'tls-creds' object that provides credentials
729#     for establishing a TLS connection over the migration data
730#     channel.  On the outgoing side of the migration, the credentials
731#     must be for a 'client' endpoint, while for the incoming side the
732#     credentials must be for a 'server' endpoint.  Setting this to a
733#     non-empty string enables TLS for all migrations.  An empty
734#     string means that QEMU will use plain text mode for migration,
735#     rather than TLS.  (Since 2.7)
736#
737# @tls-hostname: migration target's hostname for validating the
738#     server's x509 certificate identity.  If empty, QEMU will use the
739#     hostname from the migration URI, if any.  A non-empty value is
740#     required when using x509 based TLS credentials and the migration
741#     URI does not include a hostname, such as fd: or exec: based
742#     migration.  (Since 2.7)
743#
744#     Note: empty value works only since 2.9.
745#
746# @tls-authz: ID of the 'authz' object subclass that provides access
747#     control checking of the TLS x509 certificate distinguished name.
748#     This object is only resolved at time of use, so can be deleted
749#     and recreated on the fly while the migration server is active.
750#     If missing, it will default to denying access (Since 4.0)
751#
752# @max-bandwidth: maximum speed for migration, in bytes per second.
753#     (Since 2.8)
754#
755# @avail-switchover-bandwidth: the available bandwidth that migration
756#     can use during the switchover phase.  NOTE!  This does not limit
757#     the bandwidth during switchover, but is only used in calculations
758#     when deciding whether to switch over.  By default, this value is
759#     zero, which means QEMU will estimate the bandwidth
760#     automatically.  Set this when the estimated value is not
761#     accurate, but the user can guarantee such bandwidth is
762#     available when switching over.  When specified correctly, this
763#     can make the switchover decision much more accurate.
764#     (Since 8.2)
765#
766# @downtime-limit: maximum tolerated downtime for migration, in
767#     milliseconds.  (Since 2.8)
768#
769# @x-checkpoint-delay: The delay time (in ms) between two COLO
770#     checkpoints in periodic mode.  (Since 2.8)
771#
772# @multifd-channels: Number of channels used to migrate data in
773#     parallel.  This is the same as the number of sockets
774#     used for migration.  The default value is 2 (since 4.0)
775#
776# @xbzrle-cache-size: cache size to be used by XBZRLE migration.  It
777#     needs to be a multiple of the target page size and a power of 2
778#     (Since 2.11)
779#
780# @max-postcopy-bandwidth: Background transfer bandwidth during
781#     postcopy.  Defaults to 0 (unlimited).  In bytes per second.
782#     (Since 3.0)
783#
784# @max-cpu-throttle: maximum cpu throttle percentage.  Defaults to 99.
785#     (Since 3.1)
786#
787# @multifd-compression: Which compression method to use.  Defaults to
788#     none.  (Since 5.0)
789#
790# @multifd-zlib-level: Set the compression level to be used in live
791#     migration.  The compression level is an integer between 0 and 9,
792#     where 0 means no compression, 1 means the best compression
793#     speed, and 9 means best compression ratio, which will consume
794#     more CPU.  Defaults to 1.  (Since 5.0)
795#
796# @multifd-zstd-level: Set the compression level to be used in live
797#     migration.  The compression level is an integer between 0 and 20,
798#     where 0 means no compression, 1 means the best compression
799#     speed, and 20 means best compression ratio which will consume
800#     more CPU.  Defaults to 1.  (Since 5.0)
801#
802# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
803#     aliases for the purpose of dirty bitmap migration.  Such aliases
804#     may for example be the corresponding names on the opposite side.
805#     The mapping must be one-to-one, but not necessarily complete: On
806#     the source, unmapped bitmaps and all bitmaps on unmapped nodes
807#     will be ignored.  On the destination, encountering an unmapped
808#     alias in the incoming migration stream will result in a report,
809#     and all further bitmap migration data will then be discarded.
810#     Note that the destination does not know about bitmaps it does
811#     not receive, so there is no limitation or requirement regarding
812#     the number of bitmaps received, or how they are named, or on
813#     which nodes they are placed.  By default (when this parameter
814#     has never been set), bitmap names are mapped to themselves.
815#     Nodes are mapped to their block device name if there is one, and
816#     to their node name otherwise.  (Since 5.2)
817#
818# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty
819#     limit during live migration.  Should be in the range 1 to
820#     1000ms.  Defaults to 1000ms.  (Since 8.1)
821#
822# @vcpu-dirty-limit: Dirty page rate limit (MB/s) during live migration.
823#     Defaults to 1.  (Since 8.1)
824#
825# @mode: Migration mode.  See description in @MigMode.  Default is
826#     'normal'.  (Since 8.2)
827#
828# @zero-page-detection: Whether and how to detect zero pages.
829#     See description in @ZeroPageDetection.  Default is 'multifd'.
830#     (since 9.0)
831#
832# @direct-io: Open migration files with O_DIRECT when possible.  This
833#     only has effect if the @mapped-ram capability is enabled.
834#     (Since 9.1)
835#
836# Features:
837#
838# @unstable: Members @x-checkpoint-delay and
839#     @x-vcpu-dirty-limit-period are experimental.
840#
841# Since: 2.4
842##
843{ 'enum': 'MigrationParameter',
844  'data': ['announce-initial', 'announce-max',
845           'announce-rounds', 'announce-step',
846           'throttle-trigger-threshold',
847           'cpu-throttle-initial', 'cpu-throttle-increment',
848           'cpu-throttle-tailslow',
849           'tls-creds', 'tls-hostname', 'tls-authz', 'max-bandwidth',
850           'avail-switchover-bandwidth', 'downtime-limit',
851           { 'name': 'x-checkpoint-delay', 'features': [ 'unstable' ] },
852           'multifd-channels',
853           'xbzrle-cache-size', 'max-postcopy-bandwidth',
854           'max-cpu-throttle', 'multifd-compression',
855           'multifd-zlib-level', 'multifd-zstd-level',
856           'block-bitmap-mapping',
857           { 'name': 'x-vcpu-dirty-limit-period', 'features': ['unstable'] },
858           'vcpu-dirty-limit',
859           'mode',
860           'zero-page-detection',
861           'direct-io'] }
862
863##
864# @MigrateSetParameters:
865#
866# @announce-initial: Initial delay (in milliseconds) before sending
867#     the first announce (Since 4.0)
868#
869# @announce-max: Maximum delay (in milliseconds) between packets in
870#     the announcement (Since 4.0)
871#
872# @announce-rounds: Number of self-announce packets sent after
873#     migration (Since 4.0)
874#
875# @announce-step: Increase in delay (in milliseconds) between
876#     subsequent packets in the announcement (Since 4.0)
877#
878# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
879#     bytes_xfer_period to trigger throttling.  It is expressed as
880#     a percentage.  The default value is 50.  (Since 5.0)
881#
882# @cpu-throttle-initial: Initial percentage of time guest cpus are
883#     throttled when migration auto-converge is activated.  The
884#     default value is 20.  (Since 2.7)
885#
886# @cpu-throttle-increment: throttle percentage increase each time
887#     auto-converge detects that migration is not making progress.
888#     The default value is 10.  (Since 2.7)
889#
890# @cpu-throttle-tailslow: Make CPU throttling slower at the tail
891#     stage.  At the tail stage of throttling, the guest is very
892#     sensitive to CPU percentage while the @cpu-throttle-increment
893#     is usually excessive.  If this parameter is true, the ideal CPU
894#     percentage for the guest is computed, which may make the dirty
895#     rate exactly match the dirty rate threshold.  The smaller of
896#     the increment specified by @cpu-throttle-increment and the one
897#     derived from the ideal CPU percentage is then used.  This stays
898#     compatible with traditional throttling while keeping the
899#     throttle increment from becoming excessive at the tail stage.
900#     The default value is false.  (Since 5.1)
902#
903# @tls-creds: ID of the 'tls-creds' object that provides credentials
904#     for establishing a TLS connection over the migration data
905#     channel.  On the outgoing side of the migration, the credentials
906#     must be for a 'client' endpoint, while for the incoming side the
907#     credentials must be for a 'server' endpoint.  Setting this to a
908#     non-empty string enables TLS for all migrations.  An empty
909#     string means that QEMU will use plain text mode for migration,
910#     rather than TLS.  This is the default.  (Since 2.7)
911#
912# @tls-hostname: migration target's hostname for validating the
913#     server's x509 certificate identity.  If empty, QEMU will use the
914#     hostname from the migration URI, if any.  A non-empty value is
915#     required when using x509 based TLS credentials and the migration
916#     URI does not include a hostname, such as fd: or exec: based
917#     migration.  (Since 2.7)
918#
919#     Note: empty value works only since 2.9.
920#
921# @tls-authz: ID of the 'authz' object subclass that provides access
922#     control checking of the TLS x509 certificate distinguished name.
923#     This object is only resolved at time of use, so can be deleted
924#     and recreated on the fly while the migration server is active.
925#     If missing, it will default to denying access (Since 4.0)
926#
927# @max-bandwidth: maximum speed for migration, in bytes per second.
928#     (Since 2.8)
929#
930# @avail-switchover-bandwidth: the available bandwidth that migration
931#     can use during the switchover phase.  NOTE!  This does not limit
932#     the bandwidth during switchover, but is only used in calculations
933#     when deciding whether to switch over.  By default, this value is
934#     zero, which means QEMU will estimate the bandwidth
935#     automatically.  Set this when the estimated value is not
936#     accurate, but the user can guarantee such bandwidth is
937#     available when switching over.  When specified correctly, this
938#     can make the switchover decision much more accurate.
939#     (Since 8.2)
940#
941# @downtime-limit: maximum tolerated downtime for migration, in
942#     milliseconds.  (Since 2.8)
943#
944# @x-checkpoint-delay: The delay time (in ms) between two COLO
945#     checkpoints in periodic mode.  (Since 2.8)
946#
947# @multifd-channels: Number of channels used to migrate data in
948#     parallel.  This is the same as the number of sockets
949#     used for migration.  The default value is 2 (since 4.0)
950#
951# @xbzrle-cache-size: cache size to be used by XBZRLE migration.  It
952#     needs to be a multiple of the target page size and a power of 2
953#     (Since 2.11)
954#
955# @max-postcopy-bandwidth: Background transfer bandwidth during
956#     postcopy.  Defaults to 0 (unlimited).  In bytes per second.
957#     (Since 3.0)
958#
959# @max-cpu-throttle: maximum cpu throttle percentage.  Defaults to 99.
960#     (Since 3.1)
961#
962# @multifd-compression: Which compression method to use.  Defaults to
963#     none.  (Since 5.0)
964#
965# @multifd-zlib-level: Set the compression level to be used in live
966#     migration.  The compression level is an integer between 0 and 9,
967#     where 0 means no compression, 1 means the best compression
968#     speed, and 9 means best compression ratio, which will consume
969#     more CPU.  Defaults to 1.  (Since 5.0)
970#
971# @multifd-zstd-level: Set the compression level to be used in live
972#     migration.  The compression level is an integer between 0 and 20,
973#     where 0 means no compression, 1 means the best compression
974#     speed, and 20 means best compression ratio which will consume
975#     more CPU.  Defaults to 1.  (Since 5.0)
976#
977# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
978#     aliases for the purpose of dirty bitmap migration.  Such aliases
979#     may for example be the corresponding names on the opposite side.
980#     The mapping must be one-to-one, but not necessarily complete: On
981#     the source, unmapped bitmaps and all bitmaps on unmapped nodes
982#     will be ignored.  On the destination, encountering an unmapped
983#     alias in the incoming migration stream will result in a report,
984#     and all further bitmap migration data will then be discarded.
985#     Note that the destination does not know about bitmaps it does
986#     not receive, so there is no limitation or requirement regarding
987#     the number of bitmaps received, or how they are named, or on
988#     which nodes they are placed.  By default (when this parameter
989#     has never been set), bitmap names are mapped to themselves.
990#     Nodes are mapped to their block device name if there is one, and
991#     to their node name otherwise.  (Since 5.2)
992#
993# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty
994#     limit during live migration.  Should be in the range 1 to
995#     1000ms.  Defaults to 1000ms.  (Since 8.1)
996#
997# @vcpu-dirty-limit: Dirty page rate limit (MB/s) during live migration.
998#     Defaults to 1.  (Since 8.1)
999#
1000# @mode: Migration mode.  See description in @MigMode.  Default is
1001#     'normal'.  (Since 8.2)
1002#
1003# @zero-page-detection: Whether and how to detect zero pages.
1004#     See description in @ZeroPageDetection.  Default is 'multifd'.
1005#     (since 9.0)
1006#
1007# @direct-io: Open migration files with O_DIRECT when possible.  This
1008#     only has effect if the @mapped-ram capability is enabled.
1009#     (Since 9.1)
1010#
1011# Features:
1012#
1013# @unstable: Members @x-checkpoint-delay and
1014#     @x-vcpu-dirty-limit-period are experimental.
1015#
1016# TODO: either fuse back into MigrationParameters, or make
1017#     MigrationParameters members mandatory
1018#
1019# Since: 2.4
1020##
1021{ 'struct': 'MigrateSetParameters',
1022  'data': { '*announce-initial': 'size',
1023            '*announce-max': 'size',
1024            '*announce-rounds': 'size',
1025            '*announce-step': 'size',
1026            '*throttle-trigger-threshold': 'uint8',
1027            '*cpu-throttle-initial': 'uint8',
1028            '*cpu-throttle-increment': 'uint8',
1029            '*cpu-throttle-tailslow': 'bool',
1030            '*tls-creds': 'StrOrNull',
1031            '*tls-hostname': 'StrOrNull',
1032            '*tls-authz': 'StrOrNull',
1033            '*max-bandwidth': 'size',
1034            '*avail-switchover-bandwidth': 'size',
1035            '*downtime-limit': 'uint64',
1036            '*x-checkpoint-delay': { 'type': 'uint32',
1037                                     'features': [ 'unstable' ] },
1038            '*multifd-channels': 'uint8',
1039            '*xbzrle-cache-size': 'size',
1040            '*max-postcopy-bandwidth': 'size',
1041            '*max-cpu-throttle': 'uint8',
1042            '*multifd-compression': 'MultiFDCompression',
1043            '*multifd-zlib-level': 'uint8',
1044            '*multifd-zstd-level': 'uint8',
1045            '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ],
1046            '*x-vcpu-dirty-limit-period': { 'type': 'uint64',
1047                                            'features': [ 'unstable' ] },
1048            '*vcpu-dirty-limit': 'uint64',
1049            '*mode': 'MigMode',
1050            '*zero-page-detection': 'ZeroPageDetection',
1051            '*direct-io': 'bool' } }
1052
1053##
1054# @migrate-set-parameters:
1055#
1056# Set various migration parameters.
1057#
1058# Since: 2.4
1059#
1060# .. qmp-example::
1061#
1062#     -> { "execute": "migrate-set-parameters" ,
1063#          "arguments": { "multifd-channels": 5 } }
1064#     <- { "return": {} }
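#
# As an illustrative sketch (the tls-creds object ID and hostname are
# assumed examples), TLS can be enabled on the outgoing side like this:
#
# .. qmp-example::
#
#     -> { "execute": "migrate-set-parameters",
#          "arguments": { "tls-creds": "tls0",
#                         "tls-hostname": "dst.example.com" } }
#     <- { "return": {} }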
1065##
1066{ 'command': 'migrate-set-parameters', 'boxed': true,
1067  'data': 'MigrateSetParameters' }
1068
1069##
1070# @MigrationParameters:
1071#
1072# The optional members aren't actually optional.
1073#
1074# @announce-initial: Initial delay (in milliseconds) before sending
1075#     the first announce (Since 4.0)
1076#
1077# @announce-max: Maximum delay (in milliseconds) between packets in
1078#     the announcement (Since 4.0)
1079#
1080# @announce-rounds: Number of self-announce packets sent after
1081#     migration (Since 4.0)
1082#
1083# @announce-step: Increase in delay (in milliseconds) between
1084#     subsequent packets in the announcement (Since 4.0)
1085#
1086# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
1087#     bytes_xfer_period to trigger throttling.  It is expressed as
1088#     a percentage.  The default value is 50.  (Since 5.0)
1089#
1090# @cpu-throttle-initial: Initial percentage of time guest cpus are
1091#     throttled when migration auto-converge is activated.  (Since
1092#     2.7)
1093#
1094# @cpu-throttle-increment: throttle percentage increase each time
1095#     auto-converge detects that migration is not making progress.
1096#     (Since 2.7)
1097#
1098# @cpu-throttle-tailslow: Make CPU throttling slower at the tail
1099#     stage.  At the tail stage of throttling, the guest is very
1100#     sensitive to CPU percentage while the @cpu-throttle-increment
1101#     is usually excessive.  If this parameter is true, the ideal CPU
1102#     percentage for the guest is computed, which may make the dirty
1103#     rate exactly match the dirty rate threshold.  The smaller of
1104#     the increment specified by @cpu-throttle-increment and the one
1105#     derived from the ideal CPU percentage is then used.  This stays
1106#     compatible with traditional throttling while keeping the
1107#     throttle increment from becoming excessive at the tail stage.
1108#     The default value is false.  (Since 5.1)
1110#
1111# @tls-creds: ID of the 'tls-creds' object that provides credentials
1112#     for establishing a TLS connection over the migration data
1113#     channel.  On the outgoing side of the migration, the credentials
1114#     must be for a 'client' endpoint, while for the incoming side the
1115#     credentials must be for a 'server' endpoint.  An empty string
1116#     means that QEMU will use plain text mode for migration, rather
1117#     than TLS.  (Since 2.7)
1118#
1119#     Note: 2.8 omits empty @tls-creds instead.
1120#
1121# @tls-hostname: migration target's hostname for validating the
1122#     server's x509 certificate identity.  If empty, QEMU will use the
1123#     hostname from the migration URI, if any.  (Since 2.7)
1124#
1125#     Note: 2.8 omits empty @tls-hostname instead.
1126#
1127# @tls-authz: ID of the 'authz' object subclass that provides access
1128#     control checking of the TLS x509 certificate distinguished name.
1129#     (Since 4.0)
1130#
1131# @max-bandwidth: maximum speed for migration, in bytes per second.
1132#     (Since 2.8)
1133#
1134# @avail-switchover-bandwidth: the available bandwidth that migration
1135#     can use during the switchover phase.  NOTE!  This does not limit
1136#     the bandwidth during switchover, but is only used in calculations
1137#     when deciding whether to switch over.  By default, this value is
1138#     zero, which means QEMU will estimate the bandwidth
1139#     automatically.  Set this when the estimated value is not
1140#     accurate, but the user can guarantee such bandwidth is
1141#     available when switching over.  When specified correctly, this
1142#     can make the switchover decision much more accurate.
1143#     (Since 8.2)
1144#
1145# @downtime-limit: maximum tolerated downtime for migration, in
1146#     milliseconds.  (Since 2.8)
1147#
1148# @x-checkpoint-delay: the delay time between two COLO checkpoints.
1149#     (Since 2.8)
1150#
1151# @multifd-channels: Number of channels used to migrate data in
1152#     parallel.  This is the same as the number of sockets
1153#     used for migration.  The default value is 2 (since 4.0)
1154#
1155# @xbzrle-cache-size: cache size to be used by XBZRLE migration.  It
1156#     needs to be a multiple of the target page size and a power of 2
1157#     (Since 2.11)
1158#
1159# @max-postcopy-bandwidth: Background transfer bandwidth during
1160#     postcopy.  Defaults to 0 (unlimited).  In bytes per second.
1161#     (Since 3.0)
1162#
1163# @max-cpu-throttle: maximum cpu throttle percentage.  Defaults to 99.
1164#     (Since 3.1)
1165#
1166# @multifd-compression: Which compression method to use.  Defaults to
1167#     none.  (Since 5.0)
1168#
1169# @multifd-zlib-level: Set the compression level to be used in live
1170#     migration.  The compression level is an integer between 0 and 9,
1171#     where 0 means no compression, 1 means the best compression
1172#     speed, and 9 means best compression ratio, which will consume
1173#     more CPU.  Defaults to 1.  (Since 5.0)
1174#
1175# @multifd-zstd-level: Set the compression level to be used in live
1176#     migration.  The compression level is an integer between 0 and 20,
1177#     where 0 means no compression, 1 means the best compression
1178#     speed, and 20 means best compression ratio which will consume
1179#     more CPU.  Defaults to 1.  (Since 5.0)
1180#
1181# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
1182#     aliases for the purpose of dirty bitmap migration.  Such aliases
1183#     may for example be the corresponding names on the opposite side.
1184#     The mapping must be one-to-one, but not necessarily complete: On
1185#     the source, unmapped bitmaps and all bitmaps on unmapped nodes
1186#     will be ignored.  On the destination, encountering an unmapped
1187#     alias in the incoming migration stream will result in a report,
1188#     and all further bitmap migration data will then be discarded.
1189#     Note that the destination does not know about bitmaps it does
1190#     not receive, so there is no limitation or requirement regarding
1191#     the number of bitmaps received, or how they are named, or on
1192#     which nodes they are placed.  By default (when this parameter
1193#     has never been set), bitmap names are mapped to themselves.
1194#     Nodes are mapped to their block device name if there is one, and
1195#     to their node name otherwise.  (Since 5.2)
1196#
1197# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty
1198#     limit during live migration.  Should be in the range 1 to
1199#     1000ms.  Defaults to 1000ms.  (Since 8.1)
1200#
1201# @vcpu-dirty-limit: Dirty page rate limit (MB/s) during live migration.
1202#     Defaults to 1.  (Since 8.1)
1203#
1204# @mode: Migration mode.  See description in @MigMode.  Default is
1205#     'normal'.  (Since 8.2)
1206#
1207# @zero-page-detection: Whether and how to detect zero pages.
1208#     See description in @ZeroPageDetection.  Default is 'multifd'.
1209#     (since 9.0)
1210#
1211# @direct-io: Open migration files with O_DIRECT when possible.  This
1212#     only has effect if the @mapped-ram capability is enabled.
1213#     (Since 9.1)
1214#
1215# Features:
1216#
1217# @unstable: Members @x-checkpoint-delay and
1218#     @x-vcpu-dirty-limit-period are experimental.
1219#
1220# Since: 2.4
1221##
1222{ 'struct': 'MigrationParameters',
1223  'data': { '*announce-initial': 'size',
1224            '*announce-max': 'size',
1225            '*announce-rounds': 'size',
1226            '*announce-step': 'size',
1227            '*throttle-trigger-threshold': 'uint8',
1228            '*cpu-throttle-initial': 'uint8',
1229            '*cpu-throttle-increment': 'uint8',
1230            '*cpu-throttle-tailslow': 'bool',
1231            '*tls-creds': 'str',
1232            '*tls-hostname': 'str',
1233            '*tls-authz': 'str',
1234            '*max-bandwidth': 'size',
1235            '*avail-switchover-bandwidth': 'size',
1236            '*downtime-limit': 'uint64',
1237            '*x-checkpoint-delay': { 'type': 'uint32',
1238                                     'features': [ 'unstable' ] },
1239            '*multifd-channels': 'uint8',
1240            '*xbzrle-cache-size': 'size',
1241            '*max-postcopy-bandwidth': 'size',
1242            '*max-cpu-throttle': 'uint8',
1243            '*multifd-compression': 'MultiFDCompression',
1244            '*multifd-zlib-level': 'uint8',
1245            '*multifd-zstd-level': 'uint8',
1246            '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ],
1247            '*x-vcpu-dirty-limit-period': { 'type': 'uint64',
1248                                            'features': [ 'unstable' ] },
1249            '*vcpu-dirty-limit': 'uint64',
1250            '*mode': 'MigMode',
1251            '*zero-page-detection': 'ZeroPageDetection',
1252            '*direct-io': 'bool' } }
1253
1254##
1255# @query-migrate-parameters:
1256#
1257# Returns information about the current migration parameters
1258#
1259# Returns: @MigrationParameters
1260#
1261# Since: 2.4
1262#
1263# .. qmp-example::
1264#
1265#     -> { "execute": "query-migrate-parameters" }
1266#     <- { "return": {
1267#              "multifd-channels": 2,
1268#              "cpu-throttle-increment": 10,
1269#              "cpu-throttle-initial": 20,
1270#              "max-bandwidth": 33554432,
1271#              "downtime-limit": 300
1272#           }
1273#        }
1274##
1275{ 'command': 'query-migrate-parameters',
1276  'returns': 'MigrationParameters' }
1277
1278##
1279# @migrate-start-postcopy:
1280#
1281# Followup to a migration command to switch the migration to postcopy
1282# mode.  The postcopy-ram capability must be set on both source and
1283# destination before the original migration command.
1284#
1285# Since: 2.5
1286#
1287# .. qmp-example::
1288#
1289#     -> { "execute": "migrate-start-postcopy" }
1290#     <- { "return": {} }
1291##
1292{ 'command': 'migrate-start-postcopy' }
1293
1294##
1295# @MIGRATION:
1296#
1297# Emitted when a migration event happens
1298#
1299# @status: @MigrationStatus describing the current migration status.
1300#
1301# Since: 2.4
1302#
1303# .. qmp-example::
1304#
1305#     <- {"timestamp": {"seconds": 1432121972, "microseconds": 744001},
1306#         "event": "MIGRATION",
1307#         "data": {"status": "completed"} }
1308##
1309{ 'event': 'MIGRATION',
1310  'data': {'status': 'MigrationStatus'}}
1311
1312##
1313# @MIGRATION_PASS:
1314#
1315# Emitted from the source side of a migration at the start of each
1316# pass (when it syncs the dirty bitmap)
1317#
1318# @pass: An incrementing count (starting at 1 on the first pass)
1319#
1320# Since: 2.6
1321#
1322# .. qmp-example::
1323#
1324#     <- { "timestamp": {"seconds": 1449669631, "microseconds": 239225},
1325#           "event": "MIGRATION_PASS", "data": {"pass": 2} }
1326##
1327{ 'event': 'MIGRATION_PASS',
1328  'data': { 'pass': 'int' } }
1329
1330##
1331# @COLOMessage:
1332#
1333# The messages transmitted between the Primary and Secondary sides.
1334#
1335# @checkpoint-ready: Secondary VM (SVM) is ready for checkpointing
1336#
1337# @checkpoint-request: Primary VM (PVM) tells SVM to prepare for
1338#     checkpointing
1339#
1340# @checkpoint-reply: SVM gets PVM's checkpoint request
1341#
1342# @vmstate-send: VM's state will be sent by PVM.
1343#
1344# @vmstate-size: The total size of VMstate.
1345#
1346# @vmstate-received: VM's state has been received by SVM.
1347#
1348# @vmstate-loaded: VM's state has been loaded by SVM.
1349#
1350# Since: 2.8
1351##
1352{ 'enum': 'COLOMessage',
1353  'data': [ 'checkpoint-ready', 'checkpoint-request', 'checkpoint-reply',
1354            'vmstate-send', 'vmstate-size', 'vmstate-received',
1355            'vmstate-loaded' ] }
1356
1357##
1358# @COLOMode:
1359#
1360# The COLO current mode.
1361#
1362# @none: COLO is disabled.
1363#
1364# @primary: COLO node on the primary side.
1365#
1366# @secondary: COLO node on the secondary side.
1367#
1368# Since: 2.8
1369##
1370{ 'enum': 'COLOMode',
1371  'data': [ 'none', 'primary', 'secondary'] }
1372
1373##
1374# @FailoverStatus:
1375#
1376# An enumeration of COLO failover status
1377#
1378# @none: no failover has ever happened
1379#
1380# @require: a failover request has been received but not yet handled
1381#
1382# @active: in the process of doing failover
1383#
1384# @completed: the failover process has finished
1385#
1386# @relaunch: restart the failover process, from 'none' -> 'completed'
1387#     (Since 2.9)
1388#
1389# Since: 2.8
1390##
1391{ 'enum': 'FailoverStatus',
1392  'data': [ 'none', 'require', 'active', 'completed', 'relaunch' ] }
1393
1394##
1395# @COLO_EXIT:
1396#
1397# Emitted when the VM leaves COLO mode, either due to an error or at
1398# the request of the user.
1399#
1400# @mode: report COLO mode when COLO exited.
1401#
1402# @reason: describes the reason for the COLO exit.
1403#
1404# Since: 3.1
1405#
1406# .. qmp-example::
1407#
1408#     <- { "timestamp": {"seconds": 2032141960, "microseconds": 417172},
1409#          "event": "COLO_EXIT", "data": {"mode": "primary", "reason": "request" } }
1410##
1411{ 'event': 'COLO_EXIT',
1412  'data': {'mode': 'COLOMode', 'reason': 'COLOExitReason' } }
1413
1414##
1415# @COLOExitReason:
1416#
1417# The reason for a COLO exit.
1418#
1419# @none: failover has never happened.  This state does not occur in
1420#     the COLO_EXIT event, and is only visible in the result of
1421#     query-colo-status.
1422#
1423# @request: COLO exit is due to an external request.
1424#
1425# @error: COLO exit is due to an internal error.
1426#
1427# @processing: COLO is currently handling a failover (since 4.0).
1428#
1429# Since: 3.1
1430##
1431{ 'enum': 'COLOExitReason',
1432  'data': [ 'none', 'request', 'error' , 'processing' ] }
1433
1434##
1435# @x-colo-lost-heartbeat:
1436#
1437# Tell QEMU that the heartbeat is lost, and request it to perform
1438# takeover procedures.  If this command is sent to the PVM, the
1439# Primary side will exit COLO mode.  If sent to the Secondary, the
1440# Secondary side will run failover work and then take over server
1441# operation to become the service VM.
1442#
1443# Features:
1444#
1445# @unstable: This command is experimental.
1446#
1447# Since: 2.8
1448#
1449# .. qmp-example::
1450#
1451#     -> { "execute": "x-colo-lost-heartbeat" }
1452#     <- { "return": {} }
1453##
1454{ 'command': 'x-colo-lost-heartbeat',
1455  'features': [ 'unstable' ],
1456  'if': 'CONFIG_REPLICATION' }
1457
1458##
1459# @migrate_cancel:
1460#
1461# Cancel the currently executing migration process.
1462#
1463# .. note:: This command succeeds even if there is no migration
1464#    process running.
1465#
1466# Since: 0.14
1467#
1468# .. qmp-example::
1469#
1470#     -> { "execute": "migrate_cancel" }
1471#     <- { "return": {} }
1472##
1473{ 'command': 'migrate_cancel' }
1474
1475##
1476# @migrate-continue:
1477#
1478# Continue migration when it's in a paused state.
1479#
1480# @state: The state the migration is currently expected to be in
1481#
1482# Since: 2.11
1483#
1484# .. qmp-example::
1485#
1486#     -> { "execute": "migrate-continue" , "arguments":
1487#          { "state": "pre-switchover" } }
1488#     <- { "return": {} }
1489##
1490{ 'command': 'migrate-continue', 'data': {'state': 'MigrationStatus'} }
1491
1492##
1493# @MigrationAddressType:
1494#
1495# The migration stream transport mechanisms.
1496#
1497# @socket: Migrate via socket.
1498#
1499# @exec: Direct the migration stream to another process.
1500#
1501# @rdma: Migrate via RDMA.
1502#
1503# @file: Direct the migration stream to a file.
1504#
1505# Since: 8.2
1506##
1507{ 'enum': 'MigrationAddressType',
1508  'data': [ 'socket', 'exec', 'rdma', 'file' ] }
1509
1510##
1511# @FileMigrationArgs:
1512#
1513# @filename: The file to receive the migration stream
1514#
1515# @offset: The file offset where the migration stream will start
1516#
1517# Since: 8.2
1518##
1519{ 'struct': 'FileMigrationArgs',
1520  'data': { 'filename': 'str',
1521            'offset': 'uint64' } }
1522
1523##
1524# @MigrationExecCommand:
1525#
1526# @args: command (list head) and arguments to execute.
1527#
1528# Since: 8.2
1529##
1530{ 'struct': 'MigrationExecCommand',
1531  'data': {'args': [ 'str' ] } }
1532
1533##
1534# @MigrationAddress:
1535#
1536# Migration endpoint configuration.
1537#
1538# @transport: The migration stream transport mechanism
1539#
1540# Since: 8.2
1541##
1542{ 'union': 'MigrationAddress',
1543  'base': { 'transport' : 'MigrationAddressType'},
1544  'discriminator': 'transport',
1545  'data': {
1546    'socket': 'SocketAddress',
1547    'exec': 'MigrationExecCommand',
1548    'rdma': 'InetSocketAddress',
1549    'file': 'FileMigrationArgs' } }
1550
1551##
1552# @MigrationChannelType:
1553#
1554# The migration channel-type request options.
1555#
1556# @main: Main outbound migration channel.
1557#
1558# Since: 8.1
1559##
1560{ 'enum': 'MigrationChannelType',
1561  'data': [ 'main' ] }
1562
1563##
1564# @MigrationChannel:
1565#
1566# Migration stream channel parameters.
1567#
1568# @channel-type: Channel type for transferring packet information.
1569#
1570# @addr: Migration endpoint configuration on destination interface.
1571#
1572# Since: 8.1
1573##
1574{ 'struct': 'MigrationChannel',
1575  'data': {
1576      'channel-type': 'MigrationChannelType',
1577      'addr': 'MigrationAddress' } }
1578
1579##
1580# @migrate:
1581#
1582# Migrates the currently running guest to another Virtual Machine.
1583#
1584# @uri: the Uniform Resource Identifier of the destination VM
1585#
1586# @channels: list of migration stream channels with each stream in the
1587#     list connected to a destination interface endpoint.
1588#
1589# @detach: this argument exists only for compatibility reasons and is
1590#     ignored by QEMU
1591#
1592# @resume: resume one paused migration, default "off".  (since 3.0)
1593#
1594# Since: 0.14
1595#
1596# .. admonition:: Notes
1597#
1598#     1. The 'query-migrate' command should be used to check
1599#        migration's progress and final result (this information is
1600#        provided by the 'status' member).
1601#
1602#     2. All boolean arguments default to false.
1603#
1604#     3. The user Monitor's "detach" argument is invalid in QMP and
1605#        should not be used.
1606#
1607#     4. The uri argument should have the Uniform Resource Identifier
1608#        of the default destination VM.  This connection will be bound
1609#        to the default network.
1610#
1611#     5. For now, the number of migration streams is restricted to
1612#        one, i.e. the 'channels' list must contain exactly one item.
1613#
1614#     6. The 'uri' and 'channels' arguments are mutually exclusive;
1615#        exactly one of the two should be present.
1616#
1617# .. qmp-example::
1618#
1619#     -> { "execute": "migrate", "arguments": { "uri": "tcp:0:4446" } }
1620#     <- { "return": {} }
1621#
1622#     -> { "execute": "migrate",
1623#          "arguments": {
1624#              "channels": [ { "channel-type": "main",
1625#                              "addr": { "transport": "socket",
1626#                                        "type": "inet",
1627#                                        "host": "10.12.34.9",
1628#                                        "port": "1050" } } ] } }
1629#     <- { "return": {} }
1630#
1631#     -> { "execute": "migrate",
1632#          "arguments": {
1633#              "channels": [ { "channel-type": "main",
1634#                              "addr": { "transport": "exec",
1635#                                        "args": [ "/bin/nc", "-p", "6000",
1636#                                                  "/some/sock" ] } } ] } }
1637#     <- { "return": {} }
1638#
1639#     -> { "execute": "migrate",
1640#          "arguments": {
1641#              "channels": [ { "channel-type": "main",
1642#                              "addr": { "transport": "rdma",
1643#                                        "host": "10.12.34.9",
1644#                                        "port": "1050" } } ] } }
1645#     <- { "return": {} }
1646#
1647#     -> { "execute": "migrate",
1648#          "arguments": {
1649#              "channels": [ { "channel-type": "main",
1650#                              "addr": { "transport": "file",
1651#                                        "filename": "/tmp/migfile",
1652#                                        "offset": 4096 } } ] } }
1653#     <- { "return": {} }
1654##
1655{ 'command': 'migrate',
1656  'data': {'*uri': 'str',
1657           '*channels': [ 'MigrationChannel' ],
1658           '*detach': 'bool', '*resume': 'bool' } }
1659
1660##
1661# @migrate-incoming:
1662#
1663# Start an incoming migration.  QEMU must have been started with
1664# -incoming defer.
1665#
1666# @uri: The Uniform Resource Identifier identifying the source or
1667#     address to listen on
1668#
1669# @channels: list of migration stream channels with each stream in the
1670#     list connected to a destination interface endpoint.
1671#
1672# @exit-on-error: Exit on incoming migration failure.  Default true.
1673#     When set to false, the failure triggers a MIGRATION event, and
1674#     error details could be retrieved with query-migrate.
1675#     (since 9.1)
1676#
1677# Since: 2.3
1678#
1679# .. admonition:: Notes
1680#
1681#     1. It's a bad idea to use a string for the uri, but it needs to
1682#        stay compatible with -incoming and the format of the uri is
1683#        already exposed to libvirt.
1684#
1685#     2. QEMU must be started with -incoming defer to allow
1686#        migrate-incoming to be used.
1687#
1688#     3. The uri format is the same as for -incoming
1689#
1690#     4. For now, the number of migration streams is restricted to
1691#        one, i.e. the 'channels' list must contain exactly one item.
1692#
1693#     5. The 'uri' and 'channels' arguments are mutually exclusive;
1694#        exactly one of the two should be present.
1695#
1696# .. qmp-example::
1697#
1698#     -> { "execute": "migrate-incoming",
1699#          "arguments": { "uri": "tcp:0:4446" } }
1700#     <- { "return": {} }
1701#
1702#     -> { "execute": "migrate-incoming",
1703#          "arguments": {
1704#              "channels": [ { "channel-type": "main",
1705#                              "addr": { "transport": "socket",
1706#                                        "type": "inet",
1707#                                        "host": "10.12.34.9",
1708#                                        "port": "1050" } } ] } }
1709#     <- { "return": {} }
1710#
1711#     -> { "execute": "migrate-incoming",
1712#          "arguments": {
1713#              "channels": [ { "channel-type": "main",
1714#                              "addr": { "transport": "exec",
1715#                                        "args": [ "/bin/nc", "-p", "6000",
1716#                                                  "/some/sock" ] } } ] } }
1717#     <- { "return": {} }
1718#
1719#     -> { "execute": "migrate-incoming",
1720#          "arguments": {
1721#              "channels": [ { "channel-type": "main",
1722#                              "addr": { "transport": "rdma",
1723#                                        "host": "10.12.34.9",
1724#                                        "port": "1050" } } ] } }
1725#     <- { "return": {} }
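#
# .. qmp-example::
#    :annotated:
#
#    A sketch of an incoming file-based channel, mirroring the
#    outgoing "file" example under @migrate; the filename and offset
#    shown are illustrative only::
#
#     -> { "execute": "migrate-incoming",
#          "arguments": {
#              "channels": [ { "channel-type": "main",
#                              "addr": { "transport": "file",
#                                        "filename": "/tmp/migfile",
#                                        "offset": 0 } } ] } }
#     <- { "return": {} }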
1726##
1727{ 'command': 'migrate-incoming',
1728             'data': {'*uri': 'str',
1729                      '*channels': [ 'MigrationChannel' ],
1730                      '*exit-on-error': 'bool' } }
1731
1732##
1733# @xen-save-devices-state:
1734#
1735# Save the state of all devices to file.  The RAM and the block
1736# devices of the VM are not saved by this command.
1737#
1738# @filename: the file to save the state of the devices to as binary
1739#     data.  See xen-save-devices-state.txt for a description of the
1740#     binary format.
1741#
1742# @live: Optional argument to ask QEMU to treat this command as part
1743#     of a live migration.  Defaults to true.  (since 2.11)
1744#
1745# Since: 1.1
1746#
1747# .. qmp-example::
1748#
1749#     -> { "execute": "xen-save-devices-state",
1750#          "arguments": { "filename": "/tmp/save" } }
1751#     <- { "return": {} }
1752##
1753{ 'command': 'xen-save-devices-state',
1754  'data': {'filename': 'str', '*live':'bool' } }
1755
1756##
1757# @xen-set-global-dirty-log:
1758#
1759# Enable or disable the global dirty log mode.
1760#
1761# @enable: true to enable, false to disable.
1762#
1763# Since: 1.3
1764#
1765# .. qmp-example::
1766#
1767#     -> { "execute": "xen-set-global-dirty-log",
1768#          "arguments": { "enable": true } }
1769#     <- { "return": {} }
1770##
1771{ 'command': 'xen-set-global-dirty-log', 'data': { 'enable': 'bool' } }
1772
1773##
1774# @xen-load-devices-state:
1775#
1776# Load the state of all devices from file.  The RAM and the block
1777# devices of the VM are not loaded by this command.
1778#
1779# @filename: the file to load the state of the devices from as binary
1780#     data.  See xen-save-devices-state.txt for a description of the
1781#     binary format.
1782#
1783# Since: 2.7
1784#
1785# .. qmp-example::
1786#
1787#     -> { "execute": "xen-load-devices-state",
1788#          "arguments": { "filename": "/tmp/resume" } }
1789#     <- { "return": {} }
1790##
1791{ 'command': 'xen-load-devices-state', 'data': {'filename': 'str'} }
1792
1793##
1794# @xen-set-replication:
1795#
1796# Enable or disable replication.
1797#
1798# @enable: true to enable, false to disable.
1799#
1800# @primary: true for primary or false for secondary.
1801#
1802# @failover: true to do failover, false to stop.  Cannot be specified
1803#     if 'enable' is true.  Default value is false.
1804#
1805# .. qmp-example::
1806#
1807#     -> { "execute": "xen-set-replication",
1808#          "arguments": {"enable": true, "primary": false} }
1809#     <- { "return": {} }
1810#
1811# Since: 2.9
1812##
1813{ 'command': 'xen-set-replication',
1814  'data': { 'enable': 'bool', 'primary': 'bool', '*failover': 'bool' },
1815  'if': 'CONFIG_REPLICATION' }
1816
1817##
1818# @ReplicationStatus:
1819#
1820# The result format for 'query-xen-replication-status'.
1821#
1822# @error: true if an error happened, false if replication is normal.
1823#
1824# @desc: the human readable error description string, when @error is
1825#     'true'.
1826#
1827# Since: 2.9
1828##
1829{ 'struct': 'ReplicationStatus',
1830  'data': { 'error': 'bool', '*desc': 'str' },
1831  'if': 'CONFIG_REPLICATION' }
1832
1833##
1834# @query-xen-replication-status:
1835#
1836# Query replication status while the vm is running.
1837#
1838# Returns: A @ReplicationStatus object showing the status.
1839#
1840# .. qmp-example::
1841#
1842#     -> { "execute": "query-xen-replication-status" }
1843#     <- { "return": { "error": false } }
1844#
1845# Since: 2.9
1846##
1847{ 'command': 'query-xen-replication-status',
1848  'returns': 'ReplicationStatus',
1849  'if': 'CONFIG_REPLICATION' }
1850
1851##
1852# @xen-colo-do-checkpoint:
1853#
1854# Xen uses this command to notify replication to trigger a checkpoint.
1855#
1856# .. qmp-example::
1857#
1858#     -> { "execute": "xen-colo-do-checkpoint" }
1859#     <- { "return": {} }
1860#
1861# Since: 2.9
1862##
1863{ 'command': 'xen-colo-do-checkpoint',
1864  'if': 'CONFIG_REPLICATION' }
1865
1866##
1867# @COLOStatus:
1868#
1869# The result format for 'query-colo-status'.
1870#
1871# @mode: COLO running mode.  If COLO is running, this field will
1872#     return 'primary' or 'secondary'.
1873#
1874# @last-mode: COLO last running mode.  If COLO is running, this field
1875#     returns the same value as the @mode field; after failover, it can
1876#     be used to retrieve the last COLO mode.  (since 4.0)
1877#
1878# @reason: describes the reason for the COLO exit.
1879#
1880# Since: 3.1
1881##
1882{ 'struct': 'COLOStatus',
1883  'data': { 'mode': 'COLOMode', 'last-mode': 'COLOMode',
1884            'reason': 'COLOExitReason' },
1885  'if': 'CONFIG_REPLICATION' }
1886
1887##
1888# @query-colo-status:
1889#
1890# Query COLO status while the vm is running.
1891#
1892# Returns: A @COLOStatus object showing the status.
1893#
1894# .. qmp-example::
1895#
1896#     -> { "execute": "query-colo-status" }
1897#     <- { "return": { "mode": "primary", "last-mode": "none", "reason": "request" } }
1898#
1899# Since: 3.1
1900##
1901{ 'command': 'query-colo-status',
1902  'returns': 'COLOStatus',
1903  'if': 'CONFIG_REPLICATION' }
1904
1905##
1906# @migrate-recover:
1907#
1908# Provide a recovery migration stream URI.
1909#
1910# @uri: the URI to be used for recovery of the migration stream.
1911#
1912# .. qmp-example::
1913#
1914#     -> { "execute": "migrate-recover",
1915#          "arguments": { "uri": "tcp:192.168.1.200:12345" } }
1916#     <- { "return": {} }
1917#
1918# Since: 3.0
1919##
1920{ 'command': 'migrate-recover',
1921  'data': { 'uri': 'str' },
1922  'allow-oob': true }
1923
1924##
1925# @migrate-pause:
1926#
1927# Pause a migration.  Currently it only supports postcopy.
1928#
1929# .. qmp-example::
1930#
1931#     -> { "execute": "migrate-pause" }
1932#     <- { "return": {} }
1933#
1934# Since: 3.0
1935##
1936{ 'command': 'migrate-pause', 'allow-oob': true }
1937
1938##
1939# @UNPLUG_PRIMARY:
1940#
1941# Emitted from the source side of a migration when the migration
1942# state is WAIT_UNPLUG.  The device was unplugged by the guest
1943# operating system.  Device resources in QEMU are kept on standby so
1944# that the device can be re-plugged in case of migration failure.
1945#
1946# @device-id: QEMU device id of the unplugged device
1947#
1948# Since: 4.2
1949#
1950# .. qmp-example::
1951#
1952#     <- { "event": "UNPLUG_PRIMARY",
1953#          "data": { "device-id": "hostdev0" },
1954#          "timestamp": { "seconds": 1265044230, "microseconds": 450486 } }
1955##
1956{ 'event': 'UNPLUG_PRIMARY',
1957  'data': { 'device-id': 'str' } }
1958
1959##
1960# @DirtyRateVcpu:
1961#
1962# Dirty rate of a vCPU.
1963#
1964# @id: vCPU index.
1965#
1966# @dirty-rate: dirty rate.
1967#
1968# Since: 6.2
1969##
1970{ 'struct': 'DirtyRateVcpu',
1971  'data': { 'id': 'int', 'dirty-rate': 'int64' } }
1972
1973##
1974# @DirtyRateStatus:
1975#
1976# Dirty page rate measurement status.
1977#
1978# @unstarted: measuring thread has not been started yet
1979#
1980# @measuring: measuring thread is running
1981#
1982# @measured: dirty page rate is measured and the results are available
1983#
1984# Since: 5.2
1985##
1986{ 'enum': 'DirtyRateStatus',
1987  'data': [ 'unstarted', 'measuring', 'measured'] }
1988
1989##
1990# @DirtyRateMeasureMode:
1991#
1992# Method used to measure dirty page rate.  Differences between
1993# available methods are explained in @calc-dirty-rate.
1994#
1995# @page-sampling: use page sampling
1996#
1997# @dirty-ring: use dirty ring
1998#
1999# @dirty-bitmap: use dirty bitmap
2000#
2001# Since: 6.2
2002##
2003{ 'enum': 'DirtyRateMeasureMode',
2004  'data': ['page-sampling', 'dirty-ring', 'dirty-bitmap'] }
2005
2006##
2007# @TimeUnit:
2008#
2009# Specifies the unit in which a time-related value is expressed.
2010#
2011# @second: value is in seconds
2012#
2013# @millisecond: value is in milliseconds
2014#
2015# Since: 8.2
2016##
2017{ 'enum': 'TimeUnit',
2018  'data': ['second', 'millisecond'] }
2019
2020##
2021# @DirtyRateInfo:
2022#
2023# Information about measured dirty page rate.
2024#
2025# @dirty-rate: an estimate of the dirty page rate of the VM in units
2026#     of MiB/s.  Value is present only when @status is 'measured'.
2027#
2028# @status: current status of dirty page rate measurements
2029#
2030# @start-time: start time of the calculation, in units of seconds
2031#
2032# @calc-time: time period for which dirty page rate was measured,
2033#     expressed and rounded down to @calc-time-unit.
2034#
2035# @calc-time-unit: time unit of @calc-time  (Since 8.2)
2036#
2037# @sample-pages: number of sampled pages per GiB of guest memory.
2038#     Valid only in page-sampling mode (Since 6.1)
2039#
2040# @mode: mode that was used to measure dirty page rate (Since 6.2)
2041#
2042# @vcpu-dirty-rate: dirty rate for each vCPU if dirty-ring mode was
2043#     specified (Since 6.2)
2044#
2045# Since: 5.2
2046##
2047{ 'struct': 'DirtyRateInfo',
2048  'data': {'*dirty-rate': 'int64',
2049           'status': 'DirtyRateStatus',
2050           'start-time': 'int64',
2051           'calc-time': 'int64',
2052           'calc-time-unit': 'TimeUnit',
2053           'sample-pages': 'uint64',
2054           'mode': 'DirtyRateMeasureMode',
2055           '*vcpu-dirty-rate': [ 'DirtyRateVcpu' ] } }
2056
2057##
2058# @calc-dirty-rate:
2059#
2060# Start measuring dirty page rate of the VM.  Results can be retrieved
2061# with @query-dirty-rate after measurements are completed.
2062#
2063# Dirty page rate is the number of pages changed in a given time
2064# period expressed in MiB/s.  The following methods of calculation are
2065# available:
2066#
2067# 1. In page sampling mode, a random subset of pages are selected and
2068#    hashed twice: once at the beginning of measurement time period,
2069#    and once again at the end.  If two hashes for some page are
2070#    different, the page is counted as changed.  Since this method
2071#    relies on sampling and hashing, calculated dirty page rate is
2072#    only an estimate of its true value.  Increasing @sample-pages
2073#    improves estimation quality at the cost of higher computational
2074#    overhead.
2075#
2076# 2. Dirty bitmap mode captures writes to memory (for example by
2077#    temporarily revoking write access to all pages) and counts page
2078#    faults.  Information about modified pages is collected into a
2079#    bitmap, where each bit corresponds to one guest page.  This mode
2080#    requires that KVM accelerator property "dirty-ring-size" is *not*
2081#    set.
2082#
2083# 3. Dirty ring mode is similar to dirty bitmap mode, but the
2084#    information about modified pages is collected into a ring buffer.
2085#    This mode tracks page modification for each vCPU separately.  It
2086#    requires that KVM accelerator property "dirty-ring-size" is set.
2087#
2088# @calc-time: time period for which dirty page rate is calculated.  By
2089#     default it is specified in seconds, but the unit can be set
2090#     explicitly with @calc-time-unit.  Note that larger @calc-time
2091#     values will typically result in smaller dirty page rates because
2092#     page dirtying is a one-time event.  Once some page is counted as
2093#     dirty during @calc-time period, further writes to this page will
2094#     not increase dirty page rate anymore.
2095#
2096# @calc-time-unit: time unit in which @calc-time is specified.  By
2097#     default it is seconds.  (Since 8.2)
2098#
2099# @sample-pages: number of sampled pages per GiB of guest memory.
2100#     Default value is 512.  For 4KiB guest pages this corresponds to
2101#     sampling ratio of 0.2%.  This argument is used only in page
2102#     sampling mode.  (Since 6.1)
2103#
2104# @mode: mechanism for tracking dirty pages.  Default value is
2105#     'page-sampling'.  Others are 'dirty-bitmap' and 'dirty-ring'.
2106#     (Since 6.1)
2107#
2108# Since: 5.2
2109#
2110# .. qmp-example::
2111#
2112#     -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 1,
2113#                                                     "sample-pages": 512} }
2114#     <- { "return": {} }
2115#
2116# .. qmp-example::
2117#    :annotated:
2118#
2119#    Measure dirty rate using dirty bitmap for 500 milliseconds::
2120#
2121#     -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 500,
2122#         "calc-time-unit": "millisecond", "mode": "dirty-bitmap"} }
2123#
2124#     <- { "return": {} }
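#
# .. qmp-example::
#    :annotated:
#
#    A minimal sketch of a dirty-ring measurement; as noted above, it
#    requires the KVM accelerator property "dirty-ring-size" to be
#    set, and the values shown are illustrative only::
#
#     -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 1,
#         "mode": "dirty-ring"} }
#
#     <- { "return": {} }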
2125##
2126{ 'command': 'calc-dirty-rate', 'data': {'calc-time': 'int64',
2127                                         '*calc-time-unit': 'TimeUnit',
2128                                         '*sample-pages': 'int',
2129                                         '*mode': 'DirtyRateMeasureMode'} }
2130
2131##
2132# @query-dirty-rate:
2133#
2134# Query results of the most recent invocation of @calc-dirty-rate.
2135#
2136# @calc-time-unit: time unit in which to report calculation time.
2137#     By default it is reported in seconds.  (Since 8.2)
2138#
2139# Since: 5.2
2140#
2141# .. qmp-example::
2142#    :title: Measurement is in progress
2143#
2144#     <- {"status": "measuring", "sample-pages": 512,
2145#         "mode": "page-sampling", "start-time": 1693900454, "calc-time": 10,
2146#         "calc-time-unit": "second"}
2147#
2148# .. qmp-example::
2149#    :title: Measurement has been completed
2150#
2151#     <- {"status": "measured", "sample-pages": 512, "dirty-rate": 108,
2152#         "mode": "page-sampling", "start-time": 1693900454, "calc-time": 10,
2153#         "calc-time-unit": "second"}
2154##
2155{ 'command': 'query-dirty-rate', 'data': {'*calc-time-unit': 'TimeUnit' },
2156                                 'returns': 'DirtyRateInfo' }
2157
2158##
2159# @DirtyLimitInfo:
2160#
2161# Dirty page rate limit information of a virtual CPU.
2162#
2163# @cpu-index: index of a virtual CPU.
2164#
2165# @limit-rate: upper limit of dirty page rate (MB/s) for a virtual
2166#     CPU, 0 means unlimited.
2167#
2168# @current-rate: current dirty page rate (MB/s) for a virtual CPU.
2169#
2170# Since: 7.1
2171##
2172{ 'struct': 'DirtyLimitInfo',
2173  'data': { 'cpu-index': 'int',
2174            'limit-rate': 'uint64',
2175            'current-rate': 'uint64' } }
2176
2177##
2178# @set-vcpu-dirty-limit:
2179#
2180# Set the upper limit of dirty page rate for virtual CPUs.
2181#
2182# Requires KVM with accelerator property "dirty-ring-size" set.  A
2183# virtual CPU's dirty page rate is a measure of its memory load.  To
2184# observe dirty page rates, use @calc-dirty-rate.
2185#
2186# @cpu-index: index of a virtual CPU, default is all.
2187#
2188# @dirty-rate: upper limit of dirty page rate (MB/s) for virtual CPUs.
2189#
2190# Since: 7.1
2191#
2192# .. qmp-example::
2193#
2194#     -> {"execute": "set-vcpu-dirty-limit",
2195#         "arguments": { "dirty-rate": 200,
2196#                        "cpu-index": 1 } }
2197#     <- { "return": {} }
2198##
2199{ 'command': 'set-vcpu-dirty-limit',
2200  'data': { '*cpu-index': 'int',
2201            'dirty-rate': 'uint64' } }
2202
2203##
2204# @cancel-vcpu-dirty-limit:
2205#
2206# Cancel the upper limit of dirty page rate for virtual CPUs.
2207#
2208# Cancel the dirty page limit for the vCPU which has been set with
2209# set-vcpu-dirty-limit command.  Note that this command requires
2210# dirty ring support, the same as "set-vcpu-dirty-limit".
2211#
2212# @cpu-index: index of a virtual CPU, default is all.
2213#
2214# Since: 7.1
2215#
2216# .. qmp-example::
2217#
2218#     -> {"execute": "cancel-vcpu-dirty-limit",
2219#         "arguments": { "cpu-index": 1 } }
2220#     <- { "return": {} }
2221##
2222{ 'command': 'cancel-vcpu-dirty-limit',
2223  'data': { '*cpu-index': 'int'} }
2224
2225##
2226# @query-vcpu-dirty-limit:
2227#
2228# Returns information about virtual CPU dirty page rate limits, if
2229# any.
2230#
2231# Since: 7.1
2232#
2233# .. qmp-example::
2234#
2235#     -> {"execute": "query-vcpu-dirty-limit"}
2236#     <- {"return": [
2237#            { "limit-rate": 60, "current-rate": 3, "cpu-index": 0},
2238#            { "limit-rate": 60, "current-rate": 3, "cpu-index": 1}]}
2239##
2240{ 'command': 'query-vcpu-dirty-limit',
2241  'returns': [ 'DirtyLimitInfo' ] }
2242
2243##
2244# @MigrationThreadInfo:
2245#
2246# Information about migration threads
2247#
2248# @name: the name of the migration thread
2249#
2250# @thread-id: ID of the underlying host thread
2251#
2252# Since: 7.2
2253##
2254{ 'struct': 'MigrationThreadInfo',
2255  'data': {'name': 'str',
2256           'thread-id': 'int'} }
2257
2258##
2259# @query-migrationthreads:
2260#
2261# Returns information about migration threads
2262#
2263# Returns: a list of @MigrationThreadInfo
2264#
2265# Since: 7.2
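#
# .. qmp-example::
#    :annotated:
#
#    A sketch of a possible exchange; the thread name and ID returned
#    here are illustrative only::
#
#     -> { "execute": "query-migrationthreads" }
#     <- { "return": [ { "name": "live_migration", "thread-id": 12345 } ] }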
2266##
2267{ 'command': 'query-migrationthreads',
2268  'returns': ['MigrationThreadInfo'] }
2269
2270##
2271# @snapshot-save:
2272#
2273# Save a VM snapshot
2274#
2275# @job-id: identifier for the newly created job
2276#
2277# @tag: name of the snapshot to create
2278#
2279# @vmstate: block device node name to save vmstate to
2280#
2281# @devices: list of block device node names to save a snapshot to
2282#
2283# Applications should not assume that the snapshot save is complete
2284# when this command returns.  The job commands / events must be used
2285# to determine completion and to fetch details of any errors that
2286# arise.
2287#
2288# Note that execution of the guest CPUs may be stopped during the time
2289# it takes to save the snapshot.  A future version of QEMU may ensure
2290# CPUs are executing continuously.
2291#
2292# It is strongly recommended that @devices contain all writable block
2293# device nodes if a consistent snapshot is required.
2294#
2295# If @tag already exists, an error will be reported.
2296#
2297# .. qmp-example::
2298#
2299#     -> { "execute": "snapshot-save",
2300#          "arguments": {
2301#             "job-id": "snapsave0",
2302#             "tag": "my-snap",
2303#             "vmstate": "disk0",
2304#             "devices": ["disk0", "disk1"]
2305#          }
2306#        }
2307#     <- { "return": { } }
2308#     <- {"event": "JOB_STATUS_CHANGE",
2309#         "timestamp": {"seconds": 1432121972, "microseconds": 744001},
2310#         "data": {"status": "created", "id": "snapsave0"}}
2311#     <- {"event": "JOB_STATUS_CHANGE",
2312#         "timestamp": {"seconds": 1432122172, "microseconds": 744001},
2313#         "data": {"status": "running", "id": "snapsave0"}}
2314#     <- {"event": "STOP",
2315#         "timestamp": {"seconds": 1432122372, "microseconds": 744001} }
2316#     <- {"event": "RESUME",
2317#         "timestamp": {"seconds": 1432122572, "microseconds": 744001} }
2318#     <- {"event": "JOB_STATUS_CHANGE",
2319#         "timestamp": {"seconds": 1432122772, "microseconds": 744001},
2320#         "data": {"status": "waiting", "id": "snapsave0"}}
2321#     <- {"event": "JOB_STATUS_CHANGE",
2322#         "timestamp": {"seconds": 1432122972, "microseconds": 744001},
2323#         "data": {"status": "pending", "id": "snapsave0"}}
2324#     <- {"event": "JOB_STATUS_CHANGE",
2325#         "timestamp": {"seconds": 1432123172, "microseconds": 744001},
2326#         "data": {"status": "concluded", "id": "snapsave0"}}
2327#     -> {"execute": "query-jobs"}
2328#     <- {"return": [{"current-progress": 1,
2329#                     "status": "concluded",
2330#                     "total-progress": 1,
2331#                     "type": "snapshot-save",
2332#                     "id": "snapsave0"}]}
2333#
2334# Since: 6.0
2335##
2336{ 'command': 'snapshot-save',
2337  'data': { 'job-id': 'str',
2338            'tag': 'str',
2339            'vmstate': 'str',
2340            'devices': ['str'] } }
2341
2342##
2343# @snapshot-load:
2344#
2345# Load a VM snapshot
2346#
2347# @job-id: identifier for the newly created job
2348#
2349# @tag: name of the snapshot to load.
2350#
2351# @vmstate: block device node name to load vmstate from
2352#
2353# @devices: list of block device node names to load a snapshot from
2354#
2355# Applications should not assume that the snapshot load is complete
2356# when this command returns.  The job commands / events must be used
2357# to determine completion and to fetch details of any errors that
2358# arise.
2359#
2360# Note that execution of the guest CPUs will be stopped during the
2361# time it takes to load the snapshot.
2362#
2363# It is strongly recommended that @devices contain all writable block
2364# device nodes that can have changed since the original @snapshot-save
2365# command execution.
2366#
2367# .. qmp-example::
2368#
2369#     -> { "execute": "snapshot-load",
2370#          "arguments": {
2371#             "job-id": "snapload0",
2372#             "tag": "my-snap",
2373#             "vmstate": "disk0",
2374#             "devices": ["disk0", "disk1"]
2375#          }
2376#        }
2377#     <- { "return": { } }
2378#     <- {"event": "JOB_STATUS_CHANGE",
2379#         "timestamp": {"seconds": 1472124172, "microseconds": 744001},
2380#         "data": {"status": "created", "id": "snapload0"}}
2381#     <- {"event": "JOB_STATUS_CHANGE",
2382#         "timestamp": {"seconds": 1472125172, "microseconds": 744001},
2383#         "data": {"status": "running", "id": "snapload0"}}
2384#     <- {"event": "STOP",
2385#         "timestamp": {"seconds": 1472125472, "microseconds": 744001} }
2386#     <- {"event": "RESUME",
2387#         "timestamp": {"seconds": 1472125872, "microseconds": 744001} }
2388#     <- {"event": "JOB_STATUS_CHANGE",
2389#         "timestamp": {"seconds": 1472126172, "microseconds": 744001},
2390#         "data": {"status": "waiting", "id": "snapload0"}}
2391#     <- {"event": "JOB_STATUS_CHANGE",
2392#         "timestamp": {"seconds": 1472127172, "microseconds": 744001},
2393#         "data": {"status": "pending", "id": "snapload0"}}
2394#     <- {"event": "JOB_STATUS_CHANGE",
2395#         "timestamp": {"seconds": 1472128172, "microseconds": 744001},
2396#         "data": {"status": "concluded", "id": "snapload0"}}
2397#     -> {"execute": "query-jobs"}
2398#     <- {"return": [{"current-progress": 1,
2399#                     "status": "concluded",
2400#                     "total-progress": 1,
2401#                     "type": "snapshot-load",
2402#                     "id": "snapload0"}]}
2403#
2404# Since: 6.0
2405##
2406{ 'command': 'snapshot-load',
2407  'data': { 'job-id': 'str',
2408            'tag': 'str',
2409            'vmstate': 'str',
2410            'devices': ['str'] } }
2411
2412##
2413# @snapshot-delete:
2414#
2415# Delete a VM snapshot
2416#
2417# @job-id: identifier for the newly created job
2418#
2419# @tag: name of the snapshot to delete.
2420#
2421# @devices: list of block device node names to delete a snapshot from
2422#
2423# Applications should not assume that the snapshot delete is complete
2424# when this command returns.  The job commands / events must be used
2425# to determine completion and to fetch details of any errors that
2426# arise.
2427#
2428# .. qmp-example::
2429#
2430#     -> { "execute": "snapshot-delete",
2431#          "arguments": {
2432#             "job-id": "snapdelete0",
2433#             "tag": "my-snap",
2434#             "devices": ["disk0", "disk1"]
2435#          }
2436#        }
2437#     <- { "return": { } }
2438#     <- {"event": "JOB_STATUS_CHANGE",
2439#         "timestamp": {"seconds": 1442124172, "microseconds": 744001},
2440#         "data": {"status": "created", "id": "snapdelete0"}}
2441#     <- {"event": "JOB_STATUS_CHANGE",
2442#         "timestamp": {"seconds": 1442125172, "microseconds": 744001},
2443#         "data": {"status": "running", "id": "snapdelete0"}}
2444#     <- {"event": "JOB_STATUS_CHANGE",
2445#         "timestamp": {"seconds": 1442126172, "microseconds": 744001},
2446#         "data": {"status": "waiting", "id": "snapdelete0"}}
2447#     <- {"event": "JOB_STATUS_CHANGE",
2448#         "timestamp": {"seconds": 1442127172, "microseconds": 744001},
2449#         "data": {"status": "pending", "id": "snapdelete0"}}
2450#     <- {"event": "JOB_STATUS_CHANGE",
2451#         "timestamp": {"seconds": 1442128172, "microseconds": 744001},
2452#         "data": {"status": "concluded", "id": "snapdelete0"}}
2453#     -> {"execute": "query-jobs"}
2454#     <- {"return": [{"current-progress": 1,
2455#                     "status": "concluded",
2456#                     "total-progress": 1,
2457#                     "type": "snapshot-delete",
2458#                     "id": "snapdelete0"}]}
2459#
2460# Since: 6.0
2461##
2462{ 'command': 'snapshot-delete',
2463  'data': { 'job-id': 'str',
2464            'tag': 'str',
2465            'devices': ['str'] } }
2466