xref: /openbmc/qemu/qapi/migration.json (revision c0d691ab)
1# -*- Mode: Python -*-
2# vim: filetype=python
3#
4
5##
6# = Migration
7##
8
9{ 'include': 'common.json' }
10{ 'include': 'sockets.json' }
11
12##
13# @MigrationStats:
14#
15# Detailed migration status.
16#
17# @transferred: amount of bytes already transferred to the target VM
18#
19# @remaining: amount of bytes remaining to be transferred to the
20#     target VM
21#
22# @total: total amount of bytes involved in the migration process
23#
24# @duplicate: number of duplicate (zero) pages (since 1.2)
25#
26# @normal: number of normal pages (since 1.2)
27#
28# @normal-bytes: number of normal bytes sent (since 1.2)
29#
30# @dirty-pages-rate: number of pages dirtied by second by the guest
31#     (since 1.3)
32#
33# @mbps: throughput in megabits/sec.  (since 1.6)
34#
35# @dirty-sync-count: number of times that dirty ram was synchronized
36#     (since 2.1)
37#
38# @postcopy-requests: The number of page requests received from the
39#     destination (since 2.7)
40#
41# @page-size: The number of bytes per page for the various page-based
42#     statistics (since 2.10)
43#
44# @multifd-bytes: The number of bytes sent through multifd (since 3.0)
45#
46# @pages-per-second: the number of memory pages transferred per second
47#     (Since 4.0)
48#
49# @precopy-bytes: The number of bytes sent in the pre-copy phase
50#     (since 7.0).
51#
52# @downtime-bytes: The number of bytes sent while the guest is paused
53#     (since 7.0).
54#
55# @postcopy-bytes: The number of bytes sent during the post-copy phase
56#     (since 7.0).
57#
58# @dirty-sync-missed-zero-copy: Number of times dirty RAM
59#     synchronization could not avoid copying dirty pages.  This is
60#     between 0 and @dirty-sync-count * @multifd-channels.  (since
61#     7.1)
62#
63# Since: 0.14
64##
65{ 'struct': 'MigrationStats',
66  'data': {'transferred': 'int', 'remaining': 'int', 'total': 'int' ,
67           'duplicate': 'int',
68           'normal': 'int',
69           'normal-bytes': 'int', 'dirty-pages-rate': 'int',
70           'mbps': 'number', 'dirty-sync-count': 'int',
71           'postcopy-requests': 'int', 'page-size': 'int',
72           'multifd-bytes': 'uint64', 'pages-per-second': 'uint64',
73           'precopy-bytes': 'uint64', 'downtime-bytes': 'uint64',
74           'postcopy-bytes': 'uint64',
75           'dirty-sync-missed-zero-copy': 'uint64' } }
76
77##
78# @XBZRLECacheStats:
79#
80# Detailed XBZRLE migration cache statistics
81#
82# @cache-size: XBZRLE cache size
83#
84# @bytes: amount of bytes already transferred to the target VM
85#
86# @pages: number of pages transferred to the target VM
87#
88# @cache-miss: number of cache misses
89#
90# @cache-miss-rate: rate of cache misses (since 2.1)
91#
92# @encoding-rate: rate of encoded bytes (since 5.1)
93#
94# @overflow: number of overflows
95#
96# Since: 1.2
97##
98{ 'struct': 'XBZRLECacheStats',
99  'data': {'cache-size': 'size', 'bytes': 'int', 'pages': 'int',
100           'cache-miss': 'int', 'cache-miss-rate': 'number',
101           'encoding-rate': 'number', 'overflow': 'int' } }
102
103##
104# @CompressionStats:
105#
106# Detailed migration compression statistics
107#
108# @pages: number of pages compressed and transferred to the target VM
109#
110# @busy: count of times that no free thread was available to compress
111#     data
112#
113# @busy-rate: rate of thread busy
114#
115# @compressed-size: amount of bytes after compression
116#
117# @compression-rate: rate of compressed size
118#
119# Since: 3.1
120##
121{ 'struct': 'CompressionStats',
122  'data': {'pages': 'int', 'busy': 'int', 'busy-rate': 'number',
123           'compressed-size': 'int', 'compression-rate': 'number' } }
124
125##
126# @MigrationStatus:
127#
128# An enumeration of migration status.
129#
130# @none: no migration has ever happened.
131#
132# @setup: migration process has been initiated.
133#
134# @cancelling: in the process of cancelling migration.
135#
136# @cancelled: cancelling migration is finished.
137#
138# @active: in the process of doing migration.
139#
140# @postcopy-active: like active, but now in postcopy mode.  (since
141#     2.5)
142#
143# @postcopy-paused: during postcopy but paused.  (since 3.0)
144#
145# @postcopy-recover: trying to recover from a paused postcopy.  (since
146#     3.0)
147#
148# @completed: migration is finished.
149#
150# @failed: some error occurred during migration process.
151#
152# @colo: VM is in the process of fault tolerance, VM can not get into
153#     this state unless colo capability is enabled for migration.
154#     (since 2.8)
155#
156# @pre-switchover: Paused before device serialisation.  (since 2.11)
157#
158# @device: During device serialisation when pause-before-switchover is
159#     enabled (since 2.11)
160#
161# @wait-unplug: wait for device unplug request by guest OS to be
162#     completed.  (since 4.2)
163#
164# Since: 2.3
165##
166{ 'enum': 'MigrationStatus',
167  'data': [ 'none', 'setup', 'cancelling', 'cancelled',
168            'active', 'postcopy-active', 'postcopy-paused',
169            'postcopy-recover', 'completed', 'failed', 'colo',
170            'pre-switchover', 'device', 'wait-unplug' ] }
171##
172# @VfioStats:
173#
174# Detailed VFIO devices migration statistics
175#
176# @transferred: amount of bytes transferred to the target VM by VFIO
177#     devices
178#
179# Since: 5.2
180##
181{ 'struct': 'VfioStats',
182  'data': {'transferred': 'int' } }
183
184##
185# @MigrationInfo:
186#
187# Information about current migration process.
188#
189# @status: @MigrationStatus describing the current migration status.
190#     If this field is not returned, no migration process has been
191#     initiated
192#
193# @ram: @MigrationStats containing detailed migration status, only
194#     returned if status is 'active' or 'completed' (since 1.2)
195#
196# @xbzrle-cache: @XBZRLECacheStats containing detailed XBZRLE
197#     migration statistics, only returned if XBZRLE feature is on and
198#     status is 'active' or 'completed' (since 1.2)
199#
200# @total-time: total amount of milliseconds since migration started.
201#     If migration has ended, it returns the total migration time.
202#     (since 1.2)
203#
204# @downtime: only present when migration finishes correctly.  Total
205#     downtime in milliseconds for the guest.  (since 1.3)
206#
207# @expected-downtime: only present while migration is active.  Expected
208#     downtime in milliseconds for the guest in last walk of the dirty
209#     bitmap.  (since 1.3)
210#
211# @setup-time: amount of setup time in milliseconds *before* the
212#     iterations begin but *after* the QMP command is issued.  This is
213#     designed to provide an accounting of any activities (such as
214#     RDMA pinning) which may be expensive, but do not actually occur
215#     during the iterative migration rounds themselves.  (since 1.6)
216#
217# @cpu-throttle-percentage: percentage of time guest cpus are being
218#     throttled during auto-converge.  This is only present when
219#     auto-converge has started throttling guest cpus.  (Since 2.7)
220#
221# @error-desc: the human readable error description string.  Clients
222#     should not attempt to parse the error strings.  (Since 2.7)
223#
224# @postcopy-blocktime: total time when all vCPU were blocked during
225#     postcopy live migration.  This is only present when the
226#     postcopy-blocktime migration capability is enabled.  (Since 3.0)
227#
228# @postcopy-vcpu-blocktime: list of the postcopy blocktime per vCPU.
229#     This is only present when the postcopy-blocktime migration
230#     capability is enabled.  (Since 3.0)
231#
232# @socket-address: Only used for tcp, to know what the real port is
233#     (Since 4.0)
234#
235# @vfio: @VfioStats containing detailed VFIO devices migration
236#     statistics, only returned if VFIO device is present, migration
237#     is supported by all VFIO devices and status is 'active' or
238#     'completed' (since 5.2)
239#
240# @blocked-reasons: A list of reasons an outgoing migration is
241#     blocked.  Present and non-empty when migration is blocked.
242#     (since 6.0)
243#
244# @dirty-limit-throttle-time-per-round: Maximum throttle time
245#     (in microseconds) of virtual CPUs each dirty ring full round,
246#     which shows how MigrationCapability dirty-limit affects the
247#     guest during live migration.  (Since 8.1)
248#
249# @dirty-limit-ring-full-time: Estimated average dirty ring full time
250#     (in microseconds) for each dirty ring full round.  The value
251#     equals the dirty ring memory size divided by the average dirty
252#     page rate of the virtual CPU, which can be used to observe the
253#     average memory load of the virtual CPU indirectly.  Note that
254#     zero means guest doesn't dirty memory.  (Since 8.1)
255#
256# Since: 0.14
257##
258{ 'struct': 'MigrationInfo',
259  'data': {'*status': 'MigrationStatus', '*ram': 'MigrationStats',
260           '*vfio': 'VfioStats',
261           '*xbzrle-cache': 'XBZRLECacheStats',
262           '*total-time': 'int',
263           '*expected-downtime': 'int',
264           '*downtime': 'int',
265           '*setup-time': 'int',
266           '*cpu-throttle-percentage': 'int',
267           '*error-desc': 'str',
268           '*blocked-reasons': ['str'],
269           '*postcopy-blocktime': 'uint32',
270           '*postcopy-vcpu-blocktime': ['uint32'],
271           '*socket-address': ['SocketAddress'],
272           '*dirty-limit-throttle-time-per-round': 'uint64',
273           '*dirty-limit-ring-full-time': 'uint64'} }
274
275##
276# @query-migrate:
277#
278# Returns information about current migration process.  If migration
279# is active there will be another json-object with RAM migration
280# status.
281#
282# Returns: @MigrationInfo
283#
284# Since: 0.14
285#
286# Examples:
287#
288#     1. Before the first migration
289#
290#     -> { "execute": "query-migrate" }
291#     <- { "return": {} }
292#
293#     2. Migration is done and has succeeded
294#
295#     -> { "execute": "query-migrate" }
296#     <- { "return": {
297#             "status": "completed",
298#             "total-time":12345,
299#             "setup-time":12345,
300#             "downtime":12345,
301#             "ram":{
302#               "transferred":123,
303#               "remaining":123,
304#               "total":246,
305#               "duplicate":123,
306#               "normal":123,
307#               "normal-bytes":123456,
308#               "dirty-sync-count":15
309#             }
310#          }
311#        }
312#
313#     3. Migration is done and has failed
314#
315#     -> { "execute": "query-migrate" }
316#     <- { "return": { "status": "failed" } }
317#
318#     4. Migration is being performed:
319#
320#     -> { "execute": "query-migrate" }
321#     <- {
322#           "return":{
323#              "status":"active",
324#              "total-time":12345,
325#              "setup-time":12345,
326#              "expected-downtime":12345,
327#              "ram":{
328#                 "transferred":123,
329#                 "remaining":123,
330#                 "total":246,
331#                 "duplicate":123,
332#                 "normal":123,
333#                 "normal-bytes":123456,
334#                 "dirty-sync-count":15
335#              }
336#           }
337#        }
338#
339#     5. Migration is being performed and XBZRLE is active:
340#
341#     -> { "execute": "query-migrate" }
342#     <- {
343#           "return":{
344#              "status":"active",
345#              "total-time":12345,
346#              "setup-time":12345,
347#              "expected-downtime":12345,
348#              "ram":{
349#                 "total":1057024,
350#                 "remaining":1053304,
351#                 "transferred":3720,
352#                 "duplicate":10,
353#                 "normal":3333,
354#                 "normal-bytes":3412992,
355#                 "dirty-sync-count":15
356#              },
357#              "xbzrle-cache":{
358#                 "cache-size":67108864,
359#                 "bytes":20971520,
360#                 "pages":2444343,
361#                 "cache-miss":2244,
362#                 "cache-miss-rate":0.123,
363#                 "encoding-rate":80.1,
364#                 "overflow":34434
365#              }
366#           }
367#        }
368##
369{ 'command': 'query-migrate', 'returns': 'MigrationInfo' }
370
371##
372# @MigrationCapability:
373#
374# Migration capabilities enumeration
375#
376# @xbzrle: Migration supports xbzrle (Xor Based Zero Run Length
377#     Encoding). This feature allows us to minimize migration traffic
378#     for certain work loads, by sending compressed difference of the
379#     pages
380#
381# @rdma-pin-all: Controls whether or not the entire VM memory
382#     footprint is mlock()'d on demand or all at once.  Refer to
383#     docs/rdma.txt for usage.  Disabled by default.  (since 2.0)
384#
385# @zero-blocks: During storage migration encode blocks of zeroes
386#     efficiently.  This essentially saves 1MB of zeroes per block on
387#     the wire.  Enabling requires source and target VM to support
388#     this feature.  To enable it is sufficient to enable the
389#     capability on the source VM. The feature is disabled by default.
390#     (since 1.6)
391#
392# @events: generate events for each migration state change (since 2.4)
393#
394# @auto-converge: If enabled, QEMU will automatically throttle down
395#     the guest to speed up convergence of RAM migration.  (since 1.6)
396#
397# @postcopy-ram: Start executing on the migration target before all of
398#     RAM has been migrated, pulling the remaining pages along as
399#     needed.  The capability must have the same setting on both source
400#     and target or migration will not even start.  NOTE: If the
401#     migration fails during postcopy the VM will fail.  (since 2.6)
402#
403# @x-colo: If enabled, migration will never end, and the state of the
404#     VM on the primary side will be migrated continuously to the VM
405#     on secondary side, this process is called COarse-Grain LOck
406#     Stepping (COLO) for Non-stop Service.  (since 2.8)
407#
408# @release-ram: if enabled, qemu will free the migrated ram pages on
409#     the source during postcopy-ram migration.  (since 2.9)
410#
411# @return-path: If enabled, migration will use the return path even
412#     for precopy.  (since 2.10)
413#
414# @pause-before-switchover: Pause outgoing migration before
415#     serialising device state and before disabling block IO (since
416#     2.11)
417#
418# @multifd: Use more than one fd for migration (since 4.0)
419#
420# @dirty-bitmaps: If enabled, QEMU will migrate named dirty bitmaps.
421#     (since 2.12)
422#
423# @postcopy-blocktime: Calculate downtime for postcopy live migration
424#     (since 3.0)
425#
426# @late-block-activate: If enabled, the destination will not activate
427#     block devices (and thus take locks) immediately at the end of
428#     migration.  (since 3.0)
429#
430# @x-ignore-shared: If enabled, QEMU will not migrate shared memory
431#     that is accessible on the destination machine.  (since 4.0)
432#
433# @validate-uuid: Send the UUID of the source to allow the destination
434#     to ensure it is the same.  (since 4.2)
435#
436# @background-snapshot: If enabled, the migration stream will be a
437#     snapshot of the VM exactly at the point when the migration
438#     procedure starts.  The VM RAM is saved with running VM.
439#     (since 6.0)
440#
441# @zero-copy-send: Controls behavior on sending memory pages on
442#     migration.  When true, enables a zero-copy mechanism for sending
443#     memory pages, if host supports it.  Requires that QEMU be
444#     permitted to use locked memory for guest RAM pages.  (since 7.1)
445#
446# @postcopy-preempt: If enabled, the migration process will allow
447#     postcopy requests to preempt precopy stream, so postcopy
448#     requests will be handled faster.  This is a performance feature
449#     and should not affect the correctness of postcopy migration.
450#     (since 7.1)
451#
452# @switchover-ack: If enabled, migration will not stop the source VM
453#     and complete the migration until an ACK is received from the
454#     destination that it's OK to do so.  Exactly when this ACK is
455#     sent depends on the migrated devices that use this feature.  For
456#     example, a device can use it to make sure some of its data is
457#     sent and loaded in the destination before doing switchover.
458#     This can reduce downtime if devices that support this capability
459#     are present.  'return-path' capability must be enabled to use
460#     it.  (since 8.1)
461#
462# @dirty-limit: If enabled, migration will throttle vCPUs as needed to
463#     keep their dirty page rate within @vcpu-dirty-limit.  This can
464#     improve responsiveness of large guests during live migration,
465#     and can result in more stable read performance.  Requires KVM
466#     with accelerator property "dirty-ring-size" set.  (Since 8.1)
467#
468# @mapped-ram: Migrate using fixed offsets in the migration file for
469#     each RAM page.  Requires a migration URI that supports seeking,
470#     such as a file.  (since 9.0)
471#
472# Features:
473#
474# @unstable: Members @x-colo and @x-ignore-shared are experimental.
475#
476# Since: 1.2
477##
478{ 'enum': 'MigrationCapability',
479  'data': ['xbzrle', 'rdma-pin-all', 'auto-converge', 'zero-blocks',
480           'events', 'postcopy-ram',
481           { 'name': 'x-colo', 'features': [ 'unstable' ] },
482           'release-ram',
483           'return-path', 'pause-before-switchover', 'multifd',
484           'dirty-bitmaps', 'postcopy-blocktime', 'late-block-activate',
485           { 'name': 'x-ignore-shared', 'features': [ 'unstable' ] },
486           'validate-uuid', 'background-snapshot',
487           'zero-copy-send', 'postcopy-preempt', 'switchover-ack',
488           'dirty-limit', 'mapped-ram'] }
489
490##
491# @MigrationCapabilityStatus:
492#
493# Migration capability information
494#
495# @capability: capability enum
496#
497# @state: capability state bool
498#
499# Since: 1.2
500##
501{ 'struct': 'MigrationCapabilityStatus',
502  'data': { 'capability': 'MigrationCapability', 'state': 'bool' } }
503
504##
505# @migrate-set-capabilities:
506#
507# Enable/Disable the following migration capabilities (like xbzrle)
508#
509# @capabilities: json array of capability modifications to make
510#
511# Since: 1.2
512#
513# Example:
514#
515#     -> { "execute": "migrate-set-capabilities", "arguments":
516#          { "capabilities": [ { "capability": "xbzrle", "state": true } ] } }
517#     <- { "return": {} }
518##
519{ 'command': 'migrate-set-capabilities',
520  'data': { 'capabilities': ['MigrationCapabilityStatus'] } }
521
522##
523# @query-migrate-capabilities:
524#
525# Returns information about the current migration capabilities status
526#
527# Returns: a list of @MigrationCapabilityStatus
528#
529# Since: 1.2
530#
531# Example:
532#
533#     -> { "execute": "query-migrate-capabilities" }
534#     <- { "return": [
535#           {"state": false, "capability": "xbzrle"},
536#           {"state": false, "capability": "rdma-pin-all"},
537#           {"state": false, "capability": "auto-converge"},
538#           {"state": false, "capability": "zero-blocks"},
539#           {"state": true, "capability": "events"},
540#           {"state": false, "capability": "postcopy-ram"},
541#           {"state": false, "capability": "x-colo"}
542#        ]}
543##
544{ 'command': 'query-migrate-capabilities', 'returns':   ['MigrationCapabilityStatus']}
545
546##
547# @MultiFDCompression:
548#
549# An enumeration of multifd compression methods.
550#
551# @none: no compression.
552#
553# @zlib: use zlib compression method.
554#
555# @zstd: use zstd compression method.
556#
557# Since: 5.0
558##
559{ 'enum': 'MultiFDCompression',
560  'data': [ 'none', 'zlib',
561            { 'name': 'zstd', 'if': 'CONFIG_ZSTD' } ] }
562
563##
564# @MigMode:
565#
566# @normal: the original form of migration.  (since 8.2)
567#
568# @cpr-reboot: The migrate command stops the VM and saves state to the
569#     URI.  After quitting QEMU, the user resumes by running QEMU
570#     -incoming.
571#
572#     This mode allows the user to quit QEMU, optionally update and
573#     reboot the OS, and restart QEMU.  If the user reboots, the URI
574#     must persist across the reboot, such as by using a file.
575#
576#     Unlike normal mode, the use of certain local storage options
577#     does not block the migration, but the user must not modify the
578#     contents of guest block devices between the quit and restart.
579#
580#     This mode supports VFIO devices provided the user first puts the
581#     guest in the suspended runstate, such as by issuing
582#     guest-suspend-ram to the QEMU guest agent.
583#
584#     Best performance is achieved when the memory backend is shared
585#     and the @x-ignore-shared migration capability is set, but this
586#     is not required.  Further, if the user reboots before restarting
587#     such a configuration, the shared memory must persist across the
588#     reboot, such as by backing it with a dax device.
589#
590#     @cpr-reboot may not be used with postcopy, background-snapshot,
591#     or COLO.
592#
593#     (since 8.2)
594##
595{ 'enum': 'MigMode',
596  'data': [ 'normal', 'cpr-reboot' ] }
597
598##
599# @ZeroPageDetection:
600#
601# @none: Do not perform zero page checking.
602#
603# @legacy: Perform zero page checking in main migration thread.
604#
605# @multifd: Perform zero page checking in multifd sender thread if
606#     multifd migration is enabled, else in the main migration thread
607#     as for @legacy.
608#
609# Since: 9.0
610##
611{ 'enum': 'ZeroPageDetection',
612  'data': [ 'none', 'legacy', 'multifd' ] }
613
614##
615# @BitmapMigrationBitmapAliasTransform:
616#
617# @persistent: If present, the bitmap will be made persistent or
618#     transient depending on this parameter.
619#
620# Since: 6.0
621##
622{ 'struct': 'BitmapMigrationBitmapAliasTransform',
623  'data': {
624      '*persistent': 'bool'
625  } }
626
627##
628# @BitmapMigrationBitmapAlias:
629#
630# @name: The name of the bitmap.
631#
632# @alias: An alias name for migration (for example the bitmap name on
633#     the opposite site).
634#
635# @transform: Allows the modification of the migrated bitmap.  (since
636#     6.0)
637#
638# Since: 5.2
639##
640{ 'struct': 'BitmapMigrationBitmapAlias',
641  'data': {
642      'name': 'str',
643      'alias': 'str',
644      '*transform': 'BitmapMigrationBitmapAliasTransform'
645  } }
646
647##
648# @BitmapMigrationNodeAlias:
649#
650# Maps a block node name and the bitmaps it has to aliases for dirty
651# bitmap migration.
652#
653# @node-name: A block node name.
654#
655# @alias: An alias block node name for migration (for example the node
656#     name on the opposite site).
657#
658# @bitmaps: Mappings for the bitmaps on this node.
659#
660# Since: 5.2
661##
662{ 'struct': 'BitmapMigrationNodeAlias',
663  'data': {
664      'node-name': 'str',
665      'alias': 'str',
666      'bitmaps': [ 'BitmapMigrationBitmapAlias' ]
667  } }
668
669##
670# @MigrationParameter:
671#
672# Migration parameters enumeration
673#
674# @announce-initial: Initial delay (in milliseconds) before sending
675#     the first announce (Since 4.0)
676#
677# @announce-max: Maximum delay (in milliseconds) between packets in
678#     the announcement (Since 4.0)
679#
680# @announce-rounds: Number of self-announce packets sent after
681#     migration (Since 4.0)
682#
683# @announce-step: Increase in delay (in milliseconds) between
684#     subsequent packets in the announcement (Since 4.0)
685#
686# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
687#     bytes_xfer_period to trigger throttling.  It is expressed as
688#     percentage.  The default value is 50.  (Since 5.0)
689#
690# @cpu-throttle-initial: Initial percentage of time guest cpus are
691#     throttled when migration auto-converge is activated.  The
692#     default value is 20.  (Since 2.7)
693#
694# @cpu-throttle-increment: throttle percentage increase each time
695#     auto-converge detects that migration is not making progress.
696#     The default value is 10.  (Since 2.7)
697#
698# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage.  At
699#     the tail stage of throttling, the Guest is very sensitive to CPU
700#     percentage while the @cpu-throttle-increment is excessive
701#     usually at tail stage.  If this parameter is true, we will
702#     compute the ideal CPU percentage used by the Guest, which may
703#     exactly make the dirty rate match the dirty rate threshold.
704#     Then we will choose a smaller throttle increment between the one
705#     specified by @cpu-throttle-increment and the one generated by
706#     ideal CPU percentage.  Therefore, it is compatible to
707#     traditional throttling, meanwhile the throttle increment won't
708#     be excessive at tail stage.  The default value is false.  (Since
709#     5.1)
710#
711# @tls-creds: ID of the 'tls-creds' object that provides credentials
712#     for establishing a TLS connection over the migration data
713#     channel.  On the outgoing side of the migration, the credentials
714#     must be for a 'client' endpoint, while for the incoming side the
715#     credentials must be for a 'server' endpoint.  Setting this to a
716#     non-empty string enables TLS for all migrations.  An empty
717#     string means that QEMU will use plain text mode for migration,
718#     rather than TLS.  (Since 2.7)
719#
720# @tls-hostname: migration target's hostname for validating the
721#     server's x509 certificate identity.  If empty, QEMU will use the
722#     hostname from the migration URI, if any.  A non-empty value is
723#     required when using x509 based TLS credentials and the migration
724#     URI does not include a hostname, such as fd: or exec: based
725#     migration.  (Since 2.7)
726#
727#     Note: empty value works only since 2.9.
728#
729# @tls-authz: ID of the 'authz' object subclass that provides access
730#     control checking of the TLS x509 certificate distinguished name.
731#     This object is only resolved at time of use, so can be deleted
732#     and recreated on the fly while the migration server is active.
733#     If missing, it will default to denying access (Since 4.0)
734#
735# @max-bandwidth: maximum speed for migration, in bytes per second.
736#     (Since 2.8)
737#
738# @avail-switchover-bandwidth: to set the available bandwidth that
739#     migration can use during switchover phase.  NOTE!  This does not
740#     limit the bandwidth during switchover, but only for calculations
741#     when making decisions to switchover.  By default, this value is
742#     zero, which means QEMU will estimate the bandwidth
743#     automatically.  This can be set when the estimated value is not
744#     accurate, while the user is able to guarantee such bandwidth is
745#     available when switching over.  When specified correctly, this
746#     can make the switchover decision much more accurate.
747#     (Since 8.2)
748#
749# @downtime-limit: set maximum tolerated downtime for migration.
750#     maximum downtime in milliseconds (Since 2.8)
751#
752# @x-checkpoint-delay: The delay time (in ms) between two COLO
753#     checkpoints in periodic mode.  (Since 2.8)
754#
755# @multifd-channels: Number of channels used to migrate data in
756#     parallel.  This is the same number that the number of sockets
757#     used for migration.  The default value is 2 (since 4.0)
758#
759# @xbzrle-cache-size: cache size to be used by XBZRLE migration.  It
760#     needs to be a multiple of the target page size and a power of 2
761#     (Since 2.11)
762#
763# @max-postcopy-bandwidth: Background transfer bandwidth during
764#     postcopy.  Defaults to 0 (unlimited).  In bytes per second.
765#     (Since 3.0)
766#
767# @max-cpu-throttle: maximum cpu throttle percentage.  Defaults to 99.
768#     (Since 3.1)
769#
770# @multifd-compression: Which compression method to use.  Defaults to
771#     none.  (Since 5.0)
772#
773# @multifd-zlib-level: Set the compression level to be used in live
774#     migration, the compression level is an integer between 0 and 9,
775#     where 0 means no compression, 1 means the best compression
776#     speed, and 9 means best compression ratio which will consume
777#     more CPU. Defaults to 1.  (Since 5.0)
778#
779# @multifd-zstd-level: Set the compression level to be used in live
780#     migration, the compression level is an integer between 0 and 20,
781#     where 0 means no compression, 1 means the best compression
782#     speed, and 20 means best compression ratio which will consume
783#     more CPU. Defaults to 1.  (Since 5.0)
784#
785# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
786#     aliases for the purpose of dirty bitmap migration.  Such aliases
787#     may for example be the corresponding names on the opposite site.
788#     The mapping must be one-to-one, but not necessarily complete: On
789#     the source, unmapped bitmaps and all bitmaps on unmapped nodes
790#     will be ignored.  On the destination, encountering an unmapped
791#     alias in the incoming migration stream will result in a report,
792#     and all further bitmap migration data will then be discarded.
793#     Note that the destination does not know about bitmaps it does
794#     not receive, so there is no limitation or requirement regarding
795#     the number of bitmaps received, or how they are named, or on
796#     which nodes they are placed.  By default (when this parameter
797#     has never been set), bitmap names are mapped to themselves.
798#     Nodes are mapped to their block device name if there is one, and
799#     to their node name otherwise.  (Since 5.2)
800#
801# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty
802#     limit during live migration.  Should be in the range 1 to
803#     1000ms.  Defaults to 1000ms.  (Since 8.1)
804#
805# @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration.
806#     Defaults to 1.  (Since 8.1)
807#
808# @mode: Migration mode.  See description in @MigMode.  Default is
809#     'normal'.  (Since 8.2)
810#
811# @zero-page-detection: Whether and how to detect zero pages.
812#     See description in @ZeroPageDetection.  Default is 'multifd'.
813#     (since 9.0)
814#
815# Features:
816#
817# @unstable: Members @x-checkpoint-delay and
818#     @x-vcpu-dirty-limit-period are experimental.
819#
820# Since: 2.4
821##
822{ 'enum': 'MigrationParameter',
823  'data': ['announce-initial', 'announce-max',
824           'announce-rounds', 'announce-step',
825           'throttle-trigger-threshold',
826           'cpu-throttle-initial', 'cpu-throttle-increment',
827           'cpu-throttle-tailslow',
828           'tls-creds', 'tls-hostname', 'tls-authz', 'max-bandwidth',
829           'avail-switchover-bandwidth', 'downtime-limit',
830           { 'name': 'x-checkpoint-delay', 'features': [ 'unstable' ] },
831           'multifd-channels',
832           'xbzrle-cache-size', 'max-postcopy-bandwidth',
833           'max-cpu-throttle', 'multifd-compression',
834           'multifd-zlib-level', 'multifd-zstd-level',
835           'block-bitmap-mapping',
836           { 'name': 'x-vcpu-dirty-limit-period', 'features': ['unstable'] },
837           'vcpu-dirty-limit',
838           'mode',
839           'zero-page-detection'] }
840
841##
842# @MigrateSetParameters:
843#
844# @announce-initial: Initial delay (in milliseconds) before sending
845#     the first announce (Since 4.0)
846#
847# @announce-max: Maximum delay (in milliseconds) between packets in
848#     the announcement (Since 4.0)
849#
850# @announce-rounds: Number of self-announce packets sent after
851#     migration (Since 4.0)
852#
853# @announce-step: Increase in delay (in milliseconds) between
854#     subsequent packets in the announcement (Since 4.0)
855#
856# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
857#     bytes_xfer_period to trigger throttling.  It is expressed as
858#     percentage.  The default value is 50.  (Since 5.0)
859#
860# @cpu-throttle-initial: Initial percentage of time guest cpus are
861#     throttled when migration auto-converge is activated.  The
862#     default value is 20.  (Since 2.7)
863#
864# @cpu-throttle-increment: throttle percentage increase each time
865#     auto-converge detects that migration is not making progress.
866#     The default value is 10.  (Since 2.7)
867#
# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage.
#     At the tail stage of throttling, the Guest is very sensitive to
#     CPU percentage while the @cpu-throttle-increment is excessive
#     usually at tail stage.  If this parameter is true, we will
#     compute the ideal CPU percentage used by the Guest, which may
#     exactly make the dirty rate match the dirty rate threshold.
#     Then we will choose a smaller throttle increment between the one
#     specified by @cpu-throttle-increment and the one generated by
#     ideal CPU percentage.  Therefore, it is compatible with
#     traditional throttling, meanwhile the throttle increment won't
#     be excessive at tail stage.  The default value is false.  (Since
#     5.1)
880#
881# @tls-creds: ID of the 'tls-creds' object that provides credentials
882#     for establishing a TLS connection over the migration data
883#     channel.  On the outgoing side of the migration, the credentials
884#     must be for a 'client' endpoint, while for the incoming side the
885#     credentials must be for a 'server' endpoint.  Setting this to a
886#     non-empty string enables TLS for all migrations.  An empty
887#     string means that QEMU will use plain text mode for migration,
888#     rather than TLS.  This is the default.  (Since 2.7)
889#
890# @tls-hostname: migration target's hostname for validating the
891#     server's x509 certificate identity.  If empty, QEMU will use the
892#     hostname from the migration URI, if any.  A non-empty value is
893#     required when using x509 based TLS credentials and the migration
894#     URI does not include a hostname, such as fd: or exec: based
895#     migration.  (Since 2.7)
896#
897#     Note: empty value works only since 2.9.
898#
899# @tls-authz: ID of the 'authz' object subclass that provides access
900#     control checking of the TLS x509 certificate distinguished name.
901#     This object is only resolved at time of use, so can be deleted
902#     and recreated on the fly while the migration server is active.
#     If missing, it will default to denying access.  (Since 4.0)
904#
905# @max-bandwidth: maximum speed for migration, in bytes per second.
906#     (Since 2.8)
907#
908# @avail-switchover-bandwidth: to set the available bandwidth that
909#     migration can use during switchover phase.  NOTE!  This does not
910#     limit the bandwidth during switchover, but only for calculations
911#     when making decisions to switchover.  By default, this value is
912#     zero, which means QEMU will estimate the bandwidth
913#     automatically.  This can be set when the estimated value is not
914#     accurate, while the user is able to guarantee such bandwidth is
915#     available when switching over.  When specified correctly, this
916#     can make the switchover decision much more accurate.
917#     (Since 8.2)
918#
919# @downtime-limit: set maximum tolerated downtime for migration.
920#     maximum downtime in milliseconds (Since 2.8)
921#
922# @x-checkpoint-delay: The delay time (in ms) between two COLO
923#     checkpoints in periodic mode.  (Since 2.8)
924#
# @multifd-channels: Number of channels used to migrate data in
#     parallel.  This is the same as the number of sockets used for
#     migration.  The default value is 2 (since 4.0)
928#
929# @xbzrle-cache-size: cache size to be used by XBZRLE migration.  It
930#     needs to be a multiple of the target page size and a power of 2
931#     (Since 2.11)
932#
933# @max-postcopy-bandwidth: Background transfer bandwidth during
934#     postcopy.  Defaults to 0 (unlimited).  In bytes per second.
935#     (Since 3.0)
936#
937# @max-cpu-throttle: maximum cpu throttle percentage.  Defaults to 99.
938#     (Since 3.1)
939#
940# @multifd-compression: Which compression method to use.  Defaults to
941#     none.  (Since 5.0)
942#
943# @multifd-zlib-level: Set the compression level to be used in live
944#     migration, the compression level is an integer between 0 and 9,
945#     where 0 means no compression, 1 means the best compression
946#     speed, and 9 means best compression ratio which will consume
947#     more CPU. Defaults to 1.  (Since 5.0)
948#
949# @multifd-zstd-level: Set the compression level to be used in live
950#     migration, the compression level is an integer between 0 and 20,
951#     where 0 means no compression, 1 means the best compression
952#     speed, and 20 means best compression ratio which will consume
953#     more CPU. Defaults to 1.  (Since 5.0)
954#
955# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
956#     aliases for the purpose of dirty bitmap migration.  Such aliases
957#     may for example be the corresponding names on the opposite site.
958#     The mapping must be one-to-one, but not necessarily complete: On
959#     the source, unmapped bitmaps and all bitmaps on unmapped nodes
960#     will be ignored.  On the destination, encountering an unmapped
961#     alias in the incoming migration stream will result in a report,
962#     and all further bitmap migration data will then be discarded.
963#     Note that the destination does not know about bitmaps it does
964#     not receive, so there is no limitation or requirement regarding
965#     the number of bitmaps received, or how they are named, or on
966#     which nodes they are placed.  By default (when this parameter
967#     has never been set), bitmap names are mapped to themselves.
968#     Nodes are mapped to their block device name if there is one, and
969#     to their node name otherwise.  (Since 5.2)
970#
971# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty
972#     limit during live migration.  Should be in the range 1 to
973#     1000ms.  Defaults to 1000ms.  (Since 8.1)
974#
975# @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration.
976#     Defaults to 1.  (Since 8.1)
977#
978# @mode: Migration mode.  See description in @MigMode.  Default is
979#     'normal'.  (Since 8.2)
980#
981# @zero-page-detection: Whether and how to detect zero pages.
982#     See description in @ZeroPageDetection.  Default is 'multifd'.
983#     (since 9.0)
984#
985# Features:
986#
987# @unstable: Members @x-checkpoint-delay and
988#     @x-vcpu-dirty-limit-period are experimental.
989#
990# TODO: either fuse back into MigrationParameters, or make
991#     MigrationParameters members mandatory
992#
993# Since: 2.4
994##
995{ 'struct': 'MigrateSetParameters',
996  'data': { '*announce-initial': 'size',
997            '*announce-max': 'size',
998            '*announce-rounds': 'size',
999            '*announce-step': 'size',
1000            '*throttle-trigger-threshold': 'uint8',
1001            '*cpu-throttle-initial': 'uint8',
1002            '*cpu-throttle-increment': 'uint8',
1003            '*cpu-throttle-tailslow': 'bool',
1004            '*tls-creds': 'StrOrNull',
1005            '*tls-hostname': 'StrOrNull',
1006            '*tls-authz': 'StrOrNull',
1007            '*max-bandwidth': 'size',
1008            '*avail-switchover-bandwidth': 'size',
1009            '*downtime-limit': 'uint64',
1010            '*x-checkpoint-delay': { 'type': 'uint32',
1011                                     'features': [ 'unstable' ] },
1012            '*multifd-channels': 'uint8',
1013            '*xbzrle-cache-size': 'size',
1014            '*max-postcopy-bandwidth': 'size',
1015            '*max-cpu-throttle': 'uint8',
1016            '*multifd-compression': 'MultiFDCompression',
1017            '*multifd-zlib-level': 'uint8',
1018            '*multifd-zstd-level': 'uint8',
1019            '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ],
1020            '*x-vcpu-dirty-limit-period': { 'type': 'uint64',
1021                                            'features': [ 'unstable' ] },
1022            '*vcpu-dirty-limit': 'uint64',
1023            '*mode': 'MigMode',
1024            '*zero-page-detection': 'ZeroPageDetection'} }
1025
1026##
1027# @migrate-set-parameters:
1028#
1029# Set various migration parameters.
1030#
1031# Since: 2.4
1032#
1033# Example:
1034#
1035#     -> { "execute": "migrate-set-parameters" ,
1036#          "arguments": { "multifd-channels": 5 } }
1037#     <- { "return": {} }
1038##
1039{ 'command': 'migrate-set-parameters', 'boxed': true,
1040  'data': 'MigrateSetParameters' }
1041
1042##
1043# @MigrationParameters:
1044#
1045# The optional members aren't actually optional.
1046#
1047# @announce-initial: Initial delay (in milliseconds) before sending
1048#     the first announce (Since 4.0)
1049#
1050# @announce-max: Maximum delay (in milliseconds) between packets in
1051#     the announcement (Since 4.0)
1052#
1053# @announce-rounds: Number of self-announce packets sent after
1054#     migration (Since 4.0)
1055#
1056# @announce-step: Increase in delay (in milliseconds) between
1057#     subsequent packets in the announcement (Since 4.0)
1058#
1059# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
1060#     bytes_xfer_period to trigger throttling.  It is expressed as
1061#     percentage.  The default value is 50.  (Since 5.0)
1062#
1063# @cpu-throttle-initial: Initial percentage of time guest cpus are
1064#     throttled when migration auto-converge is activated.  (Since
1065#     2.7)
1066#
1067# @cpu-throttle-increment: throttle percentage increase each time
1068#     auto-converge detects that migration is not making progress.
1069#     (Since 2.7)
1070#
# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage.
#     At the tail stage of throttling, the Guest is very sensitive to
#     CPU percentage while the @cpu-throttle-increment is excessive
#     usually at tail stage.  If this parameter is true, we will
#     compute the ideal CPU percentage used by the Guest, which may
#     exactly make the dirty rate match the dirty rate threshold.
#     Then we will choose a smaller throttle increment between the one
#     specified by @cpu-throttle-increment and the one generated by
#     ideal CPU percentage.  Therefore, it is compatible with
#     traditional throttling, meanwhile the throttle increment won't
#     be excessive at tail stage.  The default value is false.  (Since
#     5.1)
1083#
1084# @tls-creds: ID of the 'tls-creds' object that provides credentials
1085#     for establishing a TLS connection over the migration data
1086#     channel.  On the outgoing side of the migration, the credentials
1087#     must be for a 'client' endpoint, while for the incoming side the
1088#     credentials must be for a 'server' endpoint.  An empty string
1089#     means that QEMU will use plain text mode for migration, rather
1090#     than TLS.  (Since 2.7)
1091#
1092#     Note: 2.8 omits empty @tls-creds instead.
1093#
1094# @tls-hostname: migration target's hostname for validating the
1095#     server's x509 certificate identity.  If empty, QEMU will use the
1096#     hostname from the migration URI, if any.  (Since 2.7)
1097#
1098#     Note: 2.8 omits empty @tls-hostname instead.
1099#
1100# @tls-authz: ID of the 'authz' object subclass that provides access
1101#     control checking of the TLS x509 certificate distinguished name.
1102#     (Since 4.0)
1103#
1104# @max-bandwidth: maximum speed for migration, in bytes per second.
1105#     (Since 2.8)
1106#
1107# @avail-switchover-bandwidth: to set the available bandwidth that
1108#     migration can use during switchover phase.  NOTE!  This does not
1109#     limit the bandwidth during switchover, but only for calculations
1110#     when making decisions to switchover.  By default, this value is
1111#     zero, which means QEMU will estimate the bandwidth
1112#     automatically.  This can be set when the estimated value is not
1113#     accurate, while the user is able to guarantee such bandwidth is
1114#     available when switching over.  When specified correctly, this
1115#     can make the switchover decision much more accurate.
1116#     (Since 8.2)
1117#
1118# @downtime-limit: set maximum tolerated downtime for migration.
1119#     maximum downtime in milliseconds (Since 2.8)
1120#
1121# @x-checkpoint-delay: the delay time between two COLO checkpoints.
1122#     (Since 2.8)
1123#
# @multifd-channels: Number of channels used to migrate data in
#     parallel.  This is the same as the number of sockets used for
#     migration.  The default value is 2 (since 4.0)
1127#
1128# @xbzrle-cache-size: cache size to be used by XBZRLE migration.  It
1129#     needs to be a multiple of the target page size and a power of 2
1130#     (Since 2.11)
1131#
1132# @max-postcopy-bandwidth: Background transfer bandwidth during
1133#     postcopy.  Defaults to 0 (unlimited).  In bytes per second.
1134#     (Since 3.0)
1135#
1136# @max-cpu-throttle: maximum cpu throttle percentage.  Defaults to 99.
1137#     (Since 3.1)
1138#
1139# @multifd-compression: Which compression method to use.  Defaults to
1140#     none.  (Since 5.0)
1141#
1142# @multifd-zlib-level: Set the compression level to be used in live
1143#     migration, the compression level is an integer between 0 and 9,
1144#     where 0 means no compression, 1 means the best compression
1145#     speed, and 9 means best compression ratio which will consume
1146#     more CPU. Defaults to 1.  (Since 5.0)
1147#
1148# @multifd-zstd-level: Set the compression level to be used in live
1149#     migration, the compression level is an integer between 0 and 20,
1150#     where 0 means no compression, 1 means the best compression
1151#     speed, and 20 means best compression ratio which will consume
1152#     more CPU. Defaults to 1.  (Since 5.0)
1153#
1154# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
1155#     aliases for the purpose of dirty bitmap migration.  Such aliases
1156#     may for example be the corresponding names on the opposite site.
1157#     The mapping must be one-to-one, but not necessarily complete: On
1158#     the source, unmapped bitmaps and all bitmaps on unmapped nodes
1159#     will be ignored.  On the destination, encountering an unmapped
1160#     alias in the incoming migration stream will result in a report,
1161#     and all further bitmap migration data will then be discarded.
1162#     Note that the destination does not know about bitmaps it does
1163#     not receive, so there is no limitation or requirement regarding
1164#     the number of bitmaps received, or how they are named, or on
1165#     which nodes they are placed.  By default (when this parameter
1166#     has never been set), bitmap names are mapped to themselves.
1167#     Nodes are mapped to their block device name if there is one, and
1168#     to their node name otherwise.  (Since 5.2)
1169#
1170# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty
1171#     limit during live migration.  Should be in the range 1 to
1172#     1000ms.  Defaults to 1000ms.  (Since 8.1)
1173#
1174# @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration.
1175#     Defaults to 1.  (Since 8.1)
1176#
1177# @mode: Migration mode.  See description in @MigMode.  Default is
1178#        'normal'.  (Since 8.2)
1179#
1180# @zero-page-detection: Whether and how to detect zero pages.
1181#     See description in @ZeroPageDetection.  Default is 'multifd'.
1182#     (since 9.0)
1183#
1184# Features:
1185#
1186# @unstable: Members @x-checkpoint-delay and
1187#     @x-vcpu-dirty-limit-period are experimental.
1188#
1189# Since: 2.4
1190##
1191{ 'struct': 'MigrationParameters',
1192  'data': { '*announce-initial': 'size',
1193            '*announce-max': 'size',
1194            '*announce-rounds': 'size',
1195            '*announce-step': 'size',
1196            '*throttle-trigger-threshold': 'uint8',
1197            '*cpu-throttle-initial': 'uint8',
1198            '*cpu-throttle-increment': 'uint8',
1199            '*cpu-throttle-tailslow': 'bool',
1200            '*tls-creds': 'str',
1201            '*tls-hostname': 'str',
1202            '*tls-authz': 'str',
1203            '*max-bandwidth': 'size',
1204            '*avail-switchover-bandwidth': 'size',
1205            '*downtime-limit': 'uint64',
1206            '*x-checkpoint-delay': { 'type': 'uint32',
1207                                     'features': [ 'unstable' ] },
1208            '*multifd-channels': 'uint8',
1209            '*xbzrle-cache-size': 'size',
1210            '*max-postcopy-bandwidth': 'size',
1211            '*max-cpu-throttle': 'uint8',
1212            '*multifd-compression': 'MultiFDCompression',
1213            '*multifd-zlib-level': 'uint8',
1214            '*multifd-zstd-level': 'uint8',
1215            '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ],
1216            '*x-vcpu-dirty-limit-period': { 'type': 'uint64',
1217                                            'features': [ 'unstable' ] },
1218            '*vcpu-dirty-limit': 'uint64',
1219            '*mode': 'MigMode',
1220            '*zero-page-detection': 'ZeroPageDetection'} }
1221
1222##
1223# @query-migrate-parameters:
1224#
1225# Returns information about the current migration parameters
1226#
1227# Returns: @MigrationParameters
1228#
1229# Since: 2.4
1230#
1231# Example:
1232#
1233#     -> { "execute": "query-migrate-parameters" }
1234#     <- { "return": {
1235#              "multifd-channels": 2,
1236#              "cpu-throttle-increment": 10,
1237#              "cpu-throttle-initial": 20,
1238#              "max-bandwidth": 33554432,
1239#              "downtime-limit": 300
1240#           }
1241#        }
1242##
1243{ 'command': 'query-migrate-parameters',
1244  'returns': 'MigrationParameters' }
1245
1246##
1247# @migrate-start-postcopy:
1248#
1249# Followup to a migration command to switch the migration to postcopy
1250# mode.  The postcopy-ram capability must be set on both source and
1251# destination before the original migration command.
1252#
1253# Since: 2.5
1254#
1255# Example:
1256#
1257#     -> { "execute": "migrate-start-postcopy" }
1258#     <- { "return": {} }
1259##
1260{ 'command': 'migrate-start-postcopy' }
1261
1262##
1263# @MIGRATION:
1264#
1265# Emitted when a migration event happens
1266#
1267# @status: @MigrationStatus describing the current migration status.
1268#
1269# Since: 2.4
1270#
1271# Example:
1272#
1273#     <- {"timestamp": {"seconds": 1432121972, "microseconds": 744001},
1274#         "event": "MIGRATION",
1275#         "data": {"status": "completed"} }
1276##
1277{ 'event': 'MIGRATION',
1278  'data': {'status': 'MigrationStatus'}}
1279
1280##
1281# @MIGRATION_PASS:
1282#
1283# Emitted from the source side of a migration at the start of each
1284# pass (when it syncs the dirty bitmap)
1285#
1286# @pass: An incrementing count (starting at 1 on the first pass)
1287#
1288# Since: 2.6
1289#
1290# Example:
1291#
1292#     <- { "timestamp": {"seconds": 1449669631, "microseconds": 239225},
1293#           "event": "MIGRATION_PASS", "data": {"pass": 2} }
1294##
1295{ 'event': 'MIGRATION_PASS',
1296  'data': { 'pass': 'int' } }
1297
1298##
1299# @COLOMessage:
1300#
1301# The message transmission between Primary side and Secondary side.
1302#
1303# @checkpoint-ready: Secondary VM (SVM) is ready for checkpointing
1304#
1305# @checkpoint-request: Primary VM (PVM) tells SVM to prepare for
1306#     checkpointing
1307#
1308# @checkpoint-reply: SVM gets PVM's checkpoint request
1309#
1310# @vmstate-send: VM's state will be sent by PVM.
1311#
1312# @vmstate-size: The total size of VMstate.
1313#
1314# @vmstate-received: VM's state has been received by SVM.
1315#
1316# @vmstate-loaded: VM's state has been loaded by SVM.
1317#
1318# Since: 2.8
1319##
1320{ 'enum': 'COLOMessage',
1321  'data': [ 'checkpoint-ready', 'checkpoint-request', 'checkpoint-reply',
1322            'vmstate-send', 'vmstate-size', 'vmstate-received',
1323            'vmstate-loaded' ] }
1324
1325##
1326# @COLOMode:
1327#
1328# The COLO current mode.
1329#
1330# @none: COLO is disabled.
1331#
1332# @primary: COLO node in primary side.
1333#
1334# @secondary: COLO node in slave side.
1335#
1336# Since: 2.8
1337##
1338{ 'enum': 'COLOMode',
1339  'data': [ 'none', 'primary', 'secondary'] }
1340
1341##
1342# @FailoverStatus:
1343#
1344# An enumeration of COLO failover status
1345#
1346# @none: no failover has ever happened
1347#
1348# @require: got failover requirement but not handled
1349#
1350# @active: in the process of doing failover
1351#
1352# @completed: finish the process of failover
1353#
1354# @relaunch: restart the failover process, from 'none' -> 'completed'
1355#     (Since 2.9)
1356#
1357# Since: 2.8
1358##
1359{ 'enum': 'FailoverStatus',
1360  'data': [ 'none', 'require', 'active', 'completed', 'relaunch' ] }
1361
1362##
1363# @COLO_EXIT:
1364#
1365# Emitted when VM finishes COLO mode due to some errors happening or
1366# at the request of users.
1367#
1368# @mode: report COLO mode when COLO exited.
1369#
1370# @reason: describes the reason for the COLO exit.
1371#
1372# Since: 3.1
1373#
1374# Example:
1375#
1376#     <- { "timestamp": {"seconds": 2032141960, "microseconds": 417172},
1377#          "event": "COLO_EXIT", "data": {"mode": "primary", "reason": "request" } }
1378##
1379{ 'event': 'COLO_EXIT',
1380  'data': {'mode': 'COLOMode', 'reason': 'COLOExitReason' } }
1381
1382##
1383# @COLOExitReason:
1384#
1385# The reason for a COLO exit.
1386#
1387# @none: failover has never happened.  This state does not occur in
1388#     the COLO_EXIT event, and is only visible in the result of
1389#     query-colo-status.
1390#
1391# @request: COLO exit is due to an external request.
1392#
1393# @error: COLO exit is due to an internal error.
1394#
1395# @processing: COLO is currently handling a failover (since 4.0).
1396#
1397# Since: 3.1
1398##
1399{ 'enum': 'COLOExitReason',
1400  'data': [ 'none', 'request', 'error' , 'processing' ] }
1401
1402##
1403# @x-colo-lost-heartbeat:
1404#
1405# Tell qemu that heartbeat is lost, request it to do takeover
1406# procedures.  If this command is sent to the PVM, the Primary side
1407# will exit COLO mode.  If sent to the Secondary, the Secondary side
1408# will run failover work, then takes over server operation to become
1409# the service VM.
1410#
1411# Features:
1412#
1413# @unstable: This command is experimental.
1414#
1415# Since: 2.8
1416#
1417# Example:
1418#
1419#     -> { "execute": "x-colo-lost-heartbeat" }
1420#     <- { "return": {} }
1421##
1422{ 'command': 'x-colo-lost-heartbeat',
1423  'features': [ 'unstable' ],
1424  'if': 'CONFIG_REPLICATION' }
1425
1426##
1427# @migrate_cancel:
1428#
1429# Cancel the current executing migration process.
1430#
1431# Notes: This command succeeds even if there is no migration process
1432#     running.
1433#
1434# Since: 0.14
1435#
1436# Example:
1437#
1438#     -> { "execute": "migrate_cancel" }
1439#     <- { "return": {} }
1440##
1441{ 'command': 'migrate_cancel' }
1442
1443##
1444# @migrate-continue:
1445#
1446# Continue migration when it's in a paused state.
1447#
1448# @state: The state the migration is currently expected to be in
1449#
1450# Since: 2.11
1451#
1452# Example:
1453#
1454#     -> { "execute": "migrate-continue" , "arguments":
1455#          { "state": "pre-switchover" } }
1456#     <- { "return": {} }
1457##
1458{ 'command': 'migrate-continue', 'data': {'state': 'MigrationStatus'} }
1459
1460##
1461# @MigrationAddressType:
1462#
1463# The migration stream transport mechanisms.
1464#
1465# @socket: Migrate via socket.
1466#
1467# @exec: Direct the migration stream to another process.
1468#
1469# @rdma: Migrate via RDMA.
1470#
1471# @file: Direct the migration stream to a file.
1472#
1473# Since: 8.2
1474##
1475{ 'enum': 'MigrationAddressType',
1476  'data': [ 'socket', 'exec', 'rdma', 'file' ] }
1477
1478##
1479# @FileMigrationArgs:
1480#
1481# @filename: The file to receive the migration stream
1482#
1483# @offset: The file offset where the migration stream will start
1484#
1485# Since: 8.2
1486##
1487{ 'struct': 'FileMigrationArgs',
1488  'data': { 'filename': 'str',
1489            'offset': 'uint64' } }
1490
1491##
1492# @MigrationExecCommand:
1493#
1494# @args: command (list head) and arguments to execute.
1495#
1496# Since: 8.2
1497##
1498{ 'struct': 'MigrationExecCommand',
1499  'data': {'args': [ 'str' ] } }
1500
1501##
1502# @MigrationAddress:
1503#
1504# Migration endpoint configuration.
1505#
1506# @transport: The migration stream transport mechanism
1507#
1508# Since: 8.2
1509##
1510{ 'union': 'MigrationAddress',
1511  'base': { 'transport' : 'MigrationAddressType'},
1512  'discriminator': 'transport',
1513  'data': {
1514    'socket': 'SocketAddress',
1515    'exec': 'MigrationExecCommand',
1516    'rdma': 'InetSocketAddress',
1517    'file': 'FileMigrationArgs' } }
1518
1519##
1520# @MigrationChannelType:
1521#
1522# The migration channel-type request options.
1523#
1524# @main: Main outbound migration channel.
1525#
1526# Since: 8.1
1527##
1528{ 'enum': 'MigrationChannelType',
1529  'data': [ 'main' ] }
1530
1531##
1532# @MigrationChannel:
1533#
1534# Migration stream channel parameters.
1535#
1536# @channel-type: Channel type for transferring packet information.
1537#
1538# @addr: Migration endpoint configuration on destination interface.
1539#
1540# Since: 8.1
1541##
1542{ 'struct': 'MigrationChannel',
1543  'data': {
1544      'channel-type': 'MigrationChannelType',
1545      'addr': 'MigrationAddress' } }
1546
1547##
1548# @migrate:
1549#
# Migrates the currently running guest to another Virtual Machine.
1551#
1552# @uri: the Uniform Resource Identifier of the destination VM
1553#
1554# @channels: list of migration stream channels with each stream in the
1555#     list connected to a destination interface endpoint.
1556#
1557# @detach: this argument exists only for compatibility reasons and is
1558#     ignored by QEMU
1559#
1560# @resume: resume one paused migration, default "off".  (since 3.0)
1561#
1562# Since: 0.14
1563#
1564# Notes:
1565#
1566#     1. The 'query-migrate' command should be used to check
1567#        migration's progress and final result (this information is
1568#        provided by the 'status' member)
1569#
1570#     2. All boolean arguments default to false
1571#
1572#     3. The user Monitor's "detach" argument is invalid in QMP and
1573#        should not be used
1574#
#     4. The uri argument should have the Uniform Resource Identifier
#        of the default destination VM.  This connection will be bound
#        to the default network.
1578#
1579#     5. For now, number of migration streams is restricted to one,
1580#        i.e. number of items in 'channels' list is just 1.
1581#
1582#     6. The 'uri' and 'channels' arguments are mutually exclusive;
1583#        exactly one of the two should be present.
1584#
1585# Example:
1586#
1587#     -> { "execute": "migrate", "arguments": { "uri": "tcp:0:4446" } }
1588#     <- { "return": {} }
1589#
1590#     -> { "execute": "migrate",
1591#          "arguments": {
1592#              "channels": [ { "channel-type": "main",
1593#                              "addr": { "transport": "socket",
1594#                                        "type": "inet",
1595#                                        "host": "10.12.34.9",
1596#                                        "port": "1050" } } ] } }
1597#     <- { "return": {} }
1598#
1599#     -> { "execute": "migrate",
1600#          "arguments": {
1601#              "channels": [ { "channel-type": "main",
1602#                              "addr": { "transport": "exec",
1603#                                        "args": [ "/bin/nc", "-p", "6000",
1604#                                                  "/some/sock" ] } } ] } }
1605#     <- { "return": {} }
1606#
1607#     -> { "execute": "migrate",
1608#          "arguments": {
1609#              "channels": [ { "channel-type": "main",
1610#                              "addr": { "transport": "rdma",
1611#                                        "host": "10.12.34.9",
1612#                                        "port": "1050" } } ] } }
1613#     <- { "return": {} }
1614#
1615#     -> { "execute": "migrate",
1616#          "arguments": {
1617#              "channels": [ { "channel-type": "main",
1618#                              "addr": { "transport": "file",
1619#                                        "filename": "/tmp/migfile",
1620#                                        "offset": "0x1000" } } ] } }
1621#     <- { "return": {} }
1622#
1623##
1624{ 'command': 'migrate',
1625  'data': {'*uri': 'str',
1626           '*channels': [ 'MigrationChannel' ],
1627           '*detach': 'bool', '*resume': 'bool' } }
1628
1629##
1630# @migrate-incoming:
1631#
1632# Start an incoming migration, the qemu must have been started with
1633# -incoming defer
1634#
1635# @uri: The Uniform Resource Identifier identifying the source or
1636#     address to listen on
1637#
1638# @channels: list of migration stream channels with each stream in the
1639#     list connected to a destination interface endpoint.
1640#
1641# @exit-on-error: Exit on incoming migration failure.  Default true.
1642#     When set to false, the failure triggers a MIGRATION event, and
1643#     error details could be retrieved with query-migrate.  (since 9.1)
1644#
1645# Since: 2.3
1646#
1647# Notes:
1648#
1649#     1. It's a bad idea to use a string for the uri, but it needs to
1650#        stay compatible with -incoming and the format of the uri is
1651#        already exposed above libvirt.
1652#
1653#     2. QEMU must be started with -incoming defer to allow
1654#        migrate-incoming to be used.
1655#
1656#     3. The uri format is the same as for -incoming
1657#
1658#     4. For now, number of migration streams is restricted to one,
1659#        i.e. number of items in 'channels' list is just 1.
1660#
1661#     5. The 'uri' and 'channels' arguments are mutually exclusive;
1662#        exactly one of the two should be present.
1663#
1664# Example:
1665#
1666#     -> { "execute": "migrate-incoming",
1667#          "arguments": { "uri": "tcp:0:4446" } }
1668#     <- { "return": {} }
1669#
1670#     -> { "execute": "migrate-incoming",
1671#          "arguments": {
1672#              "channels": [ { "channel-type": "main",
1673#                              "addr": { "transport": "socket",
1674#                                        "type": "inet",
1675#                                        "host": "10.12.34.9",
1676#                                        "port": "1050" } } ] } }
1677#     <- { "return": {} }
1678#
1679#     -> { "execute": "migrate-incoming",
1680#          "arguments": {
1681#              "channels": [ { "channel-type": "main",
1682#                              "addr": { "transport": "exec",
1683#                                        "args": [ "/bin/nc", "-p", "6000",
1684#                                                  "/some/sock" ] } } ] } }
1685#     <- { "return": {} }
1686#
1687#     -> { "execute": "migrate-incoming",
1688#          "arguments": {
1689#              "channels": [ { "channel-type": "main",
1690#                              "addr": { "transport": "rdma",
1691#                                        "host": "10.12.34.9",
1692#                                        "port": "1050" } } ] } }
1693#     <- { "return": {} }
1694##
1695{ 'command': 'migrate-incoming',
1696             'data': {'*uri': 'str',
1697                      '*channels': [ 'MigrationChannel' ],
1698                      '*exit-on-error': 'bool' } }
1699
1700##
1701# @xen-save-devices-state:
1702#
1703# Save the state of all devices to file.  The RAM and the block
1704# devices of the VM are not saved by this command.
1705#
1706# @filename: the file to save the state of the devices to as binary
1707#     data.  See xen-save-devices-state.txt for a description of the
1708#     binary format.
1709#
1710# @live: Optional argument to ask QEMU to treat this command as part
1711#     of a live migration.  Default to true.  (since 2.11)
1712#
1713# Since: 1.1
1714#
1715# Example:
1716#
1717#     -> { "execute": "xen-save-devices-state",
1718#          "arguments": { "filename": "/tmp/save" } }
1719#     <- { "return": {} }
1720##
1721{ 'command': 'xen-save-devices-state',
1722  'data': {'filename': 'str', '*live':'bool' } }
1723
1724##
1725# @xen-set-global-dirty-log:
1726#
1727# Enable or disable the global dirty log mode.
1728#
1729# @enable: true to enable, false to disable.
1730#
1731# Since: 1.3
1732#
1733# Example:
1734#
1735#     -> { "execute": "xen-set-global-dirty-log",
1736#          "arguments": { "enable": true } }
1737#     <- { "return": {} }
1738##
1739{ 'command': 'xen-set-global-dirty-log', 'data': { 'enable': 'bool' } }
1740
1741##
1742# @xen-load-devices-state:
1743#
1744# Load the state of all devices from file.  The RAM and the block
1745# devices of the VM are not loaded by this command.
1746#
1747# @filename: the file to load the state of the devices from as binary
1748#     data.  See xen-save-devices-state.txt for a description of the
1749#     binary format.
1750#
1751# Since: 2.7
1752#
1753# Example:
1754#
1755#     -> { "execute": "xen-load-devices-state",
1756#          "arguments": { "filename": "/tmp/resume" } }
1757#     <- { "return": {} }
1758##
1759{ 'command': 'xen-load-devices-state', 'data': {'filename': 'str'} }
1760
1761##
1762# @xen-set-replication:
1763#
1764# Enable or disable replication.
1765#
1766# @enable: true to enable, false to disable.
1767#
1768# @primary: true for primary or false for secondary.
1769#
1770# @failover: true to do failover, false to stop.  Cannot be specified
1771#     if 'enable' is true.  Default value is false.
1772#
1773# Example:
1774#
1775#     -> { "execute": "xen-set-replication",
1776#          "arguments": {"enable": true, "primary": false} }
1777#     <- { "return": {} }
1778#
1779# Since: 2.9
1780##
1781{ 'command': 'xen-set-replication',
1782  'data': { 'enable': 'bool', 'primary': 'bool', '*failover': 'bool' },
1783  'if': 'CONFIG_REPLICATION' }
1784
1785##
1786# @ReplicationStatus:
1787#
1788# The result format for 'query-xen-replication-status'.
1789#
1790# @error: true if an error happened, false if replication is normal.
1791#
1792# @desc: the human readable error description string, when @error is
1793#     'true'.
1794#
1795# Since: 2.9
1796##
1797{ 'struct': 'ReplicationStatus',
1798  'data': { 'error': 'bool', '*desc': 'str' },
1799  'if': 'CONFIG_REPLICATION' }
1800
1801##
1802# @query-xen-replication-status:
1803#
1804# Query replication status while the vm is running.
1805#
1806# Returns: A @ReplicationStatus object showing the status.
1807#
1808# Example:
1809#
1810#     -> { "execute": "query-xen-replication-status" }
1811#     <- { "return": { "error": false } }
1812#
1813# Since: 2.9
1814##
1815{ 'command': 'query-xen-replication-status',
1816  'returns': 'ReplicationStatus',
1817  'if': 'CONFIG_REPLICATION' }
1818
1819##
1820# @xen-colo-do-checkpoint:
1821#
1822# Xen uses this command to notify replication to trigger a checkpoint.
1823#
1824# Example:
1825#
1826#     -> { "execute": "xen-colo-do-checkpoint" }
1827#     <- { "return": {} }
1828#
1829# Since: 2.9
1830##
1831{ 'command': 'xen-colo-do-checkpoint',
1832  'if': 'CONFIG_REPLICATION' }
1833
1834##
1835# @COLOStatus:
1836#
1837# The result format for 'query-colo-status'.
1838#
1839# @mode: COLO running mode.  If COLO is running, this field will
1840#     return 'primary' or 'secondary'.
1841#
1842# @last-mode: COLO last running mode.  If COLO is running, this field
1843#     will return the same value as the @mode field; after failover,
1844#     this field can be used to get the last COLO mode.  (since 4.0)
1845#
1846# @reason: describes the reason for the COLO exit.
1847#
1848# Since: 3.1
1849##
1850{ 'struct': 'COLOStatus',
1851  'data': { 'mode': 'COLOMode', 'last-mode': 'COLOMode',
1852            'reason': 'COLOExitReason' },
1853  'if': 'CONFIG_REPLICATION' }
1854
1855##
1856# @query-colo-status:
1857#
1858# Query COLO status while the vm is running.
1859#
1860# Returns: A @COLOStatus object showing the status.
1861#
1862# Example:
1863#
1864#     -> { "execute": "query-colo-status" }
1865#     <- { "return": { "mode": "primary", "last-mode": "none", "reason": "request" } }
1866#
1867# Since: 3.1
1868##
1869{ 'command': 'query-colo-status',
1870  'returns': 'COLOStatus',
1871  'if': 'CONFIG_REPLICATION' }
1872
1873##
1874# @migrate-recover:
1875#
1876# Provide a recovery migration stream URI.
1877#
1878# @uri: the URI to be used for the recovery of migration stream.
1879#
1880# Example:
1881#
1882#     -> { "execute": "migrate-recover",
1883#          "arguments": { "uri": "tcp:192.168.1.200:12345" } }
1884#     <- { "return": {} }
1885#
1886# Since: 3.0
1887##
1888{ 'command': 'migrate-recover',
1889  'data': { 'uri': 'str' },
1890  'allow-oob': true }
1891
1892##
1893# @migrate-pause:
1894#
1895# Pause a migration.  Currently it only supports postcopy.
1896#
1897# Example:
1898#
1899#     -> { "execute": "migrate-pause" }
1900#     <- { "return": {} }
1901#
1902# Since: 3.0
1903##
1904{ 'command': 'migrate-pause', 'allow-oob': true }
1905
1906##
1907# @UNPLUG_PRIMARY:
1908#
1909# Emitted from source side of a migration when migration state is
1910# WAIT_UNPLUG.  Device was unplugged by guest operating system.  Device
1911# resources in QEMU are kept on standby to be able to re-plug it in
1912# case of migration failure.
1913#
1914# @device-id: QEMU device id of the unplugged device
1915#
1916# Since: 4.2
1917#
1918# Example:
1919#
1920#     <- { "event": "UNPLUG_PRIMARY",
1921#          "data": { "device-id": "hostdev0" },
1922#          "timestamp": { "seconds": 1265044230, "microseconds": 450486 } }
1923##
1924{ 'event': 'UNPLUG_PRIMARY',
1925  'data': { 'device-id': 'str' } }
1926
1927##
1928# @DirtyRateVcpu:
1929#
1930# Dirty rate of vcpu.
1931#
1932# @id: vcpu index.
1933#
1934# @dirty-rate: dirty rate.
1935#
1936# Since: 6.2
1937##
1938{ 'struct': 'DirtyRateVcpu',
1939  'data': { 'id': 'int', 'dirty-rate': 'int64' } }
1940
1941##
1942# @DirtyRateStatus:
1943#
1944# Dirty page rate measurement status.
1945#
1946# @unstarted: measuring thread has not been started yet
1947#
1948# @measuring: measuring thread is running
1949#
1950# @measured: dirty page rate is measured and the results are available
1951#
1952# Since: 5.2
1953##
1954{ 'enum': 'DirtyRateStatus',
1955  'data': [ 'unstarted', 'measuring', 'measured'] }
1956
1957##
1958# @DirtyRateMeasureMode:
1959#
1960# Method used to measure dirty page rate.  Differences between
1961# available methods are explained in @calc-dirty-rate.
1962#
1963# @page-sampling: use page sampling
1964#
1965# @dirty-ring: use dirty ring
1966#
1967# @dirty-bitmap: use dirty bitmap
1968#
1969# Since: 6.2
1970##
1971{ 'enum': 'DirtyRateMeasureMode',
1972  'data': ['page-sampling', 'dirty-ring', 'dirty-bitmap'] }
1973
1974##
1975# @TimeUnit:
1976#
1977# Specifies unit in which time-related value is specified.
1978#
1979# @second: value is in seconds
1980#
1981# @millisecond: value is in milliseconds
1982#
1983# Since: 8.2
1984##
1985{ 'enum': 'TimeUnit',
1986  'data': ['second', 'millisecond'] }
1987
1988##
1989# @DirtyRateInfo:
1990#
1991# Information about measured dirty page rate.
1992#
1993# @dirty-rate: an estimate of the dirty page rate of the VM in units
1994#     of MiB/s.  Value is present only when @status is 'measured'.
1995#
1996# @status: current status of dirty page rate measurements
1997#
1998# @start-time: start time in units of second for calculation
1999#
2000# @calc-time: time period for which dirty page rate was measured,
2001#     expressed and rounded down to @calc-time-unit.
2002#
2003# @calc-time-unit: time unit of @calc-time  (Since 8.2)
2004#
2005# @sample-pages: number of sampled pages per GiB of guest memory.
2006#     Valid only in page-sampling mode (Since 6.1)
2007#
2008# @mode: mode that was used to measure dirty page rate (Since 6.2)
2009#
2010# @vcpu-dirty-rate: dirty rate for each vCPU if dirty-ring mode was
2011#     specified (Since 6.2)
2012#
2013# Since: 5.2
2014##
2015{ 'struct': 'DirtyRateInfo',
2016  'data': {'*dirty-rate': 'int64',
2017           'status': 'DirtyRateStatus',
2018           'start-time': 'int64',
2019           'calc-time': 'int64',
2020           'calc-time-unit': 'TimeUnit',
2021           'sample-pages': 'uint64',
2022           'mode': 'DirtyRateMeasureMode',
2023           '*vcpu-dirty-rate': [ 'DirtyRateVcpu' ] } }
2024
2025##
2026# @calc-dirty-rate:
2027#
2028# Start measuring dirty page rate of the VM.  Results can be retrieved
2029# with @query-dirty-rate after measurements are completed.
2030#
2031# Dirty page rate is the number of pages changed in a given time
2032# period expressed in MiB/s.  The following methods of calculation are
2033# available:
2034#
2035# 1. In page sampling mode, a random subset of pages are selected and
2036#    hashed twice: once at the beginning of measurement time period,
2037#    and once again at the end.  If two hashes for some page are
2038#    different, the page is counted as changed.  Since this method
2039#    relies on sampling and hashing, calculated dirty page rate is
2040#    only an estimate of its true value.  Increasing @sample-pages
2041#    improves estimation quality at the cost of higher computational
2042#    overhead.
2043#
2044# 2. Dirty bitmap mode captures writes to memory (for example by
2045#    temporarily revoking write access to all pages) and counting page
2046#    faults.  Information about modified pages is collected into a
2047#    bitmap, where each bit corresponds to one guest page.  This mode
2048#    requires that KVM accelerator property "dirty-ring-size" is *not*
2049#    set.
2050#
2051# 3. Dirty ring mode is similar to dirty bitmap mode, but the
2052#    information about modified pages is collected into ring buffer.
2053#    This mode tracks page modification per each vCPU separately.  It
2054#    requires that KVM accelerator property "dirty-ring-size" is set.
2055#
2056# @calc-time: time period for which dirty page rate is calculated.
2057#     By default it is specified in seconds, but the unit can be set
2058#     explicitly with @calc-time-unit.  Note that larger @calc-time
2059#     values will typically result in smaller dirty page rates because
2060#     page dirtying is a one-time event.  Once some page is counted
2061#     as dirty during @calc-time period, further writes to this page
2062#     will not increase dirty page rate anymore.
2063#
2064# @calc-time-unit: time unit in which @calc-time is specified.
2065#     By default it is seconds.  (Since 8.2)
2066#
2067# @sample-pages: number of sampled pages per each GiB of guest memory.
2068#     Default value is 512.  For 4KiB guest pages this corresponds to
2069#     sampling ratio of 0.2%.  This argument is used only in page
2070#     sampling mode.  (Since 6.1)
2071#
2072# @mode: mechanism for tracking dirty pages.  Default value is
2073#     'page-sampling'.  Others are 'dirty-bitmap' and 'dirty-ring'.
2074#     (Since 6.1)
2075#
2076# Since: 5.2
2077#
2078# Example:
2079#
2080#     -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 1,
2081#                                                     "sample-pages": 512} }
2082#     <- { "return": {} }
2083#
2084#     Measure dirty rate using dirty bitmap for 500 milliseconds:
2085#
2086#     -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 500,
2087#         "calc-time-unit": "millisecond", "mode": "dirty-bitmap"} }
2088#
2089#     <- { "return": {} }
2090##
2091{ 'command': 'calc-dirty-rate', 'data': {'calc-time': 'int64',
2092                                         '*calc-time-unit': 'TimeUnit',
2093                                         '*sample-pages': 'int',
2094                                         '*mode': 'DirtyRateMeasureMode'} }
2095
2096##
2097# @query-dirty-rate:
2098#
2099# Query results of the most recent invocation of @calc-dirty-rate.
2100#
2101# @calc-time-unit: time unit in which to report calculation time.
2102#     By default it is reported in seconds.  (Since 8.2)
2103#
2104# Since: 5.2
2105#
2106# Examples:
2107#
2108#     1. Measurement is in progress:
2109#
2110#     <- {"return": {"status": "measuring", "sample-pages": 512,
2111#         "mode": "page-sampling", "start-time": 1693900454,
2112#         "calc-time": 10, "calc-time-unit": "second"}}
2113#
2114#     2. Measurement has been completed:
2115#
2116#     <- {"return": {"status": "measured", "sample-pages": 512,
2117#         "dirty-rate": 108, "mode": "page-sampling",
2118#         "start-time": 1693900454, "calc-time": 10, "calc-time-unit": "second"}}
2119##
2120{ 'command': 'query-dirty-rate', 'data': {'*calc-time-unit': 'TimeUnit' },
2121                                 'returns': 'DirtyRateInfo' }
2122
2123##
2124# @DirtyLimitInfo:
2125#
2126# Dirty page rate limit information of a virtual CPU.
2127#
2128# @cpu-index: index of a virtual CPU.
2129#
2130# @limit-rate: upper limit of dirty page rate (MB/s) for a virtual
2131#     CPU, 0 means unlimited.
2132#
2133# @current-rate: current dirty page rate (MB/s) for a virtual CPU.
2134#
2135# Since: 7.1
2136##
2137{ 'struct': 'DirtyLimitInfo',
2138  'data': { 'cpu-index': 'int',
2139            'limit-rate': 'uint64',
2140            'current-rate': 'uint64' } }
2141
2142##
2143# @set-vcpu-dirty-limit:
2144#
2145# Set the upper limit of dirty page rate for virtual CPUs.
2146#
2147# Requires KVM with accelerator property "dirty-ring-size" set.  A
2148# virtual CPU's dirty page rate is a measure of its memory load.  To
2149# observe dirty page rates, use @calc-dirty-rate.
2150#
2151# @cpu-index: index of a virtual CPU, default is all.
2152#
2153# @dirty-rate: upper limit of dirty page rate (MB/s) for virtual CPUs.
2154#
2155# Since: 7.1
2156#
2157# Example:
2158#
2159#     -> {"execute": "set-vcpu-dirty-limit",
2160#         "arguments": { "dirty-rate": 200,
2161#                        "cpu-index": 1 } }
2162#     <- { "return": {} }
2163##
2164{ 'command': 'set-vcpu-dirty-limit',
2165  'data': { '*cpu-index': 'int',
2166            'dirty-rate': 'uint64' } }
2167
2168##
2169# @cancel-vcpu-dirty-limit:
2170#
2171# Cancel the upper limit of dirty page rate for virtual CPUs.
2172#
2173# Cancel the dirty page limit for the vCPU which has been set with
2174# set-vcpu-dirty-limit command.  Note that this command requires
2175# support from dirty ring, same as the "set-vcpu-dirty-limit".
2176#
2177# @cpu-index: index of a virtual CPU, default is all.
2178#
2179# Since: 7.1
2180#
2181# Example:
2182#
2183#     -> {"execute": "cancel-vcpu-dirty-limit",
2184#         "arguments": { "cpu-index": 1 } }
2185#     <- { "return": {} }
2186##
2187{ 'command': 'cancel-vcpu-dirty-limit',
2188  'data': { '*cpu-index': 'int'} }
2189
2190##
2191# @query-vcpu-dirty-limit:
2192#
2193# Returns information about virtual CPU dirty page rate limits, if
2194# any.
2195#
2196# Since: 7.1
2197#
2198# Example:
2199#
2200#     -> {"execute": "query-vcpu-dirty-limit"}
2201#     <- {"return": [
2202#            { "limit-rate": 60, "current-rate": 3, "cpu-index": 0},
2203#            { "limit-rate": 60, "current-rate": 3, "cpu-index": 1}]}
2204##
2205{ 'command': 'query-vcpu-dirty-limit',
2206  'returns': [ 'DirtyLimitInfo' ] }
2207
2208##
2209# @MigrationThreadInfo:
2210#
2211# Information about migration threads.
2212#
2213# @name: the name of migration thread
2214#
2215# @thread-id: ID of the underlying host thread
2216#
2217# Since: 7.2
2218##
2219{ 'struct': 'MigrationThreadInfo',
2220  'data': {'name': 'str',
2221           'thread-id': 'int'} }
2222
2223##
2224# @query-migrationthreads:
2225#
2226# Returns information about migration threads.
2227#
2228# Returns: a list of @MigrationThreadInfo
2229#
2230# Since: 7.2
2231##
2232{ 'command': 'query-migrationthreads',
2233  'returns': ['MigrationThreadInfo'] }
2234
2235##
2236# @snapshot-save:
2237#
2238# Save a VM snapshot
2239#
2240# @job-id: identifier for the newly created job
2241#
2242# @tag: name of the snapshot to create
2243#
2244# @vmstate: block device node name to save vmstate to
2245#
2246# @devices: list of block device node names to save a snapshot to
2247#
2248# Applications should not assume that the snapshot save is complete
2249# when this command returns.  The job commands / events must be used
2250# to determine completion and to fetch details of any errors that
2251# arise.
2252#
2253# Note that execution of the guest CPUs may be stopped during the time
2254# it takes to save the snapshot.  A future version of QEMU may ensure
2255# CPUs are executing continuously.
2256#
2257# It is strongly recommended that @devices contain all writable block
2258# device nodes if a consistent snapshot is required.
2259#
2260# If @tag already exists, an error will be reported
2261#
2262# Example:
2263#
2264#     -> { "execute": "snapshot-save",
2265#          "arguments": {
2266#             "job-id": "snapsave0",
2267#             "tag": "my-snap",
2268#             "vmstate": "disk0",
2269#             "devices": ["disk0", "disk1"]
2270#          }
2271#        }
2272#     <- { "return": { } }
2273#     <- {"event": "JOB_STATUS_CHANGE",
2274#         "timestamp": {"seconds": 1432121972, "microseconds": 744001},
2275#         "data": {"status": "created", "id": "snapsave0"}}
2276#     <- {"event": "JOB_STATUS_CHANGE",
2277#         "timestamp": {"seconds": 1432122172, "microseconds": 744001},
2278#         "data": {"status": "running", "id": "snapsave0"}}
2279#     <- {"event": "STOP",
2280#         "timestamp": {"seconds": 1432122372, "microseconds": 744001} }
2281#     <- {"event": "RESUME",
2282#         "timestamp": {"seconds": 1432122572, "microseconds": 744001} }
2283#     <- {"event": "JOB_STATUS_CHANGE",
2284#         "timestamp": {"seconds": 1432122772, "microseconds": 744001},
2285#         "data": {"status": "waiting", "id": "snapsave0"}}
2286#     <- {"event": "JOB_STATUS_CHANGE",
2287#         "timestamp": {"seconds": 1432122972, "microseconds": 744001},
2288#         "data": {"status": "pending", "id": "snapsave0"}}
2289#     <- {"event": "JOB_STATUS_CHANGE",
2290#         "timestamp": {"seconds": 1432123172, "microseconds": 744001},
2291#         "data": {"status": "concluded", "id": "snapsave0"}}
2292#     -> {"execute": "query-jobs"}
2293#     <- {"return": [{"current-progress": 1,
2294#                     "status": "concluded",
2295#                     "total-progress": 1,
2296#                     "type": "snapshot-save",
2297#                     "id": "snapsave0"}]}
2298#
2299# Since: 6.0
2300##
2301{ 'command': 'snapshot-save',
2302  'data': { 'job-id': 'str',
2303            'tag': 'str',
2304            'vmstate': 'str',
2305            'devices': ['str'] } }
2306
2307##
2308# @snapshot-load:
2309#
2310# Load a VM snapshot
2311#
2312# @job-id: identifier for the newly created job
2313#
2314# @tag: name of the snapshot to load.
2315#
2316# @vmstate: block device node name to load vmstate from
2317#
2318# @devices: list of block device node names to load a snapshot from
2319#
2320# Applications should not assume that the snapshot load is complete
2321# when this command returns.  The job commands / events must be used
2322# to determine completion and to fetch details of any errors that
2323# arise.
2324#
2325# Note that execution of the guest CPUs will be stopped during the
2326# time it takes to load the snapshot.
2327#
2328# It is strongly recommended that @devices contain all writable block
2329# device nodes that can have changed since the original @snapshot-save
2330# command execution.
2331#
2332# Example:
2333#
2334#     -> { "execute": "snapshot-load",
2335#          "arguments": {
2336#             "job-id": "snapload0",
2337#             "tag": "my-snap",
2338#             "vmstate": "disk0",
2339#             "devices": ["disk0", "disk1"]
2340#          }
2341#        }
2342#     <- { "return": { } }
2343#     <- {"event": "JOB_STATUS_CHANGE",
2344#         "timestamp": {"seconds": 1472124172, "microseconds": 744001},
2345#         "data": {"status": "created", "id": "snapload0"}}
2346#     <- {"event": "JOB_STATUS_CHANGE",
2347#         "timestamp": {"seconds": 1472125172, "microseconds": 744001},
2348#         "data": {"status": "running", "id": "snapload0"}}
2349#     <- {"event": "STOP",
2350#         "timestamp": {"seconds": 1472125472, "microseconds": 744001} }
2351#     <- {"event": "RESUME",
2352#         "timestamp": {"seconds": 1472125872, "microseconds": 744001} }
2353#     <- {"event": "JOB_STATUS_CHANGE",
2354#         "timestamp": {"seconds": 1472126172, "microseconds": 744001},
2355#         "data": {"status": "waiting", "id": "snapload0"}}
2356#     <- {"event": "JOB_STATUS_CHANGE",
2357#         "timestamp": {"seconds": 1472127172, "microseconds": 744001},
2358#         "data": {"status": "pending", "id": "snapload0"}}
2359#     <- {"event": "JOB_STATUS_CHANGE",
2360#         "timestamp": {"seconds": 1472128172, "microseconds": 744001},
2361#         "data": {"status": "concluded", "id": "snapload0"}}
2362#     -> {"execute": "query-jobs"}
2363#     <- {"return": [{"current-progress": 1,
2364#                     "status": "concluded",
2365#                     "total-progress": 1,
2366#                     "type": "snapshot-load",
2367#                     "id": "snapload0"}]}
2368#
2369# Since: 6.0
2370##
2371{ 'command': 'snapshot-load',
2372  'data': { 'job-id': 'str',
2373            'tag': 'str',
2374            'vmstate': 'str',
2375            'devices': ['str'] } }
2376
2377##
2378# @snapshot-delete:
2379#
2380# Delete a VM snapshot
2381#
2382# @job-id: identifier for the newly created job
2383#
2384# @tag: name of the snapshot to delete.
2385#
2386# @devices: list of block device node names to delete a snapshot from
2387#
2388# Applications should not assume that the snapshot delete is complete
2389# when this command returns.  The job commands / events must be used
2390# to determine completion and to fetch details of any errors that
2391# arise.
2392#
2393# Example:
2394#
2395#     -> { "execute": "snapshot-delete",
2396#          "arguments": {
2397#             "job-id": "snapdelete0",
2398#             "tag": "my-snap",
2399#             "devices": ["disk0", "disk1"]
2400#          }
2401#        }
2402#     <- { "return": { } }
2403#     <- {"event": "JOB_STATUS_CHANGE",
2404#         "timestamp": {"seconds": 1442124172, "microseconds": 744001},
2405#         "data": {"status": "created", "id": "snapdelete0"}}
2406#     <- {"event": "JOB_STATUS_CHANGE",
2407#         "timestamp": {"seconds": 1442125172, "microseconds": 744001},
2408#         "data": {"status": "running", "id": "snapdelete0"}}
2409#     <- {"event": "JOB_STATUS_CHANGE",
2410#         "timestamp": {"seconds": 1442126172, "microseconds": 744001},
2411#         "data": {"status": "waiting", "id": "snapdelete0"}}
2412#     <- {"event": "JOB_STATUS_CHANGE",
2413#         "timestamp": {"seconds": 1442127172, "microseconds": 744001},
2414#         "data": {"status": "pending", "id": "snapdelete0"}}
2415#     <- {"event": "JOB_STATUS_CHANGE",
2416#         "timestamp": {"seconds": 1442128172, "microseconds": 744001},
2417#         "data": {"status": "concluded", "id": "snapdelete0"}}
2418#     -> {"execute": "query-jobs"}
2419#     <- {"return": [{"current-progress": 1,
2420#                     "status": "concluded",
2421#                     "total-progress": 1,
2422#                     "type": "snapshot-delete",
2423#                     "id": "snapdelete0"}]}
2424#
2425# Since: 6.0
2426##
2427{ 'command': 'snapshot-delete',
2428  'data': { 'job-id': 'str',
2429            'tag': 'str',
2430            'devices': ['str'] } }
2431