xref: /openbmc/qemu/qapi/migration.json (revision b43b61d5)
1# -*- Mode: Python -*-
2# vim: filetype=python
3#
4
5##
6# = Migration
7##
8
9{ 'include': 'common.json' }
10{ 'include': 'sockets.json' }
11
12##
13# @MigrationStats:
14#
15# Detailed migration status.
16#
17# @transferred: amount of bytes already transferred to the target VM
18#
19# @remaining: amount of bytes remaining to be transferred to the
20#     target VM
21#
22# @total: total amount of bytes involved in the migration process
23#
24# @duplicate: number of duplicate (zero) pages (since 1.2)
25#
26# @normal: number of normal pages (since 1.2)
27#
28# @normal-bytes: number of normal bytes sent (since 1.2)
29#
30# @dirty-pages-rate: number of pages dirtied by second by the guest
31#     (since 1.3)
32#
33# @mbps: throughput in megabits/sec.  (since 1.6)
34#
35# @dirty-sync-count: number of times that dirty ram was synchronized
36#     (since 2.1)
37#
38# @postcopy-requests: The number of page requests received from the
39#     destination (since 2.7)
40#
41# @page-size: The number of bytes per page for the various page-based
42#     statistics (since 2.10)
43#
44# @multifd-bytes: The number of bytes sent through multifd (since 3.0)
45#
46# @pages-per-second: the number of memory pages transferred per second
47#     (Since 4.0)
48#
49# @precopy-bytes: The number of bytes sent in the pre-copy phase
50#     (since 7.0).
51#
52# @downtime-bytes: The number of bytes sent while the guest is paused
53#     (since 7.0).
54#
55# @postcopy-bytes: The number of bytes sent during the post-copy phase
56#     (since 7.0).
57#
58# @dirty-sync-missed-zero-copy: Number of times dirty RAM
59#     synchronization could not avoid copying dirty pages.  This is
60#     between 0 and @dirty-sync-count * @multifd-channels.  (since
61#     7.1)
62#
63# Since: 0.14
64##
{ 'struct': 'MigrationStats',
  'data': {'transferred': 'int', 'remaining': 'int', 'total': 'int',
           'duplicate': 'int',
           'normal': 'int',
           'normal-bytes': 'int', 'dirty-pages-rate': 'int',
           'mbps': 'number', 'dirty-sync-count': 'int',
           'postcopy-requests': 'int', 'page-size': 'int',
           'multifd-bytes': 'uint64', 'pages-per-second': 'uint64',
           'precopy-bytes': 'uint64', 'downtime-bytes': 'uint64',
           'postcopy-bytes': 'uint64',
           'dirty-sync-missed-zero-copy': 'uint64' } }
76
77##
78# @XBZRLECacheStats:
79#
80# Detailed XBZRLE migration cache statistics
81#
82# @cache-size: XBZRLE cache size
83#
84# @bytes: amount of bytes already transferred to the target VM
85#
86# @pages: amount of pages transferred to the target VM
87#
# @cache-miss: number of cache misses
89#
90# @cache-miss-rate: rate of cache miss (since 2.1)
91#
92# @encoding-rate: rate of encoded bytes (since 5.1)
93#
94# @overflow: number of overflows
95#
96# Since: 1.2
97##
{ 'struct': 'XBZRLECacheStats',
  'data': {
      'cache-size': 'size',
      'bytes': 'int',
      'pages': 'int',
      'cache-miss': 'int',
      'cache-miss-rate': 'number',
      'encoding-rate': 'number',
      'overflow': 'int'
  } }
102
103##
104# @CompressionStats:
105#
106# Detailed migration compression statistics
107#
108# @pages: amount of pages compressed and transferred to the target VM
109#
110# @busy: count of times that no free thread was available to compress
111#     data
112#
113# @busy-rate: rate of thread busy
114#
115# @compressed-size: amount of bytes after compression
116#
117# @compression-rate: rate of compressed size
118#
119# Since: 3.1
120##
{ 'struct': 'CompressionStats',
  'data': {
      'pages': 'int',
      'busy': 'int',
      'busy-rate': 'number',
      'compressed-size': 'int',
      'compression-rate': 'number'
  } }
124
125##
126# @MigrationStatus:
127#
128# An enumeration of migration status.
129#
130# @none: no migration has ever happened.
131#
132# @setup: migration process has been initiated.
133#
134# @cancelling: in the process of cancelling migration.
135#
136# @cancelled: cancelling migration is finished.
137#
138# @active: in the process of doing migration.
139#
140# @postcopy-active: like active, but now in postcopy mode.  (since
141#     2.5)
142#
143# @postcopy-paused: during postcopy but paused.  (since 3.0)
144#
145# @postcopy-recover: trying to recover from a paused postcopy.  (since
146#     3.0)
147#
148# @completed: migration is finished.
149#
150# @failed: some error occurred during migration process.
151#
152# @colo: VM is in the process of fault tolerance, VM can not get into
153#     this state unless colo capability is enabled for migration.
154#     (since 2.8)
155#
156# @pre-switchover: Paused before device serialisation.  (since 2.11)
157#
158# @device: During device serialisation when pause-before-switchover is
159#     enabled (since 2.11)
160#
161# @wait-unplug: wait for device unplug request by guest OS to be
162#     completed.  (since 4.2)
163#
164# Since: 2.3
165##
{ 'enum': 'MigrationStatus',
  'data': [ 'none', 'setup', 'cancelling', 'cancelled',
            'active', 'postcopy-active', 'postcopy-paused',
            'postcopy-recover', 'completed', 'failed', 'colo',
            'pre-switchover', 'device', 'wait-unplug' ] }

171##
172# @VfioStats:
173#
174# Detailed VFIO devices migration statistics
175#
176# @transferred: amount of bytes transferred to the target VM by VFIO
177#     devices
178#
179# Since: 5.2
180##
{ 'struct': 'VfioStats',
  'data': {
      'transferred': 'int'
  } }
183
184##
185# @MigrationInfo:
186#
187# Information about current migration process.
188#
189# @status: @MigrationStatus describing the current migration status.
190#     If this field is not returned, no migration process has been
191#     initiated
192#
193# @ram: @MigrationStats containing detailed migration status, only
#     returned if status is 'active' or 'completed' (since 1.2)
195#
196# @xbzrle-cache: @XBZRLECacheStats containing detailed XBZRLE
197#     migration statistics, only returned if XBZRLE feature is on and
198#     status is 'active' or 'completed' (since 1.2)
199#
200# @total-time: total amount of milliseconds since migration started.
201#     If migration has ended, it returns the total migration time.
202#     (since 1.2)
203#
# @downtime: only present when migration finishes correctly; total
#     downtime in milliseconds for the guest.  (since 1.3)
206#
# @expected-downtime: only present while migration is active; expected
208#     downtime in milliseconds for the guest in last walk of the dirty
209#     bitmap.  (since 1.3)
210#
211# @setup-time: amount of setup time in milliseconds *before* the
212#     iterations begin but *after* the QMP command is issued.  This is
213#     designed to provide an accounting of any activities (such as
214#     RDMA pinning) which may be expensive, but do not actually occur
215#     during the iterative migration rounds themselves.  (since 1.6)
216#
217# @cpu-throttle-percentage: percentage of time guest cpus are being
218#     throttled during auto-converge.  This is only present when
219#     auto-converge has started throttling guest cpus.  (Since 2.7)
220#
221# @error-desc: the human readable error description string.  Clients
222#     should not attempt to parse the error strings.  (Since 2.7)
223#
224# @postcopy-blocktime: total time when all vCPU were blocked during
225#     postcopy live migration.  This is only present when the
226#     postcopy-blocktime migration capability is enabled.  (Since 3.0)
227#
228# @postcopy-vcpu-blocktime: list of the postcopy blocktime per vCPU.
229#     This is only present when the postcopy-blocktime migration
230#     capability is enabled.  (Since 3.0)
231#
232# @socket-address: Only used for tcp, to know what the real port is
233#     (Since 4.0)
234#
235# @vfio: @VfioStats containing detailed VFIO devices migration
236#     statistics, only returned if VFIO device is present, migration
237#     is supported by all VFIO devices and status is 'active' or
238#     'completed' (since 5.2)
239#
240# @blocked-reasons: A list of reasons an outgoing migration is
241#     blocked.  Present and non-empty when migration is blocked.
242#     (since 6.0)
243#
244# @dirty-limit-throttle-time-per-round: Maximum throttle time
245#     (in microseconds) of virtual CPUs each dirty ring full round,
246#     which shows how MigrationCapability dirty-limit affects the
247#     guest during live migration.  (Since 8.1)
248#
249# @dirty-limit-ring-full-time: Estimated average dirty ring full time
250#     (in microseconds) for each dirty ring full round.  The value
251#     equals the dirty ring memory size divided by the average dirty
252#     page rate of the virtual CPU, which can be used to observe the
253#     average memory load of the virtual CPU indirectly.  Note that
254#     zero means guest doesn't dirty memory.  (Since 8.1)
255#
256# Since: 0.14
257##
{ 'struct': 'MigrationInfo',
  'data': {
      '*status': 'MigrationStatus',
      '*ram': 'MigrationStats',
      '*vfio': 'VfioStats',
      '*xbzrle-cache': 'XBZRLECacheStats',
      '*total-time': 'int',
      '*expected-downtime': 'int',
      '*downtime': 'int',
      '*setup-time': 'int',
      '*cpu-throttle-percentage': 'int',
      '*error-desc': 'str',
      '*blocked-reasons': ['str'],
      '*postcopy-blocktime': 'uint32',
      '*postcopy-vcpu-blocktime': ['uint32'],
      '*socket-address': ['SocketAddress'],
      '*dirty-limit-throttle-time-per-round': 'uint64',
      '*dirty-limit-ring-full-time': 'uint64'
  } }
274
275##
276# @query-migrate:
277#
278# Returns information about current migration process.  If migration
279# is active there will be another json-object with RAM migration
280# status.
281#
282# Returns: @MigrationInfo
283#
284# Since: 0.14
285#
286# Examples:
287#
288#     1. Before the first migration
289#
290#     -> { "execute": "query-migrate" }
291#     <- { "return": {} }
292#
293#     2. Migration is done and has succeeded
294#
295#     -> { "execute": "query-migrate" }
296#     <- { "return": {
297#             "status": "completed",
298#             "total-time":12345,
299#             "setup-time":12345,
300#             "downtime":12345,
301#             "ram":{
302#               "transferred":123,
303#               "remaining":123,
304#               "total":246,
305#               "duplicate":123,
306#               "normal":123,
307#               "normal-bytes":123456,
308#               "dirty-sync-count":15
309#             }
310#          }
311#        }
312#
313#     3. Migration is done and has failed
314#
315#     -> { "execute": "query-migrate" }
316#     <- { "return": { "status": "failed" } }
317#
318#     4. Migration is being performed:
319#
320#     -> { "execute": "query-migrate" }
321#     <- {
322#           "return":{
323#              "status":"active",
324#              "total-time":12345,
325#              "setup-time":12345,
326#              "expected-downtime":12345,
327#              "ram":{
328#                 "transferred":123,
329#                 "remaining":123,
330#                 "total":246,
331#                 "duplicate":123,
332#                 "normal":123,
333#                 "normal-bytes":123456,
334#                 "dirty-sync-count":15
335#              }
336#           }
337#        }
338#
339#     5. Migration is being performed and XBZRLE is active:
340#
341#     -> { "execute": "query-migrate" }
342#     <- {
343#           "return":{
344#              "status":"active",
345#              "total-time":12345,
346#              "setup-time":12345,
347#              "expected-downtime":12345,
348#              "ram":{
349#                 "total":1057024,
350#                 "remaining":1053304,
351#                 "transferred":3720,
352#                 "duplicate":10,
353#                 "normal":3333,
354#                 "normal-bytes":3412992,
355#                 "dirty-sync-count":15
356#              },
357#              "xbzrle-cache":{
358#                 "cache-size":67108864,
359#                 "bytes":20971520,
360#                 "pages":2444343,
361#                 "cache-miss":2244,
362#                 "cache-miss-rate":0.123,
363#                 "encoding-rate":80.1,
364#                 "overflow":34434
365#              }
366#           }
367#        }
368##
369{ 'command': 'query-migrate', 'returns': 'MigrationInfo' }
370
371##
372# @MigrationCapability:
373#
374# Migration capabilities enumeration
375#
376# @xbzrle: Migration supports xbzrle (Xor Based Zero Run Length
377#     Encoding). This feature allows us to minimize migration traffic
378#     for certain work loads, by sending compressed difference of the
379#     pages
380#
381# @rdma-pin-all: Controls whether or not the entire VM memory
382#     footprint is mlock()'d on demand or all at once.  Refer to
383#     docs/rdma.txt for usage.  Disabled by default.  (since 2.0)
384#
385# @zero-blocks: During storage migration encode blocks of zeroes
386#     efficiently.  This essentially saves 1MB of zeroes per block on
387#     the wire.  Enabling requires source and target VM to support
388#     this feature.  To enable it is sufficient to enable the
389#     capability on the source VM. The feature is disabled by default.
390#     (since 1.6)
391#
392# @events: generate events for each migration state change (since 2.4)
393#
394# @auto-converge: If enabled, QEMU will automatically throttle down
395#     the guest to speed up convergence of RAM migration.  (since 1.6)
396#
397# @postcopy-ram: Start executing on the migration target before all of
398#     RAM has been migrated, pulling the remaining pages along as
#     needed.  The capability must have the same setting on both source
400#     and target or migration will not even start.  NOTE: If the
401#     migration fails during postcopy the VM will fail.  (since 2.6)
402#
403# @x-colo: If enabled, migration will never end, and the state of the
404#     VM on the primary side will be migrated continuously to the VM
405#     on secondary side, this process is called COarse-Grain LOck
406#     Stepping (COLO) for Non-stop Service.  (since 2.8)
407#
408# @release-ram: if enabled, qemu will free the migrated ram pages on
409#     the source during postcopy-ram migration.  (since 2.9)
410#
411# @return-path: If enabled, migration will use the return path even
412#     for precopy.  (since 2.10)
413#
414# @pause-before-switchover: Pause outgoing migration before
415#     serialising device state and before disabling block IO (since
416#     2.11)
417#
418# @multifd: Use more than one fd for migration (since 4.0)
419#
420# @dirty-bitmaps: If enabled, QEMU will migrate named dirty bitmaps.
421#     (since 2.12)
422#
423# @postcopy-blocktime: Calculate downtime for postcopy live migration
424#     (since 3.0)
425#
426# @late-block-activate: If enabled, the destination will not activate
427#     block devices (and thus take locks) immediately at the end of
428#     migration.  (since 3.0)
429#
430# @x-ignore-shared: If enabled, QEMU will not migrate shared memory
431#     that is accessible on the destination machine.  (since 4.0)
432#
433# @validate-uuid: Send the UUID of the source to allow the destination
434#     to ensure it is the same.  (since 4.2)
435#
436# @background-snapshot: If enabled, the migration stream will be a
437#     snapshot of the VM exactly at the point when the migration
438#     procedure starts.  The VM RAM is saved with running VM.
439#     (since 6.0)
440#
441# @zero-copy-send: Controls behavior on sending memory pages on
442#     migration.  When true, enables a zero-copy mechanism for sending
443#     memory pages, if host supports it.  Requires that QEMU be
444#     permitted to use locked memory for guest RAM pages.  (since 7.1)
445#
446# @postcopy-preempt: If enabled, the migration process will allow
447#     postcopy requests to preempt precopy stream, so postcopy
448#     requests will be handled faster.  This is a performance feature
449#     and should not affect the correctness of postcopy migration.
450#     (since 7.1)
451#
452# @switchover-ack: If enabled, migration will not stop the source VM
453#     and complete the migration until an ACK is received from the
454#     destination that it's OK to do so.  Exactly when this ACK is
455#     sent depends on the migrated devices that use this feature.  For
456#     example, a device can use it to make sure some of its data is
457#     sent and loaded in the destination before doing switchover.
458#     This can reduce downtime if devices that support this capability
459#     are present.  'return-path' capability must be enabled to use
460#     it.  (since 8.1)
461#
462# @dirty-limit: If enabled, migration will throttle vCPUs as needed to
463#     keep their dirty page rate within @vcpu-dirty-limit.  This can
464#     improve responsiveness of large guests during live migration,
465#     and can result in more stable read performance.  Requires KVM
466#     with accelerator property "dirty-ring-size" set.  (Since 8.1)
467#
468# @mapped-ram: Migrate using fixed offsets in the migration file for
469#     each RAM page.  Requires a migration URI that supports seeking,
470#     such as a file.  (since 9.0)
471#
472# Features:
473#
474# @unstable: Members @x-colo and @x-ignore-shared are experimental.
475#
476# Since: 1.2
477##
{ 'enum': 'MigrationCapability',
  'data': [ 'xbzrle',
            'rdma-pin-all',
            'auto-converge',
            'zero-blocks',
            'events',
            'postcopy-ram',
            { 'name': 'x-colo', 'features': [ 'unstable' ] },
            'release-ram',
            'return-path',
            'pause-before-switchover',
            'multifd',
            'dirty-bitmaps',
            'postcopy-blocktime',
            'late-block-activate',
            { 'name': 'x-ignore-shared', 'features': [ 'unstable' ] },
            'validate-uuid',
            'background-snapshot',
            'zero-copy-send',
            'postcopy-preempt',
            'switchover-ack',
            'dirty-limit',
            'mapped-ram' ] }
489
490##
491# @MigrationCapabilityStatus:
492#
493# Migration capability information
494#
495# @capability: capability enum
496#
497# @state: capability state bool
498#
499# Since: 1.2
500##
{ 'struct': 'MigrationCapabilityStatus',
  'data': {
      'capability': 'MigrationCapability',
      'state': 'bool'
  } }
503
504##
505# @migrate-set-capabilities:
506#
507# Enable/Disable the following migration capabilities (like xbzrle)
508#
509# @capabilities: json array of capability modifications to make
510#
511# Since: 1.2
512#
513# Example:
514#
515#     -> { "execute": "migrate-set-capabilities" , "arguments":
516#          { "capabilities": [ { "capability": "xbzrle", "state": true } ] } }
517#     <- { "return": {} }
518##
{ 'command': 'migrate-set-capabilities',
  'data': { 'capabilities': [ 'MigrationCapabilityStatus' ] } }
521
522##
523# @query-migrate-capabilities:
524#
525# Returns information about the current migration capabilities status
526#
# Returns: a list of @MigrationCapabilityStatus
528#
529# Since: 1.2
530#
531# Example:
532#
533#     -> { "execute": "query-migrate-capabilities" }
534#     <- { "return": [
535#           {"state": false, "capability": "xbzrle"},
536#           {"state": false, "capability": "rdma-pin-all"},
537#           {"state": false, "capability": "auto-converge"},
538#           {"state": false, "capability": "zero-blocks"},
539#           {"state": true, "capability": "events"},
540#           {"state": false, "capability": "postcopy-ram"},
541#           {"state": false, "capability": "x-colo"}
542#        ]}
543##
544{ 'command': 'query-migrate-capabilities', 'returns':   ['MigrationCapabilityStatus']}
545
546##
547# @MultiFDCompression:
548#
549# An enumeration of multifd compression methods.
550#
551# @none: no compression.
552#
553# @zlib: use zlib compression method.
554#
555# @zstd: use zstd compression method.
556#
557# @qpl: use qpl compression method.  Query Processing Library(qpl) is
558#       based on the deflate compression algorithm and use the Intel
559#       In-Memory Analytics Accelerator(IAA) accelerated compression
560#       and decompression.  (Since 9.1)
561#
562# @uadk: use UADK library compression method.  (Since 9.1)
563#
564# Since: 5.0
565##
{ 'enum': 'MultiFDCompression',
  'data': [ 'none',
            'zlib',
            { 'name': 'zstd', 'if': 'CONFIG_ZSTD' },
            { 'name': 'qpl', 'if': 'CONFIG_QPL' },
            { 'name': 'uadk', 'if': 'CONFIG_UADK' } ] }
571
572##
573# @MigMode:
574#
575# @normal: the original form of migration.  (since 8.2)
576#
577# @cpr-reboot: The migrate command stops the VM and saves state to the
578#     URI.  After quitting QEMU, the user resumes by running QEMU
579#     -incoming.
580#
581#     This mode allows the user to quit QEMU, optionally update and
582#     reboot the OS, and restart QEMU.  If the user reboots, the URI
583#     must persist across the reboot, such as by using a file.
584#
585#     Unlike normal mode, the use of certain local storage options
586#     does not block the migration, but the user must not modify the
587#     contents of guest block devices between the quit and restart.
588#
589#     This mode supports VFIO devices provided the user first puts the
590#     guest in the suspended runstate, such as by issuing
591#     guest-suspend-ram to the QEMU guest agent.
592#
593#     Best performance is achieved when the memory backend is shared
594#     and the @x-ignore-shared migration capability is set, but this
595#     is not required.  Further, if the user reboots before restarting
596#     such a configuration, the shared memory must persist across the
597#     reboot, such as by backing it with a dax device.
598#
599#     @cpr-reboot may not be used with postcopy, background-snapshot,
600#     or COLO.
601#
602#     (since 8.2)
603##
{ 'enum': 'MigMode',
  'data': [ 'normal',
            'cpr-reboot' ] }
606
607##
608# @ZeroPageDetection:
609#
610# @none: Do not perform zero page checking.
611#
612# @legacy: Perform zero page checking in main migration thread.
613#
614# @multifd: Perform zero page checking in multifd sender thread if
615#     multifd migration is enabled, else in the main migration thread
616#     as for @legacy.
617#
618# Since: 9.0
619##
{ 'enum': 'ZeroPageDetection',
  'data': [ 'none',
            'legacy',
            'multifd' ] }
622
623##
624# @BitmapMigrationBitmapAliasTransform:
625#
626# @persistent: If present, the bitmap will be made persistent or
627#     transient depending on this parameter.
628#
629# Since: 6.0
630##
{ 'struct': 'BitmapMigrationBitmapAliasTransform',
  'data': { '*persistent': 'bool' } }
635
636##
637# @BitmapMigrationBitmapAlias:
638#
639# @name: The name of the bitmap.
640#
641# @alias: An alias name for migration (for example the bitmap name on
642#     the opposite site).
643#
644# @transform: Allows the modification of the migrated bitmap.  (since
645#     6.0)
646#
647# Since: 5.2
648##
{ 'struct': 'BitmapMigrationBitmapAlias',
  'data': { 'name': 'str',
            'alias': 'str',
            '*transform': 'BitmapMigrationBitmapAliasTransform' } }
655
656##
657# @BitmapMigrationNodeAlias:
658#
659# Maps a block node name and the bitmaps it has to aliases for dirty
660# bitmap migration.
661#
662# @node-name: A block node name.
663#
664# @alias: An alias block node name for migration (for example the node
665#     name on the opposite site).
666#
667# @bitmaps: Mappings for the bitmaps on this node.
668#
669# Since: 5.2
670##
{ 'struct': 'BitmapMigrationNodeAlias',
  'data': { 'node-name': 'str',
            'alias': 'str',
            'bitmaps': [ 'BitmapMigrationBitmapAlias' ] } }
677
678##
679# @MigrationParameter:
680#
681# Migration parameters enumeration
682#
683# @announce-initial: Initial delay (in milliseconds) before sending
684#     the first announce (Since 4.0)
685#
686# @announce-max: Maximum delay (in milliseconds) between packets in
687#     the announcement (Since 4.0)
688#
689# @announce-rounds: Number of self-announce packets sent after
690#     migration (Since 4.0)
691#
692# @announce-step: Increase in delay (in milliseconds) between
693#     subsequent packets in the announcement (Since 4.0)
694#
695# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
696#     bytes_xfer_period to trigger throttling.  It is expressed as
697#     percentage.  The default value is 50.  (Since 5.0)
698#
699# @cpu-throttle-initial: Initial percentage of time guest cpus are
700#     throttled when migration auto-converge is activated.  The
701#     default value is 20.  (Since 2.7)
702#
703# @cpu-throttle-increment: throttle percentage increase each time
704#     auto-converge detects that migration is not making progress.
705#     The default value is 10.  (Since 2.7)
706#
# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage.
#     At the tail stage of throttling, the Guest is very sensitive to
#     CPU percentage while the @cpu-throttle-increment is excessive
710#     usually at tail stage.  If this parameter is true, we will
711#     compute the ideal CPU percentage used by the Guest, which may
712#     exactly make the dirty rate match the dirty rate threshold.
713#     Then we will choose a smaller throttle increment between the one
714#     specified by @cpu-throttle-increment and the one generated by
715#     ideal CPU percentage.  Therefore, it is compatible to
716#     traditional throttling, meanwhile the throttle increment won't
717#     be excessive at tail stage.  The default value is false.  (Since
718#     5.1)
719#
720# @tls-creds: ID of the 'tls-creds' object that provides credentials
721#     for establishing a TLS connection over the migration data
722#     channel.  On the outgoing side of the migration, the credentials
723#     must be for a 'client' endpoint, while for the incoming side the
724#     credentials must be for a 'server' endpoint.  Setting this to a
725#     non-empty string enables TLS for all migrations.  An empty
726#     string means that QEMU will use plain text mode for migration,
727#     rather than TLS.  (Since 2.7)
728#
729# @tls-hostname: migration target's hostname for validating the
730#     server's x509 certificate identity.  If empty, QEMU will use the
731#     hostname from the migration URI, if any.  A non-empty value is
732#     required when using x509 based TLS credentials and the migration
733#     URI does not include a hostname, such as fd: or exec: based
734#     migration.  (Since 2.7)
735#
736#     Note: empty value works only since 2.9.
737#
738# @tls-authz: ID of the 'authz' object subclass that provides access
739#     control checking of the TLS x509 certificate distinguished name.
740#     This object is only resolved at time of use, so can be deleted
741#     and recreated on the fly while the migration server is active.
742#     If missing, it will default to denying access (Since 4.0)
743#
744# @max-bandwidth: maximum speed for migration, in bytes per second.
745#     (Since 2.8)
746#
747# @avail-switchover-bandwidth: to set the available bandwidth that
748#     migration can use during switchover phase.  NOTE!  This does not
749#     limit the bandwidth during switchover, but only for calculations
750#     when making decisions to switchover.  By default, this value is
751#     zero, which means QEMU will estimate the bandwidth
752#     automatically.  This can be set when the estimated value is not
753#     accurate, while the user is able to guarantee such bandwidth is
754#     available when switching over.  When specified correctly, this
755#     can make the switchover decision much more accurate.
756#     (Since 8.2)
757#
758# @downtime-limit: set maximum tolerated downtime for migration.
759#     maximum downtime in milliseconds (Since 2.8)
760#
761# @x-checkpoint-delay: The delay time (in ms) between two COLO
762#     checkpoints in periodic mode.  (Since 2.8)
763#
764# @multifd-channels: Number of channels used to migrate data in
#     parallel.  This is the same as the number of sockets used for
#     migration.  The default value is 2 (since 4.0)
767#
768# @xbzrle-cache-size: cache size to be used by XBZRLE migration.  It
769#     needs to be a multiple of the target page size and a power of 2
770#     (Since 2.11)
771#
772# @max-postcopy-bandwidth: Background transfer bandwidth during
773#     postcopy.  Defaults to 0 (unlimited).  In bytes per second.
774#     (Since 3.0)
775#
776# @max-cpu-throttle: maximum cpu throttle percentage.  Defaults to 99.
777#     (Since 3.1)
778#
779# @multifd-compression: Which compression method to use.  Defaults to
780#     none.  (Since 5.0)
781#
782# @multifd-zlib-level: Set the compression level to be used in live
783#     migration, the compression level is an integer between 0 and 9,
784#     where 0 means no compression, 1 means the best compression
785#     speed, and 9 means best compression ratio which will consume
786#     more CPU. Defaults to 1.  (Since 5.0)
787#
788# @multifd-zstd-level: Set the compression level to be used in live
789#     migration, the compression level is an integer between 0 and 20,
790#     where 0 means no compression, 1 means the best compression
791#     speed, and 20 means best compression ratio which will consume
792#     more CPU. Defaults to 1.  (Since 5.0)
793#
794# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
795#     aliases for the purpose of dirty bitmap migration.  Such aliases
796#     may for example be the corresponding names on the opposite site.
797#     The mapping must be one-to-one, but not necessarily complete: On
798#     the source, unmapped bitmaps and all bitmaps on unmapped nodes
799#     will be ignored.  On the destination, encountering an unmapped
800#     alias in the incoming migration stream will result in a report,
801#     and all further bitmap migration data will then be discarded.
802#     Note that the destination does not know about bitmaps it does
803#     not receive, so there is no limitation or requirement regarding
804#     the number of bitmaps received, or how they are named, or on
805#     which nodes they are placed.  By default (when this parameter
806#     has never been set), bitmap names are mapped to themselves.
807#     Nodes are mapped to their block device name if there is one, and
808#     to their node name otherwise.  (Since 5.2)
809#
810# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty
811#     limit during live migration.  Should be in the range 1 to
812#     1000ms.  Defaults to 1000ms.  (Since 8.1)
813#
814# @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration.
815#     Defaults to 1.  (Since 8.1)
816#
817# @mode: Migration mode.  See description in @MigMode.  Default is
818#     'normal'.  (Since 8.2)
819#
820# @zero-page-detection: Whether and how to detect zero pages.
821#     See description in @ZeroPageDetection.  Default is 'multifd'.
822#     (since 9.0)
823#
824# @direct-io: Open migration files with O_DIRECT when possible.  This
825#     only has effect if the @mapped-ram capability is enabled.
826#     (Since 9.1)
827#
828# Features:
829#
830# @unstable: Members @x-checkpoint-delay and
831#     @x-vcpu-dirty-limit-period are experimental.
832#
833# Since: 2.4
834##
{ 'enum': 'MigrationParameter',
  'data': ['announce-initial',
           'announce-max',
           'announce-rounds',
           'announce-step',
           'throttle-trigger-threshold',
           'cpu-throttle-initial',
           'cpu-throttle-increment',
           'cpu-throttle-tailslow',
           'tls-creds',
           'tls-hostname',
           'tls-authz',
           'max-bandwidth',
           'avail-switchover-bandwidth',
           'downtime-limit',
           { 'name': 'x-checkpoint-delay', 'features': [ 'unstable' ] },
           'multifd-channels',
           'xbzrle-cache-size',
           'max-postcopy-bandwidth',
           'max-cpu-throttle',
           'multifd-compression',
           'multifd-zlib-level',
           'multifd-zstd-level',
           'block-bitmap-mapping',
           { 'name': 'x-vcpu-dirty-limit-period', 'features': ['unstable'] },
           'vcpu-dirty-limit',
           'mode',
           'zero-page-detection',
           'direct-io'] }
854
855##
856# @MigrateSetParameters:
857#
858# @announce-initial: Initial delay (in milliseconds) before sending
859#     the first announce (Since 4.0)
860#
861# @announce-max: Maximum delay (in milliseconds) between packets in
862#     the announcement (Since 4.0)
863#
864# @announce-rounds: Number of self-announce packets sent after
865#     migration (Since 4.0)
866#
867# @announce-step: Increase in delay (in milliseconds) between
868#     subsequent packets in the announcement (Since 4.0)
869#
870# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
871#     bytes_xfer_period to trigger throttling.  It is expressed as
872#     percentage.  The default value is 50.  (Since 5.0)
873#
874# @cpu-throttle-initial: Initial percentage of time guest cpus are
875#     throttled when migration auto-converge is activated.  The
876#     default value is 20.  (Since 2.7)
877#
878# @cpu-throttle-increment: throttle percentage increase each time
879#     auto-converge detects that migration is not making progress.
880#     The default value is 10.  (Since 2.7)
881#
# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage.
#     At the tail stage of throttling, the Guest is very sensitive to
#     CPU percentage while the @cpu-throttle-increment is excessive
885#     usually at tail stage.  If this parameter is true, we will
886#     compute the ideal CPU percentage used by the Guest, which may
887#     exactly make the dirty rate match the dirty rate threshold.
888#     Then we will choose a smaller throttle increment between the one
889#     specified by @cpu-throttle-increment and the one generated by
890#     ideal CPU percentage.  Therefore, it is compatible to
891#     traditional throttling, meanwhile the throttle increment won't
892#     be excessive at tail stage.  The default value is false.  (Since
893#     5.1)
894#
895# @tls-creds: ID of the 'tls-creds' object that provides credentials
896#     for establishing a TLS connection over the migration data
897#     channel.  On the outgoing side of the migration, the credentials
898#     must be for a 'client' endpoint, while for the incoming side the
899#     credentials must be for a 'server' endpoint.  Setting this to a
900#     non-empty string enables TLS for all migrations.  An empty
901#     string means that QEMU will use plain text mode for migration,
902#     rather than TLS.  This is the default.  (Since 2.7)
903#
904# @tls-hostname: migration target's hostname for validating the
905#     server's x509 certificate identity.  If empty, QEMU will use the
906#     hostname from the migration URI, if any.  A non-empty value is
907#     required when using x509 based TLS credentials and the migration
908#     URI does not include a hostname, such as fd: or exec: based
909#     migration.  (Since 2.7)
910#
911#     Note: empty value works only since 2.9.
912#
913# @tls-authz: ID of the 'authz' object subclass that provides access
914#     control checking of the TLS x509 certificate distinguished name.
915#     This object is only resolved at time of use, so can be deleted
916#     and recreated on the fly while the migration server is active.
917#     If missing, it will default to denying access (Since 4.0)
918#
919# @max-bandwidth: maximum speed for migration, in bytes per second.
920#     (Since 2.8)
921#
922# @avail-switchover-bandwidth: to set the available bandwidth that
923#     migration can use during switchover phase.  NOTE!  This does not
924#     limit the bandwidth during switchover, but only for calculations
925#     when making decisions to switchover.  By default, this value is
926#     zero, which means QEMU will estimate the bandwidth
927#     automatically.  This can be set when the estimated value is not
928#     accurate, while the user is able to guarantee such bandwidth is
929#     available when switching over.  When specified correctly, this
930#     can make the switchover decision much more accurate.
931#     (Since 8.2)
932#
933# @downtime-limit: set maximum tolerated downtime for migration.
934#     maximum downtime in milliseconds (Since 2.8)
935#
936# @x-checkpoint-delay: The delay time (in ms) between two COLO
937#     checkpoints in periodic mode.  (Since 2.8)
938#
# @multifd-channels: Number of channels used to migrate data in
#     parallel.  This is the same as the number of sockets used for
#     migration.  The default value is 2 (since 4.0)
942#
943# @xbzrle-cache-size: cache size to be used by XBZRLE migration.  It
944#     needs to be a multiple of the target page size and a power of 2
945#     (Since 2.11)
946#
947# @max-postcopy-bandwidth: Background transfer bandwidth during
948#     postcopy.  Defaults to 0 (unlimited).  In bytes per second.
949#     (Since 3.0)
950#
951# @max-cpu-throttle: maximum cpu throttle percentage.  Defaults to 99.
952#     (Since 3.1)
953#
954# @multifd-compression: Which compression method to use.  Defaults to
955#     none.  (Since 5.0)
956#
957# @multifd-zlib-level: Set the compression level to be used in live
958#     migration, the compression level is an integer between 0 and 9,
959#     where 0 means no compression, 1 means the best compression
960#     speed, and 9 means best compression ratio which will consume
961#     more CPU. Defaults to 1.  (Since 5.0)
962#
963# @multifd-zstd-level: Set the compression level to be used in live
964#     migration, the compression level is an integer between 0 and 20,
965#     where 0 means no compression, 1 means the best compression
966#     speed, and 20 means best compression ratio which will consume
967#     more CPU. Defaults to 1.  (Since 5.0)
968#
969# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
970#     aliases for the purpose of dirty bitmap migration.  Such aliases
971#     may for example be the corresponding names on the opposite site.
972#     The mapping must be one-to-one, but not necessarily complete: On
973#     the source, unmapped bitmaps and all bitmaps on unmapped nodes
974#     will be ignored.  On the destination, encountering an unmapped
975#     alias in the incoming migration stream will result in a report,
976#     and all further bitmap migration data will then be discarded.
977#     Note that the destination does not know about bitmaps it does
978#     not receive, so there is no limitation or requirement regarding
979#     the number of bitmaps received, or how they are named, or on
980#     which nodes they are placed.  By default (when this parameter
981#     has never been set), bitmap names are mapped to themselves.
982#     Nodes are mapped to their block device name if there is one, and
983#     to their node name otherwise.  (Since 5.2)
984#
985# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty
986#     limit during live migration.  Should be in the range 1 to
987#     1000ms.  Defaults to 1000ms.  (Since 8.1)
988#
989# @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration.
990#     Defaults to 1.  (Since 8.1)
991#
992# @mode: Migration mode.  See description in @MigMode.  Default is
993#     'normal'.  (Since 8.2)
994#
995# @zero-page-detection: Whether and how to detect zero pages.
996#     See description in @ZeroPageDetection.  Default is 'multifd'.
997#     (since 9.0)
998#
999# @direct-io: Open migration files with O_DIRECT when possible.  This
1000#     only has effect if the @mapped-ram capability is enabled.
1001#     (Since 9.1)
1002#
1003# Features:
1004#
1005# @unstable: Members @x-checkpoint-delay and
1006#     @x-vcpu-dirty-limit-period are experimental.
1007#
1008# TODO: either fuse back into MigrationParameters, or make
1009#     MigrationParameters members mandatory
1010#
1011# Since: 2.4
1012##
{ 'struct': 'MigrateSetParameters',
  # The TLS members use StrOrNull rather than the plain 'str' of
  # @MigrationParameters, so clients can pass JSON null as well as a
  # string; presumably null resets the parameter to its default --
  # TODO confirm against the implementation (see the TODO in the doc
  # comment above about fusing the two types).
  'data': { '*announce-initial': 'size',
            '*announce-max': 'size',
            '*announce-rounds': 'size',
            '*announce-step': 'size',
            '*throttle-trigger-threshold': 'uint8',
            '*cpu-throttle-initial': 'uint8',
            '*cpu-throttle-increment': 'uint8',
            '*cpu-throttle-tailslow': 'bool',
            '*tls-creds': 'StrOrNull',
            '*tls-hostname': 'StrOrNull',
            '*tls-authz': 'StrOrNull',
            '*max-bandwidth': 'size',
            '*avail-switchover-bandwidth': 'size',
            '*downtime-limit': 'uint64',
            '*x-checkpoint-delay': { 'type': 'uint32',
                                     'features': [ 'unstable' ] },
            '*multifd-channels': 'uint8',
            '*xbzrle-cache-size': 'size',
            '*max-postcopy-bandwidth': 'size',
            '*max-cpu-throttle': 'uint8',
            '*multifd-compression': 'MultiFDCompression',
            '*multifd-zlib-level': 'uint8',
            '*multifd-zstd-level': 'uint8',
            '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ],
            '*x-vcpu-dirty-limit-period': { 'type': 'uint64',
                                            'features': [ 'unstable' ] },
            '*vcpu-dirty-limit': 'uint64',
            '*mode': 'MigMode',
            '*zero-page-detection': 'ZeroPageDetection',
            '*direct-io': 'bool' } }
1044
1045##
1046# @migrate-set-parameters:
1047#
1048# Set various migration parameters.
1049#
1050# Since: 2.4
1051#
1052# Example:
1053#
1054#     -> { "execute": "migrate-set-parameters" ,
1055#          "arguments": { "multifd-channels": 5 } }
1056#     <- { "return": {} }
1057##
# 'boxed': true: the implementation receives one MigrateSetParameters
# object instead of individual unpacked arguments.
{ 'command': 'migrate-set-parameters', 'boxed': true,
  'data': 'MigrateSetParameters' }
1060
1061##
1062# @MigrationParameters:
1063#
1064# The optional members aren't actually optional.
1065#
1066# @announce-initial: Initial delay (in milliseconds) before sending
1067#     the first announce (Since 4.0)
1068#
1069# @announce-max: Maximum delay (in milliseconds) between packets in
1070#     the announcement (Since 4.0)
1071#
1072# @announce-rounds: Number of self-announce packets sent after
1073#     migration (Since 4.0)
1074#
1075# @announce-step: Increase in delay (in milliseconds) between
1076#     subsequent packets in the announcement (Since 4.0)
1077#
1078# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
1079#     bytes_xfer_period to trigger throttling.  It is expressed as
1080#     percentage.  The default value is 50.  (Since 5.0)
1081#
1082# @cpu-throttle-initial: Initial percentage of time guest cpus are
1083#     throttled when migration auto-converge is activated.  (Since
1084#     2.7)
1085#
1086# @cpu-throttle-increment: throttle percentage increase each time
1087#     auto-converge detects that migration is not making progress.
1088#     (Since 2.7)
1089#
# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage.
#     At the tail stage of throttling, the Guest is very sensitive to
#     CPU percentage while the @cpu-throttle-increment is excessive
1093#     usually at tail stage.  If this parameter is true, we will
1094#     compute the ideal CPU percentage used by the Guest, which may
1095#     exactly make the dirty rate match the dirty rate threshold.
1096#     Then we will choose a smaller throttle increment between the one
1097#     specified by @cpu-throttle-increment and the one generated by
1098#     ideal CPU percentage.  Therefore, it is compatible to
1099#     traditional throttling, meanwhile the throttle increment won't
1100#     be excessive at tail stage.  The default value is false.  (Since
1101#     5.1)
1102#
1103# @tls-creds: ID of the 'tls-creds' object that provides credentials
1104#     for establishing a TLS connection over the migration data
1105#     channel.  On the outgoing side of the migration, the credentials
1106#     must be for a 'client' endpoint, while for the incoming side the
1107#     credentials must be for a 'server' endpoint.  An empty string
1108#     means that QEMU will use plain text mode for migration, rather
1109#     than TLS.  (Since 2.7)
1110#
1111#     Note: 2.8 omits empty @tls-creds instead.
1112#
1113# @tls-hostname: migration target's hostname for validating the
1114#     server's x509 certificate identity.  If empty, QEMU will use the
1115#     hostname from the migration URI, if any.  (Since 2.7)
1116#
1117#     Note: 2.8 omits empty @tls-hostname instead.
1118#
1119# @tls-authz: ID of the 'authz' object subclass that provides access
1120#     control checking of the TLS x509 certificate distinguished name.
1121#     (Since 4.0)
1122#
1123# @max-bandwidth: maximum speed for migration, in bytes per second.
1124#     (Since 2.8)
1125#
1126# @avail-switchover-bandwidth: to set the available bandwidth that
1127#     migration can use during switchover phase.  NOTE!  This does not
1128#     limit the bandwidth during switchover, but only for calculations
1129#     when making decisions to switchover.  By default, this value is
1130#     zero, which means QEMU will estimate the bandwidth
1131#     automatically.  This can be set when the estimated value is not
1132#     accurate, while the user is able to guarantee such bandwidth is
1133#     available when switching over.  When specified correctly, this
1134#     can make the switchover decision much more accurate.
1135#     (Since 8.2)
1136#
1137# @downtime-limit: set maximum tolerated downtime for migration.
1138#     maximum downtime in milliseconds (Since 2.8)
1139#
1140# @x-checkpoint-delay: the delay time between two COLO checkpoints.
1141#     (Since 2.8)
1142#
# @multifd-channels: Number of channels used to migrate data in
#     parallel.  This is the same as the number of sockets used for
#     migration.  The default value is 2 (since 4.0)
1146#
1147# @xbzrle-cache-size: cache size to be used by XBZRLE migration.  It
1148#     needs to be a multiple of the target page size and a power of 2
1149#     (Since 2.11)
1150#
1151# @max-postcopy-bandwidth: Background transfer bandwidth during
1152#     postcopy.  Defaults to 0 (unlimited).  In bytes per second.
1153#     (Since 3.0)
1154#
1155# @max-cpu-throttle: maximum cpu throttle percentage.  Defaults to 99.
1156#     (Since 3.1)
1157#
1158# @multifd-compression: Which compression method to use.  Defaults to
1159#     none.  (Since 5.0)
1160#
1161# @multifd-zlib-level: Set the compression level to be used in live
1162#     migration, the compression level is an integer between 0 and 9,
1163#     where 0 means no compression, 1 means the best compression
1164#     speed, and 9 means best compression ratio which will consume
1165#     more CPU. Defaults to 1.  (Since 5.0)
1166#
1167# @multifd-zstd-level: Set the compression level to be used in live
1168#     migration, the compression level is an integer between 0 and 20,
1169#     where 0 means no compression, 1 means the best compression
1170#     speed, and 20 means best compression ratio which will consume
1171#     more CPU. Defaults to 1.  (Since 5.0)
1172#
1173# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
1174#     aliases for the purpose of dirty bitmap migration.  Such aliases
1175#     may for example be the corresponding names on the opposite site.
1176#     The mapping must be one-to-one, but not necessarily complete: On
1177#     the source, unmapped bitmaps and all bitmaps on unmapped nodes
1178#     will be ignored.  On the destination, encountering an unmapped
1179#     alias in the incoming migration stream will result in a report,
1180#     and all further bitmap migration data will then be discarded.
1181#     Note that the destination does not know about bitmaps it does
1182#     not receive, so there is no limitation or requirement regarding
1183#     the number of bitmaps received, or how they are named, or on
1184#     which nodes they are placed.  By default (when this parameter
1185#     has never been set), bitmap names are mapped to themselves.
1186#     Nodes are mapped to their block device name if there is one, and
1187#     to their node name otherwise.  (Since 5.2)
1188#
1189# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty
1190#     limit during live migration.  Should be in the range 1 to
1191#     1000ms.  Defaults to 1000ms.  (Since 8.1)
1192#
1193# @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration.
1194#     Defaults to 1.  (Since 8.1)
1195#
1196# @mode: Migration mode.  See description in @MigMode.  Default is
1197#        'normal'.  (Since 8.2)
1198#
1199# @zero-page-detection: Whether and how to detect zero pages.
1200#     See description in @ZeroPageDetection.  Default is 'multifd'.
1201#     (since 9.0)
1202#
1203# @direct-io: Open migration files with O_DIRECT when possible.  This
1204#     only has effect if the @mapped-ram capability is enabled.
1205#     (Since 9.1)
1206#
1207# Features:
1208#
1209# @unstable: Members @x-checkpoint-delay and
1210#     @x-vcpu-dirty-limit-period are experimental.
1211#
1212# Since: 2.4
1213##
{ 'struct': 'MigrationParameters',
  # Mirrors @MigrateSetParameters, except the TLS members are plain
  # 'str' here instead of StrOrNull.  Per the doc comment above, the
  # optional members aren't actually optional.
  'data': { '*announce-initial': 'size',
            '*announce-max': 'size',
            '*announce-rounds': 'size',
            '*announce-step': 'size',
            '*throttle-trigger-threshold': 'uint8',
            '*cpu-throttle-initial': 'uint8',
            '*cpu-throttle-increment': 'uint8',
            '*cpu-throttle-tailslow': 'bool',
            '*tls-creds': 'str',
            '*tls-hostname': 'str',
            '*tls-authz': 'str',
            '*max-bandwidth': 'size',
            '*avail-switchover-bandwidth': 'size',
            '*downtime-limit': 'uint64',
            '*x-checkpoint-delay': { 'type': 'uint32',
                                     'features': [ 'unstable' ] },
            '*multifd-channels': 'uint8',
            '*xbzrle-cache-size': 'size',
            '*max-postcopy-bandwidth': 'size',
            '*max-cpu-throttle': 'uint8',
            '*multifd-compression': 'MultiFDCompression',
            '*multifd-zlib-level': 'uint8',
            '*multifd-zstd-level': 'uint8',
            '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ],
            '*x-vcpu-dirty-limit-period': { 'type': 'uint64',
                                            'features': [ 'unstable' ] },
            '*vcpu-dirty-limit': 'uint64',
            '*mode': 'MigMode',
            '*zero-page-detection': 'ZeroPageDetection',
            '*direct-io': 'bool' } }
1245
1246##
1247# @query-migrate-parameters:
1248#
1249# Returns information about the current migration parameters
1250#
1251# Returns: @MigrationParameters
1252#
1253# Since: 2.4
1254#
1255# Example:
1256#
1257#     -> { "execute": "query-migrate-parameters" }
1258#     <- { "return": {
1259#              "multifd-channels": 2,
1260#              "cpu-throttle-increment": 10,
1261#              "cpu-throttle-initial": 20,
1262#              "max-bandwidth": 33554432,
1263#              "downtime-limit": 300
1264#           }
1265#        }
1266##
# Read-only query; takes no arguments.
{ 'command': 'query-migrate-parameters',
  'returns': 'MigrationParameters' }
1269
1270##
1271# @migrate-start-postcopy:
1272#
1273# Followup to a migration command to switch the migration to postcopy
1274# mode.  The postcopy-ram capability must be set on both source and
1275# destination before the original migration command.
1276#
1277# Since: 2.5
1278#
1279# Example:
1280#
1281#     -> { "execute": "migrate-start-postcopy" }
1282#     <- { "return": {} }
1283##
1284{ 'command': 'migrate-start-postcopy' }
1285
1286##
1287# @MIGRATION:
1288#
1289# Emitted when a migration event happens
1290#
1291# @status: @MigrationStatus describing the current migration status.
1292#
1293# Since: 2.4
1294#
1295# Example:
1296#
1297#     <- {"timestamp": {"seconds": 1432121972, "microseconds": 744001},
1298#         "event": "MIGRATION",
1299#         "data": {"status": "completed"} }
1300##
{ 'event': 'MIGRATION',
  # Payload is the new migration status only; details must be fetched
  # with query-migrate.
  'data': {'status': 'MigrationStatus'}}
1303
1304##
1305# @MIGRATION_PASS:
1306#
1307# Emitted from the source side of a migration at the start of each
1308# pass (when it syncs the dirty bitmap)
1309#
1310# @pass: An incrementing count (starting at 1 on the first pass)
1311#
1312# Since: 2.6
1313#
1314# Example:
1315#
1316#     <- { "timestamp": {"seconds": 1449669631, "microseconds": 239225},
1317#           "event": "MIGRATION_PASS", "data": {"pass": 2} }
1318##
{ 'event': 'MIGRATION_PASS',
  # @pass counts dirty-bitmap sync passes, starting at 1.
  'data': { 'pass': 'int' } }
1321
1322##
1323# @COLOMessage:
1324#
1325# The message transmission between Primary side and Secondary side.
1326#
1327# @checkpoint-ready: Secondary VM (SVM) is ready for checkpointing
1328#
1329# @checkpoint-request: Primary VM (PVM) tells SVM to prepare for
1330#     checkpointing
1331#
1332# @checkpoint-reply: SVM gets PVM's checkpoint request
1333#
1334# @vmstate-send: VM's state will be sent by PVM.
1335#
1336# @vmstate-size: The total size of VMstate.
1337#
1338# @vmstate-received: VM's state has been received by SVM.
1339#
1340# @vmstate-loaded: VM's state has been loaded by SVM.
1341#
1342# Since: 2.8
1343##
{ 'enum': 'COLOMessage',
  # Protocol messages exchanged between the Primary VM (PVM) and
  # Secondary VM (SVM) during COLO checkpointing.
  'data': [ 'checkpoint-ready', 'checkpoint-request', 'checkpoint-reply',
            'vmstate-send', 'vmstate-size', 'vmstate-received',
            'vmstate-loaded' ] }
1348
1349##
1350# @COLOMode:
1351#
1352# The COLO current mode.
1353#
1354# @none: COLO is disabled.
1355#
1356# @primary: COLO node in primary side.
1357#
# @secondary: COLO node in secondary side.
1359#
1360# Since: 2.8
1361##
{ 'enum': 'COLOMode',
  # Role of this node in COLO replication; 'none' when COLO is disabled.
  'data': [ 'none', 'primary', 'secondary'] }
1364
1365##
1366# @FailoverStatus:
1367#
1368# An enumeration of COLO failover status
1369#
1370# @none: no failover has ever happened
1371#
1372# @require: got failover requirement but not handled
1373#
1374# @active: in the process of doing failover
1375#
1376# @completed: finish the process of failover
1377#
1378# @relaunch: restart the failover process, from 'none' -> 'completed'
1379#     (Since 2.9)
1380#
1381# Since: 2.8
1382##
{ 'enum': 'FailoverStatus',
  # Progression of a COLO failover; 'relaunch' restarts the cycle.
  'data': [ 'none', 'require', 'active', 'completed', 'relaunch' ] }
1385
1386##
1387# @COLO_EXIT:
1388#
1389# Emitted when VM finishes COLO mode due to some errors happening or
1390# at the request of users.
1391#
1392# @mode: report COLO mode when COLO exited.
1393#
1394# @reason: describes the reason for the COLO exit.
1395#
1396# Since: 3.1
1397#
1398# Example:
1399#
1400#     <- { "timestamp": {"seconds": 2032141960, "microseconds": 417172},
1401#          "event": "COLO_EXIT", "data": {"mode": "primary", "reason": "request" } }
1402##
{ 'event': 'COLO_EXIT',
  # Reports which COLO role exited and why (request or error).
  'data': {'mode': 'COLOMode', 'reason': 'COLOExitReason' } }
1405
1406##
1407# @COLOExitReason:
1408#
1409# The reason for a COLO exit.
1410#
1411# @none: failover has never happened.  This state does not occur in
1412#     the COLO_EXIT event, and is only visible in the result of
1413#     query-colo-status.
1414#
1415# @request: COLO exit is due to an external request.
1416#
1417# @error: COLO exit is due to an internal error.
1418#
1419# @processing: COLO is currently handling a failover (since 4.0).
1420#
1421# Since: 3.1
1422##
# Note: 'none' never appears in the COLO_EXIT event; it is only
# visible in the result of query-colo-status (see doc comment above).
{ 'enum': 'COLOExitReason',
  'data': [ 'none', 'request', 'error', 'processing' ] }
1425
1426##
1427# @x-colo-lost-heartbeat:
1428#
1429# Tell qemu that heartbeat is lost, request it to do takeover
1430# procedures.  If this command is sent to the PVM, the Primary side
1431# will exit COLO mode.  If sent to the Secondary, the Secondary side
# will run failover work, then take over server operation to become
# the service VM.
1434#
1435# Features:
1436#
1437# @unstable: This command is experimental.
1438#
1439# Since: 2.8
1440#
1441# Example:
1442#
1443#     -> { "execute": "x-colo-lost-heartbeat" }
1444#     <- { "return": {} }
1445##
{ 'command': 'x-colo-lost-heartbeat',
  # Experimental (x- prefix, 'unstable' feature).
  'features': [ 'unstable' ],
  # Only present in builds configured with replication support.
  'if': 'CONFIG_REPLICATION' }
1449
1450##
1451# @migrate_cancel:
1452#
1453# Cancel the current executing migration process.
1454#
1455# Notes: This command succeeds even if there is no migration process
1456#     running.
1457#
1458# Since: 0.14
1459#
1460# Example:
1461#
1462#     -> { "execute": "migrate_cancel" }
1463#     <- { "return": {} }
1464##
1465{ 'command': 'migrate_cancel' }
1466
1467##
1468# @migrate-continue:
1469#
1470# Continue migration when it's in a paused state.
1471#
1472# @state: The state the migration is currently expected to be in
1473#
1474# Since: 2.11
1475#
1476# Example:
1477#
1478#     -> { "execute": "migrate-continue" , "arguments":
1479#          { "state": "pre-switchover" } }
1480#     <- { "return": {} }
1481##
1482{ 'command': 'migrate-continue', 'data': {'state': 'MigrationStatus'} }
1483
1484##
1485# @MigrationAddressType:
1486#
1487# The migration stream transport mechanisms.
1488#
1489# @socket: Migrate via socket.
1490#
1491# @exec: Direct the migration stream to another process.
1492#
1493# @rdma: Migrate via RDMA.
1494#
1495# @file: Direct the migration stream to a file.
1496#
1497# Since: 8.2
1498##
{ 'enum': 'MigrationAddressType',
  # Discriminator values for the @MigrationAddress union below.
  'data': [ 'socket', 'exec', 'rdma', 'file' ] }
1501
1502##
1503# @FileMigrationArgs:
1504#
1505# @filename: The file to receive the migration stream
1506#
1507# @offset: The file offset where the migration stream will start
1508#
1509# Since: 8.2
1510##
{ 'struct': 'FileMigrationArgs',
  # Target file for the migration stream.
  'data': { 'filename': 'str',
            # Byte offset in the file where the stream starts.
            'offset': 'uint64' } }
1514
1515##
1516# @MigrationExecCommand:
1517#
1518# @args: command (list head) and arguments to execute.
1519#
1520# Since: 8.2
1521##
{ 'struct': 'MigrationExecCommand',
  # args[0] is the command to execute; the rest are its arguments.
  'data': {'args': [ 'str' ] } }
1524
1525##
1526# @MigrationAddress:
1527#
1528# Migration endpoint configuration.
1529#
1530# @transport: The migration stream transport mechanism
1531#
1532# Since: 8.2
1533##
{ 'union': 'MigrationAddress',
  # Flat union discriminated by 'transport'; each branch carries the
  # endpoint arguments specific to that transport.
  'base': { 'transport' : 'MigrationAddressType'},
  'discriminator': 'transport',
  'data': {
    'socket': 'SocketAddress',
    'exec': 'MigrationExecCommand',
    'rdma': 'InetSocketAddress',
    'file': 'FileMigrationArgs' } }
1542
1543##
1544# @MigrationChannelType:
1545#
1546# The migration channel-type request options.
1547#
1548# @main: Main outbound migration channel.
1549#
1550# Since: 8.1
1551##
{ 'enum': 'MigrationChannelType',
  # Currently only the main outbound channel is defined.
  'data': [ 'main' ] }
1554
1555##
1556# @MigrationChannel:
1557#
1558# Migration stream channel parameters.
1559#
1560# @channel-type: Channel type for transferring packet information.
1561#
1562# @addr: Migration endpoint configuration on destination interface.
1563#
1564# Since: 8.1
1565##
{ 'struct': 'MigrationChannel',
  # Pairs a channel type with the endpoint address it connects to.
  'data': {
      'channel-type': 'MigrationChannelType',
      'addr': 'MigrationAddress' } }
1570
1571##
1572# @migrate:
1573#
1574# Migrates the current running guest to another Virtual Machine.
1575#
1576# @uri: the Uniform Resource Identifier of the destination VM
1577#
1578# @channels: list of migration stream channels with each stream in the
1579#     list connected to a destination interface endpoint.
1580#
1581# @detach: this argument exists only for compatibility reasons and is
1582#     ignored by QEMU
1583#
1584# @resume: resume one paused migration, default "off".  (since 3.0)
1585#
1586# Since: 0.14
1587#
1588# Notes:
1589#
1590#     1. The 'query-migrate' command should be used to check
1591#        migration's progress and final result (this information is
1592#        provided by the 'status' member)
1593#
1594#     2. All boolean arguments default to false
1595#
1596#     3. The user Monitor's "detach" argument is invalid in QMP and
1597#        should not be used
1598#
#     4. The uri argument should have the Uniform Resource Identifier
#        of the default destination VM. This connection will be bound
#        to the default network.
1602#
1603#     5. For now, number of migration streams is restricted to one,
1604#        i.e. number of items in 'channels' list is just 1.
1605#
1606#     6. The 'uri' and 'channels' arguments are mutually exclusive;
1607#        exactly one of the two should be present.
1608#
1609# Example:
1610#
1611#     -> { "execute": "migrate", "arguments": { "uri": "tcp:0:4446" } }
1612#     <- { "return": {} }
1613#
1614#     -> { "execute": "migrate",
1615#          "arguments": {
1616#              "channels": [ { "channel-type": "main",
1617#                              "addr": { "transport": "socket",
1618#                                        "type": "inet",
1619#                                        "host": "10.12.34.9",
1620#                                        "port": "1050" } } ] } }
1621#     <- { "return": {} }
1622#
1623#     -> { "execute": "migrate",
1624#          "arguments": {
1625#              "channels": [ { "channel-type": "main",
1626#                              "addr": { "transport": "exec",
1627#                                        "args": [ "/bin/nc", "-p", "6000",
1628#                                                  "/some/sock" ] } } ] } }
1629#     <- { "return": {} }
1630#
1631#     -> { "execute": "migrate",
1632#          "arguments": {
1633#              "channels": [ { "channel-type": "main",
1634#                              "addr": { "transport": "rdma",
1635#                                        "host": "10.12.34.9",
1636#                                        "port": "1050" } } ] } }
1637#     <- { "return": {} }
1638#
1639#     -> { "execute": "migrate",
1640#          "arguments": {
1641#              "channels": [ { "channel-type": "main",
1642#                              "addr": { "transport": "file",
1643#                                        "filename": "/tmp/migfile",
1644#                                        "offset": "0x1000" } } ] } }
1645#     <- { "return": {} }
1646#
1647##
{ 'command': 'migrate',
  # Exactly one of @uri and @channels must be given (see note 6 in the
  # doc comment above); @detach is accepted but ignored.
  'data': {'*uri': 'str',
           '*channels': [ 'MigrationChannel' ],
           '*detach': 'bool', '*resume': 'bool' } }
1652
1653##
1654# @migrate-incoming:
1655#
1656# Start an incoming migration, the qemu must have been started with
1657# -incoming defer
1658#
1659# @uri: The Uniform Resource Identifier identifying the source or
1660#     address to listen on
1661#
1662# @channels: list of migration stream channels with each stream in the
1663#     list connected to a destination interface endpoint.
1664#
1665# @exit-on-error: Exit on incoming migration failure.  Default true.
1666#     When set to false, the failure triggers a MIGRATION event, and
#     error details can be retrieved with query-migrate.  (since 9.1)
1668#
1669# Since: 2.3
1670#
1671# Notes:
1672#
1673#     1. It's a bad idea to use a string for the uri, but it needs to
1674#        stay compatible with -incoming and the format of the uri is
1675#        already exposed above libvirt.
1676#
1677#     2. QEMU must be started with -incoming defer to allow
1678#        migrate-incoming to be used.
1679#
1680#     3. The uri format is the same as for -incoming
1681#
1682#     4. For now, number of migration streams is restricted to one,
1683#        i.e. number of items in 'channels' list is just 1.
1684#
1685#     5. The 'uri' and 'channels' arguments are mutually exclusive;
1686#        exactly one of the two should be present.
1687#
1688# Example:
1689#
1690#     -> { "execute": "migrate-incoming",
1691#          "arguments": { "uri": "tcp:0:4446" } }
1692#     <- { "return": {} }
1693#
1694#     -> { "execute": "migrate-incoming",
1695#          "arguments": {
1696#              "channels": [ { "channel-type": "main",
1697#                              "addr": { "transport": "socket",
1698#                                        "type": "inet",
1699#                                        "host": "10.12.34.9",
1700#                                        "port": "1050" } } ] } }
1701#     <- { "return": {} }
1702#
1703#     -> { "execute": "migrate-incoming",
1704#          "arguments": {
1705#              "channels": [ { "channel-type": "main",
1706#                              "addr": { "transport": "exec",
1707#                                        "args": [ "/bin/nc", "-p", "6000",
1708#                                                  "/some/sock" ] } } ] } }
1709#     <- { "return": {} }
1710#
1711#     -> { "execute": "migrate-incoming",
1712#          "arguments": {
1713#              "channels": [ { "channel-type": "main",
1714#                              "addr": { "transport": "rdma",
1715#                                        "host": "10.12.34.9",
1716#                                        "port": "1050" } } ] } }
1717#     <- { "return": {} }
1718##
{ 'command': 'migrate-incoming',
  'data': {'*uri': 'str',
           '*channels': [ 'MigrationChannel' ],
           '*exit-on-error': 'bool' } }
1723
1724##
1725# @xen-save-devices-state:
1726#
1727# Save the state of all devices to file.  The RAM and the block
1728# devices of the VM are not saved by this command.
1729#
1730# @filename: the file to save the state of the devices to as binary
1731#     data.  See xen-save-devices-state.txt for a description of the
1732#     binary format.
1733#
1734# @live: Optional argument to ask QEMU to treat this command as part
1735#     of a live migration.  Default to true.  (since 2.11)
1736#
1737# Since: 1.1
1738#
1739# Example:
1740#
1741#     -> { "execute": "xen-save-devices-state",
1742#          "arguments": { "filename": "/tmp/save" } }
1743#     <- { "return": {} }
1744##
1745{ 'command': 'xen-save-devices-state',
1746  'data': {'filename': 'str', '*live':'bool' } }
1747
1748##
1749# @xen-set-global-dirty-log:
1750#
1751# Enable or disable the global dirty log mode.
1752#
1753# @enable: true to enable, false to disable.
1754#
1755# Since: 1.3
1756#
1757# Example:
1758#
1759#     -> { "execute": "xen-set-global-dirty-log",
1760#          "arguments": { "enable": true } }
1761#     <- { "return": {} }
1762##
1763{ 'command': 'xen-set-global-dirty-log', 'data': { 'enable': 'bool' } }
1764
1765##
1766# @xen-load-devices-state:
1767#
1768# Load the state of all devices from file.  The RAM and the block
1769# devices of the VM are not loaded by this command.
1770#
1771# @filename: the file to load the state of the devices from as binary
1772#     data.  See xen-save-devices-state.txt for a description of the
1773#     binary format.
1774#
1775# Since: 2.7
1776#
1777# Example:
1778#
1779#     -> { "execute": "xen-load-devices-state",
1780#          "arguments": { "filename": "/tmp/resume" } }
1781#     <- { "return": {} }
1782##
1783{ 'command': 'xen-load-devices-state', 'data': {'filename': 'str'} }
1784
1785##
1786# @xen-set-replication:
1787#
1788# Enable or disable replication.
1789#
1790# @enable: true to enable, false to disable.
1791#
1792# @primary: true for primary or false for secondary.
1793#
1794# @failover: true to do failover, false to stop.  Cannot be specified
1795#     if 'enable' is true.  Default value is false.
1796#
1797# Example:
1798#
1799#     -> { "execute": "xen-set-replication",
1800#          "arguments": {"enable": true, "primary": false} }
1801#     <- { "return": {} }
1802#
1803# Since: 2.9
1804##
1805{ 'command': 'xen-set-replication',
1806  'data': { 'enable': 'bool', 'primary': 'bool', '*failover': 'bool' },
1807  'if': 'CONFIG_REPLICATION' }
1808
1809##
1810# @ReplicationStatus:
1811#
1812# The result format for 'query-xen-replication-status'.
1813#
1814# @error: true if an error happened, false if replication is normal.
1815#
1816# @desc: the human readable error description string, when @error is
1817#     'true'.
1818#
1819# Since: 2.9
1820##
1821{ 'struct': 'ReplicationStatus',
1822  'data': { 'error': 'bool', '*desc': 'str' },
1823  'if': 'CONFIG_REPLICATION' }
1824
1825##
1826# @query-xen-replication-status:
1827#
1828# Query replication status while the vm is running.
1829#
1830# Returns: A @ReplicationStatus object showing the status.
1831#
1832# Example:
1833#
1834#     -> { "execute": "query-xen-replication-status" }
1835#     <- { "return": { "error": false } }
1836#
1837# Since: 2.9
1838##
1839{ 'command': 'query-xen-replication-status',
1840  'returns': 'ReplicationStatus',
1841  'if': 'CONFIG_REPLICATION' }
1842
1843##
1844# @xen-colo-do-checkpoint:
1845#
1846# Xen uses this command to notify replication to trigger a checkpoint.
1847#
1848# Example:
1849#
1850#     -> { "execute": "xen-colo-do-checkpoint" }
1851#     <- { "return": {} }
1852#
1853# Since: 2.9
1854##
1855{ 'command': 'xen-colo-do-checkpoint',
1856  'if': 'CONFIG_REPLICATION' }
1857
1858##
1859# @COLOStatus:
1860#
1861# The result format for 'query-colo-status'.
1862#
1863# @mode: COLO running mode.  If COLO is running, this field will
1864#     return 'primary' or 'secondary'.
1865#
# @last-mode: COLO last running mode.  If COLO is running, this field
#     will return the same value as the @mode field; after failover,
#     this field can be used to get the last COLO mode.  (since 4.0)
1869#
1870# @reason: describes the reason for the COLO exit.
1871#
1872# Since: 3.1
1873##
1874{ 'struct': 'COLOStatus',
1875  'data': { 'mode': 'COLOMode', 'last-mode': 'COLOMode',
1876            'reason': 'COLOExitReason' },
1877  'if': 'CONFIG_REPLICATION' }
1878
1879##
1880# @query-colo-status:
1881#
1882# Query COLO status while the vm is running.
1883#
1884# Returns: A @COLOStatus object showing the status.
1885#
1886# Example:
1887#
1888#     -> { "execute": "query-colo-status" }
1889#     <- { "return": { "mode": "primary", "last-mode": "none", "reason": "request" } }
1890#
1891# Since: 3.1
1892##
1893{ 'command': 'query-colo-status',
1894  'returns': 'COLOStatus',
1895  'if': 'CONFIG_REPLICATION' }
1896
1897##
1898# @migrate-recover:
1899#
1900# Provide a recovery migration stream URI.
1901#
1902# @uri: the URI to be used for the recovery of migration stream.
1903#
1904# Example:
1905#
1906#     -> { "execute": "migrate-recover",
1907#          "arguments": { "uri": "tcp:192.168.1.200:12345" } }
1908#     <- { "return": {} }
1909#
1910# Since: 3.0
1911##
1912{ 'command': 'migrate-recover',
1913  'data': { 'uri': 'str' },
1914  'allow-oob': true }
1915
1916##
1917# @migrate-pause:
1918#
1919# Pause a migration.  Currently it only supports postcopy.
1920#
1921# Example:
1922#
1923#     -> { "execute": "migrate-pause" }
1924#     <- { "return": {} }
1925#
1926# Since: 3.0
1927##
1928{ 'command': 'migrate-pause', 'allow-oob': true }
1929
1930##
1931# @UNPLUG_PRIMARY:
1932#
1933# Emitted from source side of a migration when migration state is
1934# WAIT_UNPLUG. Device was unplugged by guest operating system.  Device
1935# resources in QEMU are kept on standby to be able to re-plug it in
1936# case of migration failure.
1937#
1938# @device-id: QEMU device id of the unplugged device
1939#
1940# Since: 4.2
1941#
1942# Example:
1943#
1944#     <- { "event": "UNPLUG_PRIMARY",
1945#          "data": { "device-id": "hostdev0" },
1946#          "timestamp": { "seconds": 1265044230, "microseconds": 450486 } }
1947##
1948{ 'event': 'UNPLUG_PRIMARY',
1949  'data': { 'device-id': 'str' } }
1950
1951##
1952# @DirtyRateVcpu:
1953#
1954# Dirty rate of vcpu.
1955#
1956# @id: vcpu index.
1957#
1958# @dirty-rate: dirty rate.
1959#
1960# Since: 6.2
1961##
1962{ 'struct': 'DirtyRateVcpu',
1963  'data': { 'id': 'int', 'dirty-rate': 'int64' } }
1964
1965##
1966# @DirtyRateStatus:
1967#
1968# Dirty page rate measurement status.
1969#
1970# @unstarted: measuring thread has not been started yet
1971#
1972# @measuring: measuring thread is running
1973#
1974# @measured: dirty page rate is measured and the results are available
1975#
1976# Since: 5.2
1977##
1978{ 'enum': 'DirtyRateStatus',
1979  'data': [ 'unstarted', 'measuring', 'measured'] }
1980
1981##
1982# @DirtyRateMeasureMode:
1983#
1984# Method used to measure dirty page rate.  Differences between
1985# available methods are explained in @calc-dirty-rate.
1986#
1987# @page-sampling: use page sampling
1988#
1989# @dirty-ring: use dirty ring
1990#
1991# @dirty-bitmap: use dirty bitmap
1992#
1993# Since: 6.2
1994##
1995{ 'enum': 'DirtyRateMeasureMode',
1996  'data': ['page-sampling', 'dirty-ring', 'dirty-bitmap'] }
1997
1998##
1999# @TimeUnit:
2000#
2001# Specifies unit in which time-related value is specified.
2002#
2003# @second: value is in seconds
2004#
2005# @millisecond: value is in milliseconds
2006#
2007# Since: 8.2
2008##
2009{ 'enum': 'TimeUnit',
2010  'data': ['second', 'millisecond'] }
2011
2012##
2013# @DirtyRateInfo:
2014#
2015# Information about measured dirty page rate.
2016#
2017# @dirty-rate: an estimate of the dirty page rate of the VM in units
2018#     of MiB/s.  Value is present only when @status is 'measured'.
2019#
2020# @status: current status of dirty page rate measurements
2021#
2022# @start-time: start time in units of second for calculation
2023#
2024# @calc-time: time period for which dirty page rate was measured,
2025#     expressed and rounded down to @calc-time-unit.
2026#
2027# @calc-time-unit: time unit of @calc-time  (Since 8.2)
2028#
2029# @sample-pages: number of sampled pages per GiB of guest memory.
2030#     Valid only in page-sampling mode (Since 6.1)
2031#
2032# @mode: mode that was used to measure dirty page rate (Since 6.2)
2033#
2034# @vcpu-dirty-rate: dirty rate for each vCPU if dirty-ring mode was
2035#     specified (Since 6.2)
2036#
2037# Since: 5.2
2038##
2039{ 'struct': 'DirtyRateInfo',
2040  'data': {'*dirty-rate': 'int64',
2041           'status': 'DirtyRateStatus',
2042           'start-time': 'int64',
2043           'calc-time': 'int64',
2044           'calc-time-unit': 'TimeUnit',
2045           'sample-pages': 'uint64',
2046           'mode': 'DirtyRateMeasureMode',
2047           '*vcpu-dirty-rate': [ 'DirtyRateVcpu' ] } }
2048
2049##
2050# @calc-dirty-rate:
2051#
2052# Start measuring dirty page rate of the VM.  Results can be retrieved
2053# with @query-dirty-rate after measurements are completed.
2054#
2055# Dirty page rate is the number of pages changed in a given time
2056# period expressed in MiB/s.  The following methods of calculation are
2057# available:
2058#
2059# 1. In page sampling mode, a random subset of pages are selected and
2060#    hashed twice: once at the beginning of measurement time period,
2061#    and once again at the end.  If two hashes for some page are
2062#    different, the page is counted as changed.  Since this method
2063#    relies on sampling and hashing, calculated dirty page rate is
2064#    only an estimate of its true value.  Increasing @sample-pages
2065#    improves estimation quality at the cost of higher computational
2066#    overhead.
2067#
# 2. Dirty bitmap mode captures writes to memory (for example by
#    temporarily revoking write access to all pages) and counts the
#    resulting page faults.  Information about modified pages is
#    collected into a
2071#    bitmap, where each bit corresponds to one guest page.  This mode
2072#    requires that KVM accelerator property "dirty-ring-size" is *not*
2073#    set.
2074#
2075# 3. Dirty ring mode is similar to dirty bitmap mode, but the
#    information about modified pages is collected into a ring buffer.
2077#    This mode tracks page modification per each vCPU separately.  It
2078#    requires that KVM accelerator property "dirty-ring-size" is set.
2079#
2080# @calc-time: time period for which dirty page rate is calculated.
2081#     By default it is specified in seconds, but the unit can be set
2082#     explicitly with @calc-time-unit.  Note that larger @calc-time
2083#     values will typically result in smaller dirty page rates because
2084#     page dirtying is a one-time event.  Once some page is counted
2085#     as dirty during @calc-time period, further writes to this page
2086#     will not increase dirty page rate anymore.
2087#
2088# @calc-time-unit: time unit in which @calc-time is specified.
2089#     By default it is seconds.  (Since 8.2)
2090#
2091# @sample-pages: number of sampled pages per each GiB of guest memory.
2092#     Default value is 512.  For 4KiB guest pages this corresponds to
2093#     sampling ratio of 0.2%.  This argument is used only in page
2094#     sampling mode.  (Since 6.1)
2095#
2096# @mode: mechanism for tracking dirty pages.  Default value is
2097#     'page-sampling'.  Others are 'dirty-bitmap' and 'dirty-ring'.
2098#     (Since 6.1)
2099#
2100# Since: 5.2
2101#
# Examples:
#
#     -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 1,
#                                                     "sample-pages": 512} }
2106#     <- { "return": {} }
2107#
2108#     Measure dirty rate using dirty bitmap for 500 milliseconds:
2109#
2110#     -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 500,
2111#         "calc-time-unit": "millisecond", "mode": "dirty-bitmap"} }
2112#
2113#     <- { "return": {} }
2114##
2115{ 'command': 'calc-dirty-rate', 'data': {'calc-time': 'int64',
2116                                         '*calc-time-unit': 'TimeUnit',
2117                                         '*sample-pages': 'int',
2118                                         '*mode': 'DirtyRateMeasureMode'} }
2119
2120##
2121# @query-dirty-rate:
2122#
2123# Query results of the most recent invocation of @calc-dirty-rate.
2124#
2125# @calc-time-unit: time unit in which to report calculation time.
2126#     By default it is reported in seconds.  (Since 8.2)
2127#
2128# Since: 5.2
2129#
2130# Examples:
2131#
2132#     1. Measurement is in progress:
2133#
2134#     <- {"status": "measuring", "sample-pages": 512,
2135#         "mode": "page-sampling", "start-time": 1693900454, "calc-time": 10,
2136#         "calc-time-unit": "second"}
2137#
2138#     2. Measurement has been completed:
2139#
2140#     <- {"status": "measured", "sample-pages": 512, "dirty-rate": 108,
2141#         "mode": "page-sampling", "start-time": 1693900454, "calc-time": 10,
2142#         "calc-time-unit": "second"}
2143##
2144{ 'command': 'query-dirty-rate', 'data': {'*calc-time-unit': 'TimeUnit' },
2145                                 'returns': 'DirtyRateInfo' }
2146
2147##
2148# @DirtyLimitInfo:
2149#
2150# Dirty page rate limit information of a virtual CPU.
2151#
2152# @cpu-index: index of a virtual CPU.
2153#
2154# @limit-rate: upper limit of dirty page rate (MB/s) for a virtual
2155#     CPU, 0 means unlimited.
2156#
2157# @current-rate: current dirty page rate (MB/s) for a virtual CPU.
2158#
2159# Since: 7.1
2160##
2161{ 'struct': 'DirtyLimitInfo',
2162  'data': { 'cpu-index': 'int',
2163            'limit-rate': 'uint64',
2164            'current-rate': 'uint64' } }
2165
2166##
2167# @set-vcpu-dirty-limit:
2168#
2169# Set the upper limit of dirty page rate for virtual CPUs.
2170#
2171# Requires KVM with accelerator property "dirty-ring-size" set.  A
2172# virtual CPU's dirty page rate is a measure of its memory load.  To
2173# observe dirty page rates, use @calc-dirty-rate.
2174#
2175# @cpu-index: index of a virtual CPU, default is all.
2176#
2177# @dirty-rate: upper limit of dirty page rate (MB/s) for virtual CPUs.
2178#
2179# Since: 7.1
2180#
2181# Example:
2182#
2183#     -> {"execute": "set-vcpu-dirty-limit"}
2184#         "arguments": { "dirty-rate": 200,
2185#                        "cpu-index": 1 } }
2186#     <- { "return": {} }
2187##
2188{ 'command': 'set-vcpu-dirty-limit',
2189  'data': { '*cpu-index': 'int',
2190            'dirty-rate': 'uint64' } }
2191
2192##
2193# @cancel-vcpu-dirty-limit:
2194#
2195# Cancel the upper limit of dirty page rate for virtual CPUs.
2196#
2197# Cancel the dirty page limit for the vCPU which has been set with
2198# set-vcpu-dirty-limit command.  Note that this command requires
2199# support from dirty ring, same as the "set-vcpu-dirty-limit".
2200#
2201# @cpu-index: index of a virtual CPU, default is all.
2202#
2203# Since: 7.1
2204#
2205# Example:
2206#
2207#     -> {"execute": "cancel-vcpu-dirty-limit"},
2208#         "arguments": { "cpu-index": 1 } }
2209#     <- { "return": {} }
2210##
2211{ 'command': 'cancel-vcpu-dirty-limit',
2212  'data': { '*cpu-index': 'int'} }
2213
2214##
2215# @query-vcpu-dirty-limit:
2216#
2217# Returns information about virtual CPU dirty page rate limits, if
2218# any.
2219#
2220# Since: 7.1
2221#
2222# Example:
2223#
2224#     -> {"execute": "query-vcpu-dirty-limit"}
2225#     <- {"return": [
2226#            { "limit-rate": 60, "current-rate": 3, "cpu-index": 0},
2227#            { "limit-rate": 60, "current-rate": 3, "cpu-index": 1}]}
2228##
2229{ 'command': 'query-vcpu-dirty-limit',
2230  'returns': [ 'DirtyLimitInfo' ] }
2231
2232##
2233# @MigrationThreadInfo:
2234#
# Information about migration threads
2236#
2237# @name: the name of migration thread
2238#
2239# @thread-id: ID of the underlying host thread
2240#
2241# Since: 7.2
2242##
2243{ 'struct': 'MigrationThreadInfo',
2244  'data': {'name': 'str',
2245           'thread-id': 'int'} }
2246
2247##
2248# @query-migrationthreads:
2249#
# Returns information about migration threads
2251#
2252# Returns: @MigrationThreadInfo
2253#
2254# Since: 7.2
2255##
2256{ 'command': 'query-migrationthreads',
2257  'returns': ['MigrationThreadInfo'] }
2258
2259##
2260# @snapshot-save:
2261#
2262# Save a VM snapshot
2263#
2264# @job-id: identifier for the newly created job
2265#
2266# @tag: name of the snapshot to create
2267#
2268# @vmstate: block device node name to save vmstate to
2269#
2270# @devices: list of block device node names to save a snapshot to
2271#
2272# Applications should not assume that the snapshot save is complete
2273# when this command returns.  The job commands / events must be used
2274# to determine completion and to fetch details of any errors that
2275# arise.
2276#
2277# Note that execution of the guest CPUs may be stopped during the time
2278# it takes to save the snapshot.  A future version of QEMU may ensure
2279# CPUs are executing continuously.
2280#
2281# It is strongly recommended that @devices contain all writable block
2282# device nodes if a consistent snapshot is required.
2283#
2284# If @tag already exists, an error will be reported
2285#
2286# Example:
2287#
2288#     -> { "execute": "snapshot-save",
2289#          "arguments": {
2290#             "job-id": "snapsave0",
2291#             "tag": "my-snap",
2292#             "vmstate": "disk0",
2293#             "devices": ["disk0", "disk1"]
2294#          }
2295#        }
2296#     <- { "return": { } }
2297#     <- {"event": "JOB_STATUS_CHANGE",
2298#         "timestamp": {"seconds": 1432121972, "microseconds": 744001},
2299#         "data": {"status": "created", "id": "snapsave0"}}
2300#     <- {"event": "JOB_STATUS_CHANGE",
2301#         "timestamp": {"seconds": 1432122172, "microseconds": 744001},
2302#         "data": {"status": "running", "id": "snapsave0"}}
2303#     <- {"event": "STOP",
2304#         "timestamp": {"seconds": 1432122372, "microseconds": 744001} }
2305#     <- {"event": "RESUME",
2306#         "timestamp": {"seconds": 1432122572, "microseconds": 744001} }
2307#     <- {"event": "JOB_STATUS_CHANGE",
2308#         "timestamp": {"seconds": 1432122772, "microseconds": 744001},
2309#         "data": {"status": "waiting", "id": "snapsave0"}}
2310#     <- {"event": "JOB_STATUS_CHANGE",
2311#         "timestamp": {"seconds": 1432122972, "microseconds": 744001},
2312#         "data": {"status": "pending", "id": "snapsave0"}}
2313#     <- {"event": "JOB_STATUS_CHANGE",
2314#         "timestamp": {"seconds": 1432123172, "microseconds": 744001},
2315#         "data": {"status": "concluded", "id": "snapsave0"}}
2316#     -> {"execute": "query-jobs"}
2317#     <- {"return": [{"current-progress": 1,
2318#                     "status": "concluded",
2319#                     "total-progress": 1,
2320#                     "type": "snapshot-save",
2321#                     "id": "snapsave0"}]}
2322#
2323# Since: 6.0
2324##
2325{ 'command': 'snapshot-save',
2326  'data': { 'job-id': 'str',
2327            'tag': 'str',
2328            'vmstate': 'str',
2329            'devices': ['str'] } }
2330
2331##
2332# @snapshot-load:
2333#
2334# Load a VM snapshot
2335#
2336# @job-id: identifier for the newly created job
2337#
2338# @tag: name of the snapshot to load.
2339#
2340# @vmstate: block device node name to load vmstate from
2341#
2342# @devices: list of block device node names to load a snapshot from
2343#
2344# Applications should not assume that the snapshot load is complete
2345# when this command returns.  The job commands / events must be used
2346# to determine completion and to fetch details of any errors that
2347# arise.
2348#
2349# Note that execution of the guest CPUs will be stopped during the
2350# time it takes to load the snapshot.
2351#
2352# It is strongly recommended that @devices contain all writable block
2353# device nodes that can have changed since the original @snapshot-save
2354# command execution.
2355#
2356# Example:
2357#
2358#     -> { "execute": "snapshot-load",
2359#          "arguments": {
2360#             "job-id": "snapload0",
2361#             "tag": "my-snap",
2362#             "vmstate": "disk0",
2363#             "devices": ["disk0", "disk1"]
2364#          }
2365#        }
2366#     <- { "return": { } }
2367#     <- {"event": "JOB_STATUS_CHANGE",
2368#         "timestamp": {"seconds": 1472124172, "microseconds": 744001},
2369#         "data": {"status": "created", "id": "snapload0"}}
2370#     <- {"event": "JOB_STATUS_CHANGE",
2371#         "timestamp": {"seconds": 1472125172, "microseconds": 744001},
2372#         "data": {"status": "running", "id": "snapload0"}}
2373#     <- {"event": "STOP",
2374#         "timestamp": {"seconds": 1472125472, "microseconds": 744001} }
2375#     <- {"event": "RESUME",
2376#         "timestamp": {"seconds": 1472125872, "microseconds": 744001} }
2377#     <- {"event": "JOB_STATUS_CHANGE",
2378#         "timestamp": {"seconds": 1472126172, "microseconds": 744001},
2379#         "data": {"status": "waiting", "id": "snapload0"}}
2380#     <- {"event": "JOB_STATUS_CHANGE",
2381#         "timestamp": {"seconds": 1472127172, "microseconds": 744001},
2382#         "data": {"status": "pending", "id": "snapload0"}}
2383#     <- {"event": "JOB_STATUS_CHANGE",
2384#         "timestamp": {"seconds": 1472128172, "microseconds": 744001},
2385#         "data": {"status": "concluded", "id": "snapload0"}}
2386#     -> {"execute": "query-jobs"}
2387#     <- {"return": [{"current-progress": 1,
2388#                     "status": "concluded",
2389#                     "total-progress": 1,
2390#                     "type": "snapshot-load",
2391#                     "id": "snapload0"}]}
2392#
2393# Since: 6.0
2394##
2395{ 'command': 'snapshot-load',
2396  'data': { 'job-id': 'str',
2397            'tag': 'str',
2398            'vmstate': 'str',
2399            'devices': ['str'] } }
2400
2401##
2402# @snapshot-delete:
2403#
2404# Delete a VM snapshot
2405#
2406# @job-id: identifier for the newly created job
2407#
2408# @tag: name of the snapshot to delete.
2409#
2410# @devices: list of block device node names to delete a snapshot from
2411#
2412# Applications should not assume that the snapshot delete is complete
2413# when this command returns.  The job commands / events must be used
2414# to determine completion and to fetch details of any errors that
2415# arise.
2416#
2417# Example:
2418#
2419#     -> { "execute": "snapshot-delete",
2420#          "arguments": {
2421#             "job-id": "snapdelete0",
2422#             "tag": "my-snap",
2423#             "devices": ["disk0", "disk1"]
2424#          }
2425#        }
2426#     <- { "return": { } }
2427#     <- {"event": "JOB_STATUS_CHANGE",
2428#         "timestamp": {"seconds": 1442124172, "microseconds": 744001},
2429#         "data": {"status": "created", "id": "snapdelete0"}}
2430#     <- {"event": "JOB_STATUS_CHANGE",
2431#         "timestamp": {"seconds": 1442125172, "microseconds": 744001},
2432#         "data": {"status": "running", "id": "snapdelete0"}}
2433#     <- {"event": "JOB_STATUS_CHANGE",
2434#         "timestamp": {"seconds": 1442126172, "microseconds": 744001},
2435#         "data": {"status": "waiting", "id": "snapdelete0"}}
2436#     <- {"event": "JOB_STATUS_CHANGE",
2437#         "timestamp": {"seconds": 1442127172, "microseconds": 744001},
2438#         "data": {"status": "pending", "id": "snapdelete0"}}
2439#     <- {"event": "JOB_STATUS_CHANGE",
2440#         "timestamp": {"seconds": 1442128172, "microseconds": 744001},
2441#         "data": {"status": "concluded", "id": "snapdelete0"}}
2442#     -> {"execute": "query-jobs"}
2443#     <- {"return": [{"current-progress": 1,
2444#                     "status": "concluded",
2445#                     "total-progress": 1,
2446#                     "type": "snapshot-delete",
2447#                     "id": "snapdelete0"}]}
2448#
2449# Since: 6.0
2450##
2451{ 'command': 'snapshot-delete',
2452  'data': { 'job-id': 'str',
2453            'tag': 'str',
2454            'devices': ['str'] } }
2455