# -*- Mode: Python -*-
# vim: filetype=python
#

##
# = Migration
##

{ 'include': 'common.json' }
{ 'include': 'sockets.json' }

##
# @MigrationStats:
#
# Detailed migration status.
#
# @transferred: amount of bytes already transferred to the target VM
#
# @remaining: amount of bytes remaining to be transferred to the
#     target VM
#
# @total: total amount of bytes involved in the migration process
#
# @duplicate: number of duplicate (zero) pages (since 1.2)
#
# @skipped: number of skipped zero pages (since 1.5)
#
# @normal: number of normal pages (since 1.2)
#
# @normal-bytes: number of normal bytes sent (since 1.2)
#
# @dirty-pages-rate: number of pages dirtied by second by the guest
#     (since 1.3)
#
# @mbps: throughput in megabits/sec. (since 1.6)
#
# @dirty-sync-count: number of times that dirty ram was synchronized
#     (since 2.1)
#
# @postcopy-requests: The number of page requests received from the
#     destination (since 2.7)
#
# @page-size: The number of bytes per page for the various page-based
#     statistics (since 2.10)
#
# @multifd-bytes: The number of bytes sent through multifd (since 3.0)
#
# @pages-per-second: the number of memory pages transferred per second
#     (Since 4.0)
#
# @precopy-bytes: The number of bytes sent in the pre-copy phase
#     (since 7.0).
#
# @downtime-bytes: The number of bytes sent while the guest is paused
#     (since 7.0).
#
# @postcopy-bytes: The number of bytes sent during the post-copy phase
#     (since 7.0).
#
# @dirty-sync-missed-zero-copy: Number of times dirty RAM
#     synchronization could not avoid copying dirty pages. This is
#     between 0 and @dirty-sync-count * @multifd-channels. (since
#     7.1)
#
# Since: 0.14
##
{ 'struct': 'MigrationStats',
  'data': {'transferred': 'int', 'remaining': 'int', 'total': 'int' ,
           'duplicate': 'int', 'skipped': 'int', 'normal': 'int',
           'normal-bytes': 'int', 'dirty-pages-rate' : 'int',
           'mbps' : 'number', 'dirty-sync-count' : 'int',
           'postcopy-requests' : 'int', 'page-size' : 'int',
           'multifd-bytes' : 'uint64', 'pages-per-second' : 'uint64',
           'precopy-bytes' : 'uint64', 'downtime-bytes' : 'uint64',
           'postcopy-bytes' : 'uint64',
           'dirty-sync-missed-zero-copy' : 'uint64' } }

##
# @XBZRLECacheStats:
#
# Detailed XBZRLE migration cache statistics
#
# @cache-size: XBZRLE cache size
#
# @bytes: amount of bytes already transferred to the target VM
#
# @pages: amount of pages transferred to the target VM
#
# @cache-miss: number of cache miss
#
# @cache-miss-rate: rate of cache miss (since 2.1)
#
# @encoding-rate: rate of encoded bytes (since 5.1)
#
# @overflow: number of overflows
#
# Since: 1.2
##
{ 'struct': 'XBZRLECacheStats',
  'data': {'cache-size': 'size', 'bytes': 'int', 'pages': 'int',
           'cache-miss': 'int', 'cache-miss-rate': 'number',
           'encoding-rate': 'number', 'overflow': 'int' } }

##
# @CompressionStats:
#
# Detailed migration compression statistics
#
# @pages: amount of pages compressed and transferred to the target VM
#
# @busy: count of times that no free thread was available to compress
#     data
#
# @busy-rate: rate of thread busy
#
# @compressed-size: amount of bytes after compression
#
# @compression-rate: rate of compressed size
#
# Since: 3.1
##
{ 'struct': 'CompressionStats',
  'data': {'pages': 'int', 'busy': 'int', 'busy-rate': 'number',
           'compressed-size': 'int', 'compression-rate': 'number' } }

##
# @MigrationStatus:
#
# An enumeration of migration status.
#
# @none: no migration has ever happened.
#
# @setup: migration process has been initiated.
#
# @cancelling: in the process of cancelling migration.
#
# @cancelled: cancelling migration is finished.
#
# @active: in the process of doing migration.
#
# @postcopy-active: like active, but now in postcopy mode. (since
#     2.5)
#
# @postcopy-paused: during postcopy but paused. (since 3.0)
#
# @postcopy-recover: trying to recover from a paused postcopy. (since
#     3.0)
#
# @completed: migration is finished.
#
# @failed: some error occurred during migration process.
#
# @colo: VM is in the process of fault tolerance, VM can not get into
#     this state unless colo capability is enabled for migration.
#     (since 2.8)
#
# @pre-switchover: Paused before device serialisation. (since 2.11)
#
# @device: During device serialisation when pause-before-switchover is
#     enabled (since 2.11)
#
# @wait-unplug: wait for device unplug request by guest OS to be
#     completed. (since 4.2)
#
# Since: 2.3
##
{ 'enum': 'MigrationStatus',
  'data': [ 'none', 'setup', 'cancelling', 'cancelled',
            'active', 'postcopy-active', 'postcopy-paused',
            'postcopy-recover', 'completed', 'failed', 'colo',
            'pre-switchover', 'device', 'wait-unplug' ] }
##
# @VfioStats:
#
# Detailed VFIO devices migration statistics
#
# @transferred: amount of bytes transferred to the target VM by VFIO
#     devices
#
# Since: 5.2
##
{ 'struct': 'VfioStats',
  'data': {'transferred': 'int' } }

##
# @MigrationInfo:
#
# Information about current migration process.
#
# @status: @MigrationStatus describing the current migration status.
#     If this field is not returned, no migration process has been
#     initiated
#
# @ram: @MigrationStats containing detailed migration status, only
#     returned if status is 'active' or 'completed'(since 1.2)
#
# @disk: @MigrationStats containing detailed disk migration status,
#     only returned if status is 'active' and it is a block migration
#
# @xbzrle-cache: @XBZRLECacheStats containing detailed XBZRLE
#     migration statistics, only returned if XBZRLE feature is on and
#     status is 'active' or 'completed' (since 1.2)
#
# @total-time: total amount of milliseconds since migration started.
#     If migration has ended, it returns the total migration time.
#     (since 1.2)
#
# @downtime: only present when migration finishes correctly total
#     downtime in milliseconds for the guest. (since 1.3)
#
# @expected-downtime: only present while migration is active expected
#     downtime in milliseconds for the guest in last walk of the dirty
#     bitmap. (since 1.3)
#
# @setup-time: amount of setup time in milliseconds *before* the
#     iterations begin but *after* the QMP command is issued. This is
#     designed to provide an accounting of any activities (such as
#     RDMA pinning) which may be expensive, but do not actually occur
#     during the iterative migration rounds themselves. (since 1.6)
#
# @cpu-throttle-percentage: percentage of time guest cpus are being
#     throttled during auto-converge. This is only present when
#     auto-converge has started throttling guest cpus. (Since 2.7)
#
# @error-desc: the human readable error description string, when
#     @status is 'failed'. Clients should not attempt to parse the
#     error strings. (Since 2.7)
#
# @postcopy-blocktime: total time when all vCPU were blocked during
#     postcopy live migration. This is only present when the
#     postcopy-blocktime migration capability is enabled. (Since 3.0)
#
# @postcopy-vcpu-blocktime: list of the postcopy blocktime per vCPU.
#     This is only present when the postcopy-blocktime migration
#     capability is enabled. (Since 3.0)
#
# @compression: migration compression statistics, only returned if
#     compression feature is on and status is 'active' or 'completed'
#     (Since 3.1)
#
# @socket-address: Only used for tcp, to know what the real port is
#     (Since 4.0)
#
# @vfio: @VfioStats containing detailed VFIO devices migration
#     statistics, only returned if VFIO device is present, migration
#     is supported by all VFIO devices and status is 'active' or
#     'completed' (since 5.2)
#
# @blocked-reasons: A list of reasons an outgoing migration is
#     blocked. Present and non-empty when migration is blocked.
#     (since 6.0)
#
# Since: 0.14
##
{ 'struct': 'MigrationInfo',
  'data': {'*status': 'MigrationStatus', '*ram': 'MigrationStats',
           '*disk': 'MigrationStats',
           '*vfio': 'VfioStats',
           '*xbzrle-cache': 'XBZRLECacheStats',
           '*total-time': 'int',
           '*expected-downtime': 'int',
           '*downtime': 'int',
           '*setup-time': 'int',
           '*cpu-throttle-percentage': 'int',
           '*error-desc': 'str',
           '*blocked-reasons': ['str'],
           '*postcopy-blocktime' : 'uint32',
           '*postcopy-vcpu-blocktime': ['uint32'],
           '*compression': 'CompressionStats',
           '*socket-address': ['SocketAddress'] } }

##
# @query-migrate:
#
# Returns information about current migration process. If migration
# is active there will be another json-object with RAM migration
# status and if block migration is active another one with block
# migration status.
#
# Returns: @MigrationInfo
#
# Since: 0.14
#
# Examples:
#
# 1. Before the first migration
#
# -> { "execute": "query-migrate" }
# <- { "return": {} }
#
# 2. Migration is done and has succeeded
#
# -> { "execute": "query-migrate" }
# <- { "return": {
#         "status": "completed",
#         "total-time":12345,
#         "setup-time":12345,
#         "downtime":12345,
#         "ram":{
#           "transferred":123,
#           "remaining":123,
#           "total":246,
#           "duplicate":123,
#           "normal":123,
#           "normal-bytes":123456,
#           "dirty-sync-count":15
#         }
#      }
#    }
#
# 3. Migration is done and has failed
#
# -> { "execute": "query-migrate" }
# <- { "return": { "status": "failed" } }
#
# 4. Migration is being performed and is not a block migration:
#
# -> { "execute": "query-migrate" }
# <- {
#       "return":{
#          "status":"active",
#          "total-time":12345,
#          "setup-time":12345,
#          "expected-downtime":12345,
#          "ram":{
#             "transferred":123,
#             "remaining":123,
#             "total":246,
#             "duplicate":123,
#             "normal":123,
#             "normal-bytes":123456,
#             "dirty-sync-count":15
#          }
#       }
#    }
#
# 5. Migration is being performed and is a block migration:
#
# -> { "execute": "query-migrate" }
# <- {
#       "return":{
#          "status":"active",
#          "total-time":12345,
#          "setup-time":12345,
#          "expected-downtime":12345,
#          "ram":{
#             "total":1057024,
#             "remaining":1053304,
#             "transferred":3720,
#             "duplicate":123,
#             "normal":123,
#             "normal-bytes":123456,
#             "dirty-sync-count":15
#          },
#          "disk":{
#             "total":20971520,
#             "remaining":20880384,
#             "transferred":91136
#          }
#       }
#    }
#
# 6. Migration is being performed and XBZRLE is active:
#
# -> { "execute": "query-migrate" }
# <- {
#       "return":{
#          "status":"active",
#          "total-time":12345,
#          "setup-time":12345,
#          "expected-downtime":12345,
#          "ram":{
#             "total":1057024,
#             "remaining":1053304,
#             "transferred":3720,
#             "duplicate":10,
#             "normal":3333,
#             "normal-bytes":3412992,
#             "dirty-sync-count":15
#          },
#          "xbzrle-cache":{
#             "cache-size":67108864,
#             "bytes":20971520,
#             "pages":2444343,
#             "cache-miss":2244,
#             "cache-miss-rate":0.123,
#             "encoding-rate":80.1,
#             "overflow":34434
#          }
#       }
#    }
##
{ 'command': 'query-migrate', 'returns': 'MigrationInfo' }

##
# @MigrationCapability:
#
# Migration capabilities enumeration
#
# @xbzrle: Migration supports xbzrle (Xor Based Zero Run Length
#     Encoding). This feature allows us to minimize migration traffic
#     for certain work loads, by sending compressed difference of the
#     pages
#
# @rdma-pin-all: Controls whether or not the entire VM memory
#     footprint is mlock()'d on demand or all at once. Refer to
#     docs/rdma.txt for usage. Disabled by default. (since 2.0)
#
# @zero-blocks: During storage migration encode blocks of zeroes
#     efficiently. This essentially saves 1MB of zeroes per block on
#     the wire. Enabling requires source and target VM to support
#     this feature. To enable it is sufficient to enable the
#     capability on the source VM. The feature is disabled by default.
#     (since 1.6)
#
# @compress: Use multiple compression threads to accelerate live
#     migration. This feature can help to reduce the migration
#     traffic, by sending compressed pages. Please note that if
#     compress and xbzrle are both on, compress only takes effect in
#     the ram bulk stage, after that, it will be disabled and only
#     xbzrle takes effect, this can help to minimize migration
#     traffic. The feature is disabled by default. (since 2.4 )
#
# @events: generate events for each migration state change (since 2.4
#     )
#
# @auto-converge: If enabled, QEMU will automatically throttle down
#     the guest to speed up convergence of RAM migration. (since 1.6)
#
# @postcopy-ram: Start executing on the migration target before all of
#     RAM has been migrated, pulling the remaining pages along as
#     needed. The capacity must have the same setting on both source
#     and target or migration will not even start. NOTE: If the
#     migration fails during postcopy the VM will fail. (since 2.6)
#
# @x-colo: If enabled, migration will never end, and the state of the
#     VM on the primary side will be migrated continuously to the VM
#     on secondary side, this process is called COarse-Grain LOck
#     Stepping (COLO) for Non-stop Service. (since 2.8)
#
# @release-ram: if enabled, qemu will free the migrated ram pages on
#     the source during postcopy-ram migration. (since 2.9)
#
# @block: If enabled, QEMU will also migrate the contents of all block
#     devices. Default is disabled. A possible alternative uses
#     mirror jobs to a builtin NBD server on the destination, which
#     offers more flexibility. (Since 2.10)
#
# @return-path: If enabled, migration will use the return path even
#     for precopy. (since 2.10)
#
# @pause-before-switchover: Pause outgoing migration before
#     serialising device state and before disabling block IO (since
#     2.11)
#
# @multifd: Use more than one fd for migration (since 4.0)
#
# @dirty-bitmaps: If enabled, QEMU will migrate named dirty bitmaps.
#     (since 2.12)
#
# @postcopy-blocktime: Calculate downtime for postcopy live migration
#     (since 3.0)
#
# @late-block-activate: If enabled, the destination will not activate
#     block devices (and thus take locks) immediately at the end of
#     migration. (since 3.0)
#
# @x-ignore-shared: If enabled, QEMU will not migrate shared memory
#     (since 4.0)
#
# @validate-uuid: Send the UUID of the source to allow the destination
#     to ensure it is the same. (since 4.2)
#
# @background-snapshot: If enabled, the migration stream will be a
#     snapshot of the VM exactly at the point when the migration
#     procedure starts. The VM RAM is saved with running VM. (since
#     6.0)
#
# @zero-copy-send: Controls behavior on sending memory pages on
#     migration. When true, enables a zero-copy mechanism for sending
#     memory pages, if host supports it. Requires that QEMU be
#     permitted to use locked memory for guest RAM pages. (since 7.1)
#
# @postcopy-preempt: If enabled, the migration process will allow
#     postcopy requests to preempt precopy stream, so postcopy
#     requests will be handled faster. This is a performance feature
#     and should not affect the correctness of postcopy migration.
#     (since 7.1)
#
# Features:
#
# @unstable: Members @x-colo and @x-ignore-shared are experimental.
#
# Since: 1.2
##
{ 'enum': 'MigrationCapability',
  'data': ['xbzrle', 'rdma-pin-all', 'auto-converge', 'zero-blocks',
           'compress', 'events', 'postcopy-ram',
           { 'name': 'x-colo', 'features': [ 'unstable' ] },
           'release-ram',
           'block', 'return-path', 'pause-before-switchover', 'multifd',
           'dirty-bitmaps', 'postcopy-blocktime', 'late-block-activate',
           { 'name': 'x-ignore-shared', 'features': [ 'unstable' ] },
           'validate-uuid', 'background-snapshot',
           'zero-copy-send', 'postcopy-preempt'] }

##
# @MigrationCapabilityStatus:
#
# Migration capability information
#
# @capability: capability enum
#
# @state: capability state bool
#
# Since: 1.2
##
{ 'struct': 'MigrationCapabilityStatus',
  'data': { 'capability' : 'MigrationCapability', 'state' : 'bool' } }

##
# @migrate-set-capabilities:
#
# Enable/Disable the following migration capabilities (like xbzrle)
#
# @capabilities: json array of capability modifications to make
#
# Since: 1.2
#
# Example:
#
# -> { "execute": "migrate-set-capabilities" , "arguments":
#      { "capabilities": [ { "capability": "xbzrle", "state": true } ] } }
# <- { "return": {} }
##
{ 'command': 'migrate-set-capabilities',
  'data': { 'capabilities': ['MigrationCapabilityStatus'] } }

##
# @query-migrate-capabilities:
#
# Returns information about the current migration capabilities status
#
# Returns: @MigrationCapabilityStatus
#
# Since: 1.2
#
# Example:
#
# -> { "execute": "query-migrate-capabilities" }
# <- { "return": [
#       {"state": false, "capability": "xbzrle"},
#       {"state": false, "capability": "rdma-pin-all"},
#       {"state": false, "capability": "auto-converge"},
#       {"state": false, "capability": "zero-blocks"},
#       {"state": false, "capability": "compress"},
#       {"state": true, "capability": "events"},
#       {"state": false, "capability": "postcopy-ram"},
#       {"state": false, "capability": "x-colo"}
#    ]}
##
{ 'command': 'query-migrate-capabilities', 'returns': ['MigrationCapabilityStatus']}

##
# @MultiFDCompression:
#
# An enumeration of multifd compression methods.
#
# @none: no compression.
#
# @zlib: use zlib compression method.
#
# @zstd: use zstd compression method.
#
# Since: 5.0
##
{ 'enum': 'MultiFDCompression',
  'data': [ 'none', 'zlib',
            { 'name': 'zstd', 'if': 'CONFIG_ZSTD' } ] }

##
# @BitmapMigrationBitmapAliasTransform:
#
# @persistent: If present, the bitmap will be made persistent or
#     transient depending on this parameter.
#
# Since: 6.0
##
{ 'struct': 'BitmapMigrationBitmapAliasTransform',
  'data': {
      '*persistent': 'bool'
  } }

##
# @BitmapMigrationBitmapAlias:
#
# @name: The name of the bitmap.
#
# @alias: An alias name for migration (for example the bitmap name on
#     the opposite site).
#
# @transform: Allows the modification of the migrated bitmap. (since
#     6.0)
#
# Since: 5.2
##
{ 'struct': 'BitmapMigrationBitmapAlias',
  'data': {
      'name': 'str',
      'alias': 'str',
      '*transform': 'BitmapMigrationBitmapAliasTransform'
  } }

##
# @BitmapMigrationNodeAlias:
#
# Maps a block node name and the bitmaps it has to aliases for dirty
# bitmap migration.
#
# @node-name: A block node name.
#
# @alias: An alias block node name for migration (for example the node
#     name on the opposite site).
#
# @bitmaps: Mappings for the bitmaps on this node.
#
# Since: 5.2
##
{ 'struct': 'BitmapMigrationNodeAlias',
  'data': {
      'node-name': 'str',
      'alias': 'str',
      'bitmaps': [ 'BitmapMigrationBitmapAlias' ]
  } }

##
# @MigrationParameter:
#
# Migration parameters enumeration
#
# @announce-initial: Initial delay (in milliseconds) before sending
#     the first announce (Since 4.0)
#
# @announce-max: Maximum delay (in milliseconds) between packets in
#     the announcement (Since 4.0)
#
# @announce-rounds: Number of self-announce packets sent after
#     migration (Since 4.0)
#
# @announce-step: Increase in delay (in milliseconds) between
#     subsequent packets in the announcement (Since 4.0)
#
# @compress-level: Set the compression level to be used in live
#     migration, the compression level is an integer between 0 and 9,
#     where 0 means no compression, 1 means the best compression
#     speed, and 9 means best compression ratio which will consume
#     more CPU.
#
# @compress-threads: Set compression thread count to be used in live
#     migration, the compression thread count is an integer between 1
#     and 255.
#
# @compress-wait-thread: Controls behavior when all compression
#     threads are currently busy. If true (default), wait for a free
#     compression thread to become available; otherwise, send the page
#     uncompressed. (Since 3.1)
#
# @decompress-threads: Set decompression thread count to be used in
#     live migration, the decompression thread count is an integer
#     between 1 and 255. Usually, decompression is at least 4 times as
#     fast as compression, so set the decompress-threads to the number
#     about 1/4 of compress-threads is adequate.
#
# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
#     bytes_xfer_period to trigger throttling. It is expressed as
#     percentage. The default value is 50. (Since 5.0)
#
# @cpu-throttle-initial: Initial percentage of time guest cpus are
#     throttled when migration auto-converge is activated. The
#     default value is 20. (Since 2.7)
#
# @cpu-throttle-increment: throttle percentage increase each time
#     auto-converge detects that migration is not making progress.
#     The default value is 10. (Since 2.7)
#
# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage At
#     the tail stage of throttling, the Guest is very sensitive to CPU
#     percentage while the @cpu-throttle -increment is excessive
#     usually at tail stage. If this parameter is true, we will
#     compute the ideal CPU percentage used by the Guest, which may
#     exactly make the dirty rate match the dirty rate threshold.
#     Then we will choose a smaller throttle increment between the one
#     specified by @cpu-throttle-increment and the one generated by
#     ideal CPU percentage. Therefore, it is compatible to
#     traditional throttling, meanwhile the throttle increment won't
#     be excessive at tail stage. The default value is false. (Since
#     5.1)
#
# @tls-creds: ID of the 'tls-creds' object that provides credentials
#     for establishing a TLS connection over the migration data
#     channel. On the outgoing side of the migration, the credentials
#     must be for a 'client' endpoint, while for the incoming side the
#     credentials must be for a 'server' endpoint. Setting this will
#     enable TLS for all migrations. The default is unset, resulting
#     in unsecured migration at the QEMU level. (Since 2.7)
#
# @tls-hostname: hostname of the target host for the migration. This
#     is required when using x509 based TLS credentials and the
#     migration URI does not already include a hostname. For example
#     if using fd: or exec: based migration, the hostname must be
#     provided so that the server's x509 certificate identity can be
#     validated. (Since 2.7)
#
# @tls-authz: ID of the 'authz' object subclass that provides access
#     control checking of the TLS x509 certificate distinguished name.
#     This object is only resolved at time of use, so can be deleted
#     and recreated on the fly while the migration server is active.
#     If missing, it will default to denying access (Since 4.0)
#
# @max-bandwidth: to set maximum speed for migration. maximum speed
#     in bytes per second. (Since 2.8)
#
# @downtime-limit: set maximum tolerated downtime for migration.
#     maximum downtime in milliseconds (Since 2.8)
#
# @x-checkpoint-delay: The delay time (in ms) between two COLO
#     checkpoints in periodic mode. (Since 2.8)
#
# @block-incremental: Affects how much storage is migrated when the
#     block migration capability is enabled. When false, the entire
#     storage backing chain is migrated into a flattened image at the
#     destination; when true, only the active qcow2 layer is migrated
#     and the destination must already have access to the same backing
#     chain as was used on the source. (since 2.10)
#
# @multifd-channels: Number of channels used to migrate data in
#     parallel. This is the same number that the number of sockets
#     used for migration. The default value is 2 (since 4.0)
#
# @xbzrle-cache-size: cache size to be used by XBZRLE migration. It
#     needs to be a multiple of the target page size and a power of 2
#     (Since 2.11)
#
# @max-postcopy-bandwidth: Background transfer bandwidth during
#     postcopy. Defaults to 0 (unlimited). In bytes per second.
#     (Since 3.0)
#
# @max-cpu-throttle: maximum cpu throttle percentage. Defaults to 99.
#     (Since 3.1)
#
# @multifd-compression: Which compression method to use. Defaults to
#     none. (Since 5.0)
#
# @multifd-zlib-level: Set the compression level to be used in live
#     migration, the compression level is an integer between 0 and 9,
#     where 0 means no compression, 1 means the best compression
#     speed, and 9 means best compression ratio which will consume
#     more CPU. Defaults to 1. (Since 5.0)
#
# @multifd-zstd-level: Set the compression level to be used in live
#     migration, the compression level is an integer between 0 and 20,
#     where 0 means no compression, 1 means the best compression
#     speed, and 20 means best compression ratio which will consume
#     more CPU. Defaults to 1. (Since 5.0)
#
# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
#     aliases for the purpose of dirty bitmap migration. Such aliases
#     may for example be the corresponding names on the opposite site.
#     The mapping must be one-to-one, but not necessarily complete: On
#     the source, unmapped bitmaps and all bitmaps on unmapped nodes
#     will be ignored. On the destination, encountering an unmapped
#     alias in the incoming migration stream will result in a report,
#     and all further bitmap migration data will then be discarded.
#     Note that the destination does not know about bitmaps it does
#     not receive, so there is no limitation or requirement regarding
#     the number of bitmaps received, or how they are named, or on
#     which nodes they are placed. By default (when this parameter
#     has never been set), bitmap names are mapped to themselves.
#     Nodes are mapped to their block device name if there is one, and
#     to their node name otherwise. (Since 5.2)
#
# Features:
#
# @unstable: Member @x-checkpoint-delay is experimental.
#
# Since: 2.4
##
{ 'enum': 'MigrationParameter',
  'data': ['announce-initial', 'announce-max',
           'announce-rounds', 'announce-step',
           'compress-level', 'compress-threads', 'decompress-threads',
           'compress-wait-thread', 'throttle-trigger-threshold',
           'cpu-throttle-initial', 'cpu-throttle-increment',
           'cpu-throttle-tailslow',
           'tls-creds', 'tls-hostname', 'tls-authz', 'max-bandwidth',
           'downtime-limit',
           { 'name': 'x-checkpoint-delay', 'features': [ 'unstable' ] },
           'block-incremental',
           'multifd-channels',
           'xbzrle-cache-size', 'max-postcopy-bandwidth',
           'max-cpu-throttle', 'multifd-compression',
           'multifd-zlib-level' ,'multifd-zstd-level',
           'block-bitmap-mapping' ] }

##
# @MigrateSetParameters:
#
# @announce-initial: Initial delay (in milliseconds) before sending
#     the first announce (Since 4.0)
#
# @announce-max: Maximum delay (in milliseconds) between packets in
#     the announcement (Since 4.0)
#
# @announce-rounds: Number of self-announce packets sent after
#     migration (Since 4.0)
#
# @announce-step: Increase in delay (in milliseconds) between
#     subsequent packets in the announcement (Since 4.0)
#
# @compress-level: compression level
#
# @compress-threads: compression thread count
#
# @compress-wait-thread: Controls behavior when all compression
#     threads are currently busy. If true (default), wait for a free
#     compression thread to become available; otherwise, send the page
#     uncompressed. (Since 3.1)
#
# @decompress-threads: decompression thread count
#
# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
#     bytes_xfer_period to trigger throttling. It is expressed as
#     percentage. The default value is 50. (Since 5.0)
#
# @cpu-throttle-initial: Initial percentage of time guest cpus are
#     throttled when migration auto-converge is activated. The
#     default value is 20. (Since 2.7)
#
# @cpu-throttle-increment: throttle percentage increase each time
#     auto-converge detects that migration is not making progress.
#     The default value is 10. (Since 2.7)
#
# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage At
#     the tail stage of throttling, the Guest is very sensitive to CPU
#     percentage while the @cpu-throttle -increment is excessive
#     usually at tail stage. If this parameter is true, we will
#     compute the ideal CPU percentage used by the Guest, which may
#     exactly make the dirty rate match the dirty rate threshold.
#     Then we will choose a smaller throttle increment between the one
#     specified by @cpu-throttle-increment and the one generated by
#     ideal CPU percentage. Therefore, it is compatible to
#     traditional throttling, meanwhile the throttle increment won't
#     be excessive at tail stage. The default value is false. (Since
#     5.1)
#
# @tls-creds: ID of the 'tls-creds' object that provides credentials
#     for establishing a TLS connection over the migration data
#     channel. On the outgoing side of the migration, the credentials
#     must be for a 'client' endpoint, while for the incoming side the
#     credentials must be for a 'server' endpoint. Setting this to a
#     non-empty string enables TLS for all migrations. An empty
#     string means that QEMU will use plain text mode for migration,
#     rather than TLS (Since 2.9) Previously (since 2.7), this was
#     reported by omitting tls-creds instead.
#
# @tls-hostname: hostname of the target host for the migration. This
#     is required when using x509 based TLS credentials and the
#     migration URI does not already include a hostname. For example
#     if using fd: or exec: based migration, the hostname must be
#     provided so that the server's x509 certificate identity can be
#     validated. (Since 2.7) An empty string means that QEMU will use
#     the hostname associated with the migration URI, if any. (Since
#     2.9) Previously (since 2.7), this was reported by omitting
#     tls-hostname instead.
#
# @max-bandwidth: to set maximum speed for migration. maximum speed
#     in bytes per second. (Since 2.8)
#
# @downtime-limit: set maximum tolerated downtime for migration.
#     maximum downtime in milliseconds (Since 2.8)
#
# @x-checkpoint-delay: the delay time between two COLO checkpoints.
#     (Since 2.8)
#
# @block-incremental: Affects how much storage is migrated when the
#     block migration capability is enabled. When false, the entire
#     storage backing chain is migrated into a flattened image at the
#     destination; when true, only the active qcow2 layer is migrated
#     and the destination must already have access to the same backing
#     chain as was used on the source. (since 2.10)
#
# @multifd-channels: Number of channels used to migrate data in
#     parallel. This is the same number that the number of sockets
#     used for migration. The default value is 2 (since 4.0)
#
# @xbzrle-cache-size: cache size to be used by XBZRLE migration. It
#     needs to be a multiple of the target page size and a power of 2
#     (Since 2.11)
#
# @max-postcopy-bandwidth: Background transfer bandwidth during
#     postcopy. Defaults to 0 (unlimited). In bytes per second.
#     (Since 3.0)
#
# @max-cpu-throttle: maximum cpu throttle percentage. The default
#     value is 99. (Since 3.1)
#
# @multifd-compression: Which compression method to use. Defaults to
#     none. (Since 5.0)
#
# @multifd-zlib-level: Set the compression level to be used in live
#     migration, the compression level is an integer between 0 and 9,
#     where 0 means no compression, 1 means the best compression
#     speed, and 9 means best compression ratio which will consume
#     more CPU. Defaults to 1. (Since 5.0)
#
# @multifd-zstd-level: Set the compression level to be used in live
#     migration, the compression level is an integer between 0 and 20,
#     where 0 means no compression, 1 means the best compression
#     speed, and 20 means best compression ratio which will consume
#     more CPU. Defaults to 1. (Since 5.0)
#
# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
#     aliases for the purpose of dirty bitmap migration. Such aliases
#     may for example be the corresponding names on the opposite site.
#     The mapping must be one-to-one, but not necessarily complete: On
#     the source, unmapped bitmaps and all bitmaps on unmapped nodes
#     will be ignored. On the destination, encountering an unmapped
#     alias in the incoming migration stream will result in a report,
#     and all further bitmap migration data will then be discarded.
#     Note that the destination does not know about bitmaps it does
#     not receive, so there is no limitation or requirement regarding
#     the number of bitmaps received, or how they are named, or on
#     which nodes they are placed. By default (when this parameter
#     has never been set), bitmap names are mapped to themselves.
#     Nodes are mapped to their block device name if there is one, and
#     to their node name otherwise. (Since 5.2)
#
# Features:
#
# @unstable: Member @x-checkpoint-delay is experimental.
#
# TODO: either fuse back into MigrationParameters, or make
#     MigrationParameters members mandatory
#
# Since: 2.4
##
{ 'struct': 'MigrateSetParameters',
  'data': { '*announce-initial': 'size',
            '*announce-max': 'size',
            '*announce-rounds': 'size',
            '*announce-step': 'size',
            '*compress-level': 'uint8',
            '*compress-threads': 'uint8',
            '*compress-wait-thread': 'bool',
            '*decompress-threads': 'uint8',
            '*throttle-trigger-threshold': 'uint8',
            '*cpu-throttle-initial': 'uint8',
            '*cpu-throttle-increment': 'uint8',
            '*cpu-throttle-tailslow': 'bool',
            '*tls-creds': 'StrOrNull',
            '*tls-hostname': 'StrOrNull',
            '*tls-authz': 'StrOrNull',
            '*max-bandwidth': 'size',
            '*downtime-limit': 'uint64',
            '*x-checkpoint-delay': { 'type': 'uint32',
                                     'features': [ 'unstable' ] },
            '*block-incremental': 'bool',
            '*multifd-channels': 'uint8',
            '*xbzrle-cache-size': 'size',
            '*max-postcopy-bandwidth': 'size',
            '*max-cpu-throttle': 'uint8',
            '*multifd-compression': 'MultiFDCompression',
            '*multifd-zlib-level': 'uint8',
            '*multifd-zstd-level': 'uint8',
            '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ] } }

##
# @migrate-set-parameters:
#
# Set various migration parameters.
#
# Since: 2.4
#
# Example:
#
# -> { "execute": "migrate-set-parameters" ,
#      "arguments": { "compress-level": 1 } }
# <- { "return": {} }
##
{ 'command': 'migrate-set-parameters', 'boxed': true,
  'data': 'MigrateSetParameters' }

##
# @MigrationParameters:
#
# The optional members aren't actually optional.
997# 998# @announce-initial: Initial delay (in milliseconds) before sending 999# the first announce (Since 4.0) 1000# 1001# @announce-max: Maximum delay (in milliseconds) between packets in 1002# the announcement (Since 4.0) 1003# 1004# @announce-rounds: Number of self-announce packets sent after 1005# migration (Since 4.0) 1006# 1007# @announce-step: Increase in delay (in milliseconds) between 1008# subsequent packets in the announcement (Since 4.0) 1009# 1010# @compress-level: compression level 1011# 1012# @compress-threads: compression thread count 1013# 1014# @compress-wait-thread: Controls behavior when all compression 1015# threads are currently busy. If true (default), wait for a free 1016# compression thread to become available; otherwise, send the page 1017# uncompressed. (Since 3.1) 1018# 1019# @decompress-threads: decompression thread count 1020# 1021# @throttle-trigger-threshold: The ratio of bytes_dirty_period and 1022# bytes_xfer_period to trigger throttling. It is expressed as 1023# percentage. The default value is 50. (Since 5.0) 1024# 1025# @cpu-throttle-initial: Initial percentage of time guest cpus are 1026# throttled when migration auto-converge is activated. (Since 1027# 2.7) 1028# 1029# @cpu-throttle-increment: throttle percentage increase each time 1030# auto-converge detects that migration is not making progress. 1031# (Since 2.7) 1032# 1033# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage At 1034# the tail stage of throttling, the Guest is very sensitive to CPU 1035# percentage while the @cpu-throttle -increment is excessive 1036# usually at tail stage. If this parameter is true, we will 1037# compute the ideal CPU percentage used by the Guest, which may 1038# exactly make the dirty rate match the dirty rate threshold. 1039# Then we will choose a smaller throttle increment between the one 1040# specified by @cpu-throttle-increment and the one generated by 1041# ideal CPU percentage. 
Therefore, it is compatible to 1042# traditional throttling, meanwhile the throttle increment won't 1043# be excessive at tail stage. The default value is false. (Since 1044# 5.1) 1045# 1046# @tls-creds: ID of the 'tls-creds' object that provides credentials 1047# for establishing a TLS connection over the migration data 1048# channel. On the outgoing side of the migration, the credentials 1049# must be for a 'client' endpoint, while for the incoming side the 1050# credentials must be for a 'server' endpoint. An empty string 1051# means that QEMU will use plain text mode for migration, rather 1052# than TLS (Since 2.7) Note: 2.8 reports this by omitting 1053# tls-creds instead. 1054# 1055# @tls-hostname: hostname of the target host for the migration. This 1056# is required when using x509 based TLS credentials and the 1057# migration URI does not already include a hostname. For example 1058# if using fd: or exec: based migration, the hostname must be 1059# provided so that the server's x509 certificate identity can be 1060# validated. (Since 2.7) An empty string means that QEMU will use 1061# the hostname associated with the migration URI, if any. (Since 1062# 2.9) Note: 2.8 reports this by omitting tls-hostname instead. 1063# 1064# @tls-authz: ID of the 'authz' object subclass that provides access 1065# control checking of the TLS x509 certificate distinguished name. 1066# (Since 4.0) 1067# 1068# @max-bandwidth: to set maximum speed for migration. maximum speed 1069# in bytes per second. (Since 2.8) 1070# 1071# @downtime-limit: set maximum tolerated downtime for migration. 1072# maximum downtime in milliseconds (Since 2.8) 1073# 1074# @x-checkpoint-delay: the delay time between two COLO checkpoints. 1075# (Since 2.8) 1076# 1077# @block-incremental: Affects how much storage is migrated when the 1078# block migration capability is enabled. 
When false, the entire 1079# storage backing chain is migrated into a flattened image at the 1080# destination; when true, only the active qcow2 layer is migrated 1081# and the destination must already have access to the same backing 1082# chain as was used on the source. (since 2.10) 1083# 1084# @multifd-channels: Number of channels used to migrate data in 1085# parallel. This is the same number that the number of sockets 1086# used for migration. The default value is 2 (since 4.0) 1087# 1088# @xbzrle-cache-size: cache size to be used by XBZRLE migration. It 1089# needs to be a multiple of the target page size and a power of 2 1090# (Since 2.11) 1091# 1092# @max-postcopy-bandwidth: Background transfer bandwidth during 1093# postcopy. Defaults to 0 (unlimited). In bytes per second. 1094# (Since 3.0) 1095# 1096# @max-cpu-throttle: maximum cpu throttle percentage. Defaults to 99. 1097# (Since 3.1) 1098# 1099# @multifd-compression: Which compression method to use. Defaults to 1100# none. (Since 5.0) 1101# 1102# @multifd-zlib-level: Set the compression level to be used in live 1103# migration, the compression level is an integer between 0 and 9, 1104# where 0 means no compression, 1 means the best compression 1105# speed, and 9 means best compression ratio which will consume 1106# more CPU. Defaults to 1. (Since 5.0) 1107# 1108# @multifd-zstd-level: Set the compression level to be used in live 1109# migration, the compression level is an integer between 0 and 20, 1110# where 0 means no compression, 1 means the best compression 1111# speed, and 20 means best compression ratio which will consume 1112# more CPU. Defaults to 1. (Since 5.0) 1113# 1114# @block-bitmap-mapping: Maps block nodes and bitmaps on them to 1115# aliases for the purpose of dirty bitmap migration. Such aliases 1116# may for example be the corresponding names on the opposite site. 
1117# The mapping must be one-to-one, but not necessarily complete: On 1118# the source, unmapped bitmaps and all bitmaps on unmapped nodes 1119# will be ignored. On the destination, encountering an unmapped 1120# alias in the incoming migration stream will result in a report, 1121# and all further bitmap migration data will then be discarded. 1122# Note that the destination does not know about bitmaps it does 1123# not receive, so there is no limitation or requirement regarding 1124# the number of bitmaps received, or how they are named, or on 1125# which nodes they are placed. By default (when this parameter 1126# has never been set), bitmap names are mapped to themselves. 1127# Nodes are mapped to their block device name if there is one, and 1128# to their node name otherwise. (Since 5.2) 1129# 1130# Features: 1131# 1132# @unstable: Member @x-checkpoint-delay is experimental. 1133# 1134# Since: 2.4 1135## 1136{ 'struct': 'MigrationParameters', 1137 'data': { '*announce-initial': 'size', 1138 '*announce-max': 'size', 1139 '*announce-rounds': 'size', 1140 '*announce-step': 'size', 1141 '*compress-level': 'uint8', 1142 '*compress-threads': 'uint8', 1143 '*compress-wait-thread': 'bool', 1144 '*decompress-threads': 'uint8', 1145 '*throttle-trigger-threshold': 'uint8', 1146 '*cpu-throttle-initial': 'uint8', 1147 '*cpu-throttle-increment': 'uint8', 1148 '*cpu-throttle-tailslow': 'bool', 1149 '*tls-creds': 'str', 1150 '*tls-hostname': 'str', 1151 '*tls-authz': 'str', 1152 '*max-bandwidth': 'size', 1153 '*downtime-limit': 'uint64', 1154 '*x-checkpoint-delay': { 'type': 'uint32', 1155 'features': [ 'unstable' ] }, 1156 '*block-incremental': 'bool', 1157 '*multifd-channels': 'uint8', 1158 '*xbzrle-cache-size': 'size', 1159 '*max-postcopy-bandwidth': 'size', 1160 '*max-cpu-throttle': 'uint8', 1161 '*multifd-compression': 'MultiFDCompression', 1162 '*multifd-zlib-level': 'uint8', 1163 '*multifd-zstd-level': 'uint8', 1164 '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' 
] } } 1165 1166## 1167# @query-migrate-parameters: 1168# 1169# Returns information about the current migration parameters 1170# 1171# Returns: @MigrationParameters 1172# 1173# Since: 2.4 1174# 1175# Example: 1176# 1177# -> { "execute": "query-migrate-parameters" } 1178# <- { "return": { 1179# "decompress-threads": 2, 1180# "cpu-throttle-increment": 10, 1181# "compress-threads": 8, 1182# "compress-level": 1, 1183# "cpu-throttle-initial": 20, 1184# "max-bandwidth": 33554432, 1185# "downtime-limit": 300 1186# } 1187# } 1188## 1189{ 'command': 'query-migrate-parameters', 1190 'returns': 'MigrationParameters' } 1191 1192## 1193# @migrate-start-postcopy: 1194# 1195# Followup to a migration command to switch the migration to postcopy 1196# mode. The postcopy-ram capability must be set on both source and 1197# destination before the original migration command. 1198# 1199# Since: 2.5 1200# 1201# Example: 1202# 1203# -> { "execute": "migrate-start-postcopy" } 1204# <- { "return": {} } 1205## 1206{ 'command': 'migrate-start-postcopy' } 1207 1208## 1209# @MIGRATION: 1210# 1211# Emitted when a migration event happens 1212# 1213# @status: @MigrationStatus describing the current migration status. 
1214# 1215# Since: 2.4 1216# 1217# Example: 1218# 1219# <- {"timestamp": {"seconds": 1432121972, "microseconds": 744001}, 1220# "event": "MIGRATION", 1221# "data": {"status": "completed"} } 1222## 1223{ 'event': 'MIGRATION', 1224 'data': {'status': 'MigrationStatus'}} 1225 1226## 1227# @MIGRATION_PASS: 1228# 1229# Emitted from the source side of a migration at the start of each 1230# pass (when it syncs the dirty bitmap) 1231# 1232# @pass: An incrementing count (starting at 1 on the first pass) 1233# 1234# Since: 2.6 1235# 1236# Example: 1237# 1238# <- { "timestamp": {"seconds": 1449669631, "microseconds": 239225}, 1239# "event": "MIGRATION_PASS", "data": {"pass": 2} } 1240## 1241{ 'event': 'MIGRATION_PASS', 1242 'data': { 'pass': 'int' } } 1243 1244## 1245# @COLOMessage: 1246# 1247# The message transmission between Primary side and Secondary side. 1248# 1249# @checkpoint-ready: Secondary VM (SVM) is ready for checkpointing 1250# 1251# @checkpoint-request: Primary VM (PVM) tells SVM to prepare for 1252# checkpointing 1253# 1254# @checkpoint-reply: SVM gets PVM's checkpoint request 1255# 1256# @vmstate-send: VM's state will be sent by PVM. 1257# 1258# @vmstate-size: The total size of VMstate. 1259# 1260# @vmstate-received: VM's state has been received by SVM. 1261# 1262# @vmstate-loaded: VM's state has been loaded by SVM. 1263# 1264# Since: 2.8 1265## 1266{ 'enum': 'COLOMessage', 1267 'data': [ 'checkpoint-ready', 'checkpoint-request', 'checkpoint-reply', 1268 'vmstate-send', 'vmstate-size', 'vmstate-received', 1269 'vmstate-loaded' ] } 1270 1271## 1272# @COLOMode: 1273# 1274# The COLO current mode. 1275# 1276# @none: COLO is disabled. 1277# 1278# @primary: COLO node in primary side. 1279# 1280# @secondary: COLO node in slave side. 
1281# 1282# Since: 2.8 1283## 1284{ 'enum': 'COLOMode', 1285 'data': [ 'none', 'primary', 'secondary'] } 1286 1287## 1288# @FailoverStatus: 1289# 1290# An enumeration of COLO failover status 1291# 1292# @none: no failover has ever happened 1293# 1294# @require: got failover requirement but not handled 1295# 1296# @active: in the process of doing failover 1297# 1298# @completed: finish the process of failover 1299# 1300# @relaunch: restart the failover process, from 'none' -> 'completed' 1301# (Since 2.9) 1302# 1303# Since: 2.8 1304## 1305{ 'enum': 'FailoverStatus', 1306 'data': [ 'none', 'require', 'active', 'completed', 'relaunch' ] } 1307 1308## 1309# @COLO_EXIT: 1310# 1311# Emitted when VM finishes COLO mode due to some errors happening or 1312# at the request of users. 1313# 1314# @mode: report COLO mode when COLO exited. 1315# 1316# @reason: describes the reason for the COLO exit. 1317# 1318# Since: 3.1 1319# 1320# Example: 1321# 1322# <- { "timestamp": {"seconds": 2032141960, "microseconds": 417172}, 1323# "event": "COLO_EXIT", "data": {"mode": "primary", "reason": "request" } } 1324## 1325{ 'event': 'COLO_EXIT', 1326 'data': {'mode': 'COLOMode', 'reason': 'COLOExitReason' } } 1327 1328## 1329# @COLOExitReason: 1330# 1331# The reason for a COLO exit. 1332# 1333# @none: failover has never happened. This state does not occur in 1334# the COLO_EXIT event, and is only visible in the result of 1335# query-colo-status. 1336# 1337# @request: COLO exit is due to an external request. 1338# 1339# @error: COLO exit is due to an internal error. 1340# 1341# @processing: COLO is currently handling a failover (since 4.0). 1342# 1343# Since: 3.1 1344## 1345{ 'enum': 'COLOExitReason', 1346 'data': [ 'none', 'request', 'error' , 'processing' ] } 1347 1348## 1349# @x-colo-lost-heartbeat: 1350# 1351# Tell qemu that heartbeat is lost, request it to do takeover 1352# procedures. If this command is sent to the PVM, the Primary side 1353# will exit COLO mode. 
If sent to the Secondary, the Secondary side 1354# will run failover work, then takes over server operation to become 1355# the service VM. 1356# 1357# Features: 1358# 1359# @unstable: This command is experimental. 1360# 1361# Since: 2.8 1362# 1363# Example: 1364# 1365# -> { "execute": "x-colo-lost-heartbeat" } 1366# <- { "return": {} } 1367## 1368{ 'command': 'x-colo-lost-heartbeat', 1369 'features': [ 'unstable' ] } 1370 1371## 1372# @migrate_cancel: 1373# 1374# Cancel the current executing migration process. 1375# 1376# Returns: nothing on success 1377# 1378# Notes: This command succeeds even if there is no migration process 1379# running. 1380# 1381# Since: 0.14 1382# 1383# Example: 1384# 1385# -> { "execute": "migrate_cancel" } 1386# <- { "return": {} } 1387## 1388{ 'command': 'migrate_cancel' } 1389 1390## 1391# @migrate-continue: 1392# 1393# Continue migration when it's in a paused state. 1394# 1395# @state: The state the migration is currently expected to be in 1396# 1397# Returns: nothing on success 1398# 1399# Since: 2.11 1400# 1401# Example: 1402# 1403# -> { "execute": "migrate-continue" , "arguments": 1404# { "state": "pre-switchover" } } 1405# <- { "return": {} } 1406## 1407{ 'command': 'migrate-continue', 'data': {'state': 'MigrationStatus'} } 1408 1409## 1410# @migrate: 1411# 1412# Migrates the current running guest to another Virtual Machine. 1413# 1414# @uri: the Uniform Resource Identifier of the destination VM 1415# 1416# @blk: do block migration (full disk copy) 1417# 1418# @inc: incremental disk copy migration 1419# 1420# @detach: this argument exists only for compatibility reasons and is 1421# ignored by QEMU 1422# 1423# @resume: resume one paused migration, default "off". (since 3.0) 1424# 1425# Returns: nothing on success 1426# 1427# Since: 0.14 1428# 1429# Notes: 1430# 1431# 1. 
The 'query-migrate' command should be used to check migration's 1432# progress and final result (this information is provided by the 1433# 'status' member) 1434# 1435# 2. All boolean arguments default to false 1436# 1437# 3. The user Monitor's "detach" argument is invalid in QMP and should 1438# not be used 1439# 1440# Example: 1441# 1442# -> { "execute": "migrate", "arguments": { "uri": "tcp:0:4446" } } 1443# <- { "return": {} } 1444## 1445{ 'command': 'migrate', 1446 'data': {'uri': 'str', '*blk': 'bool', '*inc': 'bool', 1447 '*detach': 'bool', '*resume': 'bool' } } 1448 1449## 1450# @migrate-incoming: 1451# 1452# Start an incoming migration, the qemu must have been started with 1453# -incoming defer 1454# 1455# @uri: The Uniform Resource Identifier identifying the source or 1456# address to listen on 1457# 1458# Returns: nothing on success 1459# 1460# Since: 2.3 1461# 1462# Notes: 1463# 1464# 1. It's a bad idea to use a string for the uri, but it needs 1465# to stay compatible with -incoming and the format of the uri 1466# is already exposed above libvirt. 1467# 1468# 2. QEMU must be started with -incoming defer to allow 1469# migrate-incoming to be used. 1470# 1471# 3. The uri format is the same as for -incoming 1472# 1473# Example: 1474# 1475# -> { "execute": "migrate-incoming", 1476# "arguments": { "uri": "tcp::4446" } } 1477# <- { "return": {} } 1478## 1479{ 'command': 'migrate-incoming', 'data': {'uri': 'str' } } 1480 1481## 1482# @xen-save-devices-state: 1483# 1484# Save the state of all devices to file. The RAM and the block 1485# devices of the VM are not saved by this command. 1486# 1487# @filename: the file to save the state of the devices to as binary 1488# data. See xen-save-devices-state.txt for a description of the 1489# binary format. 1490# 1491# @live: Optional argument to ask QEMU to treat this command as part 1492# of a live migration. Default to true. 
(since 2.11) 1493# 1494# Returns: Nothing on success 1495# 1496# Since: 1.1 1497# 1498# Example: 1499# 1500# -> { "execute": "xen-save-devices-state", 1501# "arguments": { "filename": "/tmp/save" } } 1502# <- { "return": {} } 1503## 1504{ 'command': 'xen-save-devices-state', 1505 'data': {'filename': 'str', '*live':'bool' } } 1506 1507## 1508# @xen-set-global-dirty-log: 1509# 1510# Enable or disable the global dirty log mode. 1511# 1512# @enable: true to enable, false to disable. 1513# 1514# Returns: nothing 1515# 1516# Since: 1.3 1517# 1518# Example: 1519# 1520# -> { "execute": "xen-set-global-dirty-log", 1521# "arguments": { "enable": true } } 1522# <- { "return": {} } 1523## 1524{ 'command': 'xen-set-global-dirty-log', 'data': { 'enable': 'bool' } } 1525 1526## 1527# @xen-load-devices-state: 1528# 1529# Load the state of all devices from file. The RAM and the block 1530# devices of the VM are not loaded by this command. 1531# 1532# @filename: the file to load the state of the devices from as binary 1533# data. See xen-save-devices-state.txt for a description of the 1534# binary format. 1535# 1536# Since: 2.7 1537# 1538# Example: 1539# 1540# -> { "execute": "xen-load-devices-state", 1541# "arguments": { "filename": "/tmp/resume" } } 1542# <- { "return": {} } 1543## 1544{ 'command': 'xen-load-devices-state', 'data': {'filename': 'str'} } 1545 1546## 1547# @xen-set-replication: 1548# 1549# Enable or disable replication. 1550# 1551# @enable: true to enable, false to disable. 1552# 1553# @primary: true for primary or false for secondary. 1554# 1555# @failover: true to do failover, false to stop. but cannot be 1556# specified if 'enable' is true. default value is false. 1557# 1558# Returns: nothing. 
1559# 1560# Example: 1561# 1562# -> { "execute": "xen-set-replication", 1563# "arguments": {"enable": true, "primary": false} } 1564# <- { "return": {} } 1565# 1566# Since: 2.9 1567## 1568{ 'command': 'xen-set-replication', 1569 'data': { 'enable': 'bool', 'primary': 'bool', '*failover' : 'bool' }, 1570 'if': 'CONFIG_REPLICATION' } 1571 1572## 1573# @ReplicationStatus: 1574# 1575# The result format for 'query-xen-replication-status'. 1576# 1577# @error: true if an error happened, false if replication is normal. 1578# 1579# @desc: the human readable error description string, when @error is 1580# 'true'. 1581# 1582# Since: 2.9 1583## 1584{ 'struct': 'ReplicationStatus', 1585 'data': { 'error': 'bool', '*desc': 'str' }, 1586 'if': 'CONFIG_REPLICATION' } 1587 1588## 1589# @query-xen-replication-status: 1590# 1591# Query replication status while the vm is running. 1592# 1593# Returns: A @ReplicationStatus object showing the status. 1594# 1595# Example: 1596# 1597# -> { "execute": "query-xen-replication-status" } 1598# <- { "return": { "error": false } } 1599# 1600# Since: 2.9 1601## 1602{ 'command': 'query-xen-replication-status', 1603 'returns': 'ReplicationStatus', 1604 'if': 'CONFIG_REPLICATION' } 1605 1606## 1607# @xen-colo-do-checkpoint: 1608# 1609# Xen uses this command to notify replication to trigger a checkpoint. 1610# 1611# Returns: nothing. 1612# 1613# Example: 1614# 1615# -> { "execute": "xen-colo-do-checkpoint" } 1616# <- { "return": {} } 1617# 1618# Since: 2.9 1619## 1620{ 'command': 'xen-colo-do-checkpoint', 1621 'if': 'CONFIG_REPLICATION' } 1622 1623## 1624# @COLOStatus: 1625# 1626# The result format for 'query-colo-status'. 1627# 1628# @mode: COLO running mode. If COLO is running, this field will 1629# return 'primary' or 'secondary'. 1630# 1631# @last-mode: COLO last running mode. If COLO is running, this field 1632# will return same like mode field, after failover we can use this 1633# field to get last colo mode. 
(since 4.0) 1634# 1635# @reason: describes the reason for the COLO exit. 1636# 1637# Since: 3.1 1638## 1639{ 'struct': 'COLOStatus', 1640 'data': { 'mode': 'COLOMode', 'last-mode': 'COLOMode', 1641 'reason': 'COLOExitReason' } } 1642 1643## 1644# @query-colo-status: 1645# 1646# Query COLO status while the vm is running. 1647# 1648# Returns: A @COLOStatus object showing the status. 1649# 1650# Example: 1651# 1652# -> { "execute": "query-colo-status" } 1653# <- { "return": { "mode": "primary", "last-mode": "none", "reason": "request" } } 1654# 1655# Since: 3.1 1656## 1657{ 'command': 'query-colo-status', 1658 'returns': 'COLOStatus' } 1659 1660## 1661# @migrate-recover: 1662# 1663# Provide a recovery migration stream URI. 1664# 1665# @uri: the URI to be used for the recovery of migration stream. 1666# 1667# Returns: nothing. 1668# 1669# Example: 1670# 1671# -> { "execute": "migrate-recover", 1672# "arguments": { "uri": "tcp:192.168.1.200:12345" } } 1673# <- { "return": {} } 1674# 1675# Since: 3.0 1676## 1677{ 'command': 'migrate-recover', 1678 'data': { 'uri': 'str' }, 1679 'allow-oob': true } 1680 1681## 1682# @migrate-pause: 1683# 1684# Pause a migration. Currently it only supports postcopy. 1685# 1686# Returns: nothing. 1687# 1688# Example: 1689# 1690# -> { "execute": "migrate-pause" } 1691# <- { "return": {} } 1692# 1693# Since: 3.0 1694## 1695{ 'command': 'migrate-pause', 'allow-oob': true } 1696 1697## 1698# @UNPLUG_PRIMARY: 1699# 1700# Emitted from source side of a migration when migration state is 1701# WAIT_UNPLUG. Device was unplugged by guest operating system. Device 1702# resources in QEMU are kept on standby to be able to re-plug it in 1703# case of migration failure. 
1704# 1705# @device-id: QEMU device id of the unplugged device 1706# 1707# Since: 4.2 1708# 1709# Example: 1710# 1711# <- { "event": "UNPLUG_PRIMARY", 1712# "data": { "device-id": "hostdev0" }, 1713# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } } 1714## 1715{ 'event': 'UNPLUG_PRIMARY', 1716 'data': { 'device-id': 'str' } } 1717 1718## 1719# @DirtyRateVcpu: 1720# 1721# Dirty rate of vcpu. 1722# 1723# @id: vcpu index. 1724# 1725# @dirty-rate: dirty rate. 1726# 1727# Since: 6.2 1728## 1729{ 'struct': 'DirtyRateVcpu', 1730 'data': { 'id': 'int', 'dirty-rate': 'int64' } } 1731 1732## 1733# @DirtyRateStatus: 1734# 1735# An enumeration of dirtyrate status. 1736# 1737# @unstarted: the dirtyrate thread has not been started. 1738# 1739# @measuring: the dirtyrate thread is measuring. 1740# 1741# @measured: the dirtyrate thread has measured and results are 1742# available. 1743# 1744# Since: 5.2 1745## 1746{ 'enum': 'DirtyRateStatus', 1747 'data': [ 'unstarted', 'measuring', 'measured'] } 1748 1749## 1750# @DirtyRateMeasureMode: 1751# 1752# An enumeration of mode of measuring dirtyrate. 1753# 1754# @page-sampling: calculate dirtyrate by sampling pages. 1755# 1756# @dirty-ring: calculate dirtyrate by dirty ring. 1757# 1758# @dirty-bitmap: calculate dirtyrate by dirty bitmap. 1759# 1760# Since: 6.2 1761## 1762{ 'enum': 'DirtyRateMeasureMode', 1763 'data': ['page-sampling', 'dirty-ring', 'dirty-bitmap'] } 1764 1765## 1766# @DirtyRateInfo: 1767# 1768# Information about current dirty page rate of vm. 1769# 1770# @dirty-rate: an estimate of the dirty page rate of the VM in units 1771# of MB/s, present only when estimating the rate has completed. 
1772# 1773# @status: status containing dirtyrate query status includes 1774# 'unstarted' or 'measuring' or 'measured' 1775# 1776# @start-time: start time in units of second for calculation 1777# 1778# @calc-time: time in units of second for sample dirty pages 1779# 1780# @sample-pages: page count per GB for sample dirty pages the default 1781# value is 512 (since 6.1) 1782# 1783# @mode: mode containing method of calculate dirtyrate includes 1784# 'page-sampling' and 'dirty-ring' (Since 6.2) 1785# 1786# @vcpu-dirty-rate: dirtyrate for each vcpu if dirty-ring mode 1787# specified (Since 6.2) 1788# 1789# Since: 5.2 1790## 1791{ 'struct': 'DirtyRateInfo', 1792 'data': {'*dirty-rate': 'int64', 1793 'status': 'DirtyRateStatus', 1794 'start-time': 'int64', 1795 'calc-time': 'int64', 1796 'sample-pages': 'uint64', 1797 'mode': 'DirtyRateMeasureMode', 1798 '*vcpu-dirty-rate': [ 'DirtyRateVcpu' ] } } 1799 1800## 1801# @calc-dirty-rate: 1802# 1803# start calculating dirty page rate for vm 1804# 1805# @calc-time: time in units of second for sample dirty pages 1806# 1807# @sample-pages: page count per GB for sample dirty pages the default 1808# value is 512 (since 6.1) 1809# 1810# @mode: mechanism of calculating dirtyrate includes 'page-sampling' 1811# and 'dirty-ring' (Since 6.1) 1812# 1813# Since: 5.2 1814# 1815# Example: 1816# 1817# -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 1, 1818# 'sample-pages': 512} } 1819# <- { "return": {} } 1820## 1821{ 'command': 'calc-dirty-rate', 'data': {'calc-time': 'int64', 1822 '*sample-pages': 'int', 1823 '*mode': 'DirtyRateMeasureMode'} } 1824 1825## 1826# @query-dirty-rate: 1827# 1828# query dirty page rate in units of MB/s for vm 1829# 1830# Since: 5.2 1831## 1832{ 'command': 'query-dirty-rate', 'returns': 'DirtyRateInfo' } 1833 1834## 1835# @DirtyLimitInfo: 1836# 1837# Dirty page rate limit information of a virtual CPU. 1838# 1839# @cpu-index: index of a virtual CPU. 
1840# 1841# @limit-rate: upper limit of dirty page rate (MB/s) for a virtual 1842# CPU, 0 means unlimited. 1843# 1844# @current-rate: current dirty page rate (MB/s) for a virtual CPU. 1845# 1846# Since: 7.1 1847## 1848{ 'struct': 'DirtyLimitInfo', 1849 'data': { 'cpu-index': 'int', 1850 'limit-rate': 'uint64', 1851 'current-rate': 'uint64' } } 1852 1853## 1854# @set-vcpu-dirty-limit: 1855# 1856# Set the upper limit of dirty page rate for virtual CPUs. 1857# 1858# Requires KVM with accelerator property "dirty-ring-size" set. A 1859# virtual CPU's dirty page rate is a measure of its memory load. To 1860# observe dirty page rates, use @calc-dirty-rate. 1861# 1862# @cpu-index: index of a virtual CPU, default is all. 1863# 1864# @dirty-rate: upper limit of dirty page rate (MB/s) for virtual CPUs. 1865# 1866# Since: 7.1 1867# 1868# Example: 1869# 1870# -> {"execute": "set-vcpu-dirty-limit"} 1871# "arguments": { "dirty-rate": 200, 1872# "cpu-index": 1 } } 1873# <- { "return": {} } 1874## 1875{ 'command': 'set-vcpu-dirty-limit', 1876 'data': { '*cpu-index': 'int', 1877 'dirty-rate': 'uint64' } } 1878 1879## 1880# @cancel-vcpu-dirty-limit: 1881# 1882# Cancel the upper limit of dirty page rate for virtual CPUs. 1883# 1884# Cancel the dirty page limit for the vCPU which has been set with 1885# set-vcpu-dirty-limit command. Note that this command requires 1886# support from dirty ring, same as the "set-vcpu-dirty-limit". 1887# 1888# @cpu-index: index of a virtual CPU, default is all. 1889# 1890# Since: 7.1 1891# 1892# Example: 1893# 1894# -> {"execute": "cancel-vcpu-dirty-limit"}, 1895# "arguments": { "cpu-index": 1 } } 1896# <- { "return": {} } 1897## 1898{ 'command': 'cancel-vcpu-dirty-limit', 1899 'data': { '*cpu-index': 'int'} } 1900 1901## 1902# @query-vcpu-dirty-limit: 1903# 1904# Returns information about virtual CPU dirty page rate limits, if 1905# any. 
1906# 1907# Since: 7.1 1908# 1909# Example: 1910# 1911# -> {"execute": "query-vcpu-dirty-limit"} 1912# <- {"return": [ 1913# { "limit-rate": 60, "current-rate": 3, "cpu-index": 0}, 1914# { "limit-rate": 60, "current-rate": 3, "cpu-index": 1}]} 1915## 1916{ 'command': 'query-vcpu-dirty-limit', 1917 'returns': [ 'DirtyLimitInfo' ] } 1918 1919## 1920# @MigrationThreadInfo: 1921# 1922# Information about migrationthreads 1923# 1924# @name: the name of migration thread 1925# 1926# @thread-id: ID of the underlying host thread 1927# 1928# Since: 7.2 1929## 1930{ 'struct': 'MigrationThreadInfo', 1931 'data': {'name': 'str', 1932 'thread-id': 'int'} } 1933 1934## 1935# @query-migrationthreads: 1936# 1937# Returns information of migration threads 1938# 1939# data: migration thread name 1940# 1941# Returns: information about migration threads 1942# 1943# Since: 7.2 1944## 1945{ 'command': 'query-migrationthreads', 1946 'returns': ['MigrationThreadInfo'] } 1947 1948## 1949# @snapshot-save: 1950# 1951# Save a VM snapshot 1952# 1953# @job-id: identifier for the newly created job 1954# 1955# @tag: name of the snapshot to create 1956# 1957# @vmstate: block device node name to save vmstate to 1958# 1959# @devices: list of block device node names to save a snapshot to 1960# 1961# Applications should not assume that the snapshot save is complete 1962# when this command returns. The job commands / events must be used 1963# to determine completion and to fetch details of any errors that 1964# arise. 1965# 1966# Note that execution of the guest CPUs may be stopped during the time 1967# it takes to save the snapshot. A future version of QEMU may ensure 1968# CPUs are executing continuously. 1969# 1970# It is strongly recommended that @devices contain all writable block 1971# device nodes if a consistent snapshot is required. 
1972# 1973# If @tag already exists, an error will be reported 1974# 1975# Returns: nothing 1976# 1977# Example: 1978# 1979# -> { "execute": "snapshot-save", 1980# "arguments": { 1981# "job-id": "snapsave0", 1982# "tag": "my-snap", 1983# "vmstate": "disk0", 1984# "devices": ["disk0", "disk1"] 1985# } 1986# } 1987# <- { "return": { } } 1988# <- {"event": "JOB_STATUS_CHANGE", 1989# "timestamp": {"seconds": 1432121972, "microseconds": 744001}, 1990# "data": {"status": "created", "id": "snapsave0"}} 1991# <- {"event": "JOB_STATUS_CHANGE", 1992# "timestamp": {"seconds": 1432122172, "microseconds": 744001}, 1993# "data": {"status": "running", "id": "snapsave0"}} 1994# <- {"event": "STOP", 1995# "timestamp": {"seconds": 1432122372, "microseconds": 744001} } 1996# <- {"event": "RESUME", 1997# "timestamp": {"seconds": 1432122572, "microseconds": 744001} } 1998# <- {"event": "JOB_STATUS_CHANGE", 1999# "timestamp": {"seconds": 1432122772, "microseconds": 744001}, 2000# "data": {"status": "waiting", "id": "snapsave0"}} 2001# <- {"event": "JOB_STATUS_CHANGE", 2002# "timestamp": {"seconds": 1432122972, "microseconds": 744001}, 2003# "data": {"status": "pending", "id": "snapsave0"}} 2004# <- {"event": "JOB_STATUS_CHANGE", 2005# "timestamp": {"seconds": 1432123172, "microseconds": 744001}, 2006# "data": {"status": "concluded", "id": "snapsave0"}} 2007# -> {"execute": "query-jobs"} 2008# <- {"return": [{"current-progress": 1, 2009# "status": "concluded", 2010# "total-progress": 1, 2011# "type": "snapshot-save", 2012# "id": "snapsave0"}]} 2013# 2014# Since: 6.0 2015## 2016{ 'command': 'snapshot-save', 2017 'data': { 'job-id': 'str', 2018 'tag': 'str', 2019 'vmstate': 'str', 2020 'devices': ['str'] } } 2021 2022## 2023# @snapshot-load: 2024# 2025# Load a VM snapshot 2026# 2027# @job-id: identifier for the newly created job 2028# 2029# @tag: name of the snapshot to load. 
2030# 2031# @vmstate: block device node name to load vmstate from 2032# 2033# @devices: list of block device node names to load a snapshot from 2034# 2035# Applications should not assume that the snapshot load is complete 2036# when this command returns. The job commands / events must be used 2037# to determine completion and to fetch details of any errors that 2038# arise. 2039# 2040# Note that execution of the guest CPUs will be stopped during the 2041# time it takes to load the snapshot. 2042# 2043# It is strongly recommended that @devices contain all writable block 2044# device nodes that can have changed since the original @snapshot-save 2045# command execution. 2046# 2047# Returns: nothing 2048# 2049# Example: 2050# 2051# -> { "execute": "snapshot-load", 2052# "arguments": { 2053# "job-id": "snapload0", 2054# "tag": "my-snap", 2055# "vmstate": "disk0", 2056# "devices": ["disk0", "disk1"] 2057# } 2058# } 2059# <- { "return": { } } 2060# <- {"event": "JOB_STATUS_CHANGE", 2061# "timestamp": {"seconds": 1472124172, "microseconds": 744001}, 2062# "data": {"status": "created", "id": "snapload0"}} 2063# <- {"event": "JOB_STATUS_CHANGE", 2064# "timestamp": {"seconds": 1472125172, "microseconds": 744001}, 2065# "data": {"status": "running", "id": "snapload0"}} 2066# <- {"event": "STOP", 2067# "timestamp": {"seconds": 1472125472, "microseconds": 744001} } 2068# <- {"event": "RESUME", 2069# "timestamp": {"seconds": 1472125872, "microseconds": 744001} } 2070# <- {"event": "JOB_STATUS_CHANGE", 2071# "timestamp": {"seconds": 1472126172, "microseconds": 744001}, 2072# "data": {"status": "waiting", "id": "snapload0"}} 2073# <- {"event": "JOB_STATUS_CHANGE", 2074# "timestamp": {"seconds": 1472127172, "microseconds": 744001}, 2075# "data": {"status": "pending", "id": "snapload0"}} 2076# <- {"event": "JOB_STATUS_CHANGE", 2077# "timestamp": {"seconds": 1472128172, "microseconds": 744001}, 2078# "data": {"status": "concluded", "id": "snapload0"}} 2079# -> {"execute": 
"query-jobs"} 2080# <- {"return": [{"current-progress": 1, 2081# "status": "concluded", 2082# "total-progress": 1, 2083# "type": "snapshot-load", 2084# "id": "snapload0"}]} 2085# 2086# Since: 6.0 2087## 2088{ 'command': 'snapshot-load', 2089 'data': { 'job-id': 'str', 2090 'tag': 'str', 2091 'vmstate': 'str', 2092 'devices': ['str'] } } 2093 2094## 2095# @snapshot-delete: 2096# 2097# Delete a VM snapshot 2098# 2099# @job-id: identifier for the newly created job 2100# 2101# @tag: name of the snapshot to delete. 2102# 2103# @devices: list of block device node names to delete a snapshot from 2104# 2105# Applications should not assume that the snapshot delete is complete 2106# when this command returns. The job commands / events must be used 2107# to determine completion and to fetch details of any errors that 2108# arise. 2109# 2110# Returns: nothing 2111# 2112# Example: 2113# 2114# -> { "execute": "snapshot-delete", 2115# "arguments": { 2116# "job-id": "snapdelete0", 2117# "tag": "my-snap", 2118# "devices": ["disk0", "disk1"] 2119# } 2120# } 2121# <- { "return": { } } 2122# <- {"event": "JOB_STATUS_CHANGE", 2123# "timestamp": {"seconds": 1442124172, "microseconds": 744001}, 2124# "data": {"status": "created", "id": "snapdelete0"}} 2125# <- {"event": "JOB_STATUS_CHANGE", 2126# "timestamp": {"seconds": 1442125172, "microseconds": 744001}, 2127# "data": {"status": "running", "id": "snapdelete0"}} 2128# <- {"event": "JOB_STATUS_CHANGE", 2129# "timestamp": {"seconds": 1442126172, "microseconds": 744001}, 2130# "data": {"status": "waiting", "id": "snapdelete0"}} 2131# <- {"event": "JOB_STATUS_CHANGE", 2132# "timestamp": {"seconds": 1442127172, "microseconds": 744001}, 2133# "data": {"status": "pending", "id": "snapdelete0"}} 2134# <- {"event": "JOB_STATUS_CHANGE", 2135# "timestamp": {"seconds": 1442128172, "microseconds": 744001}, 2136# "data": {"status": "concluded", "id": "snapdelete0"}} 2137# -> {"execute": "query-jobs"} 2138# <- {"return": [{"current-progress": 1, 
2139# "status": "concluded", 2140# "total-progress": 1, 2141# "type": "snapshot-delete", 2142# "id": "snapdelete0"}]} 2143# 2144# Since: 6.0 2145## 2146{ 'command': 'snapshot-delete', 2147 'data': { 'job-id': 'str', 2148 'tag': 'str', 2149 'devices': ['str'] } } 2150