# -*- Mode: Python -*-
# vim: filetype=python
#

##
# = Migration
##

{ 'include': 'common.json' }
{ 'include': 'sockets.json' }

##
# @MigrationStats:
#
# Detailed migration status.
#
# @transferred: amount of bytes already transferred to the target VM
#
# @remaining: amount of bytes remaining to be transferred to the
#     target VM
#
# @total: total amount of bytes involved in the migration process
#
# @duplicate: number of duplicate (zero) pages (since 1.2)
#
# @skipped: number of skipped zero pages (since 1.5)
#
# @normal: number of normal pages (since 1.2)
#
# @normal-bytes: number of normal bytes sent (since 1.2)
#
# @dirty-pages-rate: number of pages dirtied per second by the guest
#     (since 1.3)
#
# @mbps: throughput in megabits/sec.  (since 1.6)
#
# @dirty-sync-count: number of times that dirty ram was synchronized
#     (since 2.1)
#
# @postcopy-requests: The number of page requests received from the
#     destination (since 2.7)
#
# @page-size: The number of bytes per page for the various page-based
#     statistics (since 2.10)
#
# @multifd-bytes: The number of bytes sent through multifd (since 3.0)
#
# @pages-per-second: the number of memory pages transferred per second
#     (Since 4.0)
#
# @precopy-bytes: The number of bytes sent in the pre-copy phase
#     (since 7.0).
#
# @downtime-bytes: The number of bytes sent while the guest is paused
#     (since 7.0).
#
# @postcopy-bytes: The number of bytes sent during the post-copy phase
#     (since 7.0).
#
# @dirty-sync-missed-zero-copy: Number of times dirty RAM
#     synchronization could not avoid copying dirty pages.  This is
#     between 0 and @dirty-sync-count * @multifd-channels.  (since
#     7.1)
#
# Since: 0.14
##
{ 'struct': 'MigrationStats',
  'data': {'transferred': 'int', 'remaining': 'int', 'total': 'int',
           'duplicate': 'int', 'skipped': 'int', 'normal': 'int',
           'normal-bytes': 'int', 'dirty-pages-rate': 'int',
           'mbps': 'number', 'dirty-sync-count': 'int',
           'postcopy-requests': 'int', 'page-size': 'int',
           'multifd-bytes': 'uint64', 'pages-per-second': 'uint64',
           'precopy-bytes': 'uint64', 'downtime-bytes': 'uint64',
           'postcopy-bytes': 'uint64',
           'dirty-sync-missed-zero-copy': 'uint64' } }

##
# @XBZRLECacheStats:
#
# Detailed XBZRLE migration cache statistics
#
# @cache-size: XBZRLE cache size
#
# @bytes: amount of bytes already transferred to the target VM
#
# @pages: amount of pages transferred to the target VM
#
# @cache-miss: number of cache misses
#
# @cache-miss-rate: rate of cache misses (since 2.1)
#
# @encoding-rate: rate of encoded bytes (since 5.1)
#
# @overflow: number of overflows
#
# Since: 1.2
##
{ 'struct': 'XBZRLECacheStats',
  'data': {'cache-size': 'size', 'bytes': 'int', 'pages': 'int',
           'cache-miss': 'int', 'cache-miss-rate': 'number',
           'encoding-rate': 'number', 'overflow': 'int' } }
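
# Illustrative usage sketch (not part of the schema documentation):
# one possible way to enable XBZRLE and size its cache before starting
# a migration.  The 64 MiB cache size is an arbitrary example value.
#
# -> { "execute": "migrate-set-capabilities", "arguments":
#      { "capabilities": [ { "capability": "xbzrle", "state": true } ] } }
# <- { "return": {} }
# -> { "execute": "migrate-set-parameters",
#      "arguments": { "xbzrle-cache-size": 67108864 } }
# <- { "return": {} }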

##
# @CompressionStats:
#
# Detailed migration compression statistics
#
# @pages: amount of pages compressed and transferred to the target VM
#
# @busy: count of times that no free thread was available to compress
#     data
#
# @busy-rate: rate of thread busy
#
# @compressed-size: amount of bytes after compression
#
# @compression-rate: rate of compressed size
#
# Since: 3.1
##
{ 'struct': 'CompressionStats',
  'data': {'pages': 'int', 'busy': 'int', 'busy-rate': 'number',
           'compressed-size': 'int', 'compression-rate': 'number' } }

##
# @MigrationStatus:
#
# An enumeration of migration status.
#
# @none: no migration has ever happened.
#
# @setup: migration process has been initiated.
#
# @cancelling: in the process of cancelling migration.
#
# @cancelled: cancelling migration is finished.
#
# @active: in the process of doing migration.
#
# @postcopy-active: like active, but now in postcopy mode.  (since
#     2.5)
#
# @postcopy-paused: during postcopy but paused.  (since 3.0)
#
# @postcopy-recover: trying to recover from a paused postcopy.  (since
#     3.0)
#
# @completed: migration is finished.
#
# @failed: some error occurred during migration process.
#
# @colo: VM is in the process of fault tolerance; VM cannot get into
#     this state unless colo capability is enabled for migration.
#     (since 2.8)
#
# @pre-switchover: Paused before device serialisation.  (since 2.11)
#
# @device: During device serialisation when pause-before-switchover is
#     enabled (since 2.11)
#
# @wait-unplug: wait for device unplug request by guest OS to be
#     completed.  (since 4.2)
#
# Since: 2.3
##
{ 'enum': 'MigrationStatus',
  'data': [ 'none', 'setup', 'cancelling', 'cancelled',
            'active', 'postcopy-active', 'postcopy-paused',
            'postcopy-recover', 'completed', 'failed', 'colo',
            'pre-switchover', 'device', 'wait-unplug' ] }

##
# @VfioStats:
#
# Detailed VFIO devices migration statistics
#
# @transferred: amount of bytes transferred to the target VM by VFIO
#     devices
#
# Since: 5.2
##
{ 'struct': 'VfioStats',
  'data': {'transferred': 'int' } }

##
# @MigrationInfo:
#
# Information about current migration process.
#
# @status: @MigrationStatus describing the current migration status.
#     If this field is not returned, no migration process has been
#     initiated
#
# @ram: @MigrationStats containing detailed migration status, only
#     returned if status is 'active' or 'completed' (since 1.2)
#
# @disk: @MigrationStats containing detailed disk migration status,
#     only returned if status is 'active' and it is a block migration
#
# @xbzrle-cache: @XBZRLECacheStats containing detailed XBZRLE
#     migration statistics, only returned if XBZRLE feature is on and
#     status is 'active' or 'completed' (since 1.2)
#
# @total-time: total amount of milliseconds since migration started.
#     If migration has ended, it returns the total migration time.
#     (since 1.2)
#
# @downtime: only present when migration finishes correctly; total
#     downtime in milliseconds for the guest.  (since 1.3)
#
# @expected-downtime: only present while migration is active; expected
#     downtime in milliseconds for the guest in last walk of the dirty
#     bitmap.  (since 1.3)
#
# @setup-time: amount of setup time in milliseconds *before* the
#     iterations begin but *after* the QMP command is issued.  This is
#     designed to provide an accounting of any activities (such as
#     RDMA pinning) which may be expensive, but do not actually occur
#     during the iterative migration rounds themselves.  (since 1.6)
#
# @cpu-throttle-percentage: percentage of time guest cpus are being
#     throttled during auto-converge.  This is only present when
#     auto-converge has started throttling guest cpus.  (Since 2.7)
#
# @error-desc: the human readable error description string, when
#     @status is 'failed'.  Clients should not attempt to parse the
#     error strings.  (Since 2.7)
#
# @postcopy-blocktime: total time when all vCPUs were blocked during
#     postcopy live migration.  This is only present when the
#     postcopy-blocktime migration capability is enabled.  (Since 3.0)
#
# @postcopy-vcpu-blocktime: list of the postcopy blocktime per vCPU.
#     This is only present when the postcopy-blocktime migration
#     capability is enabled.  (Since 3.0)
#
# @compression: migration compression statistics, only returned if
#     compression feature is on and status is 'active' or 'completed'
#     (Since 3.1)
#
# @socket-address: Only used for tcp, to know what the real port is
#     (Since 4.0)
#
# @vfio: @VfioStats containing detailed VFIO devices migration
#     statistics, only returned if VFIO device is present, migration
#     is supported by all VFIO devices and status is 'active' or
#     'completed' (since 5.2)
#
# @blocked-reasons: A list of reasons an outgoing migration is
#     blocked.  Present and non-empty when migration is blocked.
#     (since 6.0)
#
# Since: 0.14
##
{ 'struct': 'MigrationInfo',
  'data': {'*status': 'MigrationStatus', '*ram': 'MigrationStats',
           '*disk': 'MigrationStats',
           '*vfio': 'VfioStats',
           '*xbzrle-cache': 'XBZRLECacheStats',
           '*total-time': 'int',
           '*expected-downtime': 'int',
           '*downtime': 'int',
           '*setup-time': 'int',
           '*cpu-throttle-percentage': 'int',
           '*error-desc': 'str',
           '*blocked-reasons': ['str'],
           '*postcopy-blocktime': 'uint32',
           '*postcopy-vcpu-blocktime': ['uint32'],
           '*compression': 'CompressionStats',
           '*socket-address': ['SocketAddress'] } }

##
# @query-migrate:
#
# Returns information about current migration process.  If migration
# is active, there will be another json-object with RAM migration
# status, and if block migration is active, another one with block
# migration status.
#
# Returns: @MigrationInfo
#
# Since: 0.14
#
# Examples:
#
# 1. Before the first migration
#
# -> { "execute": "query-migrate" }
# <- { "return": {} }
#
# 2. Migration is done and has succeeded
#
# -> { "execute": "query-migrate" }
# <- { "return": {
#         "status": "completed",
#         "total-time":12345,
#         "setup-time":12345,
#         "downtime":12345,
#         "ram":{
#           "transferred":123,
#           "remaining":123,
#           "total":246,
#           "duplicate":123,
#           "normal":123,
#           "normal-bytes":123456,
#           "dirty-sync-count":15
#         }
#      }
#    }
#
# 3. Migration is done and has failed
#
# -> { "execute": "query-migrate" }
# <- { "return": { "status": "failed" } }
#
# 4. Migration is being performed and is not a block migration:
#
# -> { "execute": "query-migrate" }
# <- {
#       "return":{
#          "status":"active",
#          "total-time":12345,
#          "setup-time":12345,
#          "expected-downtime":12345,
#          "ram":{
#             "transferred":123,
#             "remaining":123,
#             "total":246,
#             "duplicate":123,
#             "normal":123,
#             "normal-bytes":123456,
#             "dirty-sync-count":15
#          }
#       }
#    }
#
# 5. Migration is being performed and is a block migration:
#
# -> { "execute": "query-migrate" }
# <- {
#       "return":{
#          "status":"active",
#          "total-time":12345,
#          "setup-time":12345,
#          "expected-downtime":12345,
#          "ram":{
#             "total":1057024,
#             "remaining":1053304,
#             "transferred":3720,
#             "duplicate":123,
#             "normal":123,
#             "normal-bytes":123456,
#             "dirty-sync-count":15
#          },
#          "disk":{
#             "total":20971520,
#             "remaining":20880384,
#             "transferred":91136
#          }
#       }
#    }
#
# 6. Migration is being performed and XBZRLE is active:
#
# -> { "execute": "query-migrate" }
# <- {
#       "return":{
#          "status":"active",
#          "total-time":12345,
#          "setup-time":12345,
#          "expected-downtime":12345,
#          "ram":{
#             "total":1057024,
#             "remaining":1053304,
#             "transferred":3720,
#             "duplicate":10,
#             "normal":3333,
#             "normal-bytes":3412992,
#             "dirty-sync-count":15
#          },
#          "xbzrle-cache":{
#             "cache-size":67108864,
#             "bytes":20971520,
#             "pages":2444343,
#             "cache-miss":2244,
#             "cache-miss-rate":0.123,
#             "encoding-rate":80.1,
#             "overflow":34434
#          }
#       }
#    }
##
{ 'command': 'query-migrate', 'returns': 'MigrationInfo' }

##
# @MigrationCapability:
#
# Migration capabilities enumeration
#
# @xbzrle: Migration supports xbzrle (Xor Based Zero Run Length
#     Encoding).  This feature allows us to minimize migration traffic
#     for certain workloads, by sending compressed differences of the
#     pages
#
# @rdma-pin-all: Controls whether or not the entire VM memory
#     footprint is mlock()'d on demand or all at once.  Refer to
#     docs/rdma.txt for usage.  Disabled by default.  (since 2.0)
#
# @zero-blocks: During storage migration encode blocks of zeroes
#     efficiently.  This essentially saves 1MB of zeroes per block on
#     the wire.  Enabling requires source and target VM to support
#     this feature.  To enable it is sufficient to enable the
#     capability on the source VM.  The feature is disabled by default.
#     (since 1.6)
#
# @compress: Use multiple compression threads to accelerate live
#     migration.  This feature can help to reduce the migration
#     traffic, by sending compressed pages.  Please note that if
#     compress and xbzrle are both on, compress only takes effect in
#     the ram bulk stage; after that, it will be disabled and only
#     xbzrle takes effect.  This can help to minimize migration
#     traffic.  The feature is disabled by default.  (since 2.4)
#
# @events: generate events for each migration state change (since 2.4)
#
# @auto-converge: If enabled, QEMU will automatically throttle down
#     the guest to speed up convergence of RAM migration.  (since 1.6)
#
# @postcopy-ram: Start executing on the migration target before all of
#     RAM has been migrated, pulling the remaining pages along as
#     needed.  The capability must have the same setting on both source
#     and target or migration will not even start.  NOTE: If the
#     migration fails during postcopy the VM will fail.  (since 2.6)
#
# @x-colo: If enabled, migration will never end, and the state of the
#     VM on the primary side will be migrated continuously to the VM
#     on the secondary side; this process is called COarse-Grain LOck
#     Stepping (COLO) for Non-stop Service.  (since 2.8)
#
# @release-ram: if enabled, QEMU will free the migrated RAM pages on
#     the source during postcopy-ram migration.  (since 2.9)
#
# @block: If enabled, QEMU will also migrate the contents of all block
#     devices.  Default is disabled.  A possible alternative uses
#     mirror jobs to a builtin NBD server on the destination, which
#     offers more flexibility.  (Since 2.10)
#
# @return-path: If enabled, migration will use the return path even
#     for precopy.  (since 2.10)
#
# @pause-before-switchover: Pause outgoing migration before
#     serialising device state and before disabling block IO (since
#     2.11)
#
# @multifd: Use more than one fd for migration (since 4.0)
#
# @dirty-bitmaps: If enabled, QEMU will migrate named dirty bitmaps.
#     (since 2.12)
#
# @postcopy-blocktime: Calculate downtime for postcopy live migration
#     (since 3.0)
#
# @late-block-activate: If enabled, the destination will not activate
#     block devices (and thus take locks) immediately at the end of
#     migration.  (since 3.0)
#
# @x-ignore-shared: If enabled, QEMU will not migrate shared memory
#     (since 4.0)
#
# @validate-uuid: Send the UUID of the source to allow the destination
#     to ensure it is the same.  (since 4.2)
#
# @background-snapshot: If enabled, the migration stream will be a
#     snapshot of the VM exactly at the point when the migration
#     procedure starts.  The VM RAM is saved while the VM is running.
#     (since 6.0)
#
# @zero-copy-send: Controls behavior on sending memory pages on
#     migration.  When true, enables a zero-copy mechanism for sending
#     memory pages, if host supports it.  Requires that QEMU be
#     permitted to use locked memory for guest RAM pages.  (since 7.1)
#
# @postcopy-preempt: If enabled, the migration process will allow
#     postcopy requests to preempt the precopy stream, so postcopy
#     requests will be handled faster.  This is a performance feature
#     and should not affect the correctness of postcopy migration.
#     (since 7.1)
#
# Features:
#
# @unstable: Members @x-colo and @x-ignore-shared are experimental.
#
# Since: 1.2
##
{ 'enum': 'MigrationCapability',
  'data': ['xbzrle', 'rdma-pin-all', 'auto-converge', 'zero-blocks',
           'compress', 'events', 'postcopy-ram',
           { 'name': 'x-colo', 'features': [ 'unstable' ] },
           'release-ram',
           'block', 'return-path', 'pause-before-switchover', 'multifd',
           'dirty-bitmaps', 'postcopy-blocktime', 'late-block-activate',
           { 'name': 'x-ignore-shared', 'features': [ 'unstable' ] },
           'validate-uuid', 'background-snapshot',
           'zero-copy-send', 'postcopy-preempt'] }
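
# Illustrative usage sketch (not part of the schema documentation):
# postcopy requires the capability to be set on both sides before the
# migration starts; the switch itself is made later with
# migrate-start-postcopy.  Issued on both the source and the
# destination:
#
# -> { "execute": "migrate-set-capabilities", "arguments":
#      { "capabilities": [ { "capability": "postcopy-ram", "state": true } ] } }
# <- { "return": {} }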

##
# @MigrationCapabilityStatus:
#
# Migration capability information
#
# @capability: capability enum
#
# @state: capability state bool
#
# Since: 1.2
##
{ 'struct': 'MigrationCapabilityStatus',
  'data': { 'capability': 'MigrationCapability', 'state': 'bool' } }

##
# @migrate-set-capabilities:
#
# Enable/Disable the following migration capabilities (like xbzrle)
#
# @capabilities: json array of capability modifications to make
#
# Since: 1.2
#
# Example:
#
# -> { "execute": "migrate-set-capabilities", "arguments":
#      { "capabilities": [ { "capability": "xbzrle", "state": true } ] } }
# <- { "return": {} }
##
{ 'command': 'migrate-set-capabilities',
  'data': { 'capabilities': ['MigrationCapabilityStatus'] } }

##
# @query-migrate-capabilities:
#
# Returns information about the current migration capabilities status
#
# Returns: a list of @MigrationCapabilityStatus
#
# Since: 1.2
#
# Example:
#
# -> { "execute": "query-migrate-capabilities" }
# <- { "return": [
#       {"state": false, "capability": "xbzrle"},
#       {"state": false, "capability": "rdma-pin-all"},
#       {"state": false, "capability": "auto-converge"},
#       {"state": false, "capability": "zero-blocks"},
#       {"state": false, "capability": "compress"},
#       {"state": true, "capability": "events"},
#       {"state": false, "capability": "postcopy-ram"},
#       {"state": false, "capability": "x-colo"}
#    ]}
##
{ 'command': 'query-migrate-capabilities', 'returns': ['MigrationCapabilityStatus']}

##
# @MultiFDCompression:
#
# An enumeration of multifd compression methods.
#
# @none: no compression.
#
# @zlib: use zlib compression method.
#
# @zstd: use zstd compression method.
#
# Since: 5.0
##
{ 'enum': 'MultiFDCompression',
  'data': [ 'none', 'zlib',
            { 'name': 'zstd', 'if': 'CONFIG_ZSTD' } ] }
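
# Illustrative usage sketch (not part of the schema documentation):
# one possible way to combine the multifd capability with a multifd
# compression method.  The channel count is an arbitrary example, and
# 'zstd' is only available when QEMU was built with CONFIG_ZSTD.
#
# -> { "execute": "migrate-set-capabilities", "arguments":
#      { "capabilities": [ { "capability": "multifd", "state": true } ] } }
# <- { "return": {} }
# -> { "execute": "migrate-set-parameters",
#      "arguments": { "multifd-channels": 4,
#                     "multifd-compression": "zstd" } }
# <- { "return": {} }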

##
# @BitmapMigrationBitmapAliasTransform:
#
# @persistent: If present, the bitmap will be made persistent or
#     transient depending on this parameter.
#
# Since: 6.0
##
{ 'struct': 'BitmapMigrationBitmapAliasTransform',
  'data': {
      '*persistent': 'bool'
  } }

##
# @BitmapMigrationBitmapAlias:
#
# @name: The name of the bitmap.
#
# @alias: An alias name for migration (for example the bitmap name on
#     the opposite site).
#
# @transform: Allows the modification of the migrated bitmap.  (since
#     6.0)
#
# Since: 5.2
##
{ 'struct': 'BitmapMigrationBitmapAlias',
  'data': {
      'name': 'str',
      'alias': 'str',
      '*transform': 'BitmapMigrationBitmapAliasTransform'
  } }

##
# @BitmapMigrationNodeAlias:
#
# Maps a block node name and the bitmaps it has to aliases for dirty
# bitmap migration.
#
# @node-name: A block node name.
#
# @alias: An alias block node name for migration (for example the node
#     name on the opposite site).
#
# @bitmaps: Mappings for the bitmaps on this node.
#
# Since: 5.2
##
{ 'struct': 'BitmapMigrationNodeAlias',
  'data': {
      'node-name': 'str',
      'alias': 'str',
      'bitmaps': [ 'BitmapMigrationBitmapAlias' ]
  } }
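
# Illustrative usage sketch (not part of the schema documentation):
# one possible block-bitmap-mapping setting built from the alias
# structures above.  The node and bitmap names are hypothetical.
#
# -> { "execute": "migrate-set-parameters",
#      "arguments": { "block-bitmap-mapping": [
#          { "node-name": "node0", "alias": "block0",
#            "bitmaps": [ { "name": "bitmap0", "alias": "bmap0" } ] } ] } }
# <- { "return": {} }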

##
# @MigrationParameter:
#
# Migration parameters enumeration
#
# @announce-initial: Initial delay (in milliseconds) before sending
#     the first announce (Since 4.0)
#
# @announce-max: Maximum delay (in milliseconds) between packets in
#     the announcement (Since 4.0)
#
# @announce-rounds: Number of self-announce packets sent after
#     migration (Since 4.0)
#
# @announce-step: Increase in delay (in milliseconds) between
#     subsequent packets in the announcement (Since 4.0)
#
# @compress-level: Set the compression level to be used in live
#     migration.  The compression level is an integer between 0 and 9,
#     where 0 means no compression, 1 means the best compression
#     speed, and 9 means best compression ratio which will consume
#     more CPU.
#
# @compress-threads: Set compression thread count to be used in live
#     migration.  The compression thread count is an integer between 1
#     and 255.
#
# @compress-wait-thread: Controls behavior when all compression
#     threads are currently busy.  If true (default), wait for a free
#     compression thread to become available; otherwise, send the page
#     uncompressed.  (Since 3.1)
#
# @decompress-threads: Set decompression thread count to be used in
#     live migration.  The decompression thread count is an integer
#     between 1 and 255.  Usually, decompression is at least 4 times as
#     fast as compression, so setting decompress-threads to about 1/4
#     of compress-threads is adequate.
#
# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
#     bytes_xfer_period to trigger throttling.  It is expressed as
#     percentage.  The default value is 50.  (Since 5.0)
#
# @cpu-throttle-initial: Initial percentage of time guest cpus are
#     throttled when migration auto-converge is activated.  The
#     default value is 20.  (Since 2.7)
#
# @cpu-throttle-increment: throttle percentage increase each time
#     auto-converge detects that migration is not making progress.
#     The default value is 10.  (Since 2.7)
#
# @cpu-throttle-tailslow: Make CPU throttling slower at the tail stage.
#     At the tail stage of throttling, the Guest is very sensitive to
#     CPU percentage, while the @cpu-throttle-increment is usually
#     excessive at the tail stage.  If this parameter is true, we will
#     compute the ideal CPU percentage used by the Guest, which may
#     exactly make the dirty rate match the dirty rate threshold.
#     Then we will choose a smaller throttle increment between the one
#     specified by @cpu-throttle-increment and the one generated by
#     ideal CPU percentage.  Therefore, it is compatible with
#     traditional throttling, while the throttle increment won't be
#     excessive at the tail stage.  The default value is false.  (Since
#     5.1)
#
# @tls-creds: ID of the 'tls-creds' object that provides credentials
#     for establishing a TLS connection over the migration data
#     channel.  On the outgoing side of the migration, the credentials
#     must be for a 'client' endpoint, while for the incoming side the
#     credentials must be for a 'server' endpoint.  Setting this will
#     enable TLS for all migrations.  The default is unset, resulting
#     in unsecured migration at the QEMU level.  (Since 2.7)
#
# @tls-hostname: hostname of the target host for the migration.  This
#     is required when using x509 based TLS credentials and the
#     migration URI does not already include a hostname.  For example
#     if using fd: or exec: based migration, the hostname must be
#     provided so that the server's x509 certificate identity can be
#     validated.  (Since 2.7)
#
# @tls-authz: ID of the 'authz' object subclass that provides access
#     control checking of the TLS x509 certificate distinguished name.
#     This object is only resolved at time of use, so can be deleted
#     and recreated on the fly while the migration server is active.
#     If missing, it will default to denying access.  (Since 4.0)
#
# @max-bandwidth: maximum speed for migration, in bytes per second.
#     (Since 2.8)
#
# @downtime-limit: maximum tolerated downtime for migration, in
#     milliseconds.  (Since 2.8)
#
# @x-checkpoint-delay: The delay time (in ms) between two COLO
#     checkpoints in periodic mode.  (Since 2.8)
#
# @block-incremental: Affects how much storage is migrated when the
#     block migration capability is enabled.  When false, the entire
#     storage backing chain is migrated into a flattened image at the
#     destination; when true, only the active qcow2 layer is migrated
#     and the destination must already have access to the same backing
#     chain as was used on the source.  (since 2.10)
#
# @multifd-channels: Number of channels used to migrate data in
#     parallel.  This is the same as the number of sockets used for
#     migration.  The default value is 2 (since 4.0)
#
# @xbzrle-cache-size: cache size to be used by XBZRLE migration.  It
#     needs to be a multiple of the target page size and a power of 2
#     (Since 2.11)
#
# @max-postcopy-bandwidth: Background transfer bandwidth during
#     postcopy.  Defaults to 0 (unlimited).  In bytes per second.
#     (Since 3.0)
#
# @max-cpu-throttle: maximum cpu throttle percentage.  Defaults to 99.
#     (Since 3.1)
#
# @multifd-compression: Which compression method to use.  Defaults to
#     none.  (Since 5.0)
#
# @multifd-zlib-level: Set the compression level to be used in live
#     migration.  The compression level is an integer between 0 and 9,
#     where 0 means no compression, 1 means the best compression
#     speed, and 9 means best compression ratio which will consume
#     more CPU.  Defaults to 1.  (Since 5.0)
#
# @multifd-zstd-level: Set the compression level to be used in live
#     migration.  The compression level is an integer between 0 and 20,
#     where 0 means no compression, 1 means the best compression
#     speed, and 20 means best compression ratio which will consume
#     more CPU.  Defaults to 1.  (Since 5.0)
#
# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
#     aliases for the purpose of dirty bitmap migration.  Such aliases
#     may for example be the corresponding names on the opposite site.
#     The mapping must be one-to-one, but not necessarily complete: On
#     the source, unmapped bitmaps and all bitmaps on unmapped nodes
#     will be ignored.  On the destination, encountering an unmapped
#     alias in the incoming migration stream will result in a report,
#     and all further bitmap migration data will then be discarded.
#     Note that the destination does not know about bitmaps it does
#     not receive, so there is no limitation or requirement regarding
#     the number of bitmaps received, or how they are named, or on
#     which nodes they are placed.  By default (when this parameter
#     has never been set), bitmap names are mapped to themselves.
#     Nodes are mapped to their block device name if there is one, and
#     to their node name otherwise.  (Since 5.2)
#
# Features:
#
# @unstable: Member @x-checkpoint-delay is experimental.
#
# Since: 2.4
##
{ 'enum': 'MigrationParameter',
  'data': ['announce-initial', 'announce-max',
           'announce-rounds', 'announce-step',
           'compress-level', 'compress-threads', 'decompress-threads',
           'compress-wait-thread', 'throttle-trigger-threshold',
           'cpu-throttle-initial', 'cpu-throttle-increment',
           'cpu-throttle-tailslow',
           'tls-creds', 'tls-hostname', 'tls-authz', 'max-bandwidth',
           'downtime-limit',
           { 'name': 'x-checkpoint-delay', 'features': [ 'unstable' ] },
           'block-incremental',
           'multifd-channels',
           'xbzrle-cache-size', 'max-postcopy-bandwidth',
           'max-cpu-throttle', 'multifd-compression',
           'multifd-zlib-level', 'multifd-zstd-level',
           'block-bitmap-mapping' ] }

##
# @MigrateSetParameters:
#
# @announce-initial: Initial delay (in milliseconds) before sending
#     the first announce (Since 4.0)
#
# @announce-max: Maximum delay (in milliseconds) between packets in
#     the announcement (Since 4.0)
#
# @announce-rounds: Number of self-announce packets sent after
#     migration (Since 4.0)
#
# @announce-step: Increase in delay (in milliseconds) between
#     subsequent packets in the announcement (Since 4.0)
#
# @compress-level: compression level
#
# @compress-threads: compression thread count
#
# @compress-wait-thread: Controls behavior when all compression
#     threads are currently busy.  If true (default), wait for a free
#     compression thread to become available; otherwise, send the page
#     uncompressed.  (Since 3.1)
#
# @decompress-threads: decompression thread count
#
# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
#     bytes_xfer_period to trigger throttling.  It is expressed as
#     percentage.  The default value is 50.  (Since 5.0)
#
# @cpu-throttle-initial: Initial percentage of time guest cpus are
#     throttled when migration auto-converge is activated.  The
#     default value is 20.  (Since 2.7)
#
# @cpu-throttle-increment: throttle percentage increase each time
#     auto-converge detects that migration is not making progress.
#     The default value is 10.  (Since 2.7)
#
# @cpu-throttle-tailslow: Make CPU throttling slower at the tail stage.
#     At the tail stage of throttling, the Guest is very sensitive to
#     CPU percentage, while the @cpu-throttle-increment is usually
#     excessive at the tail stage.  If this parameter is true, we will
#     compute the ideal CPU percentage used by the Guest, which may
#     exactly make the dirty rate match the dirty rate threshold.
#     Then we will choose a smaller throttle increment between the one
#     specified by @cpu-throttle-increment and the one generated by
#     ideal CPU percentage.  Therefore, it is compatible with
#     traditional throttling, while the throttle increment won't be
#     excessive at the tail stage.  The default value is false.
#     (Since 5.1)
#
# @tls-creds: ID of the 'tls-creds' object that provides credentials
#     for establishing a TLS connection over the migration data
#     channel.  On the outgoing side of the migration, the credentials
#     must be for a 'client' endpoint, while for the incoming side the
#     credentials must be for a 'server' endpoint.  Setting this to a
#     non-empty string enables TLS for all migrations.  An empty
#     string means that QEMU will use plain text mode for migration,
#     rather than TLS.  (Since 2.9)  Previously (since 2.7), this was
#     reported by omitting tls-creds instead.
#
# @tls-hostname: hostname of the target host for the migration.  This
#     is required when using x509 based TLS credentials and the
#     migration URI does not already include a hostname.  For example
#     if using fd: or exec: based migration, the hostname must be
#     provided so that the server's x509 certificate identity can be
#     validated.  (Since 2.7)  An empty string means that QEMU will use
#     the hostname associated with the migration URI, if any.  (Since
#     2.9)  Previously (since 2.7), this was reported by omitting
#     tls-hostname instead.
#
# @max-bandwidth: maximum speed for migration, in bytes per second.
#     (Since 2.8)
#
# @downtime-limit: maximum tolerated downtime for migration, in
#     milliseconds.  (Since 2.8)
#
# @x-checkpoint-delay: the delay time between two COLO checkpoints.
#     (Since 2.8)
#
# @block-incremental: Affects how much storage is migrated when the
#     block migration capability is enabled.  When false, the entire
#     storage backing chain is migrated into a flattened image at the
#     destination; when true, only the active qcow2 layer is migrated
#     and the destination must already have access to the same backing
#     chain as was used on the source.  (since 2.10)
#
# @multifd-channels: Number of channels used to migrate data in
#     parallel.  This is the same as the number of sockets used for
#     migration.  The default value is 2 (since 4.0)
#
# @xbzrle-cache-size: cache size to be used by XBZRLE migration.  It
#     needs to be a multiple of the target page size and a power of 2
#     (Since 2.11)
#
# @max-postcopy-bandwidth: Background transfer bandwidth during
#     postcopy.  Defaults to 0 (unlimited).  In bytes per second.
#     (Since 3.0)
#
# @max-cpu-throttle: maximum cpu throttle percentage.  The default
#     value is 99.  (Since 3.1)
#
# @multifd-compression: Which compression method to use.  Defaults to
#     none.  (Since 5.0)
#
# @multifd-zlib-level: Set the compression level to be used in live
#     migration.  The compression level is an integer between 0 and 9,
#     where 0 means no compression, 1 means the best compression
#     speed, and 9 means best compression ratio which will consume
#     more CPU.  Defaults to 1.  (Since 5.0)
#
# @multifd-zstd-level: Set the compression level to be used in live
#     migration.  The compression level is an integer between 0 and 20,
#     where 0 means no compression, 1 means the best compression
#     speed, and 20 means best compression ratio which will consume
#     more CPU.  Defaults to 1.  (Since 5.0)
#
# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
#     aliases for the purpose of dirty bitmap migration.  Such aliases
#     may for example be the corresponding names on the opposite site.
#     The mapping must be one-to-one, but not necessarily complete: On
#     the source, unmapped bitmaps and all bitmaps on unmapped nodes
#     will be ignored.  On the destination, encountering an unmapped
#     alias in the incoming migration stream will result in a report,
#     and all further bitmap migration data will then be discarded.
#     Note that the destination does not know about bitmaps it does
#     not receive, so there is no limitation or requirement regarding
#     the number of bitmaps received, or how they are named, or on
#     which nodes they are placed.  By default (when this parameter
#     has never been set), bitmap names are mapped to themselves.
#     Nodes are mapped to their block device name if there is one, and
#     to their node name otherwise.  (Since 5.2)
#
# Features:
#
# @unstable: Member @x-checkpoint-delay is experimental.
#
# TODO: either fuse back into MigrationParameters, or make
#     MigrationParameters members mandatory
#
# Since: 2.4
##
{ 'struct': 'MigrateSetParameters',
  'data': { '*announce-initial': 'size',
            '*announce-max': 'size',
            '*announce-rounds': 'size',
            '*announce-step': 'size',
            '*compress-level': 'uint8',
            '*compress-threads': 'uint8',
            '*compress-wait-thread': 'bool',
            '*decompress-threads': 'uint8',
            '*throttle-trigger-threshold': 'uint8',
            '*cpu-throttle-initial': 'uint8',
            '*cpu-throttle-increment': 'uint8',
            '*cpu-throttle-tailslow': 'bool',
            '*tls-creds': 'StrOrNull',
            '*tls-hostname': 'StrOrNull',
            '*tls-authz': 'StrOrNull',
            '*max-bandwidth': 'size',
            '*downtime-limit': 'uint64',
            '*x-checkpoint-delay': { 'type': 'uint32',
                                     'features': [ 'unstable' ] },
            '*block-incremental': 'bool',
            '*multifd-channels': 'uint8',
            '*xbzrle-cache-size': 'size',
            '*max-postcopy-bandwidth': 'size',
            '*max-cpu-throttle': 'uint8',
            '*multifd-compression': 'MultiFDCompression',
            '*multifd-zlib-level': 'uint8',
            '*multifd-zstd-level': 'uint8',
            '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ] } }

##
# @migrate-set-parameters:
#
# Set various migration parameters.
#
# Since: 2.4
#
# Example:
#
# -> { "execute": "migrate-set-parameters",
#      "arguments": { "compress-level": 1 } }
# <- { "return": {} }
##
{ 'command': 'migrate-set-parameters', 'boxed': true,
  'data': 'MigrateSetParameters' }
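
# Illustrative usage sketch (not part of the schema documentation):
# one possible way to point an outgoing migration at a previously
# created tls-creds object.  The object ID "tls0" and the hostname are
# hypothetical and must match the destination's certificate setup.
#
# -> { "execute": "migrate-set-parameters",
#      "arguments": { "tls-creds": "tls0",
#                     "tls-hostname": "dst.example.com" } }
# <- { "return": {} }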

##
# @MigrationParameters:
#
# The optional members aren't actually optional.
#
# @announce-initial: Initial delay (in milliseconds) before sending
#     the first announce (Since 4.0)
#
# @announce-max: Maximum delay (in milliseconds) between packets in
#     the announcement (Since 4.0)
#
# @announce-rounds: Number of self-announce packets sent after
#     migration (Since 4.0)
#
# @announce-step: Increase in delay (in milliseconds) between
#     subsequent packets in the announcement (Since 4.0)
#
# @compress-level: compression level
#
# @compress-threads: compression thread count
#
# @compress-wait-thread: Controls behavior when all compression
#     threads are currently busy.  If true (default), wait for a free
#     compression thread to become available; otherwise, send the page
#     uncompressed.  (Since 3.1)
#
# @decompress-threads: decompression thread count
#
# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
#     bytes_xfer_period to trigger throttling.  It is expressed as
#     percentage.  The default value is 50.  (Since 5.0)
#
# @cpu-throttle-initial: Initial percentage of time guest cpus are
#     throttled when migration auto-converge is activated.  (Since
#     2.7)
#
# @cpu-throttle-increment: throttle percentage increase each time
#     auto-converge detects that migration is not making progress.
#     (Since 2.7)
#
# @cpu-throttle-tailslow: Make CPU throttling slower at the tail stage.
#     At the tail stage of throttling, the Guest is very sensitive to
#     CPU percentage, while the @cpu-throttle-increment is usually
#     excessive at the tail stage.  If this parameter is true, we will
#     compute the ideal CPU percentage used by the Guest, which may
#     exactly make the dirty rate match the dirty rate threshold.
#     Then we will choose a smaller throttle increment between the one
#     specified by @cpu-throttle-increment and the one generated by
#     ideal CPU percentage.  Therefore, it is compatible with
#     traditional throttling, while the throttle increment won't be
#     excessive at the tail stage.  The default value is false.
#     (Since 5.1)
#
# @tls-creds: ID of the 'tls-creds' object that provides credentials
#     for establishing a TLS connection over the migration data
#     channel.  On the outgoing side of the migration, the credentials
#     must be for a 'client' endpoint, while for the incoming side the
#     credentials must be for a 'server' endpoint.  An empty string
#     means that QEMU will use plain text mode for migration, rather
#     than TLS.  (Since 2.7)  Note: 2.8 reports this by omitting
#     tls-creds instead.
#
# @tls-hostname: hostname of the target host for the migration.  This
#     is required when using x509 based TLS credentials and the
#     migration URI does not already include a hostname.  For example
#     if using fd: or exec: based migration, the hostname must be
#     provided so that the server's x509 certificate identity can be
#     validated.  (Since 2.7)  An empty string means that QEMU will use
#     the hostname associated with the migration URI, if any.  (Since
#     2.9)  Note: 2.8 reports this by omitting tls-hostname instead.
#
# @tls-authz: ID of the 'authz' object subclass that provides access
#     control checking of the TLS x509 certificate distinguished name.
#     (Since 4.0)
#
# @max-bandwidth: maximum speed for migration, in bytes per second.
#     (Since 2.8)
#
# @downtime-limit: maximum tolerated downtime for migration, in
#     milliseconds.  (Since 2.8)
#
# @x-checkpoint-delay: the delay time between two COLO checkpoints.
#     (Since 2.8)
#
# @block-incremental: Affects how much storage is migrated when the
#     block migration capability is enabled.  When false, the entire
#     storage backing chain is migrated into a flattened image at the
#     destination; when true, only the active qcow2 layer is migrated
#     and the destination must already have access to the same backing
#     chain as was used on the source.  (since 2.10)
#
# @multifd-channels: Number of channels used to migrate data in
#     parallel.  This is the same as the number of sockets used for
#     migration.  The default value is 2 (since 4.0)
#
# @xbzrle-cache-size: cache size to be used by XBZRLE migration.  It
#     needs to be a multiple of the target page size and a power of 2
#     (Since 2.11)
#
# @max-postcopy-bandwidth: Background transfer bandwidth during
#     postcopy.  Defaults to 0 (unlimited).  In bytes per second.
#     (Since 3.0)
#
# @max-cpu-throttle: maximum cpu throttle percentage.  Defaults to 99.
#     (Since 3.1)
#
# @multifd-compression: Which compression method to use.  Defaults to
#     none.  (Since 5.0)
#
# @multifd-zlib-level: Set the compression level to be used in live
#     migration.  The compression level is an integer between 0 and 9,
#     where 0 means no compression, 1 means the best compression
#     speed, and 9 means best compression ratio which will consume
#     more CPU.  Defaults to 1.  (Since 5.0)
#
# @multifd-zstd-level: Set the compression level to be used in live
#     migration.  The compression level is an integer between 0 and 20,
#     where 0 means no compression, 1 means the best compression
#     speed, and 20 means best compression ratio which will consume
#     more CPU.  Defaults to 1.  (Since 5.0)
#
# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
#     aliases for the purpose of dirty bitmap migration.  Such aliases
#     may for example be the corresponding names on the opposite site.
#     The mapping must be one-to-one, but not necessarily complete: On
#     the source, unmapped bitmaps and all bitmaps on unmapped nodes
#     will be ignored.  On the destination, encountering an unmapped
#     alias in the incoming migration stream will result in a report,
#     and all further bitmap migration data will then be discarded.
#     Note that the destination does not know about bitmaps it does
#     not receive, so there is no limitation or requirement regarding
#     the number of bitmaps received, or how they are named, or on
#     which nodes they are placed.  By default (when this parameter
#     has never been set), bitmap names are mapped to themselves.
#     Nodes are mapped to their block device name if there is one, and
#     to their node name otherwise.  (Since 5.2)
#
# Features:
#
# @unstable: Member @x-checkpoint-delay is experimental.
#
# Since: 2.4
##
{ 'struct': 'MigrationParameters',
  'data': { '*announce-initial': 'size',
            '*announce-max': 'size',
            '*announce-rounds': 'size',
            '*announce-step': 'size',
            '*compress-level': 'uint8',
            '*compress-threads': 'uint8',
            '*compress-wait-thread': 'bool',
            '*decompress-threads': 'uint8',
            '*throttle-trigger-threshold': 'uint8',
            '*cpu-throttle-initial': 'uint8',
            '*cpu-throttle-increment': 'uint8',
            '*cpu-throttle-tailslow': 'bool',
            '*tls-creds': 'str',
            '*tls-hostname': 'str',
            '*tls-authz': 'str',
            '*max-bandwidth': 'size',
            '*downtime-limit': 'uint64',
            '*x-checkpoint-delay': { 'type': 'uint32',
                                     'features': [ 'unstable' ] },
            '*block-incremental': 'bool',
            '*multifd-channels': 'uint8',
            '*xbzrle-cache-size': 'size',
            '*max-postcopy-bandwidth': 'size',
            '*max-cpu-throttle': 'uint8',
            '*multifd-compression': 'MultiFDCompression',
            '*multifd-zlib-level': 'uint8',
            '*multifd-zstd-level': 'uint8',
            '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ] } }

##
# @query-migrate-parameters:
#
# Returns information about the current migration parameters
#
# Returns: @MigrationParameters
#
# Since: 2.4
#
# Example:
#
# -> { "execute": "query-migrate-parameters" }
# <- { "return": {
#          "decompress-threads": 2,
#          "cpu-throttle-increment": 10,
#          "compress-threads": 8,
#          "compress-level": 1,
#          "cpu-throttle-initial": 20,
#          "max-bandwidth": 33554432,
#          "downtime-limit": 300
#       }
#    }
##
{ 'command': 'query-migrate-parameters',
  'returns': 'MigrationParameters' }

##
# @migrate-start-postcopy:
#
# Follow-up to a migration command to switch the migration to postcopy
# mode.  The postcopy-ram capability must be set on both source and
# destination before the original migration command.
#
# Since: 2.5
#
# Example:
#
# -> { "execute": "migrate-start-postcopy" }
# <- { "return": {} }
##
{ 'command': 'migrate-start-postcopy' }

##
# @MIGRATION:
#
# Emitted when a migration event happens
#
# @status: @MigrationStatus describing the current migration status.
#
# Since: 2.4
#
# Example:
#
# <- {"timestamp": {"seconds": 1432121972, "microseconds": 744001},
#     "event": "MIGRATION",
#     "data": {"status": "completed"} }
##
{ 'event': 'MIGRATION',
  'data': {'status': 'MigrationStatus'}}

##
# @MIGRATION_PASS:
#
# Emitted from the source side of a migration at the start of each
# pass (when it syncs the dirty bitmap)
#
# @pass: An incrementing count (starting at 1 on the first pass)
#
# Since: 2.6
#
# Example:
#
# <- { "timestamp": {"seconds": 1449669631, "microseconds": 239225},
#      "event": "MIGRATION_PASS", "data": {"pass": 2} }
##
{ 'event': 'MIGRATION_PASS',
  'data': { 'pass': 'int' } }

##
# @COLOMessage:
#
# The message transmission between Primary side and Secondary side.
#
# @checkpoint-ready: Secondary VM (SVM) is ready for checkpointing
#
# @checkpoint-request: Primary VM (PVM) tells SVM to prepare for
#     checkpointing
#
# @checkpoint-reply: SVM gets PVM's checkpoint request
#
# @vmstate-send: VM's state will be sent by PVM.
#
# @vmstate-size: The total size of VMstate.
#
# @vmstate-received: VM's state has been received by SVM.
#
# @vmstate-loaded: VM's state has been loaded by SVM.
#
# Since: 2.8
##
{ 'enum': 'COLOMessage',
  'data': [ 'checkpoint-ready', 'checkpoint-request', 'checkpoint-reply',
            'vmstate-send', 'vmstate-size', 'vmstate-received',
            'vmstate-loaded' ] }

##
# @COLOMode:
#
# The COLO current mode.
#
# @none: COLO is disabled.
#
# @primary: COLO node in primary side.
#
# @secondary: COLO node in secondary side.
#
# Since: 2.8
##
{ 'enum': 'COLOMode',
  'data': [ 'none', 'primary', 'secondary'] }

##
# @FailoverStatus:
#
# An enumeration of COLO failover status
#
# @none: no failover has ever happened
#
# @require: got failover requirement but not handled
#
# @active: in the process of doing failover
#
# @completed: the failover process has finished
#
# @relaunch: restart the failover process, from 'none' -> 'completed'
#     (Since 2.9)
#
# Since: 2.8
##
{ 'enum': 'FailoverStatus',
  'data': [ 'none', 'require', 'active', 'completed', 'relaunch' ] }

##
# @COLO_EXIT:
#
# Emitted when the VM finishes COLO mode, due to an error or at the
# request of users.
#
# @mode: report COLO mode when COLO exited.
#
# @reason: describes the reason for the COLO exit.
#
# Since: 3.1
#
# Example:
#
# <- { "timestamp": {"seconds": 2032141960, "microseconds": 417172},
#      "event": "COLO_EXIT", "data": {"mode": "primary", "reason": "request" } }
##
{ 'event': 'COLO_EXIT',
  'data': {'mode': 'COLOMode', 'reason': 'COLOExitReason' } }

##
# @COLOExitReason:
#
# The reason for a COLO exit.
#
# @none: failover has never happened.  This state does not occur in
#     the COLO_EXIT event, and is only visible in the result of
#     query-colo-status.
#
# @request: COLO exit is due to an external request.
#
# @error: COLO exit is due to an internal error.
#
# @processing: COLO is currently handling a failover (since 4.0).
#
# Since: 3.1
##
{ 'enum': 'COLOExitReason',
  'data': [ 'none', 'request', 'error', 'processing' ] }

##
# @x-colo-lost-heartbeat:
#
# Tell QEMU that the heartbeat is lost, requesting it to do takeover
# procedures.  If this command is sent to the PVM, the Primary side
# will exit COLO mode.  If sent to the Secondary, the Secondary side
# will run failover work, then take over server operation to become
# the service VM.
#
# Features:
#
# @unstable: This command is experimental.
#
# Since: 2.8
#
# Example:
#
# -> { "execute": "x-colo-lost-heartbeat" }
# <- { "return": {} }
##
{ 'command': 'x-colo-lost-heartbeat',
  'features': [ 'unstable' ],
  'if': 'CONFIG_REPLICATION' }

##
# @migrate_cancel:
#
# Cancel the currently executing migration process.
#
# Returns: nothing on success
#
# Notes: This command succeeds even if there is no migration process
#     running.
#
# Since: 0.14
#
# Example:
#
# -> { "execute": "migrate_cancel" }
# <- { "return": {} }
##
{ 'command': 'migrate_cancel' }

##
# @migrate-continue:
#
# Continue migration when it's in a paused state.
#
# @state: The state the migration is currently expected to be in
#
# Returns: nothing on success
#
# Since: 2.11
#
# Example:
#
# -> { "execute": "migrate-continue", "arguments":
#      { "state": "pre-switchover" } }
# <- { "return": {} }
##
{ 'command': 'migrate-continue', 'data': {'state': 'MigrationStatus'} }
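
# Illustrative usage sketch (not part of the schema documentation):
# one possible pause-before-switchover flow.  After the capability is
# enabled and the migration reaches the 'pre-switchover' state
# (reported by query-migrate or the MIGRATION event), the switchover
# is released with migrate-continue.
#
# -> { "execute": "migrate-set-capabilities", "arguments":
#      { "capabilities": [
#          { "capability": "pause-before-switchover", "state": true } ] } }
# <- { "return": {} }
# ... migration runs until it pauses in the 'pre-switchover' state ...
# -> { "execute": "migrate-continue",
#      "arguments": { "state": "pre-switchover" } }
# <- { "return": {} }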

##
# @migrate:
#
# Migrates the currently running guest to another Virtual Machine.
#
# @uri: the Uniform Resource Identifier of the destination VM
#
# @blk: do block migration (full disk copy)
#
# @inc: incremental disk copy migration
#
# @detach: this argument exists only for compatibility reasons and is
#     ignored by QEMU
#
# @resume: resume one paused migration, default "off".  (since 3.0)
#
# Returns: nothing on success
#
# Since: 0.14
#
# Notes:
#
# 1. The 'query-migrate' command should be used to check migration's
#    progress and final result (this information is provided by the
#    'status' member)
#
# 2. All boolean arguments default to false
#
# 3. The user Monitor's "detach" argument is invalid in QMP and should
#    not be used
#
# Example:
#
# -> { "execute": "migrate", "arguments": { "uri": "tcp:0:4446" } }
# <- { "return": {} }
##
{ 'command': 'migrate',
  'data': {'uri': 'str', '*blk': 'bool', '*inc': 'bool',
           '*detach': 'bool', '*resume': 'bool' } }

##
# @migrate-incoming:
#
# Start an incoming migration; the qemu must have been started with
# -incoming defer
#
# @uri: The Uniform Resource Identifier identifying the source or
#     address to listen on
#
# Returns: nothing on success
#
# Since: 2.3
#
# Notes:
#
# 1. It's a bad idea to use a string for the uri, but it needs
#    to stay compatible with -incoming and the format of the uri
#    is already exposed above libvirt.
#
# 2. QEMU must be started with -incoming defer to allow
#    migrate-incoming to be used.
#
# 3. The uri format is the same as for -incoming
#
# Example:
#
# -> { "execute": "migrate-incoming",
#      "arguments": { "uri": "tcp::4446" } }
# <- { "return": {} }
##
{ 'command': 'migrate-incoming', 'data': {'uri': 'str' } }

##
# @xen-save-devices-state:
#
# Save the state of all devices to file.  The RAM and the block
# devices of the VM are not saved by this command.
#
# @filename: the file to save the state of the devices to as binary
#     data.  See xen-save-devices-state.txt for a description of the
#     binary format.
#
# @live: Optional argument to ask QEMU to treat this command as part
#     of a live migration.  Defaults to true.  (since 2.11)
#
# Returns: Nothing on success
#
# Since: 1.1
#
# Example:
#
# -> { "execute": "xen-save-devices-state",
#      "arguments": { "filename": "/tmp/save" } }
# <- { "return": {} }
##
{ 'command': 'xen-save-devices-state',
  'data': {'filename': 'str', '*live': 'bool' } }

##
# @xen-set-global-dirty-log:
#
# Enable or disable the global dirty log mode.
#
# @enable: true to enable, false to disable.
#
# Returns: nothing
#
# Since: 1.3
#
# Example:
#
# -> { "execute": "xen-set-global-dirty-log",
#      "arguments": { "enable": true } }
# <- { "return": {} }
##
{ 'command': 'xen-set-global-dirty-log', 'data': { 'enable': 'bool' } }

##
# @xen-load-devices-state:
#
# Load the state of all devices from file.  The RAM and the block
# devices of the VM are not loaded by this command.
#
# @filename: the file to load the state of the devices from as binary
#     data.  See xen-save-devices-state.txt for a description of the
#     binary format.
#
# Since: 2.7
#
# Example:
#
# -> { "execute": "xen-load-devices-state",
#      "arguments": { "filename": "/tmp/resume" } }
# <- { "return": {} }
##
{ 'command': 'xen-load-devices-state', 'data': {'filename': 'str'} }

##
# @xen-set-replication:
#
# Enable or disable replication.
#
# @enable: true to enable, false to disable.
#
# @primary: true for primary or false for secondary.
#
# @failover: true to do failover, false to stop.  It cannot be
#     specified if 'enable' is true.  The default value is false.
#
# Returns: nothing.
#
# Example:
#
# -> { "execute": "xen-set-replication",
#      "arguments": {"enable": true, "primary": false} }
# <- { "return": {} }
#
# Since: 2.9
##
{ 'command': 'xen-set-replication',
  'data': { 'enable': 'bool', 'primary': 'bool', '*failover': 'bool' },
  'if': 'CONFIG_REPLICATION' }

##
# @ReplicationStatus:
#
# The result format for 'query-xen-replication-status'.
#
# @error: true if an error happened, false if replication is normal.
#
# @desc: the human readable error description string, when @error is
#     'true'.
#
# Since: 2.9
##
{ 'struct': 'ReplicationStatus',
  'data': { 'error': 'bool', '*desc': 'str' },
  'if': 'CONFIG_REPLICATION' }

##
# @query-xen-replication-status:
#
# Query replication status while the vm is running.
#
# Returns: A @ReplicationStatus object showing the status.
#
# Example:
#
# -> { "execute": "query-xen-replication-status" }
# <- { "return": { "error": false } }
#
# Since: 2.9
##
{ 'command': 'query-xen-replication-status',
  'returns': 'ReplicationStatus',
  'if': 'CONFIG_REPLICATION' }

##
# @xen-colo-do-checkpoint:
#
# Xen uses this command to notify replication to trigger a checkpoint.
#
# Returns: nothing.
#
# Example:
#
# -> { "execute": "xen-colo-do-checkpoint" }
# <- { "return": {} }
#
# Since: 2.9
##
{ 'command': 'xen-colo-do-checkpoint',
  'if': 'CONFIG_REPLICATION' }

##
# @COLOStatus:
#
# The result format for 'query-colo-status'.
#
# @mode: COLO running mode.  If COLO is running, this field will
#     return 'primary' or 'secondary'.
#
# @last-mode: COLO last running mode.  If COLO is running, this field
#     will return the same value as @mode; after a failover, this
#     field can be used to get the last COLO mode.  (since 4.0)
#
# @reason: describes the reason for the COLO exit.
#
# Since: 3.1
##
{ 'struct': 'COLOStatus',
  'data': { 'mode': 'COLOMode', 'last-mode': 'COLOMode',
            'reason': 'COLOExitReason' },
  'if': 'CONFIG_REPLICATION' }

##
# @query-colo-status:
#
# Query COLO status while the vm is running.
#
# Returns: A @COLOStatus object showing the status.
#
# Example:
#
# -> { "execute": "query-colo-status" }
# <- { "return": { "mode": "primary", "last-mode": "none", "reason": "request" } }
#
# Since: 3.1
##
{ 'command': 'query-colo-status',
  'returns': 'COLOStatus',
  'if': 'CONFIG_REPLICATION' }

##
# @migrate-recover:
#
# Provide a recovery migration stream URI.
#
# @uri: the URI to be used for the recovery of migration stream.
#
# Returns: nothing.
#
# Example:
#
# -> { "execute": "migrate-recover",
#      "arguments": { "uri": "tcp:192.168.1.200:12345" } }
# <- { "return": {} }
#
# Since: 3.0
##
{ 'command': 'migrate-recover',
  'data': { 'uri': 'str' },
  'allow-oob': true }

##
# @migrate-pause:
#
# Pause a migration.  Currently it only supports postcopy.
#
# Returns: nothing.
#
# Example:
#
# -> { "execute": "migrate-pause" }
# <- { "return": {} }
#
# Since: 3.0
##
{ 'command': 'migrate-pause', 'allow-oob': true }
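
# Illustrative usage sketch (not part of the schema documentation):
# one possible recovery sequence after a postcopy migration has
# entered the 'postcopy-paused' state.  The URI is hypothetical; the
# destination provides a new listening address with migrate-recover,
# then the source resumes towards it using the migrate @resume flag.
#
# (destination) -> { "execute": "migrate-recover",
#                    "arguments": { "uri": "tcp:192.168.1.200:12345" } }
#               <- { "return": {} }
# (source)      -> { "execute": "migrate",
#                    "arguments": { "uri": "tcp:192.168.1.200:12345",
#                                   "resume": true } }
#               <- { "return": {} }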

##
# @UNPLUG_PRIMARY:
#
# Emitted from source side of a migration when migration state is
# WAIT_UNPLUG.  Device was unplugged by guest operating system.  Device
# resources in QEMU are kept on standby to be able to re-plug it in
# case of migration failure.
#
# @device-id: QEMU device id of the unplugged device
#
# Since: 4.2
#
# Example:
#
# <- { "event": "UNPLUG_PRIMARY",
#      "data": { "device-id": "hostdev0" },
#      "timestamp": { "seconds": 1265044230, "microseconds": 450486 } }
##
{ 'event': 'UNPLUG_PRIMARY',
  'data': { 'device-id': 'str' } }

##
# @DirtyRateVcpu:
#
# Dirty rate of vcpu.
#
# @id: vcpu index.
#
# @dirty-rate: dirty rate.
#
# Since: 6.2
##
{ 'struct': 'DirtyRateVcpu',
  'data': { 'id': 'int', 'dirty-rate': 'int64' } }

##
# @DirtyRateStatus:
#
# An enumeration of dirtyrate status.
#
# @unstarted: the dirtyrate thread has not been started.
#
# @measuring: the dirtyrate thread is measuring.
#
# @measured: the dirtyrate thread has measured and results are
#     available.
#
# Since: 5.2
##
{ 'enum': 'DirtyRateStatus',
  'data': [ 'unstarted', 'measuring', 'measured'] }

##
# @DirtyRateMeasureMode:
#
# An enumeration of mode of measuring dirtyrate.
#
# @page-sampling: calculate dirtyrate by sampling pages.
#
# @dirty-ring: calculate dirtyrate by dirty ring.
#
# @dirty-bitmap: calculate dirtyrate by dirty bitmap.
#
# Since: 6.2
##
{ 'enum': 'DirtyRateMeasureMode',
  'data': ['page-sampling', 'dirty-ring', 'dirty-bitmap'] }

##
# @DirtyRateInfo:
#
# Information about current dirty page rate of vm.
#
# @dirty-rate: an estimate of the dirty page rate of the VM in units
#     of MB/s, present only when estimating the rate has completed.
#
# @status: status of the dirty page rate measurement; one of
#     'unstarted', 'measuring' or 'measured'
#
# @start-time: start time of the calculation, in seconds
#
# @calc-time: time, in seconds, over which dirty pages are sampled
#
# @sample-pages: number of sampled pages per GB of guest memory; the
#     default value is 512 (since 6.1)
#
# @mode: method used to calculate the dirty rate, such as
#     'page-sampling' or 'dirty-ring' (Since 6.2)
#
# @vcpu-dirty-rate: dirty rate for each vCPU if dirty-ring mode was
#     specified (Since 6.2)
#
# Since: 5.2
##
{ 'struct': 'DirtyRateInfo',
  'data': {'*dirty-rate': 'int64',
           'status': 'DirtyRateStatus',
           'start-time': 'int64',
           'calc-time': 'int64',
           'sample-pages': 'uint64',
           'mode': 'DirtyRateMeasureMode',
           '*vcpu-dirty-rate': [ 'DirtyRateVcpu' ] } }

##
# @calc-dirty-rate:
#
# Start calculating the dirty page rate for the VM.
#
# @calc-time: time, in seconds, over which dirty pages are sampled
#
# @sample-pages: number of sampled pages per GB of guest memory; the
#     default value is 512 (since 6.1)
#
# @mode: mechanism for calculating the dirty rate, such as
#     'page-sampling' or 'dirty-ring' (Since 6.1)
#
# Since: 5.2
#
# Example:
#
# -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 1,
#     "sample-pages": 512} }
# <- { "return": {} }
##
{ 'command': 'calc-dirty-rate', 'data': {'calc-time': 'int64',
                                         '*sample-pages': 'int',
                                         '*mode': 'DirtyRateMeasureMode'} }

##
# @query-dirty-rate:
#
# Query the dirty page rate of the VM, in units of MB/s.
#
# Since: 5.2
##
{ 'command': 'query-dirty-rate', 'returns': 'DirtyRateInfo' }
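
# Illustrative usage sketch (not part of the schema documentation):
# a possible query-dirty-rate exchange after a page-sampling
# measurement has completed; all values are example numbers only.
#
# -> { "execute": "query-dirty-rate" }
# <- { "return": { "status": "measured", "dirty-rate": 108,
#                  "start-time": 1693904000, "calc-time": 1,
#                  "sample-pages": 512, "mode": "page-sampling" } }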
#
# @status: status of the dirtyrate measurement; one of 'unstarted',
#     'measuring' or 'measured'
#
# @start-time: start time of the calculation, in units of seconds
#
# @calc-time: time period, in units of seconds, over which dirty
#     pages are sampled
#
# @sample-pages: number of sampled pages per GB of guest memory; the
#     default value is 512 (since 6.1)
#
# @mode: method used to calculate the dirtyrate; one of
#     'page-sampling', 'dirty-ring' or 'dirty-bitmap' (Since 6.2)
#
# @vcpu-dirty-rate: dirtyrate for each vcpu when dirty-ring mode is
#     specified (Since 6.2)
#
# Since: 5.2
##
{ 'struct': 'DirtyRateInfo',
  'data': {'*dirty-rate': 'int64',
           'status': 'DirtyRateStatus',
           'start-time': 'int64',
           'calc-time': 'int64',
           'sample-pages': 'uint64',
           'mode': 'DirtyRateMeasureMode',
           '*vcpu-dirty-rate': [ 'DirtyRateVcpu' ] } }

##
# @calc-dirty-rate:
#
# Start calculating the dirty page rate for the VM.
#
# @calc-time: time period, in units of seconds, over which dirty
#     pages are sampled
#
# @sample-pages: number of sampled pages per GB of guest memory; the
#     default value is 512 (since 6.1)
#
# @mode: mechanism used to calculate the dirtyrate; one of
#     'page-sampling', 'dirty-ring' or 'dirty-bitmap' (Since 6.2)
#
# Since: 5.2
#
# Example:
#
# -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 1,
#     "sample-pages": 512} }
# <- { "return": {} }
##
{ 'command': 'calc-dirty-rate', 'data': {'calc-time': 'int64',
                                         '*sample-pages': 'int',
                                         '*mode': 'DirtyRateMeasureMode'} }

##
# @query-dirty-rate:
#
# Query the dirty page rate, in units of MB/s, for the VM.
#
# Since: 5.2
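#
# Example:
#
# The exchange below is illustrative: a typical sequence is to start
# a measurement with @calc-dirty-rate, wait at least @calc-time
# seconds, and then query the result. The returned values shown here
# are hypothetical and depend on the guest workload and on the
# selected measurement mode.
#
# -> { "execute": "query-dirty-rate" }
# <- { "return": { "status": "measured", "dirty-rate": 108,
#                  "start-time": 1631656801, "calc-time": 1,
#                  "sample-pages": 512, "mode": "page-sampling" } }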
##
{ 'command': 'query-dirty-rate', 'returns': 'DirtyRateInfo' }

##
# @DirtyLimitInfo:
#
# Dirty page rate limit information of a virtual CPU.
#
# @cpu-index: index of a virtual CPU.
#
# @limit-rate: upper limit of dirty page rate (MB/s) for a virtual
#     CPU, 0 means unlimited.
#
# @current-rate: current dirty page rate (MB/s) for a virtual CPU.
#
# Since: 7.1
##
{ 'struct': 'DirtyLimitInfo',
  'data': { 'cpu-index': 'int',
            'limit-rate': 'uint64',
            'current-rate': 'uint64' } }

##
# @set-vcpu-dirty-limit:
#
# Set the upper limit of dirty page rate for virtual CPUs.
#
# Requires KVM with accelerator property "dirty-ring-size" set. A
# virtual CPU's dirty page rate is a measure of its memory load. To
# observe dirty page rates, use @calc-dirty-rate.
#
# @cpu-index: index of a virtual CPU, default is all.
#
# @dirty-rate: upper limit of dirty page rate (MB/s) for virtual CPUs.
#
# Since: 7.1
#
# Example:
#
# -> {"execute": "set-vcpu-dirty-limit",
#     "arguments": { "dirty-rate": 200,
#                    "cpu-index": 1 } }
# <- { "return": {} }
##
{ 'command': 'set-vcpu-dirty-limit',
  'data': { '*cpu-index': 'int',
            'dirty-rate': 'uint64' } }

##
# @cancel-vcpu-dirty-limit:
#
# Cancel the upper limit of dirty page rate for virtual CPUs.
#
# Cancel the dirty page rate limit for a vCPU that has been set with
# the set-vcpu-dirty-limit command. Note that this command requires
# dirty ring support, the same as "set-vcpu-dirty-limit".
#
# @cpu-index: index of a virtual CPU, default is all.
#
# Since: 7.1
#
# Example:
#
# -> {"execute": "cancel-vcpu-dirty-limit",
#     "arguments": { "cpu-index": 1 } }
# <- { "return": {} }
##
{ 'command': 'cancel-vcpu-dirty-limit',
  'data': { '*cpu-index': 'int'} }

##
# @query-vcpu-dirty-limit:
#
# Returns information about virtual CPU dirty page rate limits, if
# any.
#
# Since: 7.1
#
# Example:
#
# -> {"execute": "query-vcpu-dirty-limit"}
# <- {"return": [
#        { "limit-rate": 60, "current-rate": 3, "cpu-index": 0},
#        { "limit-rate": 60, "current-rate": 3, "cpu-index": 1}]}
##
{ 'command': 'query-vcpu-dirty-limit',
  'returns': [ 'DirtyLimitInfo' ] }

##
# @MigrationThreadInfo:
#
# Information about migration threads
#
# @name: the name of the migration thread
#
# @thread-id: ID of the underlying host thread
#
# Since: 7.2
##
{ 'struct': 'MigrationThreadInfo',
  'data': {'name': 'str',
           'thread-id': 'int'} }

##
# @query-migrationthreads:
#
# Query the migration threads of the running migration.
#
# Returns: information about the migration threads
#
# Since: 7.2
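#
# Example (illustrative; the actual thread names and IDs depend on
# the host and on the migration configuration):
#
# -> { "execute": "query-migrationthreads" }
# <- { "return": [ { "name": "live_migration", "thread-id": 43221 } ] }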
##
{ 'command': 'query-migrationthreads',
  'returns': ['MigrationThreadInfo'] }

##
# @snapshot-save:
#
# Save a VM snapshot
#
# @job-id: identifier for the newly created job
#
# @tag: name of the snapshot to create
#
# @vmstate: block device node name to save vmstate to
#
# @devices: list of block device node names to save a snapshot to
#
# Applications should not assume that the snapshot save is complete
# when this command returns. The job commands / events must be used
# to determine completion and to fetch details of any errors that
# arise.
#
# Note that execution of the guest CPUs may be stopped during the
# time it takes to save the snapshot. A future version of QEMU may
# ensure CPUs are executing continuously.
#
# It is strongly recommended that @devices contain all writable block
# device nodes if a consistent snapshot is required.
#
# If @tag already exists, an error will be reported
#
# Returns: nothing
#
# Example:
#
# -> { "execute": "snapshot-save",
#      "arguments": {
#         "job-id": "snapsave0",
#         "tag": "my-snap",
#         "vmstate": "disk0",
#         "devices": ["disk0", "disk1"]
#      }
#    }
# <- { "return": { } }
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1432121972, "microseconds": 744001},
#     "data": {"status": "created", "id": "snapsave0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1432122172, "microseconds": 744001},
#     "data": {"status": "running", "id": "snapsave0"}}
# <- {"event": "STOP",
#     "timestamp": {"seconds": 1432122372, "microseconds": 744001} }
# <- {"event": "RESUME",
#     "timestamp": {"seconds": 1432122572, "microseconds": 744001} }
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1432122772, "microseconds": 744001},
#     "data": {"status": "waiting", "id": "snapsave0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1432122972, "microseconds": 744001},
#     "data": {"status": "pending", "id": "snapsave0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1432123172, "microseconds": 744001},
#     "data": {"status": "concluded", "id": "snapsave0"}}
# -> {"execute": "query-jobs"}
# <- {"return": [{"current-progress": 1,
#                 "status": "concluded",
#                 "total-progress": 1,
#                 "type": "snapshot-save",
#                 "id": "snapsave0"}]}
#
# Since: 6.0
##
{ 'command': 'snapshot-save',
  'data': { 'job-id': 'str',
            'tag': 'str',
            'vmstate': 'str',
            'devices': ['str'] } }

##
# @snapshot-load:
#
# Load a VM snapshot
#
# @job-id: identifier for the newly created job
#
# @tag: name of the snapshot to load.
#
# @vmstate: block device node name to load vmstate from
#
# @devices: list of block device node names to load a snapshot from
#
# Applications should not assume that the snapshot load is complete
# when this command returns. The job commands / events must be used
# to determine completion and to fetch details of any errors that
# arise.
#
# Note that execution of the guest CPUs will be stopped during the
# time it takes to load the snapshot.
#
# It is strongly recommended that @devices contain all writable block
# device nodes that can have changed since the original @snapshot-save
# command execution.
#
# Returns: nothing
#
# Example:
#
# -> { "execute": "snapshot-load",
#      "arguments": {
#         "job-id": "snapload0",
#         "tag": "my-snap",
#         "vmstate": "disk0",
#         "devices": ["disk0", "disk1"]
#      }
#    }
# <- { "return": { } }
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1472124172, "microseconds": 744001},
#     "data": {"status": "created", "id": "snapload0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1472125172, "microseconds": 744001},
#     "data": {"status": "running", "id": "snapload0"}}
# <- {"event": "STOP",
#     "timestamp": {"seconds": 1472125472, "microseconds": 744001} }
# <- {"event": "RESUME",
#     "timestamp": {"seconds": 1472125872, "microseconds": 744001} }
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1472126172, "microseconds": 744001},
#     "data": {"status": "waiting", "id": "snapload0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1472127172, "microseconds": 744001},
#     "data": {"status": "pending", "id": "snapload0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1472128172, "microseconds": 744001},
#     "data": {"status": "concluded", "id": "snapload0"}}
# -> {"execute": "query-jobs"}
# <- {"return": [{"current-progress": 1,
#                 "status": "concluded",
#                 "total-progress": 1,
#                 "type": "snapshot-load",
#                 "id": "snapload0"}]}
#
# Since: 6.0
##
{ 'command': 'snapshot-load',
  'data': { 'job-id': 'str',
            'tag': 'str',
            'vmstate': 'str',
            'devices': ['str'] } }

##
# @snapshot-delete:
#
# Delete a VM snapshot
#
# @job-id: identifier for the newly created job
#
# @tag: name of the snapshot to delete.
#
# @devices: list of block device node names to delete a snapshot from
#
# Applications should not assume that the snapshot delete is complete
# when this command returns. The job commands / events must be used
# to determine completion and to fetch details of any errors that
# arise.
#
# Returns: nothing
#
# Example:
#
# -> { "execute": "snapshot-delete",
#      "arguments": {
#         "job-id": "snapdelete0",
#         "tag": "my-snap",
#         "devices": ["disk0", "disk1"]
#      }
#    }
# <- { "return": { } }
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1442124172, "microseconds": 744001},
#     "data": {"status": "created", "id": "snapdelete0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1442125172, "microseconds": 744001},
#     "data": {"status": "running", "id": "snapdelete0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1442126172, "microseconds": 744001},
#     "data": {"status": "waiting", "id": "snapdelete0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1442127172, "microseconds": 744001},
#     "data": {"status": "pending", "id": "snapdelete0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1442128172, "microseconds": 744001},
#     "data": {"status": "concluded", "id": "snapdelete0"}}
# -> {"execute": "query-jobs"}
# <- {"return": [{"current-progress": 1,
#                 "status": "concluded",
#                 "total-progress": 1,
#                 "type": "snapshot-delete",
#                 "id": "snapdelete0"}]}
#
# Since: 6.0
##
{ 'command': 'snapshot-delete',
  'data': { 'job-id': 'str',
            'tag': 'str',
            'devices': ['str'] } }