# -*- Mode: Python -*-
# vim: filetype=python
#

##
# = Migration
##

{ 'include': 'common.json' }
{ 'include': 'sockets.json' }

##
# @MigrationStats:
#
# Detailed migration status.
#
# @transferred: amount of bytes already transferred to the target VM
#
# @remaining: amount of bytes remaining to be transferred to the target VM
#
# @total: total amount of bytes involved in the migration process
#
# @duplicate: number of duplicate (zero) pages (since 1.2)
#
# @skipped: number of skipped zero pages (since 1.5)
#
# @normal: number of normal pages (since 1.2)
#
# @normal-bytes: number of normal bytes sent (since 1.2)
#
# @dirty-pages-rate: number of pages dirtied per second by the
#     guest (since 1.3)
#
# @mbps: throughput in megabits/sec. (since 1.6)
#
# @dirty-sync-count: number of times that dirty ram was synchronized (since 2.1)
#
# @postcopy-requests: The number of page requests received from the destination
#     (since 2.7)
#
# @page-size: The number of bytes per page for the various page-based
#     statistics (since 2.10)
#
# @multifd-bytes: The number of bytes sent through multifd (since 3.0)
#
# @pages-per-second: the number of memory pages transferred per second
#     (Since 4.0)
#
# @precopy-bytes: The number of bytes sent in the pre-copy phase
#     (since 7.0).
#
# @downtime-bytes: The number of bytes sent while the guest is paused
#     (since 7.0).
#
# @postcopy-bytes: The number of bytes sent during the post-copy phase
#     (since 7.0).
#
# @dirty-sync-missed-zero-copy: Number of times dirty RAM synchronization could
#     not avoid copying dirty pages. This is between
#     0 and @dirty-sync-count * @multifd-channels.
#     (since 7.1)
#
# Since: 0.14
##
{ 'struct': 'MigrationStats',
  'data': {'transferred': 'int', 'remaining': 'int', 'total': 'int' ,
           'duplicate': 'int', 'skipped': 'int', 'normal': 'int',
           'normal-bytes': 'int', 'dirty-pages-rate' : 'int',
           'mbps' : 'number', 'dirty-sync-count' : 'int',
           'postcopy-requests' : 'int', 'page-size' : 'int',
           'multifd-bytes' : 'uint64', 'pages-per-second' : 'uint64',
           'precopy-bytes' : 'uint64', 'downtime-bytes' : 'uint64',
           'postcopy-bytes' : 'uint64',
           'dirty-sync-missed-zero-copy' : 'uint64' } }

##
# @XBZRLECacheStats:
#
# Detailed XBZRLE migration cache statistics
#
# @cache-size: XBZRLE cache size
#
# @bytes: amount of bytes already transferred to the target VM
#
# @pages: amount of pages transferred to the target VM
#
# @cache-miss: number of cache misses
#
# @cache-miss-rate: rate of cache miss (since 2.1)
#
# @encoding-rate: rate of encoded bytes (since 5.1)
#
# @overflow: number of overflows
#
# Since: 1.2
##
{ 'struct': 'XBZRLECacheStats',
  'data': {'cache-size': 'size', 'bytes': 'int', 'pages': 'int',
           'cache-miss': 'int', 'cache-miss-rate': 'number',
           'encoding-rate': 'number', 'overflow': 'int' } }

##
# @CompressionStats:
#
# Detailed migration compression statistics
#
# @pages: amount of pages compressed and transferred to the target VM
#
# @busy: count of times that no free thread was available to compress data
#
# @busy-rate: rate of thread busy
#
# @compressed-size: amount of bytes after compression
#
# @compression-rate: rate of compressed size
#
# Since: 3.1
##
{ 'struct': 'CompressionStats',
  'data': {'pages': 'int', 'busy': 'int', 'busy-rate': 'number',
           'compressed-size': 'int', 'compression-rate': 'number' } }

##
# @MigrationStatus:
#
# An enumeration of migration status.
#
# @none: no migration has ever happened.
#
# @setup: migration process has been initiated.
#
# @cancelling: in the process of cancelling migration.
#
# @cancelled: cancelling migration is finished.
#
# @active: in the process of doing migration.
#
# @postcopy-active: like active, but now in postcopy mode. (since 2.5)
#
# @postcopy-paused: during postcopy but paused. (since 3.0)
#
# @postcopy-recover: trying to recover from a paused postcopy. (since 3.0)
#
# @completed: migration is finished.
#
# @failed: some error occurred during migration process.
#
# @colo: VM is in the process of fault tolerance, VM cannot get into this
#     state unless colo capability is enabled for migration. (since 2.8)
#
# @pre-switchover: Paused before device serialisation. (since 2.11)
#
# @device: During device serialisation when pause-before-switchover is enabled
#     (since 2.11)
#
# @wait-unplug: wait for device unplug request by guest OS to be completed.
#     (since 4.2)
#
# Since: 2.3
##
{ 'enum': 'MigrationStatus',
  'data': [ 'none', 'setup', 'cancelling', 'cancelled',
            'active', 'postcopy-active', 'postcopy-paused',
            'postcopy-recover', 'completed', 'failed', 'colo',
            'pre-switchover', 'device', 'wait-unplug' ] }

##
# @VfioStats:
#
# Detailed VFIO devices migration statistics
#
# @transferred: amount of bytes transferred to the target VM by VFIO devices
#
# Since: 5.2
##
{ 'struct': 'VfioStats',
  'data': {'transferred': 'int' } }

##
# @MigrationInfo:
#
# Information about current migration process.
#
# @status: @MigrationStatus describing the current migration status.
183# If this field is not returned, no migration process 184# has been initiated 185# 186# @ram: @MigrationStats containing detailed migration 187# status, only returned if status is 'active' or 188# 'completed'(since 1.2) 189# 190# @disk: @MigrationStats containing detailed disk migration 191# status, only returned if status is 'active' and it is a block 192# migration 193# 194# @xbzrle-cache: @XBZRLECacheStats containing detailed XBZRLE 195# migration statistics, only returned if XBZRLE feature is on and 196# status is 'active' or 'completed' (since 1.2) 197# 198# @total-time: total amount of milliseconds since migration started. 199# If migration has ended, it returns the total migration 200# time. (since 1.2) 201# 202# @downtime: only present when migration finishes correctly 203# total downtime in milliseconds for the guest. 204# (since 1.3) 205# 206# @expected-downtime: only present while migration is active 207# expected downtime in milliseconds for the guest in last walk 208# of the dirty bitmap. (since 1.3) 209# 210# @setup-time: amount of setup time in milliseconds *before* the 211# iterations begin but *after* the QMP command is issued. This is designed 212# to provide an accounting of any activities (such as RDMA pinning) which 213# may be expensive, but do not actually occur during the iterative 214# migration rounds themselves. (since 1.6) 215# 216# @cpu-throttle-percentage: percentage of time guest cpus are being 217# throttled during auto-converge. This is only present when auto-converge 218# has started throttling guest cpus. (Since 2.7) 219# 220# @error-desc: the human readable error description string, when 221# @status is 'failed'. Clients should not attempt to parse the 222# error strings. (Since 2.7) 223# 224# @postcopy-blocktime: total time when all vCPU were blocked during postcopy 225# live migration. This is only present when the postcopy-blocktime 226# migration capability is enabled. 
(Since 3.0) 227# 228# @postcopy-vcpu-blocktime: list of the postcopy blocktime per vCPU. This is 229# only present when the postcopy-blocktime migration capability 230# is enabled. (Since 3.0) 231# 232# @compression: migration compression statistics, only returned if compression 233# feature is on and status is 'active' or 'completed' (Since 3.1) 234# 235# @socket-address: Only used for tcp, to know what the real port is (Since 4.0) 236# 237# @vfio: @VfioStats containing detailed VFIO devices migration statistics, 238# only returned if VFIO device is present, migration is supported by all 239# VFIO devices and status is 'active' or 'completed' (since 5.2) 240# 241# @blocked-reasons: A list of reasons an outgoing migration is blocked. 242# Present and non-empty when migration is blocked. 243# (since 6.0) 244# 245# Since: 0.14 246## 247{ 'struct': 'MigrationInfo', 248 'data': {'*status': 'MigrationStatus', '*ram': 'MigrationStats', 249 '*disk': 'MigrationStats', 250 '*vfio': 'VfioStats', 251 '*xbzrle-cache': 'XBZRLECacheStats', 252 '*total-time': 'int', 253 '*expected-downtime': 'int', 254 '*downtime': 'int', 255 '*setup-time': 'int', 256 '*cpu-throttle-percentage': 'int', 257 '*error-desc': 'str', 258 '*blocked-reasons': ['str'], 259 '*postcopy-blocktime' : 'uint32', 260 '*postcopy-vcpu-blocktime': ['uint32'], 261 '*compression': 'CompressionStats', 262 '*socket-address': ['SocketAddress'] } } 263 264## 265# @query-migrate: 266# 267# Returns information about current migration process. If migration 268# is active there will be another json-object with RAM migration 269# status and if block migration is active another one with block 270# migration status. 271# 272# Returns: @MigrationInfo 273# 274# Since: 0.14 275# 276# Examples: 277# 278# 1. Before the first migration 279# 280# -> { "execute": "query-migrate" } 281# <- { "return": {} } 282# 283# 2. 
Migration is done and has succeeded 284# 285# -> { "execute": "query-migrate" } 286# <- { "return": { 287# "status": "completed", 288# "total-time":12345, 289# "setup-time":12345, 290# "downtime":12345, 291# "ram":{ 292# "transferred":123, 293# "remaining":123, 294# "total":246, 295# "duplicate":123, 296# "normal":123, 297# "normal-bytes":123456, 298# "dirty-sync-count":15 299# } 300# } 301# } 302# 303# 3. Migration is done and has failed 304# 305# -> { "execute": "query-migrate" } 306# <- { "return": { "status": "failed" } } 307# 308# 4. Migration is being performed and is not a block migration: 309# 310# -> { "execute": "query-migrate" } 311# <- { 312# "return":{ 313# "status":"active", 314# "total-time":12345, 315# "setup-time":12345, 316# "expected-downtime":12345, 317# "ram":{ 318# "transferred":123, 319# "remaining":123, 320# "total":246, 321# "duplicate":123, 322# "normal":123, 323# "normal-bytes":123456, 324# "dirty-sync-count":15 325# } 326# } 327# } 328# 329# 5. Migration is being performed and is a block migration: 330# 331# -> { "execute": "query-migrate" } 332# <- { 333# "return":{ 334# "status":"active", 335# "total-time":12345, 336# "setup-time":12345, 337# "expected-downtime":12345, 338# "ram":{ 339# "total":1057024, 340# "remaining":1053304, 341# "transferred":3720, 342# "duplicate":123, 343# "normal":123, 344# "normal-bytes":123456, 345# "dirty-sync-count":15 346# }, 347# "disk":{ 348# "total":20971520, 349# "remaining":20880384, 350# "transferred":91136 351# } 352# } 353# } 354# 355# 6. 
Migration is being performed and XBZRLE is active: 356# 357# -> { "execute": "query-migrate" } 358# <- { 359# "return":{ 360# "status":"active", 361# "total-time":12345, 362# "setup-time":12345, 363# "expected-downtime":12345, 364# "ram":{ 365# "total":1057024, 366# "remaining":1053304, 367# "transferred":3720, 368# "duplicate":10, 369# "normal":3333, 370# "normal-bytes":3412992, 371# "dirty-sync-count":15 372# }, 373# "xbzrle-cache":{ 374# "cache-size":67108864, 375# "bytes":20971520, 376# "pages":2444343, 377# "cache-miss":2244, 378# "cache-miss-rate":0.123, 379# "encoding-rate":80.1, 380# "overflow":34434 381# } 382# } 383# } 384# 385## 386{ 'command': 'query-migrate', 'returns': 'MigrationInfo' } 387 388## 389# @MigrationCapability: 390# 391# Migration capabilities enumeration 392# 393# @xbzrle: Migration supports xbzrle (Xor Based Zero Run Length Encoding). 394# This feature allows us to minimize migration traffic for certain work 395# loads, by sending compressed difference of the pages 396# 397# @rdma-pin-all: Controls whether or not the entire VM memory footprint is 398# mlock()'d on demand or all at once. Refer to docs/rdma.txt for usage. 399# Disabled by default. (since 2.0) 400# 401# @zero-blocks: During storage migration encode blocks of zeroes efficiently. This 402# essentially saves 1MB of zeroes per block on the wire. Enabling requires 403# source and target VM to support this feature. To enable it is sufficient 404# to enable the capability on the source VM. The feature is disabled by 405# default. (since 1.6) 406# 407# @compress: Use multiple compression threads to accelerate live migration. 408# This feature can help to reduce the migration traffic, by sending 409# compressed pages. Please note that if compress and xbzrle are both 410# on, compress only takes effect in the ram bulk stage, after that, 411# it will be disabled and only xbzrle takes effect, this can help to 412# minimize migration traffic. The feature is disabled by default. 
#     (since 2.4)
#
# @events: generate events for each migration state change
#     (since 2.4)
#
# @auto-converge: If enabled, QEMU will automatically throttle down the guest
#     to speed up convergence of RAM migration. (since 1.6)
#
# @postcopy-ram: Start executing on the migration target before all of RAM has
#     been migrated, pulling the remaining pages along as needed. The
#     capability must have the same setting on both source and target
#     or migration will not even start. NOTE: If the migration fails during
#     postcopy the VM will fail. (since 2.6)
#
# @x-colo: If enabled, migration will never end, and the state of the VM on the
#     primary side will be migrated continuously to the VM on secondary
#     side, this process is called COarse-Grain LOck Stepping (COLO) for
#     Non-stop Service. (since 2.8)
#
# @release-ram: if enabled, qemu will free the migrated ram pages on the source
#     during postcopy-ram migration. (since 2.9)
#
# @block: If enabled, QEMU will also migrate the contents of all block
#     devices. Default is disabled. A possible alternative uses
#     mirror jobs to a builtin NBD server on the destination, which
#     offers more flexibility.
#     (Since 2.10)
#
# @return-path: If enabled, migration will use the return path even
#     for precopy. (since 2.10)
#
# @pause-before-switchover: Pause outgoing migration before serialising device
#     state and before disabling block IO (since 2.11)
#
# @multifd: Use more than one fd for migration (since 4.0)
#
# @dirty-bitmaps: If enabled, QEMU will migrate named dirty bitmaps.
#     (since 2.12)
#
# @postcopy-blocktime: Calculate downtime for postcopy live migration
#     (since 3.0)
#
# @late-block-activate: If enabled, the destination will not activate block
#     devices (and thus take locks) immediately at the end of migration.
#     (since 3.0)
#
# @x-ignore-shared: If enabled, QEMU will not migrate shared memory (since 4.0)
#
# @validate-uuid: Send the UUID of the source to allow the destination
#     to ensure it is the same. (since 4.2)
#
# @background-snapshot: If enabled, the migration stream will be a snapshot
#     of the VM exactly at the point when the migration
#     procedure starts. The VM RAM is saved with running VM.
#     (since 6.0)
#
# @zero-copy-send: Controls behavior on sending memory pages on migration.
#     When true, enables a zero-copy mechanism for sending
#     memory pages, if host supports it.
#     Requires that QEMU be permitted to use locked memory
#     for guest RAM pages.
#     (since 7.1)
#
# @postcopy-preempt: If enabled, the migration process will allow postcopy
#     requests to preempt precopy stream, so postcopy requests
#     will be handled faster. This is a performance feature and
#     should not affect the correctness of postcopy migration.
#     (since 7.1)
#
# Features:
# @unstable: Members @x-colo and @x-ignore-shared are experimental.
483# 484# Since: 1.2 485## 486{ 'enum': 'MigrationCapability', 487 'data': ['xbzrle', 'rdma-pin-all', 'auto-converge', 'zero-blocks', 488 'compress', 'events', 'postcopy-ram', 489 { 'name': 'x-colo', 'features': [ 'unstable' ] }, 490 'release-ram', 491 'block', 'return-path', 'pause-before-switchover', 'multifd', 492 'dirty-bitmaps', 'postcopy-blocktime', 'late-block-activate', 493 { 'name': 'x-ignore-shared', 'features': [ 'unstable' ] }, 494 'validate-uuid', 'background-snapshot', 495 'zero-copy-send', 'postcopy-preempt'] } 496 497## 498# @MigrationCapabilityStatus: 499# 500# Migration capability information 501# 502# @capability: capability enum 503# 504# @state: capability state bool 505# 506# Since: 1.2 507## 508{ 'struct': 'MigrationCapabilityStatus', 509 'data': { 'capability' : 'MigrationCapability', 'state' : 'bool' } } 510 511## 512# @migrate-set-capabilities: 513# 514# Enable/Disable the following migration capabilities (like xbzrle) 515# 516# @capabilities: json array of capability modifications to make 517# 518# Since: 1.2 519# 520# Example: 521# 522# -> { "execute": "migrate-set-capabilities" , "arguments": 523# { "capabilities": [ { "capability": "xbzrle", "state": true } ] } } 524# <- { "return": {} } 525# 526## 527{ 'command': 'migrate-set-capabilities', 528 'data': { 'capabilities': ['MigrationCapabilityStatus'] } } 529 530## 531# @query-migrate-capabilities: 532# 533# Returns information about the current migration capabilities status 534# 535# Returns: @MigrationCapabilityStatus 536# 537# Since: 1.2 538# 539# Example: 540# 541# -> { "execute": "query-migrate-capabilities" } 542# <- { "return": [ 543# {"state": false, "capability": "xbzrle"}, 544# {"state": false, "capability": "rdma-pin-all"}, 545# {"state": false, "capability": "auto-converge"}, 546# {"state": false, "capability": "zero-blocks"}, 547# {"state": false, "capability": "compress"}, 548# {"state": true, "capability": "events"}, 549# {"state": false, "capability": "postcopy-ram"}, 
550# {"state": false, "capability": "x-colo"} 551# ]} 552# 553## 554{ 'command': 'query-migrate-capabilities', 'returns': ['MigrationCapabilityStatus']} 555 556## 557# @MultiFDCompression: 558# 559# An enumeration of multifd compression methods. 560# 561# @none: no compression. 562# @zlib: use zlib compression method. 563# @zstd: use zstd compression method. 564# 565# Since: 5.0 566## 567{ 'enum': 'MultiFDCompression', 568 'data': [ 'none', 'zlib', 569 { 'name': 'zstd', 'if': 'CONFIG_ZSTD' } ] } 570 571## 572# @BitmapMigrationBitmapAliasTransform: 573# 574# @persistent: If present, the bitmap will be made persistent 575# or transient depending on this parameter. 576# 577# Since: 6.0 578## 579{ 'struct': 'BitmapMigrationBitmapAliasTransform', 580 'data': { 581 '*persistent': 'bool' 582 } } 583 584## 585# @BitmapMigrationBitmapAlias: 586# 587# @name: The name of the bitmap. 588# 589# @alias: An alias name for migration (for example the bitmap name on 590# the opposite site). 591# 592# @transform: Allows the modification of the migrated bitmap. 593# (since 6.0) 594# 595# Since: 5.2 596## 597{ 'struct': 'BitmapMigrationBitmapAlias', 598 'data': { 599 'name': 'str', 600 'alias': 'str', 601 '*transform': 'BitmapMigrationBitmapAliasTransform' 602 } } 603 604## 605# @BitmapMigrationNodeAlias: 606# 607# Maps a block node name and the bitmaps it has to aliases for dirty 608# bitmap migration. 609# 610# @node-name: A block node name. 611# 612# @alias: An alias block node name for migration (for example the 613# node name on the opposite site). 614# 615# @bitmaps: Mappings for the bitmaps on this node. 
616# 617# Since: 5.2 618## 619{ 'struct': 'BitmapMigrationNodeAlias', 620 'data': { 621 'node-name': 'str', 622 'alias': 'str', 623 'bitmaps': [ 'BitmapMigrationBitmapAlias' ] 624 } } 625 626## 627# @MigrationParameter: 628# 629# Migration parameters enumeration 630# 631# @announce-initial: Initial delay (in milliseconds) before sending the first 632# announce (Since 4.0) 633# 634# @announce-max: Maximum delay (in milliseconds) between packets in the 635# announcement (Since 4.0) 636# 637# @announce-rounds: Number of self-announce packets sent after migration 638# (Since 4.0) 639# 640# @announce-step: Increase in delay (in milliseconds) between subsequent 641# packets in the announcement (Since 4.0) 642# 643# @compress-level: Set the compression level to be used in live migration, 644# the compression level is an integer between 0 and 9, where 0 means 645# no compression, 1 means the best compression speed, and 9 means best 646# compression ratio which will consume more CPU. 647# 648# @compress-threads: Set compression thread count to be used in live migration, 649# the compression thread count is an integer between 1 and 255. 650# 651# @compress-wait-thread: Controls behavior when all compression threads are 652# currently busy. If true (default), wait for a free 653# compression thread to become available; otherwise, 654# send the page uncompressed. (Since 3.1) 655# 656# @decompress-threads: Set decompression thread count to be used in live 657# migration, the decompression thread count is an integer between 1 658# and 255. Usually, decompression is at least 4 times as fast as 659# compression, so set the decompress-threads to the number about 1/4 660# of compress-threads is adequate. 661# 662# @throttle-trigger-threshold: The ratio of bytes_dirty_period and bytes_xfer_period 663# to trigger throttling. It is expressed as percentage. 664# The default value is 50. 
(Since 5.0) 665# 666# @cpu-throttle-initial: Initial percentage of time guest cpus are throttled 667# when migration auto-converge is activated. The 668# default value is 20. (Since 2.7) 669# 670# @cpu-throttle-increment: throttle percentage increase each time 671# auto-converge detects that migration is not making 672# progress. The default value is 10. (Since 2.7) 673# 674# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage 675# At the tail stage of throttling, the Guest is very 676# sensitive to CPU percentage while the @cpu-throttle 677# -increment is excessive usually at tail stage. 678# If this parameter is true, we will compute the ideal 679# CPU percentage used by the Guest, which may exactly make 680# the dirty rate match the dirty rate threshold. Then we 681# will choose a smaller throttle increment between the 682# one specified by @cpu-throttle-increment and the one 683# generated by ideal CPU percentage. 684# Therefore, it is compatible to traditional throttling, 685# meanwhile the throttle increment won't be excessive 686# at tail stage. 687# The default value is false. (Since 5.1) 688# 689# @tls-creds: ID of the 'tls-creds' object that provides credentials for 690# establishing a TLS connection over the migration data channel. 691# On the outgoing side of the migration, the credentials must 692# be for a 'client' endpoint, while for the incoming side the 693# credentials must be for a 'server' endpoint. Setting this 694# will enable TLS for all migrations. The default is unset, 695# resulting in unsecured migration at the QEMU level. (Since 2.7) 696# 697# @tls-hostname: hostname of the target host for the migration. This is 698# required when using x509 based TLS credentials and the 699# migration URI does not already include a hostname. For 700# example if using fd: or exec: based migration, the 701# hostname must be provided so that the server's x509 702# certificate identity can be validated. 
(Since 2.7) 703# 704# @tls-authz: ID of the 'authz' object subclass that provides access control 705# checking of the TLS x509 certificate distinguished name. 706# This object is only resolved at time of use, so can be deleted 707# and recreated on the fly while the migration server is active. 708# If missing, it will default to denying access (Since 4.0) 709# 710# @max-bandwidth: to set maximum speed for migration. maximum speed in 711# bytes per second. (Since 2.8) 712# 713# @downtime-limit: set maximum tolerated downtime for migration. maximum 714# downtime in milliseconds (Since 2.8) 715# 716# @x-checkpoint-delay: The delay time (in ms) between two COLO checkpoints in 717# periodic mode. (Since 2.8) 718# 719# @block-incremental: Affects how much storage is migrated when the 720# block migration capability is enabled. When false, the entire 721# storage backing chain is migrated into a flattened image at 722# the destination; when true, only the active qcow2 layer is 723# migrated and the destination must already have access to the 724# same backing chain as was used on the source. (since 2.10) 725# 726# @multifd-channels: Number of channels used to migrate data in 727# parallel. This is the same number that the 728# number of sockets used for migration. The 729# default value is 2 (since 4.0) 730# 731# @xbzrle-cache-size: cache size to be used by XBZRLE migration. It 732# needs to be a multiple of the target page size 733# and a power of 2 734# (Since 2.11) 735# 736# @max-postcopy-bandwidth: Background transfer bandwidth during postcopy. 737# Defaults to 0 (unlimited). In bytes per second. 738# (Since 3.0) 739# 740# @max-cpu-throttle: maximum cpu throttle percentage. 741# Defaults to 99. (Since 3.1) 742# 743# @multifd-compression: Which compression method to use. 744# Defaults to none. 
(Since 5.0) 745# 746# @multifd-zlib-level: Set the compression level to be used in live 747# migration, the compression level is an integer between 0 748# and 9, where 0 means no compression, 1 means the best 749# compression speed, and 9 means best compression ratio which 750# will consume more CPU. 751# Defaults to 1. (Since 5.0) 752# 753# @multifd-zstd-level: Set the compression level to be used in live 754# migration, the compression level is an integer between 0 755# and 20, where 0 means no compression, 1 means the best 756# compression speed, and 20 means best compression ratio which 757# will consume more CPU. 758# Defaults to 1. (Since 5.0) 759# 760# 761# @block-bitmap-mapping: Maps block nodes and bitmaps on them to 762# aliases for the purpose of dirty bitmap migration. Such 763# aliases may for example be the corresponding names on the 764# opposite site. 765# The mapping must be one-to-one, but not necessarily 766# complete: On the source, unmapped bitmaps and all bitmaps 767# on unmapped nodes will be ignored. On the destination, 768# encountering an unmapped alias in the incoming migration 769# stream will result in a report, and all further bitmap 770# migration data will then be discarded. 771# Note that the destination does not know about bitmaps it 772# does not receive, so there is no limitation or requirement 773# regarding the number of bitmaps received, or how they are 774# named, or on which nodes they are placed. 775# By default (when this parameter has never been set), bitmap 776# names are mapped to themselves. Nodes are mapped to their 777# block device name if there is one, and to their node name 778# otherwise. (Since 5.2) 779# 780# Features: 781# @unstable: Member @x-checkpoint-delay is experimental. 
782# 783# Since: 2.4 784## 785{ 'enum': 'MigrationParameter', 786 'data': ['announce-initial', 'announce-max', 787 'announce-rounds', 'announce-step', 788 'compress-level', 'compress-threads', 'decompress-threads', 789 'compress-wait-thread', 'throttle-trigger-threshold', 790 'cpu-throttle-initial', 'cpu-throttle-increment', 791 'cpu-throttle-tailslow', 792 'tls-creds', 'tls-hostname', 'tls-authz', 'max-bandwidth', 793 'downtime-limit', 794 { 'name': 'x-checkpoint-delay', 'features': [ 'unstable' ] }, 795 'block-incremental', 796 'multifd-channels', 797 'xbzrle-cache-size', 'max-postcopy-bandwidth', 798 'max-cpu-throttle', 'multifd-compression', 799 'multifd-zlib-level' ,'multifd-zstd-level', 800 'block-bitmap-mapping' ] } 801 802## 803# @MigrateSetParameters: 804# 805# @announce-initial: Initial delay (in milliseconds) before sending the first 806# announce (Since 4.0) 807# 808# @announce-max: Maximum delay (in milliseconds) between packets in the 809# announcement (Since 4.0) 810# 811# @announce-rounds: Number of self-announce packets sent after migration 812# (Since 4.0) 813# 814# @announce-step: Increase in delay (in milliseconds) between subsequent 815# packets in the announcement (Since 4.0) 816# 817# @compress-level: compression level 818# 819# @compress-threads: compression thread count 820# 821# @compress-wait-thread: Controls behavior when all compression threads are 822# currently busy. If true (default), wait for a free 823# compression thread to become available; otherwise, 824# send the page uncompressed. (Since 3.1) 825# 826# @decompress-threads: decompression thread count 827# 828# @throttle-trigger-threshold: The ratio of bytes_dirty_period and bytes_xfer_period 829# to trigger throttling. It is expressed as percentage. 830# The default value is 50. (Since 5.0) 831# 832# @cpu-throttle-initial: Initial percentage of time guest cpus are 833# throttled when migration auto-converge is activated. 834# The default value is 20. 
(Since 2.7) 835# 836# @cpu-throttle-increment: throttle percentage increase each time 837# auto-converge detects that migration is not making 838# progress. The default value is 10. (Since 2.7) 839# 840# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage 841# At the tail stage of throttling, the Guest is very 842# sensitive to CPU percentage while the @cpu-throttle 843# -increment is excessive usually at tail stage. 844# If this parameter is true, we will compute the ideal 845# CPU percentage used by the Guest, which may exactly make 846# the dirty rate match the dirty rate threshold. Then we 847# will choose a smaller throttle increment between the 848# one specified by @cpu-throttle-increment and the one 849# generated by ideal CPU percentage. 850# Therefore, it is compatible to traditional throttling, 851# meanwhile the throttle increment won't be excessive 852# at tail stage. 853# The default value is false. (Since 5.1) 854# 855# @tls-creds: ID of the 'tls-creds' object that provides credentials 856# for establishing a TLS connection over the migration data 857# channel. On the outgoing side of the migration, the credentials 858# must be for a 'client' endpoint, while for the incoming side the 859# credentials must be for a 'server' endpoint. Setting this 860# to a non-empty string enables TLS for all migrations. 861# An empty string means that QEMU will use plain text mode for 862# migration, rather than TLS (Since 2.9) 863# Previously (since 2.7), this was reported by omitting 864# tls-creds instead. 865# 866# @tls-hostname: hostname of the target host for the migration. This 867# is required when using x509 based TLS credentials and the 868# migration URI does not already include a hostname. For 869# example if using fd: or exec: based migration, the 870# hostname must be provided so that the server's x509 871# certificate identity can be validated. 
(Since 2.7) 872# An empty string means that QEMU will use the hostname 873# associated with the migration URI, if any. (Since 2.9) 874# Previously (since 2.7), this was reported by omitting 875# tls-hostname instead. 876# 877# @max-bandwidth: to set maximum speed for migration. maximum speed in 878# bytes per second. (Since 2.8) 879# 880# @downtime-limit: set maximum tolerated downtime for migration. maximum 881# downtime in milliseconds (Since 2.8) 882# 883# @x-checkpoint-delay: the delay time between two COLO checkpoints. (Since 2.8) 884# 885# @block-incremental: Affects how much storage is migrated when the 886# block migration capability is enabled. When false, the entire 887# storage backing chain is migrated into a flattened image at 888# the destination; when true, only the active qcow2 layer is 889# migrated and the destination must already have access to the 890# same backing chain as was used on the source. (since 2.10) 891# 892# @multifd-channels: Number of channels used to migrate data in 893# parallel. This is the same number that the 894# number of sockets used for migration. The 895# default value is 2 (since 4.0) 896# 897# @xbzrle-cache-size: cache size to be used by XBZRLE migration. It 898# needs to be a multiple of the target page size 899# and a power of 2 900# (Since 2.11) 901# 902# @max-postcopy-bandwidth: Background transfer bandwidth during postcopy. 903# Defaults to 0 (unlimited). In bytes per second. 904# (Since 3.0) 905# 906# @max-cpu-throttle: maximum cpu throttle percentage. 907# The default value is 99. (Since 3.1) 908# 909# @multifd-compression: Which compression method to use. 910# Defaults to none. (Since 5.0) 911# 912# @multifd-zlib-level: Set the compression level to be used in live 913# migration, the compression level is an integer between 0 914# and 9, where 0 means no compression, 1 means the best 915# compression speed, and 9 means best compression ratio which 916# will consume more CPU. 917# Defaults to 1. 
(Since 5.0) 918# 919# @multifd-zstd-level: Set the compression level to be used in live 920# migration, the compression level is an integer between 0 921# and 20, where 0 means no compression, 1 means the best 922# compression speed, and 20 means best compression ratio which 923# will consume more CPU. 924# Defaults to 1. (Since 5.0) 925# 926# @block-bitmap-mapping: Maps block nodes and bitmaps on them to 927# aliases for the purpose of dirty bitmap migration. Such 928# aliases may for example be the corresponding names on the 929# opposite site. 930# The mapping must be one-to-one, but not necessarily 931# complete: On the source, unmapped bitmaps and all bitmaps 932# on unmapped nodes will be ignored. On the destination, 933# encountering an unmapped alias in the incoming migration 934# stream will result in a report, and all further bitmap 935# migration data will then be discarded. 936# Note that the destination does not know about bitmaps it 937# does not receive, so there is no limitation or requirement 938# regarding the number of bitmaps received, or how they are 939# named, or on which nodes they are placed. 940# By default (when this parameter has never been set), bitmap 941# names are mapped to themselves. Nodes are mapped to their 942# block device name if there is one, and to their node name 943# otherwise. (Since 5.2) 944# 945# Features: 946# @unstable: Member @x-checkpoint-delay is experimental. 
947# 948# TODO: either fuse back into MigrationParameters, or make 949# MigrationParameters members mandatory 950# 951# Since: 2.4 952## 953{ 'struct': 'MigrateSetParameters', 954 'data': { '*announce-initial': 'size', 955 '*announce-max': 'size', 956 '*announce-rounds': 'size', 957 '*announce-step': 'size', 958 '*compress-level': 'uint8', 959 '*compress-threads': 'uint8', 960 '*compress-wait-thread': 'bool', 961 '*decompress-threads': 'uint8', 962 '*throttle-trigger-threshold': 'uint8', 963 '*cpu-throttle-initial': 'uint8', 964 '*cpu-throttle-increment': 'uint8', 965 '*cpu-throttle-tailslow': 'bool', 966 '*tls-creds': 'StrOrNull', 967 '*tls-hostname': 'StrOrNull', 968 '*tls-authz': 'StrOrNull', 969 '*max-bandwidth': 'size', 970 '*downtime-limit': 'uint64', 971 '*x-checkpoint-delay': { 'type': 'uint32', 972 'features': [ 'unstable' ] }, 973 '*block-incremental': 'bool', 974 '*multifd-channels': 'uint8', 975 '*xbzrle-cache-size': 'size', 976 '*max-postcopy-bandwidth': 'size', 977 '*max-cpu-throttle': 'uint8', 978 '*multifd-compression': 'MultiFDCompression', 979 '*multifd-zlib-level': 'uint8', 980 '*multifd-zstd-level': 'uint8', 981 '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ] } } 982 983## 984# @migrate-set-parameters: 985# 986# Set various migration parameters. 987# 988# Since: 2.4 989# 990# Example: 991# 992# -> { "execute": "migrate-set-parameters" , 993# "arguments": { "compress-level": 1 } } 994# <- { "return": {} } 995# 996## 997{ 'command': 'migrate-set-parameters', 'boxed': true, 998 'data': 'MigrateSetParameters' } 999 1000## 1001# @MigrationParameters: 1002# 1003# The optional members aren't actually optional. 
1004# 1005# @announce-initial: Initial delay (in milliseconds) before sending the 1006# first announce (Since 4.0) 1007# 1008# @announce-max: Maximum delay (in milliseconds) between packets in the 1009# announcement (Since 4.0) 1010# 1011# @announce-rounds: Number of self-announce packets sent after migration 1012# (Since 4.0) 1013# 1014# @announce-step: Increase in delay (in milliseconds) between subsequent 1015# packets in the announcement (Since 4.0) 1016# 1017# @compress-level: compression level 1018# 1019# @compress-threads: compression thread count 1020# 1021# @compress-wait-thread: Controls behavior when all compression threads are 1022# currently busy. If true (default), wait for a free 1023# compression thread to become available; otherwise, 1024# send the page uncompressed. (Since 3.1) 1025# 1026# @decompress-threads: decompression thread count 1027# 1028# @throttle-trigger-threshold: The ratio of bytes_dirty_period and bytes_xfer_period 1029# to trigger throttling. It is expressed as percentage. 1030# The default value is 50. (Since 5.0) 1031# 1032# @cpu-throttle-initial: Initial percentage of time guest cpus are 1033# throttled when migration auto-converge is activated. 1034# (Since 2.7) 1035# 1036# @cpu-throttle-increment: throttle percentage increase each time 1037# auto-converge detects that migration is not making 1038# progress. (Since 2.7) 1039# 1040# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage 1041# At the tail stage of throttling, the Guest is very 1042# sensitive to CPU percentage while the @cpu-throttle 1043# -increment is excessive usually at tail stage. 1044# If this parameter is true, we will compute the ideal 1045# CPU percentage used by the Guest, which may exactly make 1046# the dirty rate match the dirty rate threshold. Then we 1047# will choose a smaller throttle increment between the 1048# one specified by @cpu-throttle-increment and the one 1049# generated by ideal CPU percentage. 
1050# Therefore, it is compatible to traditional throttling, 1051# meanwhile the throttle increment won't be excessive 1052# at tail stage. 1053# The default value is false. (Since 5.1) 1054# 1055# @tls-creds: ID of the 'tls-creds' object that provides credentials 1056# for establishing a TLS connection over the migration data 1057# channel. On the outgoing side of the migration, the credentials 1058# must be for a 'client' endpoint, while for the incoming side the 1059# credentials must be for a 'server' endpoint. 1060# An empty string means that QEMU will use plain text mode for 1061# migration, rather than TLS (Since 2.7) 1062# Note: 2.8 reports this by omitting tls-creds instead. 1063# 1064# @tls-hostname: hostname of the target host for the migration. This 1065# is required when using x509 based TLS credentials and the 1066# migration URI does not already include a hostname. For 1067# example if using fd: or exec: based migration, the 1068# hostname must be provided so that the server's x509 1069# certificate identity can be validated. (Since 2.7) 1070# An empty string means that QEMU will use the hostname 1071# associated with the migration URI, if any. (Since 2.9) 1072# Note: 2.8 reports this by omitting tls-hostname instead. 1073# 1074# @tls-authz: ID of the 'authz' object subclass that provides access control 1075# checking of the TLS x509 certificate distinguished name. (Since 1076# 4.0) 1077# 1078# @max-bandwidth: to set maximum speed for migration. maximum speed in 1079# bytes per second. (Since 2.8) 1080# 1081# @downtime-limit: set maximum tolerated downtime for migration. maximum 1082# downtime in milliseconds (Since 2.8) 1083# 1084# @x-checkpoint-delay: the delay time between two COLO checkpoints. (Since 2.8) 1085# 1086# @block-incremental: Affects how much storage is migrated when the 1087# block migration capability is enabled. 
When false, the entire 1088# storage backing chain is migrated into a flattened image at 1089# the destination; when true, only the active qcow2 layer is 1090# migrated and the destination must already have access to the 1091# same backing chain as was used on the source. (since 2.10) 1092# 1093# @multifd-channels: Number of channels used to migrate data in 1094# parallel. This is the same number that the 1095# number of sockets used for migration. 1096# The default value is 2 (since 4.0) 1097# 1098# @xbzrle-cache-size: cache size to be used by XBZRLE migration. It 1099# needs to be a multiple of the target page size 1100# and a power of 2 1101# (Since 2.11) 1102# 1103# @max-postcopy-bandwidth: Background transfer bandwidth during postcopy. 1104# Defaults to 0 (unlimited). In bytes per second. 1105# (Since 3.0) 1106# 1107# @max-cpu-throttle: maximum cpu throttle percentage. 1108# Defaults to 99. 1109# (Since 3.1) 1110# 1111# @multifd-compression: Which compression method to use. 1112# Defaults to none. (Since 5.0) 1113# 1114# @multifd-zlib-level: Set the compression level to be used in live 1115# migration, the compression level is an integer between 0 1116# and 9, where 0 means no compression, 1 means the best 1117# compression speed, and 9 means best compression ratio which 1118# will consume more CPU. 1119# Defaults to 1. (Since 5.0) 1120# 1121# @multifd-zstd-level: Set the compression level to be used in live 1122# migration, the compression level is an integer between 0 1123# and 20, where 0 means no compression, 1 means the best 1124# compression speed, and 20 means best compression ratio which 1125# will consume more CPU. 1126# Defaults to 1. (Since 5.0) 1127# 1128# @block-bitmap-mapping: Maps block nodes and bitmaps on them to 1129# aliases for the purpose of dirty bitmap migration. Such 1130# aliases may for example be the corresponding names on the 1131# opposite site. 
1132# The mapping must be one-to-one, but not necessarily 1133# complete: On the source, unmapped bitmaps and all bitmaps 1134# on unmapped nodes will be ignored. On the destination, 1135# encountering an unmapped alias in the incoming migration 1136# stream will result in a report, and all further bitmap 1137# migration data will then be discarded. 1138# Note that the destination does not know about bitmaps it 1139# does not receive, so there is no limitation or requirement 1140# regarding the number of bitmaps received, or how they are 1141# named, or on which nodes they are placed. 1142# By default (when this parameter has never been set), bitmap 1143# names are mapped to themselves. Nodes are mapped to their 1144# block device name if there is one, and to their node name 1145# otherwise. (Since 5.2) 1146# 1147# Features: 1148# @unstable: Member @x-checkpoint-delay is experimental. 1149# 1150# Since: 2.4 1151## 1152{ 'struct': 'MigrationParameters', 1153 'data': { '*announce-initial': 'size', 1154 '*announce-max': 'size', 1155 '*announce-rounds': 'size', 1156 '*announce-step': 'size', 1157 '*compress-level': 'uint8', 1158 '*compress-threads': 'uint8', 1159 '*compress-wait-thread': 'bool', 1160 '*decompress-threads': 'uint8', 1161 '*throttle-trigger-threshold': 'uint8', 1162 '*cpu-throttle-initial': 'uint8', 1163 '*cpu-throttle-increment': 'uint8', 1164 '*cpu-throttle-tailslow': 'bool', 1165 '*tls-creds': 'str', 1166 '*tls-hostname': 'str', 1167 '*tls-authz': 'str', 1168 '*max-bandwidth': 'size', 1169 '*downtime-limit': 'uint64', 1170 '*x-checkpoint-delay': { 'type': 'uint32', 1171 'features': [ 'unstable' ] }, 1172 '*block-incremental': 'bool', 1173 '*multifd-channels': 'uint8', 1174 '*xbzrle-cache-size': 'size', 1175 '*max-postcopy-bandwidth': 'size', 1176 '*max-cpu-throttle': 'uint8', 1177 '*multifd-compression': 'MultiFDCompression', 1178 '*multifd-zlib-level': 'uint8', 1179 '*multifd-zstd-level': 'uint8', 1180 '*block-bitmap-mapping': [ 
'BitmapMigrationNodeAlias' ] } } 1181 1182## 1183# @query-migrate-parameters: 1184# 1185# Returns information about the current migration parameters 1186# 1187# Returns: @MigrationParameters 1188# 1189# Since: 2.4 1190# 1191# Example: 1192# 1193# -> { "execute": "query-migrate-parameters" } 1194# <- { "return": { 1195# "decompress-threads": 2, 1196# "cpu-throttle-increment": 10, 1197# "compress-threads": 8, 1198# "compress-level": 1, 1199# "cpu-throttle-initial": 20, 1200# "max-bandwidth": 33554432, 1201# "downtime-limit": 300 1202# } 1203# } 1204# 1205## 1206{ 'command': 'query-migrate-parameters', 1207 'returns': 'MigrationParameters' } 1208 1209## 1210# @migrate-start-postcopy: 1211# 1212# Followup to a migration command to switch the migration to postcopy mode. 1213# The postcopy-ram capability must be set on both source and destination 1214# before the original migration command. 1215# 1216# Since: 2.5 1217# 1218# Example: 1219# 1220# -> { "execute": "migrate-start-postcopy" } 1221# <- { "return": {} } 1222# 1223## 1224{ 'command': 'migrate-start-postcopy' } 1225 1226## 1227# @MIGRATION: 1228# 1229# Emitted when a migration event happens 1230# 1231# @status: @MigrationStatus describing the current migration status. 
1232# 1233# Since: 2.4 1234# 1235# Example: 1236# 1237# <- {"timestamp": {"seconds": 1432121972, "microseconds": 744001}, 1238# "event": "MIGRATION", 1239# "data": {"status": "completed"} } 1240# 1241## 1242{ 'event': 'MIGRATION', 1243 'data': {'status': 'MigrationStatus'}} 1244 1245## 1246# @MIGRATION_PASS: 1247# 1248# Emitted from the source side of a migration at the start of each pass 1249# (when it syncs the dirty bitmap) 1250# 1251# @pass: An incrementing count (starting at 1 on the first pass) 1252# 1253# Since: 2.6 1254# 1255# Example: 1256# 1257# <- { "timestamp": {"seconds": 1449669631, "microseconds": 239225}, 1258# "event": "MIGRATION_PASS", "data": {"pass": 2} } 1259# 1260## 1261{ 'event': 'MIGRATION_PASS', 1262 'data': { 'pass': 'int' } } 1263 1264## 1265# @COLOMessage: 1266# 1267# The message transmission between Primary side and Secondary side. 1268# 1269# @checkpoint-ready: Secondary VM (SVM) is ready for checkpointing 1270# 1271# @checkpoint-request: Primary VM (PVM) tells SVM to prepare for checkpointing 1272# 1273# @checkpoint-reply: SVM gets PVM's checkpoint request 1274# 1275# @vmstate-send: VM's state will be sent by PVM. 1276# 1277# @vmstate-size: The total size of VMstate. 1278# 1279# @vmstate-received: VM's state has been received by SVM. 1280# 1281# @vmstate-loaded: VM's state has been loaded by SVM. 1282# 1283# Since: 2.8 1284## 1285{ 'enum': 'COLOMessage', 1286 'data': [ 'checkpoint-ready', 'checkpoint-request', 'checkpoint-reply', 1287 'vmstate-send', 'vmstate-size', 'vmstate-received', 1288 'vmstate-loaded' ] } 1289 1290## 1291# @COLOMode: 1292# 1293# The COLO current mode. 1294# 1295# @none: COLO is disabled. 1296# 1297# @primary: COLO node in primary side. 1298# 1299# @secondary: COLO node in slave side. 
1300# 1301# Since: 2.8 1302## 1303{ 'enum': 'COLOMode', 1304 'data': [ 'none', 'primary', 'secondary'] } 1305 1306## 1307# @FailoverStatus: 1308# 1309# An enumeration of COLO failover status 1310# 1311# @none: no failover has ever happened 1312# 1313# @require: got failover requirement but not handled 1314# 1315# @active: in the process of doing failover 1316# 1317# @completed: finish the process of failover 1318# 1319# @relaunch: restart the failover process, from 'none' -> 'completed' (Since 2.9) 1320# 1321# Since: 2.8 1322## 1323{ 'enum': 'FailoverStatus', 1324 'data': [ 'none', 'require', 'active', 'completed', 'relaunch' ] } 1325 1326## 1327# @COLO_EXIT: 1328# 1329# Emitted when VM finishes COLO mode due to some errors happening or 1330# at the request of users. 1331# 1332# @mode: report COLO mode when COLO exited. 1333# 1334# @reason: describes the reason for the COLO exit. 1335# 1336# Since: 3.1 1337# 1338# Example: 1339# 1340# <- { "timestamp": {"seconds": 2032141960, "microseconds": 417172}, 1341# "event": "COLO_EXIT", "data": {"mode": "primary", "reason": "request" } } 1342# 1343## 1344{ 'event': 'COLO_EXIT', 1345 'data': {'mode': 'COLOMode', 'reason': 'COLOExitReason' } } 1346 1347## 1348# @COLOExitReason: 1349# 1350# The reason for a COLO exit. 1351# 1352# @none: failover has never happened. This state does not occur 1353# in the COLO_EXIT event, and is only visible in the result of 1354# query-colo-status. 1355# 1356# @request: COLO exit is due to an external request. 1357# 1358# @error: COLO exit is due to an internal error. 1359# 1360# @processing: COLO is currently handling a failover (since 4.0). 1361# 1362# Since: 3.1 1363## 1364{ 'enum': 'COLOExitReason', 1365 'data': [ 'none', 'request', 'error' , 'processing' ] } 1366 1367## 1368# @x-colo-lost-heartbeat: 1369# 1370# Tell qemu that heartbeat is lost, request it to do takeover procedures. 1371# If this command is sent to the PVM, the Primary side will exit COLO mode. 
1372# If sent to the Secondary, the Secondary side will run failover work, 1373# then takes over server operation to become the service VM. 1374# 1375# Features: 1376# @unstable: This command is experimental. 1377# 1378# Since: 2.8 1379# 1380# Example: 1381# 1382# -> { "execute": "x-colo-lost-heartbeat" } 1383# <- { "return": {} } 1384# 1385## 1386{ 'command': 'x-colo-lost-heartbeat', 1387 'features': [ 'unstable' ] } 1388 1389## 1390# @migrate_cancel: 1391# 1392# Cancel the current executing migration process. 1393# 1394# Returns: nothing on success 1395# 1396# Notes: This command succeeds even if there is no migration process running. 1397# 1398# Since: 0.14 1399# 1400# Example: 1401# 1402# -> { "execute": "migrate_cancel" } 1403# <- { "return": {} } 1404# 1405## 1406{ 'command': 'migrate_cancel' } 1407 1408## 1409# @migrate-continue: 1410# 1411# Continue migration when it's in a paused state. 1412# 1413# @state: The state the migration is currently expected to be in 1414# 1415# Returns: nothing on success 1416# 1417# Since: 2.11 1418# 1419# Example: 1420# 1421# -> { "execute": "migrate-continue" , "arguments": 1422# { "state": "pre-switchover" } } 1423# <- { "return": {} } 1424## 1425{ 'command': 'migrate-continue', 'data': {'state': 'MigrationStatus'} } 1426 1427## 1428# @migrate: 1429# 1430# Migrates the current running guest to another Virtual Machine. 1431# 1432# @uri: the Uniform Resource Identifier of the destination VM 1433# 1434# @blk: do block migration (full disk copy) 1435# 1436# @inc: incremental disk copy migration 1437# 1438# @detach: this argument exists only for compatibility reasons and 1439# is ignored by QEMU 1440# 1441# @resume: resume one paused migration, default "off". (since 3.0) 1442# 1443# Returns: nothing on success 1444# 1445# Since: 0.14 1446# 1447# Notes: 1448# 1449# 1. The 'query-migrate' command should be used to check migration's progress 1450# and final result (this information is provided by the 'status' member) 1451# 1452# 2. 
All boolean arguments default to false 1453# 1454# 3. The user Monitor's "detach" argument is invalid in QMP and should not 1455# be used 1456# 1457# Example: 1458# 1459# -> { "execute": "migrate", "arguments": { "uri": "tcp:0:4446" } } 1460# <- { "return": {} } 1461# 1462## 1463{ 'command': 'migrate', 1464 'data': {'uri': 'str', '*blk': 'bool', '*inc': 'bool', 1465 '*detach': 'bool', '*resume': 'bool' } } 1466 1467## 1468# @migrate-incoming: 1469# 1470# Start an incoming migration, the qemu must have been started 1471# with -incoming defer 1472# 1473# @uri: The Uniform Resource Identifier identifying the source or 1474# address to listen on 1475# 1476# Returns: nothing on success 1477# 1478# Since: 2.3 1479# 1480# Notes: 1481# 1482# 1. It's a bad idea to use a string for the uri, but it needs to stay 1483# compatible with -incoming and the format of the uri is already exposed 1484# above libvirt. 1485# 1486# 2. QEMU must be started with -incoming defer to allow migrate-incoming to 1487# be used. 1488# 1489# 3. The uri format is the same as for -incoming 1490# 1491# Example: 1492# 1493# -> { "execute": "migrate-incoming", 1494# "arguments": { "uri": "tcp::4446" } } 1495# <- { "return": {} } 1496# 1497## 1498{ 'command': 'migrate-incoming', 'data': {'uri': 'str' } } 1499 1500## 1501# @xen-save-devices-state: 1502# 1503# Save the state of all devices to file. The RAM and the block devices 1504# of the VM are not saved by this command. 1505# 1506# @filename: the file to save the state of the devices to as binary 1507# data. See xen-save-devices-state.txt for a description of the binary 1508# format. 1509# 1510# @live: Optional argument to ask QEMU to treat this command as part of a live 1511# migration. Default to true. 
(since 2.11) 1512# 1513# Returns: Nothing on success 1514# 1515# Since: 1.1 1516# 1517# Example: 1518# 1519# -> { "execute": "xen-save-devices-state", 1520# "arguments": { "filename": "/tmp/save" } } 1521# <- { "return": {} } 1522# 1523## 1524{ 'command': 'xen-save-devices-state', 1525 'data': {'filename': 'str', '*live':'bool' } } 1526 1527## 1528# @xen-set-global-dirty-log: 1529# 1530# Enable or disable the global dirty log mode. 1531# 1532# @enable: true to enable, false to disable. 1533# 1534# Returns: nothing 1535# 1536# Since: 1.3 1537# 1538# Example: 1539# 1540# -> { "execute": "xen-set-global-dirty-log", 1541# "arguments": { "enable": true } } 1542# <- { "return": {} } 1543# 1544## 1545{ 'command': 'xen-set-global-dirty-log', 'data': { 'enable': 'bool' } } 1546 1547## 1548# @xen-load-devices-state: 1549# 1550# Load the state of all devices from file. The RAM and the block devices 1551# of the VM are not loaded by this command. 1552# 1553# @filename: the file to load the state of the devices from as binary 1554# data. See xen-save-devices-state.txt for a description of the binary 1555# format. 1556# 1557# Since: 2.7 1558# 1559# Example: 1560# 1561# -> { "execute": "xen-load-devices-state", 1562# "arguments": { "filename": "/tmp/resume" } } 1563# <- { "return": {} } 1564# 1565## 1566{ 'command': 'xen-load-devices-state', 'data': {'filename': 'str'} } 1567 1568## 1569# @xen-set-replication: 1570# 1571# Enable or disable replication. 1572# 1573# @enable: true to enable, false to disable. 1574# 1575# @primary: true for primary or false for secondary. 1576# 1577# @failover: true to do failover, false to stop. but cannot be 1578# specified if 'enable' is true. default value is false. 1579# 1580# Returns: nothing. 
1581# 1582# Example: 1583# 1584# -> { "execute": "xen-set-replication", 1585# "arguments": {"enable": true, "primary": false} } 1586# <- { "return": {} } 1587# 1588# Since: 2.9 1589## 1590{ 'command': 'xen-set-replication', 1591 'data': { 'enable': 'bool', 'primary': 'bool', '*failover' : 'bool' }, 1592 'if': 'CONFIG_REPLICATION' } 1593 1594## 1595# @ReplicationStatus: 1596# 1597# The result format for 'query-xen-replication-status'. 1598# 1599# @error: true if an error happened, false if replication is normal. 1600# 1601# @desc: the human readable error description string, when 1602# @error is 'true'. 1603# 1604# Since: 2.9 1605## 1606{ 'struct': 'ReplicationStatus', 1607 'data': { 'error': 'bool', '*desc': 'str' }, 1608 'if': 'CONFIG_REPLICATION' } 1609 1610## 1611# @query-xen-replication-status: 1612# 1613# Query replication status while the vm is running. 1614# 1615# Returns: A @ReplicationStatus object showing the status. 1616# 1617# Example: 1618# 1619# -> { "execute": "query-xen-replication-status" } 1620# <- { "return": { "error": false } } 1621# 1622# Since: 2.9 1623## 1624{ 'command': 'query-xen-replication-status', 1625 'returns': 'ReplicationStatus', 1626 'if': 'CONFIG_REPLICATION' } 1627 1628## 1629# @xen-colo-do-checkpoint: 1630# 1631# Xen uses this command to notify replication to trigger a checkpoint. 1632# 1633# Returns: nothing. 1634# 1635# Example: 1636# 1637# -> { "execute": "xen-colo-do-checkpoint" } 1638# <- { "return": {} } 1639# 1640# Since: 2.9 1641## 1642{ 'command': 'xen-colo-do-checkpoint', 1643 'if': 'CONFIG_REPLICATION' } 1644 1645## 1646# @COLOStatus: 1647# 1648# The result format for 'query-colo-status'. 1649# 1650# @mode: COLO running mode. If COLO is running, this field will return 1651# 'primary' or 'secondary'. 1652# 1653# @last-mode: COLO last running mode. If COLO is running, this field 1654# will return same like mode field, after failover we can 1655# use this field to get last colo mode. 
(since 4.0) 1656# 1657# @reason: describes the reason for the COLO exit. 1658# 1659# Since: 3.1 1660## 1661{ 'struct': 'COLOStatus', 1662 'data': { 'mode': 'COLOMode', 'last-mode': 'COLOMode', 1663 'reason': 'COLOExitReason' } } 1664 1665## 1666# @query-colo-status: 1667# 1668# Query COLO status while the vm is running. 1669# 1670# Returns: A @COLOStatus object showing the status. 1671# 1672# Example: 1673# 1674# -> { "execute": "query-colo-status" } 1675# <- { "return": { "mode": "primary", "last-mode": "none", "reason": "request" } } 1676# 1677# Since: 3.1 1678## 1679{ 'command': 'query-colo-status', 1680 'returns': 'COLOStatus' } 1681 1682## 1683# @migrate-recover: 1684# 1685# Provide a recovery migration stream URI. 1686# 1687# @uri: the URI to be used for the recovery of migration stream. 1688# 1689# Returns: nothing. 1690# 1691# Example: 1692# 1693# -> { "execute": "migrate-recover", 1694# "arguments": { "uri": "tcp:192.168.1.200:12345" } } 1695# <- { "return": {} } 1696# 1697# Since: 3.0 1698## 1699{ 'command': 'migrate-recover', 1700 'data': { 'uri': 'str' }, 1701 'allow-oob': true } 1702 1703## 1704# @migrate-pause: 1705# 1706# Pause a migration. Currently it only supports postcopy. 1707# 1708# Returns: nothing. 1709# 1710# Example: 1711# 1712# -> { "execute": "migrate-pause" } 1713# <- { "return": {} } 1714# 1715# Since: 3.0 1716## 1717{ 'command': 'migrate-pause', 'allow-oob': true } 1718 1719## 1720# @UNPLUG_PRIMARY: 1721# 1722# Emitted from source side of a migration when migration state is 1723# WAIT_UNPLUG. Device was unplugged by guest operating system. 1724# Device resources in QEMU are kept on standby to be able to re-plug it in case 1725# of migration failure. 
1726# 1727# @device-id: QEMU device id of the unplugged device 1728# 1729# Since: 4.2 1730# 1731# Example: 1732# 1733# <- { "event": "UNPLUG_PRIMARY", 1734# "data": { "device-id": "hostdev0" }, 1735# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } } 1736# 1737## 1738{ 'event': 'UNPLUG_PRIMARY', 1739 'data': { 'device-id': 'str' } } 1740 1741## 1742# @DirtyRateVcpu: 1743# 1744# Dirty rate of vcpu. 1745# 1746# @id: vcpu index. 1747# 1748# @dirty-rate: dirty rate. 1749# 1750# Since: 6.2 1751## 1752{ 'struct': 'DirtyRateVcpu', 1753 'data': { 'id': 'int', 'dirty-rate': 'int64' } } 1754 1755## 1756# @DirtyRateStatus: 1757# 1758# An enumeration of dirtyrate status. 1759# 1760# @unstarted: the dirtyrate thread has not been started. 1761# 1762# @measuring: the dirtyrate thread is measuring. 1763# 1764# @measured: the dirtyrate thread has measured and results are available. 1765# 1766# Since: 5.2 1767## 1768{ 'enum': 'DirtyRateStatus', 1769 'data': [ 'unstarted', 'measuring', 'measured'] } 1770 1771## 1772# @DirtyRateMeasureMode: 1773# 1774# An enumeration of mode of measuring dirtyrate. 1775# 1776# @page-sampling: calculate dirtyrate by sampling pages. 1777# 1778# @dirty-ring: calculate dirtyrate by dirty ring. 1779# 1780# @dirty-bitmap: calculate dirtyrate by dirty bitmap. 1781# 1782# Since: 6.2 1783## 1784{ 'enum': 'DirtyRateMeasureMode', 1785 'data': ['page-sampling', 'dirty-ring', 'dirty-bitmap'] } 1786 1787## 1788# @DirtyRateInfo: 1789# 1790# Information about current dirty page rate of vm. 1791# 1792# @dirty-rate: an estimate of the dirty page rate of the VM in units of 1793# MB/s, present only when estimating the rate has completed. 
1794# 1795# @status: status containing dirtyrate query status includes 1796# 'unstarted' or 'measuring' or 'measured' 1797# 1798# @start-time: start time in units of second for calculation 1799# 1800# @calc-time: time in units of second for sample dirty pages 1801# 1802# @sample-pages: page count per GB for sample dirty pages 1803# the default value is 512 (since 6.1) 1804# 1805# @mode: mode containing method of calculate dirtyrate includes 1806# 'page-sampling' and 'dirty-ring' (Since 6.2) 1807# 1808# @vcpu-dirty-rate: dirtyrate for each vcpu if dirty-ring 1809# mode specified (Since 6.2) 1810# 1811# Since: 5.2 1812## 1813{ 'struct': 'DirtyRateInfo', 1814 'data': {'*dirty-rate': 'int64', 1815 'status': 'DirtyRateStatus', 1816 'start-time': 'int64', 1817 'calc-time': 'int64', 1818 'sample-pages': 'uint64', 1819 'mode': 'DirtyRateMeasureMode', 1820 '*vcpu-dirty-rate': [ 'DirtyRateVcpu' ] } } 1821 1822## 1823# @calc-dirty-rate: 1824# 1825# start calculating dirty page rate for vm 1826# 1827# @calc-time: time in units of second for sample dirty pages 1828# 1829# @sample-pages: page count per GB for sample dirty pages 1830# the default value is 512 (since 6.1) 1831# 1832# @mode: mechanism of calculating dirtyrate includes 1833# 'page-sampling' and 'dirty-ring' (Since 6.1) 1834# 1835# Since: 5.2 1836# 1837# Example: 1838# 1839# -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 1, 1840# 'sample-pages': 512} } 1841# <- { "return": {} } 1842# 1843## 1844{ 'command': 'calc-dirty-rate', 'data': {'calc-time': 'int64', 1845 '*sample-pages': 'int', 1846 '*mode': 'DirtyRateMeasureMode'} } 1847 1848## 1849# @query-dirty-rate: 1850# 1851# query dirty page rate in units of MB/s for vm 1852# 1853# Since: 5.2 1854## 1855{ 'command': 'query-dirty-rate', 'returns': 'DirtyRateInfo' } 1856 1857## 1858# @DirtyLimitInfo: 1859# 1860# Dirty page rate limit information of a virtual CPU. 1861# 1862# @cpu-index: index of a virtual CPU. 
1863# 1864# @limit-rate: upper limit of dirty page rate (MB/s) for a virtual 1865# CPU, 0 means unlimited. 1866# 1867# @current-rate: current dirty page rate (MB/s) for a virtual CPU. 1868# 1869# Since: 7.1 1870# 1871## 1872{ 'struct': 'DirtyLimitInfo', 1873 'data': { 'cpu-index': 'int', 1874 'limit-rate': 'uint64', 1875 'current-rate': 'uint64' } } 1876 1877## 1878# @set-vcpu-dirty-limit: 1879# 1880# Set the upper limit of dirty page rate for virtual CPUs. 1881# 1882# Requires KVM with accelerator property "dirty-ring-size" set. 1883# A virtual CPU's dirty page rate is a measure of its memory load. 1884# To observe dirty page rates, use @calc-dirty-rate. 1885# 1886# @cpu-index: index of a virtual CPU, default is all. 1887# 1888# @dirty-rate: upper limit of dirty page rate (MB/s) for virtual CPUs. 1889# 1890# Since: 7.1 1891# 1892# Example: 1893# 1894# -> {"execute": "set-vcpu-dirty-limit"} 1895# "arguments": { "dirty-rate": 200, 1896# "cpu-index": 1 } } 1897# <- { "return": {} } 1898# 1899## 1900{ 'command': 'set-vcpu-dirty-limit', 1901 'data': { '*cpu-index': 'int', 1902 'dirty-rate': 'uint64' } } 1903 1904## 1905# @cancel-vcpu-dirty-limit: 1906# 1907# Cancel the upper limit of dirty page rate for virtual CPUs. 1908# 1909# Cancel the dirty page limit for the vCPU which has been set with 1910# set-vcpu-dirty-limit command. Note that this command requires 1911# support from dirty ring, same as the "set-vcpu-dirty-limit". 1912# 1913# @cpu-index: index of a virtual CPU, default is all. 1914# 1915# Since: 7.1 1916# 1917# Example: 1918# 1919# -> {"execute": "cancel-vcpu-dirty-limit"}, 1920# "arguments": { "cpu-index": 1 } } 1921# <- { "return": {} } 1922# 1923## 1924{ 'command': 'cancel-vcpu-dirty-limit', 1925 'data': { '*cpu-index': 'int'} } 1926 1927## 1928# @query-vcpu-dirty-limit: 1929# 1930# Returns information about virtual CPU dirty page rate limits, if any. 
1931# 1932# Since: 7.1 1933# 1934# Example: 1935# 1936# -> {"execute": "query-vcpu-dirty-limit"} 1937# <- {"return": [ 1938# { "limit-rate": 60, "current-rate": 3, "cpu-index": 0}, 1939# { "limit-rate": 60, "current-rate": 3, "cpu-index": 1}]} 1940# 1941## 1942{ 'command': 'query-vcpu-dirty-limit', 1943 'returns': [ 'DirtyLimitInfo' ] } 1944 1945## 1946# @MigrationThreadInfo: 1947# 1948# Information about migrationthreads 1949# 1950# @name: the name of migration thread 1951# 1952# @thread-id: ID of the underlying host thread 1953# 1954# Since: 7.2 1955## 1956{ 'struct': 'MigrationThreadInfo', 1957 'data': {'name': 'str', 1958 'thread-id': 'int'} } 1959 1960## 1961# @query-migrationthreads: 1962# 1963# Returns information of migration threads 1964# 1965# data: migration thread name 1966# 1967# Returns: information about migration threads 1968# 1969# Since: 7.2 1970## 1971{ 'command': 'query-migrationthreads', 1972 'returns': ['MigrationThreadInfo'] } 1973 1974## 1975# @snapshot-save: 1976# 1977# Save a VM snapshot 1978# 1979# @job-id: identifier for the newly created job 1980# @tag: name of the snapshot to create 1981# @vmstate: block device node name to save vmstate to 1982# @devices: list of block device node names to save a snapshot to 1983# 1984# Applications should not assume that the snapshot save is complete 1985# when this command returns. The job commands / events must be used 1986# to determine completion and to fetch details of any errors that arise. 1987# 1988# Note that execution of the guest CPUs may be stopped during the 1989# time it takes to save the snapshot. A future version of QEMU 1990# may ensure CPUs are executing continuously. 1991# 1992# It is strongly recommended that @devices contain all writable 1993# block device nodes if a consistent snapshot is required. 
#
# If @tag already exists, an error will be reported
#
# Returns: nothing
#
# Example:
#
# -> { "execute": "snapshot-save",
#      "arguments": {
#         "job-id": "snapsave0",
#         "tag": "my-snap",
#         "vmstate": "disk0",
#         "devices": ["disk0", "disk1"]
#      }
#    }
# <- { "return": { } }
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1432121972, "microseconds": 744001},
#     "data": {"status": "created", "id": "snapsave0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1432122172, "microseconds": 744001},
#     "data": {"status": "running", "id": "snapsave0"}}
# <- {"event": "STOP",
#     "timestamp": {"seconds": 1432122372, "microseconds": 744001} }
# <- {"event": "RESUME",
#     "timestamp": {"seconds": 1432122572, "microseconds": 744001} }
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1432122772, "microseconds": 744001},
#     "data": {"status": "waiting", "id": "snapsave0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1432122972, "microseconds": 744001},
#     "data": {"status": "pending", "id": "snapsave0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1432123172, "microseconds": 744001},
#     "data": {"status": "concluded", "id": "snapsave0"}}
# -> {"execute": "query-jobs"}
# <- {"return": [{"current-progress": 1,
#                 "status": "concluded",
#                 "total-progress": 1,
#                 "type": "snapshot-save",
#                 "id": "snapsave0"}]}
#
# Since: 6.0
##
{ 'command': 'snapshot-save',
  'data': { 'job-id': 'str',
            'tag': 'str',
            'vmstate': 'str',
            'devices': ['str'] } }

##
# @snapshot-load:
#
# Load a VM snapshot
#
# @job-id: identifier for the newly created job
# @tag: name of the snapshot to load.
# @vmstate: block device node name to load vmstate from
# @devices: list of block device node names to load a snapshot from
#
# Applications should not assume that the snapshot load is complete
# when this command returns.  The job commands / events must be used
# to determine completion and to fetch details of any errors that arise.
#
# Note that execution of the guest CPUs will be stopped during the
# time it takes to load the snapshot.
#
# It is strongly recommended that @devices contain all writable
# block device nodes that can have changed since the original
# @snapshot-save command execution.
#
# Returns: nothing
#
# Example:
#
# -> { "execute": "snapshot-load",
#      "arguments": {
#         "job-id": "snapload0",
#         "tag": "my-snap",
#         "vmstate": "disk0",
#         "devices": ["disk0", "disk1"]
#      }
#    }
# <- { "return": { } }
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1472124172, "microseconds": 744001},
#     "data": {"status": "created", "id": "snapload0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1472125172, "microseconds": 744001},
#     "data": {"status": "running", "id": "snapload0"}}
# <- {"event": "STOP",
#     "timestamp": {"seconds": 1472125472, "microseconds": 744001} }
# <- {"event": "RESUME",
#     "timestamp": {"seconds": 1472125872, "microseconds": 744001} }
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1472126172, "microseconds": 744001},
#     "data": {"status": "waiting", "id": "snapload0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1472127172, "microseconds": 744001},
#     "data": {"status": "pending", "id": "snapload0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1472128172, "microseconds": 744001},
#     "data": {"status": "concluded", "id": "snapload0"}}
# -> {"execute": "query-jobs"}
# <- {"return": [{"current-progress": 1,
#                 "status": "concluded",
#                 "total-progress": 1,
#                 "type": "snapshot-load",
#                 "id": "snapload0"}]}
#
# Since: 6.0
##
{ 'command': 'snapshot-load',
  'data': { 'job-id': 'str',
            'tag': 'str',
            'vmstate': 'str',
            'devices': ['str'] } }

##
# @snapshot-delete:
#
# Delete a VM snapshot
#
# @job-id: identifier for the newly created job
# @tag: name of the snapshot to delete.
# @devices: list of block device node names to delete a snapshot from
#
# Applications should not assume that the snapshot delete is complete
# when this command returns.  The job commands / events must be used
# to determine completion and to fetch details of any errors that arise.
#
# Returns: nothing
#
# Example:
#
# -> { "execute": "snapshot-delete",
#      "arguments": {
#         "job-id": "snapdelete0",
#         "tag": "my-snap",
#         "devices": ["disk0", "disk1"]
#      }
#    }
# <- { "return": { } }
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1442124172, "microseconds": 744001},
#     "data": {"status": "created", "id": "snapdelete0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1442125172, "microseconds": 744001},
#     "data": {"status": "running", "id": "snapdelete0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1442126172, "microseconds": 744001},
#     "data": {"status": "waiting", "id": "snapdelete0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1442127172, "microseconds": 744001},
#     "data": {"status": "pending", "id": "snapdelete0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1442128172, "microseconds": 744001},
#     "data": {"status": "concluded", "id": "snapdelete0"}}
# -> {"execute": "query-jobs"}
# <- {"return": [{"current-progress": 1,
#                 "status": "concluded",
#                 "total-progress": 1,
#                 "type": "snapshot-delete",
#                 "id": "snapdelete0"}]}
#
# Since: 6.0
##
{ 'command': 'snapshot-delete',
  'data': { 'job-id': 'str',
            'tag': 'str',
            'devices': ['str'] } }