1# -*- Mode: Python -*- 2# vim: filetype=python 3# 4 5## 6# = Migration 7## 8 9{ 'include': 'common.json' } 10{ 'include': 'sockets.json' } 11 12## 13# @MigrationStats: 14# 15# Detailed migration status. 16# 17# @transferred: amount of bytes already transferred to the target VM 18# 19# @remaining: amount of bytes remaining to be transferred to the target VM 20# 21# @total: total amount of bytes involved in the migration process 22# 23# @duplicate: number of duplicate (zero) pages (since 1.2) 24# 25# @skipped: number of skipped zero pages (since 1.5) 26# 27# @normal: number of normal pages (since 1.2) 28# 29# @normal-bytes: number of normal bytes sent (since 1.2) 30# 31# @dirty-pages-rate: number of pages dirtied by second by the 32# guest (since 1.3) 33# 34# @mbps: throughput in megabits/sec. (since 1.6) 35# 36# @dirty-sync-count: number of times that dirty ram was synchronized (since 2.1) 37# 38# @postcopy-requests: The number of page requests received from the destination 39# (since 2.7) 40# 41# @page-size: The number of bytes per page for the various page-based 42# statistics (since 2.10) 43# 44# @multifd-bytes: The number of bytes sent through multifd (since 3.0) 45# 46# @pages-per-second: the number of memory pages transferred per second 47# (Since 4.0) 48# 49# @precopy-bytes: The number of bytes sent in the pre-copy phase 50# (since 7.0). 51# 52# @downtime-bytes: The number of bytes sent while the guest is paused 53# (since 7.0). 54# 55# @postcopy-bytes: The number of bytes sent during the post-copy phase 56# (since 7.0). 57# 58# @dirty-sync-missed-zero-copy: Number of times dirty RAM synchronization could 59# not avoid copying dirty pages. This is between 60# 0 and @dirty-sync-count * @multifd-channels. 61# (since 7.1) 62# Since: 0.14 63## 64{ 'struct': 'MigrationStats', 65 'data': {'transferred': 'int', 'remaining': 'int', 'total': 'int' , 66 'duplicate': 'int', 'skipped': 'int', 'normal': 'int', 67 'normal-bytes': 'int', 'dirty-pages-rate' : 'int', 68 'mbps' : 'number', 'dirty-sync-count' : 'int', 69 'postcopy-requests' : 'int', 'page-size' : 'int', 70 'multifd-bytes' : 'uint64', 'pages-per-second' : 'uint64', 71 'precopy-bytes' : 'uint64', 'downtime-bytes' : 'uint64', 72 'postcopy-bytes' : 'uint64', 73 'dirty-sync-missed-zero-copy' : 'uint64' } } 74 75## 76# @XBZRLECacheStats: 77# 78# Detailed XBZRLE migration cache statistics 79# 80# @cache-size: XBZRLE cache size 81# 82# @bytes: amount of bytes already transferred to the target VM 83# 84# @pages: amount of pages transferred to the target VM 85# 86# @cache-miss: number of cache miss 87# 88# @cache-miss-rate: rate of cache miss (since 2.1) 89# 90# @encoding-rate: rate of encoded bytes (since 5.1) 91# 92# @overflow: number of overflows 93# 94# Since: 1.2 95## 96{ 'struct': 'XBZRLECacheStats', 97 'data': {'cache-size': 'size', 'bytes': 'int', 'pages': 'int', 98 'cache-miss': 'int', 'cache-miss-rate': 'number', 99 'encoding-rate': 'number', 'overflow': 'int' } } 100 101## 102# @CompressionStats: 103# 104# Detailed migration compression statistics 105# 106# @pages: amount of pages compressed and transferred to the target VM 107# 108# @busy: count of times that no free thread was available to compress data 109# 110# @busy-rate: rate of thread busy 111# 112# @compressed-size: amount of bytes after compression 113# 114# @compression-rate: rate of compressed size 115# 116# Since: 3.1 117## 118{ 'struct': 'CompressionStats', 119 'data': {'pages': 'int', 'busy': 'int', 'busy-rate': 'number', 120 'compressed-size': 'int', 'compression-rate': 'number' } } 121 122## 123# @MigrationStatus: 124# 125# An enumeration of migration status. 126# 127# @none: no migration has ever happened. 128# 129# @setup: migration process has been initiated. 130# 131# @cancelling: in the process of cancelling migration. 132# 133# @cancelled: cancelling migration is finished. 134# 135# @active: in the process of doing migration. 136# 137# @postcopy-active: like active, but now in postcopy mode. (since 2.5) 138# 139# @postcopy-paused: during postcopy but paused. (since 3.0) 140# 141# @postcopy-recover: trying to recover from a paused postcopy. (since 3.0) 142# 143# @completed: migration is finished. 144# 145# @failed: some error occurred during migration process. 146# 147# @colo: VM is in the process of fault tolerance, VM can not get into this 148# state unless colo capability is enabled for migration. (since 2.8) 149# 150# @pre-switchover: Paused before device serialisation. (since 2.11) 151# 152# @device: During device serialisation when pause-before-switchover is enabled 153# (since 2.11) 154# 155# @wait-unplug: wait for device unplug request by guest OS to be completed. 156# (since 4.2) 157# 158# Since: 2.3 159## 160{ 'enum': 'MigrationStatus', 161 'data': [ 'none', 'setup', 'cancelling', 'cancelled', 162 'active', 'postcopy-active', 'postcopy-paused', 163 'postcopy-recover', 'completed', 'failed', 'colo', 164 'pre-switchover', 'device', 'wait-unplug' ] } 165## 166# @VfioStats: 167# 168# Detailed VFIO devices migration statistics 169# 170# @transferred: amount of bytes transferred to the target VM by VFIO devices 171# 172# Since: 5.2 173## 174{ 'struct': 'VfioStats', 175 'data': {'transferred': 'int' } } 176 177## 178# @MigrationInfo: 179# 180# Information about current migration process. 181# 182# @status: @MigrationStatus describing the current migration status. 183# If this field is not returned, no migration process 184# has been initiated 185# 186# @ram: @MigrationStats containing detailed migration 187# status, only returned if status is 'active' or 188# 'completed'(since 1.2) 189# 190# @disk: @MigrationStats containing detailed disk migration 191# status, only returned if status is 'active' and it is a block 192# migration 193# 194# @xbzrle-cache: @XBZRLECacheStats containing detailed XBZRLE 195# migration statistics, only returned if XBZRLE feature is on and 196# status is 'active' or 'completed' (since 1.2) 197# 198# @total-time: total amount of milliseconds since migration started. 199# If migration has ended, it returns the total migration 200# time. (since 1.2) 201# 202# @downtime: only present when migration finishes correctly 203# total downtime in milliseconds for the guest. 204# (since 1.3) 205# 206# @expected-downtime: only present while migration is active 207# expected downtime in milliseconds for the guest in last walk 208# of the dirty bitmap. (since 1.3) 209# 210# @setup-time: amount of setup time in milliseconds *before* the 211# iterations begin but *after* the QMP command is issued. This is designed 212# to provide an accounting of any activities (such as RDMA pinning) which 213# may be expensive, but do not actually occur during the iterative 214# migration rounds themselves. (since 1.6) 215# 216# @cpu-throttle-percentage: percentage of time guest cpus are being 217# throttled during auto-converge. This is only present when auto-converge 218# has started throttling guest cpus. (Since 2.7) 219# 220# @error-desc: the human readable error description string, when 221# @status is 'failed'. Clients should not attempt to parse the 222# error strings. (Since 2.7) 223# 224# @postcopy-blocktime: total time when all vCPU were blocked during postcopy 225# live migration. This is only present when the postcopy-blocktime 226# migration capability is enabled. (Since 3.0) 227# 228# @postcopy-vcpu-blocktime: list of the postcopy blocktime per vCPU. This is 229# only present when the postcopy-blocktime migration capability 230# is enabled. (Since 3.0) 231# 232# @compression: migration compression statistics, only returned if compression 233# feature is on and status is 'active' or 'completed' (Since 3.1) 234# 235# @socket-address: Only used for tcp, to know what the real port is (Since 4.0) 236# 237# @vfio: @VfioStats containing detailed VFIO devices migration statistics, 238# only returned if VFIO device is present, migration is supported by all 239# VFIO devices and status is 'active' or 'completed' (since 5.2) 240# 241# @blocked-reasons: A list of reasons an outgoing migration is blocked. 242# Present and non-empty when migration is blocked. 243# (since 6.0) 244# 245# Since: 0.14 246## 247{ 'struct': 'MigrationInfo', 248 'data': {'*status': 'MigrationStatus', '*ram': 'MigrationStats', 249 '*disk': 'MigrationStats', 250 '*vfio': 'VfioStats', 251 '*xbzrle-cache': 'XBZRLECacheStats', 252 '*total-time': 'int', 253 '*expected-downtime': 'int', 254 '*downtime': 'int', 255 '*setup-time': 'int', 256 '*cpu-throttle-percentage': 'int', 257 '*error-desc': 'str', 258 '*blocked-reasons': ['str'], 259 '*postcopy-blocktime' : 'uint32', 260 '*postcopy-vcpu-blocktime': ['uint32'], 261 '*compression': 'CompressionStats', 262 '*socket-address': ['SocketAddress'] } } 263 264## 265# @query-migrate: 266# 267# Returns information about current migration process. If migration 268# is active there will be another json-object with RAM migration 269# status and if block migration is active another one with block 270# migration status. 271# 272# Returns: @MigrationInfo 273# 274# Since: 0.14 275# 276# Examples: 277# 278# 1. Before the first migration 279# 280# -> { "execute": "query-migrate" } 281# <- { "return": {} } 282# 283# 2. Migration is done and has succeeded 284# 285# -> { "execute": "query-migrate" } 286# <- { "return": { 287# "status": "completed", 288# "total-time":12345, 289# "setup-time":12345, 290# "downtime":12345, 291# "ram":{ 292# "transferred":123, 293# "remaining":123, 294# "total":246, 295# "duplicate":123, 296# "normal":123, 297# "normal-bytes":123456, 298# "dirty-sync-count":15 299# } 300# } 301# } 302# 303# 3. Migration is done and has failed 304# 305# -> { "execute": "query-migrate" } 306# <- { "return": { "status": "failed" } } 307# 308# 4. Migration is being performed and is not a block migration: 309# 310# -> { "execute": "query-migrate" } 311# <- { 312# "return":{ 313# "status":"active", 314# "total-time":12345, 315# "setup-time":12345, 316# "expected-downtime":12345, 317# "ram":{ 318# "transferred":123, 319# "remaining":123, 320# "total":246, 321# "duplicate":123, 322# "normal":123, 323# "normal-bytes":123456, 324# "dirty-sync-count":15 325# } 326# } 327# } 328# 329# 5. Migration is being performed and is a block migration: 330# 331# -> { "execute": "query-migrate" } 332# <- { 333# "return":{ 334# "status":"active", 335# "total-time":12345, 336# "setup-time":12345, 337# "expected-downtime":12345, 338# "ram":{ 339# "total":1057024, 340# "remaining":1053304, 341# "transferred":3720, 342# "duplicate":123, 343# "normal":123, 344# "normal-bytes":123456, 345# "dirty-sync-count":15 346# }, 347# "disk":{ 348# "total":20971520, 349# "remaining":20880384, 350# "transferred":91136 351# } 352# } 353# } 354# 355# 6. Migration is being performed and XBZRLE is active: 356# 357# -> { "execute": "query-migrate" } 358# <- { 359# "return":{ 360# "status":"active", 361# "total-time":12345, 362# "setup-time":12345, 363# "expected-downtime":12345, 364# "ram":{ 365# "total":1057024, 366# "remaining":1053304, 367# "transferred":3720, 368# "duplicate":10, 369# "normal":3333, 370# "normal-bytes":3412992, 371# "dirty-sync-count":15 372# }, 373# "xbzrle-cache":{ 374# "cache-size":67108864, 375# "bytes":20971520, 376# "pages":2444343, 377# "cache-miss":2244, 378# "cache-miss-rate":0.123, 379# "encoding-rate":80.1, 380# "overflow":34434 381# } 382# } 383# } 384# 385## 386{ 'command': 'query-migrate', 'returns': 'MigrationInfo' } 387 388## 389# @MigrationCapability: 390# 391# Migration capabilities enumeration 392# 393# @xbzrle: Migration supports xbzrle (Xor Based Zero Run Length Encoding). 394# This feature allows us to minimize migration traffic for certain work 395# loads, by sending compressed difference of the pages 396# 397# @rdma-pin-all: Controls whether or not the entire VM memory footprint is 398# mlock()'d on demand or all at once. Refer to docs/rdma.txt for usage. 399# Disabled by default. (since 2.0) 400# 401# @zero-blocks: During storage migration encode blocks of zeroes efficiently. This 402# essentially saves 1MB of zeroes per block on the wire. Enabling requires 403# source and target VM to support this feature. To enable it is sufficient 404# to enable the capability on the source VM. The feature is disabled by 405# default. (since 1.6) 406# 407# @compress: Use multiple compression threads to accelerate live migration. 408# This feature can help to reduce the migration traffic, by sending 409# compressed pages. Please note that if compress and xbzrle are both 410# on, compress only takes effect in the ram bulk stage, after that, 411# it will be disabled and only xbzrle takes effect, this can help to 412# minimize migration traffic. The feature is disabled by default. 413# (since 2.4 ) 414# 415# @events: generate events for each migration state change 416# (since 2.4 ) 417# 418# @auto-converge: If enabled, QEMU will automatically throttle down the guest 419# to speed up convergence of RAM migration. (since 1.6) 420# 421# @postcopy-ram: Start executing on the migration target before all of RAM has 422# been migrated, pulling the remaining pages along as needed. The 423# capacity must have the same setting on both source and target 424# or migration will not even start. NOTE: If the migration fails during 425# postcopy the VM will fail. (since 2.6) 426# 427# @x-colo: If enabled, migration will never end, and the state of the VM on the 428# primary side will be migrated continuously to the VM on secondary 429# side, this process is called COarse-Grain LOck Stepping (COLO) for 430# Non-stop Service. (since 2.8) 431# 432# @release-ram: if enabled, qemu will free the migrated ram pages on the source 433# during postcopy-ram migration. (since 2.9) 434# 435# @block: If enabled, QEMU will also migrate the contents of all block 436# devices. Default is disabled. A possible alternative uses 437# mirror jobs to a builtin NBD server on the destination, which 438# offers more flexibility. 439# (Since 2.10) 440# 441# @return-path: If enabled, migration will use the return path even 442# for precopy. (since 2.10) 443# 444# @pause-before-switchover: Pause outgoing migration before serialising device 445# state and before disabling block IO (since 2.11) 446# 447# @multifd: Use more than one fd for migration (since 4.0) 448# 449# @dirty-bitmaps: If enabled, QEMU will migrate named dirty bitmaps. 450# (since 2.12) 451# 452# @postcopy-blocktime: Calculate downtime for postcopy live migration 453# (since 3.0) 454# 455# @late-block-activate: If enabled, the destination will not activate block 456# devices (and thus take locks) immediately at the end of migration. 457# (since 3.0) 458# 459# @x-ignore-shared: If enabled, QEMU will not migrate shared memory (since 4.0) 460# 461# @validate-uuid: Send the UUID of the source to allow the destination 462# to ensure it is the same. (since 4.2) 463# 464# @background-snapshot: If enabled, the migration stream will be a snapshot 465# of the VM exactly at the point when the migration 466# procedure starts. The VM RAM is saved with running VM. 467# (since 6.0) 468# 469# @zero-copy-send: Controls behavior on sending memory pages on migration. 470# When true, enables a zero-copy mechanism for sending 471# memory pages, if host supports it. 472# Requires that QEMU be permitted to use locked memory 473# for guest RAM pages. 474# (since 7.1) 475# @postcopy-preempt: If enabled, the migration process will allow postcopy 476# requests to preempt precopy stream, so postcopy requests 477# will be handled faster. This is a performance feature and 478# should not affect the correctness of postcopy migration. 479# (since 7.1) 480# 481# Features: 482# @unstable: Members @x-colo and @x-ignore-shared are experimental. 483# 484# Since: 1.2 485## 486{ 'enum': 'MigrationCapability', 487 'data': ['xbzrle', 'rdma-pin-all', 'auto-converge', 'zero-blocks', 488 'compress', 'events', 'postcopy-ram', 489 { 'name': 'x-colo', 'features': [ 'unstable' ] }, 490 'release-ram', 491 'block', 'return-path', 'pause-before-switchover', 'multifd', 492 'dirty-bitmaps', 'postcopy-blocktime', 'late-block-activate', 493 { 'name': 'x-ignore-shared', 'features': [ 'unstable' ] }, 494 'validate-uuid', 'background-snapshot', 495 'zero-copy-send', 'postcopy-preempt'] } 496 497## 498# @MigrationCapabilityStatus: 499# 500# Migration capability information 501# 502# @capability: capability enum 503# 504# @state: capability state bool 505# 506# Since: 1.2 507## 508{ 'struct': 'MigrationCapabilityStatus', 509 'data': { 'capability' : 'MigrationCapability', 'state' : 'bool' } } 510 511## 512# @migrate-set-capabilities: 513# 514# Enable/Disable the following migration capabilities (like xbzrle) 515# 516# @capabilities: json array of capability modifications to make 517# 518# Since: 1.2 519# 520# Example: 521# 522# -> { "execute": "migrate-set-capabilities" , "arguments": 523# { "capabilities": [ { "capability": "xbzrle", "state": true } ] } } 524# <- { "return": {} } 525# 526## 527{ 'command': 'migrate-set-capabilities', 528 'data': { 'capabilities': ['MigrationCapabilityStatus'] } } 529 530## 531# @query-migrate-capabilities: 532# 533# Returns information about the current migration capabilities status 534# 535# Returns: @MigrationCapabilityStatus 536# 537# Since: 1.2 538# 539# Example: 540# 541# -> { "execute": "query-migrate-capabilities" } 542# <- { "return": [ 543# {"state": false, "capability": "xbzrle"}, 544# {"state": false, "capability": "rdma-pin-all"}, 545# {"state": false, "capability": "auto-converge"}, 546# {"state": false, "capability": "zero-blocks"}, 547# {"state": false, "capability": "compress"}, 548# {"state": true, "capability": "events"}, 549# {"state": false, "capability": "postcopy-ram"}, 550# {"state": false, "capability": "x-colo"} 551# ]} 552# 553## 554{ 'command': 'query-migrate-capabilities', 'returns': ['MigrationCapabilityStatus']} 555 556## 557# @MultiFDCompression: 558# 559# An enumeration of multifd compression methods. 560# 561# @none: no compression. 562# @zlib: use zlib compression method. 563# @zstd: use zstd compression method. 564# 565# Since: 5.0 566## 567{ 'enum': 'MultiFDCompression', 568 'data': [ 'none', 'zlib', 569 { 'name': 'zstd', 'if': 'CONFIG_ZSTD' } ] } 570 571## 572# @BitmapMigrationBitmapAliasTransform: 573# 574# @persistent: If present, the bitmap will be made persistent 575# or transient depending on this parameter. 576# 577# Since: 6.0 578## 579{ 'struct': 'BitmapMigrationBitmapAliasTransform', 580 'data': { 581 '*persistent': 'bool' 582 } } 583 584## 585# @BitmapMigrationBitmapAlias: 586# 587# @name: The name of the bitmap. 588# 589# @alias: An alias name for migration (for example the bitmap name on 590# the opposite site). 591# 592# @transform: Allows the modification of the migrated bitmap. 593# (since 6.0) 594# 595# Since: 5.2 596## 597{ 'struct': 'BitmapMigrationBitmapAlias', 598 'data': { 599 'name': 'str', 600 'alias': 'str', 601 '*transform': 'BitmapMigrationBitmapAliasTransform' 602 } } 603 604## 605# @BitmapMigrationNodeAlias: 606# 607# Maps a block node name and the bitmaps it has to aliases for dirty 608# bitmap migration. 609# 610# @node-name: A block node name. 611# 612# @alias: An alias block node name for migration (for example the 613# node name on the opposite site). 614# 615# @bitmaps: Mappings for the bitmaps on this node. 616# 617# Since: 5.2 618## 619{ 'struct': 'BitmapMigrationNodeAlias', 620 'data': { 621 'node-name': 'str', 622 'alias': 'str', 623 'bitmaps': [ 'BitmapMigrationBitmapAlias' ] 624 } } 625 626## 627# @MigrationParameter: 628# 629# Migration parameters enumeration 630# 631# @announce-initial: Initial delay (in milliseconds) before sending the first 632# announce (Since 4.0) 633# 634# @announce-max: Maximum delay (in milliseconds) between packets in the 635# announcement (Since 4.0) 636# 637# @announce-rounds: Number of self-announce packets sent after migration 638# (Since 4.0) 639# 640# @announce-step: Increase in delay (in milliseconds) between subsequent 641# packets in the announcement (Since 4.0) 642# 643# @compress-level: Set the compression level to be used in live migration, 644# the compression level is an integer between 0 and 9, where 0 means 645# no compression, 1 means the best compression speed, and 9 means best 646# compression ratio which will consume more CPU. 647# 648# @compress-threads: Set compression thread count to be used in live migration, 649# the compression thread count is an integer between 1 and 255. 650# 651# @compress-wait-thread: Controls behavior when all compression threads are 652# currently busy. If true (default), wait for a free 653# compression thread to become available; otherwise, 654# send the page uncompressed. (Since 3.1) 655# 656# @decompress-threads: Set decompression thread count to be used in live 657# migration, the decompression thread count is an integer between 1 658# and 255. Usually, decompression is at least 4 times as fast as 659# compression, so set the decompress-threads to the number about 1/4 660# of compress-threads is adequate. 661# 662# @throttle-trigger-threshold: The ratio of bytes_dirty_period and bytes_xfer_period 663# to trigger throttling. It is expressed as percentage. 664# The default value is 50. (Since 5.0) 665# 666# @cpu-throttle-initial: Initial percentage of time guest cpus are throttled 667# when migration auto-converge is activated. The 668# default value is 20. (Since 2.7) 669# 670# @cpu-throttle-increment: throttle percentage increase each time 671# auto-converge detects that migration is not making 672# progress. The default value is 10. (Since 2.7) 673# 674# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage 675# At the tail stage of throttling, the Guest is very 676# sensitive to CPU percentage while the @cpu-throttle 677# -increment is excessive usually at tail stage. 678# If this parameter is true, we will compute the ideal 679# CPU percentage used by the Guest, which may exactly make 680# the dirty rate match the dirty rate threshold. Then we 681# will choose a smaller throttle increment between the 682# one specified by @cpu-throttle-increment and the one 683# generated by ideal CPU percentage. 684# Therefore, it is compatible to traditional throttling, 685# meanwhile the throttle increment won't be excessive 686# at tail stage. 687# The default value is false. (Since 5.1) 688# 689# @tls-creds: ID of the 'tls-creds' object that provides credentials for 690# establishing a TLS connection over the migration data channel. 691# On the outgoing side of the migration, the credentials must 692# be for a 'client' endpoint, while for the incoming side the 693# credentials must be for a 'server' endpoint. Setting this 694# will enable TLS for all migrations. The default is unset, 695# resulting in unsecured migration at the QEMU level. (Since 2.7) 696# 697# @tls-hostname: hostname of the target host for the migration. This is 698# required when using x509 based TLS credentials and the 699# migration URI does not already include a hostname. For 700# example if using fd: or exec: based migration, the 701# hostname must be provided so that the server's x509 702# certificate identity can be validated. (Since 2.7) 703# 704# @tls-authz: ID of the 'authz' object subclass that provides access control 705# checking of the TLS x509 certificate distinguished name. 706# This object is only resolved at time of use, so can be deleted 707# and recreated on the fly while the migration server is active. 708# If missing, it will default to denying access (Since 4.0) 709# 710# @max-bandwidth: to set maximum speed for migration. maximum speed in 711# bytes per second. (Since 2.8) 712# 713# @downtime-limit: set maximum tolerated downtime for migration. maximum 714# downtime in milliseconds (Since 2.8) 715# 716# @x-checkpoint-delay: The delay time (in ms) between two COLO checkpoints in 717# periodic mode. (Since 2.8) 718# 719# @block-incremental: Affects how much storage is migrated when the 720# block migration capability is enabled. When false, the entire 721# storage backing chain is migrated into a flattened image at 722# the destination; when true, only the active qcow2 layer is 723# migrated and the destination must already have access to the 724# same backing chain as was used on the source. (since 2.10) 725# 726# @multifd-channels: Number of channels used to migrate data in 727# parallel. This is the same number that the 728# number of sockets used for migration. The 729# default value is 2 (since 4.0) 730# 731# @xbzrle-cache-size: cache size to be used by XBZRLE migration. It 732# needs to be a multiple of the target page size 733# and a power of 2 734# (Since 2.11) 735# 736# @max-postcopy-bandwidth: Background transfer bandwidth during postcopy. 737# Defaults to 0 (unlimited). In bytes per second. 738# (Since 3.0) 739# 740# @max-cpu-throttle: maximum cpu throttle percentage. 741# Defaults to 99. (Since 3.1) 742# 743# @multifd-compression: Which compression method to use. 744# Defaults to none. (Since 5.0) 745# 746# @multifd-zlib-level: Set the compression level to be used in live 747# migration, the compression level is an integer between 0 748# and 9, where 0 means no compression, 1 means the best 749# compression speed, and 9 means best compression ratio which 750# will consume more CPU. 751# Defaults to 1. (Since 5.0) 752# 753# @multifd-zstd-level: Set the compression level to be used in live 754# migration, the compression level is an integer between 0 755# and 20, where 0 means no compression, 1 means the best 756# compression speed, and 20 means best compression ratio which 757# will consume more CPU. 758# Defaults to 1. (Since 5.0) 759# 760# 761# @block-bitmap-mapping: Maps block nodes and bitmaps on them to 762# aliases for the purpose of dirty bitmap migration. Such 763# aliases may for example be the corresponding names on the 764# opposite site. 765# The mapping must be one-to-one, but not necessarily 766# complete: On the source, unmapped bitmaps and all bitmaps 767# on unmapped nodes will be ignored. On the destination, 768# encountering an unmapped alias in the incoming migration 769# stream will result in a report, and all further bitmap 770# migration data will then be discarded. 771# Note that the destination does not know about bitmaps it 772# does not receive, so there is no limitation or requirement 773# regarding the number of bitmaps received, or how they are 774# named, or on which nodes they are placed. 775# By default (when this parameter has never been set), bitmap 776# names are mapped to themselves. Nodes are mapped to their 777# block device name if there is one, and to their node name 778# otherwise. (Since 5.2) 779# 780# Features: 781# @unstable: Member @x-checkpoint-delay is experimental. 782# 783# Since: 2.4 784## 785{ 'enum': 'MigrationParameter', 786 'data': ['announce-initial', 'announce-max', 787 'announce-rounds', 'announce-step', 788 'compress-level', 'compress-threads', 'decompress-threads', 789 'compress-wait-thread', 'throttle-trigger-threshold', 790 'cpu-throttle-initial', 'cpu-throttle-increment', 791 'cpu-throttle-tailslow', 792 'tls-creds', 'tls-hostname', 'tls-authz', 'max-bandwidth', 793 'downtime-limit', 794 { 'name': 'x-checkpoint-delay', 'features': [ 'unstable' ] }, 795 'block-incremental', 796 'multifd-channels', 797 'xbzrle-cache-size', 'max-postcopy-bandwidth', 798 'max-cpu-throttle', 'multifd-compression', 799 'multifd-zlib-level' ,'multifd-zstd-level', 800 'block-bitmap-mapping' ] } 801 802## 803# @MigrateSetParameters: 804# 805# @announce-initial: Initial delay (in milliseconds) before sending the first 806# announce (Since 4.0) 807# 808# @announce-max: Maximum delay (in milliseconds) between packets in the 809# announcement (Since 4.0) 810# 811# @announce-rounds: Number of self-announce packets sent after migration 812# (Since 4.0) 813# 814# @announce-step: Increase in delay (in milliseconds) between subsequent 815# packets in the announcement (Since 4.0) 816# 817# @compress-level: compression level 818# 819# @compress-threads: compression thread count 820# 821# @compress-wait-thread: Controls behavior when all compression threads are 822# currently busy. If true (default), wait for a free 823# compression thread to become available; otherwise, 824# send the page uncompressed. (Since 3.1) 825# 826# @decompress-threads: decompression thread count 827# 828# @throttle-trigger-threshold: The ratio of bytes_dirty_period and bytes_xfer_period 829# to trigger throttling. It is expressed as percentage. 830# The default value is 50. (Since 5.0) 831# 832# @cpu-throttle-initial: Initial percentage of time guest cpus are 833# throttled when migration auto-converge is activated. 834# The default value is 20. (Since 2.7) 835# 836# @cpu-throttle-increment: throttle percentage increase each time 837# auto-converge detects that migration is not making 838# progress. The default value is 10. (Since 2.7) 839# 840# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage 841# At the tail stage of throttling, the Guest is very 842# sensitive to CPU percentage while the @cpu-throttle 843# -increment is excessive usually at tail stage. 844# If this parameter is true, we will compute the ideal 845# CPU percentage used by the Guest, which may exactly make 846# the dirty rate match the dirty rate threshold. Then we 847# will choose a smaller throttle increment between the 848# one specified by @cpu-throttle-increment and the one 849# generated by ideal CPU percentage. 850# Therefore, it is compatible to traditional throttling, 851# meanwhile the throttle increment won't be excessive 852# at tail stage. 853# The default value is false. (Since 5.1) 854# 855# @tls-creds: ID of the 'tls-creds' object that provides credentials 856# for establishing a TLS connection over the migration data 857# channel. On the outgoing side of the migration, the credentials 858# must be for a 'client' endpoint, while for the incoming side the 859# credentials must be for a 'server' endpoint. Setting this 860# to a non-empty string enables TLS for all migrations. 861# An empty string means that QEMU will use plain text mode for 862# migration, rather than TLS (Since 2.9) 863# Previously (since 2.7), this was reported by omitting 864# tls-creds instead. 865# 866# @tls-hostname: hostname of the target host for the migration. This 867# is required when using x509 based TLS credentials and the 868# migration URI does not already include a hostname. For 869# example if using fd: or exec: based migration, the 870# hostname must be provided so that the server's x509 871# certificate identity can be validated. (Since 2.7) 872# An empty string means that QEMU will use the hostname 873# associated with the migration URI, if any. (Since 2.9) 874# Previously (since 2.7), this was reported by omitting 875# tls-hostname instead. 876# 877# @max-bandwidth: to set maximum speed for migration. maximum speed in 878# bytes per second. (Since 2.8) 879# 880# @downtime-limit: set maximum tolerated downtime for migration. maximum 881# downtime in milliseconds (Since 2.8) 882# 883# @x-checkpoint-delay: the delay time between two COLO checkpoints. (Since 2.8) 884# 885# @block-incremental: Affects how much storage is migrated when the 886# block migration capability is enabled. When false, the entire 887# storage backing chain is migrated into a flattened image at 888# the destination; when true, only the active qcow2 layer is 889# migrated and the destination must already have access to the 890# same backing chain as was used on the source. (since 2.10) 891# 892# @multifd-channels: Number of channels used to migrate data in 893# parallel. This is the same number that the 894# number of sockets used for migration. The 895# default value is 2 (since 4.0) 896# 897# @xbzrle-cache-size: cache size to be used by XBZRLE migration. It 898# needs to be a multiple of the target page size 899# and a power of 2 900# (Since 2.11) 901# 902# @max-postcopy-bandwidth: Background transfer bandwidth during postcopy. 903# Defaults to 0 (unlimited). In bytes per second. 904# (Since 3.0) 905# 906# @max-cpu-throttle: maximum cpu throttle percentage. 907# The default value is 99. (Since 3.1) 908# 909# @multifd-compression: Which compression method to use. 910# Defaults to none. (Since 5.0) 911# 912# @multifd-zlib-level: Set the compression level to be used in live 913# migration, the compression level is an integer between 0 914# and 9, where 0 means no compression, 1 means the best 915# compression speed, and 9 means best compression ratio which 916# will consume more CPU. 917# Defaults to 1. (Since 5.0) 918# 919# @multifd-zstd-level: Set the compression level to be used in live 920# migration, the compression level is an integer between 0 921# and 20, where 0 means no compression, 1 means the best 922# compression speed, and 20 means best compression ratio which 923# will consume more CPU. 924# Defaults to 1. (Since 5.0) 925# 926# @block-bitmap-mapping: Maps block nodes and bitmaps on them to 927# aliases for the purpose of dirty bitmap migration. Such 928# aliases may for example be the corresponding names on the 929# opposite site. 930# The mapping must be one-to-one, but not necessarily 931# complete: On the source, unmapped bitmaps and all bitmaps 932# on unmapped nodes will be ignored. On the destination, 933# encountering an unmapped alias in the incoming migration 934# stream will result in a report, and all further bitmap 935# migration data will then be discarded. 936# Note that the destination does not know about bitmaps it 937# does not receive, so there is no limitation or requirement 938# regarding the number of bitmaps received, or how they are 939# named, or on which nodes they are placed. 940# By default (when this parameter has never been set), bitmap 941# names are mapped to themselves. Nodes are mapped to their 942# block device name if there is one, and to their node name 943# otherwise. (Since 5.2) 944# 945# Features: 946# @unstable: Member @x-checkpoint-delay is experimental. 947# 948# Since: 2.4 949## 950# TODO either fuse back into MigrationParameters, or make 951# MigrationParameters members mandatory 952{ 'struct': 'MigrateSetParameters', 953 'data': { '*announce-initial': 'size', 954 '*announce-max': 'size', 955 '*announce-rounds': 'size', 956 '*announce-step': 'size', 957 '*compress-level': 'uint8', 958 '*compress-threads': 'uint8', 959 '*compress-wait-thread': 'bool', 960 '*decompress-threads': 'uint8', 961 '*throttle-trigger-threshold': 'uint8', 962 '*cpu-throttle-initial': 'uint8', 963 '*cpu-throttle-increment': 'uint8', 964 '*cpu-throttle-tailslow': 'bool', 965 '*tls-creds': 'StrOrNull', 966 '*tls-hostname': 'StrOrNull', 967 '*tls-authz': 'StrOrNull', 968 '*max-bandwidth': 'size', 969 '*downtime-limit': 'uint64', 970 '*x-checkpoint-delay': { 'type': 'uint32', 971 'features': [ 'unstable' ] }, 972 '*block-incremental': 'bool', 973 '*multifd-channels': 'uint8', 974 '*xbzrle-cache-size': 'size', 975 '*max-postcopy-bandwidth': 'size', 976 '*max-cpu-throttle': 'uint8', 977 '*multifd-compression': 'MultiFDCompression', 978 '*multifd-zlib-level': 'uint8', 979 '*multifd-zstd-level': 'uint8', 980 '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ] } } 981 982## 983# @migrate-set-parameters: 984# 985# Set various migration parameters. 986# 987# Since: 2.4 988# 989# Example: 990# 991# -> { "execute": "migrate-set-parameters" , 992# "arguments": { "compress-level": 1 } } 993# <- { "return": {} } 994# 995## 996{ 'command': 'migrate-set-parameters', 'boxed': true, 997 'data': 'MigrateSetParameters' } 998 999## 1000# @MigrationParameters: 1001# 1002# The optional members aren't actually optional. 1003# 1004# @announce-initial: Initial delay (in milliseconds) before sending the 1005# first announce (Since 4.0) 1006# 1007# @announce-max: Maximum delay (in milliseconds) between packets in the 1008# announcement (Since 4.0) 1009# 1010# @announce-rounds: Number of self-announce packets sent after migration 1011# (Since 4.0) 1012# 1013# @announce-step: Increase in delay (in milliseconds) between subsequent 1014# packets in the announcement (Since 4.0) 1015# 1016# @compress-level: compression level 1017# 1018# @compress-threads: compression thread count 1019# 1020# @compress-wait-thread: Controls behavior when all compression threads are 1021# currently busy. If true (default), wait for a free 1022# compression thread to become available; otherwise, 1023# send the page uncompressed. (Since 3.1) 1024# 1025# @decompress-threads: decompression thread count 1026# 1027# @throttle-trigger-threshold: The ratio of bytes_dirty_period and bytes_xfer_period 1028# to trigger throttling. It is expressed as percentage. 1029# The default value is 50. (Since 5.0) 1030# 1031# @cpu-throttle-initial: Initial percentage of time guest cpus are 1032# throttled when migration auto-converge is activated. 1033# (Since 2.7) 1034# 1035# @cpu-throttle-increment: throttle percentage increase each time 1036# auto-converge detects that migration is not making 1037# progress. (Since 2.7) 1038# 1039# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage 1040# At the tail stage of throttling, the Guest is very 1041# sensitive to CPU percentage while the @cpu-throttle 1042# -increment is excessive usually at tail stage. 1043# If this parameter is true, we will compute the ideal 1044# CPU percentage used by the Guest, which may exactly make 1045# the dirty rate match the dirty rate threshold. Then we 1046# will choose a smaller throttle increment between the 1047# one specified by @cpu-throttle-increment and the one 1048# generated by ideal CPU percentage. 1049# Therefore, it is compatible to traditional throttling, 1050# meanwhile the throttle increment won't be excessive 1051# at tail stage. 1052# The default value is false. (Since 5.1) 1053# 1054# @tls-creds: ID of the 'tls-creds' object that provides credentials 1055# for establishing a TLS connection over the migration data 1056# channel. On the outgoing side of the migration, the credentials 1057# must be for a 'client' endpoint, while for the incoming side the 1058# credentials must be for a 'server' endpoint. 1059# An empty string means that QEMU will use plain text mode for 1060# migration, rather than TLS (Since 2.7) 1061# Note: 2.8 reports this by omitting tls-creds instead. 1062# 1063# @tls-hostname: hostname of the target host for the migration. This 1064# is required when using x509 based TLS credentials and the 1065# migration URI does not already include a hostname. For 1066# example if using fd: or exec: based migration, the 1067# hostname must be provided so that the server's x509 1068# certificate identity can be validated. (Since 2.7) 1069# An empty string means that QEMU will use the hostname 1070# associated with the migration URI, if any. (Since 2.9) 1071# Note: 2.8 reports this by omitting tls-hostname instead. 1072# 1073# @tls-authz: ID of the 'authz' object subclass that provides access control 1074# checking of the TLS x509 certificate distinguished name. (Since 1075# 4.0) 1076# 1077# @max-bandwidth: to set maximum speed for migration. maximum speed in 1078# bytes per second. (Since 2.8) 1079# 1080# @downtime-limit: set maximum tolerated downtime for migration. maximum 1081# downtime in milliseconds (Since 2.8) 1082# 1083# @x-checkpoint-delay: the delay time between two COLO checkpoints. (Since 2.8) 1084# 1085# @block-incremental: Affects how much storage is migrated when the 1086# block migration capability is enabled. When false, the entire 1087# storage backing chain is migrated into a flattened image at 1088# the destination; when true, only the active qcow2 layer is 1089# migrated and the destination must already have access to the 1090# same backing chain as was used on the source. (since 2.10) 1091# 1092# @multifd-channels: Number of channels used to migrate data in 1093# parallel. This is the same number that the 1094# number of sockets used for migration. 1095# The default value is 2 (since 4.0) 1096# 1097# @xbzrle-cache-size: cache size to be used by XBZRLE migration. It 1098# needs to be a multiple of the target page size 1099# and a power of 2 1100# (Since 2.11) 1101# 1102# @max-postcopy-bandwidth: Background transfer bandwidth during postcopy. 1103# Defaults to 0 (unlimited). In bytes per second. 1104# (Since 3.0) 1105# 1106# @max-cpu-throttle: maximum cpu throttle percentage. 1107# Defaults to 99. 1108# (Since 3.1) 1109# 1110# @multifd-compression: Which compression method to use. 1111# Defaults to none. (Since 5.0) 1112# 1113# @multifd-zlib-level: Set the compression level to be used in live 1114# migration, the compression level is an integer between 0 1115# and 9, where 0 means no compression, 1 means the best 1116# compression speed, and 9 means best compression ratio which 1117# will consume more CPU. 1118# Defaults to 1. (Since 5.0) 1119# 1120# @multifd-zstd-level: Set the compression level to be used in live 1121# migration, the compression level is an integer between 0 1122# and 20, where 0 means no compression, 1 means the best 1123# compression speed, and 20 means best compression ratio which 1124# will consume more CPU. 1125# Defaults to 1. (Since 5.0) 1126# 1127# @block-bitmap-mapping: Maps block nodes and bitmaps on them to 1128# aliases for the purpose of dirty bitmap migration. Such 1129# aliases may for example be the corresponding names on the 1130# opposite site. 1131# The mapping must be one-to-one, but not necessarily 1132# complete: On the source, unmapped bitmaps and all bitmaps 1133# on unmapped nodes will be ignored. On the destination, 1134# encountering an unmapped alias in the incoming migration 1135# stream will result in a report, and all further bitmap 1136# migration data will then be discarded. 1137# Note that the destination does not know about bitmaps it 1138# does not receive, so there is no limitation or requirement 1139# regarding the number of bitmaps received, or how they are 1140# named, or on which nodes they are placed. 1141# By default (when this parameter has never been set), bitmap 1142# names are mapped to themselves. Nodes are mapped to their 1143# block device name if there is one, and to their node name 1144# otherwise. (Since 5.2) 1145# 1146# Features: 1147# @unstable: Member @x-checkpoint-delay is experimental. 1148# 1149# Since: 2.4 1150## 1151{ 'struct': 'MigrationParameters', 1152 'data': { '*announce-initial': 'size', 1153 '*announce-max': 'size', 1154 '*announce-rounds': 'size', 1155 '*announce-step': 'size', 1156 '*compress-level': 'uint8', 1157 '*compress-threads': 'uint8', 1158 '*compress-wait-thread': 'bool', 1159 '*decompress-threads': 'uint8', 1160 '*throttle-trigger-threshold': 'uint8', 1161 '*cpu-throttle-initial': 'uint8', 1162 '*cpu-throttle-increment': 'uint8', 1163 '*cpu-throttle-tailslow': 'bool', 1164 '*tls-creds': 'str', 1165 '*tls-hostname': 'str', 1166 '*tls-authz': 'str', 1167 '*max-bandwidth': 'size', 1168 '*downtime-limit': 'uint64', 1169 '*x-checkpoint-delay': { 'type': 'uint32', 1170 'features': [ 'unstable' ] }, 1171 '*block-incremental': 'bool', 1172 '*multifd-channels': 'uint8', 1173 '*xbzrle-cache-size': 'size', 1174 '*max-postcopy-bandwidth': 'size', 1175 '*max-cpu-throttle': 'uint8', 1176 '*multifd-compression': 'MultiFDCompression', 1177 '*multifd-zlib-level': 'uint8', 1178 '*multifd-zstd-level': 'uint8', 1179 '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ] } } 1180 1181## 1182# @query-migrate-parameters: 1183# 1184# Returns information about the current migration parameters 1185# 1186# Returns: @MigrationParameters 1187# 1188# Since: 2.4 1189# 1190# Example: 1191# 1192# -> { "execute": "query-migrate-parameters" } 1193# <- { "return": { 1194# "decompress-threads": 2, 1195# "cpu-throttle-increment": 10, 1196# "compress-threads": 8, 1197# "compress-level": 1, 1198# "cpu-throttle-initial": 20, 1199# "max-bandwidth": 33554432, 1200# "downtime-limit": 300 1201# } 1202# } 1203# 1204## 1205{ 'command': 'query-migrate-parameters', 1206 'returns': 'MigrationParameters' } 1207 1208## 1209# @migrate-start-postcopy: 1210# 1211# Followup to a migration command to switch the migration to postcopy mode. 1212# The postcopy-ram capability must be set on both source and destination 1213# before the original migration command. 1214# 1215# Since: 2.5 1216# 1217# Example: 1218# 1219# -> { "execute": "migrate-start-postcopy" } 1220# <- { "return": {} } 1221# 1222## 1223{ 'command': 'migrate-start-postcopy' } 1224 1225## 1226# @MIGRATION: 1227# 1228# Emitted when a migration event happens 1229# 1230# @status: @MigrationStatus describing the current migration status. 1231# 1232# Since: 2.4 1233# 1234# Example: 1235# 1236# <- {"timestamp": {"seconds": 1432121972, "microseconds": 744001}, 1237# "event": "MIGRATION", 1238# "data": {"status": "completed"} } 1239# 1240## 1241{ 'event': 'MIGRATION', 1242 'data': {'status': 'MigrationStatus'}} 1243 1244## 1245# @MIGRATION_PASS: 1246# 1247# Emitted from the source side of a migration at the start of each pass 1248# (when it syncs the dirty bitmap) 1249# 1250# @pass: An incrementing count (starting at 1 on the first pass) 1251# 1252# Since: 2.6 1253# 1254# Example: 1255# 1256# <- { "timestamp": {"seconds": 1449669631, "microseconds": 239225}, 1257# "event": "MIGRATION_PASS", "data": {"pass": 2} } 1258# 1259## 1260{ 'event': 'MIGRATION_PASS', 1261 'data': { 'pass': 'int' } } 1262 1263## 1264# @COLOMessage: 1265# 1266# The message transmission between Primary side and Secondary side. 1267# 1268# @checkpoint-ready: Secondary VM (SVM) is ready for checkpointing 1269# 1270# @checkpoint-request: Primary VM (PVM) tells SVM to prepare for checkpointing 1271# 1272# @checkpoint-reply: SVM gets PVM's checkpoint request 1273# 1274# @vmstate-send: VM's state will be sent by PVM. 1275# 1276# @vmstate-size: The total size of VMstate. 1277# 1278# @vmstate-received: VM's state has been received by SVM. 1279# 1280# @vmstate-loaded: VM's state has been loaded by SVM. 1281# 1282# Since: 2.8 1283## 1284{ 'enum': 'COLOMessage', 1285 'data': [ 'checkpoint-ready', 'checkpoint-request', 'checkpoint-reply', 1286 'vmstate-send', 'vmstate-size', 'vmstate-received', 1287 'vmstate-loaded' ] } 1288 1289## 1290# @COLOMode: 1291# 1292# The COLO current mode. 1293# 1294# @none: COLO is disabled. 1295# 1296# @primary: COLO node in primary side. 1297# 1298# @secondary: COLO node in slave side. 1299# 1300# Since: 2.8 1301## 1302{ 'enum': 'COLOMode', 1303 'data': [ 'none', 'primary', 'secondary'] } 1304 1305## 1306# @FailoverStatus: 1307# 1308# An enumeration of COLO failover status 1309# 1310# @none: no failover has ever happened 1311# 1312# @require: got failover requirement but not handled 1313# 1314# @active: in the process of doing failover 1315# 1316# @completed: finish the process of failover 1317# 1318# @relaunch: restart the failover process, from 'none' -> 'completed' (Since 2.9) 1319# 1320# Since: 2.8 1321## 1322{ 'enum': 'FailoverStatus', 1323 'data': [ 'none', 'require', 'active', 'completed', 'relaunch' ] } 1324 1325## 1326# @COLO_EXIT: 1327# 1328# Emitted when VM finishes COLO mode due to some errors happening or 1329# at the request of users. 1330# 1331# @mode: report COLO mode when COLO exited. 1332# 1333# @reason: describes the reason for the COLO exit. 1334# 1335# Since: 3.1 1336# 1337# Example: 1338# 1339# <- { "timestamp": {"seconds": 2032141960, "microseconds": 417172}, 1340# "event": "COLO_EXIT", "data": {"mode": "primary", "reason": "request" } } 1341# 1342## 1343{ 'event': 'COLO_EXIT', 1344 'data': {'mode': 'COLOMode', 'reason': 'COLOExitReason' } } 1345 1346## 1347# @COLOExitReason: 1348# 1349# The reason for a COLO exit. 1350# 1351# @none: failover has never happened. This state does not occur 1352# in the COLO_EXIT event, and is only visible in the result of 1353# query-colo-status. 1354# 1355# @request: COLO exit is due to an external request. 1356# 1357# @error: COLO exit is due to an internal error. 1358# 1359# @processing: COLO is currently handling a failover (since 4.0). 1360# 1361# Since: 3.1 1362## 1363{ 'enum': 'COLOExitReason', 1364 'data': [ 'none', 'request', 'error' , 'processing' ] } 1365 1366## 1367# @x-colo-lost-heartbeat: 1368# 1369# Tell qemu that heartbeat is lost, request it to do takeover procedures. 1370# If this command is sent to the PVM, the Primary side will exit COLO mode. 1371# If sent to the Secondary, the Secondary side will run failover work, 1372# then takes over server operation to become the service VM. 1373# 1374# Features: 1375# @unstable: This command is experimental. 1376# 1377# Since: 2.8 1378# 1379# Example: 1380# 1381# -> { "execute": "x-colo-lost-heartbeat" } 1382# <- { "return": {} } 1383# 1384## 1385{ 'command': 'x-colo-lost-heartbeat', 1386 'features': [ 'unstable' ] } 1387 1388## 1389# @migrate_cancel: 1390# 1391# Cancel the current executing migration process. 1392# 1393# Returns: nothing on success 1394# 1395# Notes: This command succeeds even if there is no migration process running. 1396# 1397# Since: 0.14 1398# 1399# Example: 1400# 1401# -> { "execute": "migrate_cancel" } 1402# <- { "return": {} } 1403# 1404## 1405{ 'command': 'migrate_cancel' } 1406 1407## 1408# @migrate-continue: 1409# 1410# Continue migration when it's in a paused state. 1411# 1412# @state: The state the migration is currently expected to be in 1413# 1414# Returns: nothing on success 1415# 1416# Since: 2.11 1417# 1418# Example: 1419# 1420# -> { "execute": "migrate-continue" , "arguments": 1421# { "state": "pre-switchover" } } 1422# <- { "return": {} } 1423## 1424{ 'command': 'migrate-continue', 'data': {'state': 'MigrationStatus'} } 1425 1426## 1427# @migrate: 1428# 1429# Migrates the current running guest to another Virtual Machine. 1430# 1431# @uri: the Uniform Resource Identifier of the destination VM 1432# 1433# @blk: do block migration (full disk copy) 1434# 1435# @inc: incremental disk copy migration 1436# 1437# @detach: this argument exists only for compatibility reasons and 1438# is ignored by QEMU 1439# 1440# @resume: resume one paused migration, default "off". (since 3.0) 1441# 1442# Returns: nothing on success 1443# 1444# Since: 0.14 1445# 1446# Notes: 1447# 1448# 1. The 'query-migrate' command should be used to check migration's progress 1449# and final result (this information is provided by the 'status' member) 1450# 1451# 2. All boolean arguments default to false 1452# 1453# 3. The user Monitor's "detach" argument is invalid in QMP and should not 1454# be used 1455# 1456# Example: 1457# 1458# -> { "execute": "migrate", "arguments": { "uri": "tcp:0:4446" } } 1459# <- { "return": {} } 1460# 1461## 1462{ 'command': 'migrate', 1463 'data': {'uri': 'str', '*blk': 'bool', '*inc': 'bool', 1464 '*detach': 'bool', '*resume': 'bool' } } 1465 1466## 1467# @migrate-incoming: 1468# 1469# Start an incoming migration, the qemu must have been started 1470# with -incoming defer 1471# 1472# @uri: The Uniform Resource Identifier identifying the source or 1473# address to listen on 1474# 1475# Returns: nothing on success 1476# 1477# Since: 2.3 1478# 1479# Notes: 1480# 1481# 1. It's a bad idea to use a string for the uri, but it needs to stay 1482# compatible with -incoming and the format of the uri is already exposed 1483# above libvirt. 1484# 1485# 2. QEMU must be started with -incoming defer to allow migrate-incoming to 1486# be used. 1487# 1488# 3. The uri format is the same as for -incoming 1489# 1490# Example: 1491# 1492# -> { "execute": "migrate-incoming", 1493# "arguments": { "uri": "tcp::4446" } } 1494# <- { "return": {} } 1495# 1496## 1497{ 'command': 'migrate-incoming', 'data': {'uri': 'str' } } 1498 1499## 1500# @xen-save-devices-state: 1501# 1502# Save the state of all devices to file. The RAM and the block devices 1503# of the VM are not saved by this command. 1504# 1505# @filename: the file to save the state of the devices to as binary 1506# data. See xen-save-devices-state.txt for a description of the binary 1507# format. 1508# 1509# @live: Optional argument to ask QEMU to treat this command as part of a live 1510# migration. Default to true. (since 2.11) 1511# 1512# Returns: Nothing on success 1513# 1514# Since: 1.1 1515# 1516# Example: 1517# 1518# -> { "execute": "xen-save-devices-state", 1519# "arguments": { "filename": "/tmp/save" } } 1520# <- { "return": {} } 1521# 1522## 1523{ 'command': 'xen-save-devices-state', 1524 'data': {'filename': 'str', '*live':'bool' } } 1525 1526## 1527# @xen-set-global-dirty-log: 1528# 1529# Enable or disable the global dirty log mode. 1530# 1531# @enable: true to enable, false to disable. 1532# 1533# Returns: nothing 1534# 1535# Since: 1.3 1536# 1537# Example: 1538# 1539# -> { "execute": "xen-set-global-dirty-log", 1540# "arguments": { "enable": true } } 1541# <- { "return": {} } 1542# 1543## 1544{ 'command': 'xen-set-global-dirty-log', 'data': { 'enable': 'bool' } } 1545 1546## 1547# @xen-load-devices-state: 1548# 1549# Load the state of all devices from file. The RAM and the block devices 1550# of the VM are not loaded by this command. 1551# 1552# @filename: the file to load the state of the devices from as binary 1553# data. See xen-save-devices-state.txt for a description of the binary 1554# format. 1555# 1556# Since: 2.7 1557# 1558# Example: 1559# 1560# -> { "execute": "xen-load-devices-state", 1561# "arguments": { "filename": "/tmp/resume" } } 1562# <- { "return": {} } 1563# 1564## 1565{ 'command': 'xen-load-devices-state', 'data': {'filename': 'str'} } 1566 1567## 1568# @xen-set-replication: 1569# 1570# Enable or disable replication. 1571# 1572# @enable: true to enable, false to disable. 1573# 1574# @primary: true for primary or false for secondary. 1575# 1576# @failover: true to do failover, false to stop. but cannot be 1577# specified if 'enable' is true. default value is false. 1578# 1579# Returns: nothing. 1580# 1581# Example: 1582# 1583# -> { "execute": "xen-set-replication", 1584# "arguments": {"enable": true, "primary": false} } 1585# <- { "return": {} } 1586# 1587# Since: 2.9 1588## 1589{ 'command': 'xen-set-replication', 1590 'data': { 'enable': 'bool', 'primary': 'bool', '*failover' : 'bool' }, 1591 'if': 'CONFIG_REPLICATION' } 1592 1593## 1594# @ReplicationStatus: 1595# 1596# The result format for 'query-xen-replication-status'. 1597# 1598# @error: true if an error happened, false if replication is normal. 1599# 1600# @desc: the human readable error description string, when 1601# @error is 'true'. 1602# 1603# Since: 2.9 1604## 1605{ 'struct': 'ReplicationStatus', 1606 'data': { 'error': 'bool', '*desc': 'str' }, 1607 'if': 'CONFIG_REPLICATION' } 1608 1609## 1610# @query-xen-replication-status: 1611# 1612# Query replication status while the vm is running. 1613# 1614# Returns: A @ReplicationStatus object showing the status. 1615# 1616# Example: 1617# 1618# -> { "execute": "query-xen-replication-status" } 1619# <- { "return": { "error": false } } 1620# 1621# Since: 2.9 1622## 1623{ 'command': 'query-xen-replication-status', 1624 'returns': 'ReplicationStatus', 1625 'if': 'CONFIG_REPLICATION' } 1626 1627## 1628# @xen-colo-do-checkpoint: 1629# 1630# Xen uses this command to notify replication to trigger a checkpoint. 1631# 1632# Returns: nothing. 1633# 1634# Example: 1635# 1636# -> { "execute": "xen-colo-do-checkpoint" } 1637# <- { "return": {} } 1638# 1639# Since: 2.9 1640## 1641{ 'command': 'xen-colo-do-checkpoint', 1642 'if': 'CONFIG_REPLICATION' } 1643 1644## 1645# @COLOStatus: 1646# 1647# The result format for 'query-colo-status'. 1648# 1649# @mode: COLO running mode. If COLO is running, this field will return 1650# 'primary' or 'secondary'. 1651# 1652# @last-mode: COLO last running mode. If COLO is running, this field 1653# will return same like mode field, after failover we can 1654# use this field to get last colo mode. (since 4.0) 1655# 1656# @reason: describes the reason for the COLO exit. 1657# 1658# Since: 3.1 1659## 1660{ 'struct': 'COLOStatus', 1661 'data': { 'mode': 'COLOMode', 'last-mode': 'COLOMode', 1662 'reason': 'COLOExitReason' } } 1663 1664## 1665# @query-colo-status: 1666# 1667# Query COLO status while the vm is running. 1668# 1669# Returns: A @COLOStatus object showing the status. 1670# 1671# Example: 1672# 1673# -> { "execute": "query-colo-status" } 1674# <- { "return": { "mode": "primary", "last-mode": "none", "reason": "request" } } 1675# 1676# Since: 3.1 1677## 1678{ 'command': 'query-colo-status', 1679 'returns': 'COLOStatus' } 1680 1681## 1682# @migrate-recover: 1683# 1684# Provide a recovery migration stream URI. 1685# 1686# @uri: the URI to be used for the recovery of migration stream. 1687# 1688# Returns: nothing. 1689# 1690# Example: 1691# 1692# -> { "execute": "migrate-recover", 1693# "arguments": { "uri": "tcp:192.168.1.200:12345" } } 1694# <- { "return": {} } 1695# 1696# Since: 3.0 1697## 1698{ 'command': 'migrate-recover', 1699 'data': { 'uri': 'str' }, 1700 'allow-oob': true } 1701 1702## 1703# @migrate-pause: 1704# 1705# Pause a migration. Currently it only supports postcopy. 1706# 1707# Returns: nothing. 1708# 1709# Example: 1710# 1711# -> { "execute": "migrate-pause" } 1712# <- { "return": {} } 1713# 1714# Since: 3.0 1715## 1716{ 'command': 'migrate-pause', 'allow-oob': true } 1717 1718## 1719# @UNPLUG_PRIMARY: 1720# 1721# Emitted from source side of a migration when migration state is 1722# WAIT_UNPLUG. Device was unplugged by guest operating system. 1723# Device resources in QEMU are kept on standby to be able to re-plug it in case 1724# of migration failure. 1725# 1726# @device-id: QEMU device id of the unplugged device 1727# 1728# Since: 4.2 1729# 1730# Example: 1731# 1732# <- { "event": "UNPLUG_PRIMARY", 1733# "data": { "device-id": "hostdev0" }, 1734# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } } 1735# 1736## 1737{ 'event': 'UNPLUG_PRIMARY', 1738 'data': { 'device-id': 'str' } } 1739 1740## 1741# @DirtyRateVcpu: 1742# 1743# Dirty rate of vcpu. 1744# 1745# @id: vcpu index. 1746# 1747# @dirty-rate: dirty rate. 1748# 1749# Since: 6.2 1750## 1751{ 'struct': 'DirtyRateVcpu', 1752 'data': { 'id': 'int', 'dirty-rate': 'int64' } } 1753 1754## 1755# @DirtyRateStatus: 1756# 1757# An enumeration of dirtyrate status. 1758# 1759# @unstarted: the dirtyrate thread has not been started. 1760# 1761# @measuring: the dirtyrate thread is measuring. 1762# 1763# @measured: the dirtyrate thread has measured and results are available. 1764# 1765# Since: 5.2 1766## 1767{ 'enum': 'DirtyRateStatus', 1768 'data': [ 'unstarted', 'measuring', 'measured'] } 1769 1770## 1771# @DirtyRateMeasureMode: 1772# 1773# An enumeration of mode of measuring dirtyrate. 1774# 1775# @page-sampling: calculate dirtyrate by sampling pages. 1776# 1777# @dirty-ring: calculate dirtyrate by dirty ring. 1778# 1779# @dirty-bitmap: calculate dirtyrate by dirty bitmap. 1780# 1781# Since: 6.2 1782## 1783{ 'enum': 'DirtyRateMeasureMode', 1784 'data': ['page-sampling', 'dirty-ring', 'dirty-bitmap'] } 1785 1786## 1787# @DirtyRateInfo: 1788# 1789# Information about current dirty page rate of vm. 1790# 1791# @dirty-rate: an estimate of the dirty page rate of the VM in units of 1792# MB/s, present only when estimating the rate has completed. 1793# 1794# @status: status containing dirtyrate query status includes 1795# 'unstarted' or 'measuring' or 'measured' 1796# 1797# @start-time: start time in units of second for calculation 1798# 1799# @calc-time: time in units of second for sample dirty pages 1800# 1801# @sample-pages: page count per GB for sample dirty pages 1802# the default value is 512 (since 6.1) 1803# 1804# @mode: mode containing method of calculate dirtyrate includes 1805# 'page-sampling' and 'dirty-ring' (Since 6.2) 1806# 1807# @vcpu-dirty-rate: dirtyrate for each vcpu if dirty-ring 1808# mode specified (Since 6.2) 1809# 1810# Since: 5.2 1811## 1812{ 'struct': 'DirtyRateInfo', 1813 'data': {'*dirty-rate': 'int64', 1814 'status': 'DirtyRateStatus', 1815 'start-time': 'int64', 1816 'calc-time': 'int64', 1817 'sample-pages': 'uint64', 1818 'mode': 'DirtyRateMeasureMode', 1819 '*vcpu-dirty-rate': [ 'DirtyRateVcpu' ] } } 1820 1821## 1822# @calc-dirty-rate: 1823# 1824# start calculating dirty page rate for vm 1825# 1826# @calc-time: time in units of second for sample dirty pages 1827# 1828# @sample-pages: page count per GB for sample dirty pages 1829# the default value is 512 (since 6.1) 1830# 1831# @mode: mechanism of calculating dirtyrate includes 1832# 'page-sampling' and 'dirty-ring' (Since 6.1) 1833# 1834# Since: 5.2 1835# 1836# Example: 1837# 1838# -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 1, 1839# 'sample-pages': 512} } 1840# <- { "return": {} } 1841# 1842## 1843{ 'command': 'calc-dirty-rate', 'data': {'calc-time': 'int64', 1844 '*sample-pages': 'int', 1845 '*mode': 'DirtyRateMeasureMode'} } 1846 1847## 1848# @query-dirty-rate: 1849# 1850# query dirty page rate in units of MB/s for vm 1851# 1852# Since: 5.2 1853## 1854{ 'command': 'query-dirty-rate', 'returns': 'DirtyRateInfo' } 1855 1856## 1857# @DirtyLimitInfo: 1858# 1859# Dirty page rate limit information of a virtual CPU. 1860# 1861# @cpu-index: index of a virtual CPU. 1862# 1863# @limit-rate: upper limit of dirty page rate (MB/s) for a virtual 1864# CPU, 0 means unlimited. 1865# 1866# @current-rate: current dirty page rate (MB/s) for a virtual CPU. 1867# 1868# Since: 7.1 1869# 1870## 1871{ 'struct': 'DirtyLimitInfo', 1872 'data': { 'cpu-index': 'int', 1873 'limit-rate': 'uint64', 1874 'current-rate': 'uint64' } } 1875 1876## 1877# @set-vcpu-dirty-limit: 1878# 1879# Set the upper limit of dirty page rate for virtual CPUs. 1880# 1881# Requires KVM with accelerator property "dirty-ring-size" set. 1882# A virtual CPU's dirty page rate is a measure of its memory load. 1883# To observe dirty page rates, use @calc-dirty-rate. 1884# 1885# @cpu-index: index of a virtual CPU, default is all. 1886# 1887# @dirty-rate: upper limit of dirty page rate (MB/s) for virtual CPUs. 1888# 1889# Since: 7.1 1890# 1891# Example: 1892# 1893# -> {"execute": "set-vcpu-dirty-limit"} 1894# "arguments": { "dirty-rate": 200, 1895# "cpu-index": 1 } } 1896# <- { "return": {} } 1897# 1898## 1899{ 'command': 'set-vcpu-dirty-limit', 1900 'data': { '*cpu-index': 'int', 1901 'dirty-rate': 'uint64' } } 1902 1903## 1904# @cancel-vcpu-dirty-limit: 1905# 1906# Cancel the upper limit of dirty page rate for virtual CPUs. 1907# 1908# Cancel the dirty page limit for the vCPU which has been set with 1909# set-vcpu-dirty-limit command. Note that this command requires 1910# support from dirty ring, same as the "set-vcpu-dirty-limit". 1911# 1912# @cpu-index: index of a virtual CPU, default is all. 1913# 1914# Since: 7.1 1915# 1916# Example: 1917# 1918# -> {"execute": "cancel-vcpu-dirty-limit"}, 1919# "arguments": { "cpu-index": 1 } } 1920# <- { "return": {} } 1921# 1922## 1923{ 'command': 'cancel-vcpu-dirty-limit', 1924 'data': { '*cpu-index': 'int'} } 1925 1926## 1927# @query-vcpu-dirty-limit: 1928# 1929# Returns information about virtual CPU dirty page rate limits, if any. 1930# 1931# Since: 7.1 1932# 1933# Example: 1934# 1935# -> {"execute": "query-vcpu-dirty-limit"} 1936# <- {"return": [ 1937# { "limit-rate": 60, "current-rate": 3, "cpu-index": 0}, 1938# { "limit-rate": 60, "current-rate": 3, "cpu-index": 1}]} 1939# 1940## 1941{ 'command': 'query-vcpu-dirty-limit', 1942 'returns': [ 'DirtyLimitInfo' ] } 1943 1944## 1945# @MigrationThreadInfo: 1946# 1947# Information about migrationthreads 1948# 1949# @name: the name of migration thread 1950# 1951# @thread-id: ID of the underlying host thread 1952# 1953# Since: 7.2 1954## 1955{ 'struct': 'MigrationThreadInfo', 1956 'data': {'name': 'str', 1957 'thread-id': 'int'} } 1958 1959## 1960# @query-migrationthreads: 1961# 1962# Returns information of migration threads 1963# 1964# data: migration thread name 1965# 1966# Returns: information about migration threads 1967# 1968# Since: 7.2 1969## 1970{ 'command': 'query-migrationthreads', 1971 'returns': ['MigrationThreadInfo'] } 1972 1973## 1974# @snapshot-save: 1975# 1976# Save a VM snapshot 1977# 1978# @job-id: identifier for the newly created job 1979# @tag: name of the snapshot to create 1980# @vmstate: block device node name to save vmstate to 1981# @devices: list of block device node names to save a snapshot to 1982# 1983# Applications should not assume that the snapshot save is complete 1984# when this command returns. The job commands / events must be used 1985# to determine completion and to fetch details of any errors that arise. 1986# 1987# Note that execution of the guest CPUs may be stopped during the 1988# time it takes to save the snapshot. A future version of QEMU 1989# may ensure CPUs are executing continuously. 1990# 1991# It is strongly recommended that @devices contain all writable 1992# block device nodes if a consistent snapshot is required. 1993# 1994# If @tag already exists, an error will be reported 1995# 1996# Returns: nothing 1997# 1998# Example: 1999# 2000# -> { "execute": "snapshot-save", 2001# "arguments": { 2002# "job-id": "snapsave0", 2003# "tag": "my-snap", 2004# "vmstate": "disk0", 2005# "devices": ["disk0", "disk1"] 2006# } 2007# } 2008# <- { "return": { } } 2009# <- {"event": "JOB_STATUS_CHANGE", 2010# "timestamp": {"seconds": 1432121972, "microseconds": 744001}, 2011# "data": {"status": "created", "id": "snapsave0"}} 2012# <- {"event": "JOB_STATUS_CHANGE", 2013# "timestamp": {"seconds": 1432122172, "microseconds": 744001}, 2014# "data": {"status": "running", "id": "snapsave0"}} 2015# <- {"event": "STOP", 2016# "timestamp": {"seconds": 1432122372, "microseconds": 744001} } 2017# <- {"event": "RESUME", 2018# "timestamp": {"seconds": 1432122572, "microseconds": 744001} } 2019# <- {"event": "JOB_STATUS_CHANGE", 2020# "timestamp": {"seconds": 1432122772, "microseconds": 744001}, 2021# "data": {"status": "waiting", "id": "snapsave0"}} 2022# <- {"event": "JOB_STATUS_CHANGE", 2023# "timestamp": {"seconds": 1432122972, "microseconds": 744001}, 2024# "data": {"status": "pending", "id": "snapsave0"}} 2025# <- {"event": "JOB_STATUS_CHANGE", 2026# "timestamp": {"seconds": 1432123172, "microseconds": 744001}, 2027# "data": {"status": "concluded", "id": "snapsave0"}} 2028# -> {"execute": "query-jobs"} 2029# <- {"return": [{"current-progress": 1, 2030# "status": "concluded", 2031# "total-progress": 1, 2032# "type": "snapshot-save", 2033# "id": "snapsave0"}]} 2034# 2035# Since: 6.0 2036## 2037{ 'command': 'snapshot-save', 2038 'data': { 'job-id': 'str', 2039 'tag': 'str', 2040 'vmstate': 'str', 2041 'devices': ['str'] } } 2042 2043## 2044# @snapshot-load: 2045# 2046# Load a VM snapshot 2047# 2048# @job-id: identifier for the newly created job 2049# @tag: name of the snapshot to load. 2050# @vmstate: block device node name to load vmstate from 2051# @devices: list of block device node names to load a snapshot from 2052# 2053# Applications should not assume that the snapshot load is complete 2054# when this command returns. The job commands / events must be used 2055# to determine completion and to fetch details of any errors that arise. 2056# 2057# Note that execution of the guest CPUs will be stopped during the 2058# time it takes to load the snapshot. 2059# 2060# It is strongly recommended that @devices contain all writable 2061# block device nodes that can have changed since the original 2062# @snapshot-save command execution. 2063# 2064# Returns: nothing 2065# 2066# Example: 2067# 2068# -> { "execute": "snapshot-load", 2069# "arguments": { 2070# "job-id": "snapload0", 2071# "tag": "my-snap", 2072# "vmstate": "disk0", 2073# "devices": ["disk0", "disk1"] 2074# } 2075# } 2076# <- { "return": { } } 2077# <- {"event": "JOB_STATUS_CHANGE", 2078# "timestamp": {"seconds": 1472124172, "microseconds": 744001}, 2079# "data": {"status": "created", "id": "snapload0"}} 2080# <- {"event": "JOB_STATUS_CHANGE", 2081# "timestamp": {"seconds": 1472125172, "microseconds": 744001}, 2082# "data": {"status": "running", "id": "snapload0"}} 2083# <- {"event": "STOP", 2084# "timestamp": {"seconds": 1472125472, "microseconds": 744001} } 2085# <- {"event": "RESUME", 2086# "timestamp": {"seconds": 1472125872, "microseconds": 744001} } 2087# <- {"event": "JOB_STATUS_CHANGE", 2088# "timestamp": {"seconds": 1472126172, "microseconds": 744001}, 2089# "data": {"status": "waiting", "id": "snapload0"}} 2090# <- {"event": "JOB_STATUS_CHANGE", 2091# "timestamp": {"seconds": 1472127172, "microseconds": 744001}, 2092# "data": {"status": "pending", "id": "snapload0"}} 2093# <- {"event": "JOB_STATUS_CHANGE", 2094# "timestamp": {"seconds": 1472128172, "microseconds": 744001}, 2095# "data": {"status": "concluded", "id": "snapload0"}} 2096# -> {"execute": "query-jobs"} 2097# <- {"return": [{"current-progress": 1, 2098# "status": "concluded", 2099# "total-progress": 1, 2100# "type": "snapshot-load", 2101# "id": "snapload0"}]} 2102# 2103# Since: 6.0 2104## 2105{ 'command': 'snapshot-load', 2106 'data': { 'job-id': 'str', 2107 'tag': 'str', 2108 'vmstate': 'str', 2109 'devices': ['str'] } } 2110 2111## 2112# @snapshot-delete: 2113# 2114# Delete a VM snapshot 2115# 2116# @job-id: identifier for the newly created job 2117# @tag: name of the snapshot to delete. 2118# @devices: list of block device node names to delete a snapshot from 2119# 2120# Applications should not assume that the snapshot delete is complete 2121# when this command returns. The job commands / events must be used 2122# to determine completion and to fetch details of any errors that arise. 2123# 2124# Returns: nothing 2125# 2126# Example: 2127# 2128# -> { "execute": "snapshot-delete", 2129# "arguments": { 2130# "job-id": "snapdelete0", 2131# "tag": "my-snap", 2132# "devices": ["disk0", "disk1"] 2133# } 2134# } 2135# <- { "return": { } } 2136# <- {"event": "JOB_STATUS_CHANGE", 2137# "timestamp": {"seconds": 1442124172, "microseconds": 744001}, 2138# "data": {"status": "created", "id": "snapdelete0"}} 2139# <- {"event": "JOB_STATUS_CHANGE", 2140# "timestamp": {"seconds": 1442125172, "microseconds": 744001}, 2141# "data": {"status": "running", "id": "snapdelete0"}} 2142# <- {"event": "JOB_STATUS_CHANGE", 2143# "timestamp": {"seconds": 1442126172, "microseconds": 744001}, 2144# "data": {"status": "waiting", "id": "snapdelete0"}} 2145# <- {"event": "JOB_STATUS_CHANGE", 2146# "timestamp": {"seconds": 1442127172, "microseconds": 744001}, 2147# "data": {"status": "pending", "id": "snapdelete0"}} 2148# <- {"event": "JOB_STATUS_CHANGE", 2149# "timestamp": {"seconds": 1442128172, "microseconds": 744001}, 2150# "data": {"status": "concluded", "id": "snapdelete0"}} 2151# -> {"execute": "query-jobs"} 2152# <- {"return": [{"current-progress": 1, 2153# "status": "concluded", 2154# "total-progress": 1, 2155# "type": "snapshot-delete", 2156# "id": "snapdelete0"}]} 2157# 2158# Since: 6.0 2159## 2160{ 'command': 'snapshot-delete', 2161 'data': { 'job-id': 'str', 2162 'tag': 'str', 2163 'devices': ['str'] } } 2164