# -*- Mode: Python -*-
# vim: filetype=python
#

##
# = Migration
##

{ 'include': 'common.json' }
{ 'include': 'sockets.json' }

##
# @MigrationStats:
#
# Detailed migration status.
#
# @transferred: number of bytes already transferred to the target VM
#
# @remaining: number of bytes remaining to be transferred to the target VM
#
# @total: total number of bytes involved in the migration process
#
# @duplicate: number of duplicate (zero) pages (since 1.2)
#
# @skipped: number of skipped zero pages (since 1.5)
#
# @normal: number of normal pages (since 1.2)
#
# @normal-bytes: number of normal bytes sent (since 1.2)
#
# @dirty-pages-rate: number of pages dirtied per second by the
#     guest (since 1.3)
#
# @mbps: throughput in megabits/sec. (since 1.6)
#
# @dirty-sync-count: number of times that dirty ram was synchronized
#     (since 2.1)
#
# @postcopy-requests: The number of page requests received from the
#     destination (since 2.7)
#
# @page-size: The number of bytes per page for the various page-based
#     statistics (since 2.10)
#
# @multifd-bytes: The number of bytes sent through multifd (since 3.0)
#
# @pages-per-second: the number of memory pages transferred per second
#     (Since 4.0)
#
# @precopy-bytes: The number of bytes sent in the pre-copy phase
#     (since 7.0).
#
# @downtime-bytes: The number of bytes sent while the guest is paused
#     (since 7.0).
#
# @postcopy-bytes: The number of bytes sent during the post-copy phase
#     (since 7.0).
#
# @dirty-sync-missed-zero-copy: Number of times dirty RAM
#     synchronization could not avoid copying dirty pages. This is
#     between 0 and @dirty-sync-count * @multifd-channels. (since 7.1)
#
# Since: 0.14
##
{ 'struct': 'MigrationStats',
  'data': {'transferred': 'int', 'remaining': 'int', 'total': 'int',
           'duplicate': 'int', 'skipped': 'int', 'normal': 'int',
           'normal-bytes': 'int', 'dirty-pages-rate': 'int',
           'mbps': 'number', 'dirty-sync-count': 'int',
           'postcopy-requests': 'int', 'page-size': 'int',
           'multifd-bytes': 'uint64', 'pages-per-second': 'uint64',
           'precopy-bytes': 'uint64', 'downtime-bytes': 'uint64',
           'postcopy-bytes': 'uint64',
           'dirty-sync-missed-zero-copy': 'uint64' } }

##
# @XBZRLECacheStats:
#
# Detailed XBZRLE migration cache statistics
#
# @cache-size: XBZRLE cache size
#
# @bytes: number of bytes already transferred to the target VM
#
# @pages: number of pages transferred to the target VM
#
# @cache-miss: number of cache misses
#
# @cache-miss-rate: rate of cache misses (since 2.1)
#
# @encoding-rate: rate of encoded bytes (since 5.1)
#
# @overflow: number of overflows
#
# Since: 1.2
##
{ 'struct': 'XBZRLECacheStats',
  'data': {'cache-size': 'size', 'bytes': 'int', 'pages': 'int',
           'cache-miss': 'int', 'cache-miss-rate': 'number',
           'encoding-rate': 'number', 'overflow': 'int' } }
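
# The XBZRLE statistics above only show up in query-migrate output once
# the corresponding capability is in use. A minimal sketch of enabling
# it (the cache size value is arbitrary; it must be a power of 2 and a
# multiple of the target page size):
#
# -> { "execute": "migrate-set-capabilities",
#      "arguments": { "capabilities": [
#          { "capability": "xbzrle", "state": true } ] } }
# <- { "return": {} }
# -> { "execute": "migrate-set-parameters",
#      "arguments": { "xbzrle-cache-size": 67108864 } }
# <- { "return": {} }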

##
# @CompressionStats:
#
# Detailed migration compression statistics
#
# @pages: number of pages compressed and transferred to the target VM
#
# @busy: count of times that no free thread was available to compress data
#
# @busy-rate: ratio of time the compression threads are busy
#
# @compressed-size: number of bytes after compression
#
# @compression-rate: rate of compressed size
#
# Since: 3.1
##
{ 'struct': 'CompressionStats',
  'data': {'pages': 'int', 'busy': 'int', 'busy-rate': 'number',
           'compressed-size': 'int', 'compression-rate': 'number' } }

##
# @MigrationStatus:
#
# An enumeration of migration status.
#
# @none: no migration has ever happened.
#
# @setup: migration process has been initiated.
#
# @cancelling: in the process of cancelling migration.
#
# @cancelled: cancelling migration is finished.
#
# @active: in the process of doing migration.
#
# @postcopy-active: like active, but now in postcopy mode. (since 2.5)
#
# @postcopy-paused: during postcopy but paused. (since 3.0)
#
# @postcopy-recover: trying to recover from a paused postcopy. (since 3.0)
#
# @completed: migration is finished.
#
# @failed: some error occurred during migration process.
#
# @colo: VM is in the process of fault tolerance; the VM cannot get into
#     this state unless the colo capability is enabled for migration.
#     (since 2.8)
#
# @pre-switchover: Paused before device serialisation. (since 2.11)
#
# @device: During device serialisation when pause-before-switchover is
#     enabled (since 2.11)
#
# @wait-unplug: wait for device unplug request by guest OS to be
#     completed. (since 4.2)
#
# Since: 2.3
##
{ 'enum': 'MigrationStatus',
  'data': [ 'none', 'setup', 'cancelling', 'cancelled',
            'active', 'postcopy-active', 'postcopy-paused',
            'postcopy-recover', 'completed', 'failed', 'colo',
            'pre-switchover', 'device', 'wait-unplug' ] }

##
# @VfioStats:
#
# Detailed VFIO devices migration statistics
#
# @transferred: number of bytes transferred to the target VM by VFIO devices
#
# Since: 5.2
##
{ 'struct': 'VfioStats',
  'data': {'transferred': 'int' } }

##
# @MigrationInfo:
#
# Information about current migration process.
#
# @status: @MigrationStatus describing the current migration status.
#     If this field is not returned, no migration process has been
#     initiated
#
# @ram: @MigrationStats containing detailed migration status, only
#     returned if status is 'active' or 'completed' (since 1.2)
#
# @disk: @MigrationStats containing detailed disk migration status,
#     only returned if status is 'active' and it is a block migration
#
# @xbzrle-cache: @XBZRLECacheStats containing detailed XBZRLE
#     migration statistics, only returned if XBZRLE feature is on and
#     status is 'active' or 'completed' (since 1.2)
#
# @total-time: total amount of milliseconds since migration started.
#     If migration has ended, it returns the total migration time.
#     (since 1.2)
#
# @downtime: only present when migration finishes correctly; total
#     downtime in milliseconds for the guest. (since 1.3)
#
# @expected-downtime: only present while migration is active; expected
#     downtime in milliseconds for the guest in last walk of the dirty
#     bitmap. (since 1.3)
#
# @setup-time: amount of setup time in milliseconds *before* the
#     iterations begin but *after* the QMP command is issued. This is
#     designed to provide an accounting of any activities (such as RDMA
#     pinning) which may be expensive, but do not actually occur during
#     the iterative migration rounds themselves. (since 1.6)
#
# @cpu-throttle-percentage: percentage of time guest cpus are being
#     throttled during auto-converge. This is only present when
#     auto-converge has started throttling guest cpus. (Since 2.7)
#
# @error-desc: the human readable error description string, when
#     @status is 'failed'. Clients should not attempt to parse the
#     error strings. (Since 2.7)
#
# @postcopy-blocktime: total time when all vCPU were blocked during
#     postcopy live migration. This is only present when the
#     postcopy-blocktime migration capability is enabled. (Since 3.0)
#
# @postcopy-vcpu-blocktime: list of the postcopy blocktime per vCPU.
#     This is only present when the postcopy-blocktime migration
#     capability is enabled. (Since 3.0)
#
# @compression: migration compression statistics, only returned if
#     compression feature is on and status is 'active' or 'completed'
#     (Since 3.1)
#
# @socket-address: Only used for tcp, to know what the real port is
#     (Since 4.0)
#
# @vfio: @VfioStats containing detailed VFIO devices migration
#     statistics, only returned if VFIO device is present, migration is
#     supported by all VFIO devices and status is 'active' or
#     'completed' (since 5.2)
#
# @blocked-reasons: A list of reasons an outgoing migration is blocked.
#     Present and non-empty when migration is blocked. (since 6.0)
#
# Since: 0.14
##
{ 'struct': 'MigrationInfo',
  'data': {'*status': 'MigrationStatus', '*ram': 'MigrationStats',
           '*disk': 'MigrationStats',
           '*vfio': 'VfioStats',
           '*xbzrle-cache': 'XBZRLECacheStats',
           '*total-time': 'int',
           '*expected-downtime': 'int',
           '*downtime': 'int',
           '*setup-time': 'int',
           '*cpu-throttle-percentage': 'int',
           '*error-desc': 'str',
           '*blocked-reasons': ['str'],
           '*postcopy-blocktime': 'uint32',
           '*postcopy-vcpu-blocktime': ['uint32'],
           '*compression': 'CompressionStats',
           '*socket-address': ['SocketAddress'] } }

##
# @query-migrate:
#
# Returns information about current migration process. If migration
# is active there will be another json-object with RAM migration
# status and if block migration is active another one with block
# migration status.
#
# Returns: @MigrationInfo
#
# Since: 0.14
#
# Example:
#
# 1. Before the first migration
#
# -> { "execute": "query-migrate" }
# <- { "return": {} }
#
# 2. Migration is done and has succeeded
#
# -> { "execute": "query-migrate" }
# <- { "return": {
#         "status": "completed",
#         "total-time":12345,
#         "setup-time":12345,
#         "downtime":12345,
#         "ram":{
#           "transferred":123,
#           "remaining":123,
#           "total":246,
#           "duplicate":123,
#           "normal":123,
#           "normal-bytes":123456,
#           "dirty-sync-count":15
#         }
#      }
#    }
#
# 3. Migration is done and has failed
#
# -> { "execute": "query-migrate" }
# <- { "return": { "status": "failed" } }
#
# 4. Migration is being performed and is not a block migration:
#
# -> { "execute": "query-migrate" }
# <- {
#       "return":{
#          "status":"active",
#          "total-time":12345,
#          "setup-time":12345,
#          "expected-downtime":12345,
#          "ram":{
#             "transferred":123,
#             "remaining":123,
#             "total":246,
#             "duplicate":123,
#             "normal":123,
#             "normal-bytes":123456,
#             "dirty-sync-count":15
#          }
#       }
#    }
#
# 5. Migration is being performed and is a block migration:
#
# -> { "execute": "query-migrate" }
# <- {
#       "return":{
#          "status":"active",
#          "total-time":12345,
#          "setup-time":12345,
#          "expected-downtime":12345,
#          "ram":{
#             "total":1057024,
#             "remaining":1053304,
#             "transferred":3720,
#             "duplicate":123,
#             "normal":123,
#             "normal-bytes":123456,
#             "dirty-sync-count":15
#          },
#          "disk":{
#             "total":20971520,
#             "remaining":20880384,
#             "transferred":91136
#          }
#       }
#    }
#
# 6. Migration is being performed and XBZRLE is active:
#
# -> { "execute": "query-migrate" }
# <- {
#       "return":{
#          "status":"active",
#          "total-time":12345,
#          "setup-time":12345,
#          "expected-downtime":12345,
#          "ram":{
#             "total":1057024,
#             "remaining":1053304,
#             "transferred":3720,
#             "duplicate":10,
#             "normal":3333,
#             "normal-bytes":3412992,
#             "dirty-sync-count":15
#          },
#          "xbzrle-cache":{
#             "cache-size":67108864,
#             "bytes":20971520,
#             "pages":2444343,
#             "cache-miss":2244,
#             "cache-miss-rate":0.123,
#             "encoding-rate":80.1,
#             "overflow":34434
#          }
#       }
#    }
#
##
{ 'command': 'query-migrate', 'returns': 'MigrationInfo' }

##
# @MigrationCapability:
#
# Migration capabilities enumeration
#
# @xbzrle: Migration supports xbzrle (Xor Based Zero Run Length
#     Encoding). This feature allows us to minimize migration traffic
#     for certain work loads, by sending compressed difference of the
#     pages
#
# @rdma-pin-all: Controls whether or not the entire VM memory footprint
#     is mlock()'d on demand or all at once. Refer to docs/rdma.txt for
#     usage. Disabled by default. (since 2.0)
#
# @zero-blocks: During storage migration encode blocks of zeroes
#     efficiently. This essentially saves 1MB of zeroes per block on the
#     wire. Enabling requires both source and target VM to support this
#     feature. To enable it is sufficient to enable the capability on
#     the source VM. The feature is disabled by default. (since 1.6)
#
# @compress: Use multiple compression threads to accelerate live
#     migration. This feature can help to reduce the migration traffic,
#     by sending compressed pages. Please note that if compress and
#     xbzrle are both on, compress only takes effect in the ram bulk
#     stage; after that, it will be disabled and only xbzrle takes
#     effect, this can help to minimize migration traffic. The feature
#     is disabled by default. (since 2.4)
#
# @events: generate events for each migration state change
#     (since 2.4)
#
# @auto-converge: If enabled, QEMU will automatically throttle down the
#     guest to speed up convergence of RAM migration. (since 1.6)
#
# @postcopy-ram: Start executing on the migration target before all of
#     RAM has been migrated, pulling the remaining pages along as
#     needed. The capability must have the same setting on both source
#     and target or migration will not even start. NOTE: If the
#     migration fails during postcopy the VM will fail. (since 2.6)
#
# @x-colo: If enabled, migration will never end, and the state of the VM
#     on the primary side will be migrated continuously to the VM on the
#     secondary side; this process is called COarse-Grain LOck Stepping
#     (COLO) for Non-stop Service. (since 2.8)
#
# @release-ram: if enabled, qemu will free the migrated ram pages on the
#     source during postcopy-ram migration. (since 2.9)
#
# @block: If enabled, QEMU will also migrate the contents of all block
#     devices. Default is disabled. A possible alternative uses mirror
#     jobs to a builtin NBD server on the destination, which offers more
#     flexibility. (Since 2.10)
#
# @return-path: If enabled, migration will use the return path even
#     for precopy. (since 2.10)
#
# @pause-before-switchover: Pause outgoing migration before serialising
#     device state and before disabling block IO (since 2.11)
#
# @multifd: Use more than one fd for migration (since 4.0)
#
# @dirty-bitmaps: If enabled, QEMU will migrate named dirty bitmaps.
#     (since 2.12)
#
# @postcopy-blocktime: Calculate downtime for postcopy live migration
#     (since 3.0)
#
# @late-block-activate: If enabled, the destination will not activate
#     block devices (and thus take locks) immediately at the end of
#     migration. (since 3.0)
#
# @x-ignore-shared: If enabled, QEMU will not migrate shared memory
#     (since 4.0)
#
# @validate-uuid: Send the UUID of the source to allow the destination
#     to ensure it is the same. (since 4.2)
#
# @background-snapshot: If enabled, the migration stream will be a
#     snapshot of the VM exactly at the point when the migration
#     procedure starts. The VM RAM is saved with running VM.
#     (since 6.0)
#
# @zero-copy-send: Controls behavior on sending memory pages on
#     migration. When true, enables a zero-copy mechanism for sending
#     memory pages, if host supports it. Requires that QEMU be permitted
#     to use locked memory for guest RAM pages. (since 7.1)
#
# @postcopy-preempt: If enabled, the migration process will allow
#     postcopy requests to preempt precopy stream, so postcopy requests
#     will be handled faster. This is a performance feature and should
#     not affect the correctness of postcopy migration. (since 7.1)
#
# Features:
# @unstable: Members @x-colo and @x-ignore-shared are experimental.
#
# Since: 1.2
##
{ 'enum': 'MigrationCapability',
  'data': ['xbzrle', 'rdma-pin-all', 'auto-converge', 'zero-blocks',
           'compress', 'events', 'postcopy-ram',
           { 'name': 'x-colo', 'features': [ 'unstable' ] },
           'release-ram',
           'block', 'return-path', 'pause-before-switchover', 'multifd',
           'dirty-bitmaps', 'postcopy-blocktime', 'late-block-activate',
           { 'name': 'x-ignore-shared', 'features': [ 'unstable' ] },
           'validate-uuid', 'background-snapshot',
           'zero-copy-send', 'postcopy-preempt'] }
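
# As a usage sketch (illustrative only): with the 'events' capability
# enabled, each migration state change additionally emits a MIGRATION
# event, which is documented further below.
#
# -> { "execute": "migrate-set-capabilities",
#      "arguments": { "capabilities": [
#          { "capability": "events", "state": true } ] } }
# <- { "return": {} }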

##
# @MigrationCapabilityStatus:
#
# Migration capability information
#
# @capability: capability enum
#
# @state: capability state bool
#
# Since: 1.2
##
{ 'struct': 'MigrationCapabilityStatus',
  'data': { 'capability': 'MigrationCapability', 'state': 'bool' } }

##
# @migrate-set-capabilities:
#
# Enable/Disable the following migration capabilities (like xbzrle)
#
# @capabilities: json array of capability modifications to make
#
# Since: 1.2
#
# Example:
#
# -> { "execute": "migrate-set-capabilities" , "arguments":
#      { "capabilities": [ { "capability": "xbzrle", "state": true } ] } }
#
##
{ 'command': 'migrate-set-capabilities',
  'data': { 'capabilities': ['MigrationCapabilityStatus'] } }

##
# @query-migrate-capabilities:
#
# Returns information about the current migration capabilities status
#
# Returns: @MigrationCapabilityStatus
#
# Since: 1.2
#
# Example:
#
# -> { "execute": "query-migrate-capabilities" }
# <- { "return": [
#       {"state": false, "capability": "xbzrle"},
#       {"state": false, "capability": "rdma-pin-all"},
#       {"state": false, "capability": "auto-converge"},
#       {"state": false, "capability": "zero-blocks"},
#       {"state": false, "capability": "compress"},
#       {"state": true, "capability": "events"},
#       {"state": false, "capability": "postcopy-ram"},
#       {"state": false, "capability": "x-colo"}
#    ]}
#
##
{ 'command': 'query-migrate-capabilities', 'returns': ['MigrationCapabilityStatus']}

##
# @MultiFDCompression:
#
# An enumeration of multifd compression methods.
#
# @none: no compression.
#
# @zlib: use zlib compression method.
#
# @zstd: use zstd compression method.
#
# Since: 5.0
##
{ 'enum': 'MultiFDCompression',
  'data': [ 'none', 'zlib',
            { 'name': 'zstd', 'if': 'CONFIG_ZSTD' } ] }

##
# @BitmapMigrationBitmapAliasTransform:
#
# @persistent: If present, the bitmap will be made persistent
#     or transient depending on this parameter.
#
# Since: 6.0
##
{ 'struct': 'BitmapMigrationBitmapAliasTransform',
  'data': {
      '*persistent': 'bool'
  } }

##
# @BitmapMigrationBitmapAlias:
#
# @name: The name of the bitmap.
#
# @alias: An alias name for migration (for example the bitmap name on
#     the opposite site).
#
# @transform: Allows the modification of the migrated bitmap.
#     (since 6.0)
#
# Since: 5.2
##
{ 'struct': 'BitmapMigrationBitmapAlias',
  'data': {
      'name': 'str',
      'alias': 'str',
      '*transform': 'BitmapMigrationBitmapAliasTransform'
  } }

##
# @BitmapMigrationNodeAlias:
#
# Maps a block node name and the bitmaps it has to aliases for dirty
# bitmap migration.
#
# @node-name: A block node name.
#
# @alias: An alias block node name for migration (for example the
#     node name on the opposite site).
#
# @bitmaps: Mappings for the bitmaps on this node.
#
# Since: 5.2
##
{ 'struct': 'BitmapMigrationNodeAlias',
  'data': {
      'node-name': 'str',
      'alias': 'str',
      'bitmaps': [ 'BitmapMigrationBitmapAlias' ]
  } }

##
# @MigrationParameter:
#
# Migration parameters enumeration
#
# @announce-initial: Initial delay (in milliseconds) before sending the
#     first announce (Since 4.0)
#
# @announce-max: Maximum delay (in milliseconds) between packets in the
#     announcement (Since 4.0)
#
# @announce-rounds: Number of self-announce packets sent after migration
#     (Since 4.0)
#
# @announce-step: Increase in delay (in milliseconds) between subsequent
#     packets in the announcement (Since 4.0)
#
# @compress-level: Set the compression level to be used in live
#     migration. The compression level is an integer between 0 and 9,
#     where 0 means no compression, 1 means the best compression speed,
#     and 9 means the best compression ratio, which will consume more
#     CPU.
#
# @compress-threads: Set compression thread count to be used in live
#     migration. The compression thread count is an integer between 1
#     and 255.
#
# @compress-wait-thread: Controls behavior when all compression threads
#     are currently busy. If true (default), wait for a free compression
#     thread to become available; otherwise, send the page uncompressed.
#     (Since 3.1)
#
# @decompress-threads: Set decompression thread count to be used in live
#     migration. The decompression thread count is an integer between 1
#     and 255. Usually, decompression is at least 4 times as fast as
#     compression, so setting decompress-threads to about 1/4 of
#     compress-threads is adequate.
#
# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
#     bytes_xfer_period to trigger throttling. It is expressed as a
#     percentage. The default value is 50. (Since 5.0)
#
# @cpu-throttle-initial: Initial percentage of time guest cpus are
#     throttled when migration auto-converge is activated. The default
#     value is 20. (Since 2.7)
#
# @cpu-throttle-increment: throttle percentage increase each time
#     auto-converge detects that migration is not making progress. The
#     default value is 10. (Since 2.7)
#
# @cpu-throttle-tailslow: Make CPU throttling slower at the tail stage.
#     At the tail stage of throttling, the guest is very sensitive to
#     the CPU percentage while @cpu-throttle-increment is usually
#     excessive. If this parameter is true, we will compute the ideal
#     CPU percentage used by the guest, which may exactly make the dirty
#     rate match the dirty rate threshold. Then we will choose the
#     smaller throttle increment between the one specified by
#     @cpu-throttle-increment and the one generated by the ideal CPU
#     percentage. Therefore, it is compatible with traditional
#     throttling, while the throttle increment won't be excessive at the
#     tail stage. The default value is false. (Since 5.1)
#
# @tls-creds: ID of the 'tls-creds' object that provides credentials for
#     establishing a TLS connection over the migration data channel. On
#     the outgoing side of the migration, the credentials must be for a
#     'client' endpoint, while for the incoming side the credentials
#     must be for a 'server' endpoint. Setting this will enable TLS for
#     all migrations. The default is unset, resulting in unsecured
#     migration at the QEMU level. (Since 2.7)
#
# @tls-hostname: hostname of the target host for the migration. This is
#     required when using x509 based TLS credentials and the migration
#     URI does not already include a hostname. For example if using fd:
#     or exec: based migration, the hostname must be provided so that
#     the server's x509 certificate identity can be validated.
#     (Since 2.7)
#
# @tls-authz: ID of the 'authz' object subclass that provides access
#     control checking of the TLS x509 certificate distinguished name.
#     This object is only resolved at time of use, so can be deleted and
#     recreated on the fly while the migration server is active. If
#     missing, it will default to denying access (Since 4.0)
#
# @max-bandwidth: maximum speed for migration, in bytes per second.
#     (Since 2.8)
#
# @downtime-limit: maximum tolerated downtime for migration, in
#     milliseconds. (Since 2.8)
#
# @x-checkpoint-delay: The delay time (in ms) between two COLO
#     checkpoints in periodic mode. (Since 2.8)
#
# @block-incremental: Affects how much storage is migrated when the
#     block migration capability is enabled. When false, the entire
#     storage backing chain is migrated into a flattened image at the
#     destination; when true, only the active qcow2 layer is migrated
#     and the destination must already have access to the same backing
#     chain as was used on the source. (since 2.10)
#
# @multifd-channels: Number of channels used to migrate data in
#     parallel. This is the same as the number of sockets used for
#     migration. The default value is 2 (since 4.0)
#
# @xbzrle-cache-size: cache size to be used by XBZRLE migration. It
#     needs to be a multiple of the target page size and a power of 2
#     (Since 2.11)
#
# @max-postcopy-bandwidth: Background transfer bandwidth during
#     postcopy. Defaults to 0 (unlimited). In bytes per second.
#     (Since 3.0)
#
# @max-cpu-throttle: maximum cpu throttle percentage.
#     Defaults to 99. (Since 3.1)
#
# @multifd-compression: Which compression method to use.
#     Defaults to none. (Since 5.0)
#
# @multifd-zlib-level: Set the compression level to be used in live
#     migration. The compression level is an integer between 0 and 9,
#     where 0 means no compression, 1 means the best compression speed,
#     and 9 means the best compression ratio, which will consume more
#     CPU. Defaults to 1. (Since 5.0)
#
# @multifd-zstd-level: Set the compression level to be used in live
#     migration. The compression level is an integer between 0 and 20,
#     where 0 means no compression, 1 means the best compression speed,
#     and 20 means the best compression ratio, which will consume more
#     CPU. Defaults to 1. (Since 5.0)
#
# @block-bitmap-mapping: Maps block nodes and bitmaps on them to aliases
#     for the purpose of dirty bitmap migration. Such aliases may for
#     example be the corresponding names on the opposite site. The
#     mapping must be one-to-one, but not necessarily complete: On the
#     source, unmapped bitmaps and all bitmaps on unmapped nodes will be
#     ignored. On the destination, encountering an unmapped alias in the
#     incoming migration stream will result in a report, and all further
#     bitmap migration data will then be discarded. Note that the
#     destination does not know about bitmaps it does not receive, so
#     there is no limitation or requirement regarding the number of
#     bitmaps received, or how they are named, or on which nodes they
#     are placed. By default (when this parameter has never been set),
#     bitmap names are mapped to themselves. Nodes are mapped to their
#     block device name if there is one, and to their node name
#     otherwise. (Since 5.2)
#
# Features:
# @unstable: Member @x-checkpoint-delay is experimental.
#
# Since: 2.4
##
{ 'enum': 'MigrationParameter',
  'data': ['announce-initial', 'announce-max',
           'announce-rounds', 'announce-step',
           'compress-level', 'compress-threads', 'decompress-threads',
           'compress-wait-thread', 'throttle-trigger-threshold',
           'cpu-throttle-initial', 'cpu-throttle-increment',
           'cpu-throttle-tailslow',
           'tls-creds', 'tls-hostname', 'tls-authz', 'max-bandwidth',
           'downtime-limit',
           { 'name': 'x-checkpoint-delay', 'features': [ 'unstable' ] },
           'block-incremental',
           'multifd-channels',
           'xbzrle-cache-size', 'max-postcopy-bandwidth',
           'max-cpu-throttle', 'multifd-compression',
           'multifd-zlib-level', 'multifd-zstd-level',
           'block-bitmap-mapping' ] }
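
# For illustration (all node and bitmap names below are made up), a
# possible @block-bitmap-mapping value, set through
# migrate-set-parameters, could look like this:
#
# -> { "execute": "migrate-set-parameters",
#      "arguments": { "block-bitmap-mapping": [
#          { "node-name": "node0", "alias": "node-alias0",
#            "bitmaps": [
#              { "name": "bitmap0", "alias": "bitmap-alias0",
#                "transform": { "persistent": true } } ] } ] } }
# <- { "return": {} }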

##
# @MigrateSetParameters:
#
# @announce-initial: Initial delay (in milliseconds) before sending the
#     first announce (Since 4.0)
#
# @announce-max: Maximum delay (in milliseconds) between packets in the
#     announcement (Since 4.0)
#
# @announce-rounds: Number of self-announce packets sent after migration
#     (Since 4.0)
#
# @announce-step: Increase in delay (in milliseconds) between subsequent
#     packets in the announcement (Since 4.0)
#
# @compress-level: compression level
#
# @compress-threads: compression thread count
#
# @compress-wait-thread: Controls behavior when all compression threads
#     are currently busy. If true (default), wait for a free compression
#     thread to become available; otherwise, send the page uncompressed.
#     (Since 3.1)
#
# @decompress-threads: decompression thread count
#
# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
#     bytes_xfer_period to trigger throttling. It is expressed as a
#     percentage. The default value is 50. (Since 5.0)
#
# @cpu-throttle-initial: Initial percentage of time guest cpus are
#     throttled when migration auto-converge is activated. The default
#     value is 20. (Since 2.7)
#
# @cpu-throttle-increment: throttle percentage increase each time
#     auto-converge detects that migration is not making progress. The
#     default value is 10. (Since 2.7)
#
# @cpu-throttle-tailslow: Make CPU throttling slower at the tail stage.
#     At the tail stage of throttling, the guest is very sensitive to
#     the CPU percentage while @cpu-throttle-increment is usually
#     excessive. If this parameter is true, we will compute the ideal
#     CPU percentage used by the guest, which may exactly make the dirty
#     rate match the dirty rate threshold. Then we will choose the
#     smaller throttle increment between the one specified by
#     @cpu-throttle-increment and the one generated by the ideal CPU
#     percentage. Therefore, it is compatible with traditional
#     throttling, while the throttle increment won't be excessive at the
#     tail stage. The default value is false. (Since 5.1)
#
# @tls-creds: ID of the 'tls-creds' object that provides credentials for
#     establishing a TLS connection over the migration data channel. On
#     the outgoing side of the migration, the credentials must be for a
#     'client' endpoint, while for the incoming side the credentials
#     must be for a 'server' endpoint. Setting this to a non-empty
#     string enables TLS for all migrations. An empty string means that
#     QEMU will use plain text mode for migration, rather than TLS.
#     (Since 2.9) Previously (since 2.7), this was reported by omitting
#     tls-creds instead.
#
# @tls-hostname: hostname of the target host for the migration. This is
#     required when using x509 based TLS credentials and the migration
#     URI does not already include a hostname. For example if using fd:
#     or exec: based migration, the hostname must be provided so that
#     the server's x509 certificate identity can be validated.
#     (Since 2.7) An empty string means that QEMU will use the hostname
#     associated with the migration URI, if any. (Since 2.9) Previously
#     (since 2.7), this was reported by omitting tls-hostname instead.
#
# @max-bandwidth: maximum speed for migration, in bytes per second.
#     (Since 2.8)
#
# @downtime-limit: maximum tolerated downtime for migration, in
#     milliseconds. (Since 2.8)
#
# @x-checkpoint-delay: the delay time between two COLO checkpoints.
#     (Since 2.8)
#
# @block-incremental: Affects how much storage is migrated when the
#     block migration capability is enabled. When false, the entire
#     storage backing chain is migrated into a flattened image at the
#     destination; when true, only the active qcow2 layer is migrated
#     and the destination must already have access to the same backing
#     chain as was used on the source. (since 2.10)
#
# @multifd-channels: Number of channels used to migrate data in
#     parallel. This is the same as the number of sockets used for
#     migration. The default value is 2 (since 4.0)
#
# @xbzrle-cache-size: cache size to be used by XBZRLE migration. It
#     needs to be a multiple of the target page size and a power of 2
#     (Since 2.11)
#
# @max-postcopy-bandwidth: Background transfer bandwidth during
#     postcopy. Defaults to 0 (unlimited). In bytes per second.
#     (Since 3.0)
#
# @max-cpu-throttle: maximum cpu throttle percentage.
#     The default value is 99. (Since 3.1)
#
# @multifd-compression: Which compression method to use.
#     Defaults to none. (Since 5.0)
#
# @multifd-zlib-level: Set the compression level to be used in live
#     migration. The compression level is an integer between 0 and 9,
#     where 0 means no compression, 1 means the best compression speed,
#     and 9 means the best compression ratio, which will consume more
#     CPU. Defaults to 1. (Since 5.0)
#
# @multifd-zstd-level: Set the compression level to be used in live
#     migration. The compression level is an integer between 0 and 20,
#     where 0 means no compression, 1 means the best compression speed,
#     and 20 means the best compression ratio, which will consume more
#     CPU. Defaults to 1. (Since 5.0)
#
# @block-bitmap-mapping: Maps block nodes and bitmaps on them to aliases
#     for the purpose of dirty bitmap migration. Such aliases may for
#     example be the corresponding names on the opposite site. The
#     mapping must be one-to-one, but not necessarily complete: On the
#     source, unmapped bitmaps and all bitmaps on unmapped nodes will be
#     ignored. On the destination, encountering an unmapped alias in the
#     incoming migration stream will result in a report, and all further
#     bitmap migration data will then be discarded. Note that the
#     destination does not know about bitmaps it does not receive, so
#     there is no limitation or requirement regarding the number of
#     bitmaps received, or how they are named, or on which nodes they
#     are placed. By default (when this parameter has never been set),
#     bitmap names are mapped to themselves. Nodes are mapped to their
#     block device name if there is one, and to their node name
#     otherwise. (Since 5.2)
#
# Features:
# @unstable: Member @x-checkpoint-delay is experimental.
#
# Since: 2.4
##
# TODO either fuse back into MigrationParameters, or make
# MigrationParameters members mandatory
{ 'struct': 'MigrateSetParameters',
  'data': { '*announce-initial': 'size',
            '*announce-max': 'size',
            '*announce-rounds': 'size',
            '*announce-step': 'size',
            '*compress-level': 'uint8',
            '*compress-threads': 'uint8',
            '*compress-wait-thread': 'bool',
            '*decompress-threads': 'uint8',
            '*throttle-trigger-threshold': 'uint8',
            '*cpu-throttle-initial': 'uint8',
            '*cpu-throttle-increment': 'uint8',
            '*cpu-throttle-tailslow': 'bool',
            '*tls-creds': 'StrOrNull',
            '*tls-hostname': 'StrOrNull',
            '*tls-authz': 'StrOrNull',
            '*max-bandwidth': 'size',
            '*downtime-limit': 'uint64',
            '*x-checkpoint-delay': { 'type': 'uint32',
                                     'features': [ 'unstable' ] },
            '*block-incremental': 'bool',
            '*multifd-channels': 'uint8',
            '*xbzrle-cache-size': 'size',
            '*max-postcopy-bandwidth': 'size',
            '*max-cpu-throttle': 'uint8',
            '*multifd-compression': 'MultiFDCompression',
            '*multifd-zlib-level': 'uint8',
            '*multifd-zstd-level': 'uint8',
            '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ] } }

##
# @migrate-set-parameters:
#
# Set various migration parameters.
#
# Since: 2.4
#
# Example:
#
# -> { "execute": "migrate-set-parameters" ,
#      "arguments": { "compress-level": 1 } }
#
##
{ 'command': 'migrate-set-parameters', 'boxed': true,
  'data': 'MigrateSetParameters' }

##
# @MigrationParameters:
#
# The optional members aren't actually optional.
#
# @announce-initial: Initial delay (in milliseconds) before sending the
#     first announce (Since 4.0)
#
# @announce-max: Maximum delay (in milliseconds) between packets in the
#     announcement (Since 4.0)
#
# @announce-rounds: Number of self-announce packets sent after migration
#     (Since 4.0)
#
# @announce-step: Increase in delay (in milliseconds) between subsequent
#     packets in the announcement (Since 4.0)
#
# @compress-level: compression level
#
# @compress-threads: compression thread count
#
# @compress-wait-thread: Controls behavior when all compression threads
#     are currently busy. If true (default), wait for a free compression
#     thread to become available; otherwise, send the page uncompressed.
#     (Since 3.1)
#
# @decompress-threads: decompression thread count
#
# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
#     bytes_xfer_period to trigger throttling. It is expressed as a
#     percentage. The default value is 50. (Since 5.0)
#
# @cpu-throttle-initial: Initial percentage of time guest cpus are
#     throttled when migration auto-converge is activated.
#     (Since 2.7)
#
# @cpu-throttle-increment: throttle percentage increase each time
#     auto-converge detects that migration is not making progress.
#     (Since 2.7)
#
# @cpu-throttle-tailslow: Make CPU throttling slower at the tail stage.
#     At the tail stage of throttling, the guest is very sensitive to
#     the CPU percentage while @cpu-throttle-increment is usually
#     excessive. If this parameter is true, we will compute the ideal
#     CPU percentage used by the guest, which may exactly make the dirty
#     rate match the dirty rate threshold. Then we will choose the
#     smaller throttle increment between the one specified by
#     @cpu-throttle-increment and the one generated by the ideal CPU
#     percentage. Therefore, it is compatible with traditional
#     throttling, while the throttle increment won't be excessive at the
#     tail stage. The default value is false. (Since 5.1)
#
# @tls-creds: ID of the 'tls-creds' object that provides credentials for
#     establishing a TLS connection over the migration data channel. On
#     the outgoing side of the migration, the credentials must be for a
#     'client' endpoint, while for the incoming side the credentials
#     must be for a 'server' endpoint. An empty string means that QEMU
#     will use plain text mode for migration, rather than TLS.
#     (Since 2.7)
#     Note: 2.8 reports this by omitting tls-creds instead.
#
# @tls-hostname: hostname of the target host for the migration. This is
#     required when using x509 based TLS credentials and the migration
#     URI does not already include a hostname. For example if using fd:
#     or exec: based migration, the hostname must be provided so that
#     the server's x509 certificate identity can be validated.
#     (Since 2.7) An empty string means that QEMU will use the hostname
#     associated with the migration URI, if any. (Since 2.9)
#     Note: 2.8 reports this by omitting tls-hostname instead.
#
# @tls-authz: ID of the 'authz' object subclass that provides access
#     control checking of the TLS x509 certificate distinguished name.
#     (Since 4.0)
#
# @max-bandwidth: maximum speed for migration, in bytes per second.
#     (Since 2.8)
#
# @downtime-limit: maximum tolerated downtime for migration, in
#     milliseconds. (Since 2.8)
#
# @x-checkpoint-delay: the delay time between two COLO checkpoints.
#     (Since 2.8)
#
# @block-incremental: Affects how much storage is migrated when the
#     block migration capability is enabled. When false, the entire
#     storage backing chain is migrated into a flattened image at the
#     destination; when true, only the active qcow2 layer is migrated
#     and the destination must already have access to the same backing
#     chain as was used on the source. (since 2.10)
#
# @multifd-channels: Number of channels used to migrate data in
#     parallel. This is the same as the number of sockets used for
#     migration. The default value is 2 (since 4.0)
#
# @xbzrle-cache-size: cache size to be used by XBZRLE migration. It
#     needs to be a multiple of the target page size and a power of 2
#     (Since 2.11)
#
# @max-postcopy-bandwidth: Background transfer bandwidth during
#     postcopy. Defaults to 0 (unlimited). In bytes per second.
#     (Since 3.0)
#
# @max-cpu-throttle: maximum cpu throttle percentage.
#     Defaults to 99. (Since 3.1)
#
# @multifd-compression: Which compression method to use.
#     Defaults to none. (Since 5.0)
#
# @multifd-zlib-level: Set the compression level to be used in live
#     migration. The compression level is an integer between 0 and 9,
#     where 0 means no compression, 1 means the best compression speed,
#     and 9 means the best compression ratio, which will consume more
#     CPU. Defaults to 1. (Since 5.0)
#
# @multifd-zstd-level: Set the compression level to be used in live
#     migration. The compression level is an integer between 0 and 20,
#     where 0 means no compression, 1 means the best compression speed,
#     and 20 means the best compression ratio, which will consume more
#     CPU. Defaults to 1. (Since 5.0)
#
# @block-bitmap-mapping: Maps block nodes and bitmaps on them to aliases
#     for the purpose of dirty bitmap migration. Such aliases may for
#     example be the corresponding names on the opposite site. The
#     mapping must be one-to-one, but not necessarily complete: On the
#     source, unmapped bitmaps and all bitmaps on unmapped nodes will be
#     ignored. On the destination, encountering an unmapped alias in the
#     incoming migration stream will result in a report, and all further
#     bitmap migration data will then be discarded. Note that the
#     destination does not know about bitmaps it does not receive, so
#     there is no limitation or requirement regarding the number of
#     bitmaps received, or how they are named, or on which nodes they
#     are placed. By default (when this parameter has never been set),
#     bitmap names are mapped to themselves. Nodes are mapped to their
#     block device name if there is one, and to their node name
#     otherwise. (Since 5.2)
#
# Features:
# @unstable: Member @x-checkpoint-delay is experimental.
#
# Since: 2.4
##
{ 'struct': 'MigrationParameters',
  'data': { '*announce-initial': 'size',
            '*announce-max': 'size',
            '*announce-rounds': 'size',
            '*announce-step': 'size',
            '*compress-level': 'uint8',
            '*compress-threads': 'uint8',
            '*compress-wait-thread': 'bool',
            '*decompress-threads': 'uint8',
            '*throttle-trigger-threshold': 'uint8',
            '*cpu-throttle-initial': 'uint8',
            '*cpu-throttle-increment': 'uint8',
            '*cpu-throttle-tailslow': 'bool',
            '*tls-creds': 'str',
            '*tls-hostname': 'str',
            '*tls-authz': 'str',
            '*max-bandwidth': 'size',
            '*downtime-limit': 'uint64',
            '*x-checkpoint-delay': { 'type': 'uint32',
                                     'features': [ 'unstable' ] },
            '*block-incremental': 'bool',
            '*multifd-channels': 'uint8',
            '*xbzrle-cache-size': 'size',
            '*max-postcopy-bandwidth': 'size',
            '*max-cpu-throttle': 'uint8',
            '*multifd-compression': 'MultiFDCompression',
            '*multifd-zlib-level': 'uint8',
            '*multifd-zstd-level': 'uint8',
            '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ] } }
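
# As an illustrative sketch (the channel count and compression choice
# are arbitrary): multifd transfers need the 'multifd' capability, and
# the related parameters above can then be tuned, for instance:
#
# -> { "execute": "migrate-set-capabilities",
#      "arguments": { "capabilities": [
#          { "capability": "multifd", "state": true } ] } }
# <- { "return": {} }
# -> { "execute": "migrate-set-parameters",
#      "arguments": { "multifd-channels": 4,
#                     "multifd-compression": "zlib" } }
# <- { "return": {} }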

##
# @query-migrate-parameters:
#
# Returns information about the current migration parameters
#
# Returns: @MigrationParameters
#
# Since: 2.4
#
# Example:
#
# -> { "execute": "query-migrate-parameters" }
# <- { "return": {
#          "decompress-threads": 2,
#          "cpu-throttle-increment": 10,
#          "compress-threads": 8,
#          "compress-level": 1,
#          "cpu-throttle-initial": 20,
#          "max-bandwidth": 33554432,
#          "downtime-limit": 300
#       }
#    }
#
##
{ 'command': 'query-migrate-parameters',
  'returns': 'MigrationParameters' }

##
# @client_migrate_info:
#
# Set migration information for remote display. This makes the server
# ask the client to automatically reconnect using the new parameters
# once migration finished successfully. Only implemented for SPICE.
#
# @protocol: must be "spice"
#
# @hostname: migration target hostname
#
# @port: spice tcp port for plaintext channels
#
# @tls-port: spice tcp port for tls-secured channels
#
# @cert-subject: server certificate subject
#
# Since: 0.14
#
# Example:
#
# -> { "execute": "client_migrate_info",
#      "arguments": { "protocol": "spice",
#                     "hostname": "virt42.lab.kraxel.org",
#                     "port": 1234 } }
# <- { "return": {} }
#
##
{ 'command': 'client_migrate_info',
  'data': { 'protocol': 'str', 'hostname': 'str', '*port': 'int',
            '*tls-port': 'int', '*cert-subject': 'str' } }

##
# @migrate-start-postcopy:
#
# Followup to a migration command to switch the migration to postcopy
# mode. The postcopy-ram capability must be set on both source and
# destination before the original migration command.
#
# Since: 2.5
#
# Example:
#
# -> { "execute": "migrate-start-postcopy" }
# <- { "return": {} }
#
##
{ 'command': 'migrate-start-postcopy' }
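
# A rough outline of a postcopy switch-over, as a sketch only (the
# destination URI is arbitrary): enable postcopy-ram on both sides,
# start a normal migration, and switch to postcopy once the bulk of RAM
# has been sent.
#
# -> { "execute": "migrate-set-capabilities",
#      "arguments": { "capabilities": [
#          { "capability": "postcopy-ram", "state": true } ] } }
# <- { "return": {} }
# -> { "execute": "migrate", "arguments": { "uri": "tcp:0:4446" } }
# <- { "return": {} }
# -> { "execute": "migrate-start-postcopy" }
# <- { "return": {} }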

##
# @MIGRATION:
#
# Emitted when a migration event happens
#
# @status: @MigrationStatus describing the current migration status.
#
# Since: 2.4
#
# Example:
#
# <- {"timestamp": {"seconds": 1432121972, "microseconds": 744001},
#     "event": "MIGRATION",
#     "data": {"status": "completed"} }
#
##
{ 'event': 'MIGRATION',
  'data': {'status': 'MigrationStatus'}}

##
# @MIGRATION_PASS:
#
# Emitted from the source side of a migration at the start of each pass
# (when it syncs the dirty bitmap)
#
# @pass: An incrementing count (starting at 1 on the first pass)
#
# Since: 2.6
#
# Example:
#
# <- { "timestamp": {"seconds": 1449669631, "microseconds": 239225},
#      "event": "MIGRATION_PASS", "data": {"pass": 2} }
#
##
{ 'event': 'MIGRATION_PASS',
  'data': { 'pass': 'int' } }

##
# @COLOMessage:
#
# The message transmission between Primary side and Secondary side.
#
# @checkpoint-ready: Secondary VM (SVM) is ready for checkpointing
#
# @checkpoint-request: Primary VM (PVM) tells SVM to prepare for
#     checkpointing
#
# @checkpoint-reply: SVM gets PVM's checkpoint request
#
# @vmstate-send: VM's state will be sent by PVM.
#
# @vmstate-size: The total size of VMstate.
#
# @vmstate-received: VM's state has been received by SVM.
#
# @vmstate-loaded: VM's state has been loaded by SVM.
#
# Since: 2.8
##
{ 'enum': 'COLOMessage',
  'data': [ 'checkpoint-ready', 'checkpoint-request', 'checkpoint-reply',
            'vmstate-send', 'vmstate-size', 'vmstate-received',
            'vmstate-loaded' ] }

##
# @COLOMode:
#
# The COLO current mode.
#
# @none: COLO is disabled.
#
# @primary: COLO node in primary side.
#
# @secondary: COLO node in secondary side.
#
# Since: 2.8
##
{ 'enum': 'COLOMode',
  'data': [ 'none', 'primary', 'secondary'] }

##
# @FailoverStatus:
#
# An enumeration of COLO failover status
#
# @none: no failover has ever happened
#
# @require: got failover requirement but not handled
#
# @active: in the process of doing failover
#
# @completed: finish the process of failover
#
# @relaunch: restart the failover process, from 'none' -> 'completed'
#     (Since 2.9)
#
# Since: 2.8
##
{ 'enum': 'FailoverStatus',
  'data': [ 'none', 'require', 'active', 'completed', 'relaunch' ] }

##
# @COLO_EXIT:
#
# Emitted when the VM finishes COLO mode, due to an error or at the
# request of users.
#
# @mode: report COLO mode when COLO exited.
#
# @reason: describes the reason for the COLO exit.
#
# Since: 3.1
#
# Example:
#
# <- { "timestamp": {"seconds": 2032141960, "microseconds": 417172},
#      "event": "COLO_EXIT", "data": {"mode": "primary", "reason": "request" } }
#
##
{ 'event': 'COLO_EXIT',
  'data': {'mode': 'COLOMode', 'reason': 'COLOExitReason' } }

##
# @COLOExitReason:
#
# The reason for a COLO exit.
#
# @none: failover has never happened. This state does not occur in the
#     COLO_EXIT event, and is only visible in the result of
#     query-colo-status.
#
# @request: COLO exit is due to an external request.
#
# @error: COLO exit is due to an internal error.
#
# @processing: COLO is currently handling a failover (since 4.0).
#
# Since: 3.1
##
{ 'enum': 'COLOExitReason',
  'data': [ 'none', 'request', 'error', 'processing' ] }

##
# @x-colo-lost-heartbeat:
#
# Tell qemu that heartbeat is lost, request it to do takeover
# procedures. If this command is sent to the PVM, the Primary side will
# exit COLO mode. If sent to the Secondary, the Secondary side will run
# failover work, then take over server operation to become the service
# VM.
#
# Features:
# @unstable: This command is experimental.
#
# Since: 2.8
#
# Example:
#
# -> { "execute": "x-colo-lost-heartbeat" }
# <- { "return": {} }
#
##
{ 'command': 'x-colo-lost-heartbeat',
  'features': [ 'unstable' ] }

##
# @migrate_cancel:
#
# Cancel the currently executing migration process.
#
# Returns: nothing on success
#
# Notes: This command succeeds even if there is no migration process
#     running.
#
# Since: 0.14
#
# Example:
#
# -> { "execute": "migrate_cancel" }
# <- { "return": {} }
#
##
{ 'command': 'migrate_cancel' }

##
# @migrate-continue:
#
# Continue migration when it's in a paused state.
#
# @state: The state the migration is currently expected to be in
#
# Returns: nothing on success
#
# Since: 2.11
#
# Example:
#
# -> { "execute": "migrate-continue" , "arguments":
#      { "state": "pre-switchover" } }
# <- { "return": {} }
##
{ 'command': 'migrate-continue', 'data': {'state': 'MigrationStatus'} }

##
# @migrate:
#
# Migrates the current running guest to another Virtual Machine.
#
# @uri: the Uniform Resource Identifier of the destination VM
#
# @blk: do block migration (full disk copy)
#
# @inc: incremental disk copy migration
#
# @detach: this argument exists only for compatibility reasons and
#     is ignored by QEMU
#
# @resume: resume one paused migration, default "off". (since 3.0)
#
# Returns: nothing on success
#
# Since: 0.14
#
# Notes:
#
# 1. The 'query-migrate' command should be used to check migration's
#    progress and final result (this information is provided by the
#    'status' member)
#
# 2. All boolean arguments default to false
#
# 3. The user Monitor's "detach" argument is invalid in QMP and should
#    not be used
#
# Example:
#
# -> { "execute": "migrate", "arguments": { "uri": "tcp:0:4446" } }
# <- { "return": {} }
#
##
{ 'command': 'migrate',
  'data': {'uri': 'str', '*blk': 'bool', '*inc': 'bool',
           '*detach': 'bool', '*resume': 'bool' } }
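
# A minimal end-to-end sketch (host name, port and URIs are arbitrary):
# the destination QEMU is started with "-incoming defer" and told where
# to listen via migrate-incoming (documented below); the source is then
# pointed at it, and progress can be polled with query-migrate as in the
# examples further above.
#
# Destination:
# -> { "execute": "migrate-incoming", "arguments": { "uri": "tcp::4446" } }
# <- { "return": {} }
#
# Source:
# -> { "execute": "migrate", "arguments": { "uri": "tcp:dst-host:4446" } }
# <- { "return": {} }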

##
# @migrate-incoming:
#
# Start an incoming migration. QEMU must have been started with
# -incoming defer.
#
# @uri: The Uniform Resource Identifier identifying the source or
#     address to listen on
#
# Returns: nothing on success
#
# Since: 2.3
#
# Notes:
#
# 1. It's a bad idea to use a string for the uri, but it needs to stay
#    compatible with -incoming and the format of the uri is already
#    exposed above libvirt.
#
# 2. QEMU must be started with -incoming defer to allow migrate-incoming
#    to be used.
#
# 3. The uri format is the same as for -incoming
#
# Example:
#
# -> { "execute": "migrate-incoming",
#      "arguments": { "uri": "tcp::4446" } }
# <- { "return": {} }
#
##
{ 'command': 'migrate-incoming', 'data': {'uri': 'str' } }

##
# @xen-save-devices-state:
#
# Save the state of all devices to file. The RAM and the block devices
# of the VM are not saved by this command.
#
# @filename: the file to save the state of the devices to as binary
#     data. See xen-save-devices-state.txt for a description of the
#     binary format.
#
# @live: Optional argument to ask QEMU to treat this command as part of
#     a live migration. Default to true. (since 2.11)
#
# Returns: Nothing on success
#
# Since: 1.1
#
# Example:
#
# -> { "execute": "xen-save-devices-state",
#      "arguments": { "filename": "/tmp/save" } }
# <- { "return": {} }
#
##
{ 'command': 'xen-save-devices-state',
  'data': {'filename': 'str', '*live':'bool' } }

##
# @xen-set-global-dirty-log:
#
# Enable or disable the global dirty log mode.
#
# @enable: true to enable, false to disable.
#
# Returns: nothing
#
# Since: 1.3
#
# Example:
#
# -> { "execute": "xen-set-global-dirty-log",
#      "arguments": { "enable": true } }
# <- { "return": {} }
#
##
{ 'command': 'xen-set-global-dirty-log', 'data': { 'enable': 'bool' } }

##
# @xen-load-devices-state:
#
# Load the state of all devices from file. The RAM and the block devices
# of the VM are not loaded by this command.
#
# @filename: the file to load the state of the devices from as binary
#     data. See xen-save-devices-state.txt for a description of the
#     binary format.
#
# Since: 2.7
#
# Example:
#
# -> { "execute": "xen-load-devices-state",
#      "arguments": { "filename": "/tmp/resume" } }
# <- { "return": {} }
#
##
{ 'command': 'xen-load-devices-state', 'data': {'filename': 'str'} }

##
# @xen-set-replication:
#
# Enable or disable replication.
#
# @enable: true to enable, false to disable.
#
# @primary: true for primary or false for secondary.
#
# @failover: true to do failover, false to stop. Cannot be specified
#     if @enable is true. The default value is false.
#
# Returns: nothing.
#
# Example:
#
# -> { "execute": "xen-set-replication",
#      "arguments": {"enable": true, "primary": false} }
# <- { "return": {} }
#
# Since: 2.9
##
{ 'command': 'xen-set-replication',
  'data': { 'enable': 'bool', 'primary': 'bool', '*failover': 'bool' },
  'if': 'CONFIG_REPLICATION' }

##
# @ReplicationStatus:
#
# The result format for 'query-xen-replication-status'.
#
# @error: true if an error happened, false if replication is normal.
#
# @desc: the human readable error description string, when
#     @error is 'true'.
#
# Since: 2.9
##
{ 'struct': 'ReplicationStatus',
  'data': { 'error': 'bool', '*desc': 'str' },
  'if': 'CONFIG_REPLICATION' }

##
# @query-xen-replication-status:
#
# Query replication status while the vm is running.
#
# Returns: A @ReplicationStatus object showing the status.
#
# Example:
#
# -> { "execute": "query-xen-replication-status" }
# <- { "return": { "error": false } }
#
# Since: 2.9
##
{ 'command': 'query-xen-replication-status',
  'returns': 'ReplicationStatus',
  'if': 'CONFIG_REPLICATION' }

##
# @xen-colo-do-checkpoint:
#
# Xen uses this command to notify replication to trigger a checkpoint.
#
# Returns: nothing.
#
# Example:
#
# -> { "execute": "xen-colo-do-checkpoint" }
# <- { "return": {} }
#
# Since: 2.9
##
{ 'command': 'xen-colo-do-checkpoint',
  'if': 'CONFIG_REPLICATION' }

##
# @COLOStatus:
#
# The result format for 'query-colo-status'.
#
# @mode: COLO running mode. If COLO is running, this field will return
#     'primary' or 'secondary'.
#
# @last-mode: COLO last running mode. If COLO is running, this field
#     will return the same value as the @mode field; after a failover it
#     can be used to get the last COLO mode. (since 4.0)
#
# @reason: describes the reason for the COLO exit.
#
# Since: 3.1
##
{ 'struct': 'COLOStatus',
  'data': { 'mode': 'COLOMode', 'last-mode': 'COLOMode',
            'reason': 'COLOExitReason' } }

##
# @query-colo-status:
#
# Query COLO status while the vm is running.
#
# Returns: A @COLOStatus object showing the status.
#
# Example:
#
# -> { "execute": "query-colo-status" }
# <- { "return": { "mode": "primary", "last-mode": "none", "reason": "request" } }
#
# Since: 3.1
##
{ 'command': 'query-colo-status',
  'returns': 'COLOStatus' }

##
# @migrate-recover:
#
# Provide a recovery migration stream URI.
#
# @uri: the URI to be used for the recovery of migration stream.
#
# Returns: nothing.
#
# Example:
#
# -> { "execute": "migrate-recover",
#      "arguments": { "uri": "tcp:192.168.1.200:12345" } }
# <- { "return": {} }
#
# Since: 3.0
##
{ 'command': 'migrate-recover',
  'data': { 'uri': 'str' },
  'allow-oob': true }

##
# @migrate-pause:
#
# Pause a migration. Currently it only supports postcopy.
#
# Returns: nothing.
#
# Example:
#
# -> { "execute": "migrate-pause" }
# <- { "return": {} }
#
# Since: 3.0
##
{ 'command': 'migrate-pause', 'allow-oob': true }

##
# @UNPLUG_PRIMARY:
#
# Emitted from source side of a migration when migration state is
# WAIT_UNPLUG. Device was unplugged by guest operating system. Device
# resources in QEMU are kept on standby to be able to re-plug it in case
# of migration failure.
#
# @device-id: QEMU device id of the unplugged device
#
# Since: 4.2
#
# Example:
#
# <- { "event": "UNPLUG_PRIMARY",
#      "data": { "device-id": "hostdev0" },
#      "timestamp": { "seconds": 1265044230, "microseconds": 450486 } }
#
##
{ 'event': 'UNPLUG_PRIMARY',
  'data': { 'device-id': 'str' } }

##
# @DirtyRateVcpu:
#
# Dirty rate of vcpu.
#
# @id: vcpu index.
#
# @dirty-rate: dirty rate.
#
# Since: 6.2
##
{ 'struct': 'DirtyRateVcpu',
  'data': { 'id': 'int', 'dirty-rate': 'int64' } }

##
# @DirtyRateStatus:
#
# An enumeration of dirtyrate status.
#
# @unstarted: the dirtyrate thread has not been started.
#
# @measuring: the dirtyrate thread is measuring.
#
# @measured: the dirtyrate thread has measured and results are available.
#
# Since: 5.2
##
{ 'enum': 'DirtyRateStatus',
  'data': [ 'unstarted', 'measuring', 'measured'] }

##
# @DirtyRateMeasureMode:
#
# An enumeration of modes for measuring dirtyrate.
#
# @page-sampling: calculate dirtyrate by sampling pages.
#
# @dirty-ring: calculate dirtyrate by dirty ring.
#
# @dirty-bitmap: calculate dirtyrate by dirty bitmap.
#
# Since: 6.2
##
{ 'enum': 'DirtyRateMeasureMode',
  'data': ['page-sampling', 'dirty-ring', 'dirty-bitmap'] }

##
# @DirtyRateInfo:
#
# Information about the current dirty page rate of the VM.
#
# @dirty-rate: an estimate of the dirty page rate of the VM in units of
#     MB/s, present only when estimating the rate has completed.
#
# @status: current status of the dirty rate measurement; one of
#     'unstarted', 'measuring' or 'measured'.
#
# @start-time: start time of the calculation, in units of seconds.
#
# @calc-time: time period over which dirty pages are sampled, in units
#     of seconds.
#
# @sample-pages: number of sampled pages per GB of guest memory;
#     the default value is 512 (since 6.1)
#
# @mode: the method that was used to measure the dirty rate; one of
#     'page-sampling', 'dirty-ring' or 'dirty-bitmap' (Since 6.2)
#
# @vcpu-dirty-rate: dirty rate for each vcpu, present only if
#     'dirty-ring' mode was specified (Since 6.2)
#
# Since: 5.2
##
{ 'struct': 'DirtyRateInfo',
  'data': {'*dirty-rate': 'int64',
           'status': 'DirtyRateStatus',
           'start-time': 'int64',
           'calc-time': 'int64',
           'sample-pages': 'uint64',
           'mode': 'DirtyRateMeasureMode',
           '*vcpu-dirty-rate': [ 'DirtyRateVcpu' ] } }

##
# @calc-dirty-rate:
#
# Start measuring the dirty page rate of the VM.
#
# @calc-time: time period over which dirty pages are sampled, in units
#     of seconds.
#
# @sample-pages: number of sampled pages per GB of guest memory;
#     the default value is 512 (since 6.1)
#
# @mode: mechanism used to measure the dirty rate; one of
#     'page-sampling', 'dirty-ring' or 'dirty-bitmap' (Since 6.2)
#
# Since: 5.2
#
# Example:
#
# -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 1,
#     "sample-pages": 512} }
# <- { "return": {} }
#
##
{ 'command': 'calc-dirty-rate', 'data': {'calc-time': 'int64',
                                         '*sample-pages': 'int',
                                         '*mode': 'DirtyRateMeasureMode'} }

##
# @query-dirty-rate:
#
# Query the dirty page rate of the VM, in units of MB/s.
#
# Since: 5.2
##
{ 'command': 'query-dirty-rate', 'returns': 'DirtyRateInfo' }

##
# @DirtyLimitInfo:
#
# Dirty page rate limit information of a virtual CPU.
#
# @cpu-index: index of a virtual CPU.
#
# @limit-rate: upper limit of dirty page rate (MB/s) for a virtual
#     CPU, 0 means unlimited.
#
# @current-rate: current dirty page rate (MB/s) for a virtual CPU.
#
# Since: 7.1
#
##
{ 'struct': 'DirtyLimitInfo',
  'data': { 'cpu-index': 'int',
            'limit-rate': 'uint64',
            'current-rate': 'uint64' } }

##
# @set-vcpu-dirty-limit:
#
# Set the upper limit of dirty page rate for virtual CPUs.
#
# Requires KVM with accelerator property "dirty-ring-size" set.
# A virtual CPU's dirty page rate is a measure of its memory load.
# To observe dirty page rates, use @calc-dirty-rate.
#
# @cpu-index: index of a virtual CPU, default is all.
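#     When omitted, the limit is applied to all virtual CPUs.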
#
# @dirty-rate: upper limit of dirty page rate (MB/s) for virtual CPUs.
#
# Since: 7.1
#
# Example:
#
# -> {"execute": "set-vcpu-dirty-limit",
#     "arguments": { "dirty-rate": 200,
#                    "cpu-index": 1 } }
# <- { "return": {} }
#
##
{ 'command': 'set-vcpu-dirty-limit',
  'data': { '*cpu-index': 'int',
            'dirty-rate': 'uint64' } }

##
# @cancel-vcpu-dirty-limit:
#
# Cancel the upper limit of dirty page rate for virtual CPUs.
#
# Cancel the dirty page rate limit that was previously set with the
# @set-vcpu-dirty-limit command. Note that this command requires dirty
# ring support, the same as @set-vcpu-dirty-limit.
#
# @cpu-index: index of a virtual CPU, default is all.
#
# Since: 7.1
#
# Example:
#
# -> {"execute": "cancel-vcpu-dirty-limit",
#     "arguments": { "cpu-index": 1 } }
# <- { "return": {} }
#
##
{ 'command': 'cancel-vcpu-dirty-limit',
  'data': { '*cpu-index': 'int'} }

##
# @query-vcpu-dirty-limit:
#
# Returns information about virtual CPU dirty page rate limits, if any.
#
# Since: 7.1
#
# Example:
#
# -> {"execute": "query-vcpu-dirty-limit"}
#
##
{ 'command': 'query-vcpu-dirty-limit',
  'returns': [ 'DirtyLimitInfo' ] }

##
# @snapshot-save:
#
# Save a VM snapshot
#
# @job-id: identifier for the newly created job
# @tag: name of the snapshot to create
# @vmstate: block device node name to save vmstate to
# @devices: list of block device node names to save a snapshot to
#
# Applications should not assume that the snapshot save is complete
# when this command returns. The job commands / events must be used
# to determine completion and to fetch details of any errors that arise.
#
# Note that execution of the guest CPUs may be stopped during the
# time it takes to save the snapshot. A future version of QEMU
# may ensure CPUs are executing continuously.
#
# It is strongly recommended that @devices contain all writable
# block device nodes if a consistent snapshot is required.
#
# If @tag already exists, an error will be reported
#
# Returns: nothing
#
# Example:
#
# -> { "execute": "snapshot-save",
#      "arguments": {
#         "job-id": "snapsave0",
#         "tag": "my-snap",
#         "vmstate": "disk0",
#         "devices": ["disk0", "disk1"]
#      }
#    }
# <- { "return": { } }
# <- {"event": "JOB_STATUS_CHANGE",
#     "data": {"status": "created", "id": "snapsave0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "data": {"status": "running", "id": "snapsave0"}}
# <- {"event": "STOP"}
# <- {"event": "RESUME"}
# <- {"event": "JOB_STATUS_CHANGE",
#     "data": {"status": "waiting", "id": "snapsave0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "data": {"status": "pending", "id": "snapsave0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "data": {"status": "concluded", "id": "snapsave0"}}
# -> {"execute": "query-jobs"}
# <- {"return": [{"current-progress": 1,
#                 "status": "concluded",
#                 "total-progress": 1,
#                 "type": "snapshot-save",
#                 "id": "snapsave0"}]}
#
# Since: 6.0
##
{ 'command': 'snapshot-save',
  'data': { 'job-id': 'str',
            'tag': 'str',
            'vmstate': 'str',
            'devices': ['str'] } }

##
# @snapshot-load:
#
# Load a VM snapshot
#
# @job-id: identifier for the newly created job
# @tag: name of the snapshot to load.
# @vmstate: block device node name to load vmstate from
# @devices: list of block device node names to load a snapshot from
#
# Applications should not assume that the snapshot load is complete
# when this command returns. The job commands / events must be used
# to determine completion and to fetch details of any errors that arise.
#
# Note that execution of the guest CPUs will be stopped during the
# time it takes to load the snapshot.
#
# It is strongly recommended that @devices contain all writable
# block device nodes that can have changed since the original
# @snapshot-save command execution.
#
# Returns: nothing
#
# Example:
#
# -> { "execute": "snapshot-load",
#      "arguments": {
#         "job-id": "snapload0",
#         "tag": "my-snap",
#         "vmstate": "disk0",
#         "devices": ["disk0", "disk1"]
#      }
#    }
# <- { "return": { } }
# <- {"event": "JOB_STATUS_CHANGE",
#     "data": {"status": "created", "id": "snapload0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "data": {"status": "running", "id": "snapload0"}}
# <- {"event": "STOP"}
# <- {"event": "RESUME"}
# <- {"event": "JOB_STATUS_CHANGE",
#     "data": {"status": "waiting", "id": "snapload0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "data": {"status": "pending", "id": "snapload0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "data": {"status": "concluded", "id": "snapload0"}}
# -> {"execute": "query-jobs"}
# <- {"return": [{"current-progress": 1,
#                 "status": "concluded",
#                 "total-progress": 1,
#                 "type": "snapshot-load",
#                 "id": "snapload0"}]}
#
# Since: 6.0
##
{ 'command': 'snapshot-load',
  'data': { 'job-id': 'str',
            'tag': 'str',
            'vmstate': 'str',
            'devices': ['str'] } }

##
# @snapshot-delete:
#
# Delete a VM snapshot
#
# @job-id: identifier for the newly created job
# @tag: name of the snapshot to delete.
# @devices: list of block device node names to delete a snapshot from
#
# Applications should not assume that the snapshot delete is complete
# when this command returns. The job commands / events must be used
# to determine completion and to fetch details of any errors that arise.
#
# Returns: nothing
#
# Example:
#
# -> { "execute": "snapshot-delete",
#      "arguments": {
#         "job-id": "snapdelete0",
#         "tag": "my-snap",
#         "devices": ["disk0", "disk1"]
#      }
#    }
# <- { "return": { } }
# <- {"event": "JOB_STATUS_CHANGE",
#     "data": {"status": "created", "id": "snapdelete0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "data": {"status": "running", "id": "snapdelete0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "data": {"status": "waiting", "id": "snapdelete0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "data": {"status": "pending", "id": "snapdelete0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "data": {"status": "concluded", "id": "snapdelete0"}}
# -> {"execute": "query-jobs"}
# <- {"return": [{"current-progress": 1,
#                 "status": "concluded",
#                 "total-progress": 1,
#                 "type": "snapshot-delete",
#                 "id": "snapdelete0"}]}
#
# Since: 6.0
##
{ 'command': 'snapshot-delete',
  'data': { 'job-id': 'str',
            'tag': 'str',
            'devices': ['str'] } }
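
# A hypothetical failure case for the snapshot jobs above: if a job
# fails, it still reaches the "concluded" state, and the error text is
# reported through the optional "error" member of query-jobs (the error
# string below is illustrative only):
#
# -> {"execute": "query-jobs"}
# <- {"return": [{"current-progress": 1,
#                 "status": "concluded",
#                 "total-progress": 1,
#                 "type": "snapshot-delete",
#                 "error": "Snapshot 'my-snap' does not exist",
#                 "id": "snapdelete0"}]}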