# -*- Mode: Python -*-
# vim: filetype=python
#

##
# = Migration
##

{ 'include': 'common.json' }
{ 'include': 'sockets.json' }

##
# @MigrationStats:
#
# Detailed migration status.
#
# @transferred: amount of bytes already transferred to the target VM
#
# @remaining: amount of bytes remaining to be transferred to the
#     target VM
#
# @total: total amount of bytes involved in the migration process
#
# @duplicate: number of duplicate (zero) pages (since 1.2)
#
# @skipped: number of skipped zero pages.  Always zero, only provided
#     for compatibility (since 1.5)
#
# @normal: number of normal pages (since 1.2)
#
# @normal-bytes: number of normal bytes sent (since 1.2)
#
# @dirty-pages-rate: number of pages dirtied per second by the guest
#     (since 1.3)
#
# @mbps: throughput in megabits/sec.  (since 1.6)
#
# @dirty-sync-count: number of times that dirty ram was synchronized
#     (since 2.1)
#
# @postcopy-requests: The number of page requests received from the
#     destination (since 2.7)
#
# @page-size: The number of bytes per page for the various page-based
#     statistics (since 2.10)
#
# @multifd-bytes: The number of bytes sent through multifd (since
#     3.0)
#
# @pages-per-second: the number of memory pages transferred per
#     second (Since 4.0)
#
# @precopy-bytes: The number of bytes sent in the pre-copy phase
#     (since 7.0).
#
# @downtime-bytes: The number of bytes sent while the guest is paused
#     (since 7.0).
#
# @postcopy-bytes: The number of bytes sent during the post-copy
#     phase (since 7.0).
#
# @dirty-sync-missed-zero-copy: Number of times dirty RAM
#     synchronization could not avoid copying dirty pages.  This is
#     between 0 and @dirty-sync-count * @multifd-channels.  (since
#     7.1)
#
# Features:
#
# @deprecated: Member @skipped is always zero since 1.5.3
#
# Since: 0.14
##
{ 'struct': 'MigrationStats',
  'data': {'transferred': 'int', 'remaining': 'int', 'total': 'int',
           'duplicate': 'int',
           'skipped': { 'type': 'int', 'features': [ 'deprecated' ] },
           'normal': 'int',
           'normal-bytes': 'int', 'dirty-pages-rate': 'int',
           'mbps': 'number', 'dirty-sync-count': 'int',
           'postcopy-requests': 'int', 'page-size': 'int',
           'multifd-bytes': 'uint64', 'pages-per-second': 'uint64',
           'precopy-bytes': 'uint64', 'downtime-bytes': 'uint64',
           'postcopy-bytes': 'uint64',
           'dirty-sync-missed-zero-copy': 'uint64' } }

##
# @XBZRLECacheStats:
#
# Detailed XBZRLE migration cache statistics
#
# @cache-size: XBZRLE cache size
#
# @bytes: amount of bytes already transferred to the target VM
#
# @pages: amount of pages transferred to the target VM
#
# @cache-miss: number of cache misses
#
# @cache-miss-rate: rate of cache misses (since 2.1)
#
# @encoding-rate: rate of encoded bytes (since 5.1)
#
# @overflow: number of overflows
#
# Since: 1.2
##
{ 'struct': 'XBZRLECacheStats',
  'data': {'cache-size': 'size', 'bytes': 'int', 'pages': 'int',
           'cache-miss': 'int', 'cache-miss-rate': 'number',
           'encoding-rate': 'number', 'overflow': 'int' } }

##
# @CompressionStats:
#
# Detailed migration compression statistics
#
# @pages: amount of pages compressed and transferred to the target VM
#
# @busy: count of times that no free thread was available to compress
#     data
#
# @busy-rate: rate of thread busy
#
# @compressed-size: amount of bytes after compression
#
# @compression-rate: rate of compressed size
#
# Since: 3.1
##
{ 'struct': 'CompressionStats',
  'data': {'pages': 'int', 'busy': 'int', 'busy-rate': 'number',
           'compressed-size': 'int', 'compression-rate': 'number' } }

##
# @MigrationStatus:
#
# An enumeration of migration status.
#
# @none: no migration has ever happened.
#
# @setup: migration process has been initiated.
#
# @cancelling: in the process of cancelling migration.
#
# @cancelled: cancelling migration is finished.
#
# @active: in the process of doing migration.
#
# @postcopy-active: like active, but now in postcopy mode.  (since
#     2.5)
#
# @postcopy-paused: during postcopy but paused.  (since 3.0)
#
# @postcopy-recover: trying to recover from a paused postcopy.
#     (since 3.0)
#
# @completed: migration is finished.
#
# @failed: some error occurred during migration process.
#
# @colo: VM is in the process of fault tolerance, VM can not get into
#     this state unless colo capability is enabled for migration.
#     (since 2.8)
#
# @pre-switchover: Paused before device serialisation.  (since 2.11)
#
# @device: During device serialisation when pause-before-switchover
#     is enabled (since 2.11)
#
# @wait-unplug: wait for device unplug request by guest OS to be
#     completed.  (since 4.2)
#
# Since: 2.3
##
{ 'enum': 'MigrationStatus',
  'data': [ 'none', 'setup', 'cancelling', 'cancelled',
            'active', 'postcopy-active', 'postcopy-paused',
            'postcopy-recover', 'completed', 'failed', 'colo',
            'pre-switchover', 'device', 'wait-unplug' ] }
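
# For instance, with the 'events' migration capability enabled, a
# successful migration typically progresses through these states
# (timestamps illustrative):
#
# <- { "event": "MIGRATION", "data": { "status": "setup" },
#      "timestamp": { "seconds": 1432121972, "microseconds": 744001 } }
# <- { "event": "MIGRATION", "data": { "status": "active" },
#      "timestamp": { "seconds": 1432121973, "microseconds": 112331 } }
# <- { "event": "MIGRATION", "data": { "status": "completed" },
#      "timestamp": { "seconds": 1432121975, "microseconds": 993812 } }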

##
# @VfioStats:
#
# Detailed VFIO devices migration statistics
#
# @transferred: amount of bytes transferred to the target VM by VFIO
#     devices
#
# Since: 5.2
##
{ 'struct': 'VfioStats',
  'data': {'transferred': 'int' } }

##
# @MigrationInfo:
#
# Information about current migration process.
#
# @status: @MigrationStatus describing the current migration status.
#     If this field is not returned, no migration process has been
#     initiated
#
# @ram: @MigrationStats containing detailed migration status, only
#     returned if status is 'active' or 'completed' (since 1.2)
#
# @disk: @MigrationStats containing detailed disk migration status,
#     only returned if status is 'active' and it is a block migration
#
# @xbzrle-cache: @XBZRLECacheStats containing detailed XBZRLE
#     migration statistics, only returned if XBZRLE feature is on and
#     status is 'active' or 'completed' (since 1.2)
#
# @total-time: total amount of milliseconds since migration started.
#     If migration has ended, it returns the total migration time.
#     (since 1.2)
#
# @downtime: only present when migration finishes correctly, total
#     downtime in milliseconds for the guest.  (since 1.3)
#
# @expected-downtime: only present while migration is active,
#     expected downtime in milliseconds for the guest in last walk of
#     the dirty bitmap.  (since 1.3)
#
# @setup-time: amount of setup time in milliseconds *before* the
#     iterations begin but *after* the QMP command is issued.  This
#     is designed to provide an accounting of any activities (such as
#     RDMA pinning) which may be expensive, but do not actually occur
#     during the iterative migration rounds themselves.  (since 1.6)
#
# @cpu-throttle-percentage: percentage of time guest cpus are being
#     throttled during auto-converge.  This is only present when
#     auto-converge has started throttling guest cpus.  (Since 2.7)
#
# @error-desc: the human readable error description string.  Clients
#     should not attempt to parse the error strings.  (Since 2.7)
#
# @postcopy-blocktime: total time when all vCPU were blocked during
#     postcopy live migration.  This is only present when the
#     postcopy-blocktime migration capability is enabled.  (Since
#     3.0)
#
# @postcopy-vcpu-blocktime: list of the postcopy blocktime per vCPU.
#     This is only present when the postcopy-blocktime migration
#     capability is enabled.  (Since 3.0)
#
# @compression: migration compression statistics, only returned if
#     compression feature is on and status is 'active' or 'completed'
#     (Since 3.1)
#
# @socket-address: Only used for tcp, to know what the real port is
#     (Since 4.0)
#
# @vfio: @VfioStats containing detailed VFIO devices migration
#     statistics, only returned if VFIO device is present, migration
#     is supported by all VFIO devices and status is 'active' or
#     'completed' (since 5.2)
#
# @blocked-reasons: A list of reasons an outgoing migration is
#     blocked.  Present and non-empty when migration is blocked.
#     (since 6.0)
#
# @dirty-limit-throttle-time-per-round: Maximum throttle time (in
#     microseconds) of virtual CPUs each dirty ring full round, which
#     shows how MigrationCapability dirty-limit affects the guest
#     during live migration.  (Since 8.1)
#
# @dirty-limit-ring-full-time: Estimated average dirty ring full time
#     (in microseconds) for each dirty ring full round.  The value
#     equals the dirty ring memory size divided by the average dirty
#     page rate of the virtual CPU, which can be used to observe the
#     average memory load of the virtual CPU indirectly.  Note that
#     zero means guest doesn't dirty memory.  (Since 8.1)
#
# Since: 0.14
##
{ 'struct': 'MigrationInfo',
  'data': {'*status': 'MigrationStatus', '*ram': 'MigrationStats',
           '*disk': 'MigrationStats',
           '*vfio': 'VfioStats',
           '*xbzrle-cache': 'XBZRLECacheStats',
           '*total-time': 'int',
           '*expected-downtime': 'int',
           '*downtime': 'int',
           '*setup-time': 'int',
           '*cpu-throttle-percentage': 'int',
           '*error-desc': 'str',
           '*blocked-reasons': ['str'],
           '*postcopy-blocktime': 'uint32',
           '*postcopy-vcpu-blocktime': ['uint32'],
           '*compression': 'CompressionStats',
           '*socket-address': ['SocketAddress'],
           '*dirty-limit-throttle-time-per-round': 'uint64',
           '*dirty-limit-ring-full-time': 'uint64'} }

##
# @query-migrate:
#
# Returns information about current migration process.  If migration
# is active there will be another json-object with RAM migration
# status and if block migration is active another one with block
# migration status.
#
# Returns: @MigrationInfo
#
# Since: 0.14
#
# Examples:
#
# 1. Before the first migration
#
# -> { "execute": "query-migrate" }
# <- { "return": {} }
#
# 2. Migration is done and has succeeded
#
# -> { "execute": "query-migrate" }
# <- { "return": {
#         "status": "completed",
#         "total-time":12345,
#         "setup-time":12345,
#         "downtime":12345,
#         "ram":{
#           "transferred":123,
#           "remaining":123,
#           "total":246,
#           "duplicate":123,
#           "normal":123,
#           "normal-bytes":123456,
#           "dirty-sync-count":15
#         }
#      }
#    }
#
# 3. Migration is done and has failed
#
# -> { "execute": "query-migrate" }
# <- { "return": { "status": "failed" } }
#
# 4. Migration is being performed and is not a block migration:
#
# -> { "execute": "query-migrate" }
# <- {
#       "return":{
#          "status":"active",
#          "total-time":12345,
#          "setup-time":12345,
#          "expected-downtime":12345,
#          "ram":{
#             "transferred":123,
#             "remaining":123,
#             "total":246,
#             "duplicate":123,
#             "normal":123,
#             "normal-bytes":123456,
#             "dirty-sync-count":15
#          }
#       }
#    }
#
# 5. Migration is being performed and is a block migration:
#
# -> { "execute": "query-migrate" }
# <- {
#       "return":{
#          "status":"active",
#          "total-time":12345,
#          "setup-time":12345,
#          "expected-downtime":12345,
#          "ram":{
#             "total":1057024,
#             "remaining":1053304,
#             "transferred":3720,
#             "duplicate":123,
#             "normal":123,
#             "normal-bytes":123456,
#             "dirty-sync-count":15
#          },
#          "disk":{
#             "total":20971520,
#             "remaining":20880384,
#             "transferred":91136
#          }
#       }
#    }
#
# 6. Migration is being performed and XBZRLE is active:
#
# -> { "execute": "query-migrate" }
# <- {
#       "return":{
#          "status":"active",
#          "total-time":12345,
#          "setup-time":12345,
#          "expected-downtime":12345,
#          "ram":{
#             "total":1057024,
#             "remaining":1053304,
#             "transferred":3720,
#             "duplicate":10,
#             "normal":3333,
#             "normal-bytes":3412992,
#             "dirty-sync-count":15
#          },
#          "xbzrle-cache":{
#             "cache-size":67108864,
#             "bytes":20971520,
#             "pages":2444343,
#             "cache-miss":2244,
#             "cache-miss-rate":0.123,
#             "encoding-rate":80.1,
#             "overflow":34434
#          }
#       }
#    }
##
{ 'command': 'query-migrate', 'returns': 'MigrationInfo' }

##
# @MigrationCapability:
#
# Migration capabilities enumeration
#
# @xbzrle: Migration supports xbzrle (Xor Based Zero Run Length
#     Encoding).  This feature allows us to minimize migration
#     traffic for certain work loads, by sending compressed
#     difference of the pages
#
# @rdma-pin-all: Controls whether or not the entire VM memory
#     footprint is mlock()'d on demand or all at once.  Refer to
#     docs/rdma.txt for usage.  Disabled by default.  (since 2.0)
#
# @zero-blocks: During storage migration encode blocks of zeroes
#     efficiently.  This essentially saves 1MB of zeroes per block on
#     the wire.  Enabling requires source and target VM to support
#     this feature.  To enable it is sufficient to enable the
#     capability on the source VM.  The feature is disabled by
#     default.  (since 1.6)
#
# @compress: Use multiple compression threads to accelerate live
#     migration.  This feature can help to reduce the migration
#     traffic, by sending compressed pages.  Please note that if
#     compress and xbzrle are both on, compress only takes effect in
#     the ram bulk stage, after that, it will be disabled and only
#     xbzrle takes effect, this can help to minimize migration
#     traffic.  The feature is disabled by default.  (since 2.4)
#
# @events: generate events for each migration state change (since
#     2.4)
#
# @auto-converge: If enabled, QEMU will automatically throttle down
#     the guest to speed up convergence of RAM migration.  (since
#     1.6)
#
# @postcopy-ram: Start executing on the migration target before all
#     of RAM has been migrated, pulling the remaining pages along as
#     needed.  The capability must have the same setting on both
#     source and target or migration will not even start.  NOTE: If
#     the migration fails during postcopy the VM will fail.  (since
#     2.6)
#
# @x-colo: If enabled, migration will never end, and the state of
#     the VM on the primary side will be migrated continuously to the
#     VM on secondary side, this process is called COarse-Grain LOck
#     Stepping (COLO) for Non-stop Service.  (since 2.8)
#
# @release-ram: if enabled, qemu will free the migrated ram pages on
#     the source during postcopy-ram migration.  (since 2.9)
#
# @block: If enabled, QEMU will also migrate the contents of all
#     block devices.  Default is disabled.  A possible alternative
#     uses mirror jobs to a builtin NBD server on the destination,
#     which offers more flexibility.  (Since 2.10)
#
# @return-path: If enabled, migration will use the return path even
#     for precopy.  (since 2.10)
#
# @pause-before-switchover: Pause outgoing migration before
#     serialising device state and before disabling block IO (since
#     2.11)
#
# @multifd: Use more than one fd for migration (since 4.0)
#
# @dirty-bitmaps: If enabled, QEMU will migrate named dirty bitmaps.
#     (since 2.12)
#
# @postcopy-blocktime: Calculate downtime for postcopy live migration
#     (since 3.0)
#
# @late-block-activate: If enabled, the destination will not activate
#     block devices (and thus take locks) immediately at the end of
#     migration.  (since 3.0)
#
# @x-ignore-shared: If enabled, QEMU will not migrate shared memory
#     that is accessible on the destination machine.  (since 4.0)
#
# @validate-uuid: Send the UUID of the source to allow the
#     destination to ensure it is the same.  (since 4.2)
#
# @background-snapshot: If enabled, the migration stream will be a
#     snapshot of the VM exactly at the point when the migration
#     procedure starts.  The VM RAM is saved with running VM.
#     (since 6.0)
#
# @zero-copy-send: Controls behavior on sending memory pages on
#     migration.  When true, enables a zero-copy mechanism for
#     sending memory pages, if host supports it.  Requires that QEMU
#     be permitted to use locked memory for guest RAM pages.  (since
#     7.1)
#
# @postcopy-preempt: If enabled, the migration process will allow
#     postcopy requests to preempt precopy stream, so postcopy
#     requests will be handled faster.  This is a performance feature
#     and should not affect the correctness of postcopy migration.
#     (since 7.1)
#
# @switchover-ack: If enabled, migration will not stop the source VM
#     and complete the migration until an ACK is received from the
#     destination that it's OK to do so.  Exactly when this ACK is
#     sent depends on the migrated devices that use this feature.
#     For example, a device can use it to make sure some of its data
#     is sent and loaded in the destination before doing switchover.
#     This can reduce downtime if devices that support this
#     capability are present.  'return-path' capability must be
#     enabled to use it.  (since 8.1)
#
# @dirty-limit: If enabled, migration will throttle vCPUs as needed
#     to keep their dirty page rate within @vcpu-dirty-limit.  This
#     can improve responsiveness of large guests during live
#     migration, and can result in more stable read performance.
#     Requires KVM with accelerator property "dirty-ring-size" set.
#     (Since 8.1)
#
# Features:
#
# @unstable: Members @x-colo and @x-ignore-shared are experimental.
#
# Since: 1.2
##
{ 'enum': 'MigrationCapability',
  'data': ['xbzrle', 'rdma-pin-all', 'auto-converge', 'zero-blocks',
           'compress', 'events', 'postcopy-ram',
           { 'name': 'x-colo', 'features': [ 'unstable' ] },
           'release-ram',
           'block', 'return-path', 'pause-before-switchover',
           'multifd', 'dirty-bitmaps', 'postcopy-blocktime',
           'late-block-activate',
           { 'name': 'x-ignore-shared', 'features': [ 'unstable' ] },
           'validate-uuid', 'background-snapshot',
           'zero-copy-send', 'postcopy-preempt', 'switchover-ack',
           'dirty-limit'] }
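
# For example, since postcopy-ram must have the same setting on both
# sides before the migration starts, the same command would be issued
# on source and destination (illustrative):
#
# -> { "execute": "migrate-set-capabilities", "arguments":
#      { "capabilities": [
#        { "capability": "postcopy-ram", "state": true } ] } }
# <- { "return": {} }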

##
# @MigrationCapabilityStatus:
#
# Migration capability information
#
# @capability: capability enum
#
# @state: capability state bool
#
# Since: 1.2
##
{ 'struct': 'MigrationCapabilityStatus',
  'data': { 'capability': 'MigrationCapability', 'state': 'bool' } }

##
# @migrate-set-capabilities:
#
# Enable/Disable the following migration capabilities (like xbzrle)
#
# @capabilities: json array of capability modifications to make
#
# Since: 1.2
#
# Example:
#
# -> { "execute": "migrate-set-capabilities" , "arguments":
#      { "capabilities": [ { "capability": "xbzrle", "state": true } ] } }
# <- { "return": {} }
##
{ 'command': 'migrate-set-capabilities',
  'data': { 'capabilities': ['MigrationCapabilityStatus'] } }

##
# @query-migrate-capabilities:
#
# Returns information about the current migration capabilities status
#
# Returns: @MigrationCapabilityStatus
#
# Since: 1.2
#
# Example:
#
# -> { "execute": "query-migrate-capabilities" }
# <- { "return": [
#       {"state": false, "capability": "xbzrle"},
#       {"state": false, "capability": "rdma-pin-all"},
#       {"state": false, "capability": "auto-converge"},
#       {"state": false, "capability": "zero-blocks"},
#       {"state": false, "capability": "compress"},
#       {"state": true, "capability": "events"},
#       {"state": false, "capability": "postcopy-ram"},
#       {"state": false, "capability": "x-colo"}
#    ]}
##
{ 'command': 'query-migrate-capabilities',
  'returns': ['MigrationCapabilityStatus'] }

##
# @MultiFDCompression:
#
# An enumeration of multifd compression methods.
#
# @none: no compression.
#
# @zlib: use zlib compression method.
#
# @zstd: use zstd compression method.
#
# Since: 5.0
##
{ 'enum': 'MultiFDCompression',
  'data': [ 'none', 'zlib',
            { 'name': 'zstd', 'if': 'CONFIG_ZSTD' } ] }
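
# For instance, a multifd migration compressed with zstd (assuming
# QEMU was built with CONFIG_ZSTD) could be prepared like this; the
# channel count is illustrative, and both values are set through the
# parameters described below:
#
# -> { "execute": "migrate-set-capabilities", "arguments":
#      { "capabilities": [ { "capability": "multifd", "state": true } ] } }
# <- { "return": {} }
# -> { "execute": "migrate-set-parameters", "arguments":
#      { "multifd-channels": 8, "multifd-compression": "zstd" } }
# <- { "return": {} }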

##
# @BitmapMigrationBitmapAliasTransform:
#
# @persistent: If present, the bitmap will be made persistent or
#     transient depending on this parameter.
#
# Since: 6.0
##
{ 'struct': 'BitmapMigrationBitmapAliasTransform',
  'data': {
      '*persistent': 'bool'
  } }

##
# @BitmapMigrationBitmapAlias:
#
# @name: The name of the bitmap.
#
# @alias: An alias name for migration (for example the bitmap name on
#     the opposite site).
#
# @transform: Allows the modification of the migrated bitmap.  (since
#     6.0)
#
# Since: 5.2
##
{ 'struct': 'BitmapMigrationBitmapAlias',
  'data': {
      'name': 'str',
      'alias': 'str',
      '*transform': 'BitmapMigrationBitmapAliasTransform'
  } }

##
# @BitmapMigrationNodeAlias:
#
# Maps a block node name and the bitmaps it has to aliases for dirty
# bitmap migration.
#
# @node-name: A block node name.
#
# @alias: An alias block node name for migration (for example the
#     node name on the opposite site).
#
# @bitmaps: Mappings for the bitmaps on this node.
#
# Since: 5.2
##
{ 'struct': 'BitmapMigrationNodeAlias',
  'data': {
      'node-name': 'str',
      'alias': 'str',
      'bitmaps': [ 'BitmapMigrationBitmapAlias' ]
  } }
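
# As an illustration, a single-node mapping for the
# block-bitmap-mapping parameter described below might look like this
# (all names hypothetical):
#
# -> { "execute": "migrate-set-parameters", "arguments":
#      { "block-bitmap-mapping": [
#        { "node-name": "node0", "alias": "node-alias0",
#          "bitmaps": [ { "name": "bitmap0",
#                         "alias": "bitmap-alias0" } ] } ] } }
# <- { "return": {} }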

##
# @MigrationParameter:
#
# Migration parameters enumeration
#
# @announce-initial: Initial delay (in milliseconds) before sending
#     the first announce (Since 4.0)
#
# @announce-max: Maximum delay (in milliseconds) between packets in
#     the announcement (Since 4.0)
#
# @announce-rounds: Number of self-announce packets sent after
#     migration (Since 4.0)
#
# @announce-step: Increase in delay (in milliseconds) between
#     subsequent packets in the announcement (Since 4.0)
#
# @compress-level: Set the compression level to be used in live
#     migration, the compression level is an integer between 0 and 9,
#     where 0 means no compression, 1 means the best compression
#     speed, and 9 means best compression ratio which will consume
#     more CPU.
#
# @compress-threads: Set compression thread count to be used in live
#     migration, the compression thread count is an integer between 1
#     and 255.
#
# @compress-wait-thread: Controls behavior when all compression
#     threads are currently busy.  If true (default), wait for a free
#     compression thread to become available; otherwise, send the
#     page uncompressed.  (Since 3.1)
#
# @decompress-threads: Set decompression thread count to be used in
#     live migration, the decompression thread count is an integer
#     between 1 and 255.  Usually, decompression is at least 4 times
#     as fast as compression, so setting decompress-threads to around
#     1/4 of compress-threads is adequate.
#
# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
#     bytes_xfer_period to trigger throttling.  It is expressed as
#     percentage.  The default value is 50.  (Since 5.0)
#
# @cpu-throttle-initial: Initial percentage of time guest cpus are
#     throttled when migration auto-converge is activated.  The
#     default value is 20.  (Since 2.7)
#
# @cpu-throttle-increment: throttle percentage increase each time
#     auto-converge detects that migration is not making progress.
#     The default value is 10.  (Since 2.7)
#
# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage.
#     At the tail stage of throttling, the Guest is very sensitive to
#     CPU percentage while the @cpu-throttle-increment is excessive
#     usually at tail stage.  If this parameter is true, we will
#     compute the ideal CPU percentage used by the Guest, which may
#     exactly make the dirty rate match the dirty rate threshold.
#     Then we will choose a smaller throttle increment between the
#     one specified by @cpu-throttle-increment and the one generated
#     by ideal CPU percentage.  Therefore, it is compatible with
#     traditional throttling, meanwhile the throttle increment won't
#     be excessive at tail stage.  The default value is false.
#     (Since 5.1)
#
# @tls-creds: ID of the 'tls-creds' object that provides credentials
#     for establishing a TLS connection over the migration data
#     channel.  On the outgoing side of the migration, the
#     credentials must be for a 'client' endpoint, while for the
#     incoming side the credentials must be for a 'server' endpoint.
#     Setting this will enable TLS for all migrations.  The default
#     is unset, resulting in unsecured migration at the QEMU level.
#     (Since 2.7)
#
# @tls-hostname: hostname of the target host for the migration.  This
#     is required when using x509 based TLS credentials and the
#     migration URI does not already include a hostname.  For example
#     if using fd: or exec: based migration, the hostname must be
#     provided so that the server's x509 certificate identity can be
#     validated.  (Since 2.7)
#
# @tls-authz: ID of the 'authz' object subclass that provides access
#     control checking of the TLS x509 certificate distinguished
#     name.  This object is only resolved at time of use, so can be
#     deleted and recreated on the fly while the migration server is
#     active.  If missing, it will default to denying access (Since
#     4.0)
#
# @max-bandwidth: maximum speed for migration, in bytes per second.
#     (Since 2.8)
#
# @avail-switchover-bandwidth: the available bandwidth that migration
#     can use during the switchover phase.  NOTE!  This does not
#     limit the bandwidth during switchover, but is only used for
#     calculations when making decisions to switch over.  By default,
#     this value is zero, which means QEMU will estimate the
#     bandwidth automatically.  This can be set when the estimated
#     value is not accurate, while the user is able to guarantee such
#     bandwidth is available when switching over.  When specified
#     correctly, this can make the switchover decision much more
#     accurate.  (Since 8.2)
#
# @downtime-limit: maximum tolerated downtime for migration, in
#     milliseconds (Since 2.8)
#
# @x-checkpoint-delay: The delay time (in ms) between two COLO
#     checkpoints in periodic mode.  (Since 2.8)
#
# @block-incremental: Affects how much storage is migrated when the
#     block migration capability is enabled.  When false, the entire
#     storage backing chain is migrated into a flattened image at the
#     destination; when true, only the active qcow2 layer is migrated
#     and the destination must already have access to the same
#     backing chain as was used on the source.  (since 2.10)
#
# @multifd-channels: Number of channels used to migrate data in
#     parallel.  This is the same as the number of sockets used for
#     migration.  The default value is 2 (since 4.0)
#
# @xbzrle-cache-size: cache size to be used by XBZRLE migration.  It
#     needs to be a multiple of the target page size and a power of 2
#     (Since 2.11)
#
# @max-postcopy-bandwidth: Background transfer bandwidth during
#     postcopy.  Defaults to 0 (unlimited).  In bytes per second.
#     (Since 3.0)
#
# @max-cpu-throttle: maximum cpu throttle percentage.  Defaults to
#     99.  (Since 3.1)
#
# @multifd-compression: Which compression method to use.  Defaults to
#     none.  (Since 5.0)
#
# @multifd-zlib-level: Set the compression level to be used in live
#     migration, the compression level is an integer between 0 and 9,
#     where 0 means no compression, 1 means the best compression
#     speed, and 9 means best compression ratio which will consume
#     more CPU.  Defaults to 1.  (Since 5.0)
#
# @multifd-zstd-level: Set the compression level to be used in live
#     migration, the compression level is an integer between 0 and
#     20, where 0 means no compression, 1 means the best compression
#     speed, and 20 means best compression ratio which will consume
#     more CPU.  Defaults to 1.  (Since 5.0)
#
# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
#     aliases for the purpose of dirty bitmap migration.  Such
#     aliases may for example be the corresponding names on the
#     opposite site.  The mapping must be one-to-one, but not
#     necessarily complete: On the source, unmapped bitmaps and all
#     bitmaps on unmapped nodes will be ignored.  On the destination,
#     encountering an unmapped alias in the incoming migration stream
#     will result in a report, and all further bitmap migration data
#     will then be discarded.  Note that the destination does not
#     know about bitmaps it does not receive, so there is no
#     limitation or requirement regarding the number of bitmaps
#     received, or how they are named, or on which nodes they are
#     placed.  By default (when this parameter has never been set),
#     bitmap names are mapped to themselves.  Nodes are mapped to
#     their block device name if there is one, and to their node name
#     otherwise.  (Since 5.2)
#
# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of
#     dirty limit during live migration.  Should be in the range 1 to
#     1000ms.  Defaults to 1000ms.  (Since 8.1)
#
# @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration.
#     Defaults to 1.  (Since 8.1)
#
# Features:
#
# @unstable: Members @x-checkpoint-delay and
#     @x-vcpu-dirty-limit-period are experimental.
#
# Since: 2.4
##
{ 'enum': 'MigrationParameter',
  'data': ['announce-initial', 'announce-max',
           'announce-rounds', 'announce-step',
           'compress-level', 'compress-threads', 'decompress-threads',
           'compress-wait-thread', 'throttle-trigger-threshold',
           'cpu-throttle-initial', 'cpu-throttle-increment',
           'cpu-throttle-tailslow',
           'tls-creds', 'tls-hostname', 'tls-authz', 'max-bandwidth',
           'avail-switchover-bandwidth', 'downtime-limit',
           { 'name': 'x-checkpoint-delay', 'features': [ 'unstable' ] },
           'block-incremental',
           'multifd-channels',
           'xbzrle-cache-size', 'max-postcopy-bandwidth',
           'max-cpu-throttle', 'multifd-compression',
           'multifd-zlib-level', 'multifd-zstd-level',
           'block-bitmap-mapping',
           { 'name': 'x-vcpu-dirty-limit-period', 'features': ['unstable'] },
           'vcpu-dirty-limit'] }
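
# For example, vCPU dirty-rate throttling could be set up as below,
# assuming KVM with the "dirty-ring-size" accelerator property set;
# the 100 MB/s limit is illustrative, set via migrate-set-parameters
# (defined below):
#
# -> { "execute": "migrate-set-capabilities", "arguments":
#      { "capabilities": [
#        { "capability": "dirty-limit", "state": true } ] } }
# <- { "return": {} }
# -> { "execute": "migrate-set-parameters", "arguments":
#      { "vcpu-dirty-limit": 100 } }
# <- { "return": {} }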

##
# @MigrateSetParameters:
#
# @announce-initial: Initial delay (in milliseconds) before sending
#     the first announce (Since 4.0)
#
# @announce-max: Maximum delay (in milliseconds) between packets in
#     the announcement (Since 4.0)
#
# @announce-rounds: Number of self-announce packets sent after
#     migration (Since 4.0)
#
# @announce-step: Increase in delay (in milliseconds) between
#     subsequent packets in the announcement (Since 4.0)
#
# @compress-level: compression level
#
# @compress-threads: compression thread count
#
# @compress-wait-thread: Controls behavior when all compression
#     threads are currently busy.  If true (default), wait for a free
#     compression thread to become available; otherwise, send the
#     page uncompressed.  (Since 3.1)
#
# @decompress-threads: decompression thread count
#
# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
#     bytes_xfer_period to trigger throttling.  It is expressed as
#     percentage.  The default value is 50.  (Since 5.0)
#
# @cpu-throttle-initial: Initial percentage of time guest cpus are
#     throttled when migration auto-converge is activated.  The
#     default value is 20.  (Since 2.7)
#
# @cpu-throttle-increment: throttle percentage increase each time
#     auto-converge detects that migration is not making progress.
#     The default value is 10.  (Since 2.7)
#
# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage.
#     At the tail stage of throttling, the Guest is very sensitive to
#     CPU percentage while the @cpu-throttle-increment is excessive
#     usually at tail stage.  If this parameter is true, we will
#     compute the ideal CPU percentage used by the Guest, which may
#     exactly make the dirty rate match the dirty rate threshold.
#     Then we will choose a smaller throttle increment between the
#     one specified by @cpu-throttle-increment and the one generated
#     by ideal CPU percentage.  Therefore, it is compatible with
#     traditional throttling, meanwhile the throttle increment won't
#     be excessive at tail stage.  The default value is false.
#     (Since 5.1)
#
# @tls-creds: ID of the 'tls-creds' object that provides credentials
#     for establishing a TLS connection over the migration data
#     channel.  On the outgoing side of the migration, the
#     credentials must be for a 'client' endpoint, while for the
#     incoming side the credentials must be for a 'server' endpoint.
#     Setting this to a non-empty string enables TLS for all
#     migrations.  An empty string means that QEMU will use plain
#     text mode for migration, rather than TLS (Since 2.9)
#     Previously (since 2.7), this was reported by omitting tls-creds
#     instead.
#
# @tls-hostname: hostname of the target host for the migration.  This
#     is required when using x509 based TLS credentials and the
#     migration URI does not already include a hostname.  For example
#     if using fd: or exec: based migration, the hostname must be
#     provided so that the server's x509 certificate identity can be
#     validated.  (Since 2.7) An empty string means that QEMU will
#     use the hostname associated with the migration URI, if any.
#     (Since 2.9) Previously (since 2.7), this was reported by
#     omitting tls-hostname instead.
#
# @max-bandwidth: maximum speed for migration, in bytes per second.
#     (Since 2.8)
#
# @avail-switchover-bandwidth: the available bandwidth that migration
#     can use during the switchover phase.  NOTE!  This does not
#     limit the bandwidth during switchover, but is only used for
#     calculations when making decisions to switch over.  By default,
#     this value is zero, which means QEMU will estimate the
#     bandwidth automatically.  This can be set when the estimated
#     value is not accurate, while the user is able to guarantee such
#     bandwidth is available when switching over.  When specified
#     correctly, this can make the switchover decision much more
#     accurate.  (Since 8.2)
#
# @downtime-limit: maximum tolerated downtime for migration, in
#     milliseconds (Since 2.8)
#
# @x-checkpoint-delay: the delay time between two COLO checkpoints.
#     (Since 2.8)
#
# @block-incremental: Affects how much storage is migrated when the
#     block migration capability is enabled.  When false, the entire
#     storage backing chain is migrated into a flattened image at the
#     destination; when true, only the active qcow2 layer is migrated
#     and the destination must already have access to the same
#     backing chain as was used on the source.  (since 2.10)
#
# @multifd-channels: Number of channels used to migrate data in
#     parallel.  This is the same as the number of sockets used for
#     migration.  The default value is 2 (since 4.0)
#
# @xbzrle-cache-size: cache size to be used by XBZRLE migration.  It
#     needs to be a multiple of the target page size and a power of 2
#     (Since 2.11)
#
# @max-postcopy-bandwidth: Background transfer bandwidth during
#     postcopy.  Defaults to 0 (unlimited).  In bytes per second.
#     (Since 3.0)
#
# @max-cpu-throttle: maximum cpu throttle percentage.  The default
#     value is 99.  (Since 3.1)
#
# @multifd-compression: Which compression method to use.  Defaults to
#     none.  (Since 5.0)
#
# @multifd-zlib-level: Set the compression level to be used in live
#     migration, the compression level is an integer between 0 and 9,
#     where 0 means no compression, 1 means the best compression
#     speed, and 9 means best compression ratio which will consume
#     more CPU.  Defaults to 1.  (Since 5.0)
#
# @multifd-zstd-level: Set the compression level to be used in live
#     migration, the compression level is an integer between 0 and
#     20, where 0 means no compression, 1 means the best compression
#     speed, and 20 means best compression ratio which will consume
#     more CPU.  Defaults to 1.  (Since 5.0)
#
# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
#     aliases for the purpose of dirty bitmap migration.  Such
#     aliases may for example be the corresponding names on the
#     opposite site.  The mapping must be one-to-one, but not
#     necessarily complete: On the source, unmapped bitmaps and all
#     bitmaps on unmapped nodes will be ignored.  On the destination,
#     encountering an unmapped alias in the incoming migration stream
#     will result in a report, and all further bitmap migration data
#     will then be discarded.  Note that the destination does not
#     know about bitmaps it does not receive, so there is no
#     limitation or requirement regarding the number of bitmaps
#     received, or how they are named, or on which nodes they are
#     placed.  By default (when this parameter has never been set),
#     bitmap names are mapped to themselves.  Nodes are mapped to
#     their block device name if there is one, and to their node name
#     otherwise.  (Since 5.2)
#
# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of
#     dirty limit during live migration.  Should be in the range 1 to
#     1000ms.  Defaults to 1000ms.  (Since 8.1)
#
# @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration.
#     Defaults to 1.  (Since 8.1)
#
# Features:
#
# @unstable: Members @x-checkpoint-delay and
#     @x-vcpu-dirty-limit-period are experimental.
#
# TODO: either fuse back into MigrationParameters, or make
#     MigrationParameters members mandatory
#
# Since: 2.4
##
{ 'struct': 'MigrateSetParameters',
  'data': { '*announce-initial': 'size',
            '*announce-max': 'size',
            '*announce-rounds': 'size',
            '*announce-step': 'size',
            '*compress-level': 'uint8',
            '*compress-threads': 'uint8',
            '*compress-wait-thread': 'bool',
            '*decompress-threads': 'uint8',
            '*throttle-trigger-threshold': 'uint8',
            '*cpu-throttle-initial': 'uint8',
            '*cpu-throttle-increment': 'uint8',
            '*cpu-throttle-tailslow': 'bool',
            '*tls-creds': 'StrOrNull',
            '*tls-hostname': 'StrOrNull',
            '*tls-authz': 'StrOrNull',
            '*max-bandwidth': 'size',
            '*avail-switchover-bandwidth': 'size',
            '*downtime-limit': 'uint64',
            '*x-checkpoint-delay': { 'type': 'uint32',
                                     'features': [ 'unstable' ] },
            '*block-incremental': 'bool',
            '*multifd-channels': 'uint8',
            '*xbzrle-cache-size': 'size',
            '*max-postcopy-bandwidth': 'size',
            '*max-cpu-throttle': 'uint8',
            '*multifd-compression': 'MultiFDCompression',
            '*multifd-zlib-level': 'uint8',
            '*multifd-zstd-level': 'uint8',
            '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ],
            '*x-vcpu-dirty-limit-period': { 'type': 'uint64',
                                            'features': [ 'unstable' ] },
            '*vcpu-dirty-limit': 'uint64'} }

##
# @migrate-set-parameters:
#
# Set various migration parameters.
#
# Since: 2.4
#
# Example:
#
# -> { "execute": "migrate-set-parameters" ,
#      "arguments": { "compress-level": 1 } }
# <- { "return": {} }
##
{ 'command': 'migrate-set-parameters', 'boxed': true,
  'data': 'MigrateSetParameters' }
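
# Several parameters can be set in one call; for instance, capping
# bandwidth at 32 MiB/s while allowing up to 300 ms of downtime
# (illustrative values, matching the query-migrate-parameters example
# below):
#
# -> { "execute": "migrate-set-parameters", "arguments":
#      { "max-bandwidth": 33554432, "downtime-limit": 300 } }
# <- { "return": {} }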

##
# @MigrationParameters:
#
# The optional members aren't actually optional.
#
# @announce-initial: Initial delay (in milliseconds) before sending
#     the first announce (Since 4.0)
#
# @announce-max: Maximum delay (in milliseconds) between packets in
#     the announcement (Since 4.0)
#
# @announce-rounds: Number of self-announce packets sent after
#     migration (Since 4.0)
#
# @announce-step: Increase in delay (in milliseconds) between
#     subsequent packets in the announcement (Since 4.0)
#
# @compress-level: compression level
#
# @compress-threads: compression thread count
#
# @compress-wait-thread: Controls behavior when all compression
#     threads are currently busy.  If true (default), wait for a free
#     compression thread to become available; otherwise, send the
#     page uncompressed.  (Since 3.1)
#
# @decompress-threads: decompression thread count
#
# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
#     bytes_xfer_period to trigger throttling.  It is expressed as
#     percentage.  The default value is 50.  (Since 5.0)
#
# @cpu-throttle-initial: Initial percentage of time guest cpus are
#     throttled when migration auto-converge is activated.  (Since
#     2.7)
#
# @cpu-throttle-increment: throttle percentage increase each time
#     auto-converge detects that migration is not making progress.
#     (Since 2.7)
#
# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage.
#     At the tail stage of throttling, the Guest is very sensitive to
#     CPU percentage while the @cpu-throttle-increment is excessive
#     usually at tail stage.  If this parameter is true, we will
#     compute the ideal CPU percentage used by the Guest, which may
#     exactly make the dirty rate match the dirty rate threshold.
#     Then we will choose a smaller throttle increment between the
#     one specified by @cpu-throttle-increment and the one generated
#     by ideal CPU percentage.  Therefore, it is compatible with
#     traditional throttling, meanwhile the throttle increment won't
#     be excessive at tail stage.  The default value is false.
#     (Since 5.1)
#
# @tls-creds: ID of the 'tls-creds' object that provides credentials
#     for establishing a TLS connection over the migration data
#     channel.  On the outgoing side of the migration, the
#     credentials must be for a 'client' endpoint, while for the
#     incoming side the credentials must be for a 'server' endpoint.
#     An empty string means that QEMU will use plain text mode for
#     migration, rather than TLS (Since 2.7) Note: 2.8 reports this
#     by omitting tls-creds instead.
#
# @tls-hostname: hostname of the target host for the migration.  This
#     is required when using x509 based TLS credentials and the
#     migration URI does not already include a hostname.  For example
#     if using fd: or exec: based migration, the hostname must be
#     provided so that the server's x509 certificate identity can be
#     validated.  (Since 2.7) An empty string means that QEMU will
#     use the hostname associated with the migration URI, if any.
#     (Since 2.9) Note: 2.8 reports this by omitting tls-hostname
#     instead.
#
# @tls-authz: ID of the 'authz' object subclass that provides access
#     control checking of the TLS x509 certificate distinguished
#     name.  (Since 4.0)
#
# @max-bandwidth: maximum speed for migration, in bytes per second.
#     (Since 2.8)
#
# @avail-switchover-bandwidth: the available bandwidth that migration
#     can use during the switchover phase.  NOTE!  This does not
#     limit the bandwidth during switchover, but is only used for
#     calculations when making decisions to switch over.  By default,
#     this value is zero, which means QEMU will estimate the
#     bandwidth automatically.  This can be set when the estimated
#     value is not accurate, while the user is able to guarantee such
#     bandwidth is available when switching over.  When specified
#     correctly, this can make the switchover decision much more
#     accurate.  (Since 8.2)
#
# @downtime-limit: maximum tolerated downtime for migration, in
#     milliseconds (Since 2.8)
#
# @x-checkpoint-delay: the delay time between two COLO checkpoints.
#     (Since 2.8)
#
# @block-incremental: Affects how much storage is migrated when the
#     block migration capability is enabled.  When false, the entire
#     storage backing chain is migrated into a flattened image at the
#     destination; when true, only the active qcow2 layer is migrated
#     and the destination must already have access to the same
#     backing chain as was used on the source.  (since 2.10)
#
# @multifd-channels: Number of channels used to migrate data in
#     parallel.  This is the same as the number of sockets used for
#     migration.  The default value is 2 (since 4.0)
#
# @xbzrle-cache-size: cache size to be used by XBZRLE migration.  It
#     needs to be a multiple of the target page size and a power of 2
#     (Since 2.11)
#
# @max-postcopy-bandwidth: Background transfer bandwidth during
#     postcopy.  Defaults to 0 (unlimited).  In bytes per second.
#     (Since 3.0)
#
# @max-cpu-throttle: maximum cpu throttle percentage.  Defaults to
#     99.  (Since 3.1)
#
# @multifd-compression: Which compression method to use.  Defaults to
#     none.  (Since 5.0)
#
# @multifd-zlib-level: Set the compression level to be used in live
#     migration, the compression level is an integer between 0 and 9,
#     where 0 means no compression, 1 means the best compression
#     speed, and 9 means best compression ratio which will consume
#     more CPU.  Defaults to 1.  (Since 5.0)
#
# @multifd-zstd-level: Set the compression level to be used in live
#     migration, the compression level is an integer between 0 and
#     20, where 0 means no compression, 1 means the best compression
#     speed, and 20 means best compression ratio which will consume
#     more CPU.  Defaults to 1.  (Since 5.0)
#
# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
#     aliases for the purpose of dirty bitmap migration.  Such
#     aliases may for example be the corresponding names on the
#     opposite site.  The mapping must be one-to-one, but not
#     necessarily complete: On the source, unmapped bitmaps and all
#     bitmaps on unmapped nodes will be ignored.  On the destination,
#     encountering an unmapped alias in the incoming migration stream
#     will result in a report, and all further bitmap migration data
#     will then be discarded.  Note that the destination does not
#     know about bitmaps it does not receive, so there is no
#     limitation or requirement regarding the number of bitmaps
#     received, or how they are named, or on which nodes they are
#     placed.  By default (when this parameter has never been set),
#     bitmap names are mapped to themselves.  Nodes are mapped to
#     their block device name if there is one, and to their node name
#     otherwise.  (Since 5.2)
#
# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of
#     dirty limit during live migration.  Should be in the range 1 to
#     1000ms.  Defaults to 1000ms.  (Since 8.1)
#
# @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration.
#     Defaults to 1.  (Since 8.1)
#
# Features:
#
# @unstable: Members @x-checkpoint-delay and
#     @x-vcpu-dirty-limit-period are experimental.
#
# Since: 2.4
##
{ 'struct': 'MigrationParameters',
  'data': { '*announce-initial': 'size',
            '*announce-max': 'size',
            '*announce-rounds': 'size',
            '*announce-step': 'size',
            '*compress-level': 'uint8',
            '*compress-threads': 'uint8',
            '*compress-wait-thread': 'bool',
            '*decompress-threads': 'uint8',
            '*throttle-trigger-threshold': 'uint8',
            '*cpu-throttle-initial': 'uint8',
            '*cpu-throttle-increment': 'uint8',
            '*cpu-throttle-tailslow': 'bool',
            '*tls-creds': 'str',
            '*tls-hostname': 'str',
            '*tls-authz': 'str',
            '*max-bandwidth': 'size',
            '*avail-switchover-bandwidth': 'size',
            '*downtime-limit': 'uint64',
            '*x-checkpoint-delay': { 'type': 'uint32',
                                     'features': [ 'unstable' ] },
            '*block-incremental': 'bool',
            '*multifd-channels': 'uint8',
            '*xbzrle-cache-size': 'size',
            '*max-postcopy-bandwidth': 'size',
            '*max-cpu-throttle': 'uint8',
            '*multifd-compression': 'MultiFDCompression',
            '*multifd-zlib-level': 'uint8',
            '*multifd-zstd-level': 'uint8',
            '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ],
            '*x-vcpu-dirty-limit-period': { 'type': 'uint64',
                                            'features': [ 'unstable' ] },
            '*vcpu-dirty-limit': 'uint64'} }

##
# @query-migrate-parameters:
#
# Returns information about the current migration parameters
#
# Returns: @MigrationParameters
#
# Since: 2.4
#
# Example:
#
# -> { "execute": "query-migrate-parameters" }
# <- { "return": {
#         "decompress-threads": 2,
#         "cpu-throttle-increment": 10,
#         "compress-threads": 8,
#         "compress-level": 1,
#         "cpu-throttle-initial": 20,
#         "max-bandwidth": 33554432,
#         "downtime-limit": 300
#      }
#    }
##
{ 'command': 'query-migrate-parameters',
  'returns': 'MigrationParameters' }

##
# @migrate-start-postcopy:
#
# Followup to a migration command to switch the migration to postcopy
# mode.  The postcopy-ram capability must be set on both source and
# destination before the original migration command.
#
# Since: 2.5
#
# Example:
#
# -> { "execute": "migrate-start-postcopy" }
# <- { "return": {} }
##
{ 'command': 'migrate-start-postcopy' }
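
# A typical postcopy switch might therefore look like this: start a
# normal migration, then flip to postcopy once the bulk of RAM has
# been sent (URI illustrative):
#
# -> { "execute": "migrate",
#      "arguments": { "uri": "tcp:192.168.1.200:4446" } }
# <- { "return": {} }
# -> { "execute": "migrate-start-postcopy" }
# <- { "return": {} }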

##
# @MIGRATION:
#
# Emitted when a migration event happens
#
# @status: @MigrationStatus describing the current migration status.
#
# Since: 2.4
#
# Example:
#
# <- {"timestamp": {"seconds": 1432121972, "microseconds": 744001},
#     "event": "MIGRATION",
#     "data": {"status": "completed"} }
##
{ 'event': 'MIGRATION',
  'data': {'status': 'MigrationStatus'}}

##
# @MIGRATION_PASS:
#
# Emitted from the source side of a migration at the start of each
# pass (when it syncs the dirty bitmap)
#
# @pass: An incrementing count (starting at 1 on the first pass)
#
# Since: 2.6
#
# Example:
#
# <- { "timestamp": {"seconds": 1449669631, "microseconds": 239225},
#      "event": "MIGRATION_PASS", "data": {"pass": 2} }
##
{ 'event': 'MIGRATION_PASS',
  'data': { 'pass': 'int' } }

##
# @COLOMessage:
#
# The message transmission between Primary side and Secondary side.
#
# @checkpoint-ready: Secondary VM (SVM) is ready for checkpointing
#
# @checkpoint-request: Primary VM (PVM) tells SVM to prepare for
#     checkpointing
#
# @checkpoint-reply: SVM gets PVM's checkpoint request
#
# @vmstate-send: VM's state will be sent by PVM.
#
# @vmstate-size: The total size of VMstate.
#
# @vmstate-received: VM's state has been received by SVM.
#
# @vmstate-loaded: VM's state has been loaded by SVM.
#
# Since: 2.8
##
{ 'enum': 'COLOMessage',
  'data': [ 'checkpoint-ready', 'checkpoint-request', 'checkpoint-reply',
            'vmstate-send', 'vmstate-size', 'vmstate-received',
            'vmstate-loaded' ] }

##
# @COLOMode:
#
# The COLO current mode.
#
# @none: COLO is disabled.
#
# @primary: COLO node in primary side.
#
# @secondary: COLO node in secondary side.
#
# Since: 2.8
##
{ 'enum': 'COLOMode',
  'data': [ 'none', 'primary', 'secondary'] }

##
# @FailoverStatus:
#
# An enumeration of COLO failover status
#
# @none: no failover has ever happened
#
# @require: got failover requirement but not handled
#
# @active: in the process of doing failover
#
# @completed: finish the process of failover
#
# @relaunch: restart the failover process, from 'none' -> 'completed'
#     (Since 2.9)
#
# Since: 2.8
##
{ 'enum': 'FailoverStatus',
  'data': [ 'none', 'require', 'active', 'completed', 'relaunch' ] }

##
# @COLO_EXIT:
#
# Emitted when VM finishes COLO mode due to some errors happening or
# at the request of users.
#
# @mode: report COLO mode when COLO exited.
#
# @reason: describes the reason for the COLO exit.
#
# Since: 3.1
#
# Example:
#
# <- { "timestamp": {"seconds": 2032141960, "microseconds": 417172},
#      "event": "COLO_EXIT", "data": {"mode": "primary", "reason": "request" } }
##
{ 'event': 'COLO_EXIT',
  'data': {'mode': 'COLOMode', 'reason': 'COLOExitReason' } }

##
# @COLOExitReason:
#
# The reason for a COLO exit.
#
# @none: failover has never happened.  This state does not occur in
#     the COLO_EXIT event, and is only visible in the result of
#     query-colo-status.
#
# @request: COLO exit is due to an external request.
#
# @error: COLO exit is due to an internal error.
#
# @processing: COLO is currently handling a failover (since 4.0).
#
# Since: 3.1
##
{ 'enum': 'COLOExitReason',
  'data': [ 'none', 'request', 'error', 'processing' ] }

##
# @x-colo-lost-heartbeat:
#
# Tell qemu that heartbeat is lost, request it to do takeover
# procedures.  If this command is sent to the PVM, the Primary side
# will exit COLO mode.  If sent to the Secondary, the Secondary side
# will run failover work, then takes over server operation to become
# the service VM.
#
# Features:
#
# @unstable: This command is experimental.
#
# Since: 2.8
#
# Example:
#
# -> { "execute": "x-colo-lost-heartbeat" }
# <- { "return": {} }
##
{ 'command': 'x-colo-lost-heartbeat',
  'features': [ 'unstable' ],
  'if': 'CONFIG_REPLICATION' }

##
# @migrate_cancel:
#
# Cancel the currently executing migration process.
#
# Returns: nothing on success
#
# Notes: This command succeeds even if there is no migration process
#     running.
#
# Since: 0.14
#
# Example:
#
# -> { "execute": "migrate_cancel" }
# <- { "return": {} }
##
{ 'command': 'migrate_cancel' }

##
# @migrate-continue:
#
# Continue migration when it's in a paused state.
#
# @state: The state the migration is currently expected to be in
#
# Returns: nothing on success
#
# Since: 2.11
#
# Example:
#
# -> { "execute": "migrate-continue" , "arguments":
#      { "state": "pre-switchover" } }
# <- { "return": {} }
##
{ 'command': 'migrate-continue', 'data': {'state': 'MigrationStatus'} }

##
# @migrate:
#
# Migrates the currently running guest to another Virtual Machine.
#
# @uri: the Uniform Resource Identifier of the destination VM
#
# @blk: do block migration (full disk copy)
#
# @inc: incremental disk copy migration
#
# @detach: this argument exists only for compatibility reasons and is
#     ignored by QEMU
#
# @resume: resume one paused migration, default "off".  (since 3.0)
#
# Returns: nothing on success
#
# Since: 0.14
#
# Notes:
#
# 1. The 'query-migrate' command should be used to check migration's
#    progress and final result (this information is provided by the
#    'status' member)
#
# 2. All boolean arguments default to false
#
# 3. The user Monitor's "detach" argument is invalid in QMP and
#    should not be used
#
# Example:
#
# -> { "execute": "migrate", "arguments": { "uri": "tcp:0:4446" } }
# <- { "return": {} }
##
{ 'command': 'migrate',
  'data': {'uri': 'str', '*blk': 'bool', '*inc': 'bool',
           '*detach': 'bool', '*resume': 'bool' } }
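
# For instance, a paused postcopy migration might be resumed from the
# source after migrate-recover has provided a new URI on the
# destination (URI illustrative):
#
# -> { "execute": "migrate",
#      "arguments": { "uri": "tcp:192.168.1.200:12345",
#                     "resume": true } }
# <- { "return": {} }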
##
{ 'command': 'migrate',
  'data': {'uri': 'str', '*blk': 'bool', '*inc': 'bool',
           '*detach': 'bool', '*resume': 'bool' } }

##
# @migrate-incoming:
#
# Start an incoming migration. QEMU must have been started with
# -incoming defer.
#
# @uri: The Uniform Resource Identifier identifying the source or
#     address to listen on
#
# Returns: nothing on success
#
# Since: 2.3
#
# Notes:
#
# 1. It's a bad idea to use a string for the uri, but it needs to
#    stay compatible with -incoming and the format of the uri is
#    already exposed to libvirt.
#
# 2. QEMU must be started with -incoming defer to allow
#    migrate-incoming to be used.
#
# 3. The uri format is the same as for -incoming
#
# Example:
#
# -> { "execute": "migrate-incoming",
#      "arguments": { "uri": "tcp::4446" } }
# <- { "return": {} }
##
{ 'command': 'migrate-incoming', 'data': {'uri': 'str' } }

##
# @xen-save-devices-state:
#
# Save the state of all devices to file. The RAM and the block
# devices of the VM are not saved by this command.
#
# @filename: the file to save the state of the devices to as binary
#     data. See xen-save-devices-state.txt for a description of the
#     binary format.
#
# @live: Optional argument to ask QEMU to treat this command as part
#     of a live migration. Defaults to true. (since 2.11)
#
# Returns: nothing on success
#
# Since: 1.1
#
# Example:
#
# -> { "execute": "xen-save-devices-state",
#      "arguments": { "filename": "/tmp/save" } }
# <- { "return": {} }
##
{ 'command': 'xen-save-devices-state',
  'data': {'filename': 'str', '*live':'bool' } }

##
# @xen-set-global-dirty-log:
#
# Enable or disable the global dirty log mode.
#
# @enable: true to enable, false to disable.
#
# Returns: nothing
#
# Since: 1.3
#
# Example:
#
# -> { "execute": "xen-set-global-dirty-log",
#      "arguments": { "enable": true } }
# <- { "return": {} }
##
{ 'command': 'xen-set-global-dirty-log', 'data': { 'enable': 'bool' } }

##
# @xen-load-devices-state:
#
# Load the state of all devices from file. The RAM and the block
# devices of the VM are not loaded by this command.
#
# @filename: the file to load the state of the devices from as binary
#     data. See xen-save-devices-state.txt for a description of the
#     binary format.
#
# Since: 2.7
#
# Example:
#
# -> { "execute": "xen-load-devices-state",
#      "arguments": { "filename": "/tmp/resume" } }
# <- { "return": {} }
##
{ 'command': 'xen-load-devices-state', 'data': {'filename': 'str'} }

##
# @xen-set-replication:
#
# Enable or disable replication.
#
# @enable: true to enable, false to disable.
#
# @primary: true for primary or false for secondary.
#
# @failover: true to do failover, false to stop. It cannot be
#     specified if 'enable' is true. The default value is false.
#
# Returns: nothing.
#
# Example:
#
# -> { "execute": "xen-set-replication",
#      "arguments": {"enable": true, "primary": false} }
# <- { "return": {} }
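#
# A hypothetical follow-up exchange, shown for illustration, that
# stops replication and performs failover on the secondary:
#
# -> { "execute": "xen-set-replication",
#      "arguments": {"enable": false, "primary": false, "failover": true} }
# <- { "return": {} }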
#
# Since: 2.9
##
{ 'command': 'xen-set-replication',
  'data': { 'enable': 'bool', 'primary': 'bool', '*failover': 'bool' },
  'if': 'CONFIG_REPLICATION' }

##
# @ReplicationStatus:
#
# The result format for 'query-xen-replication-status'.
#
# @error: true if an error happened, false if replication is normal.
#
# @desc: the human readable error description string, when @error is
#     'true'.
#
# Since: 2.9
##
{ 'struct': 'ReplicationStatus',
  'data': { 'error': 'bool', '*desc': 'str' },
  'if': 'CONFIG_REPLICATION' }

##
# @query-xen-replication-status:
#
# Query replication status while the VM is running.
#
# Returns: A @ReplicationStatus object showing the status.
#
# Example:
#
# -> { "execute": "query-xen-replication-status" }
# <- { "return": { "error": false } }
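#
# A hypothetical error response; the contents of @desc are
# implementation defined and shown here only for illustration:
#
# <- { "return": { "error": true, "desc": "replication connection lost" } }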
#
# Since: 2.9
##
{ 'command': 'query-xen-replication-status',
  'returns': 'ReplicationStatus',
  'if': 'CONFIG_REPLICATION' }

##
# @xen-colo-do-checkpoint:
#
# Xen uses this command to notify replication to trigger a checkpoint.
#
# Returns: nothing.
#
# Example:
#
# -> { "execute": "xen-colo-do-checkpoint" }
# <- { "return": {} }
#
# Since: 2.9
##
{ 'command': 'xen-colo-do-checkpoint',
  'if': 'CONFIG_REPLICATION' }

##
# @COLOStatus:
#
# The result format for 'query-colo-status'.
#
# @mode: COLO running mode. If COLO is running, this field will
#     return 'primary' or 'secondary'.
#
# @last-mode: COLO last running mode. If COLO is running, this field
#     returns the same value as @mode; after failover, it can be used
#     to query the last COLO mode. (since 4.0)
#
# @reason: describes the reason for the COLO exit.
#
# Since: 3.1
##
{ 'struct': 'COLOStatus',
  'data': { 'mode': 'COLOMode', 'last-mode': 'COLOMode',
            'reason': 'COLOExitReason' },
  'if': 'CONFIG_REPLICATION' }

##
# @query-colo-status:
#
# Query COLO status while the VM is running.
#
# Returns: A @COLOStatus object showing the status.
#
# Example:
#
# -> { "execute": "query-colo-status" }
# <- { "return": { "mode": "primary", "last-mode": "none", "reason": "request" } }
#
# Since: 3.1
##
{ 'command': 'query-colo-status',
  'returns': 'COLOStatus',
  'if': 'CONFIG_REPLICATION' }

##
# @migrate-recover:
#
# Provide a recovery migration stream URI.
#
# @uri: the URI to be used for the recovery of the migration stream.
#
# Returns: nothing.
#
# Example:
#
# -> { "execute": "migrate-recover",
#      "arguments": { "uri": "tcp:192.168.1.200:12345" } }
# <- { "return": {} }
#
# Since: 3.0
##
{ 'command': 'migrate-recover',
  'data': { 'uri': 'str' },
  'allow-oob': true }

##
# @migrate-pause:
#
# Pause a migration. Currently it only supports postcopy.
#
# Returns: nothing.
#
# Example:
#
# -> { "execute": "migrate-pause" }
# <- { "return": {} }
#
# Since: 3.0
##
{ 'command': 'migrate-pause', 'allow-oob': true }

##
# @UNPLUG_PRIMARY:
#
# Emitted from the source side of a migration when the migration
# state is WAIT_UNPLUG. The device was unplugged by the guest
# operating system. Device resources in QEMU are kept on standby to
# be able to re-plug it in case of migration failure.
#
# @device-id: QEMU device id of the unplugged device
#
# Since: 4.2
#
# Example:
#
# <- { "event": "UNPLUG_PRIMARY",
#      "data": { "device-id": "hostdev0" },
#      "timestamp": { "seconds": 1265044230, "microseconds": 450486 } }
##
{ 'event': 'UNPLUG_PRIMARY',
  'data': { 'device-id': 'str' } }

##
# @DirtyRateVcpu:
#
# Dirty rate of a vCPU.
#
# @id: vCPU index.
#
# @dirty-rate: dirty rate.
#
# Since: 6.2
##
{ 'struct': 'DirtyRateVcpu',
  'data': { 'id': 'int', 'dirty-rate': 'int64' } }

##
# @DirtyRateStatus:
#
# Dirty page rate measurement status.
#
# @unstarted: measuring thread has not been started yet
#
# @measuring: measuring thread is running
#
# @measured: dirty page rate is measured and the results are available
#
# Since: 5.2
##
{ 'enum': 'DirtyRateStatus',
  'data': [ 'unstarted', 'measuring', 'measured'] }

##
# @DirtyRateMeasureMode:
#
# Method used to measure dirty page rate. Differences between
# available methods are explained in @calc-dirty-rate.
#
# @page-sampling: use page sampling
#
# @dirty-ring: use dirty ring
#
# @dirty-bitmap: use dirty bitmap
#
# Since: 6.2
##
{ 'enum': 'DirtyRateMeasureMode',
  'data': ['page-sampling', 'dirty-ring', 'dirty-bitmap'] }

##
# @TimeUnit:
#
# Specifies the unit in which a time-related value is expressed.
#
# @second: value is in seconds
#
# @millisecond: value is in milliseconds
#
# Since: 8.2
##
{ 'enum': 'TimeUnit',
  'data': ['second', 'millisecond'] }

##
# @DirtyRateInfo:
#
# Information about measured dirty page rate.
#
# @dirty-rate: an estimate of the dirty page rate of the VM in units
#     of MiB/s. Value is present only when @status is 'measured'.
#
# @status: current status of dirty page rate measurements
#
# @start-time: start time in units of seconds for the calculation
#
# @calc-time: time period for which dirty page rate was measured,
#     expressed and rounded down to @calc-time-unit.
#
# @calc-time-unit: time unit of @calc-time (Since 8.2)
#
# @sample-pages: number of sampled pages per GiB of guest memory.
#     Valid only in page-sampling mode (Since 6.1)
#
# @mode: mode that was used to measure dirty page rate (Since 6.2)
#
# @vcpu-dirty-rate: dirty rate for each vCPU if dirty-ring mode was
#     specified (Since 6.2)
#
# Since: 5.2
##
{ 'struct': 'DirtyRateInfo',
  'data': {'*dirty-rate': 'int64',
           'status': 'DirtyRateStatus',
           'start-time': 'int64',
           'calc-time': 'int64',
           'calc-time-unit': 'TimeUnit',
           'sample-pages': 'uint64',
           'mode': 'DirtyRateMeasureMode',
           '*vcpu-dirty-rate': [ 'DirtyRateVcpu' ] } }

##
# @calc-dirty-rate:
#
# Start measuring dirty page rate of the VM. Results can be retrieved
# with @query-dirty-rate after measurements are completed.
#
# Dirty page rate is the number of pages changed in a given time
# period expressed in MiB/s. The following methods of calculation are
# available:
#
# 1. In page sampling mode, a random subset of pages are selected and
#    hashed twice: once at the beginning of the measurement time
#    period, and once again at the end. If two hashes for some page
#    are different, the page is counted as changed. Since this method
#    relies on sampling and hashing, the calculated dirty page rate is
#    only an estimate of its true value. Increasing @sample-pages
#    improves estimation quality at the cost of higher computational
#    overhead.
#
# 2. Dirty bitmap mode captures writes to memory (for example by
#    temporarily revoking write access to all pages) and counts page
#    faults. Information about modified pages is collected into a
#    bitmap, where each bit corresponds to one guest page. This mode
#    requires that the KVM accelerator property "dirty-ring-size" is
#    *not* set.
#
# 3. Dirty ring mode is similar to dirty bitmap mode, but the
#    information about modified pages is collected into a ring buffer.
#    This mode tracks page modifications for each vCPU separately. It
#    requires that the KVM accelerator property "dirty-ring-size" is
#    set.
#
# @calc-time: time period for which dirty page rate is calculated.
#     By default it is specified in seconds, but the unit can be set
#     explicitly with @calc-time-unit. Note that larger @calc-time
#     values will typically result in smaller dirty page rates because
#     page dirtying is a one-time event. Once some page is counted
#     as dirty during the @calc-time period, further writes to this
#     page will not increase the dirty page rate anymore.
#
# @calc-time-unit: time unit in which @calc-time is specified.
#     By default it is seconds. (Since 8.2)
#
# @sample-pages: number of sampled pages per GiB of guest memory.
#     Default value is 512. For 4KiB guest pages this corresponds to
#     a sampling ratio of 0.2%. This argument is used only in page
#     sampling mode. (Since 6.1)
#
# @mode: mechanism for tracking dirty pages. Default value is
#     'page-sampling'. Others are 'dirty-bitmap' and 'dirty-ring'.
#     (Since 6.1)
#
# Since: 5.2
#
# Example:
#
# Measure dirty rate using page sampling for one second:
#
# -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 1,
#     "sample-pages": 512} }
# <- { "return": {} }
#
# Measure dirty rate using dirty bitmap for 500 milliseconds:
#
# -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 500,
#     "calc-time-unit": "millisecond", "mode": "dirty-bitmap"} }
# <- { "return": {} }
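#
# A further illustrative invocation (values are hypothetical) using
# dirty ring mode, which requires the KVM accelerator property
# "dirty-ring-size" to be set:
#
# -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 10,
#     "mode": "dirty-ring"} }
# <- { "return": {} }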
##
{ 'command': 'calc-dirty-rate', 'data': {'calc-time': 'int64',
                                         '*calc-time-unit': 'TimeUnit',
                                         '*sample-pages': 'int',
                                         '*mode': 'DirtyRateMeasureMode'} }

##
# @query-dirty-rate:
#
# Query results of the most recent invocation of @calc-dirty-rate.
#
# @calc-time-unit: time unit in which to report calculation time.
#     By default it is reported in seconds. (Since 8.2)
#
# Since: 5.2
#
# Examples:
#
# 1. Measurement is in progress:
#
# -> {"execute": "query-dirty-rate"}
# <- {"return": {"status": "measuring", "sample-pages": 512,
#     "mode": "page-sampling", "start-time": 1693900454,
#     "calc-time": 10, "calc-time-unit": "second"}}
#
# 2. Measurement has been completed:
#
# -> {"execute": "query-dirty-rate"}
# <- {"return": {"status": "measured", "sample-pages": 512,
#     "dirty-rate": 108, "mode": "page-sampling",
#     "start-time": 1693900454, "calc-time": 10,
#     "calc-time-unit": "second"}}
##
{ 'command': 'query-dirty-rate', 'data': {'*calc-time-unit': 'TimeUnit' },
  'returns': 'DirtyRateInfo' }

##
# @DirtyLimitInfo:
#
# Dirty page rate limit information of a virtual CPU.
#
# @cpu-index: index of a virtual CPU.
#
# @limit-rate: upper limit of dirty page rate (MB/s) for a virtual
#     CPU, 0 means unlimited.
#
# @current-rate: current dirty page rate (MB/s) for a virtual CPU.
#
# Since: 7.1
##
{ 'struct': 'DirtyLimitInfo',
  'data': { 'cpu-index': 'int',
            'limit-rate': 'uint64',
            'current-rate': 'uint64' } }

##
# @set-vcpu-dirty-limit:
#
# Set the upper limit of dirty page rate for virtual CPUs.
#
# Requires KVM with accelerator property "dirty-ring-size" set. A
# virtual CPU's dirty page rate is a measure of its memory load. To
# observe dirty page rates, use @calc-dirty-rate.
#
# @cpu-index: index of a virtual CPU, default is all.
#
# @dirty-rate: upper limit of dirty page rate (MB/s) for virtual CPUs.
#
# Since: 7.1
#
# Example:
#
# -> {"execute": "set-vcpu-dirty-limit",
#     "arguments": { "dirty-rate": 200,
#                    "cpu-index": 1 } }
# <- { "return": {} }
##
{ 'command': 'set-vcpu-dirty-limit',
  'data': { '*cpu-index': 'int',
            'dirty-rate': 'uint64' } }

##
# @cancel-vcpu-dirty-limit:
#
# Cancel the upper limit of dirty page rate for virtual CPUs.
#
# Cancel the dirty page limit for the vCPU which has been set with
# the set-vcpu-dirty-limit command. Note that this command requires
# support from the dirty ring, same as "set-vcpu-dirty-limit".
#
# @cpu-index: index of a virtual CPU, default is all.
#
# Since: 7.1
#
# Example:
#
# -> {"execute": "cancel-vcpu-dirty-limit",
#     "arguments": { "cpu-index": 1 } }
# <- { "return": {} }
##
{ 'command': 'cancel-vcpu-dirty-limit',
  'data': { '*cpu-index': 'int'} }

##
# @query-vcpu-dirty-limit:
#
# Returns information about virtual CPU dirty page rate limits, if
# any.
#
# Since: 7.1
#
# Example:
#
# -> {"execute": "query-vcpu-dirty-limit"}
# <- {"return": [
#     { "limit-rate": 60, "current-rate": 3, "cpu-index": 0},
#     { "limit-rate": 60, "current-rate": 3, "cpu-index": 1}]}
##
{ 'command': 'query-vcpu-dirty-limit',
  'returns': [ 'DirtyLimitInfo' ] }

##
# @MigrationThreadInfo:
#
# Information about migration threads
#
# @name: the name of the migration thread
#
# @thread-id: ID of the underlying host thread
#
# Since: 7.2
##
{ 'struct': 'MigrationThreadInfo',
  'data': {'name': 'str',
           'thread-id': 'int'} }

##
# @query-migrationthreads:
#
# Returns information about migration threads
#
# Returns: information about migration threads
#
# Since: 7.2
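#
# Example, with an illustrative thread name and host thread id:
#
# -> {"execute": "query-migrationthreads"}
# <- {"return": [
#     { "name": "live_migration", "thread-id": 12345 }]}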
##
{ 'command': 'query-migrationthreads',
  'returns': ['MigrationThreadInfo'] }

##
# @snapshot-save:
#
# Save a VM snapshot
#
# @job-id: identifier for the newly created job
#
# @tag: name of the snapshot to create
#
# @vmstate: block device node name to save vmstate to
#
# @devices: list of block device node names to save a snapshot to
#
# Applications should not assume that the snapshot save is complete
# when this command returns. The job commands / events must be used
# to determine completion and to fetch details of any errors that
# arise.
#
# Note that execution of the guest CPUs may be stopped during the
# time it takes to save the snapshot. A future version of QEMU may
# ensure CPUs are executing continuously.
#
# It is strongly recommended that @devices contain all writable block
# device nodes if a consistent snapshot is required.
#
# If @tag already exists, an error will be reported
#
# Returns: nothing
#
# Example:
#
# -> { "execute": "snapshot-save",
#      "arguments": {
#         "job-id": "snapsave0",
#         "tag": "my-snap",
#         "vmstate": "disk0",
#         "devices": ["disk0", "disk1"]
#      }
#    }
# <- { "return": { } }
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1432121972, "microseconds": 744001},
#     "data": {"status": "created", "id": "snapsave0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1432122172, "microseconds": 744001},
#     "data": {"status": "running", "id": "snapsave0"}}
# <- {"event": "STOP",
#     "timestamp": {"seconds": 1432122372, "microseconds": 744001} }
# <- {"event": "RESUME",
#     "timestamp": {"seconds": 1432122572, "microseconds": 744001} }
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1432122772, "microseconds": 744001},
#     "data": {"status": "waiting", "id": "snapsave0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1432122972, "microseconds": 744001},
#     "data": {"status": "pending", "id": "snapsave0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1432123172, "microseconds": 744001},
#     "data": {"status": "concluded", "id": "snapsave0"}}
# -> {"execute": "query-jobs"}
# <- {"return": [{"current-progress": 1,
#                 "status": "concluded",
#                 "total-progress": 1,
#                 "type": "snapshot-save",
#                 "id": "snapsave0"}]}
#
# Since: 6.0
##
{ 'command': 'snapshot-save',
  'data': { 'job-id': 'str',
            'tag': 'str',
            'vmstate': 'str',
            'devices': ['str'] } }

##
# @snapshot-load:
#
# Load a VM snapshot
#
# @job-id: identifier for the newly created job
#
# @tag: name of the snapshot to load.
#
# @vmstate: block device node name to load vmstate from
#
# @devices: list of block device node names to load a snapshot from
#
# Applications should not assume that the snapshot load is complete
# when this command returns. The job commands / events must be used
# to determine completion and to fetch details of any errors that
# arise.
#
# Note that execution of the guest CPUs will be stopped during the
# time it takes to load the snapshot.
#
# It is strongly recommended that @devices contain all writable block
# device nodes that can have changed since the original @snapshot-save
# command execution.
#
# Returns: nothing
#
# Example:
#
# -> { "execute": "snapshot-load",
#      "arguments": {
#         "job-id": "snapload0",
#         "tag": "my-snap",
#         "vmstate": "disk0",
#         "devices": ["disk0", "disk1"]
#      }
#    }
# <- { "return": { } }
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1472124172, "microseconds": 744001},
#     "data": {"status": "created", "id": "snapload0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1472125172, "microseconds": 744001},
#     "data": {"status": "running", "id": "snapload0"}}
# <- {"event": "STOP",
#     "timestamp": {"seconds": 1472125472, "microseconds": 744001} }
# <- {"event": "RESUME",
#     "timestamp": {"seconds": 1472125872, "microseconds": 744001} }
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1472126172, "microseconds": 744001},
#     "data": {"status": "waiting", "id": "snapload0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1472127172, "microseconds": 744001},
#     "data": {"status": "pending", "id": "snapload0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1472128172, "microseconds": 744001},
#     "data": {"status": "concluded", "id": "snapload0"}}
# -> {"execute": "query-jobs"}
# <- {"return": [{"current-progress": 1,
#                 "status": "concluded",
#                 "total-progress": 1,
#                 "type": "snapshot-load",
#                 "id": "snapload0"}]}
#
# Since: 6.0
##
{ 'command': 'snapshot-load',
  'data': { 'job-id': 'str',
            'tag': 'str',
            'vmstate': 'str',
            'devices': ['str'] } }

##
# @snapshot-delete:
#
# Delete a VM snapshot
#
# @job-id: identifier for the newly created job
#
# @tag: name of the snapshot to delete.
#
# @devices: list of block device node names to delete a snapshot from
#
# Applications should not assume that the snapshot delete is complete
# when this command returns. The job commands / events must be used
# to determine completion and to fetch details of any errors that
# arise.
#
# Returns: nothing
#
# Example:
#
# -> { "execute": "snapshot-delete",
#      "arguments": {
#         "job-id": "snapdelete0",
#         "tag": "my-snap",
#         "devices": ["disk0", "disk1"]
#      }
#    }
# <- { "return": { } }
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1442124172, "microseconds": 744001},
#     "data": {"status": "created", "id": "snapdelete0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1442125172, "microseconds": 744001},
#     "data": {"status": "running", "id": "snapdelete0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1442126172, "microseconds": 744001},
#     "data": {"status": "waiting", "id": "snapdelete0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1442127172, "microseconds": 744001},
#     "data": {"status": "pending", "id": "snapdelete0"}}
# <- {"event": "JOB_STATUS_CHANGE",
#     "timestamp": {"seconds": 1442128172, "microseconds": 744001},
#     "data": {"status": "concluded", "id": "snapdelete0"}}
# -> {"execute": "query-jobs"}
# <- {"return": [{"current-progress": 1,
#                 "status": "concluded",
#                 "total-progress": 1,
#                 "type": "snapshot-delete",
#                 "id": "snapdelete0"}]}
#
# Since: 6.0
##
{ 'command': 'snapshot-delete',
  'data': { 'job-id': 'str',
            'tag': 'str',
            'devices': ['str'] } }