# -*- Mode: Python -*-
# vim: filetype=python
#

##
# = Migration
##

{ 'include': 'common.json' }
{ 'include': 'sockets.json' }

##
# @MigrationStats:
#
# Detailed migration status.
#
# @transferred: amount of bytes already transferred to the target VM
#
# @remaining: amount of bytes remaining to be transferred to the
#     target VM
#
# @total: total amount of bytes involved in the migration process
#
# @duplicate: number of duplicate (zero) pages (since 1.2)
#
# @skipped: number of skipped zero pages. Always zero, only provided for
#     compatibility (since 1.5)
#
# @normal: number of normal pages (since 1.2)
#
# @normal-bytes: number of normal bytes sent (since 1.2)
#
# @dirty-pages-rate: number of pages dirtied by second by the guest
#     (since 1.3)
#
# @mbps: throughput in megabits/sec. (since 1.6)
#
# @dirty-sync-count: number of times that dirty ram was synchronized
#     (since 2.1)
#
# @postcopy-requests: The number of page requests received from the
#     destination (since 2.7)
#
# @page-size: The number of bytes per page for the various page-based
#     statistics (since 2.10)
#
# @multifd-bytes: The number of bytes sent through multifd (since 3.0)
#
# @pages-per-second: the number of memory pages transferred per second
#     (Since 4.0)
#
# @precopy-bytes: The number of bytes sent in the pre-copy phase
#     (since 7.0).
#
# @downtime-bytes: The number of bytes sent while the guest is paused
#     (since 7.0).
#
# @postcopy-bytes: The number of bytes sent during the post-copy phase
#     (since 7.0).
#
# @dirty-sync-missed-zero-copy: Number of times dirty RAM
#     synchronization could not avoid copying dirty pages. This is
#     between 0 and @dirty-sync-count * @multifd-channels. (since
#     7.1)
#
# Features:
#
# @deprecated: Member @skipped is always zero since 1.5.3
#
# Since: 0.14
#
##
{ 'struct': 'MigrationStats',
  'data': {'transferred': 'int', 'remaining': 'int', 'total': 'int',
           'duplicate': 'int',
           'skipped': { 'type': 'int', 'features': [ 'deprecated' ] },
           'normal': 'int',
           'normal-bytes': 'int', 'dirty-pages-rate': 'int',
           'mbps': 'number', 'dirty-sync-count': 'int',
           'postcopy-requests': 'int', 'page-size': 'int',
           'multifd-bytes': 'uint64', 'pages-per-second': 'uint64',
           'precopy-bytes': 'uint64', 'downtime-bytes': 'uint64',
           'postcopy-bytes': 'uint64',
           'dirty-sync-missed-zero-copy': 'uint64' } }

##
# @XBZRLECacheStats:
#
# Detailed XBZRLE migration cache statistics
#
# @cache-size: XBZRLE cache size
#
# @bytes: amount of bytes already transferred to the target VM
#
# @pages: amount of pages transferred to the target VM
#
# @cache-miss: number of cache miss
#
# @cache-miss-rate: rate of cache miss (since 2.1)
#
# @encoding-rate: rate of encoded bytes (since 5.1)
#
# @overflow: number of overflows
#
# Since: 1.2
##
{ 'struct': 'XBZRLECacheStats',
  'data': {'cache-size': 'size', 'bytes': 'int', 'pages': 'int',
           'cache-miss': 'int', 'cache-miss-rate': 'number',
           'encoding-rate': 'number', 'overflow': 'int' } }

##
# @CompressionStats:
#
# Detailed migration compression statistics
#
# @pages: amount of pages compressed and transferred to the target VM
#
# @busy: count of times that no free thread was available to compress
#     data
#
# @busy-rate: rate of thread busy
#
# @compressed-size: amount of bytes after compression
#
# @compression-rate: rate of compressed size
#
# Since: 3.1
##
{ 'struct': 'CompressionStats',
  'data': {'pages': 'int', 'busy': 'int', 'busy-rate': 'number',
           'compressed-size': 'int', 'compression-rate': 'number' } }

##
# @MigrationStatus:
#
# An enumeration of migration status.
#
# @none: no migration has ever happened.
#
# @setup: migration process has been initiated.
#
# @cancelling: in the process of cancelling migration.
#
# @cancelled: cancelling migration is finished.
#
# @active: in the process of doing migration.
#
# @postcopy-active: like active, but now in postcopy mode. (since
#     2.5)
#
# @postcopy-paused: during postcopy but paused. (since 3.0)
#
# @postcopy-recover: trying to recover from a paused postcopy. (since
#     3.0)
#
# @completed: migration is finished.
#
# @failed: some error occurred during migration process.
#
# @colo: VM is in the process of fault tolerance, VM can not get into
#     this state unless colo capability is enabled for migration.
#     (since 2.8)
#
# @pre-switchover: Paused before device serialisation. (since 2.11)
#
# @device: During device serialisation when pause-before-switchover is
#     enabled (since 2.11)
#
# @wait-unplug: wait for device unplug request by guest OS to be
#     completed. (since 4.2)
#
# Since: 2.3
##
{ 'enum': 'MigrationStatus',
  'data': [ 'none', 'setup', 'cancelling', 'cancelled',
            'active', 'postcopy-active', 'postcopy-paused',
            'postcopy-recover', 'completed', 'failed', 'colo',
            'pre-switchover', 'device', 'wait-unplug' ] }
##
# @VfioStats:
#
# Detailed VFIO devices migration statistics
#
# @transferred: amount of bytes transferred to the target VM by VFIO
#     devices
#
# Since: 5.2
##
{ 'struct': 'VfioStats',
  'data': {'transferred': 'int' } }

##
# @MigrationInfo:
#
# Information about current migration process.
#
# @status: @MigrationStatus describing the current migration status.
#     If this field is not returned, no migration process has been
#     initiated
#
# @ram: @MigrationStats containing detailed migration status, only
#     returned if status is 'active' or 'completed'(since 1.2)
#
# @disk: @MigrationStats containing detailed disk migration status,
#     only returned if status is 'active' and it is a block migration
#
# @xbzrle-cache: @XBZRLECacheStats containing detailed XBZRLE
#     migration statistics, only returned if XBZRLE feature is on and
#     status is 'active' or 'completed' (since 1.2)
#
# @total-time: total amount of milliseconds since migration started.
#     If migration has ended, it returns the total migration time.
#     (since 1.2)
#
# @downtime: only present when migration finishes correctly total
#     downtime in milliseconds for the guest. (since 1.3)
#
# @expected-downtime: only present while migration is active expected
#     downtime in milliseconds for the guest in last walk of the dirty
#     bitmap. (since 1.3)
#
# @setup-time: amount of setup time in milliseconds *before* the
#     iterations begin but *after* the QMP command is issued. This is
#     designed to provide an accounting of any activities (such as
#     RDMA pinning) which may be expensive, but do not actually occur
#     during the iterative migration rounds themselves. (since 1.6)
#
# @cpu-throttle-percentage: percentage of time guest cpus are being
#     throttled during auto-converge. This is only present when
#     auto-converge has started throttling guest cpus. (Since 2.7)
#
# @error-desc: the human readable error description string. Clients
#     should not attempt to parse the error strings. (Since 2.7)
#
# @postcopy-blocktime: total time when all vCPU were blocked during
#     postcopy live migration. This is only present when the
#     postcopy-blocktime migration capability is enabled. (Since 3.0)
#
# @postcopy-vcpu-blocktime: list of the postcopy blocktime per vCPU.
#     This is only present when the postcopy-blocktime migration
#     capability is enabled. (Since 3.0)
#
# @compression: migration compression statistics, only returned if
#     compression feature is on and status is 'active' or 'completed'
#     (Since 3.1)
#
# @socket-address: Only used for tcp, to know what the real port is
#     (Since 4.0)
#
# @vfio: @VfioStats containing detailed VFIO devices migration
#     statistics, only returned if VFIO device is present, migration
#     is supported by all VFIO devices and status is 'active' or
#     'completed' (since 5.2)
#
# @blocked-reasons: A list of reasons an outgoing migration is
#     blocked. Present and non-empty when migration is blocked.
#     (since 6.0)
#
# @dirty-limit-throttle-time-per-round: Maximum throttle time
#     (in microseconds) of virtual CPUs each dirty ring full round,
#     which shows how MigrationCapability dirty-limit affects the
#     guest during live migration. (Since 8.1)
#
# @dirty-limit-ring-full-time: Estimated average dirty ring full time
#     (in microseconds) for each dirty ring full round. The value
#     equals the dirty ring memory size divided by the average dirty
#     page rate of the virtual CPU, which can be used to observe the
#     average memory load of the virtual CPU indirectly. Note that
#     zero means guest doesn't dirty memory. (Since 8.1)
#
# Since: 0.14
##
{ 'struct': 'MigrationInfo',
  'data': {'*status': 'MigrationStatus', '*ram': 'MigrationStats',
           '*disk': 'MigrationStats',
           '*vfio': 'VfioStats',
           '*xbzrle-cache': 'XBZRLECacheStats',
           '*total-time': 'int',
           '*expected-downtime': 'int',
           '*downtime': 'int',
           '*setup-time': 'int',
           '*cpu-throttle-percentage': 'int',
           '*error-desc': 'str',
           '*blocked-reasons': ['str'],
           '*postcopy-blocktime': 'uint32',
           '*postcopy-vcpu-blocktime': ['uint32'],
           '*compression': 'CompressionStats',
           '*socket-address': ['SocketAddress'],
           '*dirty-limit-throttle-time-per-round': 'uint64',
           '*dirty-limit-ring-full-time': 'uint64'} }

##
# @query-migrate:
#
# Returns information about current migration process. If migration
# is active there will be another json-object with RAM migration
# status and if block migration is active another one with block
# migration status.
#
# Returns: @MigrationInfo
#
# Since: 0.14
#
# Examples:
#
# 1. Before the first migration
#
# -> { "execute": "query-migrate" }
# <- { "return": {} }
#
# 2. Migration is done and has succeeded
#
# -> { "execute": "query-migrate" }
# <- { "return": {
#         "status": "completed",
#         "total-time":12345,
#         "setup-time":12345,
#         "downtime":12345,
#         "ram":{
#           "transferred":123,
#           "remaining":123,
#           "total":246,
#           "duplicate":123,
#           "normal":123,
#           "normal-bytes":123456,
#           "dirty-sync-count":15
#         }
#      }
#    }
#
# 3. Migration is done and has failed
#
# -> { "execute": "query-migrate" }
# <- { "return": { "status": "failed" } }
#
# 4. Migration is being performed and is not a block migration:
#
# -> { "execute": "query-migrate" }
# <- {
#       "return":{
#          "status":"active",
#          "total-time":12345,
#          "setup-time":12345,
#          "expected-downtime":12345,
#          "ram":{
#             "transferred":123,
#             "remaining":123,
#             "total":246,
#             "duplicate":123,
#             "normal":123,
#             "normal-bytes":123456,
#             "dirty-sync-count":15
#          }
#       }
#    }
#
# 5. Migration is being performed and is a block migration:
#
# -> { "execute": "query-migrate" }
# <- {
#       "return":{
#          "status":"active",
#          "total-time":12345,
#          "setup-time":12345,
#          "expected-downtime":12345,
#          "ram":{
#             "total":1057024,
#             "remaining":1053304,
#             "transferred":3720,
#             "duplicate":123,
#             "normal":123,
#             "normal-bytes":123456,
#             "dirty-sync-count":15
#          },
#          "disk":{
#             "total":20971520,
#             "remaining":20880384,
#             "transferred":91136
#          }
#       }
#    }
#
# 6. Migration is being performed and XBZRLE is active:
#
# -> { "execute": "query-migrate" }
# <- {
#       "return":{
#          "status":"active",
#          "total-time":12345,
#          "setup-time":12345,
#          "expected-downtime":12345,
#          "ram":{
#             "total":1057024,
#             "remaining":1053304,
#             "transferred":3720,
#             "duplicate":10,
#             "normal":3333,
#             "normal-bytes":3412992,
#             "dirty-sync-count":15
#          },
#          "xbzrle-cache":{
#             "cache-size":67108864,
#             "bytes":20971520,
#             "pages":2444343,
#             "cache-miss":2244,
#             "cache-miss-rate":0.123,
#             "encoding-rate":80.1,
#             "overflow":34434
#          }
#       }
#    }
##
{ 'command': 'query-migrate', 'returns': 'MigrationInfo' }

##
# @MigrationCapability:
#
# Migration capabilities enumeration
#
# @xbzrle: Migration supports xbzrle (Xor Based Zero Run Length
#     Encoding). This feature allows us to minimize migration traffic
#     for certain work loads, by sending compressed difference of the
#     pages
#
# @rdma-pin-all: Controls whether or not the entire VM memory
#     footprint is mlock()'d on demand or all at once. Refer to
#     docs/rdma.txt for usage. Disabled by default. (since 2.0)
#
# @zero-blocks: During storage migration encode blocks of zeroes
#     efficiently. This essentially saves 1MB of zeroes per block on
#     the wire. Enabling requires source and target VM to support
#     this feature. To enable it is sufficient to enable the
#     capability on the source VM. The feature is disabled by default.
#     (since 1.6)
#
# @compress: Use multiple compression threads to accelerate live
#     migration. This feature can help to reduce the migration
#     traffic, by sending compressed pages. Please note that if
#     compress and xbzrle are both on, compress only takes effect in
#     the ram bulk stage, after that, it will be disabled and only
#     xbzrle takes effect, this can help to minimize migration
#     traffic. The feature is disabled by default. (since 2.4)
#
# @events: generate events for each migration state change (since 2.4)
#
# @auto-converge: If enabled, QEMU will automatically throttle down
#     the guest to speed up convergence of RAM migration. (since 1.6)
#
# @postcopy-ram: Start executing on the migration target before all of
#     RAM has been migrated, pulling the remaining pages along as
#     needed. The capacity must have the same setting on both source
#     and target or migration will not even start. NOTE: If the
#     migration fails during postcopy the VM will fail. (since 2.6)
#
# @x-colo: If enabled, migration will never end, and the state of the
#     VM on the primary side will be migrated continuously to the VM
#     on secondary side, this process is called COarse-Grain LOck
#     Stepping (COLO) for Non-stop Service. (since 2.8)
#
# @release-ram: if enabled, qemu will free the migrated ram pages on
#     the source during postcopy-ram migration. (since 2.9)
#
# @block: If enabled, QEMU will also migrate the contents of all block
#     devices. Default is disabled. A possible alternative uses
#     mirror jobs to a builtin NBD server on the destination, which
#     offers more flexibility. (Since 2.10)
#
# @return-path: If enabled, migration will use the return path even
#     for precopy. (since 2.10)
#
# @pause-before-switchover: Pause outgoing migration before
#     serialising device state and before disabling block IO (since
#     2.11)
#
# @multifd: Use more than one fd for migration (since 4.0)
#
# @dirty-bitmaps: If enabled, QEMU will migrate named dirty bitmaps.
#     (since 2.12)
#
# @postcopy-blocktime: Calculate downtime for postcopy live migration
#     (since 3.0)
#
# @late-block-activate: If enabled, the destination will not activate
#     block devices (and thus take locks) immediately at the end of
#     migration. (since 3.0)
#
# @x-ignore-shared: If enabled, QEMU will not migrate shared memory
#     that is accessible on the destination machine. (since 4.0)
#
# @validate-uuid: Send the UUID of the source to allow the destination
#     to ensure it is the same. (since 4.2)
#
# @background-snapshot: If enabled, the migration stream will be a
#     snapshot of the VM exactly at the point when the migration
#     procedure starts. The VM RAM is saved with running VM. (since
#     6.0)
#
# @zero-copy-send: Controls behavior on sending memory pages on
#     migration. When true, enables a zero-copy mechanism for sending
#     memory pages, if host supports it. Requires that QEMU be
#     permitted to use locked memory for guest RAM pages. (since 7.1)
#
# @postcopy-preempt: If enabled, the migration process will allow
#     postcopy requests to preempt precopy stream, so postcopy
#     requests will be handled faster. This is a performance feature
#     and should not affect the correctness of postcopy migration.
#     (since 7.1)
#
# @switchover-ack: If enabled, migration will not stop the source VM
#     and complete the migration until an ACK is received from the
#     destination that it's OK to do so. Exactly when this ACK is
#     sent depends on the migrated devices that use this feature. For
#     example, a device can use it to make sure some of its data is
#     sent and loaded in the destination before doing switchover.
#     This can reduce downtime if devices that support this capability
#     are present. 'return-path' capability must be enabled to use
#     it. (since 8.1)
#
# @dirty-limit: If enabled, migration will throttle vCPUs as needed to
#     keep their dirty page rate within @vcpu-dirty-limit. This can
#     improve responsiveness of large guests during live migration,
#     and can result in more stable read performance. Requires KVM
#     with accelerator property "dirty-ring-size" set. (Since 8.1)
#
# Features:
#
# @unstable: Members @x-colo and @x-ignore-shared are experimental.
#
# Since: 1.2
##
{ 'enum': 'MigrationCapability',
  'data': ['xbzrle', 'rdma-pin-all', 'auto-converge', 'zero-blocks',
           'compress', 'events', 'postcopy-ram',
           { 'name': 'x-colo', 'features': [ 'unstable' ] },
           'release-ram',
           'block', 'return-path', 'pause-before-switchover', 'multifd',
           'dirty-bitmaps', 'postcopy-blocktime', 'late-block-activate',
           { 'name': 'x-ignore-shared', 'features': [ 'unstable' ] },
           'validate-uuid', 'background-snapshot',
           'zero-copy-send', 'postcopy-preempt', 'switchover-ack',
           'dirty-limit'] }

##
# @MigrationCapabilityStatus:
#
# Migration capability information
#
# @capability: capability enum
#
# @state: capability state bool
#
# Since: 1.2
##
{ 'struct': 'MigrationCapabilityStatus',
  'data': { 'capability': 'MigrationCapability', 'state': 'bool' } }

##
# @migrate-set-capabilities:
#
# Enable/Disable the following migration capabilities (like xbzrle)
#
# @capabilities: json array of capability modifications to make
#
# Since: 1.2
#
# Example:
#
# -> { "execute": "migrate-set-capabilities" , "arguments":
#      { "capabilities": [ { "capability": "xbzrle", "state": true } ] } }
# <- { "return": {} }
##
{ 'command': 'migrate-set-capabilities',
  'data': { 'capabilities': ['MigrationCapabilityStatus'] } }

##
# @query-migrate-capabilities:
#
# Returns information about the current migration capabilities status
#
# Returns: @MigrationCapabilityStatus
#
# Since: 1.2
#
# Example:
#
# -> { "execute": "query-migrate-capabilities" }
# <- { "return": [
#       {"state": false, "capability": "xbzrle"},
#       {"state": false, "capability": "rdma-pin-all"},
#       {"state": false, "capability": "auto-converge"},
#       {"state": false, "capability": "zero-blocks"},
#       {"state": false, "capability": "compress"},
#       {"state": true, "capability": "events"},
#       {"state": false, "capability": "postcopy-ram"},
#       {"state": false, "capability": "x-colo"}
#    ]}
##
{ 'command': 'query-migrate-capabilities', 'returns': ['MigrationCapabilityStatus']}

##
# @MultiFDCompression:
#
# An enumeration of multifd compression methods.
#
# @none: no compression.
#
# @zlib: use zlib compression method.
#
# @zstd: use zstd compression method.
#
# Since: 5.0
##
{ 'enum': 'MultiFDCompression',
  'data': [ 'none', 'zlib',
            { 'name': 'zstd', 'if': 'CONFIG_ZSTD' } ] }

##
# @BitmapMigrationBitmapAliasTransform:
#
# @persistent: If present, the bitmap will be made persistent or
#     transient depending on this parameter.
#
# Since: 6.0
##
{ 'struct': 'BitmapMigrationBitmapAliasTransform',
  'data': {
      '*persistent': 'bool'
  } }

##
# @BitmapMigrationBitmapAlias:
#
# @name: The name of the bitmap.
#
# @alias: An alias name for migration (for example the bitmap name on
#     the opposite site).
#
# @transform: Allows the modification of the migrated bitmap. (since
#     6.0)
#
# Since: 5.2
##
{ 'struct': 'BitmapMigrationBitmapAlias',
  'data': {
      'name': 'str',
      'alias': 'str',
      '*transform': 'BitmapMigrationBitmapAliasTransform'
  } }

##
# @BitmapMigrationNodeAlias:
#
# Maps a block node name and the bitmaps it has to aliases for dirty
# bitmap migration.
#
# @node-name: A block node name.
#
# @alias: An alias block node name for migration (for example the node
#     name on the opposite site).
#
# @bitmaps: Mappings for the bitmaps on this node.
#
# Since: 5.2
##
{ 'struct': 'BitmapMigrationNodeAlias',
  'data': {
      'node-name': 'str',
      'alias': 'str',
      'bitmaps': [ 'BitmapMigrationBitmapAlias' ]
  } }

##
# @MigrationParameter:
#
# Migration parameters enumeration
#
# @announce-initial: Initial delay (in milliseconds) before sending
#     the first announce (Since 4.0)
#
# @announce-max: Maximum delay (in milliseconds) between packets in
#     the announcement (Since 4.0)
#
# @announce-rounds: Number of self-announce packets sent after
#     migration (Since 4.0)
#
# @announce-step: Increase in delay (in milliseconds) between
#     subsequent packets in the announcement (Since 4.0)
#
# @compress-level: Set the compression level to be used in live
#     migration, the compression level is an integer between 0 and 9,
#     where 0 means no compression, 1 means the best compression
#     speed, and 9 means best compression ratio which will consume
#     more CPU.
#
# @compress-threads: Set compression thread count to be used in live
#     migration, the compression thread count is an integer between 1
#     and 255.
#
# @compress-wait-thread: Controls behavior when all compression
#     threads are currently busy. If true (default), wait for a free
#     compression thread to become available; otherwise, send the page
#     uncompressed. (Since 3.1)
#
# @decompress-threads: Set decompression thread count to be used in
#     live migration, the decompression thread count is an integer
#     between 1 and 255. Usually, decompression is at least 4 times as
#     fast as compression, so set the decompress-threads to the number
#     about 1/4 of compress-threads is adequate.
#
# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
#     bytes_xfer_period to trigger throttling. It is expressed as
#     percentage. The default value is 50. (Since 5.0)
#
# @cpu-throttle-initial: Initial percentage of time guest cpus are
#     throttled when migration auto-converge is activated. The
#     default value is 20. (Since 2.7)
#
# @cpu-throttle-increment: throttle percentage increase each time
#     auto-converge detects that migration is not making progress.
#     The default value is 10. (Since 2.7)
#
# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage At
#     the tail stage of throttling, the Guest is very sensitive to CPU
#     percentage while the @cpu-throttle -increment is excessive
#     usually at tail stage. If this parameter is true, we will
#     compute the ideal CPU percentage used by the Guest, which may
#     exactly make the dirty rate match the dirty rate threshold.
#     Then we will choose a smaller throttle increment between the one
#     specified by @cpu-throttle-increment and the one generated by
#     ideal CPU percentage. Therefore, it is compatible to
#     traditional throttling, meanwhile the throttle increment won't
#     be excessive at tail stage. The default value is false. (Since
#     5.1)
#
# @tls-creds: ID of the 'tls-creds' object that provides credentials
#     for establishing a TLS connection over the migration data
#     channel. On the outgoing side of the migration, the credentials
#     must be for a 'client' endpoint, while for the incoming side the
#     credentials must be for a 'server' endpoint. Setting this will
#     enable TLS for all migrations. The default is unset, resulting
#     in unsecured migration at the QEMU level. (Since 2.7)
#
# @tls-hostname: hostname of the target host for the migration. This
#     is required when using x509 based TLS credentials and the
#     migration URI does not already include a hostname. For example
#     if using fd: or exec: based migration, the hostname must be
#     provided so that the server's x509 certificate identity can be
#     validated. (Since 2.7)
#
# @tls-authz: ID of the 'authz' object subclass that provides access
#     control checking of the TLS x509 certificate distinguished name.
#     This object is only resolved at time of use, so can be deleted
#     and recreated on the fly while the migration server is active.
#     If missing, it will default to denying access (Since 4.0)
#
# @max-bandwidth: to set maximum speed for migration. maximum speed
#     in bytes per second. (Since 2.8)
#
# @avail-switchover-bandwidth: to set the available bandwidth that
#     migration can use during switchover phase. NOTE! This does not
#     limit the bandwidth during switchover, but only for calculations when
#     making decisions to switchover. By default, this value is zero,
#     which means QEMU will estimate the bandwidth automatically. This can
#     be set when the estimated value is not accurate, while the user is
#     able to guarantee such bandwidth is available when switching over.
#     When specified correctly, this can make the switchover decision much
#     more accurate. (Since 8.2)
#
# @downtime-limit: set maximum tolerated downtime for migration.
#     maximum downtime in milliseconds (Since 2.8)
#
# @x-checkpoint-delay: The delay time (in ms) between two COLO
#     checkpoints in periodic mode. (Since 2.8)
#
# @block-incremental: Affects how much storage is migrated when the
#     block migration capability is enabled. When false, the entire
#     storage backing chain is migrated into a flattened image at the
#     destination; when true, only the active qcow2 layer is migrated
#     and the destination must already have access to the same backing
#     chain as was used on the source. (since 2.10)
#
# @multifd-channels: Number of channels used to migrate data in
#     parallel. This is the same number that the number of sockets
#     used for migration. The default value is 2 (since 4.0)
#
# @xbzrle-cache-size: cache size to be used by XBZRLE migration. It
#     needs to be a multiple of the target page size and a power of 2
#     (Since 2.11)
#
# @max-postcopy-bandwidth: Background transfer bandwidth during
#     postcopy. Defaults to 0 (unlimited). In bytes per second.
#     (Since 3.0)
#
# @max-cpu-throttle: maximum cpu throttle percentage. Defaults to 99.
#     (Since 3.1)
#
# @multifd-compression: Which compression method to use. Defaults to
#     none. (Since 5.0)
#
# @multifd-zlib-level: Set the compression level to be used in live
#     migration, the compression level is an integer between 0 and 9,
#     where 0 means no compression, 1 means the best compression
#     speed, and 9 means best compression ratio which will consume
#     more CPU. Defaults to 1. (Since 5.0)
#
# @multifd-zstd-level: Set the compression level to be used in live
#     migration, the compression level is an integer between 0 and 20,
#     where 0 means no compression, 1 means the best compression
#     speed, and 20 means best compression ratio which will consume
#     more CPU. Defaults to 1. (Since 5.0)
#
# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
#     aliases for the purpose of dirty bitmap migration. Such aliases
#     may for example be the corresponding names on the opposite site.
#     The mapping must be one-to-one, but not necessarily complete: On
#     the source, unmapped bitmaps and all bitmaps on unmapped nodes
#     will be ignored. On the destination, encountering an unmapped
#     alias in the incoming migration stream will result in a report,
#     and all further bitmap migration data will then be discarded.
#     Note that the destination does not know about bitmaps it does
#     not receive, so there is no limitation or requirement regarding
#     the number of bitmaps received, or how they are named, or on
#     which nodes they are placed. By default (when this parameter
#     has never been set), bitmap names are mapped to themselves.
#     Nodes are mapped to their block device name if there is one, and
#     to their node name otherwise. (Since 5.2)
#
# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty
#     limit during live migration. Should be in the range 1 to 1000ms.
#     Defaults to 1000ms. (Since 8.1)
#
# @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration.
#     Defaults to 1. (Since 8.1)
#
# Features:
#
# @unstable: Members @x-checkpoint-delay and @x-vcpu-dirty-limit-period
#     are experimental.
#
# Since: 2.4
##
{ 'enum': 'MigrationParameter',
  'data': ['announce-initial', 'announce-max',
           'announce-rounds', 'announce-step',
           'compress-level', 'compress-threads', 'decompress-threads',
           'compress-wait-thread', 'throttle-trigger-threshold',
           'cpu-throttle-initial', 'cpu-throttle-increment',
           'cpu-throttle-tailslow',
           'tls-creds', 'tls-hostname', 'tls-authz', 'max-bandwidth',
           'avail-switchover-bandwidth', 'downtime-limit',
           { 'name': 'x-checkpoint-delay', 'features': [ 'unstable' ] },
           'block-incremental',
           'multifd-channels',
           'xbzrle-cache-size', 'max-postcopy-bandwidth',
           'max-cpu-throttle', 'multifd-compression',
           'multifd-zlib-level', 'multifd-zstd-level',
           'block-bitmap-mapping',
           { 'name': 'x-vcpu-dirty-limit-period', 'features': ['unstable'] },
           'vcpu-dirty-limit'] }

##
# @MigrateSetParameters:
#
# @announce-initial: Initial delay (in milliseconds) before sending
#     the first announce (Since 4.0)
#
# @announce-max: Maximum delay (in milliseconds) between packets in
#     the announcement (Since 4.0)
#
# @announce-rounds: Number of self-announce packets sent after
#     migration (Since 4.0)
#
# @announce-step: Increase in delay (in milliseconds) between
#     subsequent packets in the announcement (Since 4.0)
#
# @compress-level: compression level
#
# @compress-threads: compression thread count
#
#
@compress-wait-thread: Controls behavior when all compression 882# threads are currently busy. If true (default), wait for a free 883# compression thread to become available; otherwise, send the page 884# uncompressed. (Since 3.1) 885# 886# @decompress-threads: decompression thread count 887# 888# @throttle-trigger-threshold: The ratio of bytes_dirty_period and 889# bytes_xfer_period to trigger throttling. It is expressed as 890# percentage. The default value is 50. (Since 5.0) 891# 892# @cpu-throttle-initial: Initial percentage of time guest cpus are 893# throttled when migration auto-converge is activated. The 894# default value is 20. (Since 2.7) 895# 896# @cpu-throttle-increment: throttle percentage increase each time 897# auto-converge detects that migration is not making progress. 898# The default value is 10. (Since 2.7) 899# 900# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage. At 901# the tail stage of throttling, the Guest is very sensitive to CPU 902# percentage while the @cpu-throttle-increment is excessive 903# usually at tail stage. If this parameter is true, we will 904# compute the ideal CPU percentage used by the Guest, which may 905# exactly make the dirty rate match the dirty rate threshold. 906# Then we will choose a smaller throttle increment between the one 907# specified by @cpu-throttle-increment and the one generated by 908# ideal CPU percentage. Therefore, it is compatible to 909# traditional throttling, meanwhile the throttle increment won't 910# be excessive at tail stage. The default value is false. (Since 911# 5.1) 912# 913# @tls-creds: ID of the 'tls-creds' object that provides credentials 914# for establishing a TLS connection over the migration data 915# channel. On the outgoing side of the migration, the credentials 916# must be for a 'client' endpoint, while for the incoming side the 917# credentials must be for a 'server' endpoint. Setting this to a 918# non-empty string enables TLS for all migrations.
An empty 919# string means that QEMU will use plain text mode for migration, 920# rather than TLS (Since 2.9) Previously (since 2.7), this was 921# reported by omitting tls-creds instead. 922# 923# @tls-hostname: hostname of the target host for the migration. This 924# is required when using x509 based TLS credentials and the 925# migration URI does not already include a hostname. For example 926# if using fd: or exec: based migration, the hostname must be 927# provided so that the server's x509 certificate identity can be 928# validated. (Since 2.7) An empty string means that QEMU will use 929# the hostname associated with the migration URI, if any. (Since 930# 2.9) Previously (since 2.7), this was reported by omitting 931# tls-hostname instead. 932# 933# @max-bandwidth: to set maximum speed for migration. maximum speed 934# in bytes per second. (Since 2.8) 935# 936# @avail-switchover-bandwidth: to set the available bandwidth that 937# migration can use during switchover phase. NOTE! This does not 938# limit the bandwidth during switchover, but only for calculations when 939# making decisions to switchover. By default, this value is zero, 940# which means QEMU will estimate the bandwidth automatically. This can 941# be set when the estimated value is not accurate, while the user is 942# able to guarantee such bandwidth is available when switching over. 943# When specified correctly, this can make the switchover decision much 944# more accurate. (Since 8.2) 945# 946# @downtime-limit: set maximum tolerated downtime for migration. 947# maximum downtime in milliseconds (Since 2.8) 948# 949# @x-checkpoint-delay: the delay time between two COLO checkpoints. 950# (Since 2.8) 951# 952# @block-incremental: Affects how much storage is migrated when the 953# block migration capability is enabled. 
When false, the entire 954# storage backing chain is migrated into a flattened image at the 955# destination; when true, only the active qcow2 layer is migrated 956# and the destination must already have access to the same backing 957# chain as was used on the source. (since 2.10) 958# 959# @multifd-channels: Number of channels used to migrate data in 960# parallel. This is the same number that the number of sockets 961# used for migration. The default value is 2 (since 4.0) 962# 963# @xbzrle-cache-size: cache size to be used by XBZRLE migration. It 964# needs to be a multiple of the target page size and a power of 2 965# (Since 2.11) 966# 967# @max-postcopy-bandwidth: Background transfer bandwidth during 968# postcopy. Defaults to 0 (unlimited). In bytes per second. 969# (Since 3.0) 970# 971# @max-cpu-throttle: maximum cpu throttle percentage. The default 972# value is 99. (Since 3.1) 973# 974# @multifd-compression: Which compression method to use. Defaults to 975# none. (Since 5.0) 976# 977# @multifd-zlib-level: Set the compression level to be used in live 978# migration, the compression level is an integer between 0 and 9, 979# where 0 means no compression, 1 means the best compression 980# speed, and 9 means best compression ratio which will consume 981# more CPU. Defaults to 1. (Since 5.0) 982# 983# @multifd-zstd-level: Set the compression level to be used in live 984# migration, the compression level is an integer between 0 and 20, 985# where 0 means no compression, 1 means the best compression 986# speed, and 20 means best compression ratio which will consume 987# more CPU. Defaults to 1. (Since 5.0) 988# 989# @block-bitmap-mapping: Maps block nodes and bitmaps on them to 990# aliases for the purpose of dirty bitmap migration. Such aliases 991# may for example be the corresponding names on the opposite site. 
992# The mapping must be one-to-one, but not necessarily complete: On 993# the source, unmapped bitmaps and all bitmaps on unmapped nodes 994# will be ignored. On the destination, encountering an unmapped 995# alias in the incoming migration stream will result in a report, 996# and all further bitmap migration data will then be discarded. 997# Note that the destination does not know about bitmaps it does 998# not receive, so there is no limitation or requirement regarding 999# the number of bitmaps received, or how they are named, or on 1000# which nodes they are placed. By default (when this parameter 1001# has never been set), bitmap names are mapped to themselves. 1002# Nodes are mapped to their block device name if there is one, and 1003# to their node name otherwise. (Since 5.2) 1004# 1005# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty 1006# limit during live migration. Should be in the range 1 to 1000ms. 1007# Defaults to 1000ms. (Since 8.1) 1008# 1009# @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration. 1010# Defaults to 1. (Since 8.1) 1011# 1012# Features: 1013# 1014# @unstable: Members @x-checkpoint-delay and @x-vcpu-dirty-limit-period 1015# are experimental. 
1016# 1017# TODO: either fuse back into MigrationParameters, or make 1018# MigrationParameters members mandatory 1019# 1020# Since: 2.4 1021## 1022{ 'struct': 'MigrateSetParameters', 1023 'data': { '*announce-initial': 'size', 1024 '*announce-max': 'size', 1025 '*announce-rounds': 'size', 1026 '*announce-step': 'size', 1027 '*compress-level': 'uint8', 1028 '*compress-threads': 'uint8', 1029 '*compress-wait-thread': 'bool', 1030 '*decompress-threads': 'uint8', 1031 '*throttle-trigger-threshold': 'uint8', 1032 '*cpu-throttle-initial': 'uint8', 1033 '*cpu-throttle-increment': 'uint8', 1034 '*cpu-throttle-tailslow': 'bool', 1035 '*tls-creds': 'StrOrNull', 1036 '*tls-hostname': 'StrOrNull', 1037 '*tls-authz': 'StrOrNull', 1038 '*max-bandwidth': 'size', 1039 '*avail-switchover-bandwidth': 'size', 1040 '*downtime-limit': 'uint64', 1041 '*x-checkpoint-delay': { 'type': 'uint32', 1042 'features': [ 'unstable' ] }, 1043 '*block-incremental': 'bool', 1044 '*multifd-channels': 'uint8', 1045 '*xbzrle-cache-size': 'size', 1046 '*max-postcopy-bandwidth': 'size', 1047 '*max-cpu-throttle': 'uint8', 1048 '*multifd-compression': 'MultiFDCompression', 1049 '*multifd-zlib-level': 'uint8', 1050 '*multifd-zstd-level': 'uint8', 1051 '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ], 1052 '*x-vcpu-dirty-limit-period': { 'type': 'uint64', 1053 'features': [ 'unstable' ] }, 1054 '*vcpu-dirty-limit': 'uint64'} } 1055 1056## 1057# @migrate-set-parameters: 1058# 1059# Set various migration parameters. 1060# 1061# Since: 2.4 1062# 1063# Example: 1064# 1065# -> { "execute": "migrate-set-parameters" , 1066# "arguments": { "compress-level": 1 } } 1067# <- { "return": {} } 1068## 1069{ 'command': 'migrate-set-parameters', 'boxed': true, 1070 'data': 'MigrateSetParameters' } 1071 1072## 1073# @MigrationParameters: 1074# 1075# The optional members aren't actually optional. 
1076# 1077# @announce-initial: Initial delay (in milliseconds) before sending 1078# the first announce (Since 4.0) 1079# 1080# @announce-max: Maximum delay (in milliseconds) between packets in 1081# the announcement (Since 4.0) 1082# 1083# @announce-rounds: Number of self-announce packets sent after 1084# migration (Since 4.0) 1085# 1086# @announce-step: Increase in delay (in milliseconds) between 1087# subsequent packets in the announcement (Since 4.0) 1088# 1089# @compress-level: compression level 1090# 1091# @compress-threads: compression thread count 1092# 1093# @compress-wait-thread: Controls behavior when all compression 1094# threads are currently busy. If true (default), wait for a free 1095# compression thread to become available; otherwise, send the page 1096# uncompressed. (Since 3.1) 1097# 1098# @decompress-threads: decompression thread count 1099# 1100# @throttle-trigger-threshold: The ratio of bytes_dirty_period and 1101# bytes_xfer_period to trigger throttling. It is expressed as 1102# percentage. The default value is 50. (Since 5.0) 1103# 1104# @cpu-throttle-initial: Initial percentage of time guest cpus are 1105# throttled when migration auto-converge is activated. (Since 1106# 2.7) 1107# 1108# @cpu-throttle-increment: throttle percentage increase each time 1109# auto-converge detects that migration is not making progress. 1110# (Since 2.7) 1111# 1112# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage. At 1113# the tail stage of throttling, the Guest is very sensitive to CPU 1114# percentage while the @cpu-throttle-increment is excessive 1115# usually at tail stage. If this parameter is true, we will 1116# compute the ideal CPU percentage used by the Guest, which may 1117# exactly make the dirty rate match the dirty rate threshold. 1118# Then we will choose a smaller throttle increment between the one 1119# specified by @cpu-throttle-increment and the one generated by 1120# ideal CPU percentage.
Therefore, it is compatible to 1121# traditional throttling, meanwhile the throttle increment won't 1122# be excessive at tail stage. The default value is false. (Since 1123# 5.1) 1124# 1125# @tls-creds: ID of the 'tls-creds' object that provides credentials 1126# for establishing a TLS connection over the migration data 1127# channel. On the outgoing side of the migration, the credentials 1128# must be for a 'client' endpoint, while for the incoming side the 1129# credentials must be for a 'server' endpoint. An empty string 1130# means that QEMU will use plain text mode for migration, rather 1131# than TLS (Since 2.7) Note: 2.8 reports this by omitting 1132# tls-creds instead. 1133# 1134# @tls-hostname: hostname of the target host for the migration. This 1135# is required when using x509 based TLS credentials and the 1136# migration URI does not already include a hostname. For example 1137# if using fd: or exec: based migration, the hostname must be 1138# provided so that the server's x509 certificate identity can be 1139# validated. (Since 2.7) An empty string means that QEMU will use 1140# the hostname associated with the migration URI, if any. (Since 1141# 2.9) Note: 2.8 reports this by omitting tls-hostname instead. 1142# 1143# @tls-authz: ID of the 'authz' object subclass that provides access 1144# control checking of the TLS x509 certificate distinguished name. 1145# (Since 4.0) 1146# 1147# @max-bandwidth: to set maximum speed for migration. maximum speed 1148# in bytes per second. (Since 2.8) 1149# 1150# @avail-switchover-bandwidth: to set the available bandwidth that 1151# migration can use during switchover phase. NOTE! This does not 1152# limit the bandwidth during switchover, but only for calculations when 1153# making decisions to switchover. By default, this value is zero, 1154# which means QEMU will estimate the bandwidth automatically. 
This can 1155# be set when the estimated value is not accurate, while the user is 1156# able to guarantee such bandwidth is available when switching over. 1157# When specified correctly, this can make the switchover decision much 1158# more accurate. (Since 8.2) 1159# 1160# @downtime-limit: set maximum tolerated downtime for migration. 1161# maximum downtime in milliseconds (Since 2.8) 1162# 1163# @x-checkpoint-delay: the delay time between two COLO checkpoints. 1164# (Since 2.8) 1165# 1166# @block-incremental: Affects how much storage is migrated when the 1167# block migration capability is enabled. When false, the entire 1168# storage backing chain is migrated into a flattened image at the 1169# destination; when true, only the active qcow2 layer is migrated 1170# and the destination must already have access to the same backing 1171# chain as was used on the source. (since 2.10) 1172# 1173# @multifd-channels: Number of channels used to migrate data in 1174# parallel. This is the same number that the number of sockets 1175# used for migration. The default value is 2 (since 4.0) 1176# 1177# @xbzrle-cache-size: cache size to be used by XBZRLE migration. It 1178# needs to be a multiple of the target page size and a power of 2 1179# (Since 2.11) 1180# 1181# @max-postcopy-bandwidth: Background transfer bandwidth during 1182# postcopy. Defaults to 0 (unlimited). In bytes per second. 1183# (Since 3.0) 1184# 1185# @max-cpu-throttle: maximum cpu throttle percentage. Defaults to 99. 1186# (Since 3.1) 1187# 1188# @multifd-compression: Which compression method to use. Defaults to 1189# none. (Since 5.0) 1190# 1191# @multifd-zlib-level: Set the compression level to be used in live 1192# migration, the compression level is an integer between 0 and 9, 1193# where 0 means no compression, 1 means the best compression 1194# speed, and 9 means best compression ratio which will consume 1195# more CPU. Defaults to 1. 
(Since 5.0) 1196# 1197# @multifd-zstd-level: Set the compression level to be used in live 1198# migration, the compression level is an integer between 0 and 20, 1199# where 0 means no compression, 1 means the best compression 1200# speed, and 20 means best compression ratio which will consume 1201# more CPU. Defaults to 1. (Since 5.0) 1202# 1203# @block-bitmap-mapping: Maps block nodes and bitmaps on them to 1204# aliases for the purpose of dirty bitmap migration. Such aliases 1205# may for example be the corresponding names on the opposite site. 1206# The mapping must be one-to-one, but not necessarily complete: On 1207# the source, unmapped bitmaps and all bitmaps on unmapped nodes 1208# will be ignored. On the destination, encountering an unmapped 1209# alias in the incoming migration stream will result in a report, 1210# and all further bitmap migration data will then be discarded. 1211# Note that the destination does not know about bitmaps it does 1212# not receive, so there is no limitation or requirement regarding 1213# the number of bitmaps received, or how they are named, or on 1214# which nodes they are placed. By default (when this parameter 1215# has never been set), bitmap names are mapped to themselves. 1216# Nodes are mapped to their block device name if there is one, and 1217# to their node name otherwise. (Since 5.2) 1218# 1219# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty 1220# limit during live migration. Should be in the range 1 to 1000ms. 1221# Defaults to 1000ms. (Since 8.1) 1222# 1223# @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration. 1224# Defaults to 1. (Since 8.1) 1225# 1226# Features: 1227# 1228# @unstable: Members @x-checkpoint-delay and @x-vcpu-dirty-limit-period 1229# are experimental. 
1230# 1231# Since: 2.4 1232## 1233{ 'struct': 'MigrationParameters', 1234 'data': { '*announce-initial': 'size', 1235 '*announce-max': 'size', 1236 '*announce-rounds': 'size', 1237 '*announce-step': 'size', 1238 '*compress-level': 'uint8', 1239 '*compress-threads': 'uint8', 1240 '*compress-wait-thread': 'bool', 1241 '*decompress-threads': 'uint8', 1242 '*throttle-trigger-threshold': 'uint8', 1243 '*cpu-throttle-initial': 'uint8', 1244 '*cpu-throttle-increment': 'uint8', 1245 '*cpu-throttle-tailslow': 'bool', 1246 '*tls-creds': 'str', 1247 '*tls-hostname': 'str', 1248 '*tls-authz': 'str', 1249 '*max-bandwidth': 'size', 1250 '*avail-switchover-bandwidth': 'size', 1251 '*downtime-limit': 'uint64', 1252 '*x-checkpoint-delay': { 'type': 'uint32', 1253 'features': [ 'unstable' ] }, 1254 '*block-incremental': 'bool', 1255 '*multifd-channels': 'uint8', 1256 '*xbzrle-cache-size': 'size', 1257 '*max-postcopy-bandwidth': 'size', 1258 '*max-cpu-throttle': 'uint8', 1259 '*multifd-compression': 'MultiFDCompression', 1260 '*multifd-zlib-level': 'uint8', 1261 '*multifd-zstd-level': 'uint8', 1262 '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ], 1263 '*x-vcpu-dirty-limit-period': { 'type': 'uint64', 1264 'features': [ 'unstable' ] }, 1265 '*vcpu-dirty-limit': 'uint64'} } 1266 1267## 1268# @query-migrate-parameters: 1269# 1270# Returns information about the current migration parameters 1271# 1272# Returns: @MigrationParameters 1273# 1274# Since: 2.4 1275# 1276# Example: 1277# 1278# -> { "execute": "query-migrate-parameters" } 1279# <- { "return": { 1280# "decompress-threads": 2, 1281# "cpu-throttle-increment": 10, 1282# "compress-threads": 8, 1283# "compress-level": 1, 1284# "cpu-throttle-initial": 20, 1285# "max-bandwidth": 33554432, 1286# "downtime-limit": 300 1287# } 1288# } 1289## 1290{ 'command': 'query-migrate-parameters', 1291 'returns': 'MigrationParameters' } 1292 1293## 1294# @migrate-start-postcopy: 1295# 1296# Followup to a migration command to switch the 
migration to postcopy 1297# mode. The postcopy-ram capability must be set on both source and 1298# destination before the original migration command. 1299# 1300# Since: 2.5 1301# 1302# Example: 1303# 1304# -> { "execute": "migrate-start-postcopy" } 1305# <- { "return": {} } 1306## 1307{ 'command': 'migrate-start-postcopy' } 1308 1309## 1310# @MIGRATION: 1311# 1312# Emitted when a migration event happens 1313# 1314# @status: @MigrationStatus describing the current migration status. 1315# 1316# Since: 2.4 1317# 1318# Example: 1319# 1320# <- {"timestamp": {"seconds": 1432121972, "microseconds": 744001}, 1321# "event": "MIGRATION", 1322# "data": {"status": "completed"} } 1323## 1324{ 'event': 'MIGRATION', 1325 'data': {'status': 'MigrationStatus'}} 1326 1327## 1328# @MIGRATION_PASS: 1329# 1330# Emitted from the source side of a migration at the start of each 1331# pass (when it syncs the dirty bitmap) 1332# 1333# @pass: An incrementing count (starting at 1 on the first pass) 1334# 1335# Since: 2.6 1336# 1337# Example: 1338# 1339# <- { "timestamp": {"seconds": 1449669631, "microseconds": 239225}, 1340# "event": "MIGRATION_PASS", "data": {"pass": 2} } 1341## 1342{ 'event': 'MIGRATION_PASS', 1343 'data': { 'pass': 'int' } } 1344 1345## 1346# @COLOMessage: 1347# 1348# The message transmission between Primary side and Secondary side. 1349# 1350# @checkpoint-ready: Secondary VM (SVM) is ready for checkpointing 1351# 1352# @checkpoint-request: Primary VM (PVM) tells SVM to prepare for 1353# checkpointing 1354# 1355# @checkpoint-reply: SVM gets PVM's checkpoint request 1356# 1357# @vmstate-send: VM's state will be sent by PVM. 1358# 1359# @vmstate-size: The total size of VMstate. 1360# 1361# @vmstate-received: VM's state has been received by SVM. 1362# 1363# @vmstate-loaded: VM's state has been loaded by SVM. 
1364# 1365# Since: 2.8 1366## 1367{ 'enum': 'COLOMessage', 1368 'data': [ 'checkpoint-ready', 'checkpoint-request', 'checkpoint-reply', 1369 'vmstate-send', 'vmstate-size', 'vmstate-received', 1370 'vmstate-loaded' ] } 1371 1372## 1373# @COLOMode: 1374# 1375# The COLO current mode. 1376# 1377# @none: COLO is disabled. 1378# 1379# @primary: COLO node in primary side. 1380# 1381# @secondary: COLO node in secondary side. 1382# 1383# Since: 2.8 1384## 1385{ 'enum': 'COLOMode', 1386 'data': [ 'none', 'primary', 'secondary'] } 1387 1388## 1389# @FailoverStatus: 1390# 1391# An enumeration of COLO failover status 1392# 1393# @none: no failover has ever happened 1394# 1395# @require: got failover requirement but not handled 1396# 1397# @active: in the process of doing failover 1398# 1399# @completed: finish the process of failover 1400# 1401# @relaunch: restart the failover process, from 'none' -> 'completed' 1402# (Since 2.9) 1403# 1404# Since: 2.8 1405## 1406{ 'enum': 'FailoverStatus', 1407 'data': [ 'none', 'require', 'active', 'completed', 'relaunch' ] } 1408 1409## 1410# @COLO_EXIT: 1411# 1412# Emitted when VM finishes COLO mode due to some errors happening or 1413# at the request of users. 1414# 1415# @mode: report COLO mode when COLO exited. 1416# 1417# @reason: describes the reason for the COLO exit. 1418# 1419# Since: 3.1 1420# 1421# Example: 1422# 1423# <- { "timestamp": {"seconds": 2032141960, "microseconds": 417172}, 1424# "event": "COLO_EXIT", "data": {"mode": "primary", "reason": "request" } } 1425## 1426{ 'event': 'COLO_EXIT', 1427 'data': {'mode': 'COLOMode', 'reason': 'COLOExitReason' } } 1428 1429## 1430# @COLOExitReason: 1431# 1432# The reason for a COLO exit. 1433# 1434# @none: failover has never happened. This state does not occur in 1435# the COLO_EXIT event, and is only visible in the result of 1436# query-colo-status. 1437# 1438# @request: COLO exit is due to an external request. 1439# 1440# @error: COLO exit is due to an internal error.
1441# 1442# @processing: COLO is currently handling a failover (since 4.0). 1443# 1444# Since: 3.1 1445## 1446{ 'enum': 'COLOExitReason', 1447 'data': [ 'none', 'request', 'error' , 'processing' ] } 1448 1449## 1450# @x-colo-lost-heartbeat: 1451# 1452# Tell qemu that heartbeat is lost, request it to do takeover 1453# procedures. If this command is sent to the PVM, the Primary side 1454# will exit COLO mode. If sent to the Secondary, the Secondary side 1455# will run failover work, then takes over server operation to become 1456# the service VM. 1457# 1458# Features: 1459# 1460# @unstable: This command is experimental. 1461# 1462# Since: 2.8 1463# 1464# Example: 1465# 1466# -> { "execute": "x-colo-lost-heartbeat" } 1467# <- { "return": {} } 1468## 1469{ 'command': 'x-colo-lost-heartbeat', 1470 'features': [ 'unstable' ], 1471 'if': 'CONFIG_REPLICATION' } 1472 1473## 1474# @migrate_cancel: 1475# 1476# Cancel the current executing migration process. 1477# 1478# Returns: nothing on success 1479# 1480# Notes: This command succeeds even if there is no migration process 1481# running. 1482# 1483# Since: 0.14 1484# 1485# Example: 1486# 1487# -> { "execute": "migrate_cancel" } 1488# <- { "return": {} } 1489## 1490{ 'command': 'migrate_cancel' } 1491 1492## 1493# @migrate-continue: 1494# 1495# Continue migration when it's in a paused state. 1496# 1497# @state: The state the migration is currently expected to be in 1498# 1499# Returns: nothing on success 1500# 1501# Since: 2.11 1502# 1503# Example: 1504# 1505# -> { "execute": "migrate-continue" , "arguments": 1506# { "state": "pre-switchover" } } 1507# <- { "return": {} } 1508## 1509{ 'command': 'migrate-continue', 'data': {'state': 'MigrationStatus'} } 1510 1511## 1512# @migrate: 1513# 1514# Migrates the current running guest to another Virtual Machine. 
1515# 1516# @uri: the Uniform Resource Identifier of the destination VM 1517# 1518# @blk: do block migration (full disk copy) 1519# 1520# @inc: incremental disk copy migration 1521# 1522# @detach: this argument exists only for compatibility reasons and is 1523# ignored by QEMU 1524# 1525# @resume: resume one paused migration, default "off". (since 3.0) 1526# 1527# Features: 1528# 1529# @deprecated: Member @inc is deprecated. Use blockdev-mirror with 1530# NBD instead. 1531# 1532# Returns: nothing on success 1533# 1534# Since: 0.14 1535# 1536# Notes: 1537# 1538# 1. The 'query-migrate' command should be used to check migration's 1539# progress and final result (this information is provided by the 1540# 'status' member) 1541# 1542# 2. All boolean arguments default to false 1543# 1544# 3. The user Monitor's "detach" argument is invalid in QMP and should 1545# not be used 1546# 1547# Example: 1548# 1549# -> { "execute": "migrate", "arguments": { "uri": "tcp:0:4446" } } 1550# <- { "return": {} } 1551## 1552{ 'command': 'migrate', 1553 'data': {'uri': 'str', '*blk': 'bool', 1554 '*inc': { 'type': 'bool', 'features': [ 'deprecated' ] }, 1555 '*detach': 'bool', '*resume': 'bool' } } 1556 1557## 1558# @migrate-incoming: 1559# 1560# Start an incoming migration, the qemu must have been started with 1561# -incoming defer 1562# 1563# @uri: The Uniform Resource Identifier identifying the source or 1564# address to listen on 1565# 1566# Returns: nothing on success 1567# 1568# Since: 2.3 1569# 1570# Notes: 1571# 1572# 1. It's a bad idea to use a string for the uri, but it needs 1573# to stay compatible with -incoming and the format of the uri 1574# is already exposed above libvirt. 1575# 1576# 2. QEMU must be started with -incoming defer to allow 1577# migrate-incoming to be used. 1578# 1579# 3. 
The uri format is the same as for -incoming 1580# 1581# Example: 1582# 1583# -> { "execute": "migrate-incoming", 1584# "arguments": { "uri": "tcp::4446" } } 1585# <- { "return": {} } 1586## 1587{ 'command': 'migrate-incoming', 'data': {'uri': 'str' } } 1588 1589## 1590# @xen-save-devices-state: 1591# 1592# Save the state of all devices to file. The RAM and the block 1593# devices of the VM are not saved by this command. 1594# 1595# @filename: the file to save the state of the devices to as binary 1596# data. See xen-save-devices-state.txt for a description of the 1597# binary format. 1598# 1599# @live: Optional argument to ask QEMU to treat this command as part 1600# of a live migration. Default to true. (since 2.11) 1601# 1602# Returns: Nothing on success 1603# 1604# Since: 1.1 1605# 1606# Example: 1607# 1608# -> { "execute": "xen-save-devices-state", 1609# "arguments": { "filename": "/tmp/save" } } 1610# <- { "return": {} } 1611## 1612{ 'command': 'xen-save-devices-state', 1613 'data': {'filename': 'str', '*live':'bool' } } 1614 1615## 1616# @xen-set-global-dirty-log: 1617# 1618# Enable or disable the global dirty log mode. 1619# 1620# @enable: true to enable, false to disable. 1621# 1622# Returns: nothing 1623# 1624# Since: 1.3 1625# 1626# Example: 1627# 1628# -> { "execute": "xen-set-global-dirty-log", 1629# "arguments": { "enable": true } } 1630# <- { "return": {} } 1631## 1632{ 'command': 'xen-set-global-dirty-log', 'data': { 'enable': 'bool' } } 1633 1634## 1635# @xen-load-devices-state: 1636# 1637# Load the state of all devices from file. The RAM and the block 1638# devices of the VM are not loaded by this command. 1639# 1640# @filename: the file to load the state of the devices from as binary 1641# data. See xen-save-devices-state.txt for a description of the 1642# binary format. 
1643# 1644# Since: 2.7 1645# 1646# Example: 1647# 1648# -> { "execute": "xen-load-devices-state", 1649# "arguments": { "filename": "/tmp/resume" } } 1650# <- { "return": {} } 1651## 1652{ 'command': 'xen-load-devices-state', 'data': {'filename': 'str'} } 1653 1654## 1655# @xen-set-replication: 1656# 1657# Enable or disable replication. 1658# 1659# @enable: true to enable, false to disable. 1660# 1661# @primary: true for primary or false for secondary. 1662# 1663# @failover: true to do failover, false to stop. but cannot be 1664# specified if 'enable' is true. default value is false. 1665# 1666# Returns: nothing. 1667# 1668# Example: 1669# 1670# -> { "execute": "xen-set-replication", 1671# "arguments": {"enable": true, "primary": false} } 1672# <- { "return": {} } 1673# 1674# Since: 2.9 1675## 1676{ 'command': 'xen-set-replication', 1677 'data': { 'enable': 'bool', 'primary': 'bool', '*failover': 'bool' }, 1678 'if': 'CONFIG_REPLICATION' } 1679 1680## 1681# @ReplicationStatus: 1682# 1683# The result format for 'query-xen-replication-status'. 1684# 1685# @error: true if an error happened, false if replication is normal. 1686# 1687# @desc: the human readable error description string, when @error is 1688# 'true'. 1689# 1690# Since: 2.9 1691## 1692{ 'struct': 'ReplicationStatus', 1693 'data': { 'error': 'bool', '*desc': 'str' }, 1694 'if': 'CONFIG_REPLICATION' } 1695 1696## 1697# @query-xen-replication-status: 1698# 1699# Query replication status while the vm is running. 1700# 1701# Returns: A @ReplicationStatus object showing the status. 1702# 1703# Example: 1704# 1705# -> { "execute": "query-xen-replication-status" } 1706# <- { "return": { "error": false } } 1707# 1708# Since: 2.9 1709## 1710{ 'command': 'query-xen-replication-status', 1711 'returns': 'ReplicationStatus', 1712 'if': 'CONFIG_REPLICATION' } 1713 1714## 1715# @xen-colo-do-checkpoint: 1716# 1717# Xen uses this command to notify replication to trigger a checkpoint. 1718# 1719# Returns: nothing. 
1720# 1721# Example: 1722# 1723# -> { "execute": "xen-colo-do-checkpoint" } 1724# <- { "return": {} } 1725# 1726# Since: 2.9 1727## 1728{ 'command': 'xen-colo-do-checkpoint', 1729 'if': 'CONFIG_REPLICATION' } 1730 1731## 1732# @COLOStatus: 1733# 1734# The result format for 'query-colo-status'. 1735# 1736# @mode: COLO running mode. If COLO is running, this field will 1737# return 'primary' or 'secondary'. 1738# 1739# @last-mode: COLO last running mode. If COLO is running, this field 1740# will return same like mode field, after failover we can use this 1741# field to get last colo mode. (since 4.0) 1742# 1743# @reason: describes the reason for the COLO exit. 1744# 1745# Since: 3.1 1746## 1747{ 'struct': 'COLOStatus', 1748 'data': { 'mode': 'COLOMode', 'last-mode': 'COLOMode', 1749 'reason': 'COLOExitReason' }, 1750 'if': 'CONFIG_REPLICATION' } 1751 1752## 1753# @query-colo-status: 1754# 1755# Query COLO status while the vm is running. 1756# 1757# Returns: A @COLOStatus object showing the status. 1758# 1759# Example: 1760# 1761# -> { "execute": "query-colo-status" } 1762# <- { "return": { "mode": "primary", "last-mode": "none", "reason": "request" } } 1763# 1764# Since: 3.1 1765## 1766{ 'command': 'query-colo-status', 1767 'returns': 'COLOStatus', 1768 'if': 'CONFIG_REPLICATION' } 1769 1770## 1771# @migrate-recover: 1772# 1773# Provide a recovery migration stream URI. 1774# 1775# @uri: the URI to be used for the recovery of migration stream. 1776# 1777# Returns: nothing. 1778# 1779# Example: 1780# 1781# -> { "execute": "migrate-recover", 1782# "arguments": { "uri": "tcp:192.168.1.200:12345" } } 1783# <- { "return": {} } 1784# 1785# Since: 3.0 1786## 1787{ 'command': 'migrate-recover', 1788 'data': { 'uri': 'str' }, 1789 'allow-oob': true } 1790 1791## 1792# @migrate-pause: 1793# 1794# Pause a migration. Currently it only supports postcopy. 1795# 1796# Returns: nothing. 
1797# 1798# Example: 1799# 1800# -> { "execute": "migrate-pause" } 1801# <- { "return": {} } 1802# 1803# Since: 3.0 1804## 1805{ 'command': 'migrate-pause', 'allow-oob': true } 1806 1807## 1808# @UNPLUG_PRIMARY: 1809# 1810# Emitted from source side of a migration when migration state is 1811# WAIT_UNPLUG. Device was unplugged by guest operating system. Device 1812# resources in QEMU are kept on standby to be able to re-plug it in 1813# case of migration failure. 1814# 1815# @device-id: QEMU device id of the unplugged device 1816# 1817# Since: 4.2 1818# 1819# Example: 1820# 1821# <- { "event": "UNPLUG_PRIMARY", 1822# "data": { "device-id": "hostdev0" }, 1823# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } } 1824## 1825{ 'event': 'UNPLUG_PRIMARY', 1826 'data': { 'device-id': 'str' } } 1827 1828## 1829# @DirtyRateVcpu: 1830# 1831# Dirty rate of vcpu. 1832# 1833# @id: vcpu index. 1834# 1835# @dirty-rate: dirty rate. 1836# 1837# Since: 6.2 1838## 1839{ 'struct': 'DirtyRateVcpu', 1840 'data': { 'id': 'int', 'dirty-rate': 'int64' } } 1841 1842## 1843# @DirtyRateStatus: 1844# 1845# Dirty page rate measurement status. 1846# 1847# @unstarted: measuring thread has not been started yet 1848# 1849# @measuring: measuring thread is running 1850# 1851# @measured: dirty page rate is measured and the results are available 1852# 1853# Since: 5.2 1854## 1855{ 'enum': 'DirtyRateStatus', 1856 'data': [ 'unstarted', 'measuring', 'measured'] } 1857 1858## 1859# @DirtyRateMeasureMode: 1860# 1861# Method used to measure dirty page rate. Differences between 1862# available methods are explained in @calc-dirty-rate. 1863# 1864# @page-sampling: use page sampling 1865# 1866# @dirty-ring: use dirty ring 1867# 1868# @dirty-bitmap: use dirty bitmap 1869# 1870# Since: 6.2 1871## 1872{ 'enum': 'DirtyRateMeasureMode', 1873 'data': ['page-sampling', 'dirty-ring', 'dirty-bitmap'] } 1874 1875## 1876# @TimeUnit: 1877# 1878# Specifies unit in which time-related value is specified. 
1879# 1880# @second: value is in seconds 1881# 1882# @millisecond: value is in milliseconds 1883# 1884# Since: 8.2 1885# 1886## 1887{ 'enum': 'TimeUnit', 1888 'data': ['second', 'millisecond'] } 1889 1890## 1891# @DirtyRateInfo: 1892# 1893# Information about measured dirty page rate. 1894# 1895# @dirty-rate: an estimate of the dirty page rate of the VM in units 1896# of MiB/s. Value is present only when @status is 'measured'. 1897# 1898# @status: current status of dirty page rate measurements 1899# 1900# @start-time: start time in units of second for calculation 1901# 1902# @calc-time: time period for which dirty page rate was measured, 1903# expressed and rounded down to @calc-time-unit. 1904# 1905# @calc-time-unit: time unit of @calc-time (Since 8.2) 1906# 1907# @sample-pages: number of sampled pages per GiB of guest memory. 1908# Valid only in page-sampling mode (Since 6.1) 1909# 1910# @mode: mode that was used to measure dirty page rate (Since 6.2) 1911# 1912# @vcpu-dirty-rate: dirty rate for each vCPU if dirty-ring mode was 1913# specified (Since 6.2) 1914# 1915# Since: 5.2 1916## 1917{ 'struct': 'DirtyRateInfo', 1918 'data': {'*dirty-rate': 'int64', 1919 'status': 'DirtyRateStatus', 1920 'start-time': 'int64', 1921 'calc-time': 'int64', 1922 'calc-time-unit': 'TimeUnit', 1923 'sample-pages': 'uint64', 1924 'mode': 'DirtyRateMeasureMode', 1925 '*vcpu-dirty-rate': [ 'DirtyRateVcpu' ] } } 1926 1927## 1928# @calc-dirty-rate: 1929# 1930# Start measuring dirty page rate of the VM. Results can be retrieved 1931# with @query-dirty-rate after measurements are completed. 1932# 1933# Dirty page rate is the number of pages changed in a given time 1934# period expressed in MiB/s. The following methods of calculation are 1935# available: 1936# 1937# 1. In page sampling mode, a random subset of pages are selected and 1938# hashed twice: once at the beginning of measurement time period, 1939# and once again at the end. 
If two hashes for some page are 1940# different, the page is counted as changed. Since this method 1941# relies on sampling and hashing, calculated dirty page rate is 1942# only an estimate of its true value. Increasing @sample-pages 1943# improves estimation quality at the cost of higher computational 1944# overhead. 1945# 1946# 2. Dirty bitmap mode captures writes to memory (for example by 1947# temporarily revoking write access to all pages) and counting page 1948# faults. Information about modified pages is collected into a 1949# bitmap, where each bit corresponds to one guest page. This mode 1950# requires that KVM accelerator property "dirty-ring-size" is *not* 1951# set. 1952# 1953# 3. Dirty ring mode is similar to dirty bitmap mode, but the 1954# information about modified pages is collected into ring buffer. 1955# This mode tracks page modification per each vCPU separately. It 1956# requires that KVM accelerator property "dirty-ring-size" is set. 1957# 1958# @calc-time: time period for which dirty page rate is calculated. 1959# By default it is specified in seconds, but the unit can be set 1960# explicitly with @calc-time-unit. Note that larger @calc-time 1961# values will typically result in smaller dirty page rates because 1962# page dirtying is a one-time event. Once some page is counted 1963# as dirty during @calc-time period, further writes to this page 1964# will not increase dirty page rate anymore. 1965# 1966# @calc-time-unit: time unit in which @calc-time is specified. 1967# By default it is seconds. (Since 8.2) 1968# 1969# @sample-pages: number of sampled pages per each GiB of guest memory. 1970# Default value is 512. For 4KiB guest pages this corresponds to 1971# sampling ratio of 0.2%. This argument is used only in page 1972# sampling mode. (Since 6.1) 1973# 1974# @mode: mechanism for tracking dirty pages. Default value is 1975# 'page-sampling'. Others are 'dirty-bitmap' and 'dirty-ring'. 
1976# (Since 6.1) 1977# 1978# Since: 5.2 1979# 1980# Example: 1981# 1982# -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 1, 1983# 'sample-pages': 512} } 1984# <- { "return": {} } 1985# 1986# Measure dirty rate using dirty bitmap for 500 milliseconds: 1987# 1988# -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 500, 1989# "calc-time-unit": "millisecond", "mode": "dirty-bitmap"} } 1990# 1991# <- { "return": {} } 1992## 1993{ 'command': 'calc-dirty-rate', 'data': {'calc-time': 'int64', 1994 '*calc-time-unit': 'TimeUnit', 1995 '*sample-pages': 'int', 1996 '*mode': 'DirtyRateMeasureMode'} } 1997 1998## 1999# @query-dirty-rate: 2000# 2001# Query results of the most recent invocation of @calc-dirty-rate. 2002# 2003# @calc-time-unit: time unit in which to report calculation time. 2004# By default it is reported in seconds. (Since 8.2) 2005# 2006# Since: 5.2 2007# 2008# Examples: 2009# 2010# 1. Measurement is in progress: 2011# 2012# <- {"status": "measuring", "sample-pages": 512, 2013# "mode": "page-sampling", "start-time": 1693900454, "calc-time": 10, 2014# "calc-time-unit": "second"} 2015# 2016# 2. Measurement has been completed: 2017# 2018# <- {"status": "measured", "sample-pages": 512, "dirty-rate": 108, 2019# "mode": "page-sampling", "start-time": 1693900454, "calc-time": 10, 2020# "calc-time-unit": "second"} 2021## 2022{ 'command': 'query-dirty-rate', 'data': {'*calc-time-unit': 'TimeUnit' }, 2023 'returns': 'DirtyRateInfo' } 2024 2025## 2026# @DirtyLimitInfo: 2027# 2028# Dirty page rate limit information of a virtual CPU. 2029# 2030# @cpu-index: index of a virtual CPU. 2031# 2032# @limit-rate: upper limit of dirty page rate (MB/s) for a virtual 2033# CPU, 0 means unlimited. 2034# 2035# @current-rate: current dirty page rate (MB/s) for a virtual CPU. 
2036# 2037# Since: 7.1 2038## 2039{ 'struct': 'DirtyLimitInfo', 2040 'data': { 'cpu-index': 'int', 2041 'limit-rate': 'uint64', 2042 'current-rate': 'uint64' } } 2043 2044## 2045# @set-vcpu-dirty-limit: 2046# 2047# Set the upper limit of dirty page rate for virtual CPUs. 2048# 2049# Requires KVM with accelerator property "dirty-ring-size" set. A 2050# virtual CPU's dirty page rate is a measure of its memory load. To 2051# observe dirty page rates, use @calc-dirty-rate. 2052# 2053# @cpu-index: index of a virtual CPU, default is all. 2054# 2055# @dirty-rate: upper limit of dirty page rate (MB/s) for virtual CPUs. 2056# 2057# Since: 7.1 2058# 2059# Example: 2060# 2061# -> {"execute": "set-vcpu-dirty-limit", 2062# "arguments": { "dirty-rate": 200, 2063# "cpu-index": 1 } } 2064# <- { "return": {} } 2065## 2066{ 'command': 'set-vcpu-dirty-limit', 2067 'data': { '*cpu-index': 'int', 2068 'dirty-rate': 'uint64' } } 2069 2070## 2071# @cancel-vcpu-dirty-limit: 2072# 2073# Cancel the upper limit of dirty page rate for virtual CPUs. 2074# 2075# Cancel the dirty page limit for the vCPU which has been set with 2076# set-vcpu-dirty-limit command. Note that this command requires 2077# support from dirty ring, same as the "set-vcpu-dirty-limit". 2078# 2079# @cpu-index: index of a virtual CPU, default is all. 2080# 2081# Since: 7.1 2082# 2083# Example: 2084# 2085# -> {"execute": "cancel-vcpu-dirty-limit", 2086# "arguments": { "cpu-index": 1 } } 2087# <- { "return": {} } 2088## 2089{ 'command': 'cancel-vcpu-dirty-limit', 2090 'data': { '*cpu-index': 'int'} } 2091 2092## 2093# @query-vcpu-dirty-limit: 2094# 2095# Returns information about virtual CPU dirty page rate limits, if 2096# any. 
2097# 2098# Since: 7.1 2099# 2100# Example: 2101# 2102# -> {"execute": "query-vcpu-dirty-limit"} 2103# <- {"return": [ 2104# { "limit-rate": 60, "current-rate": 3, "cpu-index": 0}, 2105# { "limit-rate": 60, "current-rate": 3, "cpu-index": 1}]} 2106## 2107{ 'command': 'query-vcpu-dirty-limit', 2108 'returns': [ 'DirtyLimitInfo' ] } 2109 2110## 2111# @MigrationThreadInfo: 2112# 2113# Information about migrationthreads 2114# 2115# @name: the name of migration thread 2116# 2117# @thread-id: ID of the underlying host thread 2118# 2119# Since: 7.2 2120## 2121{ 'struct': 'MigrationThreadInfo', 2122 'data': {'name': 'str', 2123 'thread-id': 'int'} } 2124 2125## 2126# @query-migrationthreads: 2127# 2128# Returns information of migration threads 2129# 2130# data: migration thread name 2131# 2132# Returns: information about migration threads 2133# 2134# Since: 7.2 2135## 2136{ 'command': 'query-migrationthreads', 2137 'returns': ['MigrationThreadInfo'] } 2138 2139## 2140# @snapshot-save: 2141# 2142# Save a VM snapshot 2143# 2144# @job-id: identifier for the newly created job 2145# 2146# @tag: name of the snapshot to create 2147# 2148# @vmstate: block device node name to save vmstate to 2149# 2150# @devices: list of block device node names to save a snapshot to 2151# 2152# Applications should not assume that the snapshot save is complete 2153# when this command returns. The job commands / events must be used 2154# to determine completion and to fetch details of any errors that 2155# arise. 2156# 2157# Note that execution of the guest CPUs may be stopped during the time 2158# it takes to save the snapshot. A future version of QEMU may ensure 2159# CPUs are executing continuously. 2160# 2161# It is strongly recommended that @devices contain all writable block 2162# device nodes if a consistent snapshot is required. 
2163# 2164# If @tag already exists, an error will be reported 2165# 2166# Returns: nothing 2167# 2168# Example: 2169# 2170# -> { "execute": "snapshot-save", 2171# "arguments": { 2172# "job-id": "snapsave0", 2173# "tag": "my-snap", 2174# "vmstate": "disk0", 2175# "devices": ["disk0", "disk1"] 2176# } 2177# } 2178# <- { "return": { } } 2179# <- {"event": "JOB_STATUS_CHANGE", 2180# "timestamp": {"seconds": 1432121972, "microseconds": 744001}, 2181# "data": {"status": "created", "id": "snapsave0"}} 2182# <- {"event": "JOB_STATUS_CHANGE", 2183# "timestamp": {"seconds": 1432122172, "microseconds": 744001}, 2184# "data": {"status": "running", "id": "snapsave0"}} 2185# <- {"event": "STOP", 2186# "timestamp": {"seconds": 1432122372, "microseconds": 744001} } 2187# <- {"event": "RESUME", 2188# "timestamp": {"seconds": 1432122572, "microseconds": 744001} } 2189# <- {"event": "JOB_STATUS_CHANGE", 2190# "timestamp": {"seconds": 1432122772, "microseconds": 744001}, 2191# "data": {"status": "waiting", "id": "snapsave0"}} 2192# <- {"event": "JOB_STATUS_CHANGE", 2193# "timestamp": {"seconds": 1432122972, "microseconds": 744001}, 2194# "data": {"status": "pending", "id": "snapsave0"}} 2195# <- {"event": "JOB_STATUS_CHANGE", 2196# "timestamp": {"seconds": 1432123172, "microseconds": 744001}, 2197# "data": {"status": "concluded", "id": "snapsave0"}} 2198# -> {"execute": "query-jobs"} 2199# <- {"return": [{"current-progress": 1, 2200# "status": "concluded", 2201# "total-progress": 1, 2202# "type": "snapshot-save", 2203# "id": "snapsave0"}]} 2204# 2205# Since: 6.0 2206## 2207{ 'command': 'snapshot-save', 2208 'data': { 'job-id': 'str', 2209 'tag': 'str', 2210 'vmstate': 'str', 2211 'devices': ['str'] } } 2212 2213## 2214# @snapshot-load: 2215# 2216# Load a VM snapshot 2217# 2218# @job-id: identifier for the newly created job 2219# 2220# @tag: name of the snapshot to load. 
2221# 2222# @vmstate: block device node name to load vmstate from 2223# 2224# @devices: list of block device node names to load a snapshot from 2225# 2226# Applications should not assume that the snapshot load is complete 2227# when this command returns. The job commands / events must be used 2228# to determine completion and to fetch details of any errors that 2229# arise. 2230# 2231# Note that execution of the guest CPUs will be stopped during the 2232# time it takes to load the snapshot. 2233# 2234# It is strongly recommended that @devices contain all writable block 2235# device nodes that can have changed since the original @snapshot-save 2236# command execution. 2237# 2238# Returns: nothing 2239# 2240# Example: 2241# 2242# -> { "execute": "snapshot-load", 2243# "arguments": { 2244# "job-id": "snapload0", 2245# "tag": "my-snap", 2246# "vmstate": "disk0", 2247# "devices": ["disk0", "disk1"] 2248# } 2249# } 2250# <- { "return": { } } 2251# <- {"event": "JOB_STATUS_CHANGE", 2252# "timestamp": {"seconds": 1472124172, "microseconds": 744001}, 2253# "data": {"status": "created", "id": "snapload0"}} 2254# <- {"event": "JOB_STATUS_CHANGE", 2255# "timestamp": {"seconds": 1472125172, "microseconds": 744001}, 2256# "data": {"status": "running", "id": "snapload0"}} 2257# <- {"event": "STOP", 2258# "timestamp": {"seconds": 1472125472, "microseconds": 744001} } 2259# <- {"event": "RESUME", 2260# "timestamp": {"seconds": 1472125872, "microseconds": 744001} } 2261# <- {"event": "JOB_STATUS_CHANGE", 2262# "timestamp": {"seconds": 1472126172, "microseconds": 744001}, 2263# "data": {"status": "waiting", "id": "snapload0"}} 2264# <- {"event": "JOB_STATUS_CHANGE", 2265# "timestamp": {"seconds": 1472127172, "microseconds": 744001}, 2266# "data": {"status": "pending", "id": "snapload0"}} 2267# <- {"event": "JOB_STATUS_CHANGE", 2268# "timestamp": {"seconds": 1472128172, "microseconds": 744001}, 2269# "data": {"status": "concluded", "id": "snapload0"}} 2270# -> {"execute": 
"query-jobs"} 2271# <- {"return": [{"current-progress": 1, 2272# "status": "concluded", 2273# "total-progress": 1, 2274# "type": "snapshot-load", 2275# "id": "snapload0"}]} 2276# 2277# Since: 6.0 2278## 2279{ 'command': 'snapshot-load', 2280 'data': { 'job-id': 'str', 2281 'tag': 'str', 2282 'vmstate': 'str', 2283 'devices': ['str'] } } 2284 2285## 2286# @snapshot-delete: 2287# 2288# Delete a VM snapshot 2289# 2290# @job-id: identifier for the newly created job 2291# 2292# @tag: name of the snapshot to delete. 2293# 2294# @devices: list of block device node names to delete a snapshot from 2295# 2296# Applications should not assume that the snapshot delete is complete 2297# when this command returns. The job commands / events must be used 2298# to determine completion and to fetch details of any errors that 2299# arise. 2300# 2301# Returns: nothing 2302# 2303# Example: 2304# 2305# -> { "execute": "snapshot-delete", 2306# "arguments": { 2307# "job-id": "snapdelete0", 2308# "tag": "my-snap", 2309# "devices": ["disk0", "disk1"] 2310# } 2311# } 2312# <- { "return": { } } 2313# <- {"event": "JOB_STATUS_CHANGE", 2314# "timestamp": {"seconds": 1442124172, "microseconds": 744001}, 2315# "data": {"status": "created", "id": "snapdelete0"}} 2316# <- {"event": "JOB_STATUS_CHANGE", 2317# "timestamp": {"seconds": 1442125172, "microseconds": 744001}, 2318# "data": {"status": "running", "id": "snapdelete0"}} 2319# <- {"event": "JOB_STATUS_CHANGE", 2320# "timestamp": {"seconds": 1442126172, "microseconds": 744001}, 2321# "data": {"status": "waiting", "id": "snapdelete0"}} 2322# <- {"event": "JOB_STATUS_CHANGE", 2323# "timestamp": {"seconds": 1442127172, "microseconds": 744001}, 2324# "data": {"status": "pending", "id": "snapdelete0"}} 2325# <- {"event": "JOB_STATUS_CHANGE", 2326# "timestamp": {"seconds": 1442128172, "microseconds": 744001}, 2327# "data": {"status": "concluded", "id": "snapdelete0"}} 2328# -> {"execute": "query-jobs"} 2329# <- {"return": [{"current-progress": 1, 
2330# "status": "concluded", 2331# "total-progress": 1, 2332# "type": "snapshot-delete", 2333# "id": "snapdelete0"}]} 2334# 2335# Since: 6.0 2336## 2337{ 'command': 'snapshot-delete', 2338 'data': { 'job-id': 'str', 2339 'tag': 'str', 2340 'devices': ['str'] } } 2341