1# -*- Mode: Python -*- 2# vim: filetype=python 3# 4 5## 6# = Migration 7## 8 9{ 'include': 'common.json' } 10{ 'include': 'sockets.json' } 11 12## 13# @MigrationStats: 14# 15# Detailed migration status. 16# 17# @transferred: amount of bytes already transferred to the target VM 18# 19# @remaining: amount of bytes remaining to be transferred to the 20# target VM 21# 22# @total: total amount of bytes involved in the migration process 23# 24# @duplicate: number of duplicate (zero) pages (since 1.2) 25# 26# @normal: number of normal pages (since 1.2) 27# 28# @normal-bytes: number of normal bytes sent (since 1.2) 29# 30# @dirty-pages-rate: number of pages dirtied by second by the guest 31# (since 1.3) 32# 33# @mbps: throughput in megabits/sec. (since 1.6) 34# 35# @dirty-sync-count: number of times that dirty ram was synchronized 36# (since 2.1) 37# 38# @postcopy-requests: The number of page requests received from the 39# destination (since 2.7) 40# 41# @page-size: The number of bytes per page for the various page-based 42# statistics (since 2.10) 43# 44# @multifd-bytes: The number of bytes sent through multifd (since 3.0) 45# 46# @pages-per-second: the number of memory pages transferred per second 47# (Since 4.0) 48# 49# @precopy-bytes: The number of bytes sent in the pre-copy phase 50# (since 7.0). 51# 52# @downtime-bytes: The number of bytes sent while the guest is paused 53# (since 7.0). 54# 55# @postcopy-bytes: The number of bytes sent during the post-copy phase 56# (since 7.0). 57# 58# @dirty-sync-missed-zero-copy: Number of times dirty RAM 59# synchronization could not avoid copying dirty pages. This is 60# between 0 and @dirty-sync-count * @multifd-channels. 
(since 61# 7.1) 62# 63# Since: 0.14 64## 65{ 'struct': 'MigrationStats', 66 'data': {'transferred': 'int', 'remaining': 'int', 'total': 'int' , 67 'duplicate': 'int', 68 'normal': 'int', 69 'normal-bytes': 'int', 'dirty-pages-rate': 'int', 70 'mbps': 'number', 'dirty-sync-count': 'int', 71 'postcopy-requests': 'int', 'page-size': 'int', 72 'multifd-bytes': 'uint64', 'pages-per-second': 'uint64', 73 'precopy-bytes': 'uint64', 'downtime-bytes': 'uint64', 74 'postcopy-bytes': 'uint64', 75 'dirty-sync-missed-zero-copy': 'uint64' } } 76 77## 78# @XBZRLECacheStats: 79# 80# Detailed XBZRLE migration cache statistics 81# 82# @cache-size: XBZRLE cache size 83# 84# @bytes: amount of bytes already transferred to the target VM 85# 86# @pages: amount of pages transferred to the target VM 87# 88# @cache-miss: number of cache miss 89# 90# @cache-miss-rate: rate of cache miss (since 2.1) 91# 92# @encoding-rate: rate of encoded bytes (since 5.1) 93# 94# @overflow: number of overflows 95# 96# Since: 1.2 97## 98{ 'struct': 'XBZRLECacheStats', 99 'data': {'cache-size': 'size', 'bytes': 'int', 'pages': 'int', 100 'cache-miss': 'int', 'cache-miss-rate': 'number', 101 'encoding-rate': 'number', 'overflow': 'int' } } 102 103## 104# @CompressionStats: 105# 106# Detailed migration compression statistics 107# 108# @pages: amount of pages compressed and transferred to the target VM 109# 110# @busy: count of times that no free thread was available to compress 111# data 112# 113# @busy-rate: rate of thread busy 114# 115# @compressed-size: amount of bytes after compression 116# 117# @compression-rate: rate of compressed size 118# 119# Since: 3.1 120## 121{ 'struct': 'CompressionStats', 122 'data': {'pages': 'int', 'busy': 'int', 'busy-rate': 'number', 123 'compressed-size': 'int', 'compression-rate': 'number' } } 124 125## 126# @MigrationStatus: 127# 128# An enumeration of migration status. 129# 130# @none: no migration has ever happened. 131# 132# @setup: migration process has been initiated. 
133# 134# @cancelling: in the process of cancelling migration. 135# 136# @cancelled: cancelling migration is finished. 137# 138# @active: in the process of doing migration. 139# 140# @postcopy-active: like active, but now in postcopy mode. (since 141# 2.5) 142# 143# @postcopy-paused: during postcopy but paused. (since 3.0) 144# 145# @postcopy-recover: trying to recover from a paused postcopy. (since 146# 3.0) 147# 148# @completed: migration is finished. 149# 150# @failed: some error occurred during migration process. 151# 152# @colo: VM is in the process of fault tolerance, VM can not get into 153# this state unless colo capability is enabled for migration. 154# (since 2.8) 155# 156# @pre-switchover: Paused before device serialisation. (since 2.11) 157# 158# @device: During device serialisation when pause-before-switchover is 159# enabled (since 2.11) 160# 161# @wait-unplug: wait for device unplug request by guest OS to be 162# completed. (since 4.2) 163# 164# Since: 2.3 165## 166{ 'enum': 'MigrationStatus', 167 'data': [ 'none', 'setup', 'cancelling', 'cancelled', 168 'active', 'postcopy-active', 'postcopy-paused', 169 'postcopy-recover', 'completed', 'failed', 'colo', 170 'pre-switchover', 'device', 'wait-unplug' ] } 171## 172# @VfioStats: 173# 174# Detailed VFIO devices migration statistics 175# 176# @transferred: amount of bytes transferred to the target VM by VFIO 177# devices 178# 179# Since: 5.2 180## 181{ 'struct': 'VfioStats', 182 'data': {'transferred': 'int' } } 183 184## 185# @MigrationInfo: 186# 187# Information about current migration process. 188# 189# @status: @MigrationStatus describing the current migration status. 
190# If this field is not returned, no migration process has been 191# initiated 192# 193# @ram: @MigrationStats containing detailed migration status, only 194# returned if status is 'active' or 'completed'(since 1.2) 195# 196# @xbzrle-cache: @XBZRLECacheStats containing detailed XBZRLE 197# migration statistics, only returned if XBZRLE feature is on and 198# status is 'active' or 'completed' (since 1.2) 199# 200# @total-time: total amount of milliseconds since migration started. 201# If migration has ended, it returns the total migration time. 202# (since 1.2) 203# 204# @downtime: only present when migration finishes correctly total 205# downtime in milliseconds for the guest. (since 1.3) 206# 207# @expected-downtime: only present while migration is active expected 208# downtime in milliseconds for the guest in last walk of the dirty 209# bitmap. (since 1.3) 210# 211# @setup-time: amount of setup time in milliseconds *before* the 212# iterations begin but *after* the QMP command is issued. This is 213# designed to provide an accounting of any activities (such as 214# RDMA pinning) which may be expensive, but do not actually occur 215# during the iterative migration rounds themselves. (since 1.6) 216# 217# @cpu-throttle-percentage: percentage of time guest cpus are being 218# throttled during auto-converge. This is only present when 219# auto-converge has started throttling guest cpus. (Since 2.7) 220# 221# @error-desc: the human readable error description string. Clients 222# should not attempt to parse the error strings. (Since 2.7) 223# 224# @postcopy-blocktime: total time when all vCPU were blocked during 225# postcopy live migration. This is only present when the 226# postcopy-blocktime migration capability is enabled. (Since 3.0) 227# 228# @postcopy-vcpu-blocktime: list of the postcopy blocktime per vCPU. 229# This is only present when the postcopy-blocktime migration 230# capability is enabled. 
(Since 3.0) 231# 232# @compression: migration compression statistics, only returned if 233# compression feature is on and status is 'active' or 'completed' 234# (Since 3.1) 235# 236# @socket-address: Only used for tcp, to know what the real port is 237# (Since 4.0) 238# 239# @vfio: @VfioStats containing detailed VFIO devices migration 240# statistics, only returned if VFIO device is present, migration 241# is supported by all VFIO devices and status is 'active' or 242# 'completed' (since 5.2) 243# 244# @blocked-reasons: A list of reasons an outgoing migration is 245# blocked. Present and non-empty when migration is blocked. 246# (since 6.0) 247# 248# @dirty-limit-throttle-time-per-round: Maximum throttle time 249# (in microseconds) of virtual CPUs each dirty ring full round, 250# which shows how MigrationCapability dirty-limit affects the 251# guest during live migration. (Since 8.1) 252# 253# @dirty-limit-ring-full-time: Estimated average dirty ring full time 254# (in microseconds) for each dirty ring full round. The value 255# equals the dirty ring memory size divided by the average dirty 256# page rate of the virtual CPU, which can be used to observe the 257# average memory load of the virtual CPU indirectly. Note that 258# zero means guest doesn't dirty memory. (Since 8.1) 259# 260# Features: 261# 262# @deprecated: Member @compression is deprecated because it is 263# unreliable and untested. It is recommended to use multifd 264# migration, which offers an alternative compression 265# implementation that is reliable and tested. 
266# 267# Since: 0.14 268## 269{ 'struct': 'MigrationInfo', 270 'data': {'*status': 'MigrationStatus', '*ram': 'MigrationStats', 271 '*vfio': 'VfioStats', 272 '*xbzrle-cache': 'XBZRLECacheStats', 273 '*total-time': 'int', 274 '*expected-downtime': 'int', 275 '*downtime': 'int', 276 '*setup-time': 'int', 277 '*cpu-throttle-percentage': 'int', 278 '*error-desc': 'str', 279 '*blocked-reasons': ['str'], 280 '*postcopy-blocktime': 'uint32', 281 '*postcopy-vcpu-blocktime': ['uint32'], 282 '*compression': { 'type': 'CompressionStats', 'features': [ 'deprecated' ] }, 283 '*socket-address': ['SocketAddress'], 284 '*dirty-limit-throttle-time-per-round': 'uint64', 285 '*dirty-limit-ring-full-time': 'uint64'} } 286 287## 288# @query-migrate: 289# 290# Returns information about current migration process. If migration 291# is active there will be another json-object with RAM migration 292# status. 293# 294# Returns: @MigrationInfo 295# 296# Since: 0.14 297# 298# Examples: 299# 300# 1. Before the first migration 301# 302# -> { "execute": "query-migrate" } 303# <- { "return": {} } 304# 305# 2. Migration is done and has succeeded 306# 307# -> { "execute": "query-migrate" } 308# <- { "return": { 309# "status": "completed", 310# "total-time":12345, 311# "setup-time":12345, 312# "downtime":12345, 313# "ram":{ 314# "transferred":123, 315# "remaining":123, 316# "total":246, 317# "duplicate":123, 318# "normal":123, 319# "normal-bytes":123456, 320# "dirty-sync-count":15 321# } 322# } 323# } 324# 325# 3. Migration is done and has failed 326# 327# -> { "execute": "query-migrate" } 328# <- { "return": { "status": "failed" } } 329# 330# 4. 
Migration is being performed: 331# 332# -> { "execute": "query-migrate" } 333# <- { 334# "return":{ 335# "status":"active", 336# "total-time":12345, 337# "setup-time":12345, 338# "expected-downtime":12345, 339# "ram":{ 340# "transferred":123, 341# "remaining":123, 342# "total":246, 343# "duplicate":123, 344# "normal":123, 345# "normal-bytes":123456, 346# "dirty-sync-count":15 347# } 348# } 349# } 350# 351# 5. Migration is being performed and XBZRLE is active: 352# 353# -> { "execute": "query-migrate" } 354# <- { 355# "return":{ 356# "status":"active", 357# "total-time":12345, 358# "setup-time":12345, 359# "expected-downtime":12345, 360# "ram":{ 361# "total":1057024, 362# "remaining":1053304, 363# "transferred":3720, 364# "duplicate":10, 365# "normal":3333, 366# "normal-bytes":3412992, 367# "dirty-sync-count":15 368# }, 369# "xbzrle-cache":{ 370# "cache-size":67108864, 371# "bytes":20971520, 372# "pages":2444343, 373# "cache-miss":2244, 374# "cache-miss-rate":0.123, 375# "encoding-rate":80.1, 376# "overflow":34434 377# } 378# } 379# } 380## 381{ 'command': 'query-migrate', 'returns': 'MigrationInfo' } 382 383## 384# @MigrationCapability: 385# 386# Migration capabilities enumeration 387# 388# @xbzrle: Migration supports xbzrle (Xor Based Zero Run Length 389# Encoding). This feature allows us to minimize migration traffic 390# for certain work loads, by sending compressed difference of the 391# pages 392# 393# @rdma-pin-all: Controls whether or not the entire VM memory 394# footprint is mlock()'d on demand or all at once. Refer to 395# docs/rdma.txt for usage. Disabled by default. (since 2.0) 396# 397# @zero-blocks: During storage migration encode blocks of zeroes 398# efficiently. This essentially saves 1MB of zeroes per block on 399# the wire. Enabling requires source and target VM to support 400# this feature. To enable it is sufficient to enable the 401# capability on the source VM. The feature is disabled by default. 
402# (since 1.6) 403# 404# @compress: Use multiple compression threads to accelerate live 405# migration. This feature can help to reduce the migration 406# traffic, by sending compressed pages. Please note that if 407# compress and xbzrle are both on, compress only takes effect in 408# the ram bulk stage, after that, it will be disabled and only 409# xbzrle takes effect, this can help to minimize migration 410# traffic. The feature is disabled by default. (since 2.4) 411# 412# @events: generate events for each migration state change (since 2.4) 413# 414# @auto-converge: If enabled, QEMU will automatically throttle down 415# the guest to speed up convergence of RAM migration. (since 1.6) 416# 417# @postcopy-ram: Start executing on the migration target before all of 418# RAM has been migrated, pulling the remaining pages along as 419# needed. The capacity must have the same setting on both source 420# and target or migration will not even start. NOTE: If the 421# migration fails during postcopy the VM will fail. (since 2.6) 422# 423# @x-colo: If enabled, migration will never end, and the state of the 424# VM on the primary side will be migrated continuously to the VM 425# on secondary side, this process is called COarse-Grain LOck 426# Stepping (COLO) for Non-stop Service. (since 2.8) 427# 428# @release-ram: if enabled, qemu will free the migrated ram pages on 429# the source during postcopy-ram migration. (since 2.9) 430# 431# @return-path: If enabled, migration will use the return path even 432# for precopy. (since 2.10) 433# 434# @pause-before-switchover: Pause outgoing migration before 435# serialising device state and before disabling block IO (since 436# 2.11) 437# 438# @multifd: Use more than one fd for migration (since 4.0) 439# 440# @dirty-bitmaps: If enabled, QEMU will migrate named dirty bitmaps. 
441# (since 2.12) 442# 443# @postcopy-blocktime: Calculate downtime for postcopy live migration 444# (since 3.0) 445# 446# @late-block-activate: If enabled, the destination will not activate 447# block devices (and thus take locks) immediately at the end of 448# migration. (since 3.0) 449# 450# @x-ignore-shared: If enabled, QEMU will not migrate shared memory 451# that is accessible on the destination machine. (since 4.0) 452# 453# @validate-uuid: Send the UUID of the source to allow the destination 454# to ensure it is the same. (since 4.2) 455# 456# @background-snapshot: If enabled, the migration stream will be a 457# snapshot of the VM exactly at the point when the migration 458# procedure starts. The VM RAM is saved with running VM. 459# (since 6.0) 460# 461# @zero-copy-send: Controls behavior on sending memory pages on 462# migration. When true, enables a zero-copy mechanism for sending 463# memory pages, if host supports it. Requires that QEMU be 464# permitted to use locked memory for guest RAM pages. (since 7.1) 465# 466# @postcopy-preempt: If enabled, the migration process will allow 467# postcopy requests to preempt precopy stream, so postcopy 468# requests will be handled faster. This is a performance feature 469# and should not affect the correctness of postcopy migration. 470# (since 7.1) 471# 472# @switchover-ack: If enabled, migration will not stop the source VM 473# and complete the migration until an ACK is received from the 474# destination that it's OK to do so. Exactly when this ACK is 475# sent depends on the migrated devices that use this feature. For 476# example, a device can use it to make sure some of its data is 477# sent and loaded in the destination before doing switchover. 478# This can reduce downtime if devices that support this capability 479# are present. 'return-path' capability must be enabled to use 480# it. 
(since 8.1) 481# 482# @dirty-limit: If enabled, migration will throttle vCPUs as needed to 483# keep their dirty page rate within @vcpu-dirty-limit. This can 484# improve responsiveness of large guests during live migration, 485# and can result in more stable read performance. Requires KVM 486# with accelerator property "dirty-ring-size" set. (Since 8.1) 487# 488# @mapped-ram: Migrate using fixed offsets in the migration file for 489# each RAM page. Requires a migration URI that supports seeking, 490# such as a file. (since 9.0) 491# 492# Features: 493# 494# @deprecated: Member @compress is deprecated because it is unreliable 495# and untested. It is recommended to use multifd migration, which 496# offers an alternative compression implementation that is 497# reliable and tested. 498# 499# @unstable: Members @x-colo and @x-ignore-shared are experimental. 500# 501# Since: 1.2 502## 503{ 'enum': 'MigrationCapability', 504 'data': ['xbzrle', 'rdma-pin-all', 'auto-converge', 'zero-blocks', 505 { 'name': 'compress', 'features': [ 'deprecated' ] }, 506 'events', 'postcopy-ram', 507 { 'name': 'x-colo', 'features': [ 'unstable' ] }, 508 'release-ram', 509 'return-path', 'pause-before-switchover', 'multifd', 510 'dirty-bitmaps', 'postcopy-blocktime', 'late-block-activate', 511 { 'name': 'x-ignore-shared', 'features': [ 'unstable' ] }, 512 'validate-uuid', 'background-snapshot', 513 'zero-copy-send', 'postcopy-preempt', 'switchover-ack', 514 'dirty-limit', 'mapped-ram'] } 515 516## 517# @MigrationCapabilityStatus: 518# 519# Migration capability information 520# 521# @capability: capability enum 522# 523# @state: capability state bool 524# 525# Since: 1.2 526## 527{ 'struct': 'MigrationCapabilityStatus', 528 'data': { 'capability': 'MigrationCapability', 'state': 'bool' } } 529 530## 531# @migrate-set-capabilities: 532# 533# Enable/Disable the following migration capabilities (like xbzrle) 534# 535# @capabilities: json array of capability modifications to make 536# 537# 
Since: 1.2 538# 539# Example: 540# 541# -> { "execute": "migrate-set-capabilities" , "arguments": 542# { "capabilities": [ { "capability": "xbzrle", "state": true } ] } } 543# <- { "return": {} } 544## 545{ 'command': 'migrate-set-capabilities', 546 'data': { 'capabilities': ['MigrationCapabilityStatus'] } } 547 548## 549# @query-migrate-capabilities: 550# 551# Returns information about the current migration capabilities status 552# 553# Returns: @MigrationCapabilityStatus 554# 555# Since: 1.2 556# 557# Example: 558# 559# -> { "execute": "query-migrate-capabilities" } 560# <- { "return": [ 561# {"state": false, "capability": "xbzrle"}, 562# {"state": false, "capability": "rdma-pin-all"}, 563# {"state": false, "capability": "auto-converge"}, 564# {"state": false, "capability": "zero-blocks"}, 565# {"state": false, "capability": "compress"}, 566# {"state": true, "capability": "events"}, 567# {"state": false, "capability": "postcopy-ram"}, 568# {"state": false, "capability": "x-colo"} 569# ]} 570## 571{ 'command': 'query-migrate-capabilities', 'returns': ['MigrationCapabilityStatus']} 572 573## 574# @MultiFDCompression: 575# 576# An enumeration of multifd compression methods. 577# 578# @none: no compression. 579# 580# @zlib: use zlib compression method. 581# 582# @zstd: use zstd compression method. 583# 584# Since: 5.0 585## 586{ 'enum': 'MultiFDCompression', 587 'data': [ 'none', 'zlib', 588 { 'name': 'zstd', 'if': 'CONFIG_ZSTD' } ] } 589 590## 591# @MigMode: 592# 593# @normal: the original form of migration. (since 8.2) 594# 595# @cpr-reboot: The migrate command stops the VM and saves state to the 596# URI. After quitting QEMU, the user resumes by running QEMU 597# -incoming. 598# 599# This mode allows the user to quit QEMU, optionally update and 600# reboot the OS, and restart QEMU. If the user reboots, the URI 601# must persist across the reboot, such as by using a file. 
602# 603# Unlike normal mode, the use of certain local storage options 604# does not block the migration, but the user must not modify the 605# contents of guest block devices between the quit and restart. 606# 607# This mode supports VFIO devices provided the user first puts the 608# guest in the suspended runstate, such as by issuing 609# guest-suspend-ram to the QEMU guest agent. 610# 611# Best performance is achieved when the memory backend is shared 612# and the @x-ignore-shared migration capability is set, but this 613# is not required. Further, if the user reboots before restarting 614# such a configuration, the shared memory must persist across the 615# reboot, such as by backing it with a dax device. 616# 617# @cpr-reboot may not be used with postcopy, background-snapshot, 618# or COLO. 619# 620# (since 8.2) 621## 622{ 'enum': 'MigMode', 623 'data': [ 'normal', 'cpr-reboot' ] } 624 625## 626# @ZeroPageDetection: 627# 628# @none: Do not perform zero page checking. 629# 630# @legacy: Perform zero page checking in main migration thread. 631# 632# @multifd: Perform zero page checking in multifd sender thread if 633# multifd migration is enabled, else in the main migration thread 634# as for @legacy. 635# 636# Since: 9.0 637## 638{ 'enum': 'ZeroPageDetection', 639 'data': [ 'none', 'legacy', 'multifd' ] } 640 641## 642# @BitmapMigrationBitmapAliasTransform: 643# 644# @persistent: If present, the bitmap will be made persistent or 645# transient depending on this parameter. 646# 647# Since: 6.0 648## 649{ 'struct': 'BitmapMigrationBitmapAliasTransform', 650 'data': { 651 '*persistent': 'bool' 652 } } 653 654## 655# @BitmapMigrationBitmapAlias: 656# 657# @name: The name of the bitmap. 658# 659# @alias: An alias name for migration (for example the bitmap name on 660# the opposite site). 661# 662# @transform: Allows the modification of the migrated bitmap. 
(since 663# 6.0) 664# 665# Since: 5.2 666## 667{ 'struct': 'BitmapMigrationBitmapAlias', 668 'data': { 669 'name': 'str', 670 'alias': 'str', 671 '*transform': 'BitmapMigrationBitmapAliasTransform' 672 } } 673 674## 675# @BitmapMigrationNodeAlias: 676# 677# Maps a block node name and the bitmaps it has to aliases for dirty 678# bitmap migration. 679# 680# @node-name: A block node name. 681# 682# @alias: An alias block node name for migration (for example the node 683# name on the opposite site). 684# 685# @bitmaps: Mappings for the bitmaps on this node. 686# 687# Since: 5.2 688## 689{ 'struct': 'BitmapMigrationNodeAlias', 690 'data': { 691 'node-name': 'str', 692 'alias': 'str', 693 'bitmaps': [ 'BitmapMigrationBitmapAlias' ] 694 } } 695 696## 697# @MigrationParameter: 698# 699# Migration parameters enumeration 700# 701# @announce-initial: Initial delay (in milliseconds) before sending 702# the first announce (Since 4.0) 703# 704# @announce-max: Maximum delay (in milliseconds) between packets in 705# the announcement (Since 4.0) 706# 707# @announce-rounds: Number of self-announce packets sent after 708# migration (Since 4.0) 709# 710# @announce-step: Increase in delay (in milliseconds) between 711# subsequent packets in the announcement (Since 4.0) 712# 713# @compress-level: Set the compression level to be used in live 714# migration, the compression level is an integer between 0 and 9, 715# where 0 means no compression, 1 means the best compression 716# speed, and 9 means best compression ratio which will consume 717# more CPU. 718# 719# @compress-threads: Set compression thread count to be used in live 720# migration, the compression thread count is an integer between 1 721# and 255. 722# 723# @compress-wait-thread: Controls behavior when all compression 724# threads are currently busy. If true (default), wait for a free 725# compression thread to become available; otherwise, send the page 726# uncompressed. 
(Since 3.1) 727# 728# @decompress-threads: Set decompression thread count to be used in 729# live migration, the decompression thread count is an integer 730# between 1 and 255. Usually, decompression is at least 4 times as 731# fast as compression, so set the decompress-threads to the number 732# about 1/4 of compress-threads is adequate. 733# 734# @throttle-trigger-threshold: The ratio of bytes_dirty_period and 735# bytes_xfer_period to trigger throttling. It is expressed as 736# percentage. The default value is 50. (Since 5.0) 737# 738# @cpu-throttle-initial: Initial percentage of time guest cpus are 739# throttled when migration auto-converge is activated. The 740# default value is 20. (Since 2.7) 741# 742# @cpu-throttle-increment: throttle percentage increase each time 743# auto-converge detects that migration is not making progress. 744# The default value is 10. (Since 2.7) 745# 746# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage At 747# the tail stage of throttling, the Guest is very sensitive to CPU 748# percentage while the @cpu-throttle -increment is excessive 749# usually at tail stage. If this parameter is true, we will 750# compute the ideal CPU percentage used by the Guest, which may 751# exactly make the dirty rate match the dirty rate threshold. 752# Then we will choose a smaller throttle increment between the one 753# specified by @cpu-throttle-increment and the one generated by 754# ideal CPU percentage. Therefore, it is compatible to 755# traditional throttling, meanwhile the throttle increment won't 756# be excessive at tail stage. The default value is false. (Since 757# 5.1) 758# 759# @tls-creds: ID of the 'tls-creds' object that provides credentials 760# for establishing a TLS connection over the migration data 761# channel. On the outgoing side of the migration, the credentials 762# must be for a 'client' endpoint, while for the incoming side the 763# credentials must be for a 'server' endpoint. 
Setting this to a 764# non-empty string enables TLS for all migrations. An empty 765# string means that QEMU will use plain text mode for migration, 766# rather than TLS. (Since 2.7) 767# 768# @tls-hostname: migration target's hostname for validating the 769# server's x509 certificate identity. If empty, QEMU will use the 770# hostname from the migration URI, if any. A non-empty value is 771# required when using x509 based TLS credentials and the migration 772# URI does not include a hostname, such as fd: or exec: based 773# migration. (Since 2.7) 774# 775# Note: empty value works only since 2.9. 776# 777# @tls-authz: ID of the 'authz' object subclass that provides access 778# control checking of the TLS x509 certificate distinguished name. 779# This object is only resolved at time of use, so can be deleted 780# and recreated on the fly while the migration server is active. 781# If missing, it will default to denying access (Since 4.0) 782# 783# @max-bandwidth: maximum speed for migration, in bytes per second. 784# (Since 2.8) 785# 786# @avail-switchover-bandwidth: to set the available bandwidth that 787# migration can use during switchover phase. NOTE! This does not 788# limit the bandwidth during switchover, but only for calculations 789# when making decisions to switchover. By default, this value is 790# zero, which means QEMU will estimate the bandwidth 791# automatically. This can be set when the estimated value is not 792# accurate, while the user is able to guarantee such bandwidth is 793# available when switching over. When specified correctly, this 794# can make the switchover decision much more accurate. 795# (Since 8.2) 796# 797# @downtime-limit: set maximum tolerated downtime for migration. 798# maximum downtime in milliseconds (Since 2.8) 799# 800# @x-checkpoint-delay: The delay time (in ms) between two COLO 801# checkpoints in periodic mode. (Since 2.8) 802# 803# @multifd-channels: Number of channels used to migrate data in 804# parallel. 
This is the same number that the number of sockets 805# used for migration. The default value is 2 (since 4.0) 806# 807# @xbzrle-cache-size: cache size to be used by XBZRLE migration. It 808# needs to be a multiple of the target page size and a power of 2 809# (Since 2.11) 810# 811# @max-postcopy-bandwidth: Background transfer bandwidth during 812# postcopy. Defaults to 0 (unlimited). In bytes per second. 813# (Since 3.0) 814# 815# @max-cpu-throttle: maximum cpu throttle percentage. Defaults to 99. 816# (Since 3.1) 817# 818# @multifd-compression: Which compression method to use. Defaults to 819# none. (Since 5.0) 820# 821# @multifd-zlib-level: Set the compression level to be used in live 822# migration, the compression level is an integer between 0 and 9, 823# where 0 means no compression, 1 means the best compression 824# speed, and 9 means best compression ratio which will consume 825# more CPU. Defaults to 1. (Since 5.0) 826# 827# @multifd-zstd-level: Set the compression level to be used in live 828# migration, the compression level is an integer between 0 and 20, 829# where 0 means no compression, 1 means the best compression 830# speed, and 20 means best compression ratio which will consume 831# more CPU. Defaults to 1. (Since 5.0) 832# 833# @block-bitmap-mapping: Maps block nodes and bitmaps on them to 834# aliases for the purpose of dirty bitmap migration. Such aliases 835# may for example be the corresponding names on the opposite site. 836# The mapping must be one-to-one, but not necessarily complete: On 837# the source, unmapped bitmaps and all bitmaps on unmapped nodes 838# will be ignored. On the destination, encountering an unmapped 839# alias in the incoming migration stream will result in a report, 840# and all further bitmap migration data will then be discarded. 
841# Note that the destination does not know about bitmaps it does 842# not receive, so there is no limitation or requirement regarding 843# the number of bitmaps received, or how they are named, or on 844# which nodes they are placed. By default (when this parameter 845# has never been set), bitmap names are mapped to themselves. 846# Nodes are mapped to their block device name if there is one, and 847# to their node name otherwise. (Since 5.2) 848# 849# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty 850# limit during live migration. Should be in the range 1 to 851# 1000ms. Defaults to 1000ms. (Since 8.1) 852# 853# @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration. 854# Defaults to 1. (Since 8.1) 855# 856# @mode: Migration mode. See description in @MigMode. Default is 857# 'normal'. (Since 8.2) 858# 859# @zero-page-detection: Whether and how to detect zero pages. 860# See description in @ZeroPageDetection. Default is 'multifd'. 861# (since 9.0) 862# 863# Features: 864# 865# @deprecated: Members @compress-level, @compress-threads, 866# @decompress-threads and @compress-wait-thread are deprecated 867# because @compression is deprecated. 868# 869# @unstable: Members @x-checkpoint-delay and 870# @x-vcpu-dirty-limit-period are experimental. 
871# 872# Since: 2.4 873## 874{ 'enum': 'MigrationParameter', 875 'data': ['announce-initial', 'announce-max', 876 'announce-rounds', 'announce-step', 877 { 'name': 'compress-level', 'features': [ 'deprecated' ] }, 878 { 'name': 'compress-threads', 'features': [ 'deprecated' ] }, 879 { 'name': 'decompress-threads', 'features': [ 'deprecated' ] }, 880 { 'name': 'compress-wait-thread', 'features': [ 'deprecated' ] }, 881 'throttle-trigger-threshold', 882 'cpu-throttle-initial', 'cpu-throttle-increment', 883 'cpu-throttle-tailslow', 884 'tls-creds', 'tls-hostname', 'tls-authz', 'max-bandwidth', 885 'avail-switchover-bandwidth', 'downtime-limit', 886 { 'name': 'x-checkpoint-delay', 'features': [ 'unstable' ] }, 887 'multifd-channels', 888 'xbzrle-cache-size', 'max-postcopy-bandwidth', 889 'max-cpu-throttle', 'multifd-compression', 890 'multifd-zlib-level', 'multifd-zstd-level', 891 'block-bitmap-mapping', 892 { 'name': 'x-vcpu-dirty-limit-period', 'features': ['unstable'] }, 893 'vcpu-dirty-limit', 894 'mode', 895 'zero-page-detection'] } 896 897## 898# @MigrateSetParameters: 899# 900# @announce-initial: Initial delay (in milliseconds) before sending 901# the first announce (Since 4.0) 902# 903# @announce-max: Maximum delay (in milliseconds) between packets in 904# the announcement (Since 4.0) 905# 906# @announce-rounds: Number of self-announce packets sent after 907# migration (Since 4.0) 908# 909# @announce-step: Increase in delay (in milliseconds) between 910# subsequent packets in the announcement (Since 4.0) 911# 912# @compress-level: Set the compression level to be used in live 913# migration, the compression level is an integer between 0 and 9, 914# where 0 means no compression, 1 means the best compression 915# speed, and 9 means best compression ratio which will consume 916# more CPU. 917# 918# @compress-threads: Set compression thread count to be used in live 919# migration, the compression thread count is an integer between 1 920# and 255. 
921# 922# @compress-wait-thread: Controls behavior when all compression 923# threads are currently busy. If true (default), wait for a free 924# compression thread to become available; otherwise, send the page 925# uncompressed. (Since 3.1) 926# 927# @decompress-threads: Set decompression thread count to be used in 928# live migration, the decompression thread count is an integer 929# between 1 and 255. Usually, decompression is at least 4 times as 930# fast as compression, so setting the decompress-threads to 931# about 1/4 of compress-threads is adequate. 932# 933# @throttle-trigger-threshold: The ratio of bytes_dirty_period and 934# bytes_xfer_period to trigger throttling. It is expressed as 935# percentage. The default value is 50. (Since 5.0) 936# 937# @cpu-throttle-initial: Initial percentage of time guest cpus are 938# throttled when migration auto-converge is activated. The 939# default value is 20. (Since 2.7) 940# 941# @cpu-throttle-increment: throttle percentage increase each time 942# auto-converge detects that migration is not making progress. 943# The default value is 10. (Since 2.7) 944# 945# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage. At 946# the tail stage of throttling, the Guest is very sensitive to CPU 947# percentage while the @cpu-throttle-increment is excessive 948# usually at tail stage. If this parameter is true, we will 949# compute the ideal CPU percentage used by the Guest, which may 950# exactly make the dirty rate match the dirty rate threshold. 951# Then we will choose a smaller throttle increment between the one 952# specified by @cpu-throttle-increment and the one generated by 953# ideal CPU percentage. Therefore, it is compatible with 954# traditional throttling, meanwhile the throttle increment won't 955# be excessive at tail stage. The default value is false.
(Since 956# 5.1) 957# 958# @tls-creds: ID of the 'tls-creds' object that provides credentials 959# for establishing a TLS connection over the migration data 960# channel. On the outgoing side of the migration, the credentials 961# must be for a 'client' endpoint, while for the incoming side the 962# credentials must be for a 'server' endpoint. Setting this to a 963# non-empty string enables TLS for all migrations. An empty 964# string means that QEMU will use plain text mode for migration, 965# rather than TLS. This is the default. (Since 2.7) 966# 967# @tls-hostname: migration target's hostname for validating the 968# server's x509 certificate identity. If empty, QEMU will use the 969# hostname from the migration URI, if any. A non-empty value is 970# required when using x509 based TLS credentials and the migration 971# URI does not include a hostname, such as fd: or exec: based 972# migration. (Since 2.7) 973# 974# Note: empty value works only since 2.9. 975# 976# @tls-authz: ID of the 'authz' object subclass that provides access 977# control checking of the TLS x509 certificate distinguished name. 978# This object is only resolved at time of use, so can be deleted 979# and recreated on the fly while the migration server is active. 980# If missing, it will default to denying access (Since 4.0) 981# 982# @max-bandwidth: maximum speed for migration, in bytes per second. 983# (Since 2.8) 984# 985# @avail-switchover-bandwidth: to set the available bandwidth that 986# migration can use during switchover phase. NOTE! This does not 987# limit the bandwidth during switchover, but only for calculations 988# when making decisions to switchover. By default, this value is 989# zero, which means QEMU will estimate the bandwidth 990# automatically. This can be set when the estimated value is not 991# accurate, while the user is able to guarantee such bandwidth is 992# available when switching over. 
When specified correctly, this 993# can make the switchover decision much more accurate. 994# (Since 8.2) 995# 996# @downtime-limit: set maximum tolerated downtime for migration. 997# maximum downtime in milliseconds (Since 2.8) 998# 999# @x-checkpoint-delay: The delay time (in ms) between two COLO 1000# checkpoints in periodic mode. (Since 2.8) 1001# 1002# @multifd-channels: Number of channels used to migrate data in 1003# parallel. This is the same number as the number of sockets 1004# used for migration. The default value is 2 (since 4.0) 1005# 1006# @xbzrle-cache-size: cache size to be used by XBZRLE migration. It 1007# needs to be a multiple of the target page size and a power of 2 1008# (Since 2.11) 1009# 1010# @max-postcopy-bandwidth: Background transfer bandwidth during 1011# postcopy. Defaults to 0 (unlimited). In bytes per second. 1012# (Since 3.0) 1013# 1014# @max-cpu-throttle: maximum cpu throttle percentage. Defaults to 99. 1015# (Since 3.1) 1016# 1017# @multifd-compression: Which compression method to use. Defaults to 1018# none. (Since 5.0) 1019# 1020# @multifd-zlib-level: Set the compression level to be used in live 1021# migration, the compression level is an integer between 0 and 9, 1022# where 0 means no compression, 1 means the best compression 1023# speed, and 9 means best compression ratio which will consume 1024# more CPU. Defaults to 1. (Since 5.0) 1025# 1026# @multifd-zstd-level: Set the compression level to be used in live 1027# migration, the compression level is an integer between 0 and 20, 1028# where 0 means no compression, 1 means the best compression 1029# speed, and 20 means best compression ratio which will consume 1030# more CPU. Defaults to 1. (Since 5.0) 1031# 1032# @block-bitmap-mapping: Maps block nodes and bitmaps on them to 1033# aliases for the purpose of dirty bitmap migration. Such aliases 1034# may for example be the corresponding names on the opposite site.
1035# The mapping must be one-to-one, but not necessarily complete: On 1036# the source, unmapped bitmaps and all bitmaps on unmapped nodes 1037# will be ignored. On the destination, encountering an unmapped 1038# alias in the incoming migration stream will result in a report, 1039# and all further bitmap migration data will then be discarded. 1040# Note that the destination does not know about bitmaps it does 1041# not receive, so there is no limitation or requirement regarding 1042# the number of bitmaps received, or how they are named, or on 1043# which nodes they are placed. By default (when this parameter 1044# has never been set), bitmap names are mapped to themselves. 1045# Nodes are mapped to their block device name if there is one, and 1046# to their node name otherwise. (Since 5.2) 1047# 1048# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty 1049# limit during live migration. Should be in the range 1 to 1050# 1000ms. Defaults to 1000ms. (Since 8.1) 1051# 1052# @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration. 1053# Defaults to 1. (Since 8.1) 1054# 1055# @mode: Migration mode. See description in @MigMode. Default is 1056# 'normal'. (Since 8.2) 1057# 1058# @zero-page-detection: Whether and how to detect zero pages. 1059# See description in @ZeroPageDetection. Default is 'multifd'. 1060# (since 9.0) 1061# 1062# Features: 1063# 1064# @deprecated: Members @compress-level, @compress-threads, 1065# @decompress-threads and @compress-wait-thread are deprecated 1066# because @compression is deprecated. 1067# 1068# @unstable: Members @x-checkpoint-delay and 1069# @x-vcpu-dirty-limit-period are experimental. 
1070# 1071# TODO: either fuse back into MigrationParameters, or make 1072# MigrationParameters members mandatory 1073# 1074# Since: 2.4 1075## 1076{ 'struct': 'MigrateSetParameters', 1077 'data': { '*announce-initial': 'size', 1078 '*announce-max': 'size', 1079 '*announce-rounds': 'size', 1080 '*announce-step': 'size', 1081 '*compress-level': { 'type': 'uint8', 1082 'features': [ 'deprecated' ] }, 1083 '*compress-threads': { 'type': 'uint8', 1084 'features': [ 'deprecated' ] }, 1085 '*compress-wait-thread': { 'type': 'bool', 1086 'features': [ 'deprecated' ] }, 1087 '*decompress-threads': { 'type': 'uint8', 1088 'features': [ 'deprecated' ] }, 1089 '*throttle-trigger-threshold': 'uint8', 1090 '*cpu-throttle-initial': 'uint8', 1091 '*cpu-throttle-increment': 'uint8', 1092 '*cpu-throttle-tailslow': 'bool', 1093 '*tls-creds': 'StrOrNull', 1094 '*tls-hostname': 'StrOrNull', 1095 '*tls-authz': 'StrOrNull', 1096 '*max-bandwidth': 'size', 1097 '*avail-switchover-bandwidth': 'size', 1098 '*downtime-limit': 'uint64', 1099 '*x-checkpoint-delay': { 'type': 'uint32', 1100 'features': [ 'unstable' ] }, 1101 '*multifd-channels': 'uint8', 1102 '*xbzrle-cache-size': 'size', 1103 '*max-postcopy-bandwidth': 'size', 1104 '*max-cpu-throttle': 'uint8', 1105 '*multifd-compression': 'MultiFDCompression', 1106 '*multifd-zlib-level': 'uint8', 1107 '*multifd-zstd-level': 'uint8', 1108 '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ], 1109 '*x-vcpu-dirty-limit-period': { 'type': 'uint64', 1110 'features': [ 'unstable' ] }, 1111 '*vcpu-dirty-limit': 'uint64', 1112 '*mode': 'MigMode', 1113 '*zero-page-detection': 'ZeroPageDetection'} } 1114 1115## 1116# @migrate-set-parameters: 1117# 1118# Set various migration parameters. 
1119# 1120# Since: 2.4 1121# 1122# Example: 1123# 1124# -> { "execute": "migrate-set-parameters", 1125# "arguments": { "multifd-channels": 5 } } 1126# <- { "return": {} } 1127## 1128{ 'command': 'migrate-set-parameters', 'boxed': true, 1129 'data': 'MigrateSetParameters' } 1130 1131## 1132# @MigrationParameters: 1133# 1134# The optional members aren't actually optional. 1135# 1136# @announce-initial: Initial delay (in milliseconds) before sending 1137# the first announce (Since 4.0) 1138# 1139# @announce-max: Maximum delay (in milliseconds) between packets in 1140# the announcement (Since 4.0) 1141# 1142# @announce-rounds: Number of self-announce packets sent after 1143# migration (Since 4.0) 1144# 1145# @announce-step: Increase in delay (in milliseconds) between 1146# subsequent packets in the announcement (Since 4.0) 1147# 1148# @compress-level: compression level 1149# 1150# @compress-threads: compression thread count 1151# 1152# @compress-wait-thread: Controls behavior when all compression 1153# threads are currently busy. If true (default), wait for a free 1154# compression thread to become available; otherwise, send the page 1155# uncompressed. (Since 3.1) 1156# 1157# @decompress-threads: decompression thread count 1158# 1159# @throttle-trigger-threshold: The ratio of bytes_dirty_period and 1160# bytes_xfer_period to trigger throttling. It is expressed as 1161# percentage. The default value is 50. (Since 5.0) 1162# 1163# @cpu-throttle-initial: Initial percentage of time guest cpus are 1164# throttled when migration auto-converge is activated. (Since 1165# 2.7) 1166# 1167# @cpu-throttle-increment: throttle percentage increase each time 1168# auto-converge detects that migration is not making progress. 1169# (Since 2.7) 1170# 1171# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage. At 1172# the tail stage of throttling, the Guest is very sensitive to CPU 1173# percentage while the @cpu-throttle-increment is excessive 1174# usually at tail stage.
If this parameter is true, we will 1175# compute the ideal CPU percentage used by the Guest, which may 1176# exactly make the dirty rate match the dirty rate threshold. 1177# Then we will choose a smaller throttle increment between the one 1178# specified by @cpu-throttle-increment and the one generated by 1179# ideal CPU percentage. Therefore, it is compatible with 1180# traditional throttling, meanwhile the throttle increment won't 1181# be excessive at tail stage. The default value is false. (Since 1182# 5.1) 1183# 1184# @tls-creds: ID of the 'tls-creds' object that provides credentials 1185# for establishing a TLS connection over the migration data 1186# channel. On the outgoing side of the migration, the credentials 1187# must be for a 'client' endpoint, while for the incoming side the 1188# credentials must be for a 'server' endpoint. An empty string 1189# means that QEMU will use plain text mode for migration, rather 1190# than TLS. (Since 2.7) 1191# 1192# Note: 2.8 omits empty @tls-creds instead. 1193# 1194# @tls-hostname: migration target's hostname for validating the 1195# server's x509 certificate identity. If empty, QEMU will use the 1196# hostname from the migration URI, if any. (Since 2.7) 1197# 1198# Note: 2.8 omits empty @tls-hostname instead. 1199# 1200# @tls-authz: ID of the 'authz' object subclass that provides access 1201# control checking of the TLS x509 certificate distinguished name. 1202# (Since 4.0) 1203# 1204# @max-bandwidth: maximum speed for migration, in bytes per second. 1205# (Since 2.8) 1206# 1207# @avail-switchover-bandwidth: to set the available bandwidth that 1208# migration can use during switchover phase. NOTE! This does not 1209# limit the bandwidth during switchover, but only for calculations 1210# when making decisions to switchover. By default, this value is 1211# zero, which means QEMU will estimate the bandwidth 1212# automatically.
This can be set when the estimated value is not 1213# accurate, while the user is able to guarantee such bandwidth is 1214# available when switching over. When specified correctly, this 1215# can make the switchover decision much more accurate. 1216# (Since 8.2) 1217# 1218# @downtime-limit: set maximum tolerated downtime for migration. 1219# maximum downtime in milliseconds (Since 2.8) 1220# 1221# @x-checkpoint-delay: the delay time between two COLO checkpoints. 1222# (Since 2.8) 1223# 1224# @multifd-channels: Number of channels used to migrate data in 1225# parallel. This is the same number as the number of sockets 1226# used for migration. The default value is 2 (since 4.0) 1227# 1228# @xbzrle-cache-size: cache size to be used by XBZRLE migration. It 1229# needs to be a multiple of the target page size and a power of 2 1230# (Since 2.11) 1231# 1232# @max-postcopy-bandwidth: Background transfer bandwidth during 1233# postcopy. Defaults to 0 (unlimited). In bytes per second. 1234# (Since 3.0) 1235# 1236# @max-cpu-throttle: maximum cpu throttle percentage. Defaults to 99. 1237# (Since 3.1) 1238# 1239# @multifd-compression: Which compression method to use. Defaults to 1240# none. (Since 5.0) 1241# 1242# @multifd-zlib-level: Set the compression level to be used in live 1243# migration, the compression level is an integer between 0 and 9, 1244# where 0 means no compression, 1 means the best compression 1245# speed, and 9 means best compression ratio which will consume 1246# more CPU. Defaults to 1. (Since 5.0) 1247# 1248# @multifd-zstd-level: Set the compression level to be used in live 1249# migration, the compression level is an integer between 0 and 20, 1250# where 0 means no compression, 1 means the best compression 1251# speed, and 20 means best compression ratio which will consume 1252# more CPU. Defaults to 1. (Since 5.0) 1253# 1254# @block-bitmap-mapping: Maps block nodes and bitmaps on them to 1255# aliases for the purpose of dirty bitmap migration.
Such aliases 1256# may for example be the corresponding names on the opposite site. 1257# The mapping must be one-to-one, but not necessarily complete: On 1258# the source, unmapped bitmaps and all bitmaps on unmapped nodes 1259# will be ignored. On the destination, encountering an unmapped 1260# alias in the incoming migration stream will result in a report, 1261# and all further bitmap migration data will then be discarded. 1262# Note that the destination does not know about bitmaps it does 1263# not receive, so there is no limitation or requirement regarding 1264# the number of bitmaps received, or how they are named, or on 1265# which nodes they are placed. By default (when this parameter 1266# has never been set), bitmap names are mapped to themselves. 1267# Nodes are mapped to their block device name if there is one, and 1268# to their node name otherwise. (Since 5.2) 1269# 1270# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty 1271# limit during live migration. Should be in the range 1 to 1272# 1000ms. Defaults to 1000ms. (Since 8.1) 1273# 1274# @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration. 1275# Defaults to 1. (Since 8.1) 1276# 1277# @mode: Migration mode. See description in @MigMode. Default is 1278# 'normal'. (Since 8.2) 1279# 1280# @zero-page-detection: Whether and how to detect zero pages. 1281# See description in @ZeroPageDetection. Default is 'multifd'. 1282# (since 9.0) 1283# 1284# Features: 1285# 1286# @deprecated: Members @compress-level, @compress-threads, 1287# @decompress-threads and @compress-wait-thread are deprecated 1288# because @compression is deprecated. 1289# 1290# @unstable: Members @x-checkpoint-delay and 1291# @x-vcpu-dirty-limit-period are experimental. 
1292# 1293# Since: 2.4 1294## 1295{ 'struct': 'MigrationParameters', 1296 'data': { '*announce-initial': 'size', 1297 '*announce-max': 'size', 1298 '*announce-rounds': 'size', 1299 '*announce-step': 'size', 1300 '*compress-level': { 'type': 'uint8', 1301 'features': [ 'deprecated' ] }, 1302 '*compress-threads': { 'type': 'uint8', 1303 'features': [ 'deprecated' ] }, 1304 '*compress-wait-thread': { 'type': 'bool', 1305 'features': [ 'deprecated' ] }, 1306 '*decompress-threads': { 'type': 'uint8', 1307 'features': [ 'deprecated' ] }, 1308 '*throttle-trigger-threshold': 'uint8', 1309 '*cpu-throttle-initial': 'uint8', 1310 '*cpu-throttle-increment': 'uint8', 1311 '*cpu-throttle-tailslow': 'bool', 1312 '*tls-creds': 'str', 1313 '*tls-hostname': 'str', 1314 '*tls-authz': 'str', 1315 '*max-bandwidth': 'size', 1316 '*avail-switchover-bandwidth': 'size', 1317 '*downtime-limit': 'uint64', 1318 '*x-checkpoint-delay': { 'type': 'uint32', 1319 'features': [ 'unstable' ] }, 1320 '*multifd-channels': 'uint8', 1321 '*xbzrle-cache-size': 'size', 1322 '*max-postcopy-bandwidth': 'size', 1323 '*max-cpu-throttle': 'uint8', 1324 '*multifd-compression': 'MultiFDCompression', 1325 '*multifd-zlib-level': 'uint8', 1326 '*multifd-zstd-level': 'uint8', 1327 '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ], 1328 '*x-vcpu-dirty-limit-period': { 'type': 'uint64', 1329 'features': [ 'unstable' ] }, 1330 '*vcpu-dirty-limit': 'uint64', 1331 '*mode': 'MigMode', 1332 '*zero-page-detection': 'ZeroPageDetection'} } 1333 1334## 1335# @query-migrate-parameters: 1336# 1337# Returns information about the current migration parameters 1338# 1339# Returns: @MigrationParameters 1340# 1341# Since: 2.4 1342# 1343# Example: 1344# 1345# -> { "execute": "query-migrate-parameters" } 1346# <- { "return": { 1347# "multifd-channels": 2, 1348# "cpu-throttle-increment": 10, 1349# "cpu-throttle-initial": 20, 1350# "max-bandwidth": 33554432, 1351# "downtime-limit": 300 1352# } 1353# } 1354## 1355{ 'command': 
'query-migrate-parameters', 1356 'returns': 'MigrationParameters' } 1357 1358## 1359# @migrate-start-postcopy: 1360# 1361# Followup to a migration command to switch the migration to postcopy 1362# mode. The postcopy-ram capability must be set on both source and 1363# destination before the original migration command. 1364# 1365# Since: 2.5 1366# 1367# Example: 1368# 1369# -> { "execute": "migrate-start-postcopy" } 1370# <- { "return": {} } 1371## 1372{ 'command': 'migrate-start-postcopy' } 1373 1374## 1375# @MIGRATION: 1376# 1377# Emitted when a migration event happens 1378# 1379# @status: @MigrationStatus describing the current migration status. 1380# 1381# Since: 2.4 1382# 1383# Example: 1384# 1385# <- {"timestamp": {"seconds": 1432121972, "microseconds": 744001}, 1386# "event": "MIGRATION", 1387# "data": {"status": "completed"} } 1388## 1389{ 'event': 'MIGRATION', 1390 'data': {'status': 'MigrationStatus'}} 1391 1392## 1393# @MIGRATION_PASS: 1394# 1395# Emitted from the source side of a migration at the start of each 1396# pass (when it syncs the dirty bitmap) 1397# 1398# @pass: An incrementing count (starting at 1 on the first pass) 1399# 1400# Since: 2.6 1401# 1402# Example: 1403# 1404# <- { "timestamp": {"seconds": 1449669631, "microseconds": 239225}, 1405# "event": "MIGRATION_PASS", "data": {"pass": 2} } 1406## 1407{ 'event': 'MIGRATION_PASS', 1408 'data': { 'pass': 'int' } } 1409 1410## 1411# @COLOMessage: 1412# 1413# The message transmission between Primary side and Secondary side. 1414# 1415# @checkpoint-ready: Secondary VM (SVM) is ready for checkpointing 1416# 1417# @checkpoint-request: Primary VM (PVM) tells SVM to prepare for 1418# checkpointing 1419# 1420# @checkpoint-reply: SVM gets PVM's checkpoint request 1421# 1422# @vmstate-send: VM's state will be sent by PVM. 1423# 1424# @vmstate-size: The total size of VMstate. 1425# 1426# @vmstate-received: VM's state has been received by SVM. 1427# 1428# @vmstate-loaded: VM's state has been loaded by SVM. 
1429# 1430# Since: 2.8 1431## 1432{ 'enum': 'COLOMessage', 1433 'data': [ 'checkpoint-ready', 'checkpoint-request', 'checkpoint-reply', 1434 'vmstate-send', 'vmstate-size', 'vmstate-received', 1435 'vmstate-loaded' ] } 1436 1437## 1438# @COLOMode: 1439# 1440# The COLO current mode. 1441# 1442# @none: COLO is disabled. 1443# 1444# @primary: COLO node in primary side. 1445# 1446# @secondary: COLO node in secondary side. 1447# 1448# Since: 2.8 1449## 1450{ 'enum': 'COLOMode', 1451 'data': [ 'none', 'primary', 'secondary'] } 1452 1453## 1454# @FailoverStatus: 1455# 1456# An enumeration of COLO failover status 1457# 1458# @none: no failover has ever happened 1459# 1460# @require: got failover requirement but not handled 1461# 1462# @active: in the process of doing failover 1463# 1464# @completed: finish the process of failover 1465# 1466# @relaunch: restart the failover process, from 'none' -> 'completed' 1467# (Since 2.9) 1468# 1469# Since: 2.8 1470## 1471{ 'enum': 'FailoverStatus', 1472 'data': [ 'none', 'require', 'active', 'completed', 'relaunch' ] } 1473 1474## 1475# @COLO_EXIT: 1476# 1477# Emitted when VM finishes COLO mode due to some errors happening or 1478# at the request of users. 1479# 1480# @mode: report COLO mode when COLO exited. 1481# 1482# @reason: describes the reason for the COLO exit. 1483# 1484# Since: 3.1 1485# 1486# Example: 1487# 1488# <- { "timestamp": {"seconds": 2032141960, "microseconds": 417172}, 1489# "event": "COLO_EXIT", "data": {"mode": "primary", "reason": "request" } } 1490## 1491{ 'event': 'COLO_EXIT', 1492 'data': {'mode': 'COLOMode', 'reason': 'COLOExitReason' } } 1493 1494## 1495# @COLOExitReason: 1496# 1497# The reason for a COLO exit. 1498# 1499# @none: failover has never happened. This state does not occur in 1500# the COLO_EXIT event, and is only visible in the result of 1501# query-colo-status. 1502# 1503# @request: COLO exit is due to an external request. 1504# 1505# @error: COLO exit is due to an internal error.
1506# 1507# @processing: COLO is currently handling a failover (since 4.0). 1508# 1509# Since: 3.1 1510## 1511{ 'enum': 'COLOExitReason', 1512 'data': [ 'none', 'request', 'error' , 'processing' ] } 1513 1514## 1515# @x-colo-lost-heartbeat: 1516# 1517# Tell qemu that heartbeat is lost, request it to do takeover 1518# procedures. If this command is sent to the PVM, the Primary side 1519# will exit COLO mode. If sent to the Secondary, the Secondary side 1520# will run failover work, then takes over server operation to become 1521# the service VM. 1522# 1523# Features: 1524# 1525# @unstable: This command is experimental. 1526# 1527# Since: 2.8 1528# 1529# Example: 1530# 1531# -> { "execute": "x-colo-lost-heartbeat" } 1532# <- { "return": {} } 1533## 1534{ 'command': 'x-colo-lost-heartbeat', 1535 'features': [ 'unstable' ], 1536 'if': 'CONFIG_REPLICATION' } 1537 1538## 1539# @migrate_cancel: 1540# 1541# Cancel the current executing migration process. 1542# 1543# Notes: This command succeeds even if there is no migration process 1544# running. 1545# 1546# Since: 0.14 1547# 1548# Example: 1549# 1550# -> { "execute": "migrate_cancel" } 1551# <- { "return": {} } 1552## 1553{ 'command': 'migrate_cancel' } 1554 1555## 1556# @migrate-continue: 1557# 1558# Continue migration when it's in a paused state. 1559# 1560# @state: The state the migration is currently expected to be in 1561# 1562# Since: 2.11 1563# 1564# Example: 1565# 1566# -> { "execute": "migrate-continue" , "arguments": 1567# { "state": "pre-switchover" } } 1568# <- { "return": {} } 1569## 1570{ 'command': 'migrate-continue', 'data': {'state': 'MigrationStatus'} } 1571 1572## 1573# @MigrationAddressType: 1574# 1575# The migration stream transport mechanisms. 1576# 1577# @socket: Migrate via socket. 1578# 1579# @exec: Direct the migration stream to another process. 1580# 1581# @rdma: Migrate via RDMA. 1582# 1583# @file: Direct the migration stream to a file. 
1584# 1585# Since: 8.2 1586## 1587{ 'enum': 'MigrationAddressType', 1588 'data': [ 'socket', 'exec', 'rdma', 'file' ] } 1589 1590## 1591# @FileMigrationArgs: 1592# 1593# @filename: The file to receive the migration stream 1594# 1595# @offset: The file offset where the migration stream will start 1596# 1597# Since: 8.2 1598## 1599{ 'struct': 'FileMigrationArgs', 1600 'data': { 'filename': 'str', 1601 'offset': 'uint64' } } 1602 1603## 1604# @MigrationExecCommand: 1605# 1606# @args: command (list head) and arguments to execute. 1607# 1608# Since: 8.2 1609## 1610{ 'struct': 'MigrationExecCommand', 1611 'data': {'args': [ 'str' ] } } 1612 1613## 1614# @MigrationAddress: 1615# 1616# Migration endpoint configuration. 1617# 1618# @transport: The migration stream transport mechanism 1619# 1620# Since: 8.2 1621## 1622{ 'union': 'MigrationAddress', 1623 'base': { 'transport' : 'MigrationAddressType'}, 1624 'discriminator': 'transport', 1625 'data': { 1626 'socket': 'SocketAddress', 1627 'exec': 'MigrationExecCommand', 1628 'rdma': 'InetSocketAddress', 1629 'file': 'FileMigrationArgs' } } 1630 1631## 1632# @MigrationChannelType: 1633# 1634# The migration channel-type request options. 1635# 1636# @main: Main outbound migration channel. 1637# 1638# Since: 8.1 1639## 1640{ 'enum': 'MigrationChannelType', 1641 'data': [ 'main' ] } 1642 1643## 1644# @MigrationChannel: 1645# 1646# Migration stream channel parameters. 1647# 1648# @channel-type: Channel type for transferring packet information. 1649# 1650# @addr: Migration endpoint configuration on destination interface. 1651# 1652# Since: 8.1 1653## 1654{ 'struct': 'MigrationChannel', 1655 'data': { 1656 'channel-type': 'MigrationChannelType', 1657 'addr': 'MigrationAddress' } } 1658 1659## 1660# @migrate: 1661# 1662# Migrates the current running guest to another Virtual Machine. 
1663# 1664# @uri: the Uniform Resource Identifier of the destination VM 1665# 1666# @channels: list of migration stream channels with each stream in the 1667# list connected to a destination interface endpoint. 1668# 1669# @detach: this argument exists only for compatibility reasons and is 1670# ignored by QEMU 1671# 1672# @resume: resume one paused migration, default "off". (since 3.0) 1673# 1674# Since: 0.14 1675# 1676# Notes: 1677# 1678# 1. The 'query-migrate' command should be used to check 1679# migration's progress and final result (this information is 1680# provided by the 'status' member) 1681# 1682# 2. All boolean arguments default to false 1683# 1684# 3. The user Monitor's "detach" argument is invalid in QMP and 1685# should not be used 1686# 1687# 4. The uri argument should have the Uniform Resource Identifier 1688# of default destination VM. This connection will be bound to 1689# default network. 1690# 1691# 5. For now, number of migration streams is restricted to one, 1692# i.e. number of items in 'channels' list is just 1. 1693# 1694# 6. The 'uri' and 'channels' arguments are mutually exclusive; 1695# exactly one of the two should be present. 
1696# 1697# Example: 1698# 1699# -> { "execute": "migrate", "arguments": { "uri": "tcp:0:4446" } } 1700# <- { "return": {} } 1701# 1702# -> { "execute": "migrate", 1703# "arguments": { 1704# "channels": [ { "channel-type": "main", 1705# "addr": { "transport": "socket", 1706# "type": "inet", 1707# "host": "10.12.34.9", 1708# "port": "1050" } } ] } } 1709# <- { "return": {} } 1710# 1711# -> { "execute": "migrate", 1712# "arguments": { 1713# "channels": [ { "channel-type": "main", 1714# "addr": { "transport": "exec", 1715# "args": [ "/bin/nc", "-p", "6000", 1716# "/some/sock" ] } } ] } } 1717# <- { "return": {} } 1718# 1719# -> { "execute": "migrate", 1720# "arguments": { 1721# "channels": [ { "channel-type": "main", 1722# "addr": { "transport": "rdma", 1723# "host": "10.12.34.9", 1724# "port": "1050" } } ] } } 1725# <- { "return": {} } 1726# 1727# -> { "execute": "migrate", 1728# "arguments": { 1729# "channels": [ { "channel-type": "main", 1730# "addr": { "transport": "file", 1731# "filename": "/tmp/migfile", 1732# "offset": "0x1000" } } ] } } 1733# <- { "return": {} } 1734# 1735## 1736{ 'command': 'migrate', 1737 'data': {'*uri': 'str', 1738 '*channels': [ 'MigrationChannel' ], 1739 '*detach': 'bool', '*resume': 'bool' } } 1740 1741## 1742# @migrate-incoming: 1743# 1744# Start an incoming migration, the qemu must have been started with 1745# -incoming defer 1746# 1747# @uri: The Uniform Resource Identifier identifying the source or 1748# address to listen on 1749# 1750# @channels: list of migration stream channels with each stream in the 1751# list connected to a destination interface endpoint. 1752# 1753# @exit-on-error: Exit on incoming migration failure. Default true. 1754# When set to false, the failure triggers a MIGRATION event, and 1755# error details could be retrieved with query-migrate. (since 9.1) 1756# 1757# Since: 2.3 1758# 1759# Notes: 1760# 1761# 1. 
It's a bad idea to use a string for the uri, but it needs to 1762# stay compatible with -incoming and the format of the uri is 1763# already exposed above libvirt. 1764# 1765# 2. QEMU must be started with -incoming defer to allow 1766# migrate-incoming to be used. 1767# 1768# 3. The uri format is the same as for -incoming 1769# 1770# 4. For now, number of migration streams is restricted to one, 1771# i.e. number of items in 'channels' list is just 1. 1772# 1773# 5. The 'uri' and 'channels' arguments are mutually exclusive; 1774# exactly one of the two should be present. 1775# 1776# Example: 1777# 1778# -> { "execute": "migrate-incoming", 1779# "arguments": { "uri": "tcp:0:4446" } } 1780# <- { "return": {} } 1781# 1782# -> { "execute": "migrate-incoming", 1783# "arguments": { 1784# "channels": [ { "channel-type": "main", 1785# "addr": { "transport": "socket", 1786# "type": "inet", 1787# "host": "10.12.34.9", 1788# "port": "1050" } } ] } } 1789# <- { "return": {} } 1790# 1791# -> { "execute": "migrate-incoming", 1792# "arguments": { 1793# "channels": [ { "channel-type": "main", 1794# "addr": { "transport": "exec", 1795# "args": [ "/bin/nc", "-p", "6000", 1796# "/some/sock" ] } } ] } } 1797# <- { "return": {} } 1798# 1799# -> { "execute": "migrate-incoming", 1800# "arguments": { 1801# "channels": [ { "channel-type": "main", 1802# "addr": { "transport": "rdma", 1803# "host": "10.12.34.9", 1804# "port": "1050" } } ] } } 1805# <- { "return": {} } 1806## 1807{ 'command': 'migrate-incoming', 1808 'data': {'*uri': 'str', 1809 '*channels': [ 'MigrationChannel' ], 1810 '*exit-on-error': 'bool' } } 1811 1812## 1813# @xen-save-devices-state: 1814# 1815# Save the state of all devices to file. The RAM and the block 1816# devices of the VM are not saved by this command. 1817# 1818# @filename: the file to save the state of the devices to as binary 1819# data. See xen-save-devices-state.txt for a description of the 1820# binary format. 
1821# 1822# @live: Optional argument to ask QEMU to treat this command as part 1823# of a live migration. Default to true. (since 2.11) 1824# 1825# Since: 1.1 1826# 1827# Example: 1828# 1829# -> { "execute": "xen-save-devices-state", 1830# "arguments": { "filename": "/tmp/save" } } 1831# <- { "return": {} } 1832## 1833{ 'command': 'xen-save-devices-state', 1834 'data': {'filename': 'str', '*live':'bool' } } 1835 1836## 1837# @xen-set-global-dirty-log: 1838# 1839# Enable or disable the global dirty log mode. 1840# 1841# @enable: true to enable, false to disable. 1842# 1843# Since: 1.3 1844# 1845# Example: 1846# 1847# -> { "execute": "xen-set-global-dirty-log", 1848# "arguments": { "enable": true } } 1849# <- { "return": {} } 1850## 1851{ 'command': 'xen-set-global-dirty-log', 'data': { 'enable': 'bool' } } 1852 1853## 1854# @xen-load-devices-state: 1855# 1856# Load the state of all devices from file. The RAM and the block 1857# devices of the VM are not loaded by this command. 1858# 1859# @filename: the file to load the state of the devices from as binary 1860# data. See xen-save-devices-state.txt for a description of the 1861# binary format. 1862# 1863# Since: 2.7 1864# 1865# Example: 1866# 1867# -> { "execute": "xen-load-devices-state", 1868# "arguments": { "filename": "/tmp/resume" } } 1869# <- { "return": {} } 1870## 1871{ 'command': 'xen-load-devices-state', 'data': {'filename': 'str'} } 1872 1873## 1874# @xen-set-replication: 1875# 1876# Enable or disable replication. 1877# 1878# @enable: true to enable, false to disable. 1879# 1880# @primary: true for primary or false for secondary. 1881# 1882# @failover: true to do failover, false to stop. Cannot be specified 1883# if 'enable' is true. Default value is false. 
1884# 1885# Example: 1886# 1887# -> { "execute": "xen-set-replication", 1888# "arguments": {"enable": true, "primary": false} } 1889# <- { "return": {} } 1890# 1891# Since: 2.9 1892## 1893{ 'command': 'xen-set-replication', 1894 'data': { 'enable': 'bool', 'primary': 'bool', '*failover': 'bool' }, 1895 'if': 'CONFIG_REPLICATION' } 1896 1897## 1898# @ReplicationStatus: 1899# 1900# The result format for 'query-xen-replication-status'. 1901# 1902# @error: true if an error happened, false if replication is normal. 1903# 1904# @desc: the human readable error description string, when @error is 1905# 'true'. 1906# 1907# Since: 2.9 1908## 1909{ 'struct': 'ReplicationStatus', 1910 'data': { 'error': 'bool', '*desc': 'str' }, 1911 'if': 'CONFIG_REPLICATION' } 1912 1913## 1914# @query-xen-replication-status: 1915# 1916# Query replication status while the vm is running. 1917# 1918# Returns: A @ReplicationStatus object showing the status. 1919# 1920# Example: 1921# 1922# -> { "execute": "query-xen-replication-status" } 1923# <- { "return": { "error": false } } 1924# 1925# Since: 2.9 1926## 1927{ 'command': 'query-xen-replication-status', 1928 'returns': 'ReplicationStatus', 1929 'if': 'CONFIG_REPLICATION' } 1930 1931## 1932# @xen-colo-do-checkpoint: 1933# 1934# Xen uses this command to notify replication to trigger a checkpoint. 1935# 1936# Example: 1937# 1938# -> { "execute": "xen-colo-do-checkpoint" } 1939# <- { "return": {} } 1940# 1941# Since: 2.9 1942## 1943{ 'command': 'xen-colo-do-checkpoint', 1944 'if': 'CONFIG_REPLICATION' } 1945 1946## 1947# @COLOStatus: 1948# 1949# The result format for 'query-colo-status'. 1950# 1951# @mode: COLO running mode. If COLO is running, this field will 1952# return 'primary' or 'secondary'. 1953# 1954# @last-mode: COLO last running mode. If COLO is running, this field 1955# will return same like mode field, after failover we can use this 1956# field to get last colo mode. 
(since 4.0) 1957# 1958# @reason: describes the reason for the COLO exit. 1959# 1960# Since: 3.1 1961## 1962{ 'struct': 'COLOStatus', 1963 'data': { 'mode': 'COLOMode', 'last-mode': 'COLOMode', 1964 'reason': 'COLOExitReason' }, 1965 'if': 'CONFIG_REPLICATION' } 1966 1967## 1968# @query-colo-status: 1969# 1970# Query COLO status while the vm is running. 1971# 1972# Returns: A @COLOStatus object showing the status. 1973# 1974# Example: 1975# 1976# -> { "execute": "query-colo-status" } 1977# <- { "return": { "mode": "primary", "last-mode": "none", "reason": "request" } } 1978# 1979# Since: 3.1 1980## 1981{ 'command': 'query-colo-status', 1982 'returns': 'COLOStatus', 1983 'if': 'CONFIG_REPLICATION' } 1984 1985## 1986# @migrate-recover: 1987# 1988# Provide a recovery migration stream URI. 1989# 1990# @uri: the URI to be used for the recovery of migration stream. 1991# 1992# Example: 1993# 1994# -> { "execute": "migrate-recover", 1995# "arguments": { "uri": "tcp:192.168.1.200:12345" } } 1996# <- { "return": {} } 1997# 1998# Since: 3.0 1999## 2000{ 'command': 'migrate-recover', 2001 'data': { 'uri': 'str' }, 2002 'allow-oob': true } 2003 2004## 2005# @migrate-pause: 2006# 2007# Pause a migration. Currently it only supports postcopy. 2008# 2009# Example: 2010# 2011# -> { "execute": "migrate-pause" } 2012# <- { "return": {} } 2013# 2014# Since: 3.0 2015## 2016{ 'command': 'migrate-pause', 'allow-oob': true } 2017 2018## 2019# @UNPLUG_PRIMARY: 2020# 2021# Emitted from source side of a migration when migration state is 2022# WAIT_UNPLUG. Device was unplugged by guest operating system. Device 2023# resources in QEMU are kept on standby to be able to re-plug it in 2024# case of migration failure. 
2025# 2026# @device-id: QEMU device id of the unplugged device 2027# 2028# Since: 4.2 2029# 2030# Example: 2031# 2032# <- { "event": "UNPLUG_PRIMARY", 2033# "data": { "device-id": "hostdev0" }, 2034# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } } 2035## 2036{ 'event': 'UNPLUG_PRIMARY', 2037 'data': { 'device-id': 'str' } } 2038 2039## 2040# @DirtyRateVcpu: 2041# 2042# Dirty rate of vcpu. 2043# 2044# @id: vcpu index. 2045# 2046# @dirty-rate: dirty rate. 2047# 2048# Since: 6.2 2049## 2050{ 'struct': 'DirtyRateVcpu', 2051 'data': { 'id': 'int', 'dirty-rate': 'int64' } } 2052 2053## 2054# @DirtyRateStatus: 2055# 2056# Dirty page rate measurement status. 2057# 2058# @unstarted: measuring thread has not been started yet 2059# 2060# @measuring: measuring thread is running 2061# 2062# @measured: dirty page rate is measured and the results are available 2063# 2064# Since: 5.2 2065## 2066{ 'enum': 'DirtyRateStatus', 2067 'data': [ 'unstarted', 'measuring', 'measured'] } 2068 2069## 2070# @DirtyRateMeasureMode: 2071# 2072# Method used to measure dirty page rate. Differences between 2073# available methods are explained in @calc-dirty-rate. 2074# 2075# @page-sampling: use page sampling 2076# 2077# @dirty-ring: use dirty ring 2078# 2079# @dirty-bitmap: use dirty bitmap 2080# 2081# Since: 6.2 2082## 2083{ 'enum': 'DirtyRateMeasureMode', 2084 'data': ['page-sampling', 'dirty-ring', 'dirty-bitmap'] } 2085 2086## 2087# @TimeUnit: 2088# 2089# Specifies unit in which time-related value is specified. 2090# 2091# @second: value is in seconds 2092# 2093# @millisecond: value is in milliseconds 2094# 2095# Since: 8.2 2096## 2097{ 'enum': 'TimeUnit', 2098 'data': ['second', 'millisecond'] } 2099 2100## 2101# @DirtyRateInfo: 2102# 2103# Information about measured dirty page rate. 2104# 2105# @dirty-rate: an estimate of the dirty page rate of the VM in units 2106# of MiB/s. Value is present only when @status is 'measured'. 
2107# 2108# @status: current status of dirty page rate measurements 2109# 2110# @start-time: start time in units of second for calculation 2111# 2112# @calc-time: time period for which dirty page rate was measured, 2113# expressed and rounded down to @calc-time-unit. 2114# 2115# @calc-time-unit: time unit of @calc-time (Since 8.2) 2116# 2117# @sample-pages: number of sampled pages per GiB of guest memory. 2118# Valid only in page-sampling mode (Since 6.1) 2119# 2120# @mode: mode that was used to measure dirty page rate (Since 6.2) 2121# 2122# @vcpu-dirty-rate: dirty rate for each vCPU if dirty-ring mode was 2123# specified (Since 6.2) 2124# 2125# Since: 5.2 2126## 2127{ 'struct': 'DirtyRateInfo', 2128 'data': {'*dirty-rate': 'int64', 2129 'status': 'DirtyRateStatus', 2130 'start-time': 'int64', 2131 'calc-time': 'int64', 2132 'calc-time-unit': 'TimeUnit', 2133 'sample-pages': 'uint64', 2134 'mode': 'DirtyRateMeasureMode', 2135 '*vcpu-dirty-rate': [ 'DirtyRateVcpu' ] } } 2136 2137## 2138# @calc-dirty-rate: 2139# 2140# Start measuring dirty page rate of the VM. Results can be retrieved 2141# with @query-dirty-rate after measurements are completed. 2142# 2143# Dirty page rate is the number of pages changed in a given time 2144# period expressed in MiB/s. The following methods of calculation are 2145# available: 2146# 2147# 1. In page sampling mode, a random subset of pages are selected and 2148# hashed twice: once at the beginning of measurement time period, 2149# and once again at the end. If two hashes for some page are 2150# different, the page is counted as changed. Since this method 2151# relies on sampling and hashing, calculated dirty page rate is 2152# only an estimate of its true value. Increasing @sample-pages 2153# improves estimation quality at the cost of higher computational 2154# overhead. 2155# 2156# 2. Dirty bitmap mode captures writes to memory (for example by 2157# temporarily revoking write access to all pages) and counting page 2158# faults. 
Information about modified pages is collected into a 2159# bitmap, where each bit corresponds to one guest page. This mode 2160# requires that KVM accelerator property "dirty-ring-size" is *not* 2161# set. 2162# 2163# 3. Dirty ring mode is similar to dirty bitmap mode, but the 2164# information about modified pages is collected into ring buffer. 2165# This mode tracks page modification per each vCPU separately. It 2166# requires that KVM accelerator property "dirty-ring-size" is set. 2167# 2168# @calc-time: time period for which dirty page rate is calculated. 2169# By default it is specified in seconds, but the unit can be set 2170# explicitly with @calc-time-unit. Note that larger @calc-time 2171# values will typically result in smaller dirty page rates because 2172# page dirtying is a one-time event. Once some page is counted 2173# as dirty during @calc-time period, further writes to this page 2174# will not increase dirty page rate anymore. 2175# 2176# @calc-time-unit: time unit in which @calc-time is specified. 2177# By default it is seconds. (Since 8.2) 2178# 2179# @sample-pages: number of sampled pages per each GiB of guest memory. 2180# Default value is 512. For 4KiB guest pages this corresponds to 2181# sampling ratio of 0.2%. This argument is used only in page 2182# sampling mode. (Since 6.1) 2183# 2184# @mode: mechanism for tracking dirty pages. Default value is 2185# 'page-sampling'. Others are 'dirty-bitmap' and 'dirty-ring'. 
2186# (Since 6.1) 2187# 2188# Since: 5.2 2189# 2190# Example: 2191# 2192# -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 1, 2193# "sample-pages": 512} } 2194# <- { "return": {} } 2195# 2196# Measure dirty rate using dirty bitmap for 500 milliseconds: 2197# 2198# -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 500, 2199# "calc-time-unit": "millisecond", "mode": "dirty-bitmap"} } 2200# 2201# <- { "return": {} } 2202## 2203{ 'command': 'calc-dirty-rate', 'data': {'calc-time': 'int64', 2204 '*calc-time-unit': 'TimeUnit', 2205 '*sample-pages': 'int', 2206 '*mode': 'DirtyRateMeasureMode'} } 2207 2208## 2209# @query-dirty-rate: 2210# 2211# Query results of the most recent invocation of @calc-dirty-rate. 2212# 2213# @calc-time-unit: time unit in which to report calculation time. 2214# By default it is reported in seconds. (Since 8.2) 2215# 2216# Since: 5.2 2217# 2218# Examples: 2219# 2220# 1. Measurement is in progress: 2221# 2222# <- {"return": {"status": "measuring", "sample-pages": 512, 2223# "mode": "page-sampling", "start-time": 1693900454, "calc-time": 10, 2224# "calc-time-unit": "second"}} 2225# 2226# 2. Measurement has been completed: 2227# 2228# <- {"return": {"status": "measured", "sample-pages": 512, "dirty-rate": 108, 2229# "mode": "page-sampling", "start-time": 1693900454, "calc-time": 10, 2230# "calc-time-unit": "second"}} 2231## 2232{ 'command': 'query-dirty-rate', 'data': {'*calc-time-unit': 'TimeUnit' }, 2233 'returns': 'DirtyRateInfo' } 2234 2235## 2236# @DirtyLimitInfo: 2237# 2238# Dirty page rate limit information of a virtual CPU. 2239# 2240# @cpu-index: index of a virtual CPU. 2241# 2242# @limit-rate: upper limit of dirty page rate (MB/s) for a virtual 2243# CPU, 0 means unlimited. 2244# 2245# @current-rate: current dirty page rate (MB/s) for a virtual CPU. 
2246# 2247# Since: 7.1 2248## 2249{ 'struct': 'DirtyLimitInfo', 2250 'data': { 'cpu-index': 'int', 2251 'limit-rate': 'uint64', 2252 'current-rate': 'uint64' } } 2253 2254## 2255# @set-vcpu-dirty-limit: 2256# 2257# Set the upper limit of dirty page rate for virtual CPUs. 2258# 2259# Requires KVM with accelerator property "dirty-ring-size" set. A 2260# virtual CPU's dirty page rate is a measure of its memory load. To 2261# observe dirty page rates, use @calc-dirty-rate. 2262# 2263# @cpu-index: index of a virtual CPU, default is all. 2264# 2265# @dirty-rate: upper limit of dirty page rate (MB/s) for virtual CPUs. 2266# 2267# Since: 7.1 2268# 2269# Example: 2270# 2271# -> {"execute": "set-vcpu-dirty-limit", 2272# "arguments": { "dirty-rate": 200, 2273# "cpu-index": 1 } } 2274# <- { "return": {} } 2275## 2276{ 'command': 'set-vcpu-dirty-limit', 2277 'data': { '*cpu-index': 'int', 2278 'dirty-rate': 'uint64' } } 2279 2280## 2281# @cancel-vcpu-dirty-limit: 2282# 2283# Cancel the upper limit of dirty page rate for virtual CPUs. 2284# 2285# Cancel the dirty page limit for the vCPU which has been set with 2286# set-vcpu-dirty-limit command. Note that this command requires 2287# support from dirty ring, same as the "set-vcpu-dirty-limit". 2288# 2289# @cpu-index: index of a virtual CPU, default is all. 2290# 2291# Since: 7.1 2292# 2293# Example: 2294# 2295# -> {"execute": "cancel-vcpu-dirty-limit", 2296# "arguments": { "cpu-index": 1 } } 2297# <- { "return": {} } 2298## 2299{ 'command': 'cancel-vcpu-dirty-limit', 2300 'data': { '*cpu-index': 'int'} } 2301 2302## 2303# @query-vcpu-dirty-limit: 2304# 2305# Returns information about virtual CPU dirty page rate limits, if 2306# any. 
2307# 2308# Since: 7.1 2309# 2310# Example: 2311# 2312# -> {"execute": "query-vcpu-dirty-limit"} 2313# <- {"return": [ 2314# { "limit-rate": 60, "current-rate": 3, "cpu-index": 0}, 2315# { "limit-rate": 60, "current-rate": 3, "cpu-index": 1}]} 2316## 2317{ 'command': 'query-vcpu-dirty-limit', 2318 'returns': [ 'DirtyLimitInfo' ] } 2319 2320## 2321# @MigrationThreadInfo: 2322# 2323# Information about migration threads 2324# 2325# @name: the name of migration thread 2326# 2327# @thread-id: ID of the underlying host thread 2328# 2329# Since: 7.2 2330## 2331{ 'struct': 'MigrationThreadInfo', 2332 'data': {'name': 'str', 2333 'thread-id': 'int'} } 2334 2335## 2336# @query-migrationthreads: 2337# 2338# Returns information about migration threads 2339# 2340# Returns: @MigrationThreadInfo 2341# 2342# Since: 7.2 2343## 2344{ 'command': 'query-migrationthreads', 2345 'returns': ['MigrationThreadInfo'] } 2346 2347## 2348# @snapshot-save: 2349# 2350# Save a VM snapshot 2351# 2352# @job-id: identifier for the newly created job 2353# 2354# @tag: name of the snapshot to create 2355# 2356# @vmstate: block device node name to save vmstate to 2357# 2358# @devices: list of block device node names to save a snapshot to 2359# 2360# Applications should not assume that the snapshot save is complete 2361# when this command returns. The job commands / events must be used 2362# to determine completion and to fetch details of any errors that 2363# arise. 2364# 2365# Note that execution of the guest CPUs may be stopped during the time 2366# it takes to save the snapshot. A future version of QEMU may ensure 2367# CPUs are executing continuously. 2368# 2369# It is strongly recommended that @devices contain all writable block 2370# device nodes if a consistent snapshot is required. 
2371# 2372# If @tag already exists, an error will be reported 2373# 2374# Example: 2375# 2376# -> { "execute": "snapshot-save", 2377# "arguments": { 2378# "job-id": "snapsave0", 2379# "tag": "my-snap", 2380# "vmstate": "disk0", 2381# "devices": ["disk0", "disk1"] 2382# } 2383# } 2384# <- { "return": { } } 2385# <- {"event": "JOB_STATUS_CHANGE", 2386# "timestamp": {"seconds": 1432121972, "microseconds": 744001}, 2387# "data": {"status": "created", "id": "snapsave0"}} 2388# <- {"event": "JOB_STATUS_CHANGE", 2389# "timestamp": {"seconds": 1432122172, "microseconds": 744001}, 2390# "data": {"status": "running", "id": "snapsave0"}} 2391# <- {"event": "STOP", 2392# "timestamp": {"seconds": 1432122372, "microseconds": 744001} } 2393# <- {"event": "RESUME", 2394# "timestamp": {"seconds": 1432122572, "microseconds": 744001} } 2395# <- {"event": "JOB_STATUS_CHANGE", 2396# "timestamp": {"seconds": 1432122772, "microseconds": 744001}, 2397# "data": {"status": "waiting", "id": "snapsave0"}} 2398# <- {"event": "JOB_STATUS_CHANGE", 2399# "timestamp": {"seconds": 1432122972, "microseconds": 744001}, 2400# "data": {"status": "pending", "id": "snapsave0"}} 2401# <- {"event": "JOB_STATUS_CHANGE", 2402# "timestamp": {"seconds": 1432123172, "microseconds": 744001}, 2403# "data": {"status": "concluded", "id": "snapsave0"}} 2404# -> {"execute": "query-jobs"} 2405# <- {"return": [{"current-progress": 1, 2406# "status": "concluded", 2407# "total-progress": 1, 2408# "type": "snapshot-save", 2409# "id": "snapsave0"}]} 2410# 2411# Since: 6.0 2412## 2413{ 'command': 'snapshot-save', 2414 'data': { 'job-id': 'str', 2415 'tag': 'str', 2416 'vmstate': 'str', 2417 'devices': ['str'] } } 2418 2419## 2420# @snapshot-load: 2421# 2422# Load a VM snapshot 2423# 2424# @job-id: identifier for the newly created job 2425# 2426# @tag: name of the snapshot to load. 
2427# 2428# @vmstate: block device node name to load vmstate from 2429# 2430# @devices: list of block device node names to load a snapshot from 2431# 2432# Applications should not assume that the snapshot load is complete 2433# when this command returns. The job commands / events must be used 2434# to determine completion and to fetch details of any errors that 2435# arise. 2436# 2437# Note that execution of the guest CPUs will be stopped during the 2438# time it takes to load the snapshot. 2439# 2440# It is strongly recommended that @devices contain all writable block 2441# device nodes that can have changed since the original @snapshot-save 2442# command execution. 2443# 2444# Example: 2445# 2446# -> { "execute": "snapshot-load", 2447# "arguments": { 2448# "job-id": "snapload0", 2449# "tag": "my-snap", 2450# "vmstate": "disk0", 2451# "devices": ["disk0", "disk1"] 2452# } 2453# } 2454# <- { "return": { } } 2455# <- {"event": "JOB_STATUS_CHANGE", 2456# "timestamp": {"seconds": 1472124172, "microseconds": 744001}, 2457# "data": {"status": "created", "id": "snapload0"}} 2458# <- {"event": "JOB_STATUS_CHANGE", 2459# "timestamp": {"seconds": 1472125172, "microseconds": 744001}, 2460# "data": {"status": "running", "id": "snapload0"}} 2461# <- {"event": "STOP", 2462# "timestamp": {"seconds": 1472125472, "microseconds": 744001} } 2463# <- {"event": "RESUME", 2464# "timestamp": {"seconds": 1472125872, "microseconds": 744001} } 2465# <- {"event": "JOB_STATUS_CHANGE", 2466# "timestamp": {"seconds": 1472126172, "microseconds": 744001}, 2467# "data": {"status": "waiting", "id": "snapload0"}} 2468# <- {"event": "JOB_STATUS_CHANGE", 2469# "timestamp": {"seconds": 1472127172, "microseconds": 744001}, 2470# "data": {"status": "pending", "id": "snapload0"}} 2471# <- {"event": "JOB_STATUS_CHANGE", 2472# "timestamp": {"seconds": 1472128172, "microseconds": 744001}, 2473# "data": {"status": "concluded", "id": "snapload0"}} 2474# -> {"execute": "query-jobs"} 2475# <- {"return": 
[{"current-progress": 1, 2476# "status": "concluded", 2477# "total-progress": 1, 2478# "type": "snapshot-load", 2479# "id": "snapload0"}]} 2480# 2481# Since: 6.0 2482## 2483{ 'command': 'snapshot-load', 2484 'data': { 'job-id': 'str', 2485 'tag': 'str', 2486 'vmstate': 'str', 2487 'devices': ['str'] } } 2488 2489## 2490# @snapshot-delete: 2491# 2492# Delete a VM snapshot 2493# 2494# @job-id: identifier for the newly created job 2495# 2496# @tag: name of the snapshot to delete. 2497# 2498# @devices: list of block device node names to delete a snapshot from 2499# 2500# Applications should not assume that the snapshot delete is complete 2501# when this command returns. The job commands / events must be used 2502# to determine completion and to fetch details of any errors that 2503# arise. 2504# 2505# Example: 2506# 2507# -> { "execute": "snapshot-delete", 2508# "arguments": { 2509# "job-id": "snapdelete0", 2510# "tag": "my-snap", 2511# "devices": ["disk0", "disk1"] 2512# } 2513# } 2514# <- { "return": { } } 2515# <- {"event": "JOB_STATUS_CHANGE", 2516# "timestamp": {"seconds": 1442124172, "microseconds": 744001}, 2517# "data": {"status": "created", "id": "snapdelete0"}} 2518# <- {"event": "JOB_STATUS_CHANGE", 2519# "timestamp": {"seconds": 1442125172, "microseconds": 744001}, 2520# "data": {"status": "running", "id": "snapdelete0"}} 2521# <- {"event": "JOB_STATUS_CHANGE", 2522# "timestamp": {"seconds": 1442126172, "microseconds": 744001}, 2523# "data": {"status": "waiting", "id": "snapdelete0"}} 2524# <- {"event": "JOB_STATUS_CHANGE", 2525# "timestamp": {"seconds": 1442127172, "microseconds": 744001}, 2526# "data": {"status": "pending", "id": "snapdelete0"}} 2527# <- {"event": "JOB_STATUS_CHANGE", 2528# "timestamp": {"seconds": 1442128172, "microseconds": 744001}, 2529# "data": {"status": "concluded", "id": "snapdelete0"}} 2530# -> {"execute": "query-jobs"} 2531# <- {"return": [{"current-progress": 1, 2532# "status": "concluded", 2533# "total-progress": 1, 2534# 
"type": "snapshot-delete", 2535# "id": "snapdelete0"}]} 2536# 2537# Since: 6.0 2538## 2539{ 'command': 'snapshot-delete', 2540 'data': { 'job-id': 'str', 2541 'tag': 'str', 2542 'devices': ['str'] } } 2543