1# -*- Mode: Python -*- 2# vim: filetype=python 3# 4 5## 6# = Migration 7## 8 9{ 'include': 'common.json' } 10{ 'include': 'sockets.json' } 11 12## 13# @MigrationStats: 14# 15# Detailed migration status. 16# 17# @transferred: amount of bytes already transferred to the target VM 18# 19# @remaining: amount of bytes remaining to be transferred to the 20# target VM 21# 22# @total: total amount of bytes involved in the migration process 23# 24# @duplicate: number of duplicate (zero) pages (since 1.2) 25# 26# @normal: number of normal pages (since 1.2) 27# 28# @normal-bytes: number of normal bytes sent (since 1.2) 29# 30# @dirty-pages-rate: number of pages dirtied by second by the guest 31# (since 1.3) 32# 33# @mbps: throughput in megabits/sec. (since 1.6) 34# 35# @dirty-sync-count: number of times that dirty ram was synchronized 36# (since 2.1) 37# 38# @postcopy-requests: The number of page requests received from the 39# destination (since 2.7) 40# 41# @page-size: The number of bytes per page for the various page-based 42# statistics (since 2.10) 43# 44# @multifd-bytes: The number of bytes sent through multifd (since 3.0) 45# 46# @pages-per-second: the number of memory pages transferred per second 47# (Since 4.0) 48# 49# @precopy-bytes: The number of bytes sent in the pre-copy phase 50# (since 7.0). 51# 52# @downtime-bytes: The number of bytes sent while the guest is paused 53# (since 7.0). 54# 55# @postcopy-bytes: The number of bytes sent during the post-copy phase 56# (since 7.0). 57# 58# @dirty-sync-missed-zero-copy: Number of times dirty RAM 59# synchronization could not avoid copying dirty pages. This is 60# between 0 and @dirty-sync-count * @multifd-channels. (since 61# 7.1) 62# 63# Since: 0.14 64## 65{ 'struct': 'MigrationStats', 66 'data': {'transferred': 'int', 'remaining': 'int', 'total': 'int' , 67 'duplicate': 'int', 68 'normal': 'int', 69 'normal-bytes': 'int', 'dirty-pages-rate': 'int', 70 'mbps': 'number', 'dirty-sync-count': 'int', 71 'postcopy-requests': 'int', 'page-size': 'int', 72 'multifd-bytes': 'uint64', 'pages-per-second': 'uint64', 73 'precopy-bytes': 'uint64', 'downtime-bytes': 'uint64', 74 'postcopy-bytes': 'uint64', 75 'dirty-sync-missed-zero-copy': 'uint64' } } 76 77## 78# @XBZRLECacheStats: 79# 80# Detailed XBZRLE migration cache statistics 81# 82# @cache-size: XBZRLE cache size 83# 84# @bytes: amount of bytes already transferred to the target VM 85# 86# @pages: amount of pages transferred to the target VM 87# 88# @cache-miss: number of cache miss 89# 90# @cache-miss-rate: rate of cache miss (since 2.1) 91# 92# @encoding-rate: rate of encoded bytes (since 5.1) 93# 94# @overflow: number of overflows 95# 96# Since: 1.2 97## 98{ 'struct': 'XBZRLECacheStats', 99 'data': {'cache-size': 'size', 'bytes': 'int', 'pages': 'int', 100 'cache-miss': 'int', 'cache-miss-rate': 'number', 101 'encoding-rate': 'number', 'overflow': 'int' } } 102 103## 104# @CompressionStats: 105# 106# Detailed migration compression statistics 107# 108# @pages: amount of pages compressed and transferred to the target VM 109# 110# @busy: count of times that no free thread was available to compress 111# data 112# 113# @busy-rate: rate of thread busy 114# 115# @compressed-size: amount of bytes after compression 116# 117# @compression-rate: rate of compressed size 118# 119# Since: 3.1 120## 121{ 'struct': 'CompressionStats', 122 'data': {'pages': 'int', 'busy': 'int', 'busy-rate': 'number', 123 'compressed-size': 'int', 'compression-rate': 'number' } } 124 125## 126# @MigrationStatus: 127# 128# An 
enumeration of migration status. 129# 130# @none: no migration has ever happened. 131# 132# @setup: migration process has been initiated. 133# 134# @cancelling: in the process of cancelling migration. 135# 136# @cancelled: cancelling migration is finished. 137# 138# @active: in the process of doing migration. 139# 140# @postcopy-active: like active, but now in postcopy mode. (since 141# 2.5) 142# 143# @postcopy-paused: during postcopy but paused. (since 3.0) 144# 145# @postcopy-recover-setup: setup phase for a postcopy recovery 146# process, preparing for a recovery phase to start. (since 9.1) 147# 148# @postcopy-recover: trying to recover from a paused postcopy. (since 149# 3.0) 150# 151# @completed: migration is finished. 152# 153# @failed: some error occurred during migration process. 154# 155# @colo: VM is in the process of fault tolerance, VM can not get into 156# this state unless colo capability is enabled for migration. 157# (since 2.8) 158# 159# @pre-switchover: Paused before device serialisation. (since 2.11) 160# 161# @device: During device serialisation when pause-before-switchover is 162# enabled (since 2.11) 163# 164# @wait-unplug: wait for device unplug request by guest OS to be 165# completed. (since 4.2) 166# 167# Since: 2.3 168## 169{ 'enum': 'MigrationStatus', 170 'data': [ 'none', 'setup', 'cancelling', 'cancelled', 171 'active', 'postcopy-active', 'postcopy-paused', 172 'postcopy-recover-setup', 173 'postcopy-recover', 'completed', 'failed', 'colo', 174 'pre-switchover', 'device', 'wait-unplug' ] } 175## 176# @VfioStats: 177# 178# Detailed VFIO devices migration statistics 179# 180# @transferred: amount of bytes transferred to the target VM by VFIO 181# devices 182# 183# Since: 5.2 184## 185{ 'struct': 'VfioStats', 186 'data': {'transferred': 'int' } } 187 188## 189# @MigrationInfo: 190# 191# Information about current migration process. 192# 193# @status: @MigrationStatus describing the current migration status. 194# If this field is not returned, no migration process has been 195# initiated 196# 197# @ram: @MigrationStats containing detailed migration status, only 198# returned if status is 'active' or 'completed'(since 1.2) 199# 200# @xbzrle-cache: @XBZRLECacheStats containing detailed XBZRLE 201# migration statistics, only returned if XBZRLE feature is on and 202# status is 'active' or 'completed' (since 1.2) 203# 204# @total-time: total amount of milliseconds since migration started. 205# If migration has ended, it returns the total migration time. 206# (since 1.2) 207# 208# @downtime: only present when migration finishes correctly total 209# downtime in milliseconds for the guest. (since 1.3) 210# 211# @expected-downtime: only present while migration is active expected 212# downtime in milliseconds for the guest in last walk of the dirty 213# bitmap. (since 1.3) 214# 215# @setup-time: amount of setup time in milliseconds *before* the 216# iterations begin but *after* the QMP command is issued. This is 217# designed to provide an accounting of any activities (such as 218# RDMA pinning) which may be expensive, but do not actually occur 219# during the iterative migration rounds themselves. (since 1.6) 220# 221# @cpu-throttle-percentage: percentage of time guest cpus are being 222# throttled during auto-converge. This is only present when 223# auto-converge has started throttling guest cpus. (Since 2.7) 224# 225# @error-desc: the human readable error description string. Clients 226# should not attempt to parse the error strings. 
(Since 2.7) 227# 228# @postcopy-blocktime: total time when all vCPU were blocked during 229# postcopy live migration. This is only present when the 230# postcopy-blocktime migration capability is enabled. (Since 3.0) 231# 232# @postcopy-vcpu-blocktime: list of the postcopy blocktime per vCPU. 233# This is only present when the postcopy-blocktime migration 234# capability is enabled. (Since 3.0) 235# 236# @socket-address: Only used for tcp, to know what the real port is 237# (Since 4.0) 238# 239# @vfio: @VfioStats containing detailed VFIO devices migration 240# statistics, only returned if VFIO device is present, migration 241# is supported by all VFIO devices and status is 'active' or 242# 'completed' (since 5.2) 243# 244# @blocked-reasons: A list of reasons an outgoing migration is 245# blocked. Present and non-empty when migration is blocked. 246# (since 6.0) 247# 248# @dirty-limit-throttle-time-per-round: Maximum throttle time (in 249# microseconds) of virtual CPUs each dirty ring full round, which 250# shows how MigrationCapability dirty-limit affects the guest 251# during live migration. (Since 8.1) 252# 253# @dirty-limit-ring-full-time: Estimated average dirty ring full time 254# (in microseconds) for each dirty ring full round. The value 255# equals the dirty ring memory size divided by the average dirty 256# page rate of the virtual CPU, which can be used to observe the 257# average memory load of the virtual CPU indirectly. Note that 258# zero means guest doesn't dirty memory. (Since 8.1) 259# 260# Since: 0.14 261## 262{ 'struct': 'MigrationInfo', 263 'data': {'*status': 'MigrationStatus', '*ram': 'MigrationStats', 264 '*vfio': 'VfioStats', 265 '*xbzrle-cache': 'XBZRLECacheStats', 266 '*total-time': 'int', 267 '*expected-downtime': 'int', 268 '*downtime': 'int', 269 '*setup-time': 'int', 270 '*cpu-throttle-percentage': 'int', 271 '*error-desc': 'str', 272 '*blocked-reasons': ['str'], 273 '*postcopy-blocktime': 'uint32', 274 '*postcopy-vcpu-blocktime': ['uint32'], 275 '*socket-address': ['SocketAddress'], 276 '*dirty-limit-throttle-time-per-round': 'uint64', 277 '*dirty-limit-ring-full-time': 'uint64'} } 278 279## 280# @query-migrate: 281# 282# Returns information about current migration process. If migration 283# is active there will be another json-object with RAM migration 284# status. 285# 286# Returns: @MigrationInfo 287# 288# Since: 0.14 289# 290# .. qmp-example:: 291# :title: Before the first migration 292# 293# -> { "execute": "query-migrate" } 294# <- { "return": {} } 295# 296# .. qmp-example:: 297# :title: Migration is done and has succeeded 298# 299# -> { "execute": "query-migrate" } 300# <- { "return": { 301# "status": "completed", 302# "total-time":12345, 303# "setup-time":12345, 304# "downtime":12345, 305# "ram":{ 306# "transferred":123, 307# "remaining":123, 308# "total":246, 309# "duplicate":123, 310# "normal":123, 311# "normal-bytes":123456, 312# "dirty-sync-count":15 313# } 314# } 315# } 316# 317# .. qmp-example:: 318# :title: Migration is done and has failed 319# 320# -> { "execute": "query-migrate" } 321# <- { "return": { "status": "failed" } } 322# 323# .. 
qmp-example:: 324# :title: Migration is being performed 325# 326# -> { "execute": "query-migrate" } 327# <- { 328# "return":{ 329# "status":"active", 330# "total-time":12345, 331# "setup-time":12345, 332# "expected-downtime":12345, 333# "ram":{ 334# "transferred":123, 335# "remaining":123, 336# "total":246, 337# "duplicate":123, 338# "normal":123, 339# "normal-bytes":123456, 340# "dirty-sync-count":15 341# } 342# } 343# } 344# 345# .. qmp-example:: 346# :title: Migration is being performed and XBZRLE is active 347# 348# -> { "execute": "query-migrate" } 349# <- { 350# "return":{ 351# "status":"active", 352# "total-time":12345, 353# "setup-time":12345, 354# "expected-downtime":12345, 355# "ram":{ 356# "total":1057024, 357# "remaining":1053304, 358# "transferred":3720, 359# "duplicate":10, 360# "normal":3333, 361# "normal-bytes":3412992, 362# "dirty-sync-count":15 363# }, 364# "xbzrle-cache":{ 365# "cache-size":67108864, 366# "bytes":20971520, 367# "pages":2444343, 368# "cache-miss":2244, 369# "cache-miss-rate":0.123, 370# "encoding-rate":80.1, 371# "overflow":34434 372# } 373# } 374# } 375## 376{ 'command': 'query-migrate', 'returns': 'MigrationInfo' } 377 378## 379# @MigrationCapability: 380# 381# Migration capabilities enumeration 382# 383# @xbzrle: Migration supports xbzrle (Xor Based Zero Run Length 384# Encoding). This feature allows us to minimize migration traffic 385# for certain work loads, by sending compressed difference of the 386# pages 387# 388# @rdma-pin-all: Controls whether or not the entire VM memory 389# footprint is mlock()'d on demand or all at once. Refer to 390# docs/rdma.txt for usage. Disabled by default. (since 2.0) 391# 392# @zero-blocks: During storage migration encode blocks of zeroes 393# efficiently. This essentially saves 1MB of zeroes per block on 394# the wire. Enabling requires source and target VM to support 395# this feature. To enable it is sufficient to enable the 396# capability on the source VM. The feature is disabled by 397# default. (since 1.6) 398# 399# @events: generate events for each migration state change (since 2.4) 400# 401# @auto-converge: If enabled, QEMU will automatically throttle down 402# the guest to speed up convergence of RAM migration. (since 1.6) 403# 404# @postcopy-ram: Start executing on the migration target before all of 405# RAM has been migrated, pulling the remaining pages along as 406# needed. The capacity must have the same setting on both source 407# and target or migration will not even start. NOTE: If the 408# migration fails during postcopy the VM will fail. (since 2.6) 409# 410# @x-colo: If enabled, migration will never end, and the state of the 411# VM on the primary side will be migrated continuously to the VM 412# on secondary side, this process is called COarse-Grain LOck 413# Stepping (COLO) for Non-stop Service. (since 2.8) 414# 415# @release-ram: if enabled, qemu will free the migrated ram pages on 416# the source during postcopy-ram migration. (since 2.9) 417# 418# @return-path: If enabled, migration will use the return path even 419# for precopy. (since 2.10) 420# 421# @pause-before-switchover: Pause outgoing migration before 422# serialising device state and before disabling block IO (since 423# 2.11) 424# 425# @multifd: Use more than one fd for migration (since 4.0) 426# 427# @dirty-bitmaps: If enabled, QEMU will migrate named dirty bitmaps. 
428# (since 2.12) 429# 430# @postcopy-blocktime: Calculate downtime for postcopy live migration 431# (since 3.0) 432# 433# @late-block-activate: If enabled, the destination will not activate 434# block devices (and thus take locks) immediately at the end of 435# migration. (since 3.0) 436# 437# @x-ignore-shared: If enabled, QEMU will not migrate shared memory 438# that is accessible on the destination machine. (since 4.0) 439# 440# @validate-uuid: Send the UUID of the source to allow the destination 441# to ensure it is the same. (since 4.2) 442# 443# @background-snapshot: If enabled, the migration stream will be a 444# snapshot of the VM exactly at the point when the migration 445# procedure starts. The VM RAM is saved with running VM. 446# (since 6.0) 447# 448# @zero-copy-send: Controls behavior on sending memory pages on 449# migration. When true, enables a zero-copy mechanism for sending 450# memory pages, if host supports it. Requires that QEMU be 451# permitted to use locked memory for guest RAM pages. (since 7.1) 452# 453# @postcopy-preempt: If enabled, the migration process will allow 454# postcopy requests to preempt precopy stream, so postcopy 455# requests will be handled faster. This is a performance feature 456# and should not affect the correctness of postcopy migration. 457# (since 7.1) 458# 459# @switchover-ack: If enabled, migration will not stop the source VM 460# and complete the migration until an ACK is received from the 461# destination that it's OK to do so. Exactly when this ACK is 462# sent depends on the migrated devices that use this feature. For 463# example, a device can use it to make sure some of its data is 464# sent and loaded in the destination before doing switchover. 465# This can reduce downtime if devices that support this capability 466# are present. 'return-path' capability must be enabled to use 467# it. (since 8.1) 468# 469# @dirty-limit: If enabled, migration will throttle vCPUs as needed to 470# keep their dirty page rate within @vcpu-dirty-limit. This can 471# improve responsiveness of large guests during live migration, 472# and can result in more stable read performance. Requires KVM 473# with accelerator property "dirty-ring-size" set. (Since 8.1) 474# 475# @mapped-ram: Migrate using fixed offsets in the migration file for 476# each RAM page. Requires a migration URI that supports seeking, 477# such as a file. (since 9.0) 478# 479# Features: 480# 481# @unstable: Members @x-colo and @x-ignore-shared are experimental. 
482# 483# Since: 1.2 484## 485{ 'enum': 'MigrationCapability', 486 'data': ['xbzrle', 'rdma-pin-all', 'auto-converge', 'zero-blocks', 487 'events', 'postcopy-ram', 488 { 'name': 'x-colo', 'features': [ 'unstable' ] }, 489 'release-ram', 490 'return-path', 'pause-before-switchover', 'multifd', 491 'dirty-bitmaps', 'postcopy-blocktime', 'late-block-activate', 492 { 'name': 'x-ignore-shared', 'features': [ 'unstable' ] }, 493 'validate-uuid', 'background-snapshot', 494 'zero-copy-send', 'postcopy-preempt', 'switchover-ack', 495 'dirty-limit', 'mapped-ram'] } 496 497## 498# @MigrationCapabilityStatus: 499# 500# Migration capability information 501# 502# @capability: capability enum 503# 504# @state: capability state bool 505# 506# Since: 1.2 507## 508{ 'struct': 'MigrationCapabilityStatus', 509 'data': { 'capability': 'MigrationCapability', 'state': 'bool' } } 510 511## 512# @migrate-set-capabilities: 513# 514# Enable/Disable the following migration capabilities (like xbzrle) 515# 516# @capabilities: json array of capability modifications to make 517# 518# Since: 1.2 519# 520# .. qmp-example:: 521# 522# -> { "execute": "migrate-set-capabilities" , "arguments": 523# { "capabilities": [ { "capability": "xbzrle", "state": true } ] } } 524# <- { "return": {} } 525## 526{ 'command': 'migrate-set-capabilities', 527 'data': { 'capabilities': ['MigrationCapabilityStatus'] } } 528 529## 530# @query-migrate-capabilities: 531# 532# Returns information about the current migration capabilities status 533# 534# Returns: @MigrationCapabilityStatus 535# 536# Since: 1.2 537# 538# .. qmp-example:: 539# 540# -> { "execute": "query-migrate-capabilities" } 541# <- { "return": [ 542# {"state": false, "capability": "xbzrle"}, 543# {"state": false, "capability": "rdma-pin-all"}, 544# {"state": false, "capability": "auto-converge"}, 545# {"state": false, "capability": "zero-blocks"}, 546# {"state": true, "capability": "events"}, 547# {"state": false, "capability": "postcopy-ram"}, 548# {"state": false, "capability": "x-colo"} 549# ]} 550## 551{ 'command': 'query-migrate-capabilities', 'returns': ['MigrationCapabilityStatus']} 552 553## 554# @MultiFDCompression: 555# 556# An enumeration of multifd compression methods. 557# 558# @none: no compression. 559# 560# @zlib: use zlib compression method. 561# 562# @zstd: use zstd compression method. 563# 564# @qpl: use qpl compression method. Query Processing Library(qpl) is 565# based on the deflate compression algorithm and use the Intel 566# In-Memory Analytics Accelerator(IAA) accelerated compression and 567# decompression. (Since 9.1) 568# 569# @uadk: use UADK library compression method. (Since 9.1) 570# 571# Since: 5.0 572## 573{ 'enum': 'MultiFDCompression', 574 'data': [ 'none', 'zlib', 575 { 'name': 'zstd', 'if': 'CONFIG_ZSTD' }, 576 { 'name': 'qpl', 'if': 'CONFIG_QPL' }, 577 { 'name': 'uadk', 'if': 'CONFIG_UADK' } ] } 578 579## 580# @MigMode: 581# 582# @normal: the original form of migration. (since 8.2) 583# 584# @cpr-reboot: The migrate command stops the VM and saves state to the 585# URI. After quitting QEMU, the user resumes by running QEMU 586# -incoming. 587# 588# This mode allows the user to quit QEMU, optionally update and 589# reboot the OS, and restart QEMU. If the user reboots, the URI 590# must persist across the reboot, such as by using a file. 591# 592# Unlike normal mode, the use of certain local storage options 593# does not block the migration, but the user must not modify the 594# contents of guest block devices between the quit and restart. 
595# 596# This mode supports VFIO devices provided the user first puts the 597# guest in the suspended runstate, such as by issuing 598# guest-suspend-ram to the QEMU guest agent. 599# 600# Best performance is achieved when the memory backend is shared 601# and the @x-ignore-shared migration capability is set, but this 602# is not required. Further, if the user reboots before restarting 603# such a configuration, the shared memory must persist across the 604# reboot, such as by backing it with a dax device. 605# 606# @cpr-reboot may not be used with postcopy, background-snapshot, 607# or COLO. 608# 609# (since 8.2) 610## 611{ 'enum': 'MigMode', 612 'data': [ 'normal', 'cpr-reboot' ] } 613 614## 615# @ZeroPageDetection: 616# 617# @none: Do not perform zero page checking. 618# 619# @legacy: Perform zero page checking in main migration thread. 620# 621# @multifd: Perform zero page checking in multifd sender thread if 622# multifd migration is enabled, else in the main migration thread 623# as for @legacy. 624# 625# Since: 9.0 626## 627{ 'enum': 'ZeroPageDetection', 628 'data': [ 'none', 'legacy', 'multifd' ] } 629 630## 631# @BitmapMigrationBitmapAliasTransform: 632# 633# @persistent: If present, the bitmap will be made persistent or 634# transient depending on this parameter. 635# 636# Since: 6.0 637## 638{ 'struct': 'BitmapMigrationBitmapAliasTransform', 639 'data': { 640 '*persistent': 'bool' 641 } } 642 643## 644# @BitmapMigrationBitmapAlias: 645# 646# @name: The name of the bitmap. 647# 648# @alias: An alias name for migration (for example the bitmap name on 649# the opposite site). 650# 651# @transform: Allows the modification of the migrated bitmap. (since 652# 6.0) 653# 654# Since: 5.2 655## 656{ 'struct': 'BitmapMigrationBitmapAlias', 657 'data': { 658 'name': 'str', 659 'alias': 'str', 660 '*transform': 'BitmapMigrationBitmapAliasTransform' 661 } } 662 663## 664# @BitmapMigrationNodeAlias: 665# 666# Maps a block node name and the bitmaps it has to aliases for dirty 667# bitmap migration. 668# 669# @node-name: A block node name. 670# 671# @alias: An alias block node name for migration (for example the node 672# name on the opposite site). 673# 674# @bitmaps: Mappings for the bitmaps on this node. 675# 676# Since: 5.2 677## 678{ 'struct': 'BitmapMigrationNodeAlias', 679 'data': { 680 'node-name': 'str', 681 'alias': 'str', 682 'bitmaps': [ 'BitmapMigrationBitmapAlias' ] 683 } } 684 685## 686# @MigrationParameter: 687# 688# Migration parameters enumeration 689# 690# @announce-initial: Initial delay (in milliseconds) before sending 691# the first announce (Since 4.0) 692# 693# @announce-max: Maximum delay (in milliseconds) between packets in 694# the announcement (Since 4.0) 695# 696# @announce-rounds: Number of self-announce packets sent after 697# migration (Since 4.0) 698# 699# @announce-step: Increase in delay (in milliseconds) between 700# subsequent packets in the announcement (Since 4.0) 701# 702# @throttle-trigger-threshold: The ratio of bytes_dirty_period and 703# bytes_xfer_period to trigger throttling. It is expressed as 704# percentage. The default value is 50. (Since 5.0) 705# 706# @cpu-throttle-initial: Initial percentage of time guest cpus are 707# throttled when migration auto-converge is activated. The 708# default value is 20. (Since 2.7) 709# 710# @cpu-throttle-increment: throttle percentage increase each time 711# auto-converge detects that migration is not making progress. 712# The default value is 10. 
(Since 2.7) 713# 714# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage At 715# the tail stage of throttling, the Guest is very sensitive to CPU 716# percentage while the @cpu-throttle -increment is excessive 717# usually at tail stage. If this parameter is true, we will 718# compute the ideal CPU percentage used by the Guest, which may 719# exactly make the dirty rate match the dirty rate threshold. 720# Then we will choose a smaller throttle increment between the one 721# specified by @cpu-throttle-increment and the one generated by 722# ideal CPU percentage. Therefore, it is compatible to 723# traditional throttling, meanwhile the throttle increment won't 724# be excessive at tail stage. The default value is false. (Since 725# 5.1) 726# 727# @tls-creds: ID of the 'tls-creds' object that provides credentials 728# for establishing a TLS connection over the migration data 729# channel. On the outgoing side of the migration, the credentials 730# must be for a 'client' endpoint, while for the incoming side the 731# credentials must be for a 'server' endpoint. Setting this to a 732# non-empty string enables TLS for all migrations. An empty 733# string means that QEMU will use plain text mode for migration, 734# rather than TLS. (Since 2.7) 735# 736# @tls-hostname: migration target's hostname for validating the 737# server's x509 certificate identity. If empty, QEMU will use the 738# hostname from the migration URI, if any. A non-empty value is 739# required when using x509 based TLS credentials and the migration 740# URI does not include a hostname, such as fd: or exec: based 741# migration. (Since 2.7) 742# 743# Note: empty value works only since 2.9. 744# 745# @tls-authz: ID of the 'authz' object subclass that provides access 746# control checking of the TLS x509 certificate distinguished name. 747# This object is only resolved at time of use, so can be deleted 748# and recreated on the fly while the migration server is active. 749# If missing, it will default to denying access (Since 4.0) 750# 751# @max-bandwidth: maximum speed for migration, in bytes per second. 752# (Since 2.8) 753# 754# @avail-switchover-bandwidth: to set the available bandwidth that 755# migration can use during switchover phase. NOTE! This does not 756# limit the bandwidth during switchover, but only for calculations 757# when making decisions to switchover. By default, this value is 758# zero, which means QEMU will estimate the bandwidth 759# automatically. This can be set when the estimated value is not 760# accurate, while the user is able to guarantee such bandwidth is 761# available when switching over. When specified correctly, this 762# can make the switchover decision much more accurate. 763# (Since 8.2) 764# 765# @downtime-limit: set maximum tolerated downtime for migration. 766# maximum downtime in milliseconds (Since 2.8) 767# 768# @x-checkpoint-delay: The delay time (in ms) between two COLO 769# checkpoints in periodic mode. (Since 2.8) 770# 771# @multifd-channels: Number of channels used to migrate data in 772# parallel. This is the same number that the number of sockets 773# used for migration. The default value is 2 (since 4.0) 774# 775# @xbzrle-cache-size: cache size to be used by XBZRLE migration. It 776# needs to be a multiple of the target page size and a power of 2 777# (Since 2.11) 778# 779# @max-postcopy-bandwidth: Background transfer bandwidth during 780# postcopy. Defaults to 0 (unlimited). In bytes per second. 
781# (Since 3.0) 782# 783# @max-cpu-throttle: maximum cpu throttle percentage. Defaults to 99. 784# (Since 3.1) 785# 786# @multifd-compression: Which compression method to use. Defaults to 787# none. (Since 5.0) 788# 789# @multifd-zlib-level: Set the compression level to be used in live 790# migration, the compression level is an integer between 0 and 9, 791# where 0 means no compression, 1 means the best compression 792# speed, and 9 means best compression ratio which will consume 793# more CPU. Defaults to 1. (Since 5.0) 794# 795# @multifd-zstd-level: Set the compression level to be used in live 796# migration, the compression level is an integer between 0 and 20, 797# where 0 means no compression, 1 means the best compression 798# speed, and 20 means best compression ratio which will consume 799# more CPU. Defaults to 1. (Since 5.0) 800# 801# @block-bitmap-mapping: Maps block nodes and bitmaps on them to 802# aliases for the purpose of dirty bitmap migration. Such aliases 803# may for example be the corresponding names on the opposite site. 804# The mapping must be one-to-one, but not necessarily complete: On 805# the source, unmapped bitmaps and all bitmaps on unmapped nodes 806# will be ignored. On the destination, encountering an unmapped 807# alias in the incoming migration stream will result in a report, 808# and all further bitmap migration data will then be discarded. 809# Note that the destination does not know about bitmaps it does 810# not receive, so there is no limitation or requirement regarding 811# the number of bitmaps received, or how they are named, or on 812# which nodes they are placed. By default (when this parameter 813# has never been set), bitmap names are mapped to themselves. 814# Nodes are mapped to their block device name if there is one, and 815# to their node name otherwise. (Since 5.2) 816# 817# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty 818# limit during live migration. Should be in the range 1 to 819# 1000ms. Defaults to 1000ms. (Since 8.1) 820# 821# @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration. 822# Defaults to 1. (Since 8.1) 823# 824# @mode: Migration mode. See description in @MigMode. Default is 825# 'normal'. (Since 8.2) 826# 827# @zero-page-detection: Whether and how to detect zero pages. 828# See description in @ZeroPageDetection. Default is 'multifd'. 829# (since 9.0) 830# 831# @direct-io: Open migration files with O_DIRECT when possible. This 832# only has effect if the @mapped-ram capability is enabled. 833# (Since 9.1) 834# 835# Features: 836# 837# @unstable: Members @x-checkpoint-delay and 838# @x-vcpu-dirty-limit-period are experimental. 
839# 840# Since: 2.4 841## 842{ 'enum': 'MigrationParameter', 843 'data': ['announce-initial', 'announce-max', 844 'announce-rounds', 'announce-step', 845 'throttle-trigger-threshold', 846 'cpu-throttle-initial', 'cpu-throttle-increment', 847 'cpu-throttle-tailslow', 848 'tls-creds', 'tls-hostname', 'tls-authz', 'max-bandwidth', 849 'avail-switchover-bandwidth', 'downtime-limit', 850 { 'name': 'x-checkpoint-delay', 'features': [ 'unstable' ] }, 851 'multifd-channels', 852 'xbzrle-cache-size', 'max-postcopy-bandwidth', 853 'max-cpu-throttle', 'multifd-compression', 854 'multifd-zlib-level', 'multifd-zstd-level', 855 'block-bitmap-mapping', 856 { 'name': 'x-vcpu-dirty-limit-period', 'features': ['unstable'] }, 857 'vcpu-dirty-limit', 858 'mode', 859 'zero-page-detection', 860 'direct-io'] } 861 862## 863# @MigrateSetParameters: 864# 865# @announce-initial: Initial delay (in milliseconds) before sending 866# the first announce (Since 4.0) 867# 868# @announce-max: Maximum delay (in milliseconds) between packets in 869# the announcement (Since 4.0) 870# 871# @announce-rounds: Number of self-announce packets sent after 872# migration (Since 4.0) 873# 874# @announce-step: Increase in delay (in milliseconds) between 875# subsequent packets in the announcement (Since 4.0) 876# 877# @throttle-trigger-threshold: The ratio of bytes_dirty_period and 878# bytes_xfer_period to trigger throttling. It is expressed as 879# percentage. The default value is 50. (Since 5.0) 880# 881# @cpu-throttle-initial: Initial percentage of time guest cpus are 882# throttled when migration auto-converge is activated. The 883# default value is 20. (Since 2.7) 884# 885# @cpu-throttle-increment: throttle percentage increase each time 886# auto-converge detects that migration is not making progress. 887# The default value is 10. (Since 2.7) 888# 889# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage At 890# the tail stage of throttling, the Guest is very sensitive to CPU 891# percentage while the @cpu-throttle -increment is excessive 892# usually at tail stage. If this parameter is true, we will 893# compute the ideal CPU percentage used by the Guest, which may 894# exactly make the dirty rate match the dirty rate threshold. 895# Then we will choose a smaller throttle increment between the one 896# specified by @cpu-throttle-increment and the one generated by 897# ideal CPU percentage. Therefore, it is compatible to 898# traditional throttling, meanwhile the throttle increment won't 899# be excessive at tail stage. The default value is false. (Since 900# 5.1) 901# 902# @tls-creds: ID of the 'tls-creds' object that provides credentials 903# for establishing a TLS connection over the migration data 904# channel. On the outgoing side of the migration, the credentials 905# must be for a 'client' endpoint, while for the incoming side the 906# credentials must be for a 'server' endpoint. Setting this to a 907# non-empty string enables TLS for all migrations. An empty 908# string means that QEMU will use plain text mode for migration, 909# rather than TLS. This is the default. (Since 2.7) 910# 911# @tls-hostname: migration target's hostname for validating the 912# server's x509 certificate identity. If empty, QEMU will use the 913# hostname from the migration URI, if any. A non-empty value is 914# required when using x509 based TLS credentials and the migration 915# URI does not include a hostname, such as fd: or exec: based 916# migration. (Since 2.7) 917# 918# Note: empty value works only since 2.9. 
919# 920# @tls-authz: ID of the 'authz' object subclass that provides access 921# control checking of the TLS x509 certificate distinguished name. 922# This object is only resolved at time of use, so can be deleted 923# and recreated on the fly while the migration server is active. 924# If missing, it will default to denying access (Since 4.0) 925# 926# @max-bandwidth: maximum speed for migration, in bytes per second. 927# (Since 2.8) 928# 929# @avail-switchover-bandwidth: to set the available bandwidth that 930# migration can use during switchover phase. NOTE! This does not 931# limit the bandwidth during switchover, but only for calculations 932# when making decisions to switchover. By default, this value is 933# zero, which means QEMU will estimate the bandwidth 934# automatically. This can be set when the estimated value is not 935# accurate, while the user is able to guarantee such bandwidth is 936# available when switching over. When specified correctly, this 937# can make the switchover decision much more accurate. 938# (Since 8.2) 939# 940# @downtime-limit: set maximum tolerated downtime for migration. 941# maximum downtime in milliseconds (Since 2.8) 942# 943# @x-checkpoint-delay: The delay time (in ms) between two COLO 944# checkpoints in periodic mode. (Since 2.8) 945# 946# @multifd-channels: Number of channels used to migrate data in 947# parallel. This is the same number that the number of sockets 948# used for migration. The default value is 2 (since 4.0) 949# 950# @xbzrle-cache-size: cache size to be used by XBZRLE migration. It 951# needs to be a multiple of the target page size and a power of 2 952# (Since 2.11) 953# 954# @max-postcopy-bandwidth: Background transfer bandwidth during 955# postcopy. Defaults to 0 (unlimited). In bytes per second. 956# (Since 3.0) 957# 958# @max-cpu-throttle: maximum cpu throttle percentage. Defaults to 99. 959# (Since 3.1) 960# 961# @multifd-compression: Which compression method to use. Defaults to 962# none. (Since 5.0) 963# 964# @multifd-zlib-level: Set the compression level to be used in live 965# migration, the compression level is an integer between 0 and 9, 966# where 0 means no compression, 1 means the best compression 967# speed, and 9 means best compression ratio which will consume 968# more CPU. Defaults to 1. (Since 5.0) 969# 970# @multifd-zstd-level: Set the compression level to be used in live 971# migration, the compression level is an integer between 0 and 20, 972# where 0 means no compression, 1 means the best compression 973# speed, and 20 means best compression ratio which will consume 974# more CPU. Defaults to 1. (Since 5.0) 975# 976# @block-bitmap-mapping: Maps block nodes and bitmaps on them to 977# aliases for the purpose of dirty bitmap migration. Such aliases 978# may for example be the corresponding names on the opposite site. 979# The mapping must be one-to-one, but not necessarily complete: On 980# the source, unmapped bitmaps and all bitmaps on unmapped nodes 981# will be ignored. On the destination, encountering an unmapped 982# alias in the incoming migration stream will result in a report, 983# and all further bitmap migration data will then be discarded. 984# Note that the destination does not know about bitmaps it does 985# not receive, so there is no limitation or requirement regarding 986# the number of bitmaps received, or how they are named, or on 987# which nodes they are placed. By default (when this parameter 988# has never been set), bitmap names are mapped to themselves. 
989# Nodes are mapped to their block device name if there is one, and 990# to their node name otherwise. (Since 5.2) 991# 992# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty 993# limit during live migration. Should be in the range 1 to 994# 1000ms. Defaults to 1000ms. (Since 8.1) 995# 996# @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration. 997# Defaults to 1. (Since 8.1) 998# 999# @mode: Migration mode. See description in @MigMode. Default is 1000# 'normal'. (Since 8.2) 1001# 1002# @zero-page-detection: Whether and how to detect zero pages. 1003# See description in @ZeroPageDetection. Default is 'multifd'. 1004# (since 9.0) 1005# 1006# @direct-io: Open migration files with O_DIRECT when possible. This 1007# only has effect if the @mapped-ram capability is enabled. 1008# (Since 9.1) 1009# 1010# Features: 1011# 1012# @unstable: Members @x-checkpoint-delay and 1013# @x-vcpu-dirty-limit-period are experimental. 1014# 1015# TODO: either fuse back into MigrationParameters, or make 1016# MigrationParameters members mandatory 1017# 1018# Since: 2.4 1019## 1020{ 'struct': 'MigrateSetParameters', 1021 'data': { '*announce-initial': 'size', 1022 '*announce-max': 'size', 1023 '*announce-rounds': 'size', 1024 '*announce-step': 'size', 1025 '*throttle-trigger-threshold': 'uint8', 1026 '*cpu-throttle-initial': 'uint8', 1027 '*cpu-throttle-increment': 'uint8', 1028 '*cpu-throttle-tailslow': 'bool', 1029 '*tls-creds': 'StrOrNull', 1030 '*tls-hostname': 'StrOrNull', 1031 '*tls-authz': 'StrOrNull', 1032 '*max-bandwidth': 'size', 1033 '*avail-switchover-bandwidth': 'size', 1034 '*downtime-limit': 'uint64', 1035 '*x-checkpoint-delay': { 'type': 'uint32', 1036 'features': [ 'unstable' ] }, 1037 '*multifd-channels': 'uint8', 1038 '*xbzrle-cache-size': 'size', 1039 '*max-postcopy-bandwidth': 'size', 1040 '*max-cpu-throttle': 'uint8', 1041 '*multifd-compression': 'MultiFDCompression', 1042 '*multifd-zlib-level': 'uint8', 1043 '*multifd-zstd-level': 'uint8', 1044 '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ], 1045 '*x-vcpu-dirty-limit-period': { 'type': 'uint64', 1046 'features': [ 'unstable' ] }, 1047 '*vcpu-dirty-limit': 'uint64', 1048 '*mode': 'MigMode', 1049 '*zero-page-detection': 'ZeroPageDetection', 1050 '*direct-io': 'bool' } } 1051 1052## 1053# @migrate-set-parameters: 1054# 1055# Set various migration parameters. 1056# 1057# Since: 2.4 1058# 1059# .. qmp-example:: 1060# 1061# -> { "execute": "migrate-set-parameters" , 1062# "arguments": { "multifd-channels": 5 } } 1063# <- { "return": {} } 1064## 1065{ 'command': 'migrate-set-parameters', 'boxed': true, 1066 'data': 'MigrateSetParameters' } 1067 1068## 1069# @MigrationParameters: 1070# 1071# The optional members aren't actually optional. 1072# 1073# @announce-initial: Initial delay (in milliseconds) before sending 1074# the first announce (Since 4.0) 1075# 1076# @announce-max: Maximum delay (in milliseconds) between packets in 1077# the announcement (Since 4.0) 1078# 1079# @announce-rounds: Number of self-announce packets sent after 1080# migration (Since 4.0) 1081# 1082# @announce-step: Increase in delay (in milliseconds) between 1083# subsequent packets in the announcement (Since 4.0) 1084# 1085# @throttle-trigger-threshold: The ratio of bytes_dirty_period and 1086# bytes_xfer_period to trigger throttling. It is expressed as 1087# percentage. The default value is 50. 
(Since 5.0) 1088# 1089# @cpu-throttle-initial: Initial percentage of time guest cpus are 1090# throttled when migration auto-converge is activated. (Since 1091# 2.7) 1092# 1093# @cpu-throttle-increment: throttle percentage increase each time 1094# auto-converge detects that migration is not making progress. 1095# (Since 2.7) 1096# 1097# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage At 1098# the tail stage of throttling, the Guest is very sensitive to CPU 1099# percentage while the @cpu-throttle -increment is excessive 1100# usually at tail stage. If this parameter is true, we will 1101# compute the ideal CPU percentage used by the Guest, which may 1102# exactly make the dirty rate match the dirty rate threshold. 1103# Then we will choose a smaller throttle increment between the one 1104# specified by @cpu-throttle-increment and the one generated by 1105# ideal CPU percentage. Therefore, it is compatible to 1106# traditional throttling, meanwhile the throttle increment won't 1107# be excessive at tail stage. The default value is false. (Since 1108# 5.1) 1109# 1110# @tls-creds: ID of the 'tls-creds' object that provides credentials 1111# for establishing a TLS connection over the migration data 1112# channel. On the outgoing side of the migration, the credentials 1113# must be for a 'client' endpoint, while for the incoming side the 1114# credentials must be for a 'server' endpoint. An empty string 1115# means that QEMU will use plain text mode for migration, rather 1116# than TLS. (Since 2.7) 1117# 1118# Note: 2.8 omits empty @tls-creds instead. 1119# 1120# @tls-hostname: migration target's hostname for validating the 1121# server's x509 certificate identity. If empty, QEMU will use the 1122# hostname from the migration URI, if any. (Since 2.7) 1123# 1124# Note: 2.8 omits empty @tls-hostname instead. 1125# 1126# @tls-authz: ID of the 'authz' object subclass that provides access 1127# control checking of the TLS x509 certificate distinguished name. 1128# (Since 4.0) 1129# 1130# @max-bandwidth: maximum speed for migration, in bytes per second. 1131# (Since 2.8) 1132# 1133# @avail-switchover-bandwidth: to set the available bandwidth that 1134# migration can use during switchover phase. NOTE! This does not 1135# limit the bandwidth during switchover, but only for calculations 1136# when making decisions to switchover. By default, this value is 1137# zero, which means QEMU will estimate the bandwidth 1138# automatically. This can be set when the estimated value is not 1139# accurate, while the user is able to guarantee such bandwidth is 1140# available when switching over. When specified correctly, this 1141# can make the switchover decision much more accurate. 1142# (Since 8.2) 1143# 1144# @downtime-limit: set maximum tolerated downtime for migration. 1145# maximum downtime in milliseconds (Since 2.8) 1146# 1147# @x-checkpoint-delay: the delay time between two COLO checkpoints. 1148# (Since 2.8) 1149# 1150# @multifd-channels: Number of channels used to migrate data in 1151# parallel. This is the same number that the number of sockets 1152# used for migration. The default value is 2 (since 4.0) 1153# 1154# @xbzrle-cache-size: cache size to be used by XBZRLE migration. It 1155# needs to be a multiple of the target page size and a power of 2 1156# (Since 2.11) 1157# 1158# @max-postcopy-bandwidth: Background transfer bandwidth during 1159# postcopy. Defaults to 0 (unlimited). In bytes per second. 1160# (Since 3.0) 1161# 1162# @max-cpu-throttle: maximum cpu throttle percentage. 
Defaults to 99. 1163# (Since 3.1) 1164# 1165# @multifd-compression: Which compression method to use. Defaults to 1166# none. (Since 5.0) 1167# 1168# @multifd-zlib-level: Set the compression level to be used in live 1169# migration, the compression level is an integer between 0 and 9, 1170# where 0 means no compression, 1 means the best compression 1171# speed, and 9 means best compression ratio which will consume 1172# more CPU. Defaults to 1. (Since 5.0) 1173# 1174# @multifd-zstd-level: Set the compression level to be used in live 1175# migration, the compression level is an integer between 0 and 20, 1176# where 0 means no compression, 1 means the best compression 1177# speed, and 20 means best compression ratio which will consume 1178# more CPU. Defaults to 1. (Since 5.0) 1179# 1180# @block-bitmap-mapping: Maps block nodes and bitmaps on them to 1181# aliases for the purpose of dirty bitmap migration. Such aliases 1182# may for example be the corresponding names on the opposite site. 1183# The mapping must be one-to-one, but not necessarily complete: On 1184# the source, unmapped bitmaps and all bitmaps on unmapped nodes 1185# will be ignored. On the destination, encountering an unmapped 1186# alias in the incoming migration stream will result in a report, 1187# and all further bitmap migration data will then be discarded. 1188# Note that the destination does not know about bitmaps it does 1189# not receive, so there is no limitation or requirement regarding 1190# the number of bitmaps received, or how they are named, or on 1191# which nodes they are placed. By default (when this parameter 1192# has never been set), bitmap names are mapped to themselves. 1193# Nodes are mapped to their block device name if there is one, and 1194# to their node name otherwise. (Since 5.2) 1195# 1196# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty 1197# limit during live migration. Should be in the range 1 to 1198# 1000ms. Defaults to 1000ms. (Since 8.1) 1199# 1200# @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration. 1201# Defaults to 1. (Since 8.1) 1202# 1203# @mode: Migration mode. See description in @MigMode. Default is 1204# 'normal'. (Since 8.2) 1205# 1206# @zero-page-detection: Whether and how to detect zero pages. 1207# See description in @ZeroPageDetection. Default is 'multifd'. 1208# (since 9.0) 1209# 1210# @direct-io: Open migration files with O_DIRECT when possible. This 1211# only has effect if the @mapped-ram capability is enabled. 1212# (Since 9.1) 1213# 1214# Features: 1215# 1216# @unstable: Members @x-checkpoint-delay and 1217# @x-vcpu-dirty-limit-period are experimental. 
1218# 1219# Since: 2.4 1220## 1221{ 'struct': 'MigrationParameters', 1222 'data': { '*announce-initial': 'size', 1223 '*announce-max': 'size', 1224 '*announce-rounds': 'size', 1225 '*announce-step': 'size', 1226 '*throttle-trigger-threshold': 'uint8', 1227 '*cpu-throttle-initial': 'uint8', 1228 '*cpu-throttle-increment': 'uint8', 1229 '*cpu-throttle-tailslow': 'bool', 1230 '*tls-creds': 'str', 1231 '*tls-hostname': 'str', 1232 '*tls-authz': 'str', 1233 '*max-bandwidth': 'size', 1234 '*avail-switchover-bandwidth': 'size', 1235 '*downtime-limit': 'uint64', 1236 '*x-checkpoint-delay': { 'type': 'uint32', 1237 'features': [ 'unstable' ] }, 1238 '*multifd-channels': 'uint8', 1239 '*xbzrle-cache-size': 'size', 1240 '*max-postcopy-bandwidth': 'size', 1241 '*max-cpu-throttle': 'uint8', 1242 '*multifd-compression': 'MultiFDCompression', 1243 '*multifd-zlib-level': 'uint8', 1244 '*multifd-zstd-level': 'uint8', 1245 '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ], 1246 '*x-vcpu-dirty-limit-period': { 'type': 'uint64', 1247 'features': [ 'unstable' ] }, 1248 '*vcpu-dirty-limit': 'uint64', 1249 '*mode': 'MigMode', 1250 '*zero-page-detection': 'ZeroPageDetection', 1251 '*direct-io': 'bool' } } 1252 1253## 1254# @query-migrate-parameters: 1255# 1256# Returns information about the current migration parameters 1257# 1258# Returns: @MigrationParameters 1259# 1260# Since: 2.4 1261# 1262# .. qmp-example:: 1263# 1264# -> { "execute": "query-migrate-parameters" } 1265# <- { "return": { 1266# "multifd-channels": 2, 1267# "cpu-throttle-increment": 10, 1268# "cpu-throttle-initial": 20, 1269# "max-bandwidth": 33554432, 1270# "downtime-limit": 300 1271# } 1272# } 1273## 1274{ 'command': 'query-migrate-parameters', 1275 'returns': 'MigrationParameters' } 1276 1277## 1278# @migrate-start-postcopy: 1279# 1280# Followup to a migration command to switch the migration to postcopy 1281# mode. The postcopy-ram capability must be set on both source and 1282# destination before the original migration command. 1283# 1284# Since: 2.5 1285# 1286# .. qmp-example:: 1287# 1288# -> { "execute": "migrate-start-postcopy" } 1289# <- { "return": {} } 1290## 1291{ 'command': 'migrate-start-postcopy' } 1292 1293## 1294# @MIGRATION: 1295# 1296# Emitted when a migration event happens 1297# 1298# @status: @MigrationStatus describing the current migration status. 1299# 1300# Since: 2.4 1301# 1302# .. qmp-example:: 1303# 1304# <- {"timestamp": {"seconds": 1432121972, "microseconds": 744001}, 1305# "event": "MIGRATION", 1306# "data": {"status": "completed"} } 1307## 1308{ 'event': 'MIGRATION', 1309 'data': {'status': 'MigrationStatus'}} 1310 1311## 1312# @MIGRATION_PASS: 1313# 1314# Emitted from the source side of a migration at the start of each 1315# pass (when it syncs the dirty bitmap) 1316# 1317# @pass: An incrementing count (starting at 1 on the first pass) 1318# 1319# Since: 2.6 1320# 1321# .. qmp-example:: 1322# 1323# <- { "timestamp": {"seconds": 1449669631, "microseconds": 239225}, 1324# "event": "MIGRATION_PASS", "data": {"pass": 2} } 1325## 1326{ 'event': 'MIGRATION_PASS', 1327 'data': { 'pass': 'int' } } 1328 1329## 1330# @COLOMessage: 1331# 1332# The message transmission between Primary side and Secondary side. 1333# 1334# @checkpoint-ready: Secondary VM (SVM) is ready for checkpointing 1335# 1336# @checkpoint-request: Primary VM (PVM) tells SVM to prepare for 1337# checkpointing 1338# 1339# @checkpoint-reply: SVM gets PVM's checkpoint request 1340# 1341# @vmstate-send: VM's state will be sent by PVM. 
1342# 1343# @vmstate-size: The total size of VMstate. 1344# 1345# @vmstate-received: VM's state has been received by SVM. 1346# 1347# @vmstate-loaded: VM's state has been loaded by SVM. 1348# 1349# Since: 2.8 1350## 1351{ 'enum': 'COLOMessage', 1352 'data': [ 'checkpoint-ready', 'checkpoint-request', 'checkpoint-reply', 1353 'vmstate-send', 'vmstate-size', 'vmstate-received', 1354 'vmstate-loaded' ] } 1355 1356## 1357# @COLOMode: 1358# 1359# The COLO current mode. 1360# 1361# @none: COLO is disabled. 1362# 1363# @primary: COLO node in primary side. 1364# 1365# @secondary: COLO node in slave side. 1366# 1367# Since: 2.8 1368## 1369{ 'enum': 'COLOMode', 1370 'data': [ 'none', 'primary', 'secondary'] } 1371 1372## 1373# @FailoverStatus: 1374# 1375# An enumeration of COLO failover status 1376# 1377# @none: no failover has ever happened 1378# 1379# @require: got failover requirement but not handled 1380# 1381# @active: in the process of doing failover 1382# 1383# @completed: finish the process of failover 1384# 1385# @relaunch: restart the failover process, from 'none' -> 'completed' 1386# (Since 2.9) 1387# 1388# Since: 2.8 1389## 1390{ 'enum': 'FailoverStatus', 1391 'data': [ 'none', 'require', 'active', 'completed', 'relaunch' ] } 1392 1393## 1394# @COLO_EXIT: 1395# 1396# Emitted when VM finishes COLO mode due to some errors happening or 1397# at the request of users. 1398# 1399# @mode: report COLO mode when COLO exited. 1400# 1401# @reason: describes the reason for the COLO exit. 1402# 1403# Since: 3.1 1404# 1405# .. qmp-example:: 1406# 1407# <- { "timestamp": {"seconds": 2032141960, "microseconds": 417172}, 1408# "event": "COLO_EXIT", "data": {"mode": "primary", "reason": "request" } } 1409## 1410{ 'event': 'COLO_EXIT', 1411 'data': {'mode': 'COLOMode', 'reason': 'COLOExitReason' } } 1412 1413## 1414# @COLOExitReason: 1415# 1416# The reason for a COLO exit. 1417# 1418# @none: failover has never happened. This state does not occur in 1419# the COLO_EXIT event, and is only visible in the result of 1420# query-colo-status. 1421# 1422# @request: COLO exit is due to an external request. 1423# 1424# @error: COLO exit is due to an internal error. 1425# 1426# @processing: COLO is currently handling a failover (since 4.0). 1427# 1428# Since: 3.1 1429## 1430{ 'enum': 'COLOExitReason', 1431 'data': [ 'none', 'request', 'error' , 'processing' ] } 1432 1433## 1434# @x-colo-lost-heartbeat: 1435# 1436# Tell qemu that heartbeat is lost, request it to do takeover 1437# procedures. If this command is sent to the PVM, the Primary side 1438# will exit COLO mode. If sent to the Secondary, the Secondary side 1439# will run failover work, then takes over server operation to become 1440# the service VM. 1441# 1442# Features: 1443# 1444# @unstable: This command is experimental. 1445# 1446# Since: 2.8 1447# 1448# .. qmp-example:: 1449# 1450# -> { "execute": "x-colo-lost-heartbeat" } 1451# <- { "return": {} } 1452## 1453{ 'command': 'x-colo-lost-heartbeat', 1454 'features': [ 'unstable' ], 1455 'if': 'CONFIG_REPLICATION' } 1456 1457## 1458# @migrate_cancel: 1459# 1460# Cancel the current executing migration process. 1461# 1462# .. note:: This command succeeds even if there is no migration 1463# process running. 1464# 1465# Since: 0.14 1466# 1467# .. qmp-example:: 1468# 1469# -> { "execute": "migrate_cancel" } 1470# <- { "return": {} } 1471## 1472{ 'command': 'migrate_cancel' } 1473 1474## 1475# @migrate-continue: 1476# 1477# Continue migration when it's in a paused state. 
1478# 1479# @state: The state the migration is currently expected to be in 1480# 1481# Since: 2.11 1482# 1483# .. qmp-example:: 1484# 1485# -> { "execute": "migrate-continue" , "arguments": 1486# { "state": "pre-switchover" } } 1487# <- { "return": {} } 1488## 1489{ 'command': 'migrate-continue', 'data': {'state': 'MigrationStatus'} } 1490 1491## 1492# @MigrationAddressType: 1493# 1494# The migration stream transport mechanisms. 1495# 1496# @socket: Migrate via socket. 1497# 1498# @exec: Direct the migration stream to another process. 1499# 1500# @rdma: Migrate via RDMA. 1501# 1502# @file: Direct the migration stream to a file. 1503# 1504# Since: 8.2 1505## 1506{ 'enum': 'MigrationAddressType', 1507 'data': [ 'socket', 'exec', 'rdma', 'file' ] } 1508 1509## 1510# @FileMigrationArgs: 1511# 1512# @filename: The file to receive the migration stream 1513# 1514# @offset: The file offset where the migration stream will start 1515# 1516# Since: 8.2 1517## 1518{ 'struct': 'FileMigrationArgs', 1519 'data': { 'filename': 'str', 1520 'offset': 'uint64' } } 1521 1522## 1523# @MigrationExecCommand: 1524# 1525# @args: command (list head) and arguments to execute. 1526# 1527# Since: 8.2 1528## 1529{ 'struct': 'MigrationExecCommand', 1530 'data': {'args': [ 'str' ] } } 1531 1532## 1533# @MigrationAddress: 1534# 1535# Migration endpoint configuration. 1536# 1537# @transport: The migration stream transport mechanism 1538# 1539# Since: 8.2 1540## 1541{ 'union': 'MigrationAddress', 1542 'base': { 'transport' : 'MigrationAddressType'}, 1543 'discriminator': 'transport', 1544 'data': { 1545 'socket': 'SocketAddress', 1546 'exec': 'MigrationExecCommand', 1547 'rdma': 'InetSocketAddress', 1548 'file': 'FileMigrationArgs' } } 1549 1550## 1551# @MigrationChannelType: 1552# 1553# The migration channel-type request options. 1554# 1555# @main: Main outbound migration channel. 1556# 1557# Since: 8.1 1558## 1559{ 'enum': 'MigrationChannelType', 1560 'data': [ 'main' ] } 1561 1562## 1563# @MigrationChannel: 1564# 1565# Migration stream channel parameters. 1566# 1567# @channel-type: Channel type for transferring packet information. 1568# 1569# @addr: Migration endpoint configuration on destination interface. 1570# 1571# Since: 8.1 1572## 1573{ 'struct': 'MigrationChannel', 1574 'data': { 1575 'channel-type': 'MigrationChannelType', 1576 'addr': 'MigrationAddress' } } 1577 1578## 1579# @migrate: 1580# 1581# Migrates the current running guest to another Virtual Machine. 1582# 1583# @uri: the Uniform Resource Identifier of the destination VM 1584# 1585# @channels: list of migration stream channels with each stream in the 1586# list connected to a destination interface endpoint. 1587# 1588# @detach: this argument exists only for compatibility reasons and is 1589# ignored by QEMU 1590# 1591# @resume: resume one paused migration, default "off". (since 3.0) 1592# 1593# Since: 0.14 1594# 1595# .. admonition:: Notes 1596# 1597# 1. The 'query-migrate' command should be used to check 1598# migration's progress and final result (this information is 1599# provided by the 'status' member). 1600# 1601# 2. All boolean arguments default to false. 1602# 1603# 3. The user Monitor's "detach" argument is invalid in QMP and 1604# should not be used. 1605# 1606# 4. The uri argument should have the Uniform Resource Identifier 1607# of default destination VM. This connection will be bound to 1608# default network. 1609# 1610# 5. For now, number of migration streams is restricted to one, 1611# i.e. number of items in 'channels' list is just 1. 
1612# 1613# 6. The 'uri' and 'channels' arguments are mutually exclusive; 1614# exactly one of the two should be present. 1615# 1616# .. qmp-example:: 1617# 1618# -> { "execute": "migrate", "arguments": { "uri": "tcp:0:4446" } } 1619# <- { "return": {} } 1620# 1621# -> { "execute": "migrate", 1622# "arguments": { 1623# "channels": [ { "channel-type": "main", 1624# "addr": { "transport": "socket", 1625# "type": "inet", 1626# "host": "10.12.34.9", 1627# "port": "1050" } } ] } } 1628# <- { "return": {} } 1629# 1630# -> { "execute": "migrate", 1631# "arguments": { 1632# "channels": [ { "channel-type": "main", 1633# "addr": { "transport": "exec", 1634# "args": [ "/bin/nc", "-p", "6000", 1635# "/some/sock" ] } } ] } } 1636# <- { "return": {} } 1637# 1638# -> { "execute": "migrate", 1639# "arguments": { 1640# "channels": [ { "channel-type": "main", 1641# "addr": { "transport": "rdma", 1642# "host": "10.12.34.9", 1643# "port": "1050" } } ] } } 1644# <- { "return": {} } 1645# 1646# -> { "execute": "migrate", 1647# "arguments": { 1648# "channels": [ { "channel-type": "main", 1649# "addr": { "transport": "file", 1650# "filename": "/tmp/migfile", 1651# "offset": "0x1000" } } ] } } 1652# <- { "return": {} } 1653## 1654{ 'command': 'migrate', 1655 'data': {'*uri': 'str', 1656 '*channels': [ 'MigrationChannel' ], 1657 '*detach': 'bool', '*resume': 'bool' } } 1658 1659## 1660# @migrate-incoming: 1661# 1662# Start an incoming migration, the qemu must have been started with 1663# -incoming defer 1664# 1665# @uri: The Uniform Resource Identifier identifying the source or 1666# address to listen on 1667# 1668# @channels: list of migration stream channels with each stream in the 1669# list connected to a destination interface endpoint. 1670# 1671# @exit-on-error: Exit on incoming migration failure. Default true. 1672# When set to false, the failure triggers a MIGRATION event, and 1673# error details could be retrieved with query-migrate. 1674# (since 9.1) 1675# 1676# Since: 2.3 1677# 1678# .. admonition:: Notes 1679# 1680# 1. It's a bad idea to use a string for the uri, but it needs to 1681# stay compatible with -incoming and the format of the uri is 1682# already exposed above libvirt. 1683# 1684# 2. QEMU must be started with -incoming defer to allow 1685# migrate-incoming to be used. 1686# 1687# 3. The uri format is the same as for -incoming 1688# 1689# 4. For now, number of migration streams is restricted to one, 1690# i.e. number of items in 'channels' list is just 1. 1691# 1692# 5. The 'uri' and 'channels' arguments are mutually exclusive; 1693# exactly one of the two should be present. 1694# 1695# .. 
qmp-example:: 1696# 1697# -> { "execute": "migrate-incoming", 1698# "arguments": { "uri": "tcp:0:4446" } } 1699# <- { "return": {} } 1700# 1701# -> { "execute": "migrate-incoming", 1702# "arguments": { 1703# "channels": [ { "channel-type": "main", 1704# "addr": { "transport": "socket", 1705# "type": "inet", 1706# "host": "10.12.34.9", 1707# "port": "1050" } } ] } } 1708# <- { "return": {} } 1709# 1710# -> { "execute": "migrate-incoming", 1711# "arguments": { 1712# "channels": [ { "channel-type": "main", 1713# "addr": { "transport": "exec", 1714# "args": [ "/bin/nc", "-p", "6000", 1715# "/some/sock" ] } } ] } } 1716# <- { "return": {} } 1717# 1718# -> { "execute": "migrate-incoming", 1719# "arguments": { 1720# "channels": [ { "channel-type": "main", 1721# "addr": { "transport": "rdma", 1722# "host": "10.12.34.9", 1723# "port": "1050" } } ] } } 1724# <- { "return": {} } 1725## 1726{ 'command': 'migrate-incoming', 1727 'data': {'*uri': 'str', 1728 '*channels': [ 'MigrationChannel' ], 1729 '*exit-on-error': 'bool' } } 1730 1731## 1732# @xen-save-devices-state: 1733# 1734# Save the state of all devices to file. The RAM and the block 1735# devices of the VM are not saved by this command. 1736# 1737# @filename: the file to save the state of the devices to as binary 1738# data. See xen-save-devices-state.txt for a description of the 1739# binary format. 1740# 1741# @live: Optional argument to ask QEMU to treat this command as part 1742# of a live migration. Default to true. (since 2.11) 1743# 1744# Since: 1.1 1745# 1746# .. qmp-example:: 1747# 1748# -> { "execute": "xen-save-devices-state", 1749# "arguments": { "filename": "/tmp/save" } } 1750# <- { "return": {} } 1751## 1752{ 'command': 'xen-save-devices-state', 1753 'data': {'filename': 'str', '*live':'bool' } } 1754 1755## 1756# @xen-set-global-dirty-log: 1757# 1758# Enable or disable the global dirty log mode. 1759# 1760# @enable: true to enable, false to disable. 1761# 1762# Since: 1.3 1763# 1764# .. qmp-example:: 1765# 1766# -> { "execute": "xen-set-global-dirty-log", 1767# "arguments": { "enable": true } } 1768# <- { "return": {} } 1769## 1770{ 'command': 'xen-set-global-dirty-log', 'data': { 'enable': 'bool' } } 1771 1772## 1773# @xen-load-devices-state: 1774# 1775# Load the state of all devices from file. The RAM and the block 1776# devices of the VM are not loaded by this command. 1777# 1778# @filename: the file to load the state of the devices from as binary 1779# data. See xen-save-devices-state.txt for a description of the 1780# binary format. 1781# 1782# Since: 2.7 1783# 1784# .. qmp-example:: 1785# 1786# -> { "execute": "xen-load-devices-state", 1787# "arguments": { "filename": "/tmp/resume" } } 1788# <- { "return": {} } 1789## 1790{ 'command': 'xen-load-devices-state', 'data': {'filename': 'str'} } 1791 1792## 1793# @xen-set-replication: 1794# 1795# Enable or disable replication. 1796# 1797# @enable: true to enable, false to disable. 1798# 1799# @primary: true for primary or false for secondary. 1800# 1801# @failover: true to do failover, false to stop. Cannot be specified 1802# if 'enable' is true. Default value is false. 1803# 1804# .. 
qmp-example:: 1805# 1806# -> { "execute": "xen-set-replication", 1807# "arguments": {"enable": true, "primary": false} } 1808# <- { "return": {} } 1809# 1810# Since: 2.9 1811## 1812{ 'command': 'xen-set-replication', 1813 'data': { 'enable': 'bool', 'primary': 'bool', '*failover': 'bool' }, 1814 'if': 'CONFIG_REPLICATION' } 1815 1816## 1817# @ReplicationStatus: 1818# 1819# The result format for 'query-xen-replication-status'. 1820# 1821# @error: true if an error happened, false if replication is normal. 1822# 1823# @desc: the human readable error description string, when @error is 1824# 'true'. 1825# 1826# Since: 2.9 1827## 1828{ 'struct': 'ReplicationStatus', 1829 'data': { 'error': 'bool', '*desc': 'str' }, 1830 'if': 'CONFIG_REPLICATION' } 1831 1832## 1833# @query-xen-replication-status: 1834# 1835# Query replication status while the vm is running. 1836# 1837# Returns: A @ReplicationStatus object showing the status. 1838# 1839# .. qmp-example:: 1840# 1841# -> { "execute": "query-xen-replication-status" } 1842# <- { "return": { "error": false } } 1843# 1844# Since: 2.9 1845## 1846{ 'command': 'query-xen-replication-status', 1847 'returns': 'ReplicationStatus', 1848 'if': 'CONFIG_REPLICATION' } 1849 1850## 1851# @xen-colo-do-checkpoint: 1852# 1853# Xen uses this command to notify replication to trigger a checkpoint. 1854# 1855# .. qmp-example:: 1856# 1857# -> { "execute": "xen-colo-do-checkpoint" } 1858# <- { "return": {} } 1859# 1860# Since: 2.9 1861## 1862{ 'command': 'xen-colo-do-checkpoint', 1863 'if': 'CONFIG_REPLICATION' } 1864 1865## 1866# @COLOStatus: 1867# 1868# The result format for 'query-colo-status'. 1869# 1870# @mode: COLO running mode. If COLO is running, this field will 1871# return 'primary' or 'secondary'. 1872# 1873# @last-mode: COLO last running mode. If COLO is running, this field 1874# will return same like mode field, after failover we can use this 1875# field to get last colo mode. (since 4.0) 1876# 1877# @reason: describes the reason for the COLO exit. 1878# 1879# Since: 3.1 1880## 1881{ 'struct': 'COLOStatus', 1882 'data': { 'mode': 'COLOMode', 'last-mode': 'COLOMode', 1883 'reason': 'COLOExitReason' }, 1884 'if': 'CONFIG_REPLICATION' } 1885 1886## 1887# @query-colo-status: 1888# 1889# Query COLO status while the vm is running. 1890# 1891# Returns: A @COLOStatus object showing the status. 1892# 1893# .. qmp-example:: 1894# 1895# -> { "execute": "query-colo-status" } 1896# <- { "return": { "mode": "primary", "last-mode": "none", "reason": "request" } } 1897# 1898# Since: 3.1 1899## 1900{ 'command': 'query-colo-status', 1901 'returns': 'COLOStatus', 1902 'if': 'CONFIG_REPLICATION' } 1903 1904## 1905# @migrate-recover: 1906# 1907# Provide a recovery migration stream URI. 1908# 1909# @uri: the URI to be used for the recovery of migration stream. 1910# 1911# .. qmp-example:: 1912# 1913# -> { "execute": "migrate-recover", 1914# "arguments": { "uri": "tcp:192.168.1.200:12345" } } 1915# <- { "return": {} } 1916# 1917# Since: 3.0 1918## 1919{ 'command': 'migrate-recover', 1920 'data': { 'uri': 'str' }, 1921 'allow-oob': true } 1922 1923## 1924# @migrate-pause: 1925# 1926# Pause a migration. Currently it only supports postcopy. 1927# 1928# .. qmp-example:: 1929# 1930# -> { "execute": "migrate-pause" } 1931# <- { "return": {} } 1932# 1933# Since: 3.0 1934## 1935{ 'command': 'migrate-pause', 'allow-oob': true } 1936 1937## 1938# @UNPLUG_PRIMARY: 1939# 1940# Emitted from source side of a migration when migration state is 1941# WAIT_UNPLUG. 
Device was unplugged by guest operating system. 1942# Device resources in QEMU are kept on standby to be able to re-plug 1943# it in case of migration failure. 1944# 1945# @device-id: QEMU device id of the unplugged device 1946# 1947# Since: 4.2 1948# 1949# .. qmp-example:: 1950# 1951# <- { "event": "UNPLUG_PRIMARY", 1952# "data": { "device-id": "hostdev0" }, 1953# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } } 1954## 1955{ 'event': 'UNPLUG_PRIMARY', 1956 'data': { 'device-id': 'str' } } 1957 1958## 1959# @DirtyRateVcpu: 1960# 1961# Dirty rate of vcpu. 1962# 1963# @id: vcpu index. 1964# 1965# @dirty-rate: dirty rate. 1966# 1967# Since: 6.2 1968## 1969{ 'struct': 'DirtyRateVcpu', 1970 'data': { 'id': 'int', 'dirty-rate': 'int64' } } 1971 1972## 1973# @DirtyRateStatus: 1974# 1975# Dirty page rate measurement status. 1976# 1977# @unstarted: measuring thread has not been started yet 1978# 1979# @measuring: measuring thread is running 1980# 1981# @measured: dirty page rate is measured and the results are available 1982# 1983# Since: 5.2 1984## 1985{ 'enum': 'DirtyRateStatus', 1986 'data': [ 'unstarted', 'measuring', 'measured'] } 1987 1988## 1989# @DirtyRateMeasureMode: 1990# 1991# Method used to measure dirty page rate. Differences between 1992# available methods are explained in @calc-dirty-rate. 1993# 1994# @page-sampling: use page sampling 1995# 1996# @dirty-ring: use dirty ring 1997# 1998# @dirty-bitmap: use dirty bitmap 1999# 2000# Since: 6.2 2001## 2002{ 'enum': 'DirtyRateMeasureMode', 2003 'data': ['page-sampling', 'dirty-ring', 'dirty-bitmap'] } 2004 2005## 2006# @TimeUnit: 2007# 2008# Specifies unit in which time-related value is specified. 2009# 2010# @second: value is in seconds 2011# 2012# @millisecond: value is in milliseconds 2013# 2014# Since: 8.2 2015## 2016{ 'enum': 'TimeUnit', 2017 'data': ['second', 'millisecond'] } 2018 2019## 2020# @DirtyRateInfo: 2021# 2022# Information about measured dirty page rate. 2023# 2024# @dirty-rate: an estimate of the dirty page rate of the VM in units 2025# of MiB/s. Value is present only when @status is 'measured'. 2026# 2027# @status: current status of dirty page rate measurements 2028# 2029# @start-time: start time in units of second for calculation 2030# 2031# @calc-time: time period for which dirty page rate was measured, 2032# expressed and rounded down to @calc-time-unit. 2033# 2034# @calc-time-unit: time unit of @calc-time (Since 8.2) 2035# 2036# @sample-pages: number of sampled pages per GiB of guest memory. 2037# Valid only in page-sampling mode (Since 6.1) 2038# 2039# @mode: mode that was used to measure dirty page rate (Since 6.2) 2040# 2041# @vcpu-dirty-rate: dirty rate for each vCPU if dirty-ring mode was 2042# specified (Since 6.2) 2043# 2044# Since: 5.2 2045## 2046{ 'struct': 'DirtyRateInfo', 2047 'data': {'*dirty-rate': 'int64', 2048 'status': 'DirtyRateStatus', 2049 'start-time': 'int64', 2050 'calc-time': 'int64', 2051 'calc-time-unit': 'TimeUnit', 2052 'sample-pages': 'uint64', 2053 'mode': 'DirtyRateMeasureMode', 2054 '*vcpu-dirty-rate': [ 'DirtyRateVcpu' ] } } 2055 2056## 2057# @calc-dirty-rate: 2058# 2059# Start measuring dirty page rate of the VM. Results can be retrieved 2060# with @query-dirty-rate after measurements are completed. 2061# 2062# Dirty page rate is the number of pages changed in a given time 2063# period expressed in MiB/s. The following methods of calculation are 2064# available: 2065# 2066# 1. 
# 1. In page sampling mode, a random subset of pages is selected and
#    hashed twice: once at the beginning of the measurement time
#    period, and once again at the end.  If the two hashes for some
#    page differ, the page is counted as changed.  Since this method
#    relies on sampling and hashing, the calculated dirty page rate is
#    only an estimate of its true value.  Increasing @sample-pages
#    improves estimation quality at the cost of higher computational
#    overhead.
#
# 2. Dirty bitmap mode captures writes to memory (for example by
#    temporarily revoking write access to all pages) and counts page
#    faults.  Information about modified pages is collected into a
#    bitmap, where each bit corresponds to one guest page.  This mode
#    requires that the KVM accelerator property "dirty-ring-size" is
#    *not* set.
#
# 3. Dirty ring mode is similar to dirty bitmap mode, but the
#    information about modified pages is collected into a ring buffer.
#    This mode tracks page modification for each vCPU separately.  It
#    requires that the KVM accelerator property "dirty-ring-size" is
#    set.
#
# @calc-time: time period for which the dirty page rate is calculated.
#     By default it is specified in seconds, but the unit can be set
#     explicitly with @calc-time-unit.  Note that larger @calc-time
#     values will typically result in smaller dirty page rates because
#     page dirtying is a one-time event.  Once some page is counted as
#     dirty during the @calc-time period, further writes to this page
#     will not increase the dirty page rate anymore.
#
# @calc-time-unit: time unit in which @calc-time is specified.  By
#     default it is seconds.  (Since 8.2)
#
# @sample-pages: number of sampled pages per GiB of guest memory.
#     Default value is 512.  For 4 KiB guest pages this corresponds to
#     a sampling ratio of 0.2%.  This argument is used only in page
#     sampling mode.  (Since 6.1)
#
# @mode: mechanism for tracking dirty pages.  Default value is
#     'page-sampling'.  Others are 'dirty-bitmap' and 'dirty-ring'.
#     (Since 6.1)
#
# Since: 5.2
#
# .. qmp-example::
#
#     -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 1,
#                                                     "sample-pages": 512} }
#     <- { "return": {} }
#
# .. qmp-example::
#    :annotated:
#
#    Measure dirty rate using dirty bitmap for 500 milliseconds::
#
#     -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 500,
#         "calc-time-unit": "millisecond", "mode": "dirty-bitmap"} }
#
#     <- { "return": {} }
##
{ 'command': 'calc-dirty-rate', 'data': {'calc-time': 'int64',
                                         '*calc-time-unit': 'TimeUnit',
                                         '*sample-pages': 'int',
                                         '*mode': 'DirtyRateMeasureMode'} }

##
# @query-dirty-rate:
#
# Query results of the most recent invocation of @calc-dirty-rate.
#
# @calc-time-unit: time unit in which to report calculation time.
#     By default it is reported in seconds.  (Since 8.2)
#
# Since: 5.2
#
# .. qmp-example::
#    :title: Measurement is in progress
#
#     <- {"status": "measuring", "sample-pages": 512,
#         "mode": "page-sampling", "start-time": 1693900454, "calc-time": 10,
#         "calc-time-unit": "second"}
#
# .. qmp-example::
#    :title: Measurement has been completed
#
#     <- {"status": "measured", "sample-pages": 512, "dirty-rate": 108,
#         "mode": "page-sampling", "start-time": 1693900454, "calc-time": 10,
#         "calc-time-unit": "second"}
##
{ 'command': 'query-dirty-rate', 'data': {'*calc-time-unit': 'TimeUnit' },
  'returns': 'DirtyRateInfo' }

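# The two commands above are used as a pair: @calc-dirty-rate starts a
# measurement and returns immediately, and @query-dirty-rate is then
# polled until @status reaches 'measured'.  The sketch below is
# illustrative only and is not part of the schema; it assumes QEMU was
# started with something like "-qmp unix:/tmp/qmp.sock,server=on,wait=off",
# and the socket path and helper name are arbitrary:
#
#     import json
#     import socket
#     import time
#
#     def qmp_command(chan, name, arguments=None):
#         # Send one QMP command and return the value of its "return" key.
#         cmd = {"execute": name}
#         if arguments:
#             cmd["arguments"] = arguments
#         chan.write(json.dumps(cmd) + "\n")
#         chan.flush()
#         while True:
#             reply = json.loads(chan.readline())
#             if "return" in reply:
#                 return reply["return"]
#             if "error" in reply:
#                 raise RuntimeError(reply["error"]["desc"])
#             # Anything else is an asynchronous event; ignore it here.
#
#     sock = socket.socket(socket.AF_UNIX)
#     sock.connect("/tmp/qmp.sock")
#     chan = sock.makefile("rw")
#     json.loads(chan.readline())            # consume the QMP greeting
#     qmp_command(chan, "qmp_capabilities")  # leave capabilities negotiation
#
#     qmp_command(chan, "calc-dirty-rate",
#                 {"calc-time": 1, "sample-pages": 512})
#     while True:
#         info = qmp_command(chan, "query-dirty-rate")
#         if info["status"] == "measured":
#             print("dirty page rate:", info["dirty-rate"], "MiB/s")
#             break
#         time.sleep(0.5)
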
##
# @DirtyLimitInfo:
#
# Dirty page rate limit information of a virtual CPU.
#
# @cpu-index: index of a virtual CPU.
#
# @limit-rate: upper limit of dirty page rate (MB/s) for a virtual
#     CPU, 0 means unlimited.
#
# @current-rate: current dirty page rate (MB/s) for a virtual CPU.
#
# Since: 7.1
##
{ 'struct': 'DirtyLimitInfo',
  'data': { 'cpu-index': 'int',
            'limit-rate': 'uint64',
            'current-rate': 'uint64' } }

##
# @set-vcpu-dirty-limit:
#
# Set the upper limit of dirty page rate for virtual CPUs.
#
# Requires KVM with accelerator property "dirty-ring-size" set.  A
# virtual CPU's dirty page rate is a measure of its memory load.  To
# observe dirty page rates, use @calc-dirty-rate.
#
# @cpu-index: index of a virtual CPU, default is all.
#
# @dirty-rate: upper limit of dirty page rate (MB/s) for virtual CPUs.
#
# Since: 7.1
#
# .. qmp-example::
#
#     -> { "execute": "set-vcpu-dirty-limit",
#          "arguments": { "dirty-rate": 200,
#                         "cpu-index": 1 } }
#     <- { "return": {} }
##
{ 'command': 'set-vcpu-dirty-limit',
  'data': { '*cpu-index': 'int',
            'dirty-rate': 'uint64' } }

##
# @cancel-vcpu-dirty-limit:
#
# Cancel the upper limit of dirty page rate for virtual CPUs.
#
# Cancel the dirty page limit for the vCPU which has been set with the
# set-vcpu-dirty-limit command.  Note that this command requires
# support from the dirty ring, same as "set-vcpu-dirty-limit".
#
# @cpu-index: index of a virtual CPU, default is all.
#
# Since: 7.1
#
# .. qmp-example::
#
#     -> { "execute": "cancel-vcpu-dirty-limit",
#          "arguments": { "cpu-index": 1 } }
#     <- { "return": {} }
##
{ 'command': 'cancel-vcpu-dirty-limit',
  'data': { '*cpu-index': 'int'} }

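# A management application will typically combine the two commands
# above with @calc-dirty-rate: measure per-vCPU dirty rates first, then
# throttle only the vCPUs that are dirtying memory too fast.  A rough
# sketch, reusing the illustrative qmp_command() helper from the
# comment after @query-dirty-rate above (the threshold and limit values
# are made up for the example):
#
#     THRESHOLD = 100   # per-vCPU dirty rate considered too high (MB/s)
#     LIMIT = 50        # cap applied to such vCPUs (MB/s)
#
#     qmp_command(chan, "calc-dirty-rate",
#                 {"calc-time": 1, "mode": "dirty-ring"})
#     time.sleep(2)     # let the measurement period elapse
#     info = qmp_command(chan, "query-dirty-rate")
#     for vcpu in info.get("vcpu-dirty-rate", []):
#         if vcpu["dirty-rate"] > THRESHOLD:
#             qmp_command(chan, "set-vcpu-dirty-limit",
#                         {"cpu-index": vcpu["id"], "dirty-rate": LIMIT})
#
#     # Later, when memory pressure is no longer a concern, drop all
#     # per-vCPU limits again:
#     qmp_command(chan, "cancel-vcpu-dirty-limit")
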
##
# @query-vcpu-dirty-limit:
#
# Returns information about virtual CPU dirty page rate limits, if
# any.
#
# Since: 7.1
#
# .. qmp-example::
#
#     -> {"execute": "query-vcpu-dirty-limit"}
#     <- {"return": [
#            { "limit-rate": 60, "current-rate": 3, "cpu-index": 0},
#            { "limit-rate": 60, "current-rate": 3, "cpu-index": 1}]}
##
{ 'command': 'query-vcpu-dirty-limit',
  'returns': [ 'DirtyLimitInfo' ] }

##
# @MigrationThreadInfo:
#
# Information about migration threads
#
# @name: the name of the migration thread
#
# @thread-id: ID of the underlying host thread
#
# Since: 7.2
##
{ 'struct': 'MigrationThreadInfo',
  'data': {'name': 'str',
           'thread-id': 'int'} }

##
# @query-migrationthreads:
#
# Returns information about migration threads
#
# Returns: a list of @MigrationThreadInfo
#
# Since: 7.2
##
{ 'command': 'query-migrationthreads',
  'returns': ['MigrationThreadInfo'] }

##
# @snapshot-save:
#
# Save a VM snapshot
#
# @job-id: identifier for the newly created job
#
# @tag: name of the snapshot to create
#
# @vmstate: block device node name to save vmstate to
#
# @devices: list of block device node names to save a snapshot to
#
# Applications should not assume that the snapshot save is complete
# when this command returns.  The job commands / events must be used
# to determine completion and to fetch details of any errors that
# arise.
#
# Note that execution of the guest CPUs may be stopped during the time
# it takes to save the snapshot.  A future version of QEMU may ensure
# CPUs are executing continuously.
#
# It is strongly recommended that @devices contain all writable block
# device nodes if a consistent snapshot is required.
#
# If @tag already exists, an error will be reported.
#
# ..
qmp-example:: 2297# 2298# -> { "execute": "snapshot-save", 2299# "arguments": { 2300# "job-id": "snapsave0", 2301# "tag": "my-snap", 2302# "vmstate": "disk0", 2303# "devices": ["disk0", "disk1"] 2304# } 2305# } 2306# <- { "return": { } } 2307# <- {"event": "JOB_STATUS_CHANGE", 2308# "timestamp": {"seconds": 1432121972, "microseconds": 744001}, 2309# "data": {"status": "created", "id": "snapsave0"}} 2310# <- {"event": "JOB_STATUS_CHANGE", 2311# "timestamp": {"seconds": 1432122172, "microseconds": 744001}, 2312# "data": {"status": "running", "id": "snapsave0"}} 2313# <- {"event": "STOP", 2314# "timestamp": {"seconds": 1432122372, "microseconds": 744001} } 2315# <- {"event": "RESUME", 2316# "timestamp": {"seconds": 1432122572, "microseconds": 744001} } 2317# <- {"event": "JOB_STATUS_CHANGE", 2318# "timestamp": {"seconds": 1432122772, "microseconds": 744001}, 2319# "data": {"status": "waiting", "id": "snapsave0"}} 2320# <- {"event": "JOB_STATUS_CHANGE", 2321# "timestamp": {"seconds": 1432122972, "microseconds": 744001}, 2322# "data": {"status": "pending", "id": "snapsave0"}} 2323# <- {"event": "JOB_STATUS_CHANGE", 2324# "timestamp": {"seconds": 1432123172, "microseconds": 744001}, 2325# "data": {"status": "concluded", "id": "snapsave0"}} 2326# -> {"execute": "query-jobs"} 2327# <- {"return": [{"current-progress": 1, 2328# "status": "concluded", 2329# "total-progress": 1, 2330# "type": "snapshot-save", 2331# "id": "snapsave0"}]} 2332# 2333# Since: 6.0 2334## 2335{ 'command': 'snapshot-save', 2336 'data': { 'job-id': 'str', 2337 'tag': 'str', 2338 'vmstate': 'str', 2339 'devices': ['str'] } } 2340 2341## 2342# @snapshot-load: 2343# 2344# Load a VM snapshot 2345# 2346# @job-id: identifier for the newly created job 2347# 2348# @tag: name of the snapshot to load. 2349# 2350# @vmstate: block device node name to load vmstate from 2351# 2352# @devices: list of block device node names to load a snapshot from 2353# 2354# Applications should not assume that the snapshot load is complete 2355# when this command returns. The job commands / events must be used 2356# to determine completion and to fetch details of any errors that 2357# arise. 2358# 2359# Note that execution of the guest CPUs will be stopped during the 2360# time it takes to load the snapshot. 2361# 2362# It is strongly recommended that @devices contain all writable block 2363# device nodes that can have changed since the original @snapshot-save 2364# command execution. 2365# 2366# .. 
qmp-example:: 2367# 2368# -> { "execute": "snapshot-load", 2369# "arguments": { 2370# "job-id": "snapload0", 2371# "tag": "my-snap", 2372# "vmstate": "disk0", 2373# "devices": ["disk0", "disk1"] 2374# } 2375# } 2376# <- { "return": { } } 2377# <- {"event": "JOB_STATUS_CHANGE", 2378# "timestamp": {"seconds": 1472124172, "microseconds": 744001}, 2379# "data": {"status": "created", "id": "snapload0"}} 2380# <- {"event": "JOB_STATUS_CHANGE", 2381# "timestamp": {"seconds": 1472125172, "microseconds": 744001}, 2382# "data": {"status": "running", "id": "snapload0"}} 2383# <- {"event": "STOP", 2384# "timestamp": {"seconds": 1472125472, "microseconds": 744001} } 2385# <- {"event": "RESUME", 2386# "timestamp": {"seconds": 1472125872, "microseconds": 744001} } 2387# <- {"event": "JOB_STATUS_CHANGE", 2388# "timestamp": {"seconds": 1472126172, "microseconds": 744001}, 2389# "data": {"status": "waiting", "id": "snapload0"}} 2390# <- {"event": "JOB_STATUS_CHANGE", 2391# "timestamp": {"seconds": 1472127172, "microseconds": 744001}, 2392# "data": {"status": "pending", "id": "snapload0"}} 2393# <- {"event": "JOB_STATUS_CHANGE", 2394# "timestamp": {"seconds": 1472128172, "microseconds": 744001}, 2395# "data": {"status": "concluded", "id": "snapload0"}} 2396# -> {"execute": "query-jobs"} 2397# <- {"return": [{"current-progress": 1, 2398# "status": "concluded", 2399# "total-progress": 1, 2400# "type": "snapshot-load", 2401# "id": "snapload0"}]} 2402# 2403# Since: 6.0 2404## 2405{ 'command': 'snapshot-load', 2406 'data': { 'job-id': 'str', 2407 'tag': 'str', 2408 'vmstate': 'str', 2409 'devices': ['str'] } } 2410 2411## 2412# @snapshot-delete: 2413# 2414# Delete a VM snapshot 2415# 2416# @job-id: identifier for the newly created job 2417# 2418# @tag: name of the snapshot to delete. 2419# 2420# @devices: list of block device node names to delete a snapshot from 2421# 2422# Applications should not assume that the snapshot delete is complete 2423# when this command returns. The job commands / events must be used 2424# to determine completion and to fetch details of any errors that 2425# arise. 2426# 2427# .. qmp-example:: 2428# 2429# -> { "execute": "snapshot-delete", 2430# "arguments": { 2431# "job-id": "snapdelete0", 2432# "tag": "my-snap", 2433# "devices": ["disk0", "disk1"] 2434# } 2435# } 2436# <- { "return": { } } 2437# <- {"event": "JOB_STATUS_CHANGE", 2438# "timestamp": {"seconds": 1442124172, "microseconds": 744001}, 2439# "data": {"status": "created", "id": "snapdelete0"}} 2440# <- {"event": "JOB_STATUS_CHANGE", 2441# "timestamp": {"seconds": 1442125172, "microseconds": 744001}, 2442# "data": {"status": "running", "id": "snapdelete0"}} 2443# <- {"event": "JOB_STATUS_CHANGE", 2444# "timestamp": {"seconds": 1442126172, "microseconds": 744001}, 2445# "data": {"status": "waiting", "id": "snapdelete0"}} 2446# <- {"event": "JOB_STATUS_CHANGE", 2447# "timestamp": {"seconds": 1442127172, "microseconds": 744001}, 2448# "data": {"status": "pending", "id": "snapdelete0"}} 2449# <- {"event": "JOB_STATUS_CHANGE", 2450# "timestamp": {"seconds": 1442128172, "microseconds": 744001}, 2451# "data": {"status": "concluded", "id": "snapdelete0"}} 2452# -> {"execute": "query-jobs"} 2453# <- {"return": [{"current-progress": 1, 2454# "status": "concluded", 2455# "total-progress": 1, 2456# "type": "snapshot-delete", 2457# "id": "snapdelete0"}]} 2458# 2459# Since: 6.0 2460## 2461{ 'command': 'snapshot-delete', 2462 'data': { 'job-id': 'str', 2463 'tag': 'str', 2464 'devices': ['str'] } } 2465
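
# snapshot-save, snapshot-load and snapshot-delete are all asynchronous
# jobs: the empty QMP reply only means the job was created, and the
# outcome has to be read back from the job machinery, either by
# watching JOB_STATUS_CHANGE events as in the examples above or by
# polling query-jobs.  A minimal polling sketch, again reusing the
# illustrative qmp_command() helper from the comment after
# @query-dirty-rate (node names and the snapshot tag are placeholders):
#
#     import time
#
#     qmp_command(chan, "snapshot-save",
#                 {"job-id": "snapsave0", "tag": "my-snap",
#                  "vmstate": "disk0", "devices": ["disk0", "disk1"]})
#
#     job = None
#     while True:
#         jobs = {j["id"]: j for j in qmp_command(chan, "query-jobs")}
#         job = jobs.get("snapsave0")
#         if job is None or job["status"] == "concluded":
#             break
#         time.sleep(1)
#
#     if job is not None and job.get("error"):
#         raise RuntimeError("snapshot-save failed: " + job["error"])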