# -*- Mode: Python -*-
# vim: filetype=python
#

##
# = Migration
##

{ 'include': 'common.json' }
{ 'include': 'sockets.json' }

##
# @MigrationStats:
#
# Detailed migration status.
#
# @transferred: amount of bytes already transferred to the target VM
#
# @remaining: amount of bytes remaining to be transferred to the
#     target VM
#
# @total: total amount of bytes involved in the migration process
#
# @duplicate: number of duplicate (zero) pages (since 1.2)
#
# @normal: number of normal pages (since 1.2)
#
# @normal-bytes: number of normal bytes sent (since 1.2)
#
# @dirty-pages-rate: number of pages dirtied per second by the guest
#     (since 1.3)
#
# @mbps: throughput in megabits/sec.  (since 1.6)
#
# @dirty-sync-count: number of times that dirty ram was synchronized
#     (since 2.1)
#
# @postcopy-requests: The number of page requests received from the
#     destination (since 2.7)
#
# @page-size: The number of bytes per page for the various page-based
#     statistics (since 2.10)
#
# @multifd-bytes: The number of bytes sent through multifd (since 3.0)
#
# @pages-per-second: the number of memory pages transferred per second
#     (Since 4.0)
#
# @precopy-bytes: The number of bytes sent in the pre-copy phase
#     (since 7.0).
#
# @downtime-bytes: The number of bytes sent while the guest is paused
#     (since 7.0).
#
# @postcopy-bytes: The number of bytes sent during the post-copy phase
#     (since 7.0).
#
# @dirty-sync-missed-zero-copy: Number of times dirty RAM
#     synchronization could not avoid copying dirty pages.  This is
#     between 0 and @dirty-sync-count * @multifd-channels.  (since
#     7.1)
#
# Since: 0.14
##
{ 'struct': 'MigrationStats',
  'data': {'transferred': 'int', 'remaining': 'int', 'total': 'int',
           'duplicate': 'int',
           'normal': 'int',
           'normal-bytes': 'int', 'dirty-pages-rate': 'int',
           'mbps': 'number', 'dirty-sync-count': 'int',
           'postcopy-requests': 'int', 'page-size': 'int',
           'multifd-bytes': 'uint64', 'pages-per-second': 'uint64',
           'precopy-bytes': 'uint64', 'downtime-bytes': 'uint64',
           'postcopy-bytes': 'uint64',
           'dirty-sync-missed-zero-copy': 'uint64' } }

##
# @XBZRLECacheStats:
#
# Detailed XBZRLE migration cache statistics
#
# @cache-size: XBZRLE cache size
#
# @bytes: amount of bytes already transferred to the target VM
#
# @pages: amount of pages transferred to the target VM
#
# @cache-miss: number of cache misses
#
# @cache-miss-rate: rate of cache misses (since 2.1)
#
# @encoding-rate: rate of encoded bytes (since 5.1)
#
# @overflow: number of overflows
#
# Since: 1.2
##
{ 'struct': 'XBZRLECacheStats',
  'data': {'cache-size': 'size', 'bytes': 'int', 'pages': 'int',
           'cache-miss': 'int', 'cache-miss-rate': 'number',
           'encoding-rate': 'number', 'overflow': 'int' } }

##
# @CompressionStats:
#
# Detailed migration compression statistics
#
# @pages: amount of pages compressed and transferred to the target VM
#
# @busy: count of times that no free thread was available to compress
#     data
#
# @busy-rate: rate of thread busy
#
# @compressed-size: amount of bytes after compression
#
# @compression-rate: rate of compressed size
#
# Since: 3.1
##
{ 'struct': 'CompressionStats',
  'data': {'pages': 'int', 'busy': 'int', 'busy-rate': 'number',
           'compressed-size': 'int', 'compression-rate': 'number' } }

##
# @MigrationStatus:
#
# An enumeration of migration status.
#
# @none: no migration has ever happened.
#
# @setup: migration process has been initiated.
#
# @cancelling: in the process of cancelling migration.
#
# @cancelled: cancelling migration is finished.
#
# @active: in the process of doing migration.
#
# @postcopy-active: like active, but now in postcopy mode.
#     (since 2.5)
#
# @postcopy-paused: during postcopy but paused.  (since 3.0)
#
# @postcopy-recover-setup: setup phase for a postcopy recovery
#     process, preparing for a recovery phase to start.  (since 9.1)
#
# @postcopy-recover: trying to recover from a paused postcopy.
#     (since 3.0)
#
# @completed: migration is finished.
#
# @failed: some error occurred during migration process.
#
# @colo: VM is in the process of fault tolerance; the VM cannot get
#     into this state unless the colo capability is enabled for
#     migration.  (since 2.8)
#
# @pre-switchover: Paused before device serialisation.  (since 2.11)
#
# @device: During device serialisation when pause-before-switchover is
#     enabled (since 2.11)
#
# @wait-unplug: wait for device unplug request by guest OS to be
#     completed.  (since 4.2)
#
# Since: 2.3
##
{ 'enum': 'MigrationStatus',
  'data': [ 'none', 'setup', 'cancelling', 'cancelled',
            'active', 'postcopy-active', 'postcopy-paused',
            'postcopy-recover-setup',
            'postcopy-recover', 'completed', 'failed', 'colo',
            'pre-switchover', 'device', 'wait-unplug' ] }

##
# @VfioStats:
#
# Detailed VFIO devices migration statistics
#
# @transferred: amount of bytes transferred to the target VM by VFIO
#     devices
#
# Since: 5.2
##
{ 'struct': 'VfioStats',
  'data': {'transferred': 'int' } }

##
# @MigrationInfo:
#
# Information about current migration process.
#
# @status: @MigrationStatus describing the current migration status.
#     If this field is not returned, no migration process has been
#     initiated
#
# @ram: @MigrationStats containing detailed migration status, only
#     returned if status is 'active' or 'completed' (since 1.2)
#
# @xbzrle-cache: @XBZRLECacheStats containing detailed XBZRLE
#     migration statistics, only returned if XBZRLE feature is on and
#     status is 'active' or 'completed' (since 1.2)
#
# @total-time: total amount of milliseconds since migration started.
#     If migration has ended, it returns the total migration time.
#     (since 1.2)
#
# @downtime: only present when migration finishes correctly; the total
#     downtime in milliseconds for the guest.  (since 1.3)
#
# @expected-downtime: only present while migration is active; the
#     expected downtime in milliseconds for the guest in the last walk
#     of the dirty bitmap.  (since 1.3)
#
# @setup-time: amount of setup time in milliseconds *before* the
#     iterations begin but *after* the QMP command is issued.  This is
#     designed to provide an accounting of any activities (such as
#     RDMA pinning) which may be expensive, but do not actually occur
#     during the iterative migration rounds themselves.  (since 1.6)
#
# @cpu-throttle-percentage: percentage of time guest cpus are being
#     throttled during auto-converge.  This is only present when
#     auto-converge has started throttling guest cpus.  (Since 2.7)
#
# @error-desc: the human-readable error description string.  Clients
#     should not attempt to parse the error strings.  (Since 2.7)
#
# @postcopy-blocktime: total time when all vCPUs were blocked during
#     postcopy live migration.  This is only present when the
#     postcopy-blocktime migration capability is enabled.  (Since 3.0)
#
# @postcopy-vcpu-blocktime: list of the postcopy blocktime per vCPU.
#     This is only present when the postcopy-blocktime migration
#     capability is enabled.  (Since 3.0)
#
# @socket-address: Only used for tcp, to know what the real port is
#     (Since 4.0)
#
# @vfio: @VfioStats containing detailed VFIO devices migration
#     statistics, only returned if VFIO device is present, migration
#     is supported by all VFIO devices and status is 'active' or
#     'completed' (since 5.2)
#
# @blocked-reasons: A list of reasons an outgoing migration is
#     blocked.  Present and non-empty when migration is blocked.
#     (since 6.0)
#
# @dirty-limit-throttle-time-per-round: Maximum throttle time
#     (in microseconds) of virtual CPUs each dirty ring full round,
#     which shows how MigrationCapability dirty-limit affects the
#     guest during live migration.  (Since 8.1)
#
# @dirty-limit-ring-full-time: Estimated average dirty ring full time
#     (in microseconds) for each dirty ring full round.  The value
#     equals the dirty ring memory size divided by the average dirty
#     page rate of the virtual CPU, which can be used to observe the
#     average memory load of the virtual CPU indirectly.  Note that
#     zero means the guest doesn't dirty memory.  (Since 8.1)
#
# Since: 0.14
##
{ 'struct': 'MigrationInfo',
  'data': {'*status': 'MigrationStatus', '*ram': 'MigrationStats',
           '*vfio': 'VfioStats',
           '*xbzrle-cache': 'XBZRLECacheStats',
           '*total-time': 'int',
           '*expected-downtime': 'int',
           '*downtime': 'int',
           '*setup-time': 'int',
           '*cpu-throttle-percentage': 'int',
           '*error-desc': 'str',
           '*blocked-reasons': ['str'],
           '*postcopy-blocktime': 'uint32',
           '*postcopy-vcpu-blocktime': ['uint32'],
           '*socket-address': ['SocketAddress'],
           '*dirty-limit-throttle-time-per-round': 'uint64',
           '*dirty-limit-ring-full-time': 'uint64'} }

##
# @query-migrate:
#
# Returns information about current migration process.  If migration
# is active there will be another json-object with RAM migration
# status.
#
# Returns: @MigrationInfo
#
# Since: 0.14
#
# Examples:
#
# 1. Before the first migration
#
#     -> { "execute": "query-migrate" }
#     <- { "return": {} }
#
# 2. Migration is done and has succeeded
#
#     -> { "execute": "query-migrate" }
#     <- { "return": {
#             "status": "completed",
#             "total-time":12345,
#             "setup-time":12345,
#             "downtime":12345,
#             "ram":{
#                "transferred":123,
#                "remaining":123,
#                "total":246,
#                "duplicate":123,
#                "normal":123,
#                "normal-bytes":123456,
#                "dirty-sync-count":15
#             }
#          }
#        }
#
# 3. Migration is done and has failed
#
#     -> { "execute": "query-migrate" }
#     <- { "return": { "status": "failed" } }
#
# 4. Migration is being performed:
#
#     -> { "execute": "query-migrate" }
#     <- {
#           "return":{
#              "status":"active",
#              "total-time":12345,
#              "setup-time":12345,
#              "expected-downtime":12345,
#              "ram":{
#                 "transferred":123,
#                 "remaining":123,
#                 "total":246,
#                 "duplicate":123,
#                 "normal":123,
#                 "normal-bytes":123456,
#                 "dirty-sync-count":15
#              }
#           }
#        }
#
# 5. Migration is being performed and XBZRLE is active:
#
#     -> { "execute": "query-migrate" }
#     <- {
#           "return":{
#              "status":"active",
#              "total-time":12345,
#              "setup-time":12345,
#              "expected-downtime":12345,
#              "ram":{
#                 "total":1057024,
#                 "remaining":1053304,
#                 "transferred":3720,
#                 "duplicate":10,
#                 "normal":3333,
#                 "normal-bytes":3412992,
#                 "dirty-sync-count":15
#              },
#              "xbzrle-cache":{
#                 "cache-size":67108864,
#                 "bytes":20971520,
#                 "pages":2444343,
#                 "cache-miss":2244,
#                 "cache-miss-rate":0.123,
#                 "encoding-rate":80.1,
#                 "overflow":34434
#              }
#           }
#        }
##
{ 'command': 'query-migrate', 'returns': 'MigrationInfo' }

##
# @MigrationCapability:
#
# Migration capabilities enumeration
#
# @xbzrle: Migration supports xbzrle (Xor Based Zero Run Length
#     Encoding).  This feature allows us to minimize migration traffic
#     for certain work loads, by sending compressed difference of the
#     pages
#
# @rdma-pin-all: Controls whether or not the entire VM memory
#     footprint is mlock()'d on demand or all at once.  Refer to
#     docs/rdma.txt for usage.  Disabled by default.  (since 2.0)
#
# @zero-blocks: During storage migration encode blocks of zeroes
#     efficiently.  This essentially saves 1MB of zeroes per block on
#     the wire.  Enabling requires source and target VM to support
#     this feature.  To enable it is sufficient to enable the
#     capability on the source VM.  The feature is disabled by
#     default.  (since 1.6)
#
# @events: generate events for each migration state change (since 2.4)
#
# @auto-converge: If enabled, QEMU will automatically throttle down
#     the guest to speed up convergence of RAM migration.  (since 1.6)
#
# @postcopy-ram: Start executing on the migration target before all of
#     RAM has been migrated, pulling the remaining pages along as
#     needed.  The capability must have the same setting on both
#     source and target or migration will not even start.  NOTE: If
#     the migration fails during postcopy the VM will fail.  A sketch
#     of a typical flow appears after this enum.  (since 2.6)
#
# @x-colo: If enabled, migration will never end, and the state of the
#     VM on the primary side will be migrated continuously to the VM
#     on the secondary side; this process is called COarse-Grain LOck
#     Stepping (COLO) for Non-stop Service.  (since 2.8)
#
# @release-ram: if enabled, qemu will free the migrated ram pages on
#     the source during postcopy-ram migration.  (since 2.9)
#
# @return-path: If enabled, migration will use the return path even
#     for precopy.  (since 2.10)
#
# @pause-before-switchover: Pause outgoing migration before
#     serialising device state and before disabling block IO (since
#     2.11)
#
# @multifd: Use more than one fd for migration (since 4.0)
#
# @dirty-bitmaps: If enabled, QEMU will migrate named dirty bitmaps.
#     (since 2.12)
#
# @postcopy-blocktime: Calculate downtime for postcopy live migration
#     (since 3.0)
#
# @late-block-activate: If enabled, the destination will not activate
#     block devices (and thus take locks) immediately at the end of
#     migration.  (since 3.0)
#
# @x-ignore-shared: If enabled, QEMU will not migrate shared memory
#     that is accessible on the destination machine.  (since 4.0)
#
# @validate-uuid: Send the UUID of the source to allow the destination
#     to ensure it is the same.  (since 4.2)
#
# @background-snapshot: If enabled, the migration stream will be a
#     snapshot of the VM exactly at the point when the migration
#     procedure starts.  The VM RAM is saved with running VM.
#     (since 6.0)
#
# @zero-copy-send: Controls behavior on sending memory pages on
#     migration.  When true, enables a zero-copy mechanism for sending
#     memory pages, if host supports it.  Requires that QEMU be
#     permitted to use locked memory for guest RAM pages.  (since 7.1)
#
# @postcopy-preempt: If enabled, the migration process will allow
#     postcopy requests to preempt precopy stream, so postcopy
#     requests will be handled faster.  This is a performance feature
#     and should not affect the correctness of postcopy migration.
#     (since 7.1)
#
# @switchover-ack: If enabled, migration will not stop the source VM
#     and complete the migration until an ACK is received from the
#     destination that it's OK to do so.  Exactly when this ACK is
#     sent depends on the migrated devices that use this feature.  For
#     example, a device can use it to make sure some of its data is
#     sent and loaded in the destination before doing switchover.
#     This can reduce downtime if devices that support this capability
#     are present.  'return-path' capability must be enabled to use
#     it.  (since 8.1)
#
# @dirty-limit: If enabled, migration will throttle vCPUs as needed to
#     keep their dirty page rate within @vcpu-dirty-limit.  This can
#     improve responsiveness of large guests during live migration,
#     and can result in more stable read performance.  Requires KVM
#     with accelerator property "dirty-ring-size" set.  (Since 8.1)
#
# @mapped-ram: Migrate using fixed offsets in the migration file for
#     each RAM page.  Requires a migration URI that supports seeking,
#     such as a file.  (since 9.0)
#
# Features:
#
# @unstable: Members @x-colo and @x-ignore-shared are experimental.
#
# Since: 1.2
##
{ 'enum': 'MigrationCapability',
  'data': ['xbzrle', 'rdma-pin-all', 'auto-converge', 'zero-blocks',
           'events', 'postcopy-ram',
           { 'name': 'x-colo', 'features': [ 'unstable' ] },
           'release-ram',
           'return-path', 'pause-before-switchover', 'multifd',
           'dirty-bitmaps', 'postcopy-blocktime', 'late-block-activate',
           { 'name': 'x-ignore-shared', 'features': [ 'unstable' ] },
           'validate-uuid', 'background-snapshot',
           'zero-copy-send', 'postcopy-preempt', 'switchover-ack',
           'dirty-limit', 'mapped-ram'] }
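
# A sketch only, not part of the schema: a typical postcopy flow
# enables @postcopy-ram on both sides, starts a normal migration, and
# later switches over with migrate-start-postcopy (defined below).
# The URI is illustrative:
#
#     -> { "execute": "migrate-set-capabilities",
#          "arguments": { "capabilities": [
#              { "capability": "postcopy-ram", "state": true } ] } }
#     <- { "return": {} }
#     -> { "execute": "migrate", "arguments": { "uri": "tcp:0:4446" } }
#     <- { "return": {} }
#     -> { "execute": "migrate-start-postcopy" }
#     <- { "return": {} }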

##
# @MigrationCapabilityStatus:
#
# Migration capability information
#
# @capability: capability enum
#
# @state: capability state bool
#
# Since: 1.2
##
{ 'struct': 'MigrationCapabilityStatus',
  'data': { 'capability': 'MigrationCapability', 'state': 'bool' } }

##
# @migrate-set-capabilities:
#
# Enable/Disable the following migration capabilities (like xbzrle)
#
# @capabilities: json array of capability modifications to make
#
# Since: 1.2
#
# Example:
#
#     -> { "execute": "migrate-set-capabilities", "arguments":
#          { "capabilities": [ { "capability": "xbzrle", "state": true } ] } }
#     <- { "return": {} }
##
{ 'command': 'migrate-set-capabilities',
  'data': { 'capabilities': ['MigrationCapabilityStatus'] } }

##
# @query-migrate-capabilities:
#
# Returns information about the current migration capabilities status
#
# Returns: @MigrationCapabilityStatus
#
# Since: 1.2
#
# Example:
#
#     -> { "execute": "query-migrate-capabilities" }
#     <- { "return": [
#            {"state": false, "capability": "xbzrle"},
#            {"state": false, "capability": "rdma-pin-all"},
#            {"state": false, "capability": "auto-converge"},
#            {"state": false, "capability": "zero-blocks"},
#            {"state": true, "capability": "events"},
#            {"state": false, "capability": "postcopy-ram"},
#            {"state": false, "capability": "x-colo"}
#          ]}
##
{ 'command': 'query-migrate-capabilities',
  'returns': ['MigrationCapabilityStatus'] }

##
# @MultiFDCompression:
#
# An enumeration of multifd compression methods.
#
# @none: no compression.
#
# @zlib: use zlib compression method.
#
# @zstd: use zstd compression method.
#
# @qpl: use qpl compression method.  The Query Processing Library
#     (qpl) is based on the deflate compression algorithm and uses the
#     Intel In-Memory Analytics Accelerator (IAA) for accelerated
#     compression and decompression.  (Since 9.1)
#
# @uadk: use UADK library compression method.  (Since 9.1)
#
# Since: 5.0
##
{ 'enum': 'MultiFDCompression',
  'data': [ 'none', 'zlib',
            { 'name': 'zstd', 'if': 'CONFIG_ZSTD' },
            { 'name': 'qpl', 'if': 'CONFIG_QPL' },
            { 'name': 'uadk', 'if': 'CONFIG_UADK' } ] }
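
# A sketch only, not part of the schema: a compression method from
# this enum is selected with migrate-set-parameters (defined below).
# zstd assumes a build with CONFIG_ZSTD; the channel count is
# illustrative:
#
#     -> { "execute": "migrate-set-capabilities",
#          "arguments": { "capabilities": [
#              { "capability": "multifd", "state": true } ] } }
#     <- { "return": {} }
#     -> { "execute": "migrate-set-parameters",
#          "arguments": { "multifd-compression": "zstd",
#                         "multifd-channels": 4 } }
#     <- { "return": {} }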

##
# @MigMode:
#
# @normal: the original form of migration.  (since 8.2)
#
# @cpr-reboot: The migrate command stops the VM and saves state to the
#     URI.  After quitting QEMU, the user resumes by running QEMU
#     -incoming.
#
#     This mode allows the user to quit QEMU, optionally update and
#     reboot the OS, and restart QEMU.  If the user reboots, the URI
#     must persist across the reboot, such as by using a file.
#
#     Unlike normal mode, the use of certain local storage options
#     does not block the migration, but the user must not modify the
#     contents of guest block devices between the quit and restart.
#
#     This mode supports VFIO devices provided the user first puts the
#     guest in the suspended runstate, such as by issuing
#     guest-suspend-ram to the QEMU guest agent.
#
#     Best performance is achieved when the memory backend is shared
#     and the @x-ignore-shared migration capability is set, but this
#     is not required.  Further, if the user reboots before restarting
#     such a configuration, the shared memory must persist across the
#     reboot, such as by backing it with a dax device.
#
#     @cpr-reboot may not be used with postcopy, background-snapshot,
#     or COLO.
#
#     (since 8.2)
##
{ 'enum': 'MigMode',
  'data': [ 'normal', 'cpr-reboot' ] }
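
# A sketch only, not part of the schema: a cpr-reboot migration might
# be driven as below, with the guest resumed later by a new QEMU
# started with -incoming.  The file URI is hypothetical:
#
#     -> { "execute": "migrate-set-parameters",
#          "arguments": { "mode": "cpr-reboot" } }
#     <- { "return": {} }
#     -> { "execute": "migrate",
#          "arguments": { "uri": "file:/var/run/qemu/vm.cpr" } }
#     <- { "return": {} }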

##
# @ZeroPageDetection:
#
# @none: Do not perform zero page checking.
#
# @legacy: Perform zero page checking in the main migration thread.
#
# @multifd: Perform zero page checking in the multifd sender thread if
#     multifd migration is enabled, else in the main migration thread
#     as for @legacy.
#
# Since: 9.0
##
{ 'enum': 'ZeroPageDetection',
  'data': [ 'none', 'legacy', 'multifd' ] }

##
# @BitmapMigrationBitmapAliasTransform:
#
# @persistent: If present, the bitmap will be made persistent or
#     transient depending on this parameter.
#
# Since: 6.0
##
{ 'struct': 'BitmapMigrationBitmapAliasTransform',
  'data': {
      '*persistent': 'bool'
  } }

##
# @BitmapMigrationBitmapAlias:
#
# @name: The name of the bitmap.
#
# @alias: An alias name for migration (for example the bitmap name on
#     the opposite site).
#
# @transform: Allows the modification of the migrated bitmap.  (since
#     6.0)
#
# Since: 5.2
##
{ 'struct': 'BitmapMigrationBitmapAlias',
  'data': {
      'name': 'str',
      'alias': 'str',
      '*transform': 'BitmapMigrationBitmapAliasTransform'
  } }

##
# @BitmapMigrationNodeAlias:
#
# Maps a block node name and the bitmaps it has to aliases for dirty
# bitmap migration.
#
# @node-name: A block node name.
#
# @alias: An alias block node name for migration (for example the node
#     name on the opposite site).
#
# @bitmaps: Mappings for the bitmaps on this node.
#
# Since: 5.2
##
{ 'struct': 'BitmapMigrationNodeAlias',
  'data': {
      'node-name': 'str',
      'alias': 'str',
      'bitmaps': [ 'BitmapMigrationBitmapAlias' ]
  } }
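
# A sketch only, not part of the schema: these aliases are consumed by
# the @block-bitmap-mapping parameter (described below); node and
# bitmap names are illustrative:
#
#     -> { "execute": "migrate-set-parameters",
#          "arguments": { "block-bitmap-mapping": [
#              { "node-name": "node0", "alias": "node-alias0",
#                "bitmaps": [ { "name": "bitmap0",
#                               "alias": "bitmap-alias0" } ] } ] } }
#     <- { "return": {} }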

##
# @MigrationParameter:
#
# Migration parameters enumeration
#
# @announce-initial: Initial delay (in milliseconds) before sending
#     the first announce (Since 4.0)
#
# @announce-max: Maximum delay (in milliseconds) between packets in
#     the announcement (Since 4.0)
#
# @announce-rounds: Number of self-announce packets sent after
#     migration (Since 4.0)
#
# @announce-step: Increase in delay (in milliseconds) between
#     subsequent packets in the announcement (Since 4.0)
#
# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
#     bytes_xfer_period to trigger throttling.  It is expressed as a
#     percentage.  The default value is 50.  (Since 5.0)
#
# @cpu-throttle-initial: Initial percentage of time guest cpus are
#     throttled when migration auto-converge is activated.  The
#     default value is 20.  (Since 2.7)
#
# @cpu-throttle-increment: throttle percentage increase each time
#     auto-converge detects that migration is not making progress.
#     The default value is 10.  (Since 2.7)
#
# @cpu-throttle-tailslow: Make CPU throttling slower at the tail
#     stage.  At the tail stage of throttling, the Guest is very
#     sensitive to CPU percentage while the @cpu-throttle-increment is
#     usually excessive there.  If this parameter is true, we will
#     compute the ideal CPU percentage used by the Guest, which may
#     exactly make the dirty rate match the dirty rate threshold.
#     Then we will choose a smaller throttle increment between the one
#     specified by @cpu-throttle-increment and the one generated by
#     the ideal CPU percentage.  Therefore, it is compatible with
#     traditional throttling, while the throttle increment won't be
#     excessive at the tail stage.  The default value is false.
#     (Since 5.1)
#
# @tls-creds: ID of the 'tls-creds' object that provides credentials
#     for establishing a TLS connection over the migration data
#     channel.  On the outgoing side of the migration, the credentials
#     must be for a 'client' endpoint, while for the incoming side the
#     credentials must be for a 'server' endpoint.  Setting this to a
#     non-empty string enables TLS for all migrations.  An empty
#     string means that QEMU will use plain text mode for migration,
#     rather than TLS.  (Since 2.7)
#
# @tls-hostname: migration target's hostname for validating the
#     server's x509 certificate identity.  If empty, QEMU will use the
#     hostname from the migration URI, if any.  A non-empty value is
#     required when using x509 based TLS credentials and the migration
#     URI does not include a hostname, such as fd: or exec: based
#     migration.  (Since 2.7)
#
#     Note: empty value works only since 2.9.
#
# @tls-authz: ID of the 'authz' object subclass that provides access
#     control checking of the TLS x509 certificate distinguished name.
#     This object is only resolved at time of use, so can be deleted
#     and recreated on the fly while the migration server is active.
#     If missing, it will default to denying access (Since 4.0)
#
# @max-bandwidth: maximum speed for migration, in bytes per second.
#     (Since 2.8)
#
# @avail-switchover-bandwidth: The available bandwidth that migration
#     can use during the switchover phase.  NOTE!  This does not limit
#     the bandwidth during switchover, but only for calculations when
#     making decisions to switchover.  By default, this value is zero,
#     which means QEMU will estimate the bandwidth automatically.
#     This can be set when the estimated value is not accurate, while
#     the user is able to guarantee such bandwidth is available when
#     switching over.  When specified correctly, this can make the
#     switchover decision much more accurate.  (Since 8.2)
#
# @downtime-limit: Set the maximum tolerated downtime (in
#     milliseconds) for migration.  (Since 2.8)
#
# @x-checkpoint-delay: The delay time (in ms) between two COLO
#     checkpoints in periodic mode.  (Since 2.8)
#
# @multifd-channels: Number of channels used to migrate data in
#     parallel.  This is the same as the number of sockets used for
#     migration.  The default value is 2 (since 4.0)
#
# @xbzrle-cache-size: cache size to be used by XBZRLE migration.  It
#     needs to be a multiple of the target page size and a power of 2
#     (Since 2.11)
#
# @max-postcopy-bandwidth: Background transfer bandwidth during
#     postcopy.  Defaults to 0 (unlimited).  In bytes per second.
#     (Since 3.0)
#
# @max-cpu-throttle: maximum cpu throttle percentage.  Defaults to 99.
#     (Since 3.1)
#
# @multifd-compression: Which compression method to use.  Defaults to
#     none.  (Since 5.0)
#
# @multifd-zlib-level: Set the compression level to be used in live
#     migration; the compression level is an integer between 0 and 9,
#     where 0 means no compression, 1 means the best compression
#     speed, and 9 means the best compression ratio, which will
#     consume more CPU.  Defaults to 1.  (Since 5.0)
#
# @multifd-zstd-level: Set the compression level to be used in live
#     migration; the compression level is an integer between 0 and 20,
#     where 0 means no compression, 1 means the best compression
#     speed, and 20 means the best compression ratio, which will
#     consume more CPU.  Defaults to 1.  (Since 5.0)
#
# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
#     aliases for the purpose of dirty bitmap migration.  Such aliases
#     may for example be the corresponding names on the opposite site.
#     The mapping must be one-to-one, but not necessarily complete: On
#     the source, unmapped bitmaps and all bitmaps on unmapped nodes
#     will be ignored.  On the destination, encountering an unmapped
#     alias in the incoming migration stream will result in a report,
#     and all further bitmap migration data will then be discarded.
#     Note that the destination does not know about bitmaps it does
#     not receive, so there is no limitation or requirement regarding
#     the number of bitmaps received, or how they are named, or on
#     which nodes they are placed.  By default (when this parameter
#     has never been set), bitmap names are mapped to themselves.
#     Nodes are mapped to their block device name if there is one, and
#     to their node name otherwise.  (Since 5.2)
#
# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty
#     limit during live migration.  Should be in the range 1 to
#     1000ms.  Defaults to 1000ms.  (Since 8.1)
#
# @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration.
#     Defaults to 1.  (Since 8.1)
#
# @mode: Migration mode.  See description in @MigMode.  Default is
#     'normal'.  (Since 8.2)
#
# @zero-page-detection: Whether and how to detect zero pages.
#     See description in @ZeroPageDetection.  Default is 'multifd'.
#     (since 9.0)
#
# @direct-io: Open migration files with O_DIRECT when possible.  This
#     only has effect if the @mapped-ram capability is enabled.
#     (Since 9.1)
#
# Features:
#
# @unstable: Members @x-checkpoint-delay and
#     @x-vcpu-dirty-limit-period are experimental.
#
# Since: 2.4
##
{ 'enum': 'MigrationParameter',
  'data': ['announce-initial', 'announce-max',
           'announce-rounds', 'announce-step',
           'throttle-trigger-threshold',
           'cpu-throttle-initial', 'cpu-throttle-increment',
           'cpu-throttle-tailslow',
           'tls-creds', 'tls-hostname', 'tls-authz', 'max-bandwidth',
           'avail-switchover-bandwidth', 'downtime-limit',
           { 'name': 'x-checkpoint-delay', 'features': [ 'unstable' ] },
           'multifd-channels',
           'xbzrle-cache-size', 'max-postcopy-bandwidth',
           'max-cpu-throttle', 'multifd-compression',
           'multifd-zlib-level', 'multifd-zstd-level',
           'block-bitmap-mapping',
           { 'name': 'x-vcpu-dirty-limit-period', 'features': ['unstable'] },
           'vcpu-dirty-limit',
           'mode',
           'zero-page-detection',
           'direct-io'] }
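
# A sketch only, not part of the schema: parameters in this enum are
# written with migrate-set-parameters and read back with
# query-migrate-parameters (both defined below); the values here are
# arbitrary:
#
#     -> { "execute": "migrate-set-parameters",
#          "arguments": { "max-bandwidth": 33554432,
#                         "downtime-limit": 300 } }
#     <- { "return": {} }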

##
# @MigrateSetParameters:
#
# @announce-initial: Initial delay (in milliseconds) before sending
#     the first announce (Since 4.0)
#
# @announce-max: Maximum delay (in milliseconds) between packets in
#     the announcement (Since 4.0)
#
# @announce-rounds: Number of self-announce packets sent after
#     migration (Since 4.0)
#
# @announce-step: Increase in delay (in milliseconds) between
#     subsequent packets in the announcement (Since 4.0)
#
# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
#     bytes_xfer_period to trigger throttling.  It is expressed as a
#     percentage.  The default value is 50.  (Since 5.0)
#
# @cpu-throttle-initial: Initial percentage of time guest cpus are
#     throttled when migration auto-converge is activated.  The
#     default value is 20.  (Since 2.7)
#
# @cpu-throttle-increment: throttle percentage increase each time
#     auto-converge detects that migration is not making progress.
#     The default value is 10.  (Since 2.7)
#
# @cpu-throttle-tailslow: Make CPU throttling slower at the tail
#     stage.  At the tail stage of throttling, the Guest is very
#     sensitive to CPU percentage while the @cpu-throttle-increment is
#     usually excessive there.  If this parameter is true, we will
#     compute the ideal CPU percentage used by the Guest, which may
#     exactly make the dirty rate match the dirty rate threshold.
#     Then we will choose a smaller throttle increment between the one
#     specified by @cpu-throttle-increment and the one generated by
#     the ideal CPU percentage.  Therefore, it is compatible with
#     traditional throttling, while the throttle increment won't be
#     excessive at the tail stage.  The default value is false.
#     (Since 5.1)
#
# @tls-creds: ID of the 'tls-creds' object that provides credentials
#     for establishing a TLS connection over the migration data
#     channel.  On the outgoing side of the migration, the credentials
#     must be for a 'client' endpoint, while for the incoming side the
#     credentials must be for a 'server' endpoint.  Setting this to a
#     non-empty string enables TLS for all migrations.  An empty
#     string means that QEMU will use plain text mode for migration,
#     rather than TLS.  This is the default.  (Since 2.7)
#
# @tls-hostname: migration target's hostname for validating the
#     server's x509 certificate identity.  If empty, QEMU will use the
#     hostname from the migration URI, if any.  A non-empty value is
#     required when using x509 based TLS credentials and the migration
#     URI does not include a hostname, such as fd: or exec: based
#     migration.  (Since 2.7)
#
#     Note: empty value works only since 2.9.
#
# @tls-authz: ID of the 'authz' object subclass that provides access
#     control checking of the TLS x509 certificate distinguished name.
#     This object is only resolved at time of use, so can be deleted
#     and recreated on the fly while the migration server is active.
#     If missing, it will default to denying access (Since 4.0)
#
# @max-bandwidth: maximum speed for migration, in bytes per second.
#     (Since 2.8)
#
# @avail-switchover-bandwidth: The available bandwidth that migration
#     can use during the switchover phase.  NOTE!  This does not limit
#     the bandwidth during switchover, but only for calculations when
#     making decisions to switchover.  By default, this value is zero,
#     which means QEMU will estimate the bandwidth automatically.
#     This can be set when the estimated value is not accurate, while
#     the user is able to guarantee such bandwidth is available when
#     switching over.  When specified correctly, this can make the
#     switchover decision much more accurate.  (Since 8.2)
#
# @downtime-limit: Set the maximum tolerated downtime (in
#     milliseconds) for migration.  (Since 2.8)
#
# @x-checkpoint-delay: The delay time (in ms) between two COLO
#     checkpoints in periodic mode.  (Since 2.8)
#
# @multifd-channels: Number of channels used to migrate data in
#     parallel.  This is the same as the number of sockets used for
#     migration.  The default value is 2 (since 4.0)
#
# @xbzrle-cache-size: cache size to be used by XBZRLE migration.  It
#     needs to be a multiple of the target page size and a power of 2
#     (Since 2.11)
#
# @max-postcopy-bandwidth: Background transfer bandwidth during
#     postcopy.  Defaults to 0 (unlimited).  In bytes per second.
#     (Since 3.0)
#
# @max-cpu-throttle: maximum cpu throttle percentage.  Defaults to 99.
#     (Since 3.1)
#
# @multifd-compression: Which compression method to use.  Defaults to
#     none.  (Since 5.0)
#
# @multifd-zlib-level: Set the compression level to be used in live
#     migration; the compression level is an integer between 0 and 9,
#     where 0 means no compression, 1 means the best compression
#     speed, and 9 means the best compression ratio, which will
#     consume more CPU.  Defaults to 1.  (Since 5.0)
#
# @multifd-zstd-level: Set the compression level to be used in live
#     migration; the compression level is an integer between 0 and 20,
#     where 0 means no compression, 1 means the best compression
#     speed, and 20 means the best compression ratio, which will
#     consume more CPU.  Defaults to 1.  (Since 5.0)
#
# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
#     aliases for the purpose of dirty bitmap migration.  Such aliases
#     may for example be the corresponding names on the opposite site.
#     The mapping must be one-to-one, but not necessarily complete: On
#     the source, unmapped bitmaps and all bitmaps on unmapped nodes
#     will be ignored.  On the destination, encountering an unmapped
#     alias in the incoming migration stream will result in a report,
#     and all further bitmap migration data will then be discarded.
#     Note that the destination does not know about bitmaps it does
#     not receive, so there is no limitation or requirement regarding
#     the number of bitmaps received, or how they are named, or on
#     which nodes they are placed.  By default (when this parameter
#     has never been set), bitmap names are mapped to themselves.
#     Nodes are mapped to their block device name if there is one, and
#     to their node name otherwise.  (Since 5.2)
#
# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty
#     limit during live migration.  Should be in the range 1 to
#     1000ms.  Defaults to 1000ms.  (Since 8.1)
#
# @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration.
#     Defaults to 1.  (Since 8.1)
#
# @mode: Migration mode.  See description in @MigMode.  Default is
#     'normal'.  (Since 8.2)
#
# @zero-page-detection: Whether and how to detect zero pages.
#     See description in @ZeroPageDetection.  Default is 'multifd'.
#     (since 9.0)
#
# @direct-io: Open migration files with O_DIRECT when possible.  This
#     only has effect if the @mapped-ram capability is enabled.
#     (Since 9.1)
#
# Features:
#
# @unstable: Members @x-checkpoint-delay and
#     @x-vcpu-dirty-limit-period are experimental.
#
# TODO: either fuse back into MigrationParameters, or make
#     MigrationParameters members mandatory
#
# Since: 2.4
##
{ 'struct': 'MigrateSetParameters',
  'data': { '*announce-initial': 'size',
            '*announce-max': 'size',
            '*announce-rounds': 'size',
            '*announce-step': 'size',
            '*throttle-trigger-threshold': 'uint8',
            '*cpu-throttle-initial': 'uint8',
            '*cpu-throttle-increment': 'uint8',
            '*cpu-throttle-tailslow': 'bool',
            '*tls-creds': 'StrOrNull',
            '*tls-hostname': 'StrOrNull',
            '*tls-authz': 'StrOrNull',
            '*max-bandwidth': 'size',
            '*avail-switchover-bandwidth': 'size',
            '*downtime-limit': 'uint64',
            '*x-checkpoint-delay': { 'type': 'uint32',
                                     'features': [ 'unstable' ] },
            '*multifd-channels': 'uint8',
            '*xbzrle-cache-size': 'size',
            '*max-postcopy-bandwidth': 'size',
            '*max-cpu-throttle': 'uint8',
            '*multifd-compression': 'MultiFDCompression',
            '*multifd-zlib-level': 'uint8',
            '*multifd-zstd-level': 'uint8',
            '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ],
            '*x-vcpu-dirty-limit-period': { 'type': 'uint64',
                                            'features': [ 'unstable' ] },
            '*vcpu-dirty-limit': 'uint64',
            '*mode': 'MigMode',
            '*zero-page-detection': 'ZeroPageDetection',
            '*direct-io': 'bool' } }

##
# @migrate-set-parameters:
#
# Set various migration parameters.
#
# Since: 2.4
#
# Example:
#
#     -> { "execute": "migrate-set-parameters",
#          "arguments": { "multifd-channels": 5 } }
#     <- { "return": {} }
##
{ 'command': 'migrate-set-parameters', 'boxed': true,
  'data': 'MigrateSetParameters' }
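
# A sketch only, not part of the schema: a TLS-protected outgoing
# migration might first create x509 credentials with object-add and
# then point @tls-creds at them.  The object id and directory are
# hypothetical:
#
#     -> { "execute": "object-add",
#          "arguments": { "qom-type": "tls-creds-x509", "id": "tls0",
#                         "dir": "/etc/pki/qemu",
#                         "endpoint": "client" } }
#     <- { "return": {} }
#     -> { "execute": "migrate-set-parameters",
#          "arguments": { "tls-creds": "tls0" } }
#     <- { "return": {} }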

##
# @MigrationParameters:
#
# The optional members aren't actually optional.
#
# @announce-initial: Initial delay (in milliseconds) before sending
#     the first announce (Since 4.0)
#
# @announce-max: Maximum delay (in milliseconds) between packets in
#     the announcement (Since 4.0)
#
# @announce-rounds: Number of self-announce packets sent after
#     migration (Since 4.0)
#
# @announce-step: Increase in delay (in milliseconds) between
#     subsequent packets in the announcement (Since 4.0)
#
# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
#     bytes_xfer_period to trigger throttling.  It is expressed as a
#     percentage.  The default value is 50.  (Since 5.0)
#
# @cpu-throttle-initial: Initial percentage of time guest cpus are
#     throttled when migration auto-converge is activated.
#     (Since 2.7)
#
# @cpu-throttle-increment: throttle percentage increase each time
#     auto-converge detects that migration is not making progress.
#     (Since 2.7)
#
# @cpu-throttle-tailslow: Make CPU throttling slower at the tail
#     stage.  At the tail stage of throttling, the Guest is very
#     sensitive to CPU percentage while the @cpu-throttle-increment is
#     usually excessive there.  If this parameter is true, we will
#     compute the ideal CPU percentage used by the Guest, which may
#     exactly make the dirty rate match the dirty rate threshold.
#     Then we will choose a smaller throttle increment between the one
#     specified by @cpu-throttle-increment and the one generated by
#     the ideal CPU percentage.  Therefore, it is compatible with
#     traditional throttling, while the throttle increment won't be
#     excessive at the tail stage.  The default value is false.
#     (Since 5.1)
#
# @tls-creds: ID of the 'tls-creds' object that provides credentials
#     for establishing a TLS connection over the migration data
#     channel.  On the outgoing side of the migration, the credentials
#     must be for a 'client' endpoint, while for the incoming side the
#     credentials must be for a 'server' endpoint.  An empty string
#     means that QEMU will use plain text mode for migration, rather
#     than TLS.  (Since 2.7)
#
#     Note: 2.8 omits empty @tls-creds instead.
#
# @tls-hostname: migration target's hostname for validating the
#     server's x509 certificate identity.  If empty, QEMU will use the
#     hostname from the migration URI, if any.  (Since 2.7)
#
#     Note: 2.8 omits empty @tls-hostname instead.
#
# @tls-authz: ID of the 'authz' object subclass that provides access
#     control checking of the TLS x509 certificate distinguished name.
#     (Since 4.0)
#
# @max-bandwidth: maximum speed for migration, in bytes per second.
#     (Since 2.8)
#
# @avail-switchover-bandwidth: The available bandwidth that migration
#     can use during the switchover phase.  NOTE!  This does not limit
#     the bandwidth during switchover, but only for calculations when
#     making decisions to switchover.  By default, this value is zero,
#     which means QEMU will estimate the bandwidth automatically.
#     This can be set when the estimated value is not accurate, while
#     the user is able to guarantee such bandwidth is available when
#     switching over.  When specified correctly, this can make the
#     switchover decision much more accurate.  (Since 8.2)
#
# @downtime-limit: Set the maximum tolerated downtime (in
#     milliseconds) for migration.  (Since 2.8)
#
# @x-checkpoint-delay: the delay time between two COLO checkpoints.
#     (Since 2.8)
#
# @multifd-channels: Number of channels used to migrate data in
#     parallel.  This is the same as the number of sockets used for
#     migration.  The default value is 2 (since 4.0)
#
# @xbzrle-cache-size: cache size to be used by XBZRLE migration.  It
#     needs to be a multiple of the target page size and a power of 2
#     (Since 2.11)
#
# @max-postcopy-bandwidth: Background transfer bandwidth during
#     postcopy.  Defaults to 0 (unlimited).  In bytes per second.
#     (Since 3.0)
#
# @max-cpu-throttle: maximum cpu throttle percentage.  Defaults to 99.
#     (Since 3.1)
#
# @multifd-compression: Which compression method to use.  Defaults to
#     none.  (Since 5.0)
#
# @multifd-zlib-level: Set the compression level to be used in live
#     migration; the compression level is an integer between 0 and 9,
#     where 0 means no compression, 1 means the best compression
#     speed, and 9 means the best compression ratio, which will
#     consume more CPU.  Defaults to 1.  (Since 5.0)
#
# @multifd-zstd-level: Set the compression level to be used in live
#     migration; the compression level is an integer between 0 and 20,
#     where 0 means no compression, 1 means the best compression
#     speed, and 20 means the best compression ratio, which will
#     consume more CPU.  Defaults to 1.  (Since 5.0)
#
# @block-bitmap-mapping: Maps block nodes and bitmaps on them to
#     aliases for the purpose of dirty bitmap migration.  Such aliases
#     may for example be the corresponding names on the opposite site.
#     The mapping must be one-to-one, but not necessarily complete: On
#     the source, unmapped bitmaps and all bitmaps on unmapped nodes
#     will be ignored.  On the destination, encountering an unmapped
#     alias in the incoming migration stream will result in a report,
#     and all further bitmap migration data will then be discarded.
#     Note that the destination does not know about bitmaps it does
#     not receive, so there is no limitation or requirement regarding
#     the number of bitmaps received, or how they are named, or on
#     which nodes they are placed.  By default (when this parameter
#     has never been set), bitmap names are mapped to themselves.
#     Nodes are mapped to their block device name if there is one, and
#     to their node name otherwise.  (Since 5.2)
#
# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty
#     limit during live migration.  Should be in the range 1 to
#     1000ms.  Defaults to 1000ms.  (Since 8.1)
#
# @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration.
#     Defaults to 1.  (Since 8.1)
#
# @mode: Migration mode.  See description in @MigMode.  Default is
#     'normal'.  (Since 8.2)
#
# @zero-page-detection: Whether and how to detect zero pages.
#     See description in @ZeroPageDetection.  Default is 'multifd'.
#     (since 9.0)
#
# @direct-io: Open migration files with O_DIRECT when possible.  This
#     only has effect if the @mapped-ram capability is enabled.
#     (Since 9.1)
#
# Features:
#
# @unstable: Members @x-checkpoint-delay and
#     @x-vcpu-dirty-limit-period are experimental.
#
# Since: 2.4
##
{ 'struct': 'MigrationParameters',
  'data': { '*announce-initial': 'size',
            '*announce-max': 'size',
            '*announce-rounds': 'size',
            '*announce-step': 'size',
            '*throttle-trigger-threshold': 'uint8',
            '*cpu-throttle-initial': 'uint8',
            '*cpu-throttle-increment': 'uint8',
            '*cpu-throttle-tailslow': 'bool',
            '*tls-creds': 'str',
            '*tls-hostname': 'str',
            '*tls-authz': 'str',
            '*max-bandwidth': 'size',
            '*avail-switchover-bandwidth': 'size',
            '*downtime-limit': 'uint64',
            '*x-checkpoint-delay': { 'type': 'uint32',
                                     'features': [ 'unstable' ] },
            '*multifd-channels': 'uint8',
            '*xbzrle-cache-size': 'size',
            '*max-postcopy-bandwidth': 'size',
            '*max-cpu-throttle': 'uint8',
            '*multifd-compression': 'MultiFDCompression',
            '*multifd-zlib-level': 'uint8',
            '*multifd-zstd-level': 'uint8',
            '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ],
            '*x-vcpu-dirty-limit-period': { 'type': 'uint64',
                                            'features': [ 'unstable' ] },
            '*vcpu-dirty-limit': 'uint64',
            '*mode': 'MigMode',
            '*zero-page-detection': 'ZeroPageDetection',
            '*direct-io': 'bool' } }

##
# @query-migrate-parameters:
#
# Returns information about the current migration parameters
#
# Returns: @MigrationParameters
#
# Since: 2.4
#
# Example:
#
#     -> { "execute": "query-migrate-parameters" }
#     <- { "return": {
#             "multifd-channels": 2,
#             "cpu-throttle-increment": 10,
#             "cpu-throttle-initial": 20,
#             "max-bandwidth": 33554432,
#             "downtime-limit": 300
#          }
#        }
##
{ 'command': 'query-migrate-parameters',
  'returns': 'MigrationParameters' }

##
# @migrate-start-postcopy:
#
# Followup to a migration command to switch the migration to postcopy
# mode.  The postcopy-ram capability must be set on both source and
# destination before the original migration command.
#
# Since: 2.5
#
# Example:
#
#     -> { "execute": "migrate-start-postcopy" }
#     <- { "return": {} }
##
{ 'command': 'migrate-start-postcopy' }

##
# @MIGRATION:
#
# Emitted when a migration event happens
#
# @status: @MigrationStatus describing the current migration status.
#
# Since: 2.4
#
# Example:
#
#     <- {"timestamp": {"seconds": 1432121972, "microseconds": 744001},
#         "event": "MIGRATION",
#         "data": {"status": "completed"} }
##
{ 'event': 'MIGRATION',
  'data': {'status': 'MigrationStatus'}}

##
# @MIGRATION_PASS:
#
# Emitted from the source side of a migration at the start of each
# pass (when it syncs the dirty bitmap)
#
# @pass: An incrementing count (starting at 1 on the first pass)
#
# Since: 2.6
#
# Example:
#
#     <- { "timestamp": {"seconds": 1449669631, "microseconds": 239225},
#          "event": "MIGRATION_PASS", "data": {"pass": 2} }
##
{ 'event': 'MIGRATION_PASS',
  'data': { 'pass': 'int' } }

##
# @COLOMessage:
#
# Messages transmitted between the primary side and the secondary
# side.
#
# @checkpoint-ready: Secondary VM (SVM) is ready for checkpointing
#
# @checkpoint-request: Primary VM (PVM) tells SVM to prepare for
#     checkpointing
#
# @checkpoint-reply: SVM gets PVM's checkpoint request
#
# @vmstate-send: VM's state will be sent by PVM.
#
# @vmstate-size: The total size of VMstate.
#
# @vmstate-received: VM's state has been received by SVM.
#
# @vmstate-loaded: VM's state has been loaded by SVM.
#
# Since: 2.8
##
{ 'enum': 'COLOMessage',
  'data': [ 'checkpoint-ready', 'checkpoint-request', 'checkpoint-reply',
            'vmstate-send', 'vmstate-size', 'vmstate-received',
            'vmstate-loaded' ] }

##
# @COLOMode:
#
# The COLO current mode.
#
# @none: COLO is disabled.
#
# @primary: COLO node in primary side.
#
# @secondary: COLO node in secondary side.
#
# Since: 2.8
##
{ 'enum': 'COLOMode',
  'data': [ 'none', 'primary', 'secondary'] }

##
# @FailoverStatus:
#
# An enumeration of COLO failover status
#
# @none: no failover has ever happened
#
# @require: got failover requirement but not handled
#
# @active: in the process of doing failover
#
# @completed: finish the process of failover
#
# @relaunch: restart the failover process, from 'none' -> 'completed'
#     (Since 2.9)
#
# Since: 2.8
##
{ 'enum': 'FailoverStatus',
  'data': [ 'none', 'require', 'active', 'completed', 'relaunch' ] }

##
# @COLO_EXIT:
#
# Emitted when the VM finishes COLO mode due to an error, or at the
# request of users.
#
# @mode: report COLO mode when COLO exited.
#
# @reason: describes the reason for the COLO exit.
#
# Since: 3.1
#
# Example:
#
#     <- { "timestamp": {"seconds": 2032141960, "microseconds": 417172},
#          "event": "COLO_EXIT", "data": {"mode": "primary", "reason": "request" } }
##
{ 'event': 'COLO_EXIT',
  'data': {'mode': 'COLOMode', 'reason': 'COLOExitReason' } }

##
# @COLOExitReason:
#
# The reason for a COLO exit.
#
# @none: failover has never happened.  This state does not occur in
#     the COLO_EXIT event, and is only visible in the result of
#     query-colo-status.
#
# @request: COLO exit is due to an external request.
#
# @error: COLO exit is due to an internal error.
#
# @processing: COLO is currently handling a failover (since 4.0).
#
# Since: 3.1
##
{ 'enum': 'COLOExitReason',
  'data': [ 'none', 'request', 'error', 'processing' ] }

##
# @x-colo-lost-heartbeat:
#
# Tell QEMU that the heartbeat is lost, and request that it do
# takeover procedures.  If this command is sent to the PVM, the
# Primary side will exit COLO mode.  If sent to the Secondary, the
# Secondary side will run failover work, then take over server
# operation to become the service VM.
#
# Features:
#
# @unstable: This command is experimental.
#
# Since: 2.8
#
# Example:
#
#     -> { "execute": "x-colo-lost-heartbeat" }
#     <- { "return": {} }
##
{ 'command': 'x-colo-lost-heartbeat',
  'features': [ 'unstable' ],
  'if': 'CONFIG_REPLICATION' }

##
# @migrate_cancel:
#
# Cancel the currently executing migration process.
#
# .. note:: This command succeeds even if there is no migration
#    process running.
#
# Since: 0.14
#
# Example:
#
#     -> { "execute": "migrate_cancel" }
#     <- { "return": {} }
##
{ 'command': 'migrate_cancel' }

##
# @migrate-continue:
#
# Continue migration when it's in a paused state.
#
# @state: The state the migration is currently expected to be in
#
# Since: 2.11
#
# Example:
#
#     -> { "execute": "migrate-continue", "arguments":
#          { "state": "pre-switchover" } }
#     <- { "return": {} }
##
{ 'command': 'migrate-continue', 'data': {'state': 'MigrationStatus'} }

##
# @MigrationAddressType:
#
# The migration stream transport mechanisms.
#
# @socket: Migrate via socket.
#
# @exec: Direct the migration stream to another process.
#
# @rdma: Migrate via RDMA.
#
# @file: Direct the migration stream to a file.
#
# Since: 8.2
##
{ 'enum': 'MigrationAddressType',
  'data': [ 'socket', 'exec', 'rdma', 'file' ] }

##
# @FileMigrationArgs:
#
# @filename: The file to receive the migration stream
#
# @offset: The file offset where the migration stream will start
#
# Since: 8.2
##
{ 'struct': 'FileMigrationArgs',
  'data': { 'filename': 'str',
            'offset': 'uint64' } }

##
# @MigrationExecCommand:
#
# @args: command (list head) and arguments to execute.
#
# Since: 8.2
##
{ 'struct': 'MigrationExecCommand',
  'data': {'args': [ 'str' ] } }

##
# @MigrationAddress:
#
# Migration endpoint configuration.
#
# @transport: The migration stream transport mechanism
#
# Since: 8.2
##
{ 'union': 'MigrationAddress',
  'base': { 'transport' : 'MigrationAddressType'},
  'discriminator': 'transport',
  'data': {
      'socket': 'SocketAddress',
      'exec': 'MigrationExecCommand',
      'rdma': 'InetSocketAddress',
      'file': 'FileMigrationArgs' } }

##
# @MigrationChannelType:
#
# The migration channel-type request options.
#
# @main: Main outbound migration channel.
#
# Since: 8.1
##
{ 'enum': 'MigrationChannelType',
  'data': [ 'main' ] }

##
# @MigrationChannel:
#
# Migration stream channel parameters.
#
# @channel-type: Channel type for transferring packet information.
#
# @addr: Migration endpoint configuration on destination interface.
#
# Since: 8.1
##
{ 'struct': 'MigrationChannel',
  'data': {
      'channel-type': 'MigrationChannelType',
      'addr': 'MigrationAddress' } }

##
# @migrate:
#
# Migrates the currently running guest to another Virtual Machine.
#
# @uri: the Uniform Resource Identifier of the destination VM
#
# @channels: list of migration stream channels with each stream in the
#     list connected to a destination interface endpoint.
#
# @detach: this argument exists only for compatibility reasons and is
#     ignored by QEMU
#
# @resume: resume one paused migration, default "off".  (since 3.0)
#
# Since: 0.14
#
# .. admonition:: Notes
#
#     1. The 'query-migrate' command should be used to check
#        migration's progress and final result (this information is
#        provided by the 'status' member).
#
#     2. All boolean arguments default to false.
#
#     3. The user Monitor's "detach" argument is invalid in QMP and
#        should not be used.
#
#     4. The uri argument should have the Uniform Resource Identifier
#        of default destination VM.  This connection will be bound to
#        default network.
#
#     5. For now, number of migration streams is restricted to one,
#        i.e. number of items in 'channels' list is just 1.
#
#     6. The 'uri' and 'channels' arguments are mutually exclusive;
#        exactly one of the two should be present.
#
# Example:
#
#     -> { "execute": "migrate", "arguments": { "uri": "tcp:0:4446" } }
#     <- { "return": {} }
#
#     -> { "execute": "migrate",
#          "arguments": {
#              "channels": [ { "channel-type": "main",
#                              "addr": { "transport": "socket",
#                                        "type": "inet",
#                                        "host": "10.12.34.9",
#                                        "port": "1050" } } ] } }
#     <- { "return": {} }
#
#     -> { "execute": "migrate",
#          "arguments": {
#              "channels": [ { "channel-type": "main",
#                              "addr": { "transport": "exec",
#                                        "args": [ "/bin/nc", "-p", "6000",
#                                                  "/some/sock" ] } } ] } }
#     <- { "return": {} }
#
#     -> { "execute": "migrate",
#          "arguments": {
#              "channels": [ { "channel-type": "main",
#                              "addr": { "transport": "rdma",
#                                        "host": "10.12.34.9",
#                                        "port": "1050" } } ] } }
#     <- { "return": {} }
#
#     -> { "execute": "migrate",
#          "arguments": {
#              "channels": [ { "channel-type": "main",
#                              "addr": { "transport": "file",
#                                        "filename": "/tmp/migfile",
#                                        "offset": "0x1000" } } ] } }
#     <- { "return": {} }
##
{ 'command': 'migrate',
  'data': {'*uri': 'str',
           '*channels': [ 'MigrationChannel' ],
           '*detach': 'bool', '*resume': 'bool' } }

##
# @migrate-incoming:
#
# Start an incoming migration.  QEMU must have been started with
# -incoming defer.
#
# @uri: The Uniform Resource Identifier identifying the source or
#     address to listen on
#
# @channels: list of migration stream channels with each stream in the
#     list connected to a destination interface endpoint.
#
# @exit-on-error: Exit on incoming migration failure.  Default true.
#     When set to false, the failure triggers a MIGRATION event, and
#     error details could be retrieved with query-migrate.
#     (since 9.1)
#
# Since: 2.3
#
# .. admonition:: Notes
#
#     1. It's a bad idea to use a string for the uri, but it needs to
#        stay compatible with -incoming and the format of the uri is
#        already exposed above libvirt.
#
#     2. QEMU must be started with -incoming defer to allow
#        migrate-incoming to be used.
#
#     3. The uri format is the same as for -incoming
#
#     4. For now, number of migration streams is restricted to one,
#        i.e. number of items in 'channels' list is just 1.
#
#     5. The 'uri' and 'channels' arguments are mutually exclusive;
#        exactly one of the two should be present.

##
# @migrate-incoming:
#
# Start an incoming migration.  QEMU must have been started with
# -incoming defer.
#
# @uri: The Uniform Resource Identifier identifying the source or
#     address to listen on
#
# @channels: list of migration stream channels with each stream in the
#     list connected to a destination interface endpoint.
#
# @exit-on-error: Exit on incoming migration failure.  Default true.
#     When set to false, the failure triggers a MIGRATION event, and
#     error details can be retrieved with query-migrate.  (since 9.1)
#
# Since: 2.3
#
# .. admonition:: Notes
#
#     1. It's a bad idea to use a string for the uri, but it needs to
#        stay compatible with -incoming and the format of the uri is
#        already exposed above libvirt.
#
#     2. QEMU must be started with -incoming defer to allow
#        migrate-incoming to be used.
#
#     3. The uri format is the same as for -incoming.
#
#     4. For now, the number of migration streams is restricted to
#        one, i.e. the number of items in the 'channels' list is
#        just 1.
#
#     5. The 'uri' and 'channels' arguments are mutually exclusive;
#        exactly one of the two should be present.
#
# Example:
#
#     -> { "execute": "migrate-incoming",
#          "arguments": { "uri": "tcp:0:4446" } }
#     <- { "return": {} }
#
#     -> { "execute": "migrate-incoming",
#          "arguments": {
#              "channels": [ { "channel-type": "main",
#                              "addr": { "transport": "socket",
#                                        "type": "inet",
#                                        "host": "10.12.34.9",
#                                        "port": "1050" } } ] } }
#     <- { "return": {} }
#
#     -> { "execute": "migrate-incoming",
#          "arguments": {
#              "channels": [ { "channel-type": "main",
#                              "addr": { "transport": "exec",
#                                        "args": [ "/bin/nc", "-p", "6000",
#                                                  "/some/sock" ] } } ] } }
#     <- { "return": {} }
#
#     -> { "execute": "migrate-incoming",
#          "arguments": {
#              "channels": [ { "channel-type": "main",
#                              "addr": { "transport": "rdma",
#                                        "host": "10.12.34.9",
#                                        "port": "1050" } } ] } }
#     <- { "return": {} }
##
{ 'command': 'migrate-incoming',
  'data': {'*uri': 'str',
           '*channels': [ 'MigrationChannel' ],
           '*exit-on-error': 'bool' } }

##
# @xen-save-devices-state:
#
# Save the state of all devices to file.  The RAM and the block
# devices of the VM are not saved by this command.
#
# @filename: the file to save the state of the devices to as binary
#     data.  See xen-save-devices-state.txt for a description of the
#     binary format.
#
# @live: Optional argument to ask QEMU to treat this command as part
#     of a live migration.  Defaults to true.  (since 2.11)
#
# Since: 1.1
#
# Example:
#
#     -> { "execute": "xen-save-devices-state",
#          "arguments": { "filename": "/tmp/save" } }
#     <- { "return": {} }
##
{ 'command': 'xen-save-devices-state',
  'data': {'filename': 'str', '*live': 'bool' } }

##
# @xen-set-global-dirty-log:
#
# Enable or disable the global dirty log mode.
#
# @enable: true to enable, false to disable.
#
# Since: 1.3
#
# Example:
#
#     -> { "execute": "xen-set-global-dirty-log",
#          "arguments": { "enable": true } }
#     <- { "return": {} }
##
{ 'command': 'xen-set-global-dirty-log', 'data': { 'enable': 'bool' } }

##
# @xen-load-devices-state:
#
# Load the state of all devices from file.  The RAM and the block
# devices of the VM are not loaded by this command.
#
# @filename: the file to load the state of the devices from as binary
#     data.  See xen-save-devices-state.txt for a description of the
#     binary format.
#
# Since: 2.7
#
# Example:
#
#     -> { "execute": "xen-load-devices-state",
#          "arguments": { "filename": "/tmp/resume" } }
#     <- { "return": {} }
##
{ 'command': 'xen-load-devices-state', 'data': {'filename': 'str'} }

##
# @xen-set-replication:
#
# Enable or disable replication.
#
# @enable: true to enable, false to disable.
#
# @primary: true for primary or false for secondary.
#
# @failover: true to do failover, false to stop.  Cannot be specified
#     if 'enable' is true.  Default value is false.
#
# Example:
#
#     -> { "execute": "xen-set-replication",
#          "arguments": {"enable": true, "primary": false} }
#     <- { "return": {} }
#
# Since: 2.9
##
{ 'command': 'xen-set-replication',
  'data': { 'enable': 'bool', 'primary': 'bool', '*failover': 'bool' },
  'if': 'CONFIG_REPLICATION' }

##
# @ReplicationStatus:
#
# The result format for 'query-xen-replication-status'.
#
# @error: true if an error happened, false if replication is normal.
#
# @desc: the human readable error description string, when @error is
#     'true'.
#
# Since: 2.9
##
{ 'struct': 'ReplicationStatus',
  'data': { 'error': 'bool', '*desc': 'str' },
  'if': 'CONFIG_REPLICATION' }

##
# @query-xen-replication-status:
#
# Query replication status while the VM is running.
#
# Returns: A @ReplicationStatus object showing the status.
#
# Example:
#
#     -> { "execute": "query-xen-replication-status" }
#     <- { "return": { "error": false } }
#
# Since: 2.9
##
{ 'command': 'query-xen-replication-status',
  'returns': 'ReplicationStatus',
  'if': 'CONFIG_REPLICATION' }

##
# @xen-colo-do-checkpoint:
#
# Xen uses this command to notify replication to trigger a checkpoint.
#
# Example:
#
#     -> { "execute": "xen-colo-do-checkpoint" }
#     <- { "return": {} }
#
# Since: 2.9
##
{ 'command': 'xen-colo-do-checkpoint',
  'if': 'CONFIG_REPLICATION' }

##
# @COLOStatus:
#
# The result format for 'query-colo-status'.
#
# @mode: COLO running mode.  If COLO is running, this field will
#     return 'primary' or 'secondary'.
#
# @last-mode: COLO last running mode.  While COLO is running, this
#     field has the same value as @mode; after a failover, it can be
#     used to determine which COLO mode was last active.  (since 4.0)
#
# @reason: describes the reason for the COLO exit.
#
# Since: 3.1
##
{ 'struct': 'COLOStatus',
  'data': { 'mode': 'COLOMode', 'last-mode': 'COLOMode',
            'reason': 'COLOExitReason' },
  'if': 'CONFIG_REPLICATION' }

##
# @query-colo-status:
#
# Query COLO status while the VM is running.
#
# Returns: A @COLOStatus object showing the status.
#
# Example:
#
#     -> { "execute": "query-colo-status" }
#     <- { "return": { "mode": "primary", "last-mode": "none",
#                      "reason": "request" } }
#
# Since: 3.1
##
{ 'command': 'query-colo-status',
  'returns': 'COLOStatus',
  'if': 'CONFIG_REPLICATION' }

##
# @migrate-recover:
#
# Provide a recovery migration stream URI.
#
# @uri: the URI to be used for the recovery of the migration stream.
#
# Example:
#
#     -> { "execute": "migrate-recover",
#          "arguments": { "uri": "tcp:192.168.1.200:12345" } }
#     <- { "return": {} }
#
# Since: 3.0
##
{ 'command': 'migrate-recover',
  'data': { 'uri': 'str' },
  'allow-oob': true }

##
# @migrate-pause:
#
# Pause a migration.  Currently it only supports postcopy.
#
# Example:
#
#     -> { "execute": "migrate-pause" }
#     <- { "return": {} }
#
# Since: 3.0
##
{ 'command': 'migrate-pause', 'allow-oob': true }
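
# A sketch of how a paused postcopy migration is recovered using
# @migrate-recover together with @migrate's @resume flag
# (illustrative; the URI is a placeholder).  After a failure the
# migration enters 'postcopy-paused'; the destination is given a
# fresh listening address and the source reconnects:
#
#     (dst) -> { "execute": "migrate-recover",
#                "arguments": { "uri": "tcp:192.168.1.200:12345" } }
#     (dst) <- { "return": {} }
#     (src) -> { "execute": "migrate",
#                "arguments": { "uri": "tcp:192.168.1.200:12345",
#                               "resume": true } }
#     (src) <- { "return": {} }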

##
# @UNPLUG_PRIMARY:
#
# Emitted from the source side of a migration when the migration
# state is WAIT_UNPLUG.  The device was unplugged by the guest
# operating system.  Device resources in QEMU are kept on standby to
# be able to re-plug it in case of migration failure.
#
# @device-id: QEMU device id of the unplugged device
#
# Since: 4.2
#
# Example:
#
#     <- { "event": "UNPLUG_PRIMARY",
#          "data": { "device-id": "hostdev0" },
#          "timestamp": { "seconds": 1265044230, "microseconds": 450486 } }
##
{ 'event': 'UNPLUG_PRIMARY',
  'data': { 'device-id': 'str' } }

##
# @DirtyRateVcpu:
#
# Dirty rate of vcpu.
#
# @id: vcpu index.
#
# @dirty-rate: dirty rate.
#
# Since: 6.2
##
{ 'struct': 'DirtyRateVcpu',
  'data': { 'id': 'int', 'dirty-rate': 'int64' } }

##
# @DirtyRateStatus:
#
# Dirty page rate measurement status.
#
# @unstarted: measuring thread has not been started yet
#
# @measuring: measuring thread is running
#
# @measured: dirty page rate is measured and the results are available
#
# Since: 5.2
##
{ 'enum': 'DirtyRateStatus',
  'data': [ 'unstarted', 'measuring', 'measured'] }

##
# @DirtyRateMeasureMode:
#
# Method used to measure dirty page rate.  Differences between
# available methods are explained in @calc-dirty-rate.
#
# @page-sampling: use page sampling
#
# @dirty-ring: use dirty ring
#
# @dirty-bitmap: use dirty bitmap
#
# Since: 6.2
##
{ 'enum': 'DirtyRateMeasureMode',
  'data': ['page-sampling', 'dirty-ring', 'dirty-bitmap'] }

##
# @TimeUnit:
#
# Specifies unit in which time-related value is specified.
#
# @second: value is in seconds
#
# @millisecond: value is in milliseconds
#
# Since: 8.2
##
{ 'enum': 'TimeUnit',
  'data': ['second', 'millisecond'] }

##
# @DirtyRateInfo:
#
# Information about measured dirty page rate.
#
# @dirty-rate: an estimate of the dirty page rate of the VM in units
#     of MiB/s.  Value is present only when @status is 'measured'.
#
# @status: current status of dirty page rate measurements
#
# @start-time: start time in units of second for calculation
#
# @calc-time: time period for which dirty page rate was measured,
#     expressed and rounded down to @calc-time-unit.
#
# @calc-time-unit: time unit of @calc-time (Since 8.2)
#
# @sample-pages: number of sampled pages per GiB of guest memory.
#     Valid only in page-sampling mode (Since 6.1)
#
# @mode: mode that was used to measure dirty page rate (Since 6.2)
#
# @vcpu-dirty-rate: dirty rate for each vCPU if dirty-ring mode was
#     specified (Since 6.2)
#
# Since: 5.2
##
{ 'struct': 'DirtyRateInfo',
  'data': {'*dirty-rate': 'int64',
           'status': 'DirtyRateStatus',
           'start-time': 'int64',
           'calc-time': 'int64',
           'calc-time-unit': 'TimeUnit',
           'sample-pages': 'uint64',
           'mode': 'DirtyRateMeasureMode',
           '*vcpu-dirty-rate': [ 'DirtyRateVcpu' ] } }

##
# @calc-dirty-rate:
#
# Start measuring dirty page rate of the VM.  Results can be
# retrieved with @query-dirty-rate after measurements are completed.
#
# Dirty page rate is the number of pages changed in a given time
# period expressed in MiB/s.  The following methods of calculation
# are available:
#
# 1. In page sampling mode, a random subset of pages are selected and
#    hashed twice: once at the beginning of the measurement time
#    period, and once again at the end.  If two hashes for some page
#    are different, the page is counted as changed.  Since this
#    method relies on sampling and hashing, the calculated dirty page
#    rate is only an estimate of its true value.  Increasing
#    @sample-pages improves estimation quality at the cost of higher
#    computational overhead.
#
# 2. Dirty bitmap mode captures writes to memory (for example by
#    temporarily revoking write access to all pages) and counts page
#    faults.  Information about modified pages is collected into a
#    bitmap, where each bit corresponds to one guest page.  This mode
#    requires that the KVM accelerator property "dirty-ring-size" is
#    *not* set.
#
# 3. Dirty ring mode is similar to dirty bitmap mode, but the
#    information about modified pages is collected into a ring
#    buffer.  This mode tracks page modification for each vCPU
#    separately.  It requires that the KVM accelerator property
#    "dirty-ring-size" is set.
#
# @calc-time: time period for which dirty page rate is calculated.
#     By default it is specified in seconds, but the unit can be set
#     explicitly with @calc-time-unit.  Note that larger @calc-time
#     values will typically result in smaller dirty page rates
#     because page dirtying is a one-time event.  Once some page is
#     counted as dirty during the @calc-time period, further writes
#     to this page will not increase the dirty page rate anymore.
#
# @calc-time-unit: time unit in which @calc-time is specified.  By
#     default it is seconds.  (Since 8.2)
#
# @sample-pages: number of sampled pages per GiB of guest memory.
#     Default value is 512.  For 4KiB guest pages this corresponds to
#     a sampling ratio of 0.2%.  This argument is used only in page
#     sampling mode.  (Since 6.1)
#
# @mode: mechanism for tracking dirty pages.  Default value is
#     'page-sampling'.  Others are 'dirty-bitmap' and 'dirty-ring'.
#     (Since 6.1)
#
# Since: 5.2
#
# Example:
#
#     -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 1,
#         "sample-pages": 512} }
#     <- { "return": {} }
#
# Measure dirty rate using dirty bitmap for 500 milliseconds:
#
#     -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 500,
#         "calc-time-unit": "millisecond", "mode": "dirty-bitmap"} }
#     <- { "return": {} }
##
{ 'command': 'calc-dirty-rate', 'data': {'calc-time': 'int64',
                                         '*calc-time-unit': 'TimeUnit',
                                         '*sample-pages': 'int',
                                         '*mode': 'DirtyRateMeasureMode'} }
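
# A sketch of a dirty-ring measurement round trip with
# @calc-dirty-rate and @query-dirty-rate (illustrative values; the
# reply is abbreviated, and KVM's "dirty-ring-size" property must be
# set):
#
#     -> {"execute": "calc-dirty-rate",
#         "arguments": {"calc-time": 1, "mode": "dirty-ring"} }
#     <- { "return": {} }
#     ... wait for the measurement to complete ...
#     -> {"execute": "query-dirty-rate"}
#     <- {"return": {"status": "measured", "dirty-rate": 108,
#                    "mode": "dirty-ring",
#                    "vcpu-dirty-rate": [ {"id": 0, "dirty-rate": 60},
#                                         {"id": 1, "dirty-rate": 48} ] } }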

##
# @query-dirty-rate:
#
# Query results of the most recent invocation of @calc-dirty-rate.
#
# @calc-time-unit: time unit in which to report calculation time.
#     By default it is reported in seconds.  (Since 8.2)
#
# Since: 5.2
#
# Examples:
#
# 1. Measurement is in progress:
#
#     <- {"status": "measuring", "sample-pages": 512,
#         "mode": "page-sampling", "start-time": 1693900454,
#         "calc-time": 10, "calc-time-unit": "second"}
#
# 2. Measurement has been completed:
#
#     <- {"status": "measured", "sample-pages": 512, "dirty-rate": 108,
#         "mode": "page-sampling", "start-time": 1693900454,
#         "calc-time": 10, "calc-time-unit": "second"}
##
{ 'command': 'query-dirty-rate', 'data': {'*calc-time-unit': 'TimeUnit' },
  'returns': 'DirtyRateInfo' }

##
# @DirtyLimitInfo:
#
# Dirty page rate limit information of a virtual CPU.
#
# @cpu-index: index of a virtual CPU.
#
# @limit-rate: upper limit of dirty page rate (MB/s) for a virtual
#     CPU, 0 means unlimited.
#
# @current-rate: current dirty page rate (MB/s) for a virtual CPU.
#
# Since: 7.1
##
{ 'struct': 'DirtyLimitInfo',
  'data': { 'cpu-index': 'int',
            'limit-rate': 'uint64',
            'current-rate': 'uint64' } }

##
# @set-vcpu-dirty-limit:
#
# Set the upper limit of dirty page rate for virtual CPUs.
#
# Requires KVM with accelerator property "dirty-ring-size" set.  A
# virtual CPU's dirty page rate is a measure of its memory load.  To
# observe dirty page rates, use @calc-dirty-rate.
#
# @cpu-index: index of a virtual CPU, default is all.
#
# @dirty-rate: upper limit of dirty page rate (MB/s) for virtual
#     CPUs.
#
# Since: 7.1
#
# Example:
#
#     -> {"execute": "set-vcpu-dirty-limit",
#         "arguments": { "dirty-rate": 200,
#                        "cpu-index": 1 } }
#     <- { "return": {} }
##
{ 'command': 'set-vcpu-dirty-limit',
  'data': { '*cpu-index': 'int',
            'dirty-rate': 'uint64' } }

##
# @cancel-vcpu-dirty-limit:
#
# Cancel the upper limit of dirty page rate for virtual CPUs.
#
# Cancel the dirty page limit for the vCPU which has been set with
# the set-vcpu-dirty-limit command.  Note that this command requires
# dirty ring support, the same as "set-vcpu-dirty-limit".
#
# @cpu-index: index of a virtual CPU, default is all.
#
# Since: 7.1
#
# Example:
#
#     -> {"execute": "cancel-vcpu-dirty-limit",
#         "arguments": { "cpu-index": 1 } }
#     <- { "return": {} }
##
{ 'command': 'cancel-vcpu-dirty-limit',
  'data': { '*cpu-index': 'int'} }
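
# A sketch of a full throttle cycle with the two commands above and
# @query-vcpu-dirty-limit below (illustrative values; omitting
# @cpu-index applies the limit to all vCPUs):
#
#     -> {"execute": "set-vcpu-dirty-limit",
#         "arguments": { "dirty-rate": 200 } }
#     <- { "return": {} }
#     -> {"execute": "query-vcpu-dirty-limit"}
#     <- {"return": [ { "limit-rate": 200, "current-rate": 62,
#                       "cpu-index": 0 } ] }
#     -> {"execute": "cancel-vcpu-dirty-limit"}
#     <- { "return": {} }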

##
# @query-vcpu-dirty-limit:
#
# Returns information about virtual CPU dirty page rate limits, if
# any.
#
# Since: 7.1
#
# Example:
#
#     -> {"execute": "query-vcpu-dirty-limit"}
#     <- {"return": [
#            { "limit-rate": 60, "current-rate": 3, "cpu-index": 0},
#            { "limit-rate": 60, "current-rate": 3, "cpu-index": 1}]}
##
{ 'command': 'query-vcpu-dirty-limit',
  'returns': [ 'DirtyLimitInfo' ] }

##
# @MigrationThreadInfo:
#
# Information about migration threads
#
# @name: the name of the migration thread
#
# @thread-id: ID of the underlying host thread
#
# Since: 7.2
##
{ 'struct': 'MigrationThreadInfo',
  'data': {'name': 'str',
           'thread-id': 'int'} }

##
# @query-migrationthreads:
#
# Returns information about migration threads.
#
# Returns: a list of @MigrationThreadInfo
#
# Since: 7.2
##
{ 'command': 'query-migrationthreads',
  'returns': ['MigrationThreadInfo'] }

##
# @snapshot-save:
#
# Save a VM snapshot
#
# @job-id: identifier for the newly created job
#
# @tag: name of the snapshot to create
#
# @vmstate: block device node name to save vmstate to
#
# @devices: list of block device node names to save a snapshot to
#
# Applications should not assume that the snapshot save is complete
# when this command returns.  The job commands / events must be used
# to determine completion and to fetch details of any errors that
# arise.
#
# Note that execution of the guest CPUs may be stopped during the
# time it takes to save the snapshot.  A future version of QEMU may
# ensure CPUs are executing continuously.
#
# It is strongly recommended that @devices contain all writable block
# device nodes if a consistent snapshot is required.
#
# If @tag already exists, an error will be reported.
#
# Example:
#
#     -> { "execute": "snapshot-save",
#          "arguments": {
#             "job-id": "snapsave0",
#             "tag": "my-snap",
#             "vmstate": "disk0",
#             "devices": ["disk0", "disk1"]
#          }
#        }
#     <- { "return": { } }
#     <- {"event": "JOB_STATUS_CHANGE",
#         "timestamp": {"seconds": 1432121972, "microseconds": 744001},
#         "data": {"status": "created", "id": "snapsave0"}}
#     <- {"event": "JOB_STATUS_CHANGE",
#         "timestamp": {"seconds": 1432122172, "microseconds": 744001},
#         "data": {"status": "running", "id": "snapsave0"}}
#     <- {"event": "STOP",
#         "timestamp": {"seconds": 1432122372, "microseconds": 744001} }
#     <- {"event": "RESUME",
#         "timestamp": {"seconds": 1432122572, "microseconds": 744001} }
#     <- {"event": "JOB_STATUS_CHANGE",
#         "timestamp": {"seconds": 1432122772, "microseconds": 744001},
#         "data": {"status": "waiting", "id": "snapsave0"}}
#     <- {"event": "JOB_STATUS_CHANGE",
#         "timestamp": {"seconds": 1432122972, "microseconds": 744001},
#         "data": {"status": "pending", "id": "snapsave0"}}
#     <- {"event": "JOB_STATUS_CHANGE",
#         "timestamp": {"seconds": 1432123172, "microseconds": 744001},
#         "data": {"status": "concluded", "id": "snapsave0"}}
#     -> {"execute": "query-jobs"}
#     <- {"return": [{"current-progress": 1,
#                     "status": "concluded",
#                     "total-progress": 1,
#                     "type": "snapshot-save",
#                     "id": "snapsave0"}]}
#
# Since: 6.0
##
{ 'command': 'snapshot-save',
  'data': { 'job-id': 'str',
            'tag': 'str',
            'vmstate': 'str',
            'devices': ['str'] } }
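
# A failed snapshot job also reaches "concluded"; the cause can then
# be read back with query-jobs (illustrative; the exact error string
# depends on what went wrong):
#
#     -> {"execute": "query-jobs"}
#     <- {"return": [{"current-progress": 1,
#                     "status": "concluded",
#                     "total-progress": 1,
#                     "type": "snapshot-save",
#                     "id": "snapsave0",
#                     "error": "Device 'disk1' is writable but does not support snapshots"}]}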

##
# @snapshot-load:
#
# Load a VM snapshot
#
# @job-id: identifier for the newly created job
#
# @tag: name of the snapshot to load.
#
# @vmstate: block device node name to load vmstate from
#
# @devices: list of block device node names to load a snapshot from
#
# Applications should not assume that the snapshot load is complete
# when this command returns.  The job commands / events must be used
# to determine completion and to fetch details of any errors that
# arise.
#
# Note that execution of the guest CPUs will be stopped during the
# time it takes to load the snapshot.
#
# It is strongly recommended that @devices contain all writable block
# device nodes that may have changed since the original
# @snapshot-save command execution.
#
# Example:
#
#     -> { "execute": "snapshot-load",
#          "arguments": {
#             "job-id": "snapload0",
#             "tag": "my-snap",
#             "vmstate": "disk0",
#             "devices": ["disk0", "disk1"]
#          }
#        }
#     <- { "return": { } }
#     <- {"event": "JOB_STATUS_CHANGE",
#         "timestamp": {"seconds": 1472124172, "microseconds": 744001},
#         "data": {"status": "created", "id": "snapload0"}}
#     <- {"event": "JOB_STATUS_CHANGE",
#         "timestamp": {"seconds": 1472125172, "microseconds": 744001},
#         "data": {"status": "running", "id": "snapload0"}}
#     <- {"event": "STOP",
#         "timestamp": {"seconds": 1472125472, "microseconds": 744001} }
#     <- {"event": "RESUME",
#         "timestamp": {"seconds": 1472125872, "microseconds": 744001} }
#     <- {"event": "JOB_STATUS_CHANGE",
#         "timestamp": {"seconds": 1472126172, "microseconds": 744001},
#         "data": {"status": "waiting", "id": "snapload0"}}
#     <- {"event": "JOB_STATUS_CHANGE",
#         "timestamp": {"seconds": 1472127172, "microseconds": 744001},
#         "data": {"status": "pending", "id": "snapload0"}}
#     <- {"event": "JOB_STATUS_CHANGE",
#         "timestamp": {"seconds": 1472128172, "microseconds": 744001},
#         "data": {"status": "concluded", "id": "snapload0"}}
#     -> {"execute": "query-jobs"}
#     <- {"return": [{"current-progress": 1,
#                     "status": "concluded",
#                     "total-progress": 1,
#                     "type": "snapshot-load",
#                     "id": "snapload0"}]}
#
# Since: 6.0
##
{ 'command': 'snapshot-load',
  'data': { 'job-id': 'str',
            'tag': 'str',
            'vmstate': 'str',
            'devices': ['str'] } }

##
# @snapshot-delete:
#
# Delete a VM snapshot
#
# @job-id: identifier for the newly created job
#
# @tag: name of the snapshot to delete.
#
# @devices: list of block device node names to delete a snapshot from
#
# Applications should not assume that the snapshot delete is complete
# when this command returns.  The job commands / events must be used
# to determine completion and to fetch details of any errors that
# arise.
#
# Example:
#
#     -> { "execute": "snapshot-delete",
#          "arguments": {
#             "job-id": "snapdelete0",
#             "tag": "my-snap",
#             "devices": ["disk0", "disk1"]
#          }
#        }
#     <- { "return": { } }
#     <- {"event": "JOB_STATUS_CHANGE",
#         "timestamp": {"seconds": 1442124172, "microseconds": 744001},
#         "data": {"status": "created", "id": "snapdelete0"}}
#     <- {"event": "JOB_STATUS_CHANGE",
#         "timestamp": {"seconds": 1442125172, "microseconds": 744001},
#         "data": {"status": "running", "id": "snapdelete0"}}
#     <- {"event": "JOB_STATUS_CHANGE",
#         "timestamp": {"seconds": 1442126172, "microseconds": 744001},
#         "data": {"status": "waiting", "id": "snapdelete0"}}
#     <- {"event": "JOB_STATUS_CHANGE",
#         "timestamp": {"seconds": 1442127172, "microseconds": 744001},
#         "data": {"status": "pending", "id": "snapdelete0"}}
#     <- {"event": "JOB_STATUS_CHANGE",
#         "timestamp": {"seconds": 1442128172, "microseconds": 744001},
#         "data": {"status": "concluded", "id": "snapdelete0"}}
#     -> {"execute": "query-jobs"}
#     <- {"return": [{"current-progress": 1,
#                     "status": "concluded",
#                     "total-progress": 1,
#                     "type": "snapshot-delete",
#                     "id": "snapdelete0"}]}
#
# Since: 6.0
##
{ 'command': 'snapshot-delete',
  'data': { 'job-id': 'str',
            'tag': 'str',
            'devices': ['str'] } }