# -*- Mode: Python -*-
#

##
# = Migration
##

{ 'include': 'common.json' }
{ 'include': 'sockets.json' }

##
# @MigrationStats:
#
# Detailed migration status.
#
# @transferred: amount of bytes already transferred to the target VM
#
# @remaining: amount of bytes remaining to be transferred to the target VM
#
# @total: total amount of bytes involved in the migration process
#
# @duplicate: number of duplicate (zero) pages (since 1.2)
#
# @skipped: number of skipped zero pages (since 1.5)
#
# @normal: number of normal pages (since 1.2)
#
# @normal-bytes: number of normal bytes sent (since 1.2)
#
# @dirty-pages-rate: number of pages dirtied per second by the
#     guest (since 1.3)
#
# @mbps: throughput in megabits/sec. (since 1.6)
#
# @dirty-sync-count: number of times that dirty ram was synchronized (since 2.1)
#
# @postcopy-requests: The number of page requests received from the destination
#     (since 2.7)
#
# @page-size: The number of bytes per page for the various page-based
#     statistics (since 2.10)
#
# @multifd-bytes: The number of bytes sent through multifd (since 3.0)
#
# @pages-per-second: the number of memory pages transferred per second
#     (Since 4.0)
#
# Since: 0.14.0
##
{ 'struct': 'MigrationStats',
  'data': {'transferred': 'int', 'remaining': 'int', 'total': 'int',
           'duplicate': 'int', 'skipped': 'int', 'normal': 'int',
           'normal-bytes': 'int', 'dirty-pages-rate': 'int',
           'mbps': 'number', 'dirty-sync-count': 'int',
           'postcopy-requests': 'int', 'page-size': 'int',
           'multifd-bytes': 'uint64', 'pages-per-second': 'uint64' } }

##
# @XBZRLECacheStats:
#
# Detailed XBZRLE migration cache statistics
#
# @cache-size: XBZRLE cache size
#
# @bytes: amount of bytes already transferred to the target VM
#
# @pages: amount of pages transferred to the target VM
#
# @cache-miss: number of cache misses
#
# @cache-miss-rate: rate of cache misses (since 2.1)
#
# @overflow: number of overflows
#
# Since: 1.2
##
{ 'struct': 'XBZRLECacheStats',
  'data': {'cache-size': 'int', 'bytes': 'int', 'pages': 'int',
           'cache-miss': 'int', 'cache-miss-rate': 'number',
           'overflow': 'int' } }

##
# @CompressionStats:
#
# Detailed migration compression statistics
#
# @pages: amount of pages compressed and transferred to the target VM
#
# @busy: count of times that no free thread was available to compress data
#
# @busy-rate: rate of compression threads being busy
#
# @compressed-size: amount of bytes after compression
#
# @compression-rate: rate of compressed size
#
# Since: 3.1
##
{ 'struct': 'CompressionStats',
  'data': {'pages': 'int', 'busy': 'int', 'busy-rate': 'number',
           'compressed-size': 'int', 'compression-rate': 'number' } }

##
# @MigrationStatus:
#
# An enumeration of migration status.
#
# @none: no migration has ever happened.
#
# @setup: migration process has been initiated.
#
# @cancelling: in the process of cancelling migration.
#
# @cancelled: cancelling migration is finished.
#
# @active: in the process of doing migration.
#
# @postcopy-active: like active, but now in postcopy mode. (since 2.5)
#
# @postcopy-paused: during postcopy but paused. (since 3.0)
#
# @postcopy-recover: trying to recover from a paused postcopy. (since 3.0)
#
# @completed: migration is finished.
#
# @failed: some error occurred during migration process.
#
# @colo: the VM is in the process of fault tolerance; the VM cannot enter
#     this state unless the colo capability is enabled for migration.
#     (since 2.8)
#
# @pre-switchover: Paused before device serialisation. (since 2.11)
#
# @device: During device serialisation when pause-before-switchover is enabled
#     (since 2.11)
#
# @wait-unplug: wait for device unplug request by guest OS to be completed.
#     (since 4.2)
#
# Since: 2.3
#
##
{ 'enum': 'MigrationStatus',
  'data': [ 'none', 'setup', 'cancelling', 'cancelled',
            'active', 'postcopy-active', 'postcopy-paused',
            'postcopy-recover', 'completed', 'failed', 'colo',
            'pre-switchover', 'device', 'wait-unplug' ] }

##
# @MigrationInfo:
#
# Information about current migration process.
#
# @status: @MigrationStatus describing the current migration status.
#     If this field is not returned, no migration process
#     has been initiated
#
# @ram: @MigrationStats containing detailed migration
#     status, only returned if status is 'active' or
#     'completed' (since 1.2)
#
# @disk: @MigrationStats containing detailed disk migration
#     status, only returned if status is 'active' and it is a block
#     migration
#
# @xbzrle-cache: @XBZRLECacheStats containing detailed XBZRLE
#     migration statistics, only returned if XBZRLE feature is on and
#     status is 'active' or 'completed' (since 1.2)
#
# @total-time: total amount of milliseconds since migration started.
#     If migration has ended, it returns the total migration
#     time. (since 1.2)
#
# @downtime: only present when migration finishes correctly;
#     total downtime in milliseconds for the guest. (since 1.3)
#
# @expected-downtime: only present while migration is active;
#     expected downtime in milliseconds for the guest in last walk
#     of the dirty bitmap. (since 1.3)
#
# @setup-time: amount of setup time in milliseconds *before* the
#     iterations begin but *after* the QMP command is issued. This is designed
#     to provide an accounting of any activities (such as RDMA pinning) which
#     may be expensive, but do not actually occur during the iterative
#     migration rounds themselves. (since 1.6)
#
# @cpu-throttle-percentage: percentage of time guest cpus are being
#     throttled during auto-converge. This is only present when auto-converge
#     has started throttling guest cpus. (Since 2.7)
#
# @error-desc: the human readable error description string, when
#     @status is 'failed'. Clients should not attempt to parse the
#     error strings. (Since 2.7)
#
# @postcopy-blocktime: total time when all vCPU were blocked during postcopy
#     live migration. This is only present when the postcopy-blocktime
#     migration capability is enabled. (Since 3.0)
#
# @postcopy-vcpu-blocktime: list of the postcopy blocktime per vCPU. This is
#     only present when the postcopy-blocktime migration capability
#     is enabled. (Since 3.0)
#
# @compression: migration compression statistics, only returned if compression
#     feature is on and status is 'active' or 'completed' (Since 3.1)
#
# @socket-address: Only used for tcp, to know what the real port is (Since 4.0)
#
# Since: 0.14.0
##
{ 'struct': 'MigrationInfo',
  'data': {'*status': 'MigrationStatus', '*ram': 'MigrationStats',
           '*disk': 'MigrationStats',
           '*xbzrle-cache': 'XBZRLECacheStats',
           '*total-time': 'int',
           '*expected-downtime': 'int',
           '*downtime': 'int',
           '*setup-time': 'int',
           '*cpu-throttle-percentage': 'int',
           '*error-desc': 'str',
           '*postcopy-blocktime': 'uint32',
           '*postcopy-vcpu-blocktime': ['uint32'],
           '*compression': 'CompressionStats',
           '*socket-address': ['SocketAddress'] } }

##
# @query-migrate:
#
# Returns information about current migration process. If migration
# is active there will be another json-object with RAM migration
# status and if block migration is active another one with block
# migration status.
#
# Returns: @MigrationInfo
#
# Since: 0.14.0
#
# Example:
#
# 1. Before the first migration
#
# -> { "execute": "query-migrate" }
# <- { "return": {} }
#
# 2. Migration is done and has succeeded
#
# -> { "execute": "query-migrate" }
# <- { "return": {
#         "status": "completed",
#         "total-time":12345,
#         "setup-time":12345,
#         "downtime":12345,
#         "ram":{
#           "transferred":123,
#           "remaining":123,
#           "total":246,
#           "duplicate":123,
#           "normal":123,
#           "normal-bytes":123456,
#           "dirty-sync-count":15
#         }
#      }
#    }
#
# 3. Migration is done and has failed
#
# -> { "execute": "query-migrate" }
# <- { "return": { "status": "failed" } }
#
# 4. Migration is being performed and is not a block migration:
#
# -> { "execute": "query-migrate" }
# <- {
#       "return":{
#          "status":"active",
#          "total-time":12345,
#          "setup-time":12345,
#          "expected-downtime":12345,
#          "ram":{
#             "transferred":123,
#             "remaining":123,
#             "total":246,
#             "duplicate":123,
#             "normal":123,
#             "normal-bytes":123456,
#             "dirty-sync-count":15
#          }
#       }
#    }
#
# 5. Migration is being performed and is a block migration:
#
# -> { "execute": "query-migrate" }
# <- {
#       "return":{
#          "status":"active",
#          "total-time":12345,
#          "setup-time":12345,
#          "expected-downtime":12345,
#          "ram":{
#             "total":1057024,
#             "remaining":1053304,
#             "transferred":3720,
#             "duplicate":123,
#             "normal":123,
#             "normal-bytes":123456,
#             "dirty-sync-count":15
#          },
#          "disk":{
#             "total":20971520,
#             "remaining":20880384,
#             "transferred":91136
#          }
#       }
#    }
#
# 6. Migration is being performed and XBZRLE is active:
#
# -> { "execute": "query-migrate" }
# <- {
#       "return":{
#          "status":"active",
#          "total-time":12345,
#          "setup-time":12345,
#          "expected-downtime":12345,
#          "ram":{
#             "total":1057024,
#             "remaining":1053304,
#             "transferred":3720,
#             "duplicate":10,
#             "normal":3333,
#             "normal-bytes":3412992,
#             "dirty-sync-count":15
#          },
#          "xbzrle-cache":{
#             "cache-size":67108864,
#             "bytes":20971520,
#             "pages":2444343,
#             "cache-miss":2244,
#             "cache-miss-rate":0.123,
#             "overflow":34434
#          }
#       }
#    }
#
##
{ 'command': 'query-migrate', 'returns': 'MigrationInfo' }

##
# @MigrationCapability:
#
# Migration capabilities enumeration
#
# @xbzrle: Migration supports xbzrle (Xor Based Zero Run Length Encoding).
#     This feature allows us to minimize migration traffic for certain
#     workloads, by sending compressed differences of the pages
#
# @rdma-pin-all: Controls whether or not the entire VM memory footprint is
#     mlock()'d on demand or all at once. Refer to docs/rdma.txt for usage.
#     Disabled by default. (since 2.0)
#
# @zero-blocks: During storage migration encode blocks of zeroes efficiently.
#     This essentially saves 1MB of zeroes per block on the wire. Enabling
#     requires source and target VM to support this feature. To enable it is
#     sufficient to enable the capability on the source VM. The feature is
#     disabled by default. (since 1.6)
#
# @compress: Use multiple compression threads to accelerate live migration.
#     This feature can help to reduce the migration traffic, by sending
#     compressed pages. Please note that if compress and xbzrle are both
#     on, compress only takes effect in the ram bulk stage; after that,
#     it will be disabled and only xbzrle takes effect. This can help to
#     minimize migration traffic. The feature is disabled by default.
#     (since 2.4)
#
# @events: generate events for each migration state change
#     (since 2.4)
#
# @auto-converge: If enabled, QEMU will automatically throttle down the guest
#     to speed up convergence of RAM migration. (since 1.6)
#
# @postcopy-ram: Start executing on the migration target before all of RAM has
#     been migrated, pulling the remaining pages along as needed. The
#     capability must have the same setting on both source and target
#     or migration will not even start. NOTE: If the migration fails during
#     postcopy the VM will fail. (since 2.6)
#
# @x-colo: If enabled, migration will never end, and the state of the VM on
#     the primary side will be migrated continuously to the VM on the
#     secondary side; this process is called COarse-Grain LOck Stepping
#     (COLO) for Non-stop Service. (since 2.8)
#
# @release-ram: if enabled, qemu will free the migrated ram pages on the source
#     during postcopy-ram migration. (since 2.9)
#
# @block: If enabled, QEMU will also migrate the contents of all block
#     devices. Default is disabled. A possible alternative uses
#     mirror jobs to a builtin NBD server on the destination, which
#     offers more flexibility.
#     (Since 2.10)
#
# @return-path: If enabled, migration will use the return path even
#     for precopy. (since 2.10)
#
# @pause-before-switchover: Pause outgoing migration before serialising device
#     state and before disabling block IO (since 2.11)
#
# @multifd: Use more than one fd for migration (since 4.0)
#
# @dirty-bitmaps: If enabled, QEMU will migrate named dirty bitmaps.
#     (since 2.12)
#
# @postcopy-blocktime: Calculate downtime for postcopy live migration
#     (since 3.0)
#
# @late-block-activate: If enabled, the destination will not activate block
#     devices (and thus take locks) immediately at the end of migration.
#     (since 3.0)
#
# @x-ignore-shared: If enabled, QEMU will not migrate shared memory (since 4.0)
#
# @validate-uuid: Send the UUID of the source to allow the destination
#     to ensure it is the same. (since 4.2)
#
# Since: 1.2
##
{ 'enum': 'MigrationCapability',
  'data': ['xbzrle', 'rdma-pin-all', 'auto-converge', 'zero-blocks',
           'compress', 'events', 'postcopy-ram', 'x-colo', 'release-ram',
           'block', 'return-path', 'pause-before-switchover', 'multifd',
           'dirty-bitmaps', 'postcopy-blocktime', 'late-block-activate',
           'x-ignore-shared', 'validate-uuid' ] }

##
# @MigrationCapabilityStatus:
#
# Migration capability information
#
# @capability: capability enum
#
# @state: capability state bool
#
# Since: 1.2
##
{ 'struct': 'MigrationCapabilityStatus',
  'data': { 'capability': 'MigrationCapability', 'state': 'bool' } }

##
# @migrate-set-capabilities:
#
# Enable/Disable the following migration capabilities (like xbzrle)
#
# @capabilities: json array of capability modifications to make
#
# Since: 1.2
#
# Example:
#
# -> { "execute": "migrate-set-capabilities" , "arguments":
#      { "capabilities": [ { "capability": "xbzrle", "state": true } ] } }
# <- { "return": {} }
#
##
{ 'command': 'migrate-set-capabilities',
  'data': { 'capabilities': ['MigrationCapabilityStatus'] } }

##
# @query-migrate-capabilities:
#
# Returns information about the current migration capabilities status
#
# Returns: @MigrationCapabilityStatus
#
# Since: 1.2
#
# Example:
#
# -> { "execute": "query-migrate-capabilities" }
# <- { "return": [
#       {"state": false, "capability": "xbzrle"},
#       {"state": false, "capability": "rdma-pin-all"},
#       {"state": false, "capability": "auto-converge"},
#       {"state": false, "capability": "zero-blocks"},
#       {"state": false, "capability": "compress"},
#       {"state": true, "capability": "events"},
#       {"state": false, "capability": "postcopy-ram"},
#       {"state": false, "capability": "x-colo"}
#    ]}
#
##
{ 'command': 'query-migrate-capabilities', 'returns': ['MigrationCapabilityStatus']}
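
# Illustrative sketch: enabling the postcopy-ram capability and then verifying
# the setting. The capability has to be enabled on both source and destination
# before migration starts; the response list below is abbreviated.
#
# -> { "execute": "migrate-set-capabilities",
#      "arguments": { "capabilities": [
#          { "capability": "postcopy-ram", "state": true } ] } }
# <- { "return": {} }
# -> { "execute": "query-migrate-capabilities" }
# <- { "return": [
#        {"state": true, "capability": "postcopy-ram"},
#        ...
#    ]}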

##
# @MultiFDCompression:
#
# An enumeration of multifd compression methods.
#
# @none: no compression.
# @zlib: use zlib compression method.
# @zstd: use zstd compression method.
#
# Since: 5.0
#
##
{ 'enum': 'MultiFDCompression',
  'data': [ 'none', 'zlib',
            { 'name': 'zstd', 'if': 'defined(CONFIG_ZSTD)' } ] }

##
# @MigrationParameter:
#
# Migration parameters enumeration
#
# @announce-initial: Initial delay (in milliseconds) before sending the first
#     announce (Since 4.0)
#
# @announce-max: Maximum delay (in milliseconds) between packets in the
#     announcement (Since 4.0)
#
# @announce-rounds: Number of self-announce packets sent after migration
#     (Since 4.0)
#
# @announce-step: Increase in delay (in milliseconds) between subsequent
#     packets in the announcement (Since 4.0)
#
# @compress-level: Set the compression level to be used in live migration.
#     The compression level is an integer between 0 and 9, where 0 means
#     no compression, 1 means the best compression speed, and 9 means the
#     best compression ratio, which will consume more CPU.
#
# @compress-threads: Set compression thread count to be used in live
#     migration. The compression thread count is an integer between 1 and 255.
#
# @compress-wait-thread: Controls behavior when all compression threads are
#     currently busy. If true (default), wait for a free compression thread
#     to become available; otherwise, send the page uncompressed. (Since 3.1)
#
# @decompress-threads: Set decompression thread count to be used in live
#     migration. The decompression thread count is an integer between 1
#     and 255. Usually, decompression is at least 4 times as fast as
#     compression, so setting decompress-threads to about 1/4 of
#     compress-threads is adequate.
#
# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
#     bytes_xfer_period to trigger throttling. It is expressed as a
#     percentage. The default value is 50. (Since 5.0)
#
# @cpu-throttle-initial: Initial percentage of time guest cpus are throttled
#     when migration auto-converge is activated. The default value is 20.
#     (Since 2.7)
#
# @cpu-throttle-increment: throttle percentage increase each time
#     auto-converge detects that migration is not making progress.
#     The default value is 10. (Since 2.7)
#
# @cpu-throttle-tailslow: Make CPU throttling slower at the tail stage.
#     At the tail stage of throttling, the guest is very sensitive to the
#     CPU percentage, while the @cpu-throttle-increment is usually excessive.
#     If this parameter is true, we will compute the ideal CPU percentage
#     used by the guest, which may exactly make the dirty rate match the
#     dirty rate threshold. Then we will choose the smaller of the throttle
#     increment specified by @cpu-throttle-increment and the one generated
#     by the ideal CPU percentage. Therefore, it is compatible with
#     traditional throttling, while the throttle increment won't be
#     excessive at the tail stage. The default value is false. (Since 5.1)
#
# @tls-creds: ID of the 'tls-creds' object that provides credentials for
#     establishing a TLS connection over the migration data channel.
#     On the outgoing side of the migration, the credentials must
#     be for a 'client' endpoint, while for the incoming side the
#     credentials must be for a 'server' endpoint. Setting this
#     will enable TLS for all migrations. The default is unset,
#     resulting in unsecured migration at the QEMU level. (Since 2.7)
#
# @tls-hostname: hostname of the target host for the migration. This is
#     required when using x509 based TLS credentials and the migration URI
#     does not already include a hostname. For example if using fd: or
#     exec: based migration, the hostname must be provided so that the
#     server's x509 certificate identity can be validated. (Since 2.7)
#
# @tls-authz: ID of the 'authz' object subclass that provides access control
#     checking of the TLS x509 certificate distinguished name.
#     This object is only resolved at time of use, so can be deleted
#     and recreated on the fly while the migration server is active.
#     If missing, it will default to denying access (Since 4.0)
#
# @max-bandwidth: maximum speed for migration, in bytes per second.
#     (Since 2.8)
#
# @downtime-limit: maximum tolerated downtime for migration, in
#     milliseconds (Since 2.8)
#
# @x-checkpoint-delay: The delay time (in ms) between two COLO checkpoints in
#     periodic mode. (Since 2.8)
#
# @block-incremental: Affects how much storage is migrated when the
#     block migration capability is enabled. When false, the entire
#     storage backing chain is migrated into a flattened image at
#     the destination; when true, only the active qcow2 layer is
#     migrated and the destination must already have access to the
#     same backing chain as was used on the source. (since 2.10)
#
# @multifd-channels: Number of channels used to migrate data in
#     parallel. This is the same as the number of sockets used for
#     migration. The default value is 2 (since 4.0)
#
# @xbzrle-cache-size: cache size to be used by XBZRLE migration. It
#     needs to be a multiple of the target page size and a power of 2
#     (Since 2.11)
#
# @max-postcopy-bandwidth: Background transfer bandwidth during postcopy.
#     Defaults to 0 (unlimited). In bytes per second. (Since 3.0)
#
# @max-cpu-throttle: maximum cpu throttle percentage.
#     Defaults to 99. (Since 3.1)
#
# @multifd-compression: Which compression method to use.
#     Defaults to none. (Since 5.0)
#
# @multifd-zlib-level: Set the compression level to be used in live
#     migration. The compression level is an integer between 0 and 9,
#     where 0 means no compression, 1 means the best compression speed,
#     and 9 means the best compression ratio, which will consume more CPU.
#     Defaults to 1. (Since 5.0)
#
# @multifd-zstd-level: Set the compression level to be used in live
#     migration. The compression level is an integer between 0 and 20,
#     where 0 means no compression, 1 means the best compression speed,
#     and 20 means the best compression ratio, which will consume more CPU.
#     Defaults to 1. (Since 5.0)
#
# Since: 2.4
##
{ 'enum': 'MigrationParameter',
  'data': ['announce-initial', 'announce-max',
           'announce-rounds', 'announce-step',
           'compress-level', 'compress-threads', 'decompress-threads',
           'compress-wait-thread', 'throttle-trigger-threshold',
           'cpu-throttle-initial', 'cpu-throttle-increment',
           'cpu-throttle-tailslow',
           'tls-creds', 'tls-hostname', 'tls-authz', 'max-bandwidth',
           'downtime-limit', 'x-checkpoint-delay', 'block-incremental',
           'multifd-channels',
           'xbzrle-cache-size', 'max-postcopy-bandwidth',
           'max-cpu-throttle', 'multifd-compression',
           'multifd-zlib-level', 'multifd-zstd-level' ] }
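
# Illustrative sketch: enabling TLS for an outgoing migration. It assumes a
# tls-creds object with the id "tls0" (for example a tls-creds-x509 'client'
# endpoint) has already been created; the object id and hostname below are
# placeholders.
#
# -> { "execute": "migrate-set-parameters",
#      "arguments": { "tls-creds": "tls0",
#                     "tls-hostname": "dst.example.com" } }
# <- { "return": {} }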

##
# @MigrateSetParameters:
#
# @announce-initial: Initial delay (in milliseconds) before sending the first
#     announce (Since 4.0)
#
# @announce-max: Maximum delay (in milliseconds) between packets in the
#     announcement (Since 4.0)
#
# @announce-rounds: Number of self-announce packets sent after migration
#     (Since 4.0)
#
# @announce-step: Increase in delay (in milliseconds) between subsequent
#     packets in the announcement (Since 4.0)
#
# @compress-level: compression level
#
# @compress-threads: compression thread count
#
# @compress-wait-thread: Controls behavior when all compression threads are
#     currently busy. If true (default), wait for a free compression thread
#     to become available; otherwise, send the page uncompressed. (Since 3.1)
#
# @decompress-threads: decompression thread count
#
# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
#     bytes_xfer_period to trigger throttling. It is expressed as a
#     percentage. The default value is 50. (Since 5.0)
#
# @cpu-throttle-initial: Initial percentage of time guest cpus are
#     throttled when migration auto-converge is activated.
#     The default value is 20. (Since 2.7)
#
# @cpu-throttle-increment: throttle percentage increase each time
#     auto-converge detects that migration is not making progress.
#     The default value is 10. (Since 2.7)
#
# @cpu-throttle-tailslow: Make CPU throttling slower at the tail stage.
#     At the tail stage of throttling, the guest is very sensitive to the
#     CPU percentage, while the @cpu-throttle-increment is usually excessive.
#     If this parameter is true, we will compute the ideal CPU percentage
#     used by the guest, which may exactly make the dirty rate match the
#     dirty rate threshold. Then we will choose the smaller of the throttle
#     increment specified by @cpu-throttle-increment and the one generated
#     by the ideal CPU percentage. Therefore, it is compatible with
#     traditional throttling, while the throttle increment won't be
#     excessive at the tail stage. The default value is false. (Since 5.1)
#
# @tls-creds: ID of the 'tls-creds' object that provides credentials
#     for establishing a TLS connection over the migration data
#     channel. On the outgoing side of the migration, the credentials
#     must be for a 'client' endpoint, while for the incoming side the
#     credentials must be for a 'server' endpoint. Setting this
#     to a non-empty string enables TLS for all migrations.
#     An empty string means that QEMU will use plain text mode for
#     migration, rather than TLS (Since 2.9)
#     Previously (since 2.7), this was reported by omitting
#     tls-creds instead.
#
# @tls-hostname: hostname of the target host for the migration. This
#     is required when using x509 based TLS credentials and the
#     migration URI does not already include a hostname. For
#     example if using fd: or exec: based migration, the
#     hostname must be provided so that the server's x509
#     certificate identity can be validated. (Since 2.7)
#     An empty string means that QEMU will use the hostname
#     associated with the migration URI, if any. (Since 2.9)
#     Previously (since 2.7), this was reported by omitting
#     tls-hostname instead.
#
# @tls-authz: ID of the 'authz' object subclass that provides access control
#     checking of the TLS x509 certificate distinguished name. (Since 4.0)
#
# @max-bandwidth: maximum speed for migration, in bytes per second.
#     (Since 2.8)
#
# @downtime-limit: maximum tolerated downtime for migration, in
#     milliseconds (Since 2.8)
#
# @x-checkpoint-delay: the delay time between two COLO checkpoints. (Since 2.8)
#
# @block-incremental: Affects how much storage is migrated when the
#     block migration capability is enabled. When false, the entire
#     storage backing chain is migrated into a flattened image at
#     the destination; when true, only the active qcow2 layer is
#     migrated and the destination must already have access to the
#     same backing chain as was used on the source. (since 2.10)
#
# @multifd-channels: Number of channels used to migrate data in
#     parallel. This is the same as the number of sockets used for
#     migration. The default value is 2 (since 4.0)
#
# @xbzrle-cache-size: cache size to be used by XBZRLE migration. It
#     needs to be a multiple of the target page size and a power of 2
#     (Since 2.11)
#
# @max-postcopy-bandwidth: Background transfer bandwidth during postcopy.
#     Defaults to 0 (unlimited). In bytes per second. (Since 3.0)
#
# @max-cpu-throttle: maximum cpu throttle percentage.
#     The default value is 99. (Since 3.1)
#
# @multifd-compression: Which compression method to use.
#     Defaults to none. (Since 5.0)
#
# @multifd-zlib-level: Set the compression level to be used in live
#     migration. The compression level is an integer between 0 and 9,
#     where 0 means no compression, 1 means the best compression speed,
#     and 9 means the best compression ratio, which will consume more CPU.
#     Defaults to 1. (Since 5.0)
#
# @multifd-zstd-level: Set the compression level to be used in live
#     migration. The compression level is an integer between 0 and 20,
#     where 0 means no compression, 1 means the best compression speed,
#     and 20 means the best compression ratio, which will consume more CPU.
#     Defaults to 1. (Since 5.0)
#
# Since: 2.4
##
# TODO either fuse back into MigrationParameters, or make
# MigrationParameters members mandatory
{ 'struct': 'MigrateSetParameters',
  'data': { '*announce-initial': 'size',
            '*announce-max': 'size',
            '*announce-rounds': 'size',
            '*announce-step': 'size',
            '*compress-level': 'int',
            '*compress-threads': 'int',
            '*compress-wait-thread': 'bool',
            '*decompress-threads': 'int',
            '*throttle-trigger-threshold': 'int',
            '*cpu-throttle-initial': 'int',
            '*cpu-throttle-increment': 'int',
            '*cpu-throttle-tailslow': 'bool',
            '*tls-creds': 'StrOrNull',
            '*tls-hostname': 'StrOrNull',
            '*tls-authz': 'StrOrNull',
            '*max-bandwidth': 'int',
            '*downtime-limit': 'int',
            '*x-checkpoint-delay': 'int',
            '*block-incremental': 'bool',
            '*multifd-channels': 'int',
            '*xbzrle-cache-size': 'size',
            '*max-postcopy-bandwidth': 'size',
            '*max-cpu-throttle': 'int',
            '*multifd-compression': 'MultiFDCompression',
            '*multifd-zlib-level': 'int',
            '*multifd-zstd-level': 'int' } }

##
# @migrate-set-parameters:
#
# Set various migration parameters.
#
# Since: 2.4
#
# Example:
#
# -> { "execute": "migrate-set-parameters" ,
#      "arguments": { "compress-level": 1 } }
# <- { "return": {} }
#
##
{ 'command': 'migrate-set-parameters', 'boxed': true,
  'data': 'MigrateSetParameters' }
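
# Illustrative sketch: enabling multifd with zlib compression. The channel
# count and compression method are arbitrary examples; the same capability
# and parameters are normally configured on the destination as well.
#
# -> { "execute": "migrate-set-capabilities",
#      "arguments": { "capabilities": [
#          { "capability": "multifd", "state": true } ] } }
# <- { "return": {} }
# -> { "execute": "migrate-set-parameters",
#      "arguments": { "multifd-channels": 4,
#                     "multifd-compression": "zlib" } }
# <- { "return": {} }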

##
# @MigrationParameters:
#
# The optional members aren't actually optional.
#
# @announce-initial: Initial delay (in milliseconds) before sending the
#     first announce (Since 4.0)
#
# @announce-max: Maximum delay (in milliseconds) between packets in the
#     announcement (Since 4.0)
#
# @announce-rounds: Number of self-announce packets sent after migration
#     (Since 4.0)
#
# @announce-step: Increase in delay (in milliseconds) between subsequent
#     packets in the announcement (Since 4.0)
#
# @compress-level: compression level
#
# @compress-threads: compression thread count
#
# @compress-wait-thread: Controls behavior when all compression threads are
#     currently busy. If true (default), wait for a free compression thread
#     to become available; otherwise, send the page uncompressed. (Since 3.1)
#
# @decompress-threads: decompression thread count
#
# @throttle-trigger-threshold: The ratio of bytes_dirty_period and
#     bytes_xfer_period to trigger throttling. It is expressed as a
#     percentage. The default value is 50. (Since 5.0)
#
# @cpu-throttle-initial: Initial percentage of time guest cpus are
#     throttled when migration auto-converge is activated.
#     (Since 2.7)
#
# @cpu-throttle-increment: throttle percentage increase each time
#     auto-converge detects that migration is not making progress.
#     (Since 2.7)
#
# @cpu-throttle-tailslow: Make CPU throttling slower at the tail stage.
#     At the tail stage of throttling, the guest is very sensitive to the
#     CPU percentage, while the @cpu-throttle-increment is usually excessive.
#     If this parameter is true, we will compute the ideal CPU percentage
#     used by the guest, which may exactly make the dirty rate match the
#     dirty rate threshold. Then we will choose the smaller of the throttle
#     increment specified by @cpu-throttle-increment and the one generated
#     by the ideal CPU percentage. Therefore, it is compatible with
#     traditional throttling, while the throttle increment won't be
#     excessive at the tail stage. The default value is false. (Since 5.1)
#
# @tls-creds: ID of the 'tls-creds' object that provides credentials
#     for establishing a TLS connection over the migration data
#     channel. On the outgoing side of the migration, the credentials
#     must be for a 'client' endpoint, while for the incoming side the
#     credentials must be for a 'server' endpoint.
#     An empty string means that QEMU will use plain text mode for
#     migration, rather than TLS (Since 2.7)
#     Note: 2.8 reports this by omitting tls-creds instead.
#
# @tls-hostname: hostname of the target host for the migration. This
#     is required when using x509 based TLS credentials and the
#     migration URI does not already include a hostname. For
#     example if using fd: or exec: based migration, the
#     hostname must be provided so that the server's x509
#     certificate identity can be validated. (Since 2.7)
#     An empty string means that QEMU will use the hostname
#     associated with the migration URI, if any. (Since 2.9)
#     Note: 2.8 reports this by omitting tls-hostname instead.
#
# @tls-authz: ID of the 'authz' object subclass that provides access control
#     checking of the TLS x509 certificate distinguished name. (Since 4.0)
#
# @max-bandwidth: maximum speed for migration, in bytes per second.
#     (Since 2.8)
#
# @downtime-limit: maximum tolerated downtime for migration, in
#     milliseconds (Since 2.8)
#
# @x-checkpoint-delay: the delay time between two COLO checkpoints. (Since 2.8)
#
# @block-incremental: Affects how much storage is migrated when the
#     block migration capability is enabled. When false, the entire
#     storage backing chain is migrated into a flattened image at
#     the destination; when true, only the active qcow2 layer is
#     migrated and the destination must already have access to the
#     same backing chain as was used on the source. (since 2.10)
#
# @multifd-channels: Number of channels used to migrate data in
#     parallel. This is the same as the number of sockets used for
#     migration. The default value is 2 (since 4.0)
#
# @xbzrle-cache-size: cache size to be used by XBZRLE migration. It
#     needs to be a multiple of the target page size and a power of 2
#     (Since 2.11)
#
# @max-postcopy-bandwidth: Background transfer bandwidth during postcopy.
#     Defaults to 0 (unlimited). In bytes per second. (Since 3.0)
#
# @max-cpu-throttle: maximum cpu throttle percentage.
#     Defaults to 99. (Since 3.1)
#
# @multifd-compression: Which compression method to use.
#     Defaults to none. (Since 5.0)
#
# @multifd-zlib-level: Set the compression level to be used in live
#     migration. The compression level is an integer between 0 and 9,
#     where 0 means no compression, 1 means the best compression speed,
#     and 9 means the best compression ratio, which will consume more CPU.
#     Defaults to 1. (Since 5.0)
#
# @multifd-zstd-level: Set the compression level to be used in live
#     migration. The compression level is an integer between 0 and 20,
#     where 0 means no compression, 1 means the best compression speed,
#     and 20 means the best compression ratio, which will consume more CPU.
#     Defaults to 1. (Since 5.0)
#
# Since: 2.4
##
{ 'struct': 'MigrationParameters',
  'data': { '*announce-initial': 'size',
            '*announce-max': 'size',
            '*announce-rounds': 'size',
            '*announce-step': 'size',
            '*compress-level': 'uint8',
            '*compress-threads': 'uint8',
            '*compress-wait-thread': 'bool',
            '*decompress-threads': 'uint8',
            '*throttle-trigger-threshold': 'uint8',
            '*cpu-throttle-initial': 'uint8',
            '*cpu-throttle-increment': 'uint8',
            '*cpu-throttle-tailslow': 'bool',
            '*tls-creds': 'str',
            '*tls-hostname': 'str',
            '*tls-authz': 'str',
            '*max-bandwidth': 'size',
            '*downtime-limit': 'uint64',
            '*x-checkpoint-delay': 'uint32',
            '*block-incremental': 'bool',
            '*multifd-channels': 'uint8',
            '*xbzrle-cache-size': 'size',
            '*max-postcopy-bandwidth': 'size',
            '*max-cpu-throttle': 'uint8',
            '*multifd-compression': 'MultiFDCompression',
            '*multifd-zlib-level': 'uint8',
            '*multifd-zstd-level': 'uint8' } }

##
# @query-migrate-parameters:
#
# Returns information about the current migration parameters
#
# Returns: @MigrationParameters
#
# Since: 2.4
#
# Example:
#
# -> { "execute": "query-migrate-parameters" }
# <- { "return": {
#          "decompress-threads": 2,
#          "cpu-throttle-increment": 10,
#          "compress-threads": 8,
#          "compress-level": 1,
#          "cpu-throttle-initial": 20,
#          "max-bandwidth": 33554432,
#          "downtime-limit": 300
#       }
#    }
#
##
{ 'command': 'query-migrate-parameters',
  'returns': 'MigrationParameters' }

##
# @client_migrate_info:
#
# Set migration information for remote display. This makes the server
# ask the client to automatically reconnect using the new parameters
# once migration finishes successfully. Only implemented for SPICE.
#
# @protocol: must be "spice"
# @hostname: migration target hostname
# @port: spice tcp port for plaintext channels
# @tls-port: spice tcp port for tls-secured channels
# @cert-subject: server certificate subject
#
# Since: 0.14.0
#
# Example:
#
# -> { "execute": "client_migrate_info",
#      "arguments": { "protocol": "spice",
#                     "hostname": "virt42.lab.kraxel.org",
#                     "port": 1234 } }
# <- { "return": {} }
#
##
{ 'command': 'client_migrate_info',
  'data': { 'protocol': 'str', 'hostname': 'str', '*port': 'int',
            '*tls-port': 'int', '*cert-subject': 'str' } }

##
# @migrate-start-postcopy:
#
# Follow-up to a migration command to switch the migration to postcopy mode.
# The postcopy-ram capability must be set on both source and destination
# before the original migration command.
#
# Since: 2.5
#
# Example:
#
# -> { "execute": "migrate-start-postcopy" }
# <- { "return": {} }
#
##
{ 'command': 'migrate-start-postcopy' }
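
# Illustrative sketch of a postcopy migration: the postcopy-ram capability is
# enabled on both sides beforehand, migration is started normally, and the
# switch to postcopy is requested once precopy has been running for a while.
# The URI below is a placeholder.
#
# -> { "execute": "migrate-set-capabilities",
#      "arguments": { "capabilities": [
#          { "capability": "postcopy-ram", "state": true } ] } }
# <- { "return": {} }
# -> { "execute": "migrate",
#      "arguments": { "uri": "tcp:192.168.1.200:4446" } }
# <- { "return": {} }
# -> { "execute": "migrate-start-postcopy" }
# <- { "return": {} }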

##
# @MIGRATION:
#
# Emitted when a migration event happens
#
# @status: @MigrationStatus describing the current migration status.
#
# Since: 2.4
#
# Example:
#
# <- {"timestamp": {"seconds": 1432121972, "microseconds": 744001},
#     "event": "MIGRATION",
#     "data": {"status": "completed"} }
#
##
{ 'event': 'MIGRATION',
  'data': {'status': 'MigrationStatus'}}

##
# @MIGRATION_PASS:
#
# Emitted from the source side of a migration at the start of each pass
# (when it syncs the dirty bitmap)
#
# @pass: An incrementing count (starting at 1 on the first pass)
#
# Since: 2.6
#
# Example:
#
# <- { "timestamp": {"seconds": 1449669631, "microseconds": 239225},
#      "event": "MIGRATION_PASS", "data": {"pass": 2} }
#
##
{ 'event': 'MIGRATION_PASS',
  'data': { 'pass': 'int' } }
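
# Illustrative sketch: MIGRATION events are only emitted once the 'events'
# capability has been enabled. The timestamps and the particular status
# sequence below are placeholders.
#
# -> { "execute": "migrate-set-capabilities",
#      "arguments": { "capabilities": [
#          { "capability": "events", "state": true } ] } }
# <- { "return": {} }
# <- { "timestamp": {"seconds": 1432121972, "microseconds": 743921},
#      "event": "MIGRATION", "data": {"status": "setup"} }
# <- { "timestamp": {"seconds": 1432121972, "microseconds": 744001},
#      "event": "MIGRATION", "data": {"status": "active"} }
# <- { "timestamp": {"seconds": 1432121985, "microseconds": 192387},
#      "event": "MIGRATION", "data": {"status": "completed"} }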

##
# @COLOMessage:
#
# The message transmission between Primary side and Secondary side.
#
# @checkpoint-ready: Secondary VM (SVM) is ready for checkpointing
#
# @checkpoint-request: Primary VM (PVM) tells SVM to prepare for checkpointing
#
# @checkpoint-reply: SVM gets PVM's checkpoint request
#
# @vmstate-send: VM's state will be sent by PVM.
#
# @vmstate-size: The total size of VMstate.
#
# @vmstate-received: VM's state has been received by SVM.
#
# @vmstate-loaded: VM's state has been loaded by SVM.
#
# Since: 2.8
##
{ 'enum': 'COLOMessage',
  'data': [ 'checkpoint-ready', 'checkpoint-request', 'checkpoint-reply',
            'vmstate-send', 'vmstate-size', 'vmstate-received',
            'vmstate-loaded' ] }

##
# @COLOMode:
#
# The COLO current mode.
#
# @none: COLO is disabled.
#
# @primary: COLO node in primary side.
#
# @secondary: COLO node in secondary side.
#
# Since: 2.8
##
{ 'enum': 'COLOMode',
  'data': [ 'none', 'primary', 'secondary'] }

##
# @FailoverStatus:
#
# An enumeration of COLO failover status
#
# @none: no failover has ever happened
#
# @require: got failover requirement but not handled
#
# @active: in the process of doing failover
#
# @completed: finish the process of failover
#
# @relaunch: restart the failover process, from 'none' -> 'completed' (Since 2.9)
#
# Since: 2.8
##
{ 'enum': 'FailoverStatus',
  'data': [ 'none', 'require', 'active', 'completed', 'relaunch' ] }

##
# @COLO_EXIT:
#
# Emitted when the VM finishes COLO mode, due to some error happening or
# at the request of users.
#
# @mode: report COLO mode when COLO exited.
#
# @reason: describes the reason for the COLO exit.
#
# Since: 3.1
#
# Example:
#
# <- { "timestamp": {"seconds": 2032141960, "microseconds": 417172},
#      "event": "COLO_EXIT", "data": {"mode": "primary", "reason": "request" } }
#
##
{ 'event': 'COLO_EXIT',
  'data': {'mode': 'COLOMode', 'reason': 'COLOExitReason' } }

##
# @COLOExitReason:
#
# The reason for a COLO exit.
#
# @none: failover has never happened. This state does not occur
#     in the COLO_EXIT event, and is only visible in the result of
#     query-colo-status.
#
# @request: COLO exit is due to an external request.
#
# @error: COLO exit is due to an internal error.
#
# @processing: COLO is currently handling a failover (since 4.0).
#
# Since: 3.1
##
{ 'enum': 'COLOExitReason',
  'data': [ 'none', 'request', 'error', 'processing' ] }

##
# @x-colo-lost-heartbeat:
#
# Tell QEMU that the heartbeat is lost, and request it to do takeover
# procedures. If this command is sent to the PVM, the Primary side will
# exit COLO mode. If sent to the Secondary, the Secondary side will run
# failover work, then take over server operation to become the service VM.
#
# Since: 2.8
#
# Example:
#
# -> { "execute": "x-colo-lost-heartbeat" }
# <- { "return": {} }
#
##
{ 'command': 'x-colo-lost-heartbeat' }

##
# @migrate_cancel:
#
# Cancel the currently executing migration process.
#
# Returns: nothing on success
#
# Notes: This command succeeds even if there is no migration process running.
#
# Since: 0.14.0
#
# Example:
#
# -> { "execute": "migrate_cancel" }
# <- { "return": {} }
#
##
{ 'command': 'migrate_cancel' }

##
# @migrate-continue:
#
# Continue migration when it's in a paused state.
#
# @state: The state the migration is currently expected to be in
#
# Returns: nothing on success
#
# Since: 2.11
#
# Example:
#
# -> { "execute": "migrate-continue" , "arguments":
#      { "state": "pre-switchover" } }
# <- { "return": {} }
##
{ 'command': 'migrate-continue', 'data': {'state': 'MigrationStatus'} }

##
# @migrate_set_downtime:
#
# Set maximum tolerated downtime for migration.
#
# @value: maximum downtime in seconds
#
# Features:
# @deprecated: This command is deprecated. Use
#     'migrate-set-parameters' instead.
#
# Returns: nothing on success
#
# Since: 0.14.0
#
# Example:
#
# -> { "execute": "migrate_set_downtime", "arguments": { "value": 0.1 } }
# <- { "return": {} }
#
##
{ 'command': 'migrate_set_downtime', 'data': {'value': 'number'},
  'features': [ 'deprecated' ] }

##
# @migrate_set_speed:
#
# Set maximum speed for migration.
#
# @value: maximum speed in bytes per second.
#
# Features:
# @deprecated: This command is deprecated. Use
#     'migrate-set-parameters' instead.
#
# Returns: nothing on success
#
# Since: 0.14.0
#
# Example:
#
# -> { "execute": "migrate_set_speed", "arguments": { "value": 1024 } }
# <- { "return": {} }
#
##
{ 'command': 'migrate_set_speed', 'data': {'value': 'int'},
  'features': [ 'deprecated' ] }
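
# Illustrative sketch of the non-deprecated replacement for the two commands
# above: the same limits are set through migrate-set-parameters. Note that
# downtime-limit is in milliseconds, whereas migrate_set_downtime took
# seconds; the values shown are arbitrary.
#
# -> { "execute": "migrate-set-parameters",
#      "arguments": { "downtime-limit": 300,
#                     "max-bandwidth": 33554432 } }
# <- { "return": {} }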

##
# @migrate-set-cache-size:
#
# Set cache size to be used by XBZRLE migration
#
# @value: cache size in bytes
#
# Features:
# @deprecated: This command is deprecated. Use
#     'migrate-set-parameters' instead.
#
# The size will be rounded down to the nearest power of 2.
# The cache size can be modified before and during ongoing migration.
#
# Returns: nothing on success
#
# Since: 1.2
#
# Example:
#
# -> { "execute": "migrate-set-cache-size",
#      "arguments": { "value": 536870912 } }
# <- { "return": {} }
#
##
{ 'command': 'migrate-set-cache-size', 'data': {'value': 'int'},
  'features': [ 'deprecated' ] }

##
# @query-migrate-cache-size:
#
# Query migration XBZRLE cache size
#
# Features:
# @deprecated: This command is deprecated. Use
#     'query-migrate-parameters' instead.
#
# Returns: XBZRLE cache size in bytes
#
# Since: 1.2
#
# Example:
#
# -> { "execute": "query-migrate-cache-size" }
# <- { "return": 67108864 }
#
##
{ 'command': 'query-migrate-cache-size', 'returns': 'int',
  'features': [ 'deprecated' ] }

##
# @migrate:
#
# Migrates the currently running guest to another Virtual Machine.
#
# @uri: the Uniform Resource Identifier of the destination VM
#
# @blk: do block migration (full disk copy)
#
# @inc: incremental disk copy migration
#
# @detach: this argument exists only for compatibility reasons and
#     is ignored by QEMU
#
# @resume: resume one paused migration, default "off". (since 3.0)
#
# Returns: nothing on success
#
# Since: 0.14.0
#
# Notes:
#
# 1. The 'query-migrate' command should be used to check migration's progress
#    and final result (this information is provided by the 'status' member)
#
# 2. All boolean arguments default to false
#
# 3. The user Monitor's "detach" argument is invalid in QMP and should not
#    be used
#
# Example:
#
# -> { "execute": "migrate", "arguments": { "uri": "tcp:0:4446" } }
# <- { "return": {} }
#
##
{ 'command': 'migrate',
  'data': {'uri': 'str', '*blk': 'bool', '*inc': 'bool',
           '*detach': 'bool', '*resume': 'bool' } }

##
# @migrate-incoming:
#
# Start an incoming migration. QEMU must have been started
# with -incoming defer.
#
# @uri: The Uniform Resource Identifier identifying the source or
#     address to listen on
#
# Returns: nothing on success
#
# Since: 2.3
#
# Notes:
#
# 1. It's a bad idea to use a string for the uri, but it needs to stay
#    compatible with -incoming and the format of the uri is already exposed
#    above libvirt.
#
# 2. QEMU must be started with -incoming defer to allow migrate-incoming to
#    be used.
#
# 3. The uri format is the same as for -incoming
#
# Example:
#
# -> { "execute": "migrate-incoming",
#      "arguments": { "uri": "tcp::4446" } }
# <- { "return": {} }
#
##
{ 'command': 'migrate-incoming', 'data': {'uri': 'str' } }
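
# Illustrative sketch of the deferred-incoming flow; the URIs are placeholders
# and the listen URI given to migrate-incoming uses the same format as
# -incoming.
#
# destination, started with "-incoming defer":
# -> { "execute": "migrate-incoming", "arguments": { "uri": "tcp::4446" } }
# <- { "return": {} }
#
# source:
# -> { "execute": "migrate",
#      "arguments": { "uri": "tcp:192.168.1.200:4446" } }
# <- { "return": {} }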

##
# @xen-save-devices-state:
#
# Save the state of all devices to file. The RAM and the block devices
# of the VM are not saved by this command.
#
# @filename: the file to save the state of the devices to as binary
#     data. See xen-save-devices-state.txt for a description of the
#     binary format.
#
# @live: Optional argument to ask QEMU to treat this command as part of a live
#     migration. Defaults to true. (since 2.11)
#
# Returns: Nothing on success
#
# Since: 1.1
#
# Example:
#
# -> { "execute": "xen-save-devices-state",
#      "arguments": { "filename": "/tmp/save" } }
# <- { "return": {} }
#
##
{ 'command': 'xen-save-devices-state',
  'data': {'filename': 'str', '*live':'bool' } }

##
# @xen-set-replication:
#
# Enable or disable replication.
#
# @enable: true to enable, false to disable.
#
# @primary: true for primary or false for secondary.
#
# @failover: true to do failover, false to stop. Cannot be
#     specified if 'enable' is true. The default value is false.
#
# Returns: nothing.
#
# Example:
#
# -> { "execute": "xen-set-replication",
#      "arguments": {"enable": true, "primary": false} }
# <- { "return": {} }
#
# Since: 2.9
##
{ 'command': 'xen-set-replication',
  'data': { 'enable': 'bool', 'primary': 'bool', '*failover': 'bool' },
  'if': 'defined(CONFIG_REPLICATION)' }

##
# @ReplicationStatus:
#
# The result format for 'query-xen-replication-status'.
#
# @error: true if an error happened, false if replication is normal.
#
# @desc: the human readable error description string, when
#     @error is 'true'.
#
# Since: 2.9
##
{ 'struct': 'ReplicationStatus',
  'data': { 'error': 'bool', '*desc': 'str' },
  'if': 'defined(CONFIG_REPLICATION)' }

##
# @query-xen-replication-status:
#
# Query replication status while the vm is running.
#
# Returns: A @ReplicationStatus object showing the status.
#
# Example:
#
# -> { "execute": "query-xen-replication-status" }
# <- { "return": { "error": false } }
#
# Since: 2.9
##
{ 'command': 'query-xen-replication-status',
  'returns': 'ReplicationStatus',
  'if': 'defined(CONFIG_REPLICATION)' }

##
# @xen-colo-do-checkpoint:
#
# Xen uses this command to notify replication to trigger a checkpoint.
#
# Returns: nothing.
#
# Example:
#
# -> { "execute": "xen-colo-do-checkpoint" }
# <- { "return": {} }
#
# Since: 2.9
##
{ 'command': 'xen-colo-do-checkpoint',
  'if': 'defined(CONFIG_REPLICATION)' }

##
# @COLOStatus:
#
# The result format for 'query-colo-status'.
#
# @mode: COLO running mode. If COLO is running, this field will return
#     'primary' or 'secondary'.
#
# @last-mode: COLO last running mode. If COLO is running, this field
#     will return the same value as the mode field; after failover,
#     this field can be used to get the last COLO mode. (since 4.0)
#
# @reason: describes the reason for the COLO exit.
#
# Since: 3.1
##
{ 'struct': 'COLOStatus',
  'data': { 'mode': 'COLOMode', 'last-mode': 'COLOMode',
            'reason': 'COLOExitReason' } }

##
# @query-colo-status:
#
# Query COLO status while the vm is running.
#
# Returns: A @COLOStatus object showing the status.
#
# Example:
#
# -> { "execute": "query-colo-status" }
# <- { "return": { "mode": "primary", "reason": "request" } }
#
# Since: 3.1
##
{ 'command': 'query-colo-status',
  'returns': 'COLOStatus' }

##
# @migrate-recover:
#
# Provide a recovery migration stream URI.
#
# @uri: the URI to be used for the recovery of migration stream.
#
# Returns: nothing.
#
# Example:
#
# -> { "execute": "migrate-recover",
#      "arguments": { "uri": "tcp:192.168.1.200:12345" } }
# <- { "return": {} }
#
# Since: 3.0
##
{ 'command': 'migrate-recover',
  'data': { 'uri': 'str' },
  'allow-oob': true }

##
# @migrate-pause:
#
# Pause a migration. Currently it only supports postcopy.
#
# Returns: nothing.
#
# Example:
#
# -> { "execute": "migrate-pause" }
# <- { "return": {} }
#
# Since: 3.0
##
{ 'command': 'migrate-pause', 'allow-oob': true }
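
# Illustrative sketch of recovering a paused postcopy migration:
# migrate-pause (or an I/O error) moves the migration to postcopy-paused,
# migrate-recover on the destination provides a fresh listen URI, and
# migrate with "resume" restarts the stream from the source. The URI is a
# placeholder.
#
# source (optional, if the stream has not already failed on its own):
# -> { "execute": "migrate-pause" }
# <- { "return": {} }
#
# destination:
# -> { "execute": "migrate-recover",
#      "arguments": { "uri": "tcp:192.168.1.200:12345" } }
# <- { "return": {} }
#
# source:
# -> { "execute": "migrate",
#      "arguments": { "uri": "tcp:192.168.1.200:12345", "resume": true } }
# <- { "return": {} }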

##
# @UNPLUG_PRIMARY:
#
# Emitted from the source side of a migration when the migration state is
# WAIT_UNPLUG. The device was unplugged by the guest operating system.
# Device resources in QEMU are kept on standby to be able to re-plug it in
# case of migration failure.
#
# @device-id: QEMU device id of the unplugged device
#
# Since: 4.2
#
# Example:
#
# <- { "event": "UNPLUG_PRIMARY",
#      "data": { "device-id": "hostdev0" } }
#
##
{ 'event': 'UNPLUG_PRIMARY',
  'data': { 'device-id': 'str' } }