/*
 * QEMU host block devices
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later. See the COPYING file in the top-level directory.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "sysemu/block-backend.h"
#include "sysemu/blockdev.h"
#include "hw/block/block.h"
#include "block/blockjob.h"
#include "block/throttle-groups.h"
#include "monitor/monitor.h"
#include "qemu/error-report.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "qapi/qmp/types.h"
#include "qapi-visit.h"
#include "qapi/qmp/qerror.h"
#include "qapi/qmp-output-visitor.h"
#include "qapi/util.h"
#include "sysemu/sysemu.h"
#include "block/block_int.h"
#include "qmp-commands.h"
#include "trace.h"
#include "sysemu/arch_init.h"

static const char *const if_name[IF_COUNT] = {
    [IF_NONE] = "none",
    [IF_IDE] = "ide",
    [IF_SCSI] = "scsi",
    [IF_FLOPPY] = "floppy",
    [IF_PFLASH] = "pflash",
    [IF_MTD] = "mtd",
    [IF_SD] = "sd",
    [IF_VIRTIO] = "virtio",
    [IF_XEN] = "xen",
};

static int if_max_devs[IF_COUNT] = {
    /*
     * Do not change these numbers! They govern how drive option
     * index maps to unit and bus. That mapping is ABI.
     *
     * All controllers used to implement if=T drives need to support
     * if_max_devs[T] units, for any T with if_max_devs[T] != 0.
     * Otherwise, some index values map to "impossible" bus, unit
     * values.
     *
     * For instance, if you change [IF_SCSI] to 255, -drive
     * if=scsi,index=12 no longer means bus=1,unit=5, but
     * bus=0,unit=12. With an lsi53c895a controller (7 units max),
     * the drive can't be set up. Regression.
     */
    [IF_IDE] = 2,
    [IF_SCSI] = 7,
};

/**
 * Boards may call this to offer board-by-board overrides
 * of the default, global values.
 */
void override_max_devs(BlockInterfaceType type, int max_devs)
{
    BlockBackend *blk;
    DriveInfo *dinfo;

    if (max_devs <= 0) {
        return;
    }

    for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
        dinfo = blk_legacy_dinfo(blk);
        /* Skip backends without a legacy DriveInfo (not created by -drive) */
        if (dinfo && dinfo->type == type) {
            fprintf(stderr, "Cannot override units-per-bus property of"
                    " the %s interface, because a drive of that type has"
                    " already been added.\n", if_name[type]);
            g_assert_not_reached();
        }
    }

    if_max_devs[type] = max_devs;
}

/*
 * We automatically delete the drive when a device using it gets
 * unplugged. Questionable feature, but we can't just drop it.
 * Device models call blockdev_mark_auto_del() to schedule the
 * automatic deletion, and generic qdev code calls blockdev_auto_del()
 * when deletion is actually safe.
 */
void blockdev_mark_auto_del(BlockBackend *blk)
{
    DriveInfo *dinfo = blk_legacy_dinfo(blk);
    BlockDriverState *bs = blk_bs(blk);
    AioContext *aio_context;

    if (!dinfo) {
        return;
    }

    if (bs) {
        aio_context = bdrv_get_aio_context(bs);
        aio_context_acquire(aio_context);

        if (bs->job) {
            block_job_cancel(bs->job);
        }

        aio_context_release(aio_context);
    }

    dinfo->auto_del = 1;
}

void blockdev_auto_del(BlockBackend *blk)
{
    DriveInfo *dinfo = blk_legacy_dinfo(blk);

    if (dinfo && dinfo->auto_del) {
        blk_unref(blk);
    }
}

/**
 * Returns the current mapping of how many units per bus
 * a particular interface can support.
 *
 * A positive integer indicates n units per bus.
 * 0 implies the mapping has not been established.
 * -1 indicates an invalid BlockInterfaceType was given.
 */
int drive_get_max_devs(BlockInterfaceType type)
{
    if (type >= IF_IDE && type < IF_COUNT) {
        return if_max_devs[type];
    }

    return -1;
}

static int drive_index_to_bus_id(BlockInterfaceType type, int index)
{
    int max_devs = if_max_devs[type];
    return max_devs ? index / max_devs : 0;
}

static int drive_index_to_unit_id(BlockInterfaceType type, int index)
{
    int max_devs = if_max_devs[type];
    return max_devs ? index % max_devs : index;
}
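/*
 * Example of the mapping implemented by the two helpers above: with
 * if_max_devs[IF_IDE] == 2, -drive if=ide,index=5 maps to bus 2 (5 / 2)
 * and unit 1 (5 % 2). For interfaces with if_max_devs[T] == 0, every
 * index stays on bus 0 and is used directly as the unit number.
 */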

QemuOpts *drive_def(const char *optstr)
{
    return qemu_opts_parse_noisily(qemu_find_opts("drive"), optstr, false);
}

QemuOpts *drive_add(BlockInterfaceType type, int index, const char *file,
                    const char *optstr)
{
    QemuOpts *opts;

    opts = drive_def(optstr);
    if (!opts) {
        return NULL;
    }
    if (type != IF_DEFAULT) {
        qemu_opt_set(opts, "if", if_name[type], &error_abort);
    }
    if (index >= 0) {
        qemu_opt_set_number(opts, "index", index, &error_abort);
    }
    if (file) {
        qemu_opt_set(opts, "file", file, &error_abort);
    }
    return opts;
}

DriveInfo *drive_get(BlockInterfaceType type, int bus, int unit)
{
    BlockBackend *blk;
    DriveInfo *dinfo;

    for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
        dinfo = blk_legacy_dinfo(blk);
        if (dinfo && dinfo->type == type
            && dinfo->bus == bus && dinfo->unit == unit) {
            return dinfo;
        }
    }

    return NULL;
}

bool drive_check_orphaned(void)
{
    BlockBackend *blk;
    DriveInfo *dinfo;
    bool rs = false;

    for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
        dinfo = blk_legacy_dinfo(blk);
        /* Only -drive backends (those with a DriveInfo) are considered.
         * If no device is attached and this is not a default drive,
         * this may be an oversight. */
        if (dinfo && !blk_get_attached_dev(blk) && !dinfo->is_default &&
            dinfo->type != IF_NONE) {
            fprintf(stderr, "Warning: Orphaned drive without device: "
                    "id=%s,file=%s,if=%s,bus=%d,unit=%d\n",
                    blk_name(blk), blk_bs(blk) ? blk_bs(blk)->filename : "",
                    if_name[dinfo->type], dinfo->bus, dinfo->unit);
            rs = true;
        }
    }

    return rs;
}

DriveInfo *drive_get_by_index(BlockInterfaceType type, int index)
{
    return drive_get(type,
                     drive_index_to_bus_id(type, index),
                     drive_index_to_unit_id(type, index));
}

int drive_get_max_bus(BlockInterfaceType type)
{
    int max_bus;
    BlockBackend *blk;
    DriveInfo *dinfo;

    max_bus = -1;
    for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
        dinfo = blk_legacy_dinfo(blk);
        if (dinfo && dinfo->type == type && dinfo->bus > max_bus) {
            max_bus = dinfo->bus;
        }
    }
    return max_bus;
}

/* Get a block device. This should only be used for single-drive devices
   (e.g. SD/Floppy/MTD). Multi-disk devices (scsi/ide) should use the
   appropriate bus. */
DriveInfo *drive_get_next(BlockInterfaceType type)
{
    static int next_block_unit[IF_COUNT];

    return drive_get(type, 0, next_block_unit[type]++);
}

static void bdrv_format_print(void *opaque, const char *name)
{
    error_printf(" %s", name);
}

typedef struct {
    QEMUBH *bh;
    BlockDriverState *bs;
} BDRVPutRefBH;

static void bdrv_put_ref_bh(void *opaque)
{
    BDRVPutRefBH *s = opaque;

    bdrv_unref(s->bs);
    qemu_bh_delete(s->bh);
    g_free(s);
}

/*
 * Release a BDS reference in a BH
 *
 * It is not safe to use bdrv_unref() from a callback function when the callers
 * still need the BlockDriverState. In such cases we schedule a BH to release
 * the reference.
 */
static void bdrv_put_ref_bh_schedule(BlockDriverState *bs)
{
    BDRVPutRefBH *s;

    s = g_new(BDRVPutRefBH, 1);
    s->bh = qemu_bh_new(bdrv_put_ref_bh, s);
    s->bs = bs;
    qemu_bh_schedule(s->bh);
}

static int parse_block_error_action(const char *buf, bool is_read, Error **errp)
{
    if (!strcmp(buf, "ignore")) {
        return BLOCKDEV_ON_ERROR_IGNORE;
    } else if (!is_read && !strcmp(buf, "enospc")) {
        return BLOCKDEV_ON_ERROR_ENOSPC;
    } else if (!strcmp(buf, "stop")) {
        return BLOCKDEV_ON_ERROR_STOP;
    } else if (!strcmp(buf, "report")) {
        return BLOCKDEV_ON_ERROR_REPORT;
    } else {
        error_setg(errp, "'%s' invalid %s error action",
                   buf, is_read ? "read" : "write");
        return -1;
    }
}

static bool check_throttle_config(ThrottleConfig *cfg, Error **errp)
{
    if (throttle_conflicting(cfg)) {
        error_setg(errp, "bps/iops/max total values and read/write values"
                   " cannot be used at the same time");
        return false;
    }

    if (!throttle_is_valid(cfg)) {
        error_setg(errp, "bps/iops/max values must be 0 or greater");
        return false;
    }

    if (throttle_max_is_missing_limit(cfg)) {
        error_setg(errp, "bps_max/iops_max require corresponding"
                   " bps/iops values");
        return false;
    }

    return true;
}

typedef enum { MEDIA_DISK, MEDIA_CDROM } DriveMediaType;

/*
 * Extract the common block device options from @opts: open flags (read-only,
 * copy-on-read, discard, cache and aio) are accumulated into *bdrv_flags, the
 * throttling.* options go into *throttle_cfg and *throttling_group, and
 * detect-zeroes into *detect_zeroes.
 *
 * All parameters but @opts are optional and may be set to NULL.
 */
static void extract_common_blockdev_options(QemuOpts *opts, int *bdrv_flags,
    const char **throttling_group, ThrottleConfig *throttle_cfg,
    BlockdevDetectZeroesOptions *detect_zeroes, Error **errp)
{
    const char *discard;
    Error *local_error = NULL;
    const char *aio;

    if (bdrv_flags) {
        if (!qemu_opt_get_bool(opts, "read-only", false)) {
            *bdrv_flags |= BDRV_O_RDWR;
        }
        if (qemu_opt_get_bool(opts, "copy-on-read", false)) {
            *bdrv_flags |= BDRV_O_COPY_ON_READ;
        }

        if ((discard = qemu_opt_get(opts, "discard")) != NULL) {
            if (bdrv_parse_discard_flags(discard, bdrv_flags) != 0) {
                error_setg(errp, "Invalid discard option");
                return;
            }
        }

        if (qemu_opt_get_bool(opts, BDRV_OPT_CACHE_WB, true)) {
            *bdrv_flags |= BDRV_O_CACHE_WB;
        }
        if (qemu_opt_get_bool(opts, BDRV_OPT_CACHE_DIRECT, false)) {
            *bdrv_flags |= BDRV_O_NOCACHE;
        }
        if (qemu_opt_get_bool(opts, BDRV_OPT_CACHE_NO_FLUSH, false)) {
            *bdrv_flags |= BDRV_O_NO_FLUSH;
        }

        if ((aio = qemu_opt_get(opts, "aio")) != NULL) {
            if (!strcmp(aio, "native")) {
                *bdrv_flags |= BDRV_O_NATIVE_AIO;
            } else if (!strcmp(aio, "threads")) {
                /* this is the default */
            } else {
                error_setg(errp, "invalid aio option");
                return;
            }
        }
    }

    /* disk I/O throttling */
    if (throttling_group) {
        *throttling_group = qemu_opt_get(opts, "throttling.group");
    }

    if (throttle_cfg) {
        memset(throttle_cfg, 0, sizeof(*throttle_cfg));
        throttle_cfg->buckets[THROTTLE_BPS_TOTAL].avg =
            qemu_opt_get_number(opts, "throttling.bps-total", 0);
        throttle_cfg->buckets[THROTTLE_BPS_READ].avg =
            qemu_opt_get_number(opts, "throttling.bps-read", 0);
        throttle_cfg->buckets[THROTTLE_BPS_WRITE].avg =
            qemu_opt_get_number(opts, "throttling.bps-write", 0);
        throttle_cfg->buckets[THROTTLE_OPS_TOTAL].avg =
            qemu_opt_get_number(opts, "throttling.iops-total", 0);
        throttle_cfg->buckets[THROTTLE_OPS_READ].avg =
            qemu_opt_get_number(opts, "throttling.iops-read", 0);
        throttle_cfg->buckets[THROTTLE_OPS_WRITE].avg =
            qemu_opt_get_number(opts, "throttling.iops-write", 0);

        throttle_cfg->buckets[THROTTLE_BPS_TOTAL].max =
            qemu_opt_get_number(opts, "throttling.bps-total-max", 0);
        throttle_cfg->buckets[THROTTLE_BPS_READ].max =
            qemu_opt_get_number(opts, "throttling.bps-read-max", 0);
        throttle_cfg->buckets[THROTTLE_BPS_WRITE].max =
            qemu_opt_get_number(opts, "throttling.bps-write-max", 0);
        throttle_cfg->buckets[THROTTLE_OPS_TOTAL].max =
            qemu_opt_get_number(opts, "throttling.iops-total-max", 0);
        throttle_cfg->buckets[THROTTLE_OPS_READ].max =
            qemu_opt_get_number(opts, "throttling.iops-read-max", 0);
        throttle_cfg->buckets[THROTTLE_OPS_WRITE].max =
            qemu_opt_get_number(opts, "throttling.iops-write-max", 0);

        throttle_cfg->op_size =
            qemu_opt_get_number(opts, "throttling.iops-size", 0);

        if (!check_throttle_config(throttle_cfg, errp)) {
            return;
        }
    }

    if (detect_zeroes) {
        *detect_zeroes =
            qapi_enum_parse(BlockdevDetectZeroesOptions_lookup,
                            qemu_opt_get(opts, "detect-zeroes"),
                            BLOCKDEV_DETECT_ZEROES_OPTIONS_MAX,
                            BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF,
                            &local_error);
        if (local_error) {
            error_propagate(errp, local_error);
            return;
        }

        if (bdrv_flags &&
            *detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP &&
            !(*bdrv_flags & BDRV_O_UNMAP))
        {
            error_setg(errp, "setting detect-zeroes to unmap is not allowed "
                       "without setting discard operation to unmap");
            return;
        }
    }
}

/* Creates a BlockBackend for a new drive; takes the ownership of bs_opts */
static BlockBackend *blockdev_init(const char *file, QDict *bs_opts,
                                   Error **errp)
{
    const char *buf;
    int bdrv_flags = 0;
    int on_read_error, on_write_error;
    BlockBackend *blk;
    BlockDriverState *bs;
    ThrottleConfig cfg;
    int snapshot = 0;
    Error *error = NULL;
    QemuOpts *opts;
    const char *id;
    bool has_driver_specific_opts;
    BlockdevDetectZeroesOptions detect_zeroes =
        BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF;
    const char *throttling_group = NULL;

    /* Check common options by copying from bs_opts to opts, all other options
     * stay in bs_opts for processing by bdrv_open(). */
    id = qdict_get_try_str(bs_opts, "id");
    opts = qemu_opts_create(&qemu_common_drive_opts, id, 1, &error);
    if (error) {
        error_propagate(errp, error);
        goto err_no_opts;
    }

    qemu_opts_absorb_qdict(opts, bs_opts, &error);
    if (error) {
        error_propagate(errp, error);
        goto early_err;
    }

    if (id) {
        qdict_del(bs_opts, "id");
    }

    has_driver_specific_opts = !!qdict_size(bs_opts);

    /* extract parameters */
    snapshot = qemu_opt_get_bool(opts, "snapshot", 0);

    extract_common_blockdev_options(opts, &bdrv_flags, &throttling_group, &cfg,
                                    &detect_zeroes, &error);
    if (error) {
        error_propagate(errp, error);
        goto early_err;
    }

    if ((buf = qemu_opt_get(opts, "format")) != NULL) {
        if (is_help_option(buf)) {
            error_printf("Supported formats:");
            bdrv_iterate_format(bdrv_format_print, NULL);
            error_printf("\n");
            goto early_err;
        }

        if (qdict_haskey(bs_opts, "driver")) {
            error_setg(errp, "Cannot specify both 'driver' and 'format'");
            goto early_err;
        }
        qdict_put(bs_opts, "driver", qstring_from_str(buf));
    }

    on_write_error = BLOCKDEV_ON_ERROR_ENOSPC;
    if ((buf = qemu_opt_get(opts, "werror")) != NULL) {
        on_write_error = parse_block_error_action(buf, 0, &error);
        if (error) {
            error_propagate(errp, error);
            goto early_err;
        }
    }

    on_read_error = BLOCKDEV_ON_ERROR_REPORT;
    if ((buf = qemu_opt_get(opts, "rerror")) != NULL) {
        on_read_error = parse_block_error_action(buf, 1, &error);
        if (error) {
            error_propagate(errp, error);
            goto early_err;
        }
    }

    if (snapshot) {
        /* always use cache=unsafe with snapshot */
        bdrv_flags &= ~BDRV_O_CACHE_MASK;
        bdrv_flags |= (BDRV_O_SNAPSHOT|BDRV_O_CACHE_WB|BDRV_O_NO_FLUSH);
    }

    /* init */
    if ((!file || !*file) && !has_driver_specific_opts) {
        /* No image file and no driver-specific options: create an empty
         * BlockBackend and remember the common options in its root state */
        BlockBackendRootState *blk_rs;

        blk = blk_new(qemu_opts_id(opts), errp);
        if (!blk) {
            goto early_err;
        }

        blk_rs = blk_get_root_state(blk);
        blk_rs->open_flags = bdrv_flags;
        blk_rs->read_only = !(bdrv_flags & BDRV_O_RDWR);
        blk_rs->detect_zeroes = detect_zeroes;

        if (throttle_enabled(&cfg)) {
            if (!throttling_group) {
                throttling_group = blk_name(blk);
            }
            blk_rs->throttle_group = g_strdup(throttling_group);
            blk_rs->throttle_state = throttle_group_incref(throttling_group);
            blk_rs->throttle_state->cfg = cfg;
        }

        QDECREF(bs_opts);
    } else {
        if (file && !*file) {
            file = NULL;
        }

        blk = blk_new_open(qemu_opts_id(opts), file, NULL, bs_opts, bdrv_flags,
                           errp);
        if (!blk) {
            goto err_no_bs_opts;
        }
        bs = blk_bs(blk);

        bs->detect_zeroes = detect_zeroes;

        /* disk I/O throttling */
        if (throttle_enabled(&cfg)) {
            if (!throttling_group) {
                throttling_group = blk_name(blk);
            }
            bdrv_io_limits_enable(bs, throttling_group);
            bdrv_set_io_limits(bs, &cfg);
        }

        if (bdrv_key_required(bs)) {
            autostart = 0;
        }
    }

    blk_set_on_error(blk, on_read_error, on_write_error);

err_no_bs_opts:
    qemu_opts_del(opts);
    return blk;

early_err:
    qemu_opts_del(opts);
err_no_opts:
    QDECREF(bs_opts);
    return NULL;
}

static QemuOptsList qemu_root_bds_opts;

/* Takes the ownership of bs_opts */
static BlockDriverState *bds_tree_init(QDict *bs_opts, Error **errp)
{
    BlockDriverState *bs;
    QemuOpts *opts;
    Error
*local_error = NULL; 625 BlockdevDetectZeroesOptions detect_zeroes; 626 int ret; 627 int bdrv_flags = 0; 628 629 opts = qemu_opts_create(&qemu_root_bds_opts, NULL, 1, errp); 630 if (!opts) { 631 goto fail; 632 } 633 634 qemu_opts_absorb_qdict(opts, bs_opts, &local_error); 635 if (local_error) { 636 error_propagate(errp, local_error); 637 goto fail; 638 } 639 640 extract_common_blockdev_options(opts, &bdrv_flags, NULL, NULL, 641 &detect_zeroes, &local_error); 642 if (local_error) { 643 error_propagate(errp, local_error); 644 goto fail; 645 } 646 647 bs = NULL; 648 ret = bdrv_open(&bs, NULL, NULL, bs_opts, bdrv_flags, errp); 649 if (ret < 0) { 650 goto fail_no_bs_opts; 651 } 652 653 bs->detect_zeroes = detect_zeroes; 654 655 fail_no_bs_opts: 656 qemu_opts_del(opts); 657 return bs; 658 659 fail: 660 qemu_opts_del(opts); 661 QDECREF(bs_opts); 662 return NULL; 663 } 664 665 static void qemu_opt_rename(QemuOpts *opts, const char *from, const char *to, 666 Error **errp) 667 { 668 const char *value; 669 670 value = qemu_opt_get(opts, from); 671 if (value) { 672 if (qemu_opt_find(opts, to)) { 673 error_setg(errp, "'%s' and its alias '%s' can't be used at the " 674 "same time", to, from); 675 return; 676 } 677 } 678 679 /* rename all items in opts */ 680 while ((value = qemu_opt_get(opts, from))) { 681 qemu_opt_set(opts, to, value, &error_abort); 682 qemu_opt_unset(opts, from); 683 } 684 } 685 686 QemuOptsList qemu_legacy_drive_opts = { 687 .name = "drive", 688 .head = QTAILQ_HEAD_INITIALIZER(qemu_legacy_drive_opts.head), 689 .desc = { 690 { 691 .name = "bus", 692 .type = QEMU_OPT_NUMBER, 693 .help = "bus number", 694 },{ 695 .name = "unit", 696 .type = QEMU_OPT_NUMBER, 697 .help = "unit number (i.e. lun for scsi)", 698 },{ 699 .name = "index", 700 .type = QEMU_OPT_NUMBER, 701 .help = "index number", 702 },{ 703 .name = "media", 704 .type = QEMU_OPT_STRING, 705 .help = "media type (disk, cdrom)", 706 },{ 707 .name = "if", 708 .type = QEMU_OPT_STRING, 709 .help = "interface (ide, scsi, sd, mtd, floppy, pflash, virtio)", 710 },{ 711 .name = "cyls", 712 .type = QEMU_OPT_NUMBER, 713 .help = "number of cylinders (ide disk geometry)", 714 },{ 715 .name = "heads", 716 .type = QEMU_OPT_NUMBER, 717 .help = "number of heads (ide disk geometry)", 718 },{ 719 .name = "secs", 720 .type = QEMU_OPT_NUMBER, 721 .help = "number of sectors (ide disk geometry)", 722 },{ 723 .name = "trans", 724 .type = QEMU_OPT_STRING, 725 .help = "chs translation (auto, lba, none)", 726 },{ 727 .name = "boot", 728 .type = QEMU_OPT_BOOL, 729 .help = "(deprecated, ignored)", 730 },{ 731 .name = "addr", 732 .type = QEMU_OPT_STRING, 733 .help = "pci address (virtio only)", 734 },{ 735 .name = "serial", 736 .type = QEMU_OPT_STRING, 737 .help = "disk serial number", 738 },{ 739 .name = "file", 740 .type = QEMU_OPT_STRING, 741 .help = "file name", 742 }, 743 744 /* Options that are passed on, but have special semantics with -drive */ 745 { 746 .name = "read-only", 747 .type = QEMU_OPT_BOOL, 748 .help = "open drive file as read-only", 749 },{ 750 .name = "rerror", 751 .type = QEMU_OPT_STRING, 752 .help = "read error action", 753 },{ 754 .name = "werror", 755 .type = QEMU_OPT_STRING, 756 .help = "write error action", 757 },{ 758 .name = "copy-on-read", 759 .type = QEMU_OPT_BOOL, 760 .help = "copy read data from backing file into image file", 761 }, 762 763 { /* end of list */ } 764 }, 765 }; 766 767 DriveInfo *drive_new(QemuOpts *all_opts, BlockInterfaceType block_default_type) 768 { 769 const char *value; 770 BlockBackend *blk; 771 DriveInfo 
*dinfo = NULL; 772 QDict *bs_opts; 773 QemuOpts *legacy_opts; 774 DriveMediaType media = MEDIA_DISK; 775 BlockInterfaceType type; 776 int cyls, heads, secs, translation; 777 int max_devs, bus_id, unit_id, index; 778 const char *devaddr; 779 const char *werror, *rerror; 780 bool read_only = false; 781 bool copy_on_read; 782 const char *serial; 783 const char *filename; 784 Error *local_err = NULL; 785 int i; 786 787 /* Change legacy command line options into QMP ones */ 788 static const struct { 789 const char *from; 790 const char *to; 791 } opt_renames[] = { 792 { "iops", "throttling.iops-total" }, 793 { "iops_rd", "throttling.iops-read" }, 794 { "iops_wr", "throttling.iops-write" }, 795 796 { "bps", "throttling.bps-total" }, 797 { "bps_rd", "throttling.bps-read" }, 798 { "bps_wr", "throttling.bps-write" }, 799 800 { "iops_max", "throttling.iops-total-max" }, 801 { "iops_rd_max", "throttling.iops-read-max" }, 802 { "iops_wr_max", "throttling.iops-write-max" }, 803 804 { "bps_max", "throttling.bps-total-max" }, 805 { "bps_rd_max", "throttling.bps-read-max" }, 806 { "bps_wr_max", "throttling.bps-write-max" }, 807 808 { "iops_size", "throttling.iops-size" }, 809 810 { "group", "throttling.group" }, 811 812 { "readonly", "read-only" }, 813 }; 814 815 for (i = 0; i < ARRAY_SIZE(opt_renames); i++) { 816 qemu_opt_rename(all_opts, opt_renames[i].from, opt_renames[i].to, 817 &local_err); 818 if (local_err) { 819 error_report_err(local_err); 820 return NULL; 821 } 822 } 823 824 value = qemu_opt_get(all_opts, "cache"); 825 if (value) { 826 int flags = 0; 827 828 if (bdrv_parse_cache_flags(value, &flags) != 0) { 829 error_report("invalid cache option"); 830 return NULL; 831 } 832 833 /* Specific options take precedence */ 834 if (!qemu_opt_get(all_opts, BDRV_OPT_CACHE_WB)) { 835 qemu_opt_set_bool(all_opts, BDRV_OPT_CACHE_WB, 836 !!(flags & BDRV_O_CACHE_WB), &error_abort); 837 } 838 if (!qemu_opt_get(all_opts, BDRV_OPT_CACHE_DIRECT)) { 839 qemu_opt_set_bool(all_opts, BDRV_OPT_CACHE_DIRECT, 840 !!(flags & BDRV_O_NOCACHE), &error_abort); 841 } 842 if (!qemu_opt_get(all_opts, BDRV_OPT_CACHE_NO_FLUSH)) { 843 qemu_opt_set_bool(all_opts, BDRV_OPT_CACHE_NO_FLUSH, 844 !!(flags & BDRV_O_NO_FLUSH), &error_abort); 845 } 846 qemu_opt_unset(all_opts, "cache"); 847 } 848 849 /* Get a QDict for processing the options */ 850 bs_opts = qdict_new(); 851 qemu_opts_to_qdict(all_opts, bs_opts); 852 853 legacy_opts = qemu_opts_create(&qemu_legacy_drive_opts, NULL, 0, 854 &error_abort); 855 qemu_opts_absorb_qdict(legacy_opts, bs_opts, &local_err); 856 if (local_err) { 857 error_report_err(local_err); 858 goto fail; 859 } 860 861 /* Deprecated option boot=[on|off] */ 862 if (qemu_opt_get(legacy_opts, "boot") != NULL) { 863 fprintf(stderr, "qemu-kvm: boot=on|off is deprecated and will be " 864 "ignored. Future versions will reject this parameter. 
Please " 865 "update your scripts.\n"); 866 } 867 868 /* Media type */ 869 value = qemu_opt_get(legacy_opts, "media"); 870 if (value) { 871 if (!strcmp(value, "disk")) { 872 media = MEDIA_DISK; 873 } else if (!strcmp(value, "cdrom")) { 874 media = MEDIA_CDROM; 875 read_only = true; 876 } else { 877 error_report("'%s' invalid media", value); 878 goto fail; 879 } 880 } 881 882 /* copy-on-read is disabled with a warning for read-only devices */ 883 read_only |= qemu_opt_get_bool(legacy_opts, "read-only", false); 884 copy_on_read = qemu_opt_get_bool(legacy_opts, "copy-on-read", false); 885 886 if (read_only && copy_on_read) { 887 error_report("warning: disabling copy-on-read on read-only drive"); 888 copy_on_read = false; 889 } 890 891 qdict_put(bs_opts, "read-only", 892 qstring_from_str(read_only ? "on" : "off")); 893 qdict_put(bs_opts, "copy-on-read", 894 qstring_from_str(copy_on_read ? "on" :"off")); 895 896 /* Controller type */ 897 value = qemu_opt_get(legacy_opts, "if"); 898 if (value) { 899 for (type = 0; 900 type < IF_COUNT && strcmp(value, if_name[type]); 901 type++) { 902 } 903 if (type == IF_COUNT) { 904 error_report("unsupported bus type '%s'", value); 905 goto fail; 906 } 907 } else { 908 type = block_default_type; 909 } 910 911 /* Geometry */ 912 cyls = qemu_opt_get_number(legacy_opts, "cyls", 0); 913 heads = qemu_opt_get_number(legacy_opts, "heads", 0); 914 secs = qemu_opt_get_number(legacy_opts, "secs", 0); 915 916 if (cyls || heads || secs) { 917 if (cyls < 1) { 918 error_report("invalid physical cyls number"); 919 goto fail; 920 } 921 if (heads < 1) { 922 error_report("invalid physical heads number"); 923 goto fail; 924 } 925 if (secs < 1) { 926 error_report("invalid physical secs number"); 927 goto fail; 928 } 929 } 930 931 translation = BIOS_ATA_TRANSLATION_AUTO; 932 value = qemu_opt_get(legacy_opts, "trans"); 933 if (value != NULL) { 934 if (!cyls) { 935 error_report("'%s' trans must be used with cyls, heads and secs", 936 value); 937 goto fail; 938 } 939 if (!strcmp(value, "none")) { 940 translation = BIOS_ATA_TRANSLATION_NONE; 941 } else if (!strcmp(value, "lba")) { 942 translation = BIOS_ATA_TRANSLATION_LBA; 943 } else if (!strcmp(value, "large")) { 944 translation = BIOS_ATA_TRANSLATION_LARGE; 945 } else if (!strcmp(value, "rechs")) { 946 translation = BIOS_ATA_TRANSLATION_RECHS; 947 } else if (!strcmp(value, "auto")) { 948 translation = BIOS_ATA_TRANSLATION_AUTO; 949 } else { 950 error_report("'%s' invalid translation type", value); 951 goto fail; 952 } 953 } 954 955 if (media == MEDIA_CDROM) { 956 if (cyls || secs || heads) { 957 error_report("CHS can't be set with media=cdrom"); 958 goto fail; 959 } 960 } 961 962 /* Device address specified by bus/unit or index. 963 * If none was specified, try to find the first free one. 
*/ 964 bus_id = qemu_opt_get_number(legacy_opts, "bus", 0); 965 unit_id = qemu_opt_get_number(legacy_opts, "unit", -1); 966 index = qemu_opt_get_number(legacy_opts, "index", -1); 967 968 max_devs = if_max_devs[type]; 969 970 if (index != -1) { 971 if (bus_id != 0 || unit_id != -1) { 972 error_report("index cannot be used with bus and unit"); 973 goto fail; 974 } 975 bus_id = drive_index_to_bus_id(type, index); 976 unit_id = drive_index_to_unit_id(type, index); 977 } 978 979 if (unit_id == -1) { 980 unit_id = 0; 981 while (drive_get(type, bus_id, unit_id) != NULL) { 982 unit_id++; 983 if (max_devs && unit_id >= max_devs) { 984 unit_id -= max_devs; 985 bus_id++; 986 } 987 } 988 } 989 990 if (max_devs && unit_id >= max_devs) { 991 error_report("unit %d too big (max is %d)", unit_id, max_devs - 1); 992 goto fail; 993 } 994 995 if (drive_get(type, bus_id, unit_id) != NULL) { 996 error_report("drive with bus=%d, unit=%d (index=%d) exists", 997 bus_id, unit_id, index); 998 goto fail; 999 } 1000 1001 /* Serial number */ 1002 serial = qemu_opt_get(legacy_opts, "serial"); 1003 1004 /* no id supplied -> create one */ 1005 if (qemu_opts_id(all_opts) == NULL) { 1006 char *new_id; 1007 const char *mediastr = ""; 1008 if (type == IF_IDE || type == IF_SCSI) { 1009 mediastr = (media == MEDIA_CDROM) ? "-cd" : "-hd"; 1010 } 1011 if (max_devs) { 1012 new_id = g_strdup_printf("%s%i%s%i", if_name[type], bus_id, 1013 mediastr, unit_id); 1014 } else { 1015 new_id = g_strdup_printf("%s%s%i", if_name[type], 1016 mediastr, unit_id); 1017 } 1018 qdict_put(bs_opts, "id", qstring_from_str(new_id)); 1019 g_free(new_id); 1020 } 1021 1022 /* Add virtio block device */ 1023 devaddr = qemu_opt_get(legacy_opts, "addr"); 1024 if (devaddr && type != IF_VIRTIO) { 1025 error_report("addr is not supported by this bus type"); 1026 goto fail; 1027 } 1028 1029 if (type == IF_VIRTIO) { 1030 QemuOpts *devopts; 1031 devopts = qemu_opts_create(qemu_find_opts("device"), NULL, 0, 1032 &error_abort); 1033 if (arch_type == QEMU_ARCH_S390X) { 1034 qemu_opt_set(devopts, "driver", "virtio-blk-ccw", &error_abort); 1035 } else { 1036 qemu_opt_set(devopts, "driver", "virtio-blk-pci", &error_abort); 1037 } 1038 qemu_opt_set(devopts, "drive", qdict_get_str(bs_opts, "id"), 1039 &error_abort); 1040 if (devaddr) { 1041 qemu_opt_set(devopts, "addr", devaddr, &error_abort); 1042 } 1043 } 1044 1045 filename = qemu_opt_get(legacy_opts, "file"); 1046 1047 /* Check werror/rerror compatibility with if=... 
*/ 1048 werror = qemu_opt_get(legacy_opts, "werror"); 1049 if (werror != NULL) { 1050 if (type != IF_IDE && type != IF_SCSI && type != IF_VIRTIO && 1051 type != IF_NONE) { 1052 error_report("werror is not supported by this bus type"); 1053 goto fail; 1054 } 1055 qdict_put(bs_opts, "werror", qstring_from_str(werror)); 1056 } 1057 1058 rerror = qemu_opt_get(legacy_opts, "rerror"); 1059 if (rerror != NULL) { 1060 if (type != IF_IDE && type != IF_VIRTIO && type != IF_SCSI && 1061 type != IF_NONE) { 1062 error_report("rerror is not supported by this bus type"); 1063 goto fail; 1064 } 1065 qdict_put(bs_opts, "rerror", qstring_from_str(rerror)); 1066 } 1067 1068 /* Actual block device init: Functionality shared with blockdev-add */ 1069 blk = blockdev_init(filename, bs_opts, &local_err); 1070 bs_opts = NULL; 1071 if (!blk) { 1072 if (local_err) { 1073 error_report_err(local_err); 1074 } 1075 goto fail; 1076 } else { 1077 assert(!local_err); 1078 } 1079 1080 /* Create legacy DriveInfo */ 1081 dinfo = g_malloc0(sizeof(*dinfo)); 1082 dinfo->opts = all_opts; 1083 1084 dinfo->cyls = cyls; 1085 dinfo->heads = heads; 1086 dinfo->secs = secs; 1087 dinfo->trans = translation; 1088 1089 dinfo->type = type; 1090 dinfo->bus = bus_id; 1091 dinfo->unit = unit_id; 1092 dinfo->devaddr = devaddr; 1093 dinfo->serial = g_strdup(serial); 1094 1095 blk_set_legacy_dinfo(blk, dinfo); 1096 1097 switch(type) { 1098 case IF_IDE: 1099 case IF_SCSI: 1100 case IF_XEN: 1101 case IF_NONE: 1102 dinfo->media_cd = media == MEDIA_CDROM; 1103 break; 1104 default: 1105 break; 1106 } 1107 1108 fail: 1109 qemu_opts_del(legacy_opts); 1110 QDECREF(bs_opts); 1111 return dinfo; 1112 } 1113 1114 void hmp_commit(Monitor *mon, const QDict *qdict) 1115 { 1116 const char *device = qdict_get_str(qdict, "device"); 1117 BlockBackend *blk; 1118 int ret; 1119 1120 if (!strcmp(device, "all")) { 1121 ret = bdrv_commit_all(); 1122 } else { 1123 BlockDriverState *bs; 1124 AioContext *aio_context; 1125 1126 blk = blk_by_name(device); 1127 if (!blk) { 1128 monitor_printf(mon, "Device '%s' not found\n", device); 1129 return; 1130 } 1131 if (!blk_is_available(blk)) { 1132 monitor_printf(mon, "Device '%s' has no medium\n", device); 1133 return; 1134 } 1135 1136 bs = blk_bs(blk); 1137 aio_context = bdrv_get_aio_context(bs); 1138 aio_context_acquire(aio_context); 1139 1140 ret = bdrv_commit(bs); 1141 1142 aio_context_release(aio_context); 1143 } 1144 if (ret < 0) { 1145 monitor_printf(mon, "'commit' error for '%s': %s\n", device, 1146 strerror(-ret)); 1147 } 1148 } 1149 1150 static void blockdev_do_action(TransactionActionKind type, void *data, 1151 Error **errp) 1152 { 1153 TransactionAction action; 1154 TransactionActionList list; 1155 1156 action.type = type; 1157 action.u.data = data; 1158 list.value = &action; 1159 list.next = NULL; 1160 qmp_transaction(&list, errp); 1161 } 1162 1163 void qmp_blockdev_snapshot_sync(bool has_device, const char *device, 1164 bool has_node_name, const char *node_name, 1165 const char *snapshot_file, 1166 bool has_snapshot_node_name, 1167 const char *snapshot_node_name, 1168 bool has_format, const char *format, 1169 bool has_mode, NewImageMode mode, Error **errp) 1170 { 1171 BlockdevSnapshotSync snapshot = { 1172 .has_device = has_device, 1173 .device = (char *) device, 1174 .has_node_name = has_node_name, 1175 .node_name = (char *) node_name, 1176 .snapshot_file = (char *) snapshot_file, 1177 .has_snapshot_node_name = has_snapshot_node_name, 1178 .snapshot_node_name = (char *) snapshot_node_name, 1179 .has_format = 
has_format, 1180 .format = (char *) format, 1181 .has_mode = has_mode, 1182 .mode = mode, 1183 }; 1184 blockdev_do_action(TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_SYNC, 1185 &snapshot, errp); 1186 } 1187 1188 void qmp_blockdev_snapshot(const char *node, const char *overlay, 1189 Error **errp) 1190 { 1191 BlockdevSnapshot snapshot_data = { 1192 .node = (char *) node, 1193 .overlay = (char *) overlay 1194 }; 1195 1196 blockdev_do_action(TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT, 1197 &snapshot_data, errp); 1198 } 1199 1200 void qmp_blockdev_snapshot_internal_sync(const char *device, 1201 const char *name, 1202 Error **errp) 1203 { 1204 BlockdevSnapshotInternal snapshot = { 1205 .device = (char *) device, 1206 .name = (char *) name 1207 }; 1208 1209 blockdev_do_action(TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_INTERNAL_SYNC, 1210 &snapshot, errp); 1211 } 1212 1213 SnapshotInfo *qmp_blockdev_snapshot_delete_internal_sync(const char *device, 1214 bool has_id, 1215 const char *id, 1216 bool has_name, 1217 const char *name, 1218 Error **errp) 1219 { 1220 BlockDriverState *bs; 1221 BlockBackend *blk; 1222 AioContext *aio_context; 1223 QEMUSnapshotInfo sn; 1224 Error *local_err = NULL; 1225 SnapshotInfo *info = NULL; 1226 int ret; 1227 1228 blk = blk_by_name(device); 1229 if (!blk) { 1230 error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND, 1231 "Device '%s' not found", device); 1232 return NULL; 1233 } 1234 1235 aio_context = blk_get_aio_context(blk); 1236 aio_context_acquire(aio_context); 1237 1238 if (!has_id) { 1239 id = NULL; 1240 } 1241 1242 if (!has_name) { 1243 name = NULL; 1244 } 1245 1246 if (!id && !name) { 1247 error_setg(errp, "Name or id must be provided"); 1248 goto out_aio_context; 1249 } 1250 1251 if (!blk_is_available(blk)) { 1252 error_setg(errp, "Device '%s' has no medium", device); 1253 goto out_aio_context; 1254 } 1255 bs = blk_bs(blk); 1256 1257 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_INTERNAL_SNAPSHOT_DELETE, errp)) { 1258 goto out_aio_context; 1259 } 1260 1261 ret = bdrv_snapshot_find_by_id_and_name(bs, id, name, &sn, &local_err); 1262 if (local_err) { 1263 error_propagate(errp, local_err); 1264 goto out_aio_context; 1265 } 1266 if (!ret) { 1267 error_setg(errp, 1268 "Snapshot with id '%s' and name '%s' does not exist on " 1269 "device '%s'", 1270 STR_OR_NULL(id), STR_OR_NULL(name), device); 1271 goto out_aio_context; 1272 } 1273 1274 bdrv_snapshot_delete(bs, id, name, &local_err); 1275 if (local_err) { 1276 error_propagate(errp, local_err); 1277 goto out_aio_context; 1278 } 1279 1280 aio_context_release(aio_context); 1281 1282 info = g_new0(SnapshotInfo, 1); 1283 info->id = g_strdup(sn.id_str); 1284 info->name = g_strdup(sn.name); 1285 info->date_nsec = sn.date_nsec; 1286 info->date_sec = sn.date_sec; 1287 info->vm_state_size = sn.vm_state_size; 1288 info->vm_clock_nsec = sn.vm_clock_nsec % 1000000000; 1289 info->vm_clock_sec = sn.vm_clock_nsec / 1000000000; 1290 1291 return info; 1292 1293 out_aio_context: 1294 aio_context_release(aio_context); 1295 return NULL; 1296 } 1297 1298 /** 1299 * block_dirty_bitmap_lookup: 1300 * Return a dirty bitmap (if present), after validating 1301 * the node reference and bitmap names. 1302 * 1303 * @node: The name of the BDS node to search for bitmaps 1304 * @name: The name of the bitmap to search for 1305 * @pbs: Output pointer for BDS lookup, if desired. Can be NULL. 1306 * @paio: Output pointer for aio_context acquisition, if desired. Can be NULL. 1307 * @errp: Output pointer for error information. Can be NULL. 
1308 * 1309 * @return: A bitmap object on success, or NULL on failure. 1310 */ 1311 static BdrvDirtyBitmap *block_dirty_bitmap_lookup(const char *node, 1312 const char *name, 1313 BlockDriverState **pbs, 1314 AioContext **paio, 1315 Error **errp) 1316 { 1317 BlockDriverState *bs; 1318 BdrvDirtyBitmap *bitmap; 1319 AioContext *aio_context; 1320 1321 if (!node) { 1322 error_setg(errp, "Node cannot be NULL"); 1323 return NULL; 1324 } 1325 if (!name) { 1326 error_setg(errp, "Bitmap name cannot be NULL"); 1327 return NULL; 1328 } 1329 bs = bdrv_lookup_bs(node, node, NULL); 1330 if (!bs) { 1331 error_setg(errp, "Node '%s' not found", node); 1332 return NULL; 1333 } 1334 1335 aio_context = bdrv_get_aio_context(bs); 1336 aio_context_acquire(aio_context); 1337 1338 bitmap = bdrv_find_dirty_bitmap(bs, name); 1339 if (!bitmap) { 1340 error_setg(errp, "Dirty bitmap '%s' not found", name); 1341 goto fail; 1342 } 1343 1344 if (pbs) { 1345 *pbs = bs; 1346 } 1347 if (paio) { 1348 *paio = aio_context; 1349 } else { 1350 aio_context_release(aio_context); 1351 } 1352 1353 return bitmap; 1354 1355 fail: 1356 aio_context_release(aio_context); 1357 return NULL; 1358 } 1359 1360 /* New and old BlockDriverState structs for atomic group operations */ 1361 1362 typedef struct BlkTransactionState BlkTransactionState; 1363 1364 /* Only prepare() may fail. In a single transaction, only one of commit() or 1365 abort() will be called, clean() will always be called if it present. */ 1366 typedef struct BdrvActionOps { 1367 /* Size of state struct, in bytes. */ 1368 size_t instance_size; 1369 /* Prepare the work, must NOT be NULL. */ 1370 void (*prepare)(BlkTransactionState *common, Error **errp); 1371 /* Commit the changes, can be NULL. */ 1372 void (*commit)(BlkTransactionState *common); 1373 /* Abort the changes on fail, can be NULL. */ 1374 void (*abort)(BlkTransactionState *common); 1375 /* Clean up resource in the end, can be NULL. */ 1376 void (*clean)(BlkTransactionState *common); 1377 } BdrvActionOps; 1378 1379 /* 1380 * This structure must be arranged as first member in child type, assuming 1381 * that compiler will also arrange it to the same address with parent instance. 1382 * Later it will be used in free(). 1383 */ 1384 struct BlkTransactionState { 1385 TransactionAction *action; 1386 const BdrvActionOps *ops; 1387 QSIMPLEQ_ENTRY(BlkTransactionState) entry; 1388 }; 1389 1390 /* internal snapshot private data */ 1391 typedef struct InternalSnapshotState { 1392 BlkTransactionState common; 1393 BlockDriverState *bs; 1394 AioContext *aio_context; 1395 QEMUSnapshotInfo sn; 1396 bool created; 1397 } InternalSnapshotState; 1398 1399 static void internal_snapshot_prepare(BlkTransactionState *common, 1400 Error **errp) 1401 { 1402 Error *local_err = NULL; 1403 const char *device; 1404 const char *name; 1405 BlockBackend *blk; 1406 BlockDriverState *bs; 1407 QEMUSnapshotInfo old_sn, *sn; 1408 bool ret; 1409 qemu_timeval tv; 1410 BlockdevSnapshotInternal *internal; 1411 InternalSnapshotState *state; 1412 int ret1; 1413 1414 g_assert(common->action->type == 1415 TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_INTERNAL_SYNC); 1416 internal = common->action->u.blockdev_snapshot_internal_sync; 1417 state = DO_UPCAST(InternalSnapshotState, common, common); 1418 1419 /* 1. parse input */ 1420 device = internal->device; 1421 name = internal->name; 1422 1423 /* 2. 
check for validation */ 1424 blk = blk_by_name(device); 1425 if (!blk) { 1426 error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND, 1427 "Device '%s' not found", device); 1428 return; 1429 } 1430 1431 /* AioContext is released in .clean() */ 1432 state->aio_context = blk_get_aio_context(blk); 1433 aio_context_acquire(state->aio_context); 1434 1435 if (!blk_is_available(blk)) { 1436 error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, device); 1437 return; 1438 } 1439 bs = blk_bs(blk); 1440 1441 state->bs = bs; 1442 bdrv_drained_begin(bs); 1443 1444 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_INTERNAL_SNAPSHOT, errp)) { 1445 return; 1446 } 1447 1448 if (bdrv_is_read_only(bs)) { 1449 error_setg(errp, "Device '%s' is read only", device); 1450 return; 1451 } 1452 1453 if (!bdrv_can_snapshot(bs)) { 1454 error_setg(errp, "Block format '%s' used by device '%s' " 1455 "does not support internal snapshots", 1456 bs->drv->format_name, device); 1457 return; 1458 } 1459 1460 if (!strlen(name)) { 1461 error_setg(errp, "Name is empty"); 1462 return; 1463 } 1464 1465 /* check whether a snapshot with name exist */ 1466 ret = bdrv_snapshot_find_by_id_and_name(bs, NULL, name, &old_sn, 1467 &local_err); 1468 if (local_err) { 1469 error_propagate(errp, local_err); 1470 return; 1471 } else if (ret) { 1472 error_setg(errp, 1473 "Snapshot with name '%s' already exists on device '%s'", 1474 name, device); 1475 return; 1476 } 1477 1478 /* 3. take the snapshot */ 1479 sn = &state->sn; 1480 pstrcpy(sn->name, sizeof(sn->name), name); 1481 qemu_gettimeofday(&tv); 1482 sn->date_sec = tv.tv_sec; 1483 sn->date_nsec = tv.tv_usec * 1000; 1484 sn->vm_clock_nsec = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); 1485 1486 ret1 = bdrv_snapshot_create(bs, sn); 1487 if (ret1 < 0) { 1488 error_setg_errno(errp, -ret1, 1489 "Failed to create snapshot '%s' on device '%s'", 1490 name, device); 1491 return; 1492 } 1493 1494 /* 4. 
succeed, mark a snapshot is created */ 1495 state->created = true; 1496 } 1497 1498 static void internal_snapshot_abort(BlkTransactionState *common) 1499 { 1500 InternalSnapshotState *state = 1501 DO_UPCAST(InternalSnapshotState, common, common); 1502 BlockDriverState *bs = state->bs; 1503 QEMUSnapshotInfo *sn = &state->sn; 1504 Error *local_error = NULL; 1505 1506 if (!state->created) { 1507 return; 1508 } 1509 1510 if (bdrv_snapshot_delete(bs, sn->id_str, sn->name, &local_error) < 0) { 1511 error_report("Failed to delete snapshot with id '%s' and name '%s' on " 1512 "device '%s' in abort: %s", 1513 sn->id_str, 1514 sn->name, 1515 bdrv_get_device_name(bs), 1516 error_get_pretty(local_error)); 1517 error_free(local_error); 1518 } 1519 } 1520 1521 static void internal_snapshot_clean(BlkTransactionState *common) 1522 { 1523 InternalSnapshotState *state = DO_UPCAST(InternalSnapshotState, 1524 common, common); 1525 1526 if (state->aio_context) { 1527 if (state->bs) { 1528 bdrv_drained_end(state->bs); 1529 } 1530 aio_context_release(state->aio_context); 1531 } 1532 } 1533 1534 /* external snapshot private data */ 1535 typedef struct ExternalSnapshotState { 1536 BlkTransactionState common; 1537 BlockDriverState *old_bs; 1538 BlockDriverState *new_bs; 1539 AioContext *aio_context; 1540 } ExternalSnapshotState; 1541 1542 static void external_snapshot_prepare(BlkTransactionState *common, 1543 Error **errp) 1544 { 1545 int flags = 0, ret; 1546 QDict *options = NULL; 1547 Error *local_err = NULL; 1548 /* Device and node name of the image to generate the snapshot from */ 1549 const char *device; 1550 const char *node_name; 1551 /* Reference to the new image (for 'blockdev-snapshot') */ 1552 const char *snapshot_ref; 1553 /* File name of the new image (for 'blockdev-snapshot-sync') */ 1554 const char *new_image_file; 1555 ExternalSnapshotState *state = 1556 DO_UPCAST(ExternalSnapshotState, common, common); 1557 TransactionAction *action = common->action; 1558 1559 /* 'blockdev-snapshot' and 'blockdev-snapshot-sync' have similar 1560 * purpose but a different set of parameters */ 1561 switch (action->type) { 1562 case TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT: 1563 { 1564 BlockdevSnapshot *s = action->u.blockdev_snapshot; 1565 device = s->node; 1566 node_name = s->node; 1567 new_image_file = NULL; 1568 snapshot_ref = s->overlay; 1569 } 1570 break; 1571 case TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_SYNC: 1572 { 1573 BlockdevSnapshotSync *s = action->u.blockdev_snapshot_sync; 1574 device = s->has_device ? s->device : NULL; 1575 node_name = s->has_node_name ? 
s->node_name : NULL; 1576 new_image_file = s->snapshot_file; 1577 snapshot_ref = NULL; 1578 } 1579 break; 1580 default: 1581 g_assert_not_reached(); 1582 } 1583 1584 /* start processing */ 1585 state->old_bs = bdrv_lookup_bs(device, node_name, errp); 1586 if (!state->old_bs) { 1587 return; 1588 } 1589 1590 /* Acquire AioContext now so any threads operating on old_bs stop */ 1591 state->aio_context = bdrv_get_aio_context(state->old_bs); 1592 aio_context_acquire(state->aio_context); 1593 bdrv_drained_begin(state->old_bs); 1594 1595 if (!bdrv_is_inserted(state->old_bs)) { 1596 error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, device); 1597 return; 1598 } 1599 1600 if (bdrv_op_is_blocked(state->old_bs, 1601 BLOCK_OP_TYPE_EXTERNAL_SNAPSHOT, errp)) { 1602 return; 1603 } 1604 1605 if (!bdrv_is_read_only(state->old_bs)) { 1606 if (bdrv_flush(state->old_bs)) { 1607 error_setg(errp, QERR_IO_ERROR); 1608 return; 1609 } 1610 } 1611 1612 if (!bdrv_is_first_non_filter(state->old_bs)) { 1613 error_setg(errp, QERR_FEATURE_DISABLED, "snapshot"); 1614 return; 1615 } 1616 1617 if (action->type == TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_SYNC) { 1618 BlockdevSnapshotSync *s = action->u.blockdev_snapshot_sync; 1619 const char *format = s->has_format ? s->format : "qcow2"; 1620 enum NewImageMode mode; 1621 const char *snapshot_node_name = 1622 s->has_snapshot_node_name ? s->snapshot_node_name : NULL; 1623 1624 if (node_name && !snapshot_node_name) { 1625 error_setg(errp, "New snapshot node name missing"); 1626 return; 1627 } 1628 1629 if (snapshot_node_name && 1630 bdrv_lookup_bs(snapshot_node_name, snapshot_node_name, NULL)) { 1631 error_setg(errp, "New snapshot node name already in use"); 1632 return; 1633 } 1634 1635 flags = state->old_bs->open_flags; 1636 1637 /* create new image w/backing file */ 1638 mode = s->has_mode ? 
s->mode : NEW_IMAGE_MODE_ABSOLUTE_PATHS; 1639 if (mode != NEW_IMAGE_MODE_EXISTING) { 1640 bdrv_img_create(new_image_file, format, 1641 state->old_bs->filename, 1642 state->old_bs->drv->format_name, 1643 NULL, -1, flags, &local_err, false); 1644 if (local_err) { 1645 error_propagate(errp, local_err); 1646 return; 1647 } 1648 } 1649 1650 options = qdict_new(); 1651 if (s->has_snapshot_node_name) { 1652 qdict_put(options, "node-name", 1653 qstring_from_str(snapshot_node_name)); 1654 } 1655 qdict_put(options, "driver", qstring_from_str(format)); 1656 1657 flags |= BDRV_O_NO_BACKING; 1658 } 1659 1660 assert(state->new_bs == NULL); 1661 ret = bdrv_open(&state->new_bs, new_image_file, snapshot_ref, options, 1662 flags, errp); 1663 /* We will manually add the backing_hd field to the bs later */ 1664 if (ret != 0) { 1665 return; 1666 } 1667 1668 if (state->new_bs->blk != NULL) { 1669 error_setg(errp, "The snapshot is already in use by %s", 1670 blk_name(state->new_bs->blk)); 1671 return; 1672 } 1673 1674 if (bdrv_op_is_blocked(state->new_bs, BLOCK_OP_TYPE_EXTERNAL_SNAPSHOT, 1675 errp)) { 1676 return; 1677 } 1678 1679 if (state->new_bs->backing != NULL) { 1680 error_setg(errp, "The snapshot already has a backing image"); 1681 return; 1682 } 1683 1684 if (!state->new_bs->drv->supports_backing) { 1685 error_setg(errp, "The snapshot does not support backing images"); 1686 } 1687 } 1688 1689 static void external_snapshot_commit(BlkTransactionState *common) 1690 { 1691 ExternalSnapshotState *state = 1692 DO_UPCAST(ExternalSnapshotState, common, common); 1693 1694 bdrv_set_aio_context(state->new_bs, state->aio_context); 1695 1696 /* This removes our old bs and adds the new bs */ 1697 bdrv_append(state->new_bs, state->old_bs); 1698 /* We don't need (or want) to use the transactional 1699 * bdrv_reopen_multiple() across all the entries at once, because we 1700 * don't want to abort all of them if one of them fails the reopen */ 1701 bdrv_reopen(state->old_bs, state->old_bs->open_flags & ~BDRV_O_RDWR, 1702 NULL); 1703 } 1704 1705 static void external_snapshot_abort(BlkTransactionState *common) 1706 { 1707 ExternalSnapshotState *state = 1708 DO_UPCAST(ExternalSnapshotState, common, common); 1709 if (state->new_bs) { 1710 bdrv_unref(state->new_bs); 1711 } 1712 } 1713 1714 static void external_snapshot_clean(BlkTransactionState *common) 1715 { 1716 ExternalSnapshotState *state = 1717 DO_UPCAST(ExternalSnapshotState, common, common); 1718 if (state->aio_context) { 1719 bdrv_drained_end(state->old_bs); 1720 aio_context_release(state->aio_context); 1721 } 1722 } 1723 1724 typedef struct DriveBackupState { 1725 BlkTransactionState common; 1726 BlockDriverState *bs; 1727 AioContext *aio_context; 1728 BlockJob *job; 1729 } DriveBackupState; 1730 1731 static void drive_backup_prepare(BlkTransactionState *common, Error **errp) 1732 { 1733 DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common); 1734 BlockBackend *blk; 1735 DriveBackup *backup; 1736 Error *local_err = NULL; 1737 1738 assert(common->action->type == TRANSACTION_ACTION_KIND_DRIVE_BACKUP); 1739 backup = common->action->u.drive_backup; 1740 1741 blk = blk_by_name(backup->device); 1742 if (!blk) { 1743 error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND, 1744 "Device '%s' not found", backup->device); 1745 return; 1746 } 1747 1748 if (!blk_is_available(blk)) { 1749 error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, backup->device); 1750 return; 1751 } 1752 1753 /* AioContext is released in .clean() */ 1754 state->aio_context = blk_get_aio_context(blk); 1755 
aio_context_acquire(state->aio_context); 1756 bdrv_drained_begin(blk_bs(blk)); 1757 state->bs = blk_bs(blk); 1758 1759 qmp_drive_backup(backup->device, backup->target, 1760 backup->has_format, backup->format, 1761 backup->sync, 1762 backup->has_mode, backup->mode, 1763 backup->has_speed, backup->speed, 1764 backup->has_bitmap, backup->bitmap, 1765 backup->has_on_source_error, backup->on_source_error, 1766 backup->has_on_target_error, backup->on_target_error, 1767 &local_err); 1768 if (local_err) { 1769 error_propagate(errp, local_err); 1770 return; 1771 } 1772 1773 state->job = state->bs->job; 1774 } 1775 1776 static void drive_backup_abort(BlkTransactionState *common) 1777 { 1778 DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common); 1779 BlockDriverState *bs = state->bs; 1780 1781 /* Only cancel if it's the job we started */ 1782 if (bs && bs->job && bs->job == state->job) { 1783 block_job_cancel_sync(bs->job); 1784 } 1785 } 1786 1787 static void drive_backup_clean(BlkTransactionState *common) 1788 { 1789 DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common); 1790 1791 if (state->aio_context) { 1792 bdrv_drained_end(state->bs); 1793 aio_context_release(state->aio_context); 1794 } 1795 } 1796 1797 typedef struct BlockdevBackupState { 1798 BlkTransactionState common; 1799 BlockDriverState *bs; 1800 BlockJob *job; 1801 AioContext *aio_context; 1802 } BlockdevBackupState; 1803 1804 static void blockdev_backup_prepare(BlkTransactionState *common, Error **errp) 1805 { 1806 BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common); 1807 BlockdevBackup *backup; 1808 BlockBackend *blk, *target; 1809 Error *local_err = NULL; 1810 1811 assert(common->action->type == TRANSACTION_ACTION_KIND_BLOCKDEV_BACKUP); 1812 backup = common->action->u.blockdev_backup; 1813 1814 blk = blk_by_name(backup->device); 1815 if (!blk) { 1816 error_setg(errp, "Device '%s' not found", backup->device); 1817 return; 1818 } 1819 1820 if (!blk_is_available(blk)) { 1821 error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, backup->device); 1822 return; 1823 } 1824 1825 target = blk_by_name(backup->target); 1826 if (!target) { 1827 error_setg(errp, "Device '%s' not found", backup->target); 1828 return; 1829 } 1830 1831 /* AioContext is released in .clean() */ 1832 state->aio_context = blk_get_aio_context(blk); 1833 if (state->aio_context != blk_get_aio_context(target)) { 1834 state->aio_context = NULL; 1835 error_setg(errp, "Backup between two IO threads is not implemented"); 1836 return; 1837 } 1838 aio_context_acquire(state->aio_context); 1839 state->bs = blk_bs(blk); 1840 bdrv_drained_begin(state->bs); 1841 1842 qmp_blockdev_backup(backup->device, backup->target, 1843 backup->sync, 1844 backup->has_speed, backup->speed, 1845 backup->has_on_source_error, backup->on_source_error, 1846 backup->has_on_target_error, backup->on_target_error, 1847 &local_err); 1848 if (local_err) { 1849 error_propagate(errp, local_err); 1850 return; 1851 } 1852 1853 state->job = state->bs->job; 1854 } 1855 1856 static void blockdev_backup_abort(BlkTransactionState *common) 1857 { 1858 BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common); 1859 BlockDriverState *bs = state->bs; 1860 1861 /* Only cancel if it's the job we started */ 1862 if (bs && bs->job && bs->job == state->job) { 1863 block_job_cancel_sync(bs->job); 1864 } 1865 } 1866 1867 static void blockdev_backup_clean(BlkTransactionState *common) 1868 { 1869 BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, 
common); 1870 1871 if (state->aio_context) { 1872 bdrv_drained_end(state->bs); 1873 aio_context_release(state->aio_context); 1874 } 1875 } 1876 1877 static void abort_prepare(BlkTransactionState *common, Error **errp) 1878 { 1879 error_setg(errp, "Transaction aborted using Abort action"); 1880 } 1881 1882 static void abort_commit(BlkTransactionState *common) 1883 { 1884 g_assert_not_reached(); /* this action never succeeds */ 1885 } 1886 1887 static const BdrvActionOps actions[] = { 1888 [TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT] = { 1889 .instance_size = sizeof(ExternalSnapshotState), 1890 .prepare = external_snapshot_prepare, 1891 .commit = external_snapshot_commit, 1892 .abort = external_snapshot_abort, 1893 }, 1894 [TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_SYNC] = { 1895 .instance_size = sizeof(ExternalSnapshotState), 1896 .prepare = external_snapshot_prepare, 1897 .commit = external_snapshot_commit, 1898 .abort = external_snapshot_abort, 1899 .clean = external_snapshot_clean, 1900 }, 1901 [TRANSACTION_ACTION_KIND_DRIVE_BACKUP] = { 1902 .instance_size = sizeof(DriveBackupState), 1903 .prepare = drive_backup_prepare, 1904 .abort = drive_backup_abort, 1905 .clean = drive_backup_clean, 1906 }, 1907 [TRANSACTION_ACTION_KIND_BLOCKDEV_BACKUP] = { 1908 .instance_size = sizeof(BlockdevBackupState), 1909 .prepare = blockdev_backup_prepare, 1910 .abort = blockdev_backup_abort, 1911 .clean = blockdev_backup_clean, 1912 }, 1913 [TRANSACTION_ACTION_KIND_ABORT] = { 1914 .instance_size = sizeof(BlkTransactionState), 1915 .prepare = abort_prepare, 1916 .commit = abort_commit, 1917 }, 1918 [TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_INTERNAL_SYNC] = { 1919 .instance_size = sizeof(InternalSnapshotState), 1920 .prepare = internal_snapshot_prepare, 1921 .abort = internal_snapshot_abort, 1922 .clean = internal_snapshot_clean, 1923 }, 1924 }; 1925 1926 /* 1927 * 'Atomic' group operations. The operations are performed as a set, and if 1928 * any fail then we roll back all operations in the group. 
1929 */ 1930 void qmp_transaction(TransactionActionList *dev_list, Error **errp) 1931 { 1932 TransactionActionList *dev_entry = dev_list; 1933 BlkTransactionState *state, *next; 1934 Error *local_err = NULL; 1935 1936 QSIMPLEQ_HEAD(snap_bdrv_states, BlkTransactionState) snap_bdrv_states; 1937 QSIMPLEQ_INIT(&snap_bdrv_states); 1938 1939 /* drain all i/o before any operations */ 1940 bdrv_drain_all(); 1941 1942 /* We don't do anything in this loop that commits us to the operations */ 1943 while (NULL != dev_entry) { 1944 TransactionAction *dev_info = NULL; 1945 const BdrvActionOps *ops; 1946 1947 dev_info = dev_entry->value; 1948 dev_entry = dev_entry->next; 1949 1950 assert(dev_info->type < ARRAY_SIZE(actions)); 1951 1952 ops = &actions[dev_info->type]; 1953 assert(ops->instance_size > 0); 1954 1955 state = g_malloc0(ops->instance_size); 1956 state->ops = ops; 1957 state->action = dev_info; 1958 QSIMPLEQ_INSERT_TAIL(&snap_bdrv_states, state, entry); 1959 1960 state->ops->prepare(state, &local_err); 1961 if (local_err) { 1962 error_propagate(errp, local_err); 1963 goto delete_and_fail; 1964 } 1965 } 1966 1967 QSIMPLEQ_FOREACH(state, &snap_bdrv_states, entry) { 1968 if (state->ops->commit) { 1969 state->ops->commit(state); 1970 } 1971 } 1972 1973 /* success */ 1974 goto exit; 1975 1976 delete_and_fail: 1977 /* failure, and it is all-or-none; roll back all operations */ 1978 QSIMPLEQ_FOREACH(state, &snap_bdrv_states, entry) { 1979 if (state->ops->abort) { 1980 state->ops->abort(state); 1981 } 1982 } 1983 exit: 1984 QSIMPLEQ_FOREACH_SAFE(state, &snap_bdrv_states, entry, next) { 1985 if (state->ops->clean) { 1986 state->ops->clean(state); 1987 } 1988 g_free(state); 1989 } 1990 } 1991 1992 void qmp_eject(const char *device, bool has_force, bool force, Error **errp) 1993 { 1994 Error *local_err = NULL; 1995 1996 qmp_blockdev_open_tray(device, has_force, force, &local_err); 1997 if (local_err) { 1998 error_propagate(errp, local_err); 1999 return; 2000 } 2001 2002 qmp_blockdev_remove_medium(device, errp); 2003 } 2004 2005 void qmp_block_passwd(bool has_device, const char *device, 2006 bool has_node_name, const char *node_name, 2007 const char *password, Error **errp) 2008 { 2009 Error *local_err = NULL; 2010 BlockDriverState *bs; 2011 AioContext *aio_context; 2012 2013 bs = bdrv_lookup_bs(has_device ? device : NULL, 2014 has_node_name ? 
node_name : NULL, 2015 &local_err); 2016 if (local_err) { 2017 error_propagate(errp, local_err); 2018 return; 2019 } 2020 2021 aio_context = bdrv_get_aio_context(bs); 2022 aio_context_acquire(aio_context); 2023 2024 bdrv_add_key(bs, password, errp); 2025 2026 aio_context_release(aio_context); 2027 } 2028 2029 void qmp_blockdev_open_tray(const char *device, bool has_force, bool force, 2030 Error **errp) 2031 { 2032 BlockBackend *blk; 2033 bool locked; 2034 2035 if (!has_force) { 2036 force = false; 2037 } 2038 2039 blk = blk_by_name(device); 2040 if (!blk) { 2041 error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND, 2042 "Device '%s' not found", device); 2043 return; 2044 } 2045 2046 if (!blk_dev_has_removable_media(blk)) { 2047 error_setg(errp, "Device '%s' is not removable", device); 2048 return; 2049 } 2050 2051 if (blk_dev_is_tray_open(blk)) { 2052 return; 2053 } 2054 2055 locked = blk_dev_is_medium_locked(blk); 2056 if (locked) { 2057 blk_dev_eject_request(blk, force); 2058 } 2059 2060 if (!locked || force) { 2061 blk_dev_change_media_cb(blk, false); 2062 } 2063 } 2064 2065 void qmp_blockdev_close_tray(const char *device, Error **errp) 2066 { 2067 BlockBackend *blk; 2068 2069 blk = blk_by_name(device); 2070 if (!blk) { 2071 error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND, 2072 "Device '%s' not found", device); 2073 return; 2074 } 2075 2076 if (!blk_dev_has_removable_media(blk)) { 2077 error_setg(errp, "Device '%s' is not removable", device); 2078 return; 2079 } 2080 2081 if (!blk_dev_is_tray_open(blk)) { 2082 return; 2083 } 2084 2085 blk_dev_change_media_cb(blk, true); 2086 } 2087 2088 void qmp_blockdev_remove_medium(const char *device, Error **errp) 2089 { 2090 BlockBackend *blk; 2091 BlockDriverState *bs; 2092 AioContext *aio_context; 2093 bool has_device; 2094 2095 blk = blk_by_name(device); 2096 if (!blk) { 2097 error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND, 2098 "Device '%s' not found", device); 2099 return; 2100 } 2101 2102 /* For BBs without a device, we can exchange the BDS tree at will */ 2103 has_device = blk_get_attached_dev(blk); 2104 2105 if (has_device && !blk_dev_has_removable_media(blk)) { 2106 error_setg(errp, "Device '%s' is not removable", device); 2107 return; 2108 } 2109 2110 if (has_device && !blk_dev_is_tray_open(blk)) { 2111 error_setg(errp, "Tray of device '%s' is not open", device); 2112 return; 2113 } 2114 2115 bs = blk_bs(blk); 2116 if (!bs) { 2117 return; 2118 } 2119 2120 aio_context = bdrv_get_aio_context(bs); 2121 aio_context_acquire(aio_context); 2122 2123 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_EJECT, errp)) { 2124 goto out; 2125 } 2126 2127 /* This follows the convention established by bdrv_make_anon() */ 2128 if (bs->device_list.tqe_prev) { 2129 QTAILQ_REMOVE(&bdrv_states, bs, device_list); 2130 bs->device_list.tqe_prev = NULL; 2131 } 2132 2133 blk_remove_bs(blk); 2134 2135 out: 2136 aio_context_release(aio_context); 2137 } 2138 2139 static void qmp_blockdev_insert_anon_medium(const char *device, 2140 BlockDriverState *bs, Error **errp) 2141 { 2142 BlockBackend *blk; 2143 bool has_device; 2144 2145 blk = blk_by_name(device); 2146 if (!blk) { 2147 error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND, 2148 "Device '%s' not found", device); 2149 return; 2150 } 2151 2152 /* For BBs without a device, we can exchange the BDS tree at will */ 2153 has_device = blk_get_attached_dev(blk); 2154 2155 if (has_device && !blk_dev_has_removable_media(blk)) { 2156 error_setg(errp, "Device '%s' is not removable", device); 2157 return; 2158 } 2159 2160 if (has_device && 
!blk_dev_is_tray_open(blk)) { 2161 error_setg(errp, "Tray of device '%s' is not open", device); 2162 return; 2163 } 2164 2165 if (blk_bs(blk)) { 2166 error_setg(errp, "There already is a medium in device '%s'", device); 2167 return; 2168 } 2169 2170 blk_insert_bs(blk, bs); 2171 2172 QTAILQ_INSERT_TAIL(&bdrv_states, bs, device_list); 2173 } 2174 2175 void qmp_blockdev_insert_medium(const char *device, const char *node_name, 2176 Error **errp) 2177 { 2178 BlockDriverState *bs; 2179 2180 bs = bdrv_find_node(node_name); 2181 if (!bs) { 2182 error_setg(errp, "Node '%s' not found", node_name); 2183 return; 2184 } 2185 2186 if (bs->blk) { 2187 error_setg(errp, "Node '%s' is already in use by '%s'", node_name, 2188 blk_name(bs->blk)); 2189 return; 2190 } 2191 2192 qmp_blockdev_insert_anon_medium(device, bs, errp); 2193 } 2194 2195 void qmp_blockdev_change_medium(const char *device, const char *filename, 2196 bool has_format, const char *format, 2197 bool has_read_only, 2198 BlockdevChangeReadOnlyMode read_only, 2199 Error **errp) 2200 { 2201 BlockBackend *blk; 2202 BlockDriverState *medium_bs = NULL; 2203 int bdrv_flags, ret; 2204 QDict *options = NULL; 2205 Error *err = NULL; 2206 2207 blk = blk_by_name(device); 2208 if (!blk) { 2209 error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND, 2210 "Device '%s' not found", device); 2211 goto fail; 2212 } 2213 2214 if (blk_bs(blk)) { 2215 blk_update_root_state(blk); 2216 } 2217 2218 bdrv_flags = blk_get_open_flags_from_root_state(blk); 2219 2220 if (!has_read_only) { 2221 read_only = BLOCKDEV_CHANGE_READ_ONLY_MODE_RETAIN; 2222 } 2223 2224 switch (read_only) { 2225 case BLOCKDEV_CHANGE_READ_ONLY_MODE_RETAIN: 2226 break; 2227 2228 case BLOCKDEV_CHANGE_READ_ONLY_MODE_READ_ONLY: 2229 bdrv_flags &= ~BDRV_O_RDWR; 2230 break; 2231 2232 case BLOCKDEV_CHANGE_READ_ONLY_MODE_READ_WRITE: 2233 bdrv_flags |= BDRV_O_RDWR; 2234 break; 2235 2236 default: 2237 abort(); 2238 } 2239 2240 if (has_format) { 2241 options = qdict_new(); 2242 qdict_put(options, "driver", qstring_from_str(format)); 2243 } 2244 2245 assert(!medium_bs); 2246 ret = bdrv_open(&medium_bs, filename, NULL, options, bdrv_flags, errp); 2247 if (ret < 0) { 2248 goto fail; 2249 } 2250 2251 blk_apply_root_state(blk, medium_bs); 2252 2253 bdrv_add_key(medium_bs, NULL, &err); 2254 if (err) { 2255 error_propagate(errp, err); 2256 goto fail; 2257 } 2258 2259 qmp_blockdev_open_tray(device, false, false, &err); 2260 if (err) { 2261 error_propagate(errp, err); 2262 goto fail; 2263 } 2264 2265 qmp_blockdev_remove_medium(device, &err); 2266 if (err) { 2267 error_propagate(errp, err); 2268 goto fail; 2269 } 2270 2271 qmp_blockdev_insert_anon_medium(device, medium_bs, &err); 2272 if (err) { 2273 error_propagate(errp, err); 2274 goto fail; 2275 } 2276 2277 qmp_blockdev_close_tray(device, errp); 2278 2279 fail: 2280 /* If the medium has been inserted, the device has its own reference, so 2281 * ours must be relinquished; and if it has not been inserted successfully, 2282 * the reference must be relinquished anyway */ 2283 bdrv_unref(medium_bs); 2284 } 2285 2286 /* throttling disk I/O limits */ 2287 void qmp_block_set_io_throttle(const char *device, int64_t bps, int64_t bps_rd, 2288 int64_t bps_wr, 2289 int64_t iops, 2290 int64_t iops_rd, 2291 int64_t iops_wr, 2292 bool has_bps_max, 2293 int64_t bps_max, 2294 bool has_bps_rd_max, 2295 int64_t bps_rd_max, 2296 bool has_bps_wr_max, 2297 int64_t bps_wr_max, 2298 bool has_iops_max, 2299 int64_t iops_max, 2300 bool has_iops_rd_max, 2301 int64_t iops_rd_max, 2302 bool has_iops_wr_max, 
2303 int64_t iops_wr_max, 2304 bool has_iops_size, 2305 int64_t iops_size, 2306 bool has_group, 2307 const char *group, Error **errp) 2308 { 2309 ThrottleConfig cfg; 2310 BlockDriverState *bs; 2311 BlockBackend *blk; 2312 AioContext *aio_context; 2313 2314 blk = blk_by_name(device); 2315 if (!blk) { 2316 error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND, 2317 "Device '%s' not found", device); 2318 return; 2319 } 2320 2321 aio_context = blk_get_aio_context(blk); 2322 aio_context_acquire(aio_context); 2323 2324 bs = blk_bs(blk); 2325 if (!bs) { 2326 error_setg(errp, "Device '%s' has no medium", device); 2327 goto out; 2328 } 2329 2330 memset(&cfg, 0, sizeof(cfg)); 2331 cfg.buckets[THROTTLE_BPS_TOTAL].avg = bps; 2332 cfg.buckets[THROTTLE_BPS_READ].avg = bps_rd; 2333 cfg.buckets[THROTTLE_BPS_WRITE].avg = bps_wr; 2334 2335 cfg.buckets[THROTTLE_OPS_TOTAL].avg = iops; 2336 cfg.buckets[THROTTLE_OPS_READ].avg = iops_rd; 2337 cfg.buckets[THROTTLE_OPS_WRITE].avg = iops_wr; 2338 2339 if (has_bps_max) { 2340 cfg.buckets[THROTTLE_BPS_TOTAL].max = bps_max; 2341 } 2342 if (has_bps_rd_max) { 2343 cfg.buckets[THROTTLE_BPS_READ].max = bps_rd_max; 2344 } 2345 if (has_bps_wr_max) { 2346 cfg.buckets[THROTTLE_BPS_WRITE].max = bps_wr_max; 2347 } 2348 if (has_iops_max) { 2349 cfg.buckets[THROTTLE_OPS_TOTAL].max = iops_max; 2350 } 2351 if (has_iops_rd_max) { 2352 cfg.buckets[THROTTLE_OPS_READ].max = iops_rd_max; 2353 } 2354 if (has_iops_wr_max) { 2355 cfg.buckets[THROTTLE_OPS_WRITE].max = iops_wr_max; 2356 } 2357 2358 if (has_iops_size) { 2359 cfg.op_size = iops_size; 2360 } 2361 2362 if (!check_throttle_config(&cfg, errp)) { 2363 goto out; 2364 } 2365 2366 if (throttle_enabled(&cfg)) { 2367 /* Enable I/O limits if they're not enabled yet, otherwise 2368 * just update the throttling group. */ 2369 if (!bs->throttle_state) { 2370 bdrv_io_limits_enable(bs, has_group ? 
group : device); 2371 } else if (has_group) { 2372 bdrv_io_limits_update_group(bs, group); 2373 } 2374 /* Set the new throttling configuration */ 2375 bdrv_set_io_limits(bs, &cfg); 2376 } else if (bs->throttle_state) { 2377 /* If all throttling settings are set to 0, disable I/O limits */ 2378 bdrv_io_limits_disable(bs); 2379 } 2380 2381 out: 2382 aio_context_release(aio_context); 2383 } 2384 2385 void qmp_block_dirty_bitmap_add(const char *node, const char *name, 2386 bool has_granularity, uint32_t granularity, 2387 Error **errp) 2388 { 2389 AioContext *aio_context; 2390 BlockDriverState *bs; 2391 2392 if (!name || name[0] == '\0') { 2393 error_setg(errp, "Bitmap name cannot be empty"); 2394 return; 2395 } 2396 2397 bs = bdrv_lookup_bs(node, node, errp); 2398 if (!bs) { 2399 return; 2400 } 2401 2402 aio_context = bdrv_get_aio_context(bs); 2403 aio_context_acquire(aio_context); 2404 2405 if (has_granularity) { 2406 if (granularity < 512 || !is_power_of_2(granularity)) { 2407 error_setg(errp, "Granularity must be power of 2 " 2408 "and at least 512"); 2409 goto out; 2410 } 2411 } else { 2412 /* Default to cluster size, if available: */ 2413 granularity = bdrv_get_default_bitmap_granularity(bs); 2414 } 2415 2416 bdrv_create_dirty_bitmap(bs, granularity, name, errp); 2417 2418 out: 2419 aio_context_release(aio_context); 2420 } 2421 2422 void qmp_block_dirty_bitmap_remove(const char *node, const char *name, 2423 Error **errp) 2424 { 2425 AioContext *aio_context; 2426 BlockDriverState *bs; 2427 BdrvDirtyBitmap *bitmap; 2428 2429 bitmap = block_dirty_bitmap_lookup(node, name, &bs, &aio_context, errp); 2430 if (!bitmap || !bs) { 2431 return; 2432 } 2433 2434 if (bdrv_dirty_bitmap_frozen(bitmap)) { 2435 error_setg(errp, 2436 "Bitmap '%s' is currently frozen and cannot be removed", 2437 name); 2438 goto out; 2439 } 2440 bdrv_dirty_bitmap_make_anon(bitmap); 2441 bdrv_release_dirty_bitmap(bs, bitmap); 2442 2443 out: 2444 aio_context_release(aio_context); 2445 } 2446 2447 /** 2448 * Completely clear a bitmap, for the purposes of synchronizing a bitmap 2449 * immediately after a full backup operation. 
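 *
 * As with removal above, the bitmap must not be frozen (e.g. while it is
 * in use by a running backup job) and it must currently be enabled,
 * otherwise the request is rejected.
 *
 * Illustrative QMP usage (node and bitmap names are made up):
 *
 *   { "execute": "block-dirty-bitmap-clear",
 *     "arguments": { "node": "drive0", "name": "bitmap0" } }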
2450 */ 2451 void qmp_block_dirty_bitmap_clear(const char *node, const char *name, 2452 Error **errp) 2453 { 2454 AioContext *aio_context; 2455 BdrvDirtyBitmap *bitmap; 2456 BlockDriverState *bs; 2457 2458 bitmap = block_dirty_bitmap_lookup(node, name, &bs, &aio_context, errp); 2459 if (!bitmap || !bs) { 2460 return; 2461 } 2462 2463 if (bdrv_dirty_bitmap_frozen(bitmap)) { 2464 error_setg(errp, 2465 "Bitmap '%s' is currently frozen and cannot be modified", 2466 name); 2467 goto out; 2468 } else if (!bdrv_dirty_bitmap_enabled(bitmap)) { 2469 error_setg(errp, 2470 "Bitmap '%s' is currently disabled and cannot be cleared", 2471 name); 2472 goto out; 2473 } 2474 2475 bdrv_clear_dirty_bitmap(bitmap); 2476 2477 out: 2478 aio_context_release(aio_context); 2479 } 2480 2481 void hmp_drive_del(Monitor *mon, const QDict *qdict) 2482 { 2483 const char *id = qdict_get_str(qdict, "id"); 2484 BlockBackend *blk; 2485 BlockDriverState *bs; 2486 AioContext *aio_context; 2487 Error *local_err = NULL; 2488 2489 blk = blk_by_name(id); 2490 if (!blk) { 2491 error_report("Device '%s' not found", id); 2492 return; 2493 } 2494 2495 if (!blk_legacy_dinfo(blk)) { 2496 error_report("Deleting device added with blockdev-add" 2497 " is not supported"); 2498 return; 2499 } 2500 2501 aio_context = blk_get_aio_context(blk); 2502 aio_context_acquire(aio_context); 2503 2504 bs = blk_bs(blk); 2505 if (bs) { 2506 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_DRIVE_DEL, &local_err)) { 2507 error_report_err(local_err); 2508 aio_context_release(aio_context); 2509 return; 2510 } 2511 2512 bdrv_close(bs); 2513 } 2514 2515 /* if we have a device attached to this BlockDriverState 2516 * then we need to make the drive anonymous until the device 2517 * can be removed. If this is a drive with no device backing 2518 * then we can just get rid of the block driver state right here. 2519 */ 2520 if (blk_get_attached_dev(blk)) { 2521 blk_hide_on_behalf_of_hmp_drive_del(blk); 2522 /* Further I/O must not pause the guest */ 2523 blk_set_on_error(blk, BLOCKDEV_ON_ERROR_REPORT, 2524 BLOCKDEV_ON_ERROR_REPORT); 2525 } else { 2526 blk_unref(blk); 2527 } 2528 2529 aio_context_release(aio_context); 2530 } 2531 2532 void qmp_block_resize(bool has_device, const char *device, 2533 bool has_node_name, const char *node_name, 2534 int64_t size, Error **errp) 2535 { 2536 Error *local_err = NULL; 2537 BlockDriverState *bs; 2538 AioContext *aio_context; 2539 int ret; 2540 2541 bs = bdrv_lookup_bs(has_device ? device : NULL, 2542 has_node_name ? 
node_name : NULL, 2543 &local_err); 2544 if (local_err) { 2545 error_propagate(errp, local_err); 2546 return; 2547 } 2548 2549 aio_context = bdrv_get_aio_context(bs); 2550 aio_context_acquire(aio_context); 2551 2552 if (!bdrv_is_first_non_filter(bs)) { 2553 error_setg(errp, QERR_FEATURE_DISABLED, "resize"); 2554 goto out; 2555 } 2556 2557 if (size < 0) { 2558 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "size", "a >0 size"); 2559 goto out; 2560 } 2561 2562 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_RESIZE, NULL)) { 2563 error_setg(errp, QERR_DEVICE_IN_USE, device); 2564 goto out; 2565 } 2566 2567 /* complete all in-flight operations before resizing the device */ 2568 bdrv_drain_all(); 2569 2570 ret = bdrv_truncate(bs, size); 2571 switch (ret) { 2572 case 0: 2573 break; 2574 case -ENOMEDIUM: 2575 error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, device); 2576 break; 2577 case -ENOTSUP: 2578 error_setg(errp, QERR_UNSUPPORTED); 2579 break; 2580 case -EACCES: 2581 error_setg(errp, "Device '%s' is read only", device); 2582 break; 2583 case -EBUSY: 2584 error_setg(errp, QERR_DEVICE_IN_USE, device); 2585 break; 2586 default: 2587 error_setg_errno(errp, -ret, "Could not resize"); 2588 break; 2589 } 2590 2591 out: 2592 aio_context_release(aio_context); 2593 } 2594 2595 static void block_job_cb(void *opaque, int ret) 2596 { 2597 /* Note that this function may be executed from another AioContext besides 2598 * the QEMU main loop. If you need to access anything that assumes the 2599 * QEMU global mutex, use a BH or introduce a mutex. 2600 */ 2601 2602 BlockDriverState *bs = opaque; 2603 const char *msg = NULL; 2604 2605 trace_block_job_cb(bs, bs->job, ret); 2606 2607 assert(bs->job); 2608 2609 if (ret < 0) { 2610 msg = strerror(-ret); 2611 } 2612 2613 if (block_job_is_cancelled(bs->job)) { 2614 block_job_event_cancelled(bs->job); 2615 } else { 2616 block_job_event_completed(bs->job, msg); 2617 } 2618 2619 bdrv_put_ref_bh_schedule(bs); 2620 } 2621 2622 void qmp_block_stream(const char *device, 2623 bool has_base, const char *base, 2624 bool has_backing_file, const char *backing_file, 2625 bool has_speed, int64_t speed, 2626 bool has_on_error, BlockdevOnError on_error, 2627 Error **errp) 2628 { 2629 BlockBackend *blk; 2630 BlockDriverState *bs; 2631 BlockDriverState *base_bs = NULL; 2632 AioContext *aio_context; 2633 Error *local_err = NULL; 2634 const char *base_name = NULL; 2635 2636 if (!has_on_error) { 2637 on_error = BLOCKDEV_ON_ERROR_REPORT; 2638 } 2639 2640 blk = blk_by_name(device); 2641 if (!blk) { 2642 error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND, 2643 "Device '%s' not found", device); 2644 return; 2645 } 2646 2647 aio_context = blk_get_aio_context(blk); 2648 aio_context_acquire(aio_context); 2649 2650 if (!blk_is_available(blk)) { 2651 error_setg(errp, "Device '%s' has no medium", device); 2652 goto out; 2653 } 2654 bs = blk_bs(blk); 2655 2656 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_STREAM, errp)) { 2657 goto out; 2658 } 2659 2660 if (has_base) { 2661 base_bs = bdrv_find_backing_image(bs, base); 2662 if (base_bs == NULL) { 2663 error_setg(errp, QERR_BASE_NOT_FOUND, base); 2664 goto out; 2665 } 2666 assert(bdrv_get_aio_context(base_bs) == aio_context); 2667 base_name = base; 2668 } 2669 2670 /* if we are streaming the entire chain, the result will have no backing 2671 * file, and specifying one is therefore an error */ 2672 if (base_bs == NULL && has_backing_file) { 2673 error_setg(errp, "backing file specified, but streaming the " 2674 "entire chain"); 2675 goto out; 2676 } 2677 2678 /* 
backing_file string overrides base bs filename */ 2679 base_name = has_backing_file ? backing_file : base_name; 2680 2681 stream_start(bs, base_bs, base_name, has_speed ? speed : 0, 2682 on_error, block_job_cb, bs, &local_err); 2683 if (local_err) { 2684 error_propagate(errp, local_err); 2685 goto out; 2686 } 2687 2688 trace_qmp_block_stream(bs, bs->job); 2689 2690 out: 2691 aio_context_release(aio_context); 2692 } 2693 2694 void qmp_block_commit(const char *device, 2695 bool has_base, const char *base, 2696 bool has_top, const char *top, 2697 bool has_backing_file, const char *backing_file, 2698 bool has_speed, int64_t speed, 2699 Error **errp) 2700 { 2701 BlockBackend *blk; 2702 BlockDriverState *bs; 2703 BlockDriverState *base_bs, *top_bs; 2704 AioContext *aio_context; 2705 Error *local_err = NULL; 2706 /* This will be part of the QMP command, if/when the 2707 * BlockdevOnError change for blkmirror makes it in 2708 */ 2709 BlockdevOnError on_error = BLOCKDEV_ON_ERROR_REPORT; 2710 2711 if (!has_speed) { 2712 speed = 0; 2713 } 2714 2715 /* Important Note: 2716 * libvirt relies on the DeviceNotFound error class in order to probe for 2717 * live commit feature versions; for this to work, we must make sure to 2718 * perform the device lookup before any generic errors that may occur in a 2719 * scenario in which all optional arguments are omitted. */ 2720 blk = blk_by_name(device); 2721 if (!blk) { 2722 error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND, 2723 "Device '%s' not found", device); 2724 return; 2725 } 2726 2727 aio_context = blk_get_aio_context(blk); 2728 aio_context_acquire(aio_context); 2729 2730 if (!blk_is_available(blk)) { 2731 error_setg(errp, "Device '%s' has no medium", device); 2732 goto out; 2733 } 2734 bs = blk_bs(blk); 2735 2736 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_COMMIT_SOURCE, errp)) { 2737 goto out; 2738 } 2739 2740 /* default top_bs is the active layer */ 2741 top_bs = bs; 2742 2743 if (has_top && top) { 2744 if (strcmp(bs->filename, top) != 0) { 2745 top_bs = bdrv_find_backing_image(bs, top); 2746 } 2747 } 2748 2749 if (top_bs == NULL) { 2750 error_setg(errp, "Top image file %s not found", top ? top : "NULL"); 2751 goto out; 2752 } 2753 2754 assert(bdrv_get_aio_context(top_bs) == aio_context); 2755 2756 if (has_base && base) { 2757 base_bs = bdrv_find_backing_image(top_bs, base); 2758 } else { 2759 base_bs = bdrv_find_base(top_bs); 2760 } 2761 2762 if (base_bs == NULL) { 2763 error_setg(errp, QERR_BASE_NOT_FOUND, base ? base : "NULL"); 2764 goto out; 2765 } 2766 2767 assert(bdrv_get_aio_context(base_bs) == aio_context); 2768 2769 if (bdrv_op_is_blocked(base_bs, BLOCK_OP_TYPE_COMMIT_TARGET, errp)) { 2770 goto out; 2771 } 2772 2773 /* Do not allow attempts to commit an image into itself */ 2774 if (top_bs == base_bs) { 2775 error_setg(errp, "cannot commit an image into itself"); 2776 goto out; 2777 } 2778 2779 if (top_bs == bs) { 2780 if (has_backing_file) { 2781 error_setg(errp, "'backing-file' specified," 2782 " but 'top' is the active layer"); 2783 goto out; 2784 } 2785 commit_active_start(bs, base_bs, speed, on_error, block_job_cb, 2786 bs, &local_err); 2787 } else { 2788 commit_start(bs, base_bs, top_bs, speed, on_error, block_job_cb, bs, 2789 has_backing_file ? 
backing_file : NULL, &local_err); 2790 } 2791 if (local_err != NULL) { 2792 error_propagate(errp, local_err); 2793 goto out; 2794 } 2795 2796 out: 2797 aio_context_release(aio_context); 2798 } 2799 2800 void qmp_drive_backup(const char *device, const char *target, 2801 bool has_format, const char *format, 2802 enum MirrorSyncMode sync, 2803 bool has_mode, enum NewImageMode mode, 2804 bool has_speed, int64_t speed, 2805 bool has_bitmap, const char *bitmap, 2806 bool has_on_source_error, BlockdevOnError on_source_error, 2807 bool has_on_target_error, BlockdevOnError on_target_error, 2808 Error **errp) 2809 { 2810 BlockBackend *blk; 2811 BlockDriverState *bs; 2812 BlockDriverState *target_bs; 2813 BlockDriverState *source = NULL; 2814 BdrvDirtyBitmap *bmap = NULL; 2815 AioContext *aio_context; 2816 QDict *options = NULL; 2817 Error *local_err = NULL; 2818 int flags; 2819 int64_t size; 2820 int ret; 2821 2822 if (!has_speed) { 2823 speed = 0; 2824 } 2825 if (!has_on_source_error) { 2826 on_source_error = BLOCKDEV_ON_ERROR_REPORT; 2827 } 2828 if (!has_on_target_error) { 2829 on_target_error = BLOCKDEV_ON_ERROR_REPORT; 2830 } 2831 if (!has_mode) { 2832 mode = NEW_IMAGE_MODE_ABSOLUTE_PATHS; 2833 } 2834 2835 blk = blk_by_name(device); 2836 if (!blk) { 2837 error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND, 2838 "Device '%s' not found", device); 2839 return; 2840 } 2841 2842 aio_context = blk_get_aio_context(blk); 2843 aio_context_acquire(aio_context); 2844 2845 /* Although backup_run has this check too, we need to use bs->drv below, so 2846 * do an early check redundantly. */ 2847 if (!blk_is_available(blk)) { 2848 error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, device); 2849 goto out; 2850 } 2851 bs = blk_bs(blk); 2852 2853 if (!has_format) { 2854 format = mode == NEW_IMAGE_MODE_EXISTING ? NULL : bs->drv->format_name; 2855 } 2856 2857 /* Early check to avoid creating target */ 2858 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_BACKUP_SOURCE, errp)) { 2859 goto out; 2860 } 2861 2862 flags = bs->open_flags | BDRV_O_RDWR; 2863 2864 /* See if we have a backing HD we can use to create our new image 2865 * on top of. 
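 *
 * For sync=top the new image is created on top of the current backing
 * file, so it only has to hold the data that is local to this image; if
 * there is no backing file we quietly fall back to sync=full.  For
 * sync=none the new image is created with the device itself as its
 * backing file.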
*/ 2866 if (sync == MIRROR_SYNC_MODE_TOP) { 2867 source = backing_bs(bs); 2868 if (!source) { 2869 sync = MIRROR_SYNC_MODE_FULL; 2870 } 2871 } 2872 if (sync == MIRROR_SYNC_MODE_NONE) { 2873 source = bs; 2874 } 2875 2876 size = bdrv_getlength(bs); 2877 if (size < 0) { 2878 error_setg_errno(errp, -size, "bdrv_getlength failed"); 2879 goto out; 2880 } 2881 2882 if (mode != NEW_IMAGE_MODE_EXISTING) { 2883 assert(format); 2884 if (source) { 2885 bdrv_img_create(target, format, source->filename, 2886 source->drv->format_name, NULL, 2887 size, flags, &local_err, false); 2888 } else { 2889 bdrv_img_create(target, format, NULL, NULL, NULL, 2890 size, flags, &local_err, false); 2891 } 2892 } 2893 2894 if (local_err) { 2895 error_propagate(errp, local_err); 2896 goto out; 2897 } 2898 2899 if (format) { 2900 options = qdict_new(); 2901 qdict_put(options, "driver", qstring_from_str(format)); 2902 } 2903 2904 target_bs = NULL; 2905 ret = bdrv_open(&target_bs, target, NULL, options, flags, &local_err); 2906 if (ret < 0) { 2907 error_propagate(errp, local_err); 2908 goto out; 2909 } 2910 2911 bdrv_set_aio_context(target_bs, aio_context); 2912 2913 if (has_bitmap) { 2914 bmap = bdrv_find_dirty_bitmap(bs, bitmap); 2915 if (!bmap) { 2916 error_setg(errp, "Bitmap '%s' could not be found", bitmap); 2917 goto out; 2918 } 2919 } 2920 2921 backup_start(bs, target_bs, speed, sync, bmap, 2922 on_source_error, on_target_error, 2923 block_job_cb, bs, &local_err); 2924 if (local_err != NULL) { 2925 bdrv_unref(target_bs); 2926 error_propagate(errp, local_err); 2927 goto out; 2928 } 2929 2930 out: 2931 aio_context_release(aio_context); 2932 } 2933 2934 BlockDeviceInfoList *qmp_query_named_block_nodes(Error **errp) 2935 { 2936 return bdrv_named_nodes_list(errp); 2937 } 2938 2939 void qmp_blockdev_backup(const char *device, const char *target, 2940 enum MirrorSyncMode sync, 2941 bool has_speed, int64_t speed, 2942 bool has_on_source_error, 2943 BlockdevOnError on_source_error, 2944 bool has_on_target_error, 2945 BlockdevOnError on_target_error, 2946 Error **errp) 2947 { 2948 BlockBackend *blk, *target_blk; 2949 BlockDriverState *bs; 2950 BlockDriverState *target_bs; 2951 Error *local_err = NULL; 2952 AioContext *aio_context; 2953 2954 if (!has_speed) { 2955 speed = 0; 2956 } 2957 if (!has_on_source_error) { 2958 on_source_error = BLOCKDEV_ON_ERROR_REPORT; 2959 } 2960 if (!has_on_target_error) { 2961 on_target_error = BLOCKDEV_ON_ERROR_REPORT; 2962 } 2963 2964 blk = blk_by_name(device); 2965 if (!blk) { 2966 error_setg(errp, "Device '%s' not found", device); 2967 return; 2968 } 2969 2970 aio_context = blk_get_aio_context(blk); 2971 aio_context_acquire(aio_context); 2972 2973 if (!blk_is_available(blk)) { 2974 error_setg(errp, "Device '%s' has no medium", device); 2975 goto out; 2976 } 2977 bs = blk_bs(blk); 2978 2979 target_blk = blk_by_name(target); 2980 if (!target_blk) { 2981 error_setg(errp, "Device '%s' not found", target); 2982 goto out; 2983 } 2984 2985 if (!blk_is_available(target_blk)) { 2986 error_setg(errp, "Device '%s' has no medium", target); 2987 goto out; 2988 } 2989 target_bs = blk_bs(target_blk); 2990 2991 bdrv_ref(target_bs); 2992 bdrv_set_aio_context(target_bs, aio_context); 2993 backup_start(bs, target_bs, speed, sync, NULL, on_source_error, 2994 on_target_error, block_job_cb, bs, &local_err); 2995 if (local_err != NULL) { 2996 bdrv_unref(target_bs); 2997 error_propagate(errp, local_err); 2998 } 2999 out: 3000 aio_context_release(aio_context); 3001 } 3002 3003 void qmp_drive_mirror(const char *device, 
const char *target, 3004 bool has_format, const char *format, 3005 bool has_node_name, const char *node_name, 3006 bool has_replaces, const char *replaces, 3007 enum MirrorSyncMode sync, 3008 bool has_mode, enum NewImageMode mode, 3009 bool has_speed, int64_t speed, 3010 bool has_granularity, uint32_t granularity, 3011 bool has_buf_size, int64_t buf_size, 3012 bool has_on_source_error, BlockdevOnError on_source_error, 3013 bool has_on_target_error, BlockdevOnError on_target_error, 3014 bool has_unmap, bool unmap, 3015 Error **errp) 3016 { 3017 BlockBackend *blk; 3018 BlockDriverState *bs; 3019 BlockDriverState *source, *target_bs; 3020 AioContext *aio_context; 3021 Error *local_err = NULL; 3022 QDict *options; 3023 int flags; 3024 int64_t size; 3025 int ret; 3026 3027 if (!has_speed) { 3028 speed = 0; 3029 } 3030 if (!has_on_source_error) { 3031 on_source_error = BLOCKDEV_ON_ERROR_REPORT; 3032 } 3033 if (!has_on_target_error) { 3034 on_target_error = BLOCKDEV_ON_ERROR_REPORT; 3035 } 3036 if (!has_mode) { 3037 mode = NEW_IMAGE_MODE_ABSOLUTE_PATHS; 3038 } 3039 if (!has_granularity) { 3040 granularity = 0; 3041 } 3042 if (!has_buf_size) { 3043 buf_size = 0; 3044 } 3045 if (!has_unmap) { 3046 unmap = true; 3047 } 3048 3049 if (granularity != 0 && (granularity < 512 || granularity > 1048576 * 64)) { 3050 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "granularity", 3051 "a value in range [512B, 64MB]"); 3052 return; 3053 } 3054 if (granularity & (granularity - 1)) { 3055 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "granularity", 3056 "power of 2"); 3057 return; 3058 } 3059 3060 blk = blk_by_name(device); 3061 if (!blk) { 3062 error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND, 3063 "Device '%s' not found", device); 3064 return; 3065 } 3066 3067 aio_context = blk_get_aio_context(blk); 3068 aio_context_acquire(aio_context); 3069 3070 if (!blk_is_available(blk)) { 3071 error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, device); 3072 goto out; 3073 } 3074 bs = blk_bs(blk); 3075 3076 if (!has_format) { 3077 format = mode == NEW_IMAGE_MODE_EXISTING ? 
NULL : bs->drv->format_name; 3078 } 3079 3080 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_MIRROR, errp)) { 3081 goto out; 3082 } 3083 3084 flags = bs->open_flags | BDRV_O_RDWR; 3085 source = backing_bs(bs); 3086 if (!source && sync == MIRROR_SYNC_MODE_TOP) { 3087 sync = MIRROR_SYNC_MODE_FULL; 3088 } 3089 if (sync == MIRROR_SYNC_MODE_NONE) { 3090 source = bs; 3091 } 3092 3093 size = bdrv_getlength(bs); 3094 if (size < 0) { 3095 error_setg_errno(errp, -size, "bdrv_getlength failed"); 3096 goto out; 3097 } 3098 3099 if (has_replaces) { 3100 BlockDriverState *to_replace_bs; 3101 AioContext *replace_aio_context; 3102 int64_t replace_size; 3103 3104 if (!has_node_name) { 3105 error_setg(errp, "a node-name must be provided when replacing a" 3106 " named node of the graph"); 3107 goto out; 3108 } 3109 3110 to_replace_bs = check_to_replace_node(bs, replaces, &local_err); 3111 3112 if (!to_replace_bs) { 3113 error_propagate(errp, local_err); 3114 goto out; 3115 } 3116 3117 replace_aio_context = bdrv_get_aio_context(to_replace_bs); 3118 aio_context_acquire(replace_aio_context); 3119 replace_size = bdrv_getlength(to_replace_bs); 3120 aio_context_release(replace_aio_context); 3121 3122 if (size != replace_size) { 3123 error_setg(errp, "cannot replace image with a mirror image of " 3124 "different size"); 3125 goto out; 3126 } 3127 } 3128 3129 if ((sync == MIRROR_SYNC_MODE_FULL || !source) 3130 && mode != NEW_IMAGE_MODE_EXISTING) 3131 { 3132 /* create new image w/o backing file */ 3133 assert(format); 3134 bdrv_img_create(target, format, 3135 NULL, NULL, NULL, size, flags, &local_err, false); 3136 } else { 3137 switch (mode) { 3138 case NEW_IMAGE_MODE_EXISTING: 3139 break; 3140 case NEW_IMAGE_MODE_ABSOLUTE_PATHS: 3141 /* create new image with backing file */ 3142 bdrv_img_create(target, format, 3143 source->filename, 3144 source->drv->format_name, 3145 NULL, size, flags, &local_err, false); 3146 break; 3147 default: 3148 abort(); 3149 } 3150 } 3151 3152 if (local_err) { 3153 error_propagate(errp, local_err); 3154 goto out; 3155 } 3156 3157 options = qdict_new(); 3158 if (has_node_name) { 3159 qdict_put(options, "node-name", qstring_from_str(node_name)); 3160 } 3161 if (format) { 3162 qdict_put(options, "driver", qstring_from_str(format)); 3163 } 3164 3165 /* Mirroring takes care of copy-on-write using the source's backing 3166 * file. 3167 */ 3168 target_bs = NULL; 3169 ret = bdrv_open(&target_bs, target, NULL, options, 3170 flags | BDRV_O_NO_BACKING, &local_err); 3171 if (ret < 0) { 3172 error_propagate(errp, local_err); 3173 goto out; 3174 } 3175 3176 bdrv_set_aio_context(target_bs, aio_context); 3177 3178 /* pass the node name to replace to mirror start since it's loose coupling 3179 * and will allow to check whether the node still exist at mirror completion 3180 */ 3181 mirror_start(bs, target_bs, 3182 has_replaces ? 
replaces : NULL, 3183 speed, granularity, buf_size, sync, 3184 on_source_error, on_target_error, 3185 unmap, 3186 block_job_cb, bs, &local_err); 3187 if (local_err != NULL) { 3188 bdrv_unref(target_bs); 3189 error_propagate(errp, local_err); 3190 goto out; 3191 } 3192 3193 out: 3194 aio_context_release(aio_context); 3195 } 3196 3197 /* Get the block job for a given device name and acquire its AioContext */ 3198 static BlockJob *find_block_job(const char *device, AioContext **aio_context, 3199 Error **errp) 3200 { 3201 BlockBackend *blk; 3202 BlockDriverState *bs; 3203 3204 *aio_context = NULL; 3205 3206 blk = blk_by_name(device); 3207 if (!blk) { 3208 goto notfound; 3209 } 3210 3211 *aio_context = blk_get_aio_context(blk); 3212 aio_context_acquire(*aio_context); 3213 3214 if (!blk_is_available(blk)) { 3215 goto notfound; 3216 } 3217 bs = blk_bs(blk); 3218 3219 if (!bs->job) { 3220 goto notfound; 3221 } 3222 3223 return bs->job; 3224 3225 notfound: 3226 error_set(errp, ERROR_CLASS_DEVICE_NOT_ACTIVE, 3227 "No active block job on device '%s'", device); 3228 if (*aio_context) { 3229 aio_context_release(*aio_context); 3230 *aio_context = NULL; 3231 } 3232 return NULL; 3233 } 3234 3235 void qmp_block_job_set_speed(const char *device, int64_t speed, Error **errp) 3236 { 3237 AioContext *aio_context; 3238 BlockJob *job = find_block_job(device, &aio_context, errp); 3239 3240 if (!job) { 3241 return; 3242 } 3243 3244 block_job_set_speed(job, speed, errp); 3245 aio_context_release(aio_context); 3246 } 3247 3248 void qmp_block_job_cancel(const char *device, 3249 bool has_force, bool force, Error **errp) 3250 { 3251 AioContext *aio_context; 3252 BlockJob *job = find_block_job(device, &aio_context, errp); 3253 3254 if (!job) { 3255 return; 3256 } 3257 3258 if (!has_force) { 3259 force = false; 3260 } 3261 3262 if (job->user_paused && !force) { 3263 error_setg(errp, "The block job for device '%s' is currently paused", 3264 device); 3265 goto out; 3266 } 3267 3268 trace_qmp_block_job_cancel(job); 3269 block_job_cancel(job); 3270 out: 3271 aio_context_release(aio_context); 3272 } 3273 3274 void qmp_block_job_pause(const char *device, Error **errp) 3275 { 3276 AioContext *aio_context; 3277 BlockJob *job = find_block_job(device, &aio_context, errp); 3278 3279 if (!job || job->user_paused) { 3280 return; 3281 } 3282 3283 job->user_paused = true; 3284 trace_qmp_block_job_pause(job); 3285 block_job_pause(job); 3286 aio_context_release(aio_context); 3287 } 3288 3289 void qmp_block_job_resume(const char *device, Error **errp) 3290 { 3291 AioContext *aio_context; 3292 BlockJob *job = find_block_job(device, &aio_context, errp); 3293 3294 if (!job || !job->user_paused) { 3295 return; 3296 } 3297 3298 job->user_paused = false; 3299 trace_qmp_block_job_resume(job); 3300 block_job_resume(job); 3301 aio_context_release(aio_context); 3302 } 3303 3304 void qmp_block_job_complete(const char *device, Error **errp) 3305 { 3306 AioContext *aio_context; 3307 BlockJob *job = find_block_job(device, &aio_context, errp); 3308 3309 if (!job) { 3310 return; 3311 } 3312 3313 trace_qmp_block_job_complete(job); 3314 block_job_complete(job, errp); 3315 aio_context_release(aio_context); 3316 } 3317 3318 void qmp_change_backing_file(const char *device, 3319 const char *image_node_name, 3320 const char *backing_file, 3321 Error **errp) 3322 { 3323 BlockBackend *blk; 3324 BlockDriverState *bs = NULL; 3325 AioContext *aio_context; 3326 BlockDriverState *image_bs = NULL; 3327 Error *local_err = NULL; 3328 bool ro; 3329 int open_flags; 3330 
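    /* Outline of what follows: look up the device and the named image
     * node, check that the node really is part of this device's backing
     * chain, reopen it read-write if necessary, rewrite its backing file
     * string, and finally restore the original open flags. */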
int ret; 3331 3332 blk = blk_by_name(device); 3333 if (!blk) { 3334 error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND, 3335 "Device '%s' not found", device); 3336 return; 3337 } 3338 3339 aio_context = blk_get_aio_context(blk); 3340 aio_context_acquire(aio_context); 3341 3342 if (!blk_is_available(blk)) { 3343 error_setg(errp, "Device '%s' has no medium", device); 3344 goto out; 3345 } 3346 bs = blk_bs(blk); 3347 3348 image_bs = bdrv_lookup_bs(NULL, image_node_name, &local_err); 3349 if (local_err) { 3350 error_propagate(errp, local_err); 3351 goto out; 3352 } 3353 3354 if (!image_bs) { 3355 error_setg(errp, "image file not found"); 3356 goto out; 3357 } 3358 3359 if (bdrv_find_base(image_bs) == image_bs) { 3360 error_setg(errp, "not allowing backing file change on an image " 3361 "without a backing file"); 3362 goto out; 3363 } 3364 3365 /* even though we are not necessarily operating on bs, we need it to 3366 * determine if block ops are currently prohibited on the chain */ 3367 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_CHANGE, errp)) { 3368 goto out; 3369 } 3370 3371 /* final sanity check */ 3372 if (!bdrv_chain_contains(bs, image_bs)) { 3373 error_setg(errp, "'%s' and image file are not in the same chain", 3374 device); 3375 goto out; 3376 } 3377 3378 /* if not r/w, reopen to make r/w */ 3379 open_flags = image_bs->open_flags; 3380 ro = bdrv_is_read_only(image_bs); 3381 3382 if (ro) { 3383 bdrv_reopen(image_bs, open_flags | BDRV_O_RDWR, &local_err); 3384 if (local_err) { 3385 error_propagate(errp, local_err); 3386 goto out; 3387 } 3388 } 3389 3390 ret = bdrv_change_backing_file(image_bs, backing_file, 3391 image_bs->drv ? image_bs->drv->format_name : ""); 3392 3393 if (ret < 0) { 3394 error_setg_errno(errp, -ret, "Could not change backing file to '%s'", 3395 backing_file); 3396 /* don't exit here, so we can try to restore open flags if 3397 * appropriate */ 3398 } 3399 3400 if (ro) { 3401 bdrv_reopen(image_bs, open_flags, &local_err); 3402 if (local_err) { 3403 error_propagate(errp, local_err); /* will preserve prior errp */ 3404 } 3405 } 3406 3407 out: 3408 aio_context_release(aio_context); 3409 } 3410 3411 void qmp_blockdev_add(BlockdevOptions *options, Error **errp) 3412 { 3413 QmpOutputVisitor *ov = qmp_output_visitor_new(); 3414 BlockDriverState *bs; 3415 BlockBackend *blk = NULL; 3416 QObject *obj; 3417 QDict *qdict; 3418 Error *local_err = NULL; 3419 3420 /* TODO Sort it out in raw-posix and drive_new(): Reject aio=native with 3421 * cache.direct=false instead of silently switching to aio=threads, except 3422 * when called from drive_new(). 3423 * 3424 * For now, simply forbidding the combination for all drivers will do. 
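 *
 * Illustrative QMP usage (IDs and paths are made up); note that
 * aio=native is only accepted together with cache.direct=true:
 *
 *   { "execute": "blockdev-add",
 *     "arguments": {
 *       "options": { "driver": "raw",
 *                    "id": "disk0",
 *                    "aio": "native",
 *                    "cache": { "direct": true },
 *                    "file": { "driver": "file",
 *                              "filename": "/tmp/disk0.img" } } } }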
*/ 3425 if (options->has_aio && options->aio == BLOCKDEV_AIO_OPTIONS_NATIVE) { 3426 bool direct = options->has_cache && 3427 options->cache->has_direct && 3428 options->cache->direct; 3429 if (!direct) { 3430 error_setg(errp, "aio=native requires cache.direct=true"); 3431 goto fail; 3432 } 3433 } 3434 3435 visit_type_BlockdevOptions(qmp_output_get_visitor(ov), 3436 &options, NULL, &local_err); 3437 if (local_err) { 3438 error_propagate(errp, local_err); 3439 goto fail; 3440 } 3441 3442 obj = qmp_output_get_qobject(ov); 3443 qdict = qobject_to_qdict(obj); 3444 3445 qdict_flatten(qdict); 3446 3447 if (options->has_id) { 3448 blk = blockdev_init(NULL, qdict, &local_err); 3449 if (local_err) { 3450 error_propagate(errp, local_err); 3451 goto fail; 3452 } 3453 3454 bs = blk_bs(blk); 3455 } else { 3456 if (!qdict_get_try_str(qdict, "node-name")) { 3457 error_setg(errp, "'id' and/or 'node-name' need to be specified for " 3458 "the root node"); 3459 goto fail; 3460 } 3461 3462 bs = bds_tree_init(qdict, errp); 3463 if (!bs) { 3464 goto fail; 3465 } 3466 } 3467 3468 if (bs && bdrv_key_required(bs)) { 3469 if (blk) { 3470 blk_unref(blk); 3471 } else { 3472 bdrv_unref(bs); 3473 } 3474 error_setg(errp, "blockdev-add doesn't support encrypted devices"); 3475 goto fail; 3476 } 3477 3478 fail: 3479 qmp_output_visitor_cleanup(ov); 3480 } 3481 3482 void qmp_x_blockdev_del(bool has_id, const char *id, 3483 bool has_node_name, const char *node_name, Error **errp) 3484 { 3485 AioContext *aio_context; 3486 BlockBackend *blk; 3487 BlockDriverState *bs; 3488 3489 if (has_id && has_node_name) { 3490 error_setg(errp, "Only one of id and node-name must be specified"); 3491 return; 3492 } else if (!has_id && !has_node_name) { 3493 error_setg(errp, "No block device specified"); 3494 return; 3495 } 3496 3497 if (has_id) { 3498 blk = blk_by_name(id); 3499 if (!blk) { 3500 error_setg(errp, "Cannot find block backend %s", id); 3501 return; 3502 } 3503 if (blk_get_refcnt(blk) > 1) { 3504 error_setg(errp, "Block backend %s is in use", id); 3505 return; 3506 } 3507 bs = blk_bs(blk); 3508 aio_context = blk_get_aio_context(blk); 3509 } else { 3510 bs = bdrv_find_node(node_name); 3511 if (!bs) { 3512 error_setg(errp, "Cannot find node %s", node_name); 3513 return; 3514 } 3515 blk = bs->blk; 3516 if (blk) { 3517 error_setg(errp, "Node %s is in use by %s", 3518 node_name, blk_name(blk)); 3519 return; 3520 } 3521 aio_context = bdrv_get_aio_context(bs); 3522 } 3523 3524 aio_context_acquire(aio_context); 3525 3526 if (bs) { 3527 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_DRIVE_DEL, errp)) { 3528 goto out; 3529 } 3530 3531 if (bs->refcnt > 1 || !QLIST_EMPTY(&bs->parents)) { 3532 error_setg(errp, "Block device %s is in use", 3533 bdrv_get_device_or_node_name(bs)); 3534 goto out; 3535 } 3536 } 3537 3538 if (blk) { 3539 blk_unref(blk); 3540 } else { 3541 bdrv_unref(bs); 3542 } 3543 3544 out: 3545 aio_context_release(aio_context); 3546 } 3547 3548 BlockJobInfoList *qmp_query_block_jobs(Error **errp) 3549 { 3550 BlockJobInfoList *head = NULL, **p_next = &head; 3551 BlockDriverState *bs; 3552 3553 for (bs = bdrv_next(NULL); bs; bs = bdrv_next(bs)) { 3554 AioContext *aio_context = bdrv_get_aio_context(bs); 3555 3556 aio_context_acquire(aio_context); 3557 3558 if (bs->job) { 3559 BlockJobInfoList *elem = g_new0(BlockJobInfoList, 1); 3560 elem->value = block_job_query(bs->job); 3561 *p_next = elem; 3562 p_next = &elem->next; 3563 } 3564 3565 aio_context_release(aio_context); 3566 } 3567 3568 return head; 3569 } 3570 3571 QemuOptsList 
qemu_common_drive_opts = { 3572 .name = "drive", 3573 .head = QTAILQ_HEAD_INITIALIZER(qemu_common_drive_opts.head), 3574 .desc = { 3575 { 3576 .name = "snapshot", 3577 .type = QEMU_OPT_BOOL, 3578 .help = "enable/disable snapshot mode", 3579 },{ 3580 .name = "discard", 3581 .type = QEMU_OPT_STRING, 3582 .help = "discard operation (ignore/off, unmap/on)", 3583 },{ 3584 .name = BDRV_OPT_CACHE_WB, 3585 .type = QEMU_OPT_BOOL, 3586 .help = "enables writeback mode for any caches", 3587 },{ 3588 .name = BDRV_OPT_CACHE_DIRECT, 3589 .type = QEMU_OPT_BOOL, 3590 .help = "enables use of O_DIRECT (bypass the host page cache)", 3591 },{ 3592 .name = BDRV_OPT_CACHE_NO_FLUSH, 3593 .type = QEMU_OPT_BOOL, 3594 .help = "ignore any flush requests for the device", 3595 },{ 3596 .name = "aio", 3597 .type = QEMU_OPT_STRING, 3598 .help = "host AIO implementation (threads, native)", 3599 },{ 3600 .name = "format", 3601 .type = QEMU_OPT_STRING, 3602 .help = "disk format (raw, qcow2, ...)", 3603 },{ 3604 .name = "rerror", 3605 .type = QEMU_OPT_STRING, 3606 .help = "read error action", 3607 },{ 3608 .name = "werror", 3609 .type = QEMU_OPT_STRING, 3610 .help = "write error action", 3611 },{ 3612 .name = "read-only", 3613 .type = QEMU_OPT_BOOL, 3614 .help = "open drive file as read-only", 3615 },{ 3616 .name = "throttling.iops-total", 3617 .type = QEMU_OPT_NUMBER, 3618 .help = "limit total I/O operations per second", 3619 },{ 3620 .name = "throttling.iops-read", 3621 .type = QEMU_OPT_NUMBER, 3622 .help = "limit read operations per second", 3623 },{ 3624 .name = "throttling.iops-write", 3625 .type = QEMU_OPT_NUMBER, 3626 .help = "limit write operations per second", 3627 },{ 3628 .name = "throttling.bps-total", 3629 .type = QEMU_OPT_NUMBER, 3630 .help = "limit total bytes per second", 3631 },{ 3632 .name = "throttling.bps-read", 3633 .type = QEMU_OPT_NUMBER, 3634 .help = "limit read bytes per second", 3635 },{ 3636 .name = "throttling.bps-write", 3637 .type = QEMU_OPT_NUMBER, 3638 .help = "limit write bytes per second", 3639 },{ 3640 .name = "throttling.iops-total-max", 3641 .type = QEMU_OPT_NUMBER, 3642 .help = "I/O operations burst", 3643 },{ 3644 .name = "throttling.iops-read-max", 3645 .type = QEMU_OPT_NUMBER, 3646 .help = "I/O operations read burst", 3647 },{ 3648 .name = "throttling.iops-write-max", 3649 .type = QEMU_OPT_NUMBER, 3650 .help = "I/O operations write burst", 3651 },{ 3652 .name = "throttling.bps-total-max", 3653 .type = QEMU_OPT_NUMBER, 3654 .help = "total bytes burst", 3655 },{ 3656 .name = "throttling.bps-read-max", 3657 .type = QEMU_OPT_NUMBER, 3658 .help = "total bytes read burst", 3659 },{ 3660 .name = "throttling.bps-write-max", 3661 .type = QEMU_OPT_NUMBER, 3662 .help = "total bytes write burst", 3663 },{ 3664 .name = "throttling.iops-size", 3665 .type = QEMU_OPT_NUMBER, 3666 .help = "when limiting by iops max size of an I/O in bytes", 3667 },{ 3668 .name = "throttling.group", 3669 .type = QEMU_OPT_STRING, 3670 .help = "name of the block throttling group", 3671 },{ 3672 .name = "copy-on-read", 3673 .type = QEMU_OPT_BOOL, 3674 .help = "copy read data from backing file into image file", 3675 },{ 3676 .name = "detect-zeroes", 3677 .type = QEMU_OPT_STRING, 3678 .help = "try to optimize zero writes (off, on, unmap)", 3679 }, 3680 { /* end of list */ } 3681 }, 3682 }; 3683 3684 static QemuOptsList qemu_root_bds_opts = { 3685 .name = "root-bds", 3686 .head = QTAILQ_HEAD_INITIALIZER(qemu_common_drive_opts.head), 3687 .desc = { 3688 { 3689 .name = "discard", 3690 .type = QEMU_OPT_STRING, 3691 .help = 
"discard operation (ignore/off, unmap/on)", 3692 },{ 3693 .name = "cache.writeback", 3694 .type = QEMU_OPT_BOOL, 3695 .help = "enables writeback mode for any caches", 3696 },{ 3697 .name = "cache.direct", 3698 .type = QEMU_OPT_BOOL, 3699 .help = "enables use of O_DIRECT (bypass the host page cache)", 3700 },{ 3701 .name = "cache.no-flush", 3702 .type = QEMU_OPT_BOOL, 3703 .help = "ignore any flush requests for the device", 3704 },{ 3705 .name = "aio", 3706 .type = QEMU_OPT_STRING, 3707 .help = "host AIO implementation (threads, native)", 3708 },{ 3709 .name = "read-only", 3710 .type = QEMU_OPT_BOOL, 3711 .help = "open drive file as read-only", 3712 },{ 3713 .name = "copy-on-read", 3714 .type = QEMU_OPT_BOOL, 3715 .help = "copy read data from backing file into image file", 3716 },{ 3717 .name = "detect-zeroes", 3718 .type = QEMU_OPT_STRING, 3719 .help = "try to optimize zero writes (off, on, unmap)", 3720 }, 3721 { /* end of list */ } 3722 }, 3723 }; 3724 3725 QemuOptsList qemu_drive_opts = { 3726 .name = "drive", 3727 .head = QTAILQ_HEAD_INITIALIZER(qemu_drive_opts.head), 3728 .desc = { 3729 /* 3730 * no elements => accept any params 3731 * validation will happen later 3732 */ 3733 { /* end of list */ } 3734 }, 3735 }; 3736