/*
 * QEMU host block devices
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later. See the COPYING file in the top-level directory.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "sysemu/block-backend.h"
#include "sysemu/blockdev.h"
#include "hw/block/block.h"
#include "block/blockjob.h"
#include "block/throttle-groups.h"
#include "monitor/monitor.h"
#include "qemu/error-report.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "qapi/qmp/types.h"
#include "qapi-visit.h"
#include "qapi/qmp/qerror.h"
#include "qapi/qmp-output-visitor.h"
#include "qapi/util.h"
#include "sysemu/sysemu.h"
#include "block/block_int.h"
#include "qmp-commands.h"
#include "trace.h"
#include "sysemu/arch_init.h"
#include "qemu/cutils.h"
#include "qemu/help_option.h"

static QTAILQ_HEAD(, BlockDriverState) monitor_bdrv_states =
    QTAILQ_HEAD_INITIALIZER(monitor_bdrv_states);

static int do_open_tray(const char *blk_name, const char *qdev_id,
                        bool force, Error **errp);

static const char *const if_name[IF_COUNT] = {
    [IF_NONE] = "none",
    [IF_IDE] = "ide",
    [IF_SCSI] = "scsi",
    [IF_FLOPPY] = "floppy",
    [IF_PFLASH] = "pflash",
    [IF_MTD] = "mtd",
    [IF_SD] = "sd",
    [IF_VIRTIO] = "virtio",
    [IF_XEN] = "xen",
};

static int if_max_devs[IF_COUNT] = {
    /*
     * Do not change these numbers!  They govern how drive option
     * index maps to unit and bus.  That mapping is ABI.
     *
     * All controllers used to implement if=T drives need to support
     * if_max_devs[T] units, for any T with if_max_devs[T] != 0.
     * Otherwise, some index values map to "impossible" bus, unit
     * values.
     *
     * For instance, if you change [IF_SCSI] to 255, -drive
     * if=scsi,index=12 no longer means bus=1,unit=5, but
     * bus=0,unit=12.  With an lsi53c895a controller (7 units max),
     * the drive can't be set up.  Regression.
     */
    [IF_IDE] = 2,
    [IF_SCSI] = 7,
};

/**
 * Boards may call this to offer board-by-board overrides
 * of the default, global values.
 */
void override_max_devs(BlockInterfaceType type, int max_devs)
{
    BlockBackend *blk;
    DriveInfo *dinfo;

    if (max_devs <= 0) {
        return;
    }

    for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
        dinfo = blk_legacy_dinfo(blk);
        if (dinfo->type == type) {
            fprintf(stderr, "Cannot override units-per-bus property of"
                    " the %s interface, because a drive of that type has"
                    " already been added.\n", if_name[type]);
            g_assert_not_reached();
        }
    }

    if_max_devs[type] = max_devs;
}

/*
 * We automatically delete the drive when a device using it gets
 * unplugged.  Questionable feature, but we can't just drop it.
 * Device models call blockdev_mark_auto_del() to schedule the
 * automatic deletion, and generic qdev code calls blockdev_auto_del()
 * when deletion is actually safe.
 */
void blockdev_mark_auto_del(BlockBackend *blk)
{
    DriveInfo *dinfo = blk_legacy_dinfo(blk);
    BlockDriverState *bs = blk_bs(blk);
    AioContext *aio_context;

    if (!dinfo) {
        return;
    }

    if (bs) {
        aio_context = bdrv_get_aio_context(bs);
        aio_context_acquire(aio_context);

        if (bs->job) {
            block_job_cancel(bs->job);
        }

        aio_context_release(aio_context);
    }

    dinfo->auto_del = 1;
}

void blockdev_auto_del(BlockBackend *blk)
{
    DriveInfo *dinfo = blk_legacy_dinfo(blk);

    if (dinfo && dinfo->auto_del) {
        monitor_remove_blk(blk);
        blk_unref(blk);
    }
}

/**
 * Returns the current mapping of how many units per bus
 * a particular interface can support.
 *
 * A positive integer indicates n units per bus.
 * 0 implies the mapping has not been established.
 * -1 indicates an invalid BlockInterfaceType was given.
 */
int drive_get_max_devs(BlockInterfaceType type)
{
    if (type >= IF_IDE && type < IF_COUNT) {
        return if_max_devs[type];
    }

    return -1;
}

static int drive_index_to_bus_id(BlockInterfaceType type, int index)
{
    int max_devs = if_max_devs[type];
    return max_devs ? index / max_devs : 0;
}

static int drive_index_to_unit_id(BlockInterfaceType type, int index)
{
    int max_devs = if_max_devs[type];
    return max_devs ? index % max_devs : index;
}

QemuOpts *drive_def(const char *optstr)
{
    return qemu_opts_parse_noisily(qemu_find_opts("drive"), optstr, false);
}

QemuOpts *drive_add(BlockInterfaceType type, int index, const char *file,
                    const char *optstr)
{
    QemuOpts *opts;

    opts = drive_def(optstr);
    if (!opts) {
        return NULL;
    }
    if (type != IF_DEFAULT) {
        qemu_opt_set(opts, "if", if_name[type], &error_abort);
    }
    if (index >= 0) {
        qemu_opt_set_number(opts, "index", index, &error_abort);
    }
    if (file) {
        qemu_opt_set(opts, "file", file, &error_abort);
    }
    return opts;
}

DriveInfo *drive_get(BlockInterfaceType type, int bus, int unit)
{
    BlockBackend *blk;
    DriveInfo *dinfo;

    for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
        dinfo = blk_legacy_dinfo(blk);
        if (dinfo && dinfo->type == type
            && dinfo->bus == bus && dinfo->unit == unit) {
            return dinfo;
        }
    }

    return NULL;
}

bool drive_check_orphaned(void)
{
    BlockBackend *blk;
    DriveInfo *dinfo;
    bool rs = false;

    for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
        dinfo = blk_legacy_dinfo(blk);
        /* If dinfo->bdrv->dev is NULL, it has no device attached. */
        /* Unless this is a default drive, this may be an oversight. */
        if (!blk_get_attached_dev(blk) && !dinfo->is_default &&
            dinfo->type != IF_NONE) {
            fprintf(stderr, "Warning: Orphaned drive without device: "
                    "id=%s,file=%s,if=%s,bus=%d,unit=%d\n",
                    blk_name(blk), blk_bs(blk) ? blk_bs(blk)->filename : "",
                    if_name[dinfo->type], dinfo->bus, dinfo->unit);
            rs = true;
        }
    }

    return rs;
}

DriveInfo *drive_get_by_index(BlockInterfaceType type, int index)
{
    return drive_get(type,
                     drive_index_to_bus_id(type, index),
                     drive_index_to_unit_id(type, index));
}

int drive_get_max_bus(BlockInterfaceType type)
{
    int max_bus;
    BlockBackend *blk;
    DriveInfo *dinfo;

    max_bus = -1;
    for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
        dinfo = blk_legacy_dinfo(blk);
        if (dinfo && dinfo->type == type && dinfo->bus > max_bus) {
            max_bus = dinfo->bus;
        }
    }
    return max_bus;
}

/* Get a block device.  This should only be used for single-drive devices
   (e.g. SD/Floppy/MTD).  Multi-disk devices (scsi/ide) should use the
   appropriate bus. */
DriveInfo *drive_get_next(BlockInterfaceType type)
{
    static int next_block_unit[IF_COUNT];

    return drive_get(type, 0, next_block_unit[type]++);
}

static void bdrv_format_print(void *opaque, const char *name)
{
    error_printf(" %s", name);
}

typedef struct {
    QEMUBH *bh;
    BlockDriverState *bs;
} BDRVPutRefBH;

static int parse_block_error_action(const char *buf, bool is_read, Error **errp)
{
    if (!strcmp(buf, "ignore")) {
        return BLOCKDEV_ON_ERROR_IGNORE;
    } else if (!is_read && !strcmp(buf, "enospc")) {
        return BLOCKDEV_ON_ERROR_ENOSPC;
    } else if (!strcmp(buf, "stop")) {
        return BLOCKDEV_ON_ERROR_STOP;
    } else if (!strcmp(buf, "report")) {
        return BLOCKDEV_ON_ERROR_REPORT;
    } else {
        error_setg(errp, "'%s' invalid %s error action",
                   buf, is_read ? "read" : "write");
        return -1;
    }
}

static bool parse_stats_intervals(BlockAcctStats *stats, QList *intervals,
                                  Error **errp)
{
    const QListEntry *entry;
    for (entry = qlist_first(intervals); entry; entry = qlist_next(entry)) {
        switch (qobject_type(entry->value)) {

        case QTYPE_QSTRING: {
            unsigned long long length;
            const char *str = qstring_get_str(qobject_to_qstring(entry->value));
            if (parse_uint_full(str, &length, 10) == 0 &&
                length > 0 && length <= UINT_MAX) {
                block_acct_add_interval(stats, (unsigned) length);
            } else {
                error_setg(errp, "Invalid interval length: %s", str);
                return false;
            }
            break;
        }

        case QTYPE_QINT: {
            int64_t length = qint_get_int(qobject_to_qint(entry->value));
            if (length > 0 && length <= UINT_MAX) {
                block_acct_add_interval(stats, (unsigned) length);
            } else {
                error_setg(errp, "Invalid interval length: %" PRId64, length);
                return false;
            }
            break;
        }

        default:
            error_setg(errp, "The specification of stats-intervals is invalid");
            return false;
        }
    }
    return true;
}

typedef enum { MEDIA_DISK, MEDIA_CDROM } DriveMediaType;

/* All parameters but @opts are optional and may be set to NULL. */
static void extract_common_blockdev_options(QemuOpts *opts, int *bdrv_flags,
    const char **throttling_group, ThrottleConfig *throttle_cfg,
    BlockdevDetectZeroesOptions *detect_zeroes, Error **errp)
{
    const char *discard;
    Error *local_error = NULL;
    const char *aio;

    if (bdrv_flags) {
        if (qemu_opt_get_bool(opts, "copy-on-read", false)) {
            *bdrv_flags |= BDRV_O_COPY_ON_READ;
        }

        if ((discard = qemu_opt_get(opts, "discard")) != NULL) {
            if (bdrv_parse_discard_flags(discard, bdrv_flags) != 0) {
                error_setg(errp, "Invalid discard option");
                return;
            }
        }

        if ((aio = qemu_opt_get(opts, "aio")) != NULL) {
            if (!strcmp(aio, "native")) {
                *bdrv_flags |= BDRV_O_NATIVE_AIO;
            } else if (!strcmp(aio, "threads")) {
                /* this is the default */
            } else {
                error_setg(errp, "invalid aio option");
                return;
            }
        }
    }

    /* disk I/O throttling */
    if (throttling_group) {
        *throttling_group = qemu_opt_get(opts, "throttling.group");
    }

    if (throttle_cfg) {
        throttle_config_init(throttle_cfg);
        throttle_cfg->buckets[THROTTLE_BPS_TOTAL].avg =
            qemu_opt_get_number(opts, "throttling.bps-total", 0);
        throttle_cfg->buckets[THROTTLE_BPS_READ].avg =
            qemu_opt_get_number(opts, "throttling.bps-read", 0);
        throttle_cfg->buckets[THROTTLE_BPS_WRITE].avg =
            qemu_opt_get_number(opts, "throttling.bps-write", 0);
        throttle_cfg->buckets[THROTTLE_OPS_TOTAL].avg =
            qemu_opt_get_number(opts, "throttling.iops-total", 0);
        throttle_cfg->buckets[THROTTLE_OPS_READ].avg =
            qemu_opt_get_number(opts, "throttling.iops-read", 0);
        throttle_cfg->buckets[THROTTLE_OPS_WRITE].avg =
            qemu_opt_get_number(opts, "throttling.iops-write", 0);

        throttle_cfg->buckets[THROTTLE_BPS_TOTAL].max =
            qemu_opt_get_number(opts, "throttling.bps-total-max", 0);
        throttle_cfg->buckets[THROTTLE_BPS_READ].max =
            qemu_opt_get_number(opts, "throttling.bps-read-max", 0);
        throttle_cfg->buckets[THROTTLE_BPS_WRITE].max =
            qemu_opt_get_number(opts, "throttling.bps-write-max", 0);
        throttle_cfg->buckets[THROTTLE_OPS_TOTAL].max =
            qemu_opt_get_number(opts, "throttling.iops-total-max", 0);
        throttle_cfg->buckets[THROTTLE_OPS_READ].max =
            qemu_opt_get_number(opts, "throttling.iops-read-max", 0);
        throttle_cfg->buckets[THROTTLE_OPS_WRITE].max =
            qemu_opt_get_number(opts, "throttling.iops-write-max", 0);

        throttle_cfg->buckets[THROTTLE_BPS_TOTAL].burst_length =
            qemu_opt_get_number(opts, "throttling.bps-total-max-length", 1);
        throttle_cfg->buckets[THROTTLE_BPS_READ].burst_length =
            qemu_opt_get_number(opts, "throttling.bps-read-max-length", 1);
        throttle_cfg->buckets[THROTTLE_BPS_WRITE].burst_length =
            qemu_opt_get_number(opts, "throttling.bps-write-max-length", 1);
        throttle_cfg->buckets[THROTTLE_OPS_TOTAL].burst_length =
            qemu_opt_get_number(opts, "throttling.iops-total-max-length", 1);
        throttle_cfg->buckets[THROTTLE_OPS_READ].burst_length =
            qemu_opt_get_number(opts, "throttling.iops-read-max-length", 1);
        throttle_cfg->buckets[THROTTLE_OPS_WRITE].burst_length =
            qemu_opt_get_number(opts, "throttling.iops-write-max-length", 1);

        throttle_cfg->op_size =
            qemu_opt_get_number(opts, "throttling.iops-size", 0);

        if (!throttle_is_valid(throttle_cfg, errp)) {
            return;
        }
    }

    if (detect_zeroes) {
        *detect_zeroes =
            qapi_enum_parse(BlockdevDetectZeroesOptions_lookup,
                            qemu_opt_get(opts, "detect-zeroes"),
                            BLOCKDEV_DETECT_ZEROES_OPTIONS__MAX,
                            BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF,
                            &local_error);
        if (local_error) {
            error_propagate(errp, local_error);
            return;
        }

        if (bdrv_flags &&
            *detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP &&
            !(*bdrv_flags & BDRV_O_UNMAP))
        {
            error_setg(errp, "setting detect-zeroes to unmap is not allowed "
                             "without setting discard operation to unmap");
            return;
        }
    }
}

/* Takes the ownership of bs_opts */
static BlockBackend *blockdev_init(const char *file, QDict *bs_opts,
                                   Error **errp)
{
    const char *buf;
    int bdrv_flags = 0;
    int on_read_error, on_write_error;
    bool account_invalid, account_failed;
    bool writethrough, read_only;
    BlockBackend *blk;
    BlockDriverState *bs;
    ThrottleConfig cfg;
    int snapshot = 0;
    Error *error = NULL;
    QemuOpts *opts;
    QDict *interval_dict = NULL;
    QList *interval_list = NULL;
    const char *id;
    BlockdevDetectZeroesOptions detect_zeroes =
        BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF;
    const char *throttling_group = NULL;

    /* Check common options by copying from bs_opts to opts, all other options
     * stay in bs_opts for processing by bdrv_open(). */
    id = qdict_get_try_str(bs_opts, "id");
    opts = qemu_opts_create(&qemu_common_drive_opts, id, 1, &error);
    if (error) {
        error_propagate(errp, error);
        goto err_no_opts;
    }

    qemu_opts_absorb_qdict(opts, bs_opts, &error);
    if (error) {
        error_propagate(errp, error);
        goto early_err;
    }

    if (id) {
        qdict_del(bs_opts, "id");
    }

    /* extract parameters */
    snapshot = qemu_opt_get_bool(opts, "snapshot", 0);

    account_invalid = qemu_opt_get_bool(opts, "stats-account-invalid", true);
    account_failed = qemu_opt_get_bool(opts, "stats-account-failed", true);

    writethrough = !qemu_opt_get_bool(opts, BDRV_OPT_CACHE_WB, true);

    id = qemu_opts_id(opts);

    qdict_extract_subqdict(bs_opts, &interval_dict, "stats-intervals.");
    qdict_array_split(interval_dict, &interval_list);

    if (qdict_size(interval_dict) != 0) {
        error_setg(errp, "Invalid option stats-intervals.%s",
                   qdict_first(interval_dict)->key);
        goto early_err;
    }

    extract_common_blockdev_options(opts, &bdrv_flags, &throttling_group, &cfg,
                                    &detect_zeroes, &error);
    if (error) {
        error_propagate(errp, error);
        goto early_err;
    }

    if ((buf = qemu_opt_get(opts, "format")) != NULL) {
        if (is_help_option(buf)) {
            error_printf("Supported formats:");
            bdrv_iterate_format(bdrv_format_print, NULL);
            error_printf("\n");
            goto early_err;
        }

        if (qdict_haskey(bs_opts, "driver")) {
            error_setg(errp, "Cannot specify both 'driver' and 'format'");
            goto early_err;
        }
        qdict_put(bs_opts, "driver", qstring_from_str(buf));
    }

    on_write_error = BLOCKDEV_ON_ERROR_ENOSPC;
    if ((buf = qemu_opt_get(opts, "werror")) != NULL) {
        on_write_error = parse_block_error_action(buf, 0, &error);
        if (error) {
            error_propagate(errp, error);
            goto early_err;
        }
    }

    on_read_error = BLOCKDEV_ON_ERROR_REPORT;
    if ((buf = qemu_opt_get(opts, "rerror")) != NULL) {
        on_read_error = parse_block_error_action(buf, 1, &error);
        if (error) {
            error_propagate(errp, error);
            goto early_err;
        }
    }

    if (snapshot) {
        bdrv_flags |= BDRV_O_SNAPSHOT;
    }

    read_only = qemu_opt_get_bool(opts, BDRV_OPT_READ_ONLY, false);

    /* init */
    if ((!file || !*file) && !qdict_size(bs_opts)) {
        BlockBackendRootState *blk_rs;

        blk = blk_new();
        blk_rs = blk_get_root_state(blk);
        blk_rs->open_flags = bdrv_flags;
        blk_rs->read_only = read_only;
        blk_rs->detect_zeroes = detect_zeroes;

        QDECREF(bs_opts);
    } else {
        if (file && !*file) {
            file = NULL;
        }

        /* bdrv_open() defaults to the values in bdrv_flags (for compatibility
         * with other callers) rather than what we want as the real defaults.
         * Apply the defaults here instead. */
        qdict_set_default_str(bs_opts, BDRV_OPT_CACHE_DIRECT, "off");
        qdict_set_default_str(bs_opts, BDRV_OPT_CACHE_NO_FLUSH, "off");
        qdict_set_default_str(bs_opts, BDRV_OPT_READ_ONLY,
                              read_only ? "on" : "off");
        assert((bdrv_flags & BDRV_O_CACHE_MASK) == 0);

        if (runstate_check(RUN_STATE_INMIGRATE)) {
            bdrv_flags |= BDRV_O_INACTIVE;
        }

        blk = blk_new_open(file, NULL, bs_opts, bdrv_flags, errp);
        if (!blk) {
            goto err_no_bs_opts;
        }
        bs = blk_bs(blk);

        bs->detect_zeroes = detect_zeroes;

        if (bdrv_key_required(bs)) {
            autostart = 0;
        }

        block_acct_init(blk_get_stats(blk), account_invalid, account_failed);

        if (!parse_stats_intervals(blk_get_stats(blk), interval_list, errp)) {
            blk_unref(blk);
            blk = NULL;
            goto err_no_bs_opts;
        }
    }

    /* disk I/O throttling */
    if (throttle_enabled(&cfg)) {
        if (!throttling_group) {
            throttling_group = id;
        }
        blk_io_limits_enable(blk, throttling_group);
        blk_set_io_limits(blk, &cfg);
    }

    blk_set_enable_write_cache(blk, !writethrough);
    blk_set_on_error(blk, on_read_error, on_write_error);

    if (!monitor_add_blk(blk, id, errp)) {
        blk_unref(blk);
        blk = NULL;
        goto err_no_bs_opts;
    }

err_no_bs_opts:
    qemu_opts_del(opts);
    QDECREF(interval_dict);
    QDECREF(interval_list);
    return blk;

early_err:
    qemu_opts_del(opts);
    QDECREF(interval_dict);
    QDECREF(interval_list);
err_no_opts:
    QDECREF(bs_opts);
    return NULL;
}

static QemuOptsList qemu_root_bds_opts;

/* Takes the ownership of bs_opts */
static BlockDriverState *bds_tree_init(QDict *bs_opts, Error **errp)
{
    BlockDriverState *bs;
    QemuOpts *opts;
    Error *local_error = NULL;
    BlockdevDetectZeroesOptions detect_zeroes;
    int bdrv_flags = 0;

    opts = qemu_opts_create(&qemu_root_bds_opts, NULL, 1, errp);
    if (!opts) {
        goto fail;
    }

    qemu_opts_absorb_qdict(opts, bs_opts, &local_error);
    if (local_error) {
        error_propagate(errp, local_error);
        goto fail;
    }

    extract_common_blockdev_options(opts, &bdrv_flags, NULL, NULL,
                                    &detect_zeroes, &local_error);
    if (local_error) {
        error_propagate(errp, local_error);
        goto fail;
    }

    /* bdrv_open() defaults to the values in bdrv_flags (for compatibility
     * with other callers) rather than what we want as the real defaults.
     * Apply the defaults here instead. */
    qdict_set_default_str(bs_opts, BDRV_OPT_CACHE_DIRECT, "off");
    qdict_set_default_str(bs_opts, BDRV_OPT_CACHE_NO_FLUSH, "off");
    qdict_set_default_str(bs_opts, BDRV_OPT_READ_ONLY, "off");

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        bdrv_flags |= BDRV_O_INACTIVE;
    }

    bs = bdrv_open(NULL, NULL, bs_opts, bdrv_flags, errp);
    if (!bs) {
        goto fail_no_bs_opts;
    }

    bs->detect_zeroes = detect_zeroes;

fail_no_bs_opts:
    qemu_opts_del(opts);
    return bs;

fail:
    qemu_opts_del(opts);
    QDECREF(bs_opts);
    return NULL;
}

void blockdev_close_all_bdrv_states(void)
{
    BlockDriverState *bs, *next_bs;

    QTAILQ_FOREACH_SAFE(bs, &monitor_bdrv_states, monitor_list, next_bs) {
        AioContext *ctx = bdrv_get_aio_context(bs);

        aio_context_acquire(ctx);
        bdrv_unref(bs);
        aio_context_release(ctx);
    }
}

/* Iterates over the list of monitor-owned BlockDriverStates */
BlockDriverState *bdrv_next_monitor_owned(BlockDriverState *bs)
{
    return bs ? QTAILQ_NEXT(bs, monitor_list)
              : QTAILQ_FIRST(&monitor_bdrv_states);
}

static void qemu_opt_rename(QemuOpts *opts, const char *from, const char *to,
                            Error **errp)
{
    const char *value;

    value = qemu_opt_get(opts, from);
    if (value) {
        if (qemu_opt_find(opts, to)) {
            error_setg(errp, "'%s' and its alias '%s' can't be used at the "
                       "same time", to, from);
            return;
        }
    }

    /* rename all items in opts */
    while ((value = qemu_opt_get(opts, from))) {
        qemu_opt_set(opts, to, value, &error_abort);
        qemu_opt_unset(opts, from);
    }
}

QemuOptsList qemu_legacy_drive_opts = {
    .name = "drive",
    .head = QTAILQ_HEAD_INITIALIZER(qemu_legacy_drive_opts.head),
    .desc = {
        {
            .name = "bus",
            .type = QEMU_OPT_NUMBER,
            .help = "bus number",
        },{
            .name = "unit",
            .type = QEMU_OPT_NUMBER,
            .help = "unit number (i.e. lun for scsi)",
        },{
            .name = "index",
            .type = QEMU_OPT_NUMBER,
            .help = "index number",
        },{
            .name = "media",
            .type = QEMU_OPT_STRING,
            .help = "media type (disk, cdrom)",
        },{
            .name = "if",
            .type = QEMU_OPT_STRING,
            .help = "interface (ide, scsi, sd, mtd, floppy, pflash, virtio)",
        },{
            .name = "cyls",
            .type = QEMU_OPT_NUMBER,
            .help = "number of cylinders (ide disk geometry)",
        },{
            .name = "heads",
            .type = QEMU_OPT_NUMBER,
            .help = "number of heads (ide disk geometry)",
        },{
            .name = "secs",
            .type = QEMU_OPT_NUMBER,
            .help = "number of sectors (ide disk geometry)",
        },{
            .name = "trans",
            .type = QEMU_OPT_STRING,
            .help = "chs translation (auto, lba, none)",
        },{
            .name = "boot",
            .type = QEMU_OPT_BOOL,
            .help = "(deprecated, ignored)",
        },{
            .name = "addr",
            .type = QEMU_OPT_STRING,
            .help = "pci address (virtio only)",
        },{
            .name = "serial",
            .type = QEMU_OPT_STRING,
            .help = "disk serial number",
        },{
            .name = "file",
            .type = QEMU_OPT_STRING,
            .help = "file name",
        },

        /* Options that are passed on, but have special semantics with -drive */
        {
            .name = BDRV_OPT_READ_ONLY,
            .type = QEMU_OPT_BOOL,
            .help = "open drive file as read-only",
        },{
            .name = "rerror",
            .type = QEMU_OPT_STRING,
            .help = "read error action",
        },{
            .name = "werror",
            .type = QEMU_OPT_STRING,
            .help = "write error action",
        },{
            .name = "copy-on-read",
            .type = QEMU_OPT_BOOL,
            .help = "copy read data from backing file into image file",
        },

        { /* end of list */ }
    },
};

DriveInfo *drive_new(QemuOpts *all_opts, BlockInterfaceType block_default_type)
{
    const char *value;
    BlockBackend *blk;
    DriveInfo *dinfo = NULL;
    QDict *bs_opts;
    QemuOpts *legacy_opts;
    DriveMediaType media = MEDIA_DISK;
    BlockInterfaceType type;
    int cyls, heads, secs, translation;
    int max_devs, bus_id, unit_id, index;
    const char *devaddr;
    const char *werror, *rerror;
    bool read_only = false;
    bool copy_on_read;
    const char *serial;
    const char *filename;
    Error *local_err = NULL;
    int i;

    /* Change legacy command line options into QMP ones */
    static const struct {
        const char *from;
        const char *to;
    } opt_renames[] = {
        { "iops", "throttling.iops-total" },
        { "iops_rd", "throttling.iops-read" },
        { "iops_wr", "throttling.iops-write" },

        { "bps", "throttling.bps-total" },
        { "bps_rd", "throttling.bps-read" },
        { "bps_wr", "throttling.bps-write" },

        { "iops_max", "throttling.iops-total-max" },
        { "iops_rd_max", "throttling.iops-read-max" },
        { "iops_wr_max", "throttling.iops-write-max" },

        { "bps_max", "throttling.bps-total-max" },
        { "bps_rd_max", "throttling.bps-read-max" },
        { "bps_wr_max", "throttling.bps-write-max" },

        { "iops_size", "throttling.iops-size" },

        { "group", "throttling.group" },

        { "readonly", BDRV_OPT_READ_ONLY },
    };

    for (i = 0; i < ARRAY_SIZE(opt_renames); i++) {
        qemu_opt_rename(all_opts, opt_renames[i].from, opt_renames[i].to,
                        &local_err);
        if (local_err) {
            error_report_err(local_err);
            return NULL;
        }
    }

    value = qemu_opt_get(all_opts, "cache");
    if (value) {
        int flags = 0;
        bool writethrough;

        if (bdrv_parse_cache_mode(value, &flags, &writethrough) != 0) {
            error_report("invalid cache option");
            return NULL;
        }

        /* Specific options take precedence */
        if (!qemu_opt_get(all_opts, BDRV_OPT_CACHE_WB)) {
            qemu_opt_set_bool(all_opts, BDRV_OPT_CACHE_WB,
                              !writethrough, &error_abort);
        }
        if (!qemu_opt_get(all_opts, BDRV_OPT_CACHE_DIRECT)) {
            qemu_opt_set_bool(all_opts, BDRV_OPT_CACHE_DIRECT,
                              !!(flags & BDRV_O_NOCACHE), &error_abort);
        }
        if (!qemu_opt_get(all_opts, BDRV_OPT_CACHE_NO_FLUSH)) {
            qemu_opt_set_bool(all_opts, BDRV_OPT_CACHE_NO_FLUSH,
                              !!(flags & BDRV_O_NO_FLUSH), &error_abort);
        }
        qemu_opt_unset(all_opts, "cache");
    }

    /* Get a QDict for processing the options */
    bs_opts = qdict_new();
    qemu_opts_to_qdict(all_opts, bs_opts);

    legacy_opts = qemu_opts_create(&qemu_legacy_drive_opts, NULL, 0,
                                   &error_abort);
    qemu_opts_absorb_qdict(legacy_opts, bs_opts, &local_err);
    if (local_err) {
        error_report_err(local_err);
        goto fail;
    }

    /* Deprecated option boot=[on|off] */
    if (qemu_opt_get(legacy_opts, "boot") != NULL) {
        fprintf(stderr, "qemu-kvm: boot=on|off is deprecated and will be "
                "ignored. Future versions will reject this parameter. Please "
                "update your scripts.\n");
    }

    /* Media type */
    value = qemu_opt_get(legacy_opts, "media");
    if (value) {
        if (!strcmp(value, "disk")) {
            media = MEDIA_DISK;
        } else if (!strcmp(value, "cdrom")) {
            media = MEDIA_CDROM;
            read_only = true;
        } else {
            error_report("'%s' invalid media", value);
            goto fail;
        }
    }

    /* copy-on-read is disabled with a warning for read-only devices */
    read_only |= qemu_opt_get_bool(legacy_opts, BDRV_OPT_READ_ONLY, false);
    copy_on_read = qemu_opt_get_bool(legacy_opts, "copy-on-read", false);

    if (read_only && copy_on_read) {
        error_report("warning: disabling copy-on-read on read-only drive");
        copy_on_read = false;
    }

    qdict_put(bs_opts, BDRV_OPT_READ_ONLY,
              qstring_from_str(read_only ? "on" : "off"));
    qdict_put(bs_opts, "copy-on-read",
              qstring_from_str(copy_on_read ? "on" : "off"));

    /* Controller type */
    value = qemu_opt_get(legacy_opts, "if");
    if (value) {
        for (type = 0;
             type < IF_COUNT && strcmp(value, if_name[type]);
             type++) {
        }
        if (type == IF_COUNT) {
            error_report("unsupported bus type '%s'", value);
            goto fail;
        }
    } else {
        type = block_default_type;
    }

    /* Geometry */
    cyls = qemu_opt_get_number(legacy_opts, "cyls", 0);
    heads = qemu_opt_get_number(legacy_opts, "heads", 0);
    secs = qemu_opt_get_number(legacy_opts, "secs", 0);

    if (cyls || heads || secs) {
        if (cyls < 1) {
            error_report("invalid physical cyls number");
            goto fail;
        }
        if (heads < 1) {
            error_report("invalid physical heads number");
            goto fail;
        }
        if (secs < 1) {
            error_report("invalid physical secs number");
            goto fail;
        }
    }

    translation = BIOS_ATA_TRANSLATION_AUTO;
    value = qemu_opt_get(legacy_opts, "trans");
    if (value != NULL) {
        if (!cyls) {
            error_report("'%s' trans must be used with cyls, heads and secs",
                         value);
            goto fail;
        }
        if (!strcmp(value, "none")) {
            translation = BIOS_ATA_TRANSLATION_NONE;
        } else if (!strcmp(value, "lba")) {
            translation = BIOS_ATA_TRANSLATION_LBA;
        } else if (!strcmp(value, "large")) {
            translation = BIOS_ATA_TRANSLATION_LARGE;
        } else if (!strcmp(value, "rechs")) {
            translation = BIOS_ATA_TRANSLATION_RECHS;
        } else if (!strcmp(value, "auto")) {
            translation = BIOS_ATA_TRANSLATION_AUTO;
        } else {
            error_report("'%s' invalid translation type", value);
            goto fail;
        }
    }

    if (media == MEDIA_CDROM) {
        if (cyls || secs || heads) {
            error_report("CHS can't be set with media=cdrom");
            goto fail;
        }
    }

    /* Device address specified by bus/unit or index.
     * If none was specified, try to find the first free one. */
    bus_id = qemu_opt_get_number(legacy_opts, "bus", 0);
    unit_id = qemu_opt_get_number(legacy_opts, "unit", -1);
    index = qemu_opt_get_number(legacy_opts, "index", -1);

    max_devs = if_max_devs[type];

    if (index != -1) {
        if (bus_id != 0 || unit_id != -1) {
            error_report("index cannot be used with bus and unit");
            goto fail;
        }
        bus_id = drive_index_to_bus_id(type, index);
        unit_id = drive_index_to_unit_id(type, index);
    }

    if (unit_id == -1) {
        unit_id = 0;
        while (drive_get(type, bus_id, unit_id) != NULL) {
            unit_id++;
            if (max_devs && unit_id >= max_devs) {
                unit_id -= max_devs;
                bus_id++;
            }
        }
    }

    if (max_devs && unit_id >= max_devs) {
        error_report("unit %d too big (max is %d)", unit_id, max_devs - 1);
        goto fail;
    }

    if (drive_get(type, bus_id, unit_id) != NULL) {
        error_report("drive with bus=%d, unit=%d (index=%d) exists",
                     bus_id, unit_id, index);
        goto fail;
    }

    /* Serial number */
    serial = qemu_opt_get(legacy_opts, "serial");

    /* no id supplied -> create one */
    if (qemu_opts_id(all_opts) == NULL) {
        char *new_id;
        const char *mediastr = "";
        if (type == IF_IDE || type == IF_SCSI) {
            mediastr = (media == MEDIA_CDROM) ? "-cd" : "-hd";
        }
        if (max_devs) {
            new_id = g_strdup_printf("%s%i%s%i", if_name[type], bus_id,
                                     mediastr, unit_id);
        } else {
            new_id = g_strdup_printf("%s%s%i", if_name[type],
                                     mediastr, unit_id);
        }
        qdict_put(bs_opts, "id", qstring_from_str(new_id));
        g_free(new_id);
    }

    /* Add virtio block device */
    devaddr = qemu_opt_get(legacy_opts, "addr");
    if (devaddr && type != IF_VIRTIO) {
        error_report("addr is not supported by this bus type");
        goto fail;
    }

    if (type == IF_VIRTIO) {
        QemuOpts *devopts;
        devopts = qemu_opts_create(qemu_find_opts("device"), NULL, 0,
                                   &error_abort);
        if (arch_type == QEMU_ARCH_S390X) {
            qemu_opt_set(devopts, "driver", "virtio-blk-ccw", &error_abort);
        } else {
            qemu_opt_set(devopts, "driver", "virtio-blk-pci", &error_abort);
        }
        qemu_opt_set(devopts, "drive", qdict_get_str(bs_opts, "id"),
                     &error_abort);
        if (devaddr) {
            qemu_opt_set(devopts, "addr", devaddr, &error_abort);
        }
    }

    filename = qemu_opt_get(legacy_opts, "file");

    /* Check werror/rerror compatibility with if=... */
    werror = qemu_opt_get(legacy_opts, "werror");
    if (werror != NULL) {
        if (type != IF_IDE && type != IF_SCSI && type != IF_VIRTIO &&
            type != IF_NONE) {
            error_report("werror is not supported by this bus type");
            goto fail;
        }
        qdict_put(bs_opts, "werror", qstring_from_str(werror));
    }

    rerror = qemu_opt_get(legacy_opts, "rerror");
    if (rerror != NULL) {
        if (type != IF_IDE && type != IF_VIRTIO && type != IF_SCSI &&
            type != IF_NONE) {
            error_report("rerror is not supported by this bus type");
            goto fail;
        }
        qdict_put(bs_opts, "rerror", qstring_from_str(rerror));
    }

    /* Actual block device init: Functionality shared with blockdev-add */
    blk = blockdev_init(filename, bs_opts, &local_err);
    bs_opts = NULL;
    if (!blk) {
        if (local_err) {
            error_report_err(local_err);
        }
        goto fail;
    } else {
        assert(!local_err);
    }

    /* Create legacy DriveInfo */
    dinfo = g_malloc0(sizeof(*dinfo));
    dinfo->opts = all_opts;

    dinfo->cyls = cyls;
    dinfo->heads = heads;
    dinfo->secs = secs;
    dinfo->trans = translation;

    dinfo->type = type;
    dinfo->bus = bus_id;
    dinfo->unit = unit_id;
    dinfo->devaddr = devaddr;
    dinfo->serial = g_strdup(serial);

    blk_set_legacy_dinfo(blk, dinfo);

    switch (type) {
    case IF_IDE:
    case IF_SCSI:
    case IF_XEN:
    case IF_NONE:
        dinfo->media_cd = media == MEDIA_CDROM;
        break;
    default:
        break;
    }

fail:
    qemu_opts_del(legacy_opts);
    QDECREF(bs_opts);
    return dinfo;
}

static BlockDriverState *qmp_get_root_bs(const char *name, Error **errp)
{
    BlockDriverState *bs;

    bs = bdrv_lookup_bs(name, name, errp);
    if (bs == NULL) {
        return NULL;
    }

    if (!bdrv_is_root_node(bs)) {
        error_setg(errp, "Need a root block node");
        return NULL;
    }

    if (!bdrv_is_inserted(bs)) {
        error_setg(errp, "Device has no medium");
        return NULL;
    }

    return bs;
}

static BlockBackend *qmp_get_blk(const char *blk_name, const char *qdev_id,
                                 Error **errp)
{
    BlockBackend *blk;

    if (!blk_name == !qdev_id) {
        error_setg(errp, "Need exactly one of 'device' and 'id'");
        return NULL;
    }

    if (qdev_id) {
        blk = blk_by_qdev_id(qdev_id, errp);
    } else {
        blk = blk_by_name(blk_name);
        if (blk == NULL) {
            error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
                      "Device '%s' not found", blk_name);
        }
    }

    return blk;
}

void hmp_commit(Monitor *mon, const QDict *qdict)
{
    const char *device = qdict_get_str(qdict, "device");
    BlockBackend *blk;
    int ret;

    if (!strcmp(device, "all")) {
        ret = blk_commit_all();
    } else {
        BlockDriverState *bs;
        AioContext *aio_context;

        blk = blk_by_name(device);
        if (!blk) {
            monitor_printf(mon, "Device '%s' not found\n", device);
            return;
        }
        if (!blk_is_available(blk)) {
            monitor_printf(mon, "Device '%s' has no medium\n", device);
            return;
        }

        bs = blk_bs(blk);
        aio_context = bdrv_get_aio_context(bs);
        aio_context_acquire(aio_context);

        ret = bdrv_commit(bs);

        aio_context_release(aio_context);
    }
    if (ret < 0) {
        monitor_printf(mon, "'commit' error for '%s': %s\n", device,
                       strerror(-ret));
    }
}

static void blockdev_do_action(TransactionAction *action, Error **errp)
{
    TransactionActionList list;

    list.value = action;
    list.next = NULL;
    qmp_transaction(&list, false, NULL, errp);
}

void qmp_blockdev_snapshot_sync(bool has_device, const char *device,
                                bool has_node_name, const char *node_name,
                                const char *snapshot_file,
                                bool has_snapshot_node_name,
                                const char *snapshot_node_name,
                                bool has_format, const char *format,
                                bool has_mode, NewImageMode mode, Error **errp)
{
    BlockdevSnapshotSync snapshot = {
        .has_device = has_device,
        .device = (char *) device,
        .has_node_name = has_node_name,
        .node_name = (char *) node_name,
        .snapshot_file = (char *) snapshot_file,
        .has_snapshot_node_name = has_snapshot_node_name,
        .snapshot_node_name = (char *) snapshot_node_name,
        .has_format = has_format,
        .format = (char *) format,
        .has_mode = has_mode,
        .mode = mode,
    };
    TransactionAction action = {
        .type = TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_SYNC,
        .u.blockdev_snapshot_sync.data = &snapshot,
    };
    blockdev_do_action(&action, errp);
}

void qmp_blockdev_snapshot(const char *node, const char *overlay,
                           Error **errp)
{
    BlockdevSnapshot snapshot_data = {
        .node = (char *) node,
        .overlay = (char *) overlay
    };
    TransactionAction action = {
        .type = TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT,
        .u.blockdev_snapshot.data = &snapshot_data,
    };
    blockdev_do_action(&action, errp);
}

void qmp_blockdev_snapshot_internal_sync(const char *device,
                                         const char *name,
                                         Error **errp)
{
    BlockdevSnapshotInternal snapshot = {
        .device = (char *) device,
        .name = (char *) name
    };
    TransactionAction action = {
        .type = TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_INTERNAL_SYNC,
        .u.blockdev_snapshot_internal_sync.data = &snapshot,
    };
    blockdev_do_action(&action, errp);
}

SnapshotInfo *qmp_blockdev_snapshot_delete_internal_sync(const char *device,
                                                         bool has_id,
                                                         const char *id,
                                                         bool has_name,
                                                         const char *name,
                                                         Error **errp)
{
    BlockDriverState *bs;
    AioContext *aio_context;
    QEMUSnapshotInfo sn;
    Error *local_err = NULL;
    SnapshotInfo *info = NULL;
    int ret;

    bs = qmp_get_root_bs(device, errp);
    if (!bs) {
        return NULL;
    }
    aio_context = bdrv_get_aio_context(bs);
    aio_context_acquire(aio_context);

    if (!has_id) {
        id = NULL;
    }

    if (!has_name) {
        name = NULL;
    }

    if (!id && !name) {
        error_setg(errp, "Name or id must be provided");
        goto out_aio_context;
    }

    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_INTERNAL_SNAPSHOT_DELETE, errp)) {
        goto out_aio_context;
    }

    ret = bdrv_snapshot_find_by_id_and_name(bs, id, name, &sn, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto out_aio_context;
    }
    if (!ret) {
        error_setg(errp,
                   "Snapshot with id '%s' and name '%s' does not exist on "
                   "device '%s'",
                   STR_OR_NULL(id), STR_OR_NULL(name), device);
        goto out_aio_context;
    }

    bdrv_snapshot_delete(bs, id, name, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto out_aio_context;
    }

    aio_context_release(aio_context);

    info = g_new0(SnapshotInfo, 1);
    info->id = g_strdup(sn.id_str);
    info->name = g_strdup(sn.name);
    info->date_nsec = sn.date_nsec;
    info->date_sec = sn.date_sec;
    info->vm_state_size = sn.vm_state_size;
    info->vm_clock_nsec = sn.vm_clock_nsec % 1000000000;
    info->vm_clock_sec = sn.vm_clock_nsec / 1000000000;

    return info;

out_aio_context:
    aio_context_release(aio_context);
    return NULL;
}

/**
 * block_dirty_bitmap_lookup:
 * Return a dirty bitmap (if present), after validating
 * the node reference and bitmap names.
 *
 * @node: The name of the BDS node to search for bitmaps
 * @name: The name of the bitmap to search for
 * @pbs: Output pointer for BDS lookup, if desired. Can be NULL.
 * @paio: Output pointer for aio_context acquisition, if desired. Can be NULL.
 * @errp: Output pointer for error information. Can be NULL.
 *
 * @return: A bitmap object on success, or NULL on failure.
 */
static BdrvDirtyBitmap *block_dirty_bitmap_lookup(const char *node,
                                                  const char *name,
                                                  BlockDriverState **pbs,
                                                  AioContext **paio,
                                                  Error **errp)
{
    BlockDriverState *bs;
    BdrvDirtyBitmap *bitmap;
    AioContext *aio_context;

    if (!node) {
        error_setg(errp, "Node cannot be NULL");
        return NULL;
    }
    if (!name) {
        error_setg(errp, "Bitmap name cannot be NULL");
        return NULL;
    }
    bs = bdrv_lookup_bs(node, node, NULL);
    if (!bs) {
        error_setg(errp, "Node '%s' not found", node);
        return NULL;
    }

    aio_context = bdrv_get_aio_context(bs);
    aio_context_acquire(aio_context);

    bitmap = bdrv_find_dirty_bitmap(bs, name);
    if (!bitmap) {
        error_setg(errp, "Dirty bitmap '%s' not found", name);
        goto fail;
    }

    if (pbs) {
        *pbs = bs;
    }
    if (paio) {
        *paio = aio_context;
    } else {
        aio_context_release(aio_context);
    }

    return bitmap;

fail:
    aio_context_release(aio_context);
    return NULL;
}

/* New and old BlockDriverState structs for atomic group operations */

typedef struct BlkActionState BlkActionState;

/**
 * BlkActionOps:
 * Table of operations that define an Action.
 *
 * @instance_size: Size of state struct, in bytes.
 * @prepare: Prepare the work, must NOT be NULL.
 * @commit: Commit the changes, can be NULL.
 * @abort: Abort the changes on fail, can be NULL.
 * @clean: Clean up resources after all transaction actions have called
 *         commit() or abort(). Can be NULL.
 *
 * Only prepare() may fail. In a single transaction, only one of commit() or
 * abort() will be called. clean() will always be called if it is present.
 */
typedef struct BlkActionOps {
    size_t instance_size;
    void (*prepare)(BlkActionState *common, Error **errp);
    void (*commit)(BlkActionState *common);
    void (*abort)(BlkActionState *common);
    void (*clean)(BlkActionState *common);
} BlkActionOps;

/**
 * BlkActionState:
 * Describes one Action's state within a Transaction.
 *
 * @action: QAPI-defined enum identifying which Action to perform.
 * @ops: Table of ActionOps this Action can perform.
 * @block_job_txn: Transaction which this action belongs to.
 * @entry: List membership for all Actions in this Transaction.
 *
 * This structure must be arranged as first member in a subclassed type,
 * assuming that the compiler will also arrange it to the same offsets as the
 * base class.
 */
struct BlkActionState {
    TransactionAction *action;
    const BlkActionOps *ops;
    BlockJobTxn *block_job_txn;
    TransactionProperties *txn_props;
    QSIMPLEQ_ENTRY(BlkActionState) entry;
};

/* internal snapshot private data */
typedef struct InternalSnapshotState {
    BlkActionState common;
    BlockDriverState *bs;
    AioContext *aio_context;
    QEMUSnapshotInfo sn;
    bool created;
} InternalSnapshotState;


static int action_check_completion_mode(BlkActionState *s, Error **errp)
{
    if (s->txn_props->completion_mode != ACTION_COMPLETION_MODE_INDIVIDUAL) {
        error_setg(errp,
                   "Action '%s' does not support Transaction property "
                   "completion-mode = %s",
                   TransactionActionKind_lookup[s->action->type],
                   ActionCompletionMode_lookup[s->txn_props->completion_mode]);
        return -1;
    }
    return 0;
}

static void internal_snapshot_prepare(BlkActionState *common,
                                      Error **errp)
{
    Error *local_err = NULL;
    const char *device;
    const char *name;
    BlockDriverState *bs;
    QEMUSnapshotInfo old_sn, *sn;
    bool ret;
    qemu_timeval tv;
    BlockdevSnapshotInternal *internal;
    InternalSnapshotState *state;
    int ret1;

    g_assert(common->action->type ==
             TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_INTERNAL_SYNC);
    internal = common->action->u.blockdev_snapshot_internal_sync.data;
    state = DO_UPCAST(InternalSnapshotState, common, common);

    /* 1. parse input */
    device = internal->device;
    name = internal->name;

    /* 2. validate the request */
    if (action_check_completion_mode(common, errp) < 0) {
        return;
    }

    bs = qmp_get_root_bs(device, errp);
    if (!bs) {
        return;
    }

    /* AioContext is released in .clean() */
    state->aio_context = bdrv_get_aio_context(bs);
    aio_context_acquire(state->aio_context);

    state->bs = bs;
    bdrv_drained_begin(bs);

    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_INTERNAL_SNAPSHOT, errp)) {
        return;
    }

    if (bdrv_is_read_only(bs)) {
        error_setg(errp, "Device '%s' is read only", device);
        return;
    }

    if (!bdrv_can_snapshot(bs)) {
        error_setg(errp, "Block format '%s' used by device '%s' "
                   "does not support internal snapshots",
                   bs->drv->format_name, device);
        return;
    }

    if (!strlen(name)) {
        error_setg(errp, "Name is empty");
        return;
    }

    /* check whether a snapshot with that name already exists */
    ret = bdrv_snapshot_find_by_id_and_name(bs, NULL, name, &old_sn,
                                            &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    } else if (ret) {
        error_setg(errp,
                   "Snapshot with name '%s' already exists on device '%s'",
                   name, device);
        return;
    }

    /* 3. take the snapshot */
    sn = &state->sn;
    pstrcpy(sn->name, sizeof(sn->name), name);
    qemu_gettimeofday(&tv);
    sn->date_sec = tv.tv_sec;
    sn->date_nsec = tv.tv_usec * 1000;
    sn->vm_clock_nsec = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);

    ret1 = bdrv_snapshot_create(bs, sn);
    if (ret1 < 0) {
        error_setg_errno(errp, -ret1,
                         "Failed to create snapshot '%s' on device '%s'",
                         name, device);
        return;
    }

    /* 4. success: record that the snapshot was created */
    state->created = true;
}

static void internal_snapshot_abort(BlkActionState *common)
{
    InternalSnapshotState *state =
        DO_UPCAST(InternalSnapshotState, common, common);
    BlockDriverState *bs = state->bs;
    QEMUSnapshotInfo *sn = &state->sn;
    Error *local_error = NULL;

    if (!state->created) {
        return;
    }

    if (bdrv_snapshot_delete(bs, sn->id_str, sn->name, &local_error) < 0) {
        error_reportf_err(local_error,
                          "Failed to delete snapshot with id '%s' and "
                          "name '%s' on device '%s' in abort: ",
                          sn->id_str, sn->name,
                          bdrv_get_device_name(bs));
    }
}

static void internal_snapshot_clean(BlkActionState *common)
{
    InternalSnapshotState *state = DO_UPCAST(InternalSnapshotState,
                                             common, common);

    if (state->aio_context) {
        if (state->bs) {
            bdrv_drained_end(state->bs);
        }
        aio_context_release(state->aio_context);
    }
}

/* external snapshot private data */
typedef struct ExternalSnapshotState {
    BlkActionState common;
    BlockDriverState *old_bs;
    BlockDriverState *new_bs;
    AioContext *aio_context;
} ExternalSnapshotState;

static void external_snapshot_prepare(BlkActionState *common,
                                      Error **errp)
{
    int flags = 0;
    QDict *options = NULL;
    Error *local_err = NULL;
    /* Device and node name of the image to generate the snapshot from */
    const char *device;
    const char *node_name;
    /* Reference to the new image (for 'blockdev-snapshot') */
    const char *snapshot_ref;
    /* File name of the new image (for 'blockdev-snapshot-sync') */
    const char *new_image_file;
    ExternalSnapshotState *state =
        DO_UPCAST(ExternalSnapshotState, common, common);
    TransactionAction *action = common->action;

    /* 'blockdev-snapshot' and 'blockdev-snapshot-sync' have similar
     * purpose but a different set of parameters */
    switch (action->type) {
    case TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT:
        {
            BlockdevSnapshot *s = action->u.blockdev_snapshot.data;
            device = s->node;
            node_name = s->node;
            new_image_file = NULL;
            snapshot_ref = s->overlay;
        }
        break;
    case TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_SYNC:
        {
            BlockdevSnapshotSync *s = action->u.blockdev_snapshot_sync.data;
            device = s->has_device ? s->device : NULL;
            node_name = s->has_node_name ? s->node_name : NULL;
            new_image_file = s->snapshot_file;
            snapshot_ref = NULL;
        }
        break;
    default:
        g_assert_not_reached();
    }

    /* start processing */
    if (action_check_completion_mode(common, errp) < 0) {
        return;
    }

    state->old_bs = bdrv_lookup_bs(device, node_name, errp);
    if (!state->old_bs) {
        return;
    }

    /* Acquire AioContext now so any threads operating on old_bs stop */
    state->aio_context = bdrv_get_aio_context(state->old_bs);
    aio_context_acquire(state->aio_context);
    bdrv_drained_begin(state->old_bs);

    if (!bdrv_is_inserted(state->old_bs)) {
        error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, device);
        return;
    }

    if (bdrv_op_is_blocked(state->old_bs,
                           BLOCK_OP_TYPE_EXTERNAL_SNAPSHOT, errp)) {
        return;
    }

    if (!bdrv_is_read_only(state->old_bs)) {
        if (bdrv_flush(state->old_bs)) {
            error_setg(errp, QERR_IO_ERROR);
            return;
        }
    }

    if (!bdrv_is_first_non_filter(state->old_bs)) {
        error_setg(errp, QERR_FEATURE_DISABLED, "snapshot");
        return;
    }

    if (action->type == TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_SYNC) {
        BlockdevSnapshotSync *s = action->u.blockdev_snapshot_sync.data;
        const char *format = s->has_format ? s->format : "qcow2";
        enum NewImageMode mode;
        const char *snapshot_node_name =
            s->has_snapshot_node_name ? s->snapshot_node_name : NULL;

        if (node_name && !snapshot_node_name) {
            error_setg(errp, "New snapshot node name missing");
            return;
        }

        if (snapshot_node_name &&
            bdrv_lookup_bs(snapshot_node_name, snapshot_node_name, NULL)) {
            error_setg(errp, "New snapshot node name already in use");
            return;
        }

        flags = state->old_bs->open_flags;
        flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING | BDRV_O_COPY_ON_READ);

        /* create new image w/backing file */
        mode = s->has_mode ? s->mode : NEW_IMAGE_MODE_ABSOLUTE_PATHS;
        if (mode != NEW_IMAGE_MODE_EXISTING) {
            int64_t size = bdrv_getlength(state->old_bs);
            if (size < 0) {
                error_setg_errno(errp, -size, "bdrv_getlength failed");
                return;
            }
            bdrv_img_create(new_image_file, format,
                            state->old_bs->filename,
                            state->old_bs->drv->format_name,
                            NULL, size, flags, &local_err, false);
            if (local_err) {
                error_propagate(errp, local_err);
                return;
            }
        }

        options = qdict_new();
        if (s->has_snapshot_node_name) {
            qdict_put(options, "node-name",
                      qstring_from_str(snapshot_node_name));
        }
        qdict_put(options, "driver", qstring_from_str(format));

        flags |= BDRV_O_NO_BACKING;
    }

    state->new_bs = bdrv_open(new_image_file, snapshot_ref, options, flags,
                              errp);
    /* We will manually add the backing_hd field to the bs later */
    if (!state->new_bs) {
        return;
    }

    if (bdrv_has_blk(state->new_bs)) {
        error_setg(errp, "The snapshot is already in use");
        return;
    }

    if (bdrv_op_is_blocked(state->new_bs, BLOCK_OP_TYPE_EXTERNAL_SNAPSHOT,
                           errp)) {
        return;
    }

    if (state->new_bs->backing != NULL) {
        error_setg(errp, "The snapshot already has a backing image");
        return;
    }

    if (!state->new_bs->drv->supports_backing) {
        error_setg(errp, "The snapshot does not support backing images");
    }
}

static void external_snapshot_commit(BlkActionState *common)
{
    ExternalSnapshotState *state =
        DO_UPCAST(ExternalSnapshotState, common, common);

    bdrv_set_aio_context(state->new_bs, state->aio_context);

    /* This removes our old bs and adds the new bs */
    bdrv_append(state->new_bs, state->old_bs);
    /* We don't need (or want) to use the transactional
     * bdrv_reopen_multiple() across all the entries at once, because we
     * don't want to abort all of them if one of them fails the reopen */
    if (!state->old_bs->copy_on_read) {
        bdrv_reopen(state->old_bs, state->old_bs->open_flags & ~BDRV_O_RDWR,
                    NULL);
    }
}

static void external_snapshot_abort(BlkActionState *common)
{
    ExternalSnapshotState *state =
        DO_UPCAST(ExternalSnapshotState, common, common);
    if (state->new_bs) {
        bdrv_unref(state->new_bs);
    }
}

static void external_snapshot_clean(BlkActionState *common)
{
    ExternalSnapshotState *state =
        DO_UPCAST(ExternalSnapshotState, common, common);
    if (state->aio_context) {
        bdrv_drained_end(state->old_bs);
        aio_context_release(state->aio_context);
    }
}

typedef struct DriveBackupState {
    BlkActionState common;
    BlockDriverState *bs;
    AioContext *aio_context;
    BlockJob *job;
} DriveBackupState;

static void do_drive_backup(DriveBackup *backup, BlockJobTxn *txn,
                            Error **errp);

static void drive_backup_prepare(BlkActionState *common, Error **errp)
{
    DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common);
    BlockDriverState *bs;
    DriveBackup *backup;
    Error *local_err = NULL;

    assert(common->action->type == TRANSACTION_ACTION_KIND_DRIVE_BACKUP);
    backup = common->action->u.drive_backup.data;

    bs = qmp_get_root_bs(backup->device, errp);
    if (!bs) {
        return;
    }

    /* AioContext is released in .clean() */
    state->aio_context = bdrv_get_aio_context(bs);
    aio_context_acquire(state->aio_context);
    bdrv_drained_begin(bs);
    state->bs = bs;

    do_drive_backup(backup, common->block_job_txn, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    state->job = state->bs->job;
}

static void drive_backup_abort(BlkActionState *common)
{
    DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common);
    BlockDriverState *bs = state->bs;

    /* Only cancel if it's the job we started */
    if (bs && bs->job && bs->job == state->job) {
        block_job_cancel_sync(bs->job);
    }
}

static void drive_backup_clean(BlkActionState *common)
{
    DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common);

    if (state->aio_context) {
        bdrv_drained_end(state->bs);
        aio_context_release(state->aio_context);
    }
}

typedef struct BlockdevBackupState {
    BlkActionState common;
    BlockDriverState *bs;
    BlockJob *job;
    AioContext *aio_context;
} BlockdevBackupState;

static void do_blockdev_backup(BlockdevBackup *backup, BlockJobTxn *txn,
                               Error **errp);

static void blockdev_backup_prepare(BlkActionState *common, Error **errp)
{
    BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common);
    BlockdevBackup *backup;
    BlockDriverState *bs, *target;
    Error *local_err = NULL;

    assert(common->action->type == TRANSACTION_ACTION_KIND_BLOCKDEV_BACKUP);
    backup = common->action->u.blockdev_backup.data;

    bs = qmp_get_root_bs(backup->device, errp);
    if (!bs) {
        return;
    }

    target = bdrv_lookup_bs(backup->target, backup->target, errp);
    if (!target) {
        return;
    }

    /* AioContext is released in .clean() */
    state->aio_context = bdrv_get_aio_context(bs);
    if (state->aio_context != bdrv_get_aio_context(target)) {
        state->aio_context = NULL;
        error_setg(errp, "Backup between two IO threads is not implemented");
        return;
    }
    aio_context_acquire(state->aio_context);
    state->bs = bs;
    bdrv_drained_begin(state->bs);

    do_blockdev_backup(backup, common->block_job_txn, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    state->job = state->bs->job;
}

static void blockdev_backup_abort(BlkActionState *common)
{
    BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common);
    BlockDriverState *bs = state->bs;

    /* Only cancel if it's the job we started */
    if (bs && bs->job && bs->job == state->job) {
        block_job_cancel_sync(bs->job);
    }
}

static void blockdev_backup_clean(BlkActionState *common)
{
    BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common);

    if (state->aio_context) {
        bdrv_drained_end(state->bs);
        aio_context_release(state->aio_context);
    }
}

typedef struct BlockDirtyBitmapState {
    BlkActionState common;
    BdrvDirtyBitmap *bitmap;
    BlockDriverState *bs;
    AioContext *aio_context;
    HBitmap *backup;
    bool prepared;
} BlockDirtyBitmapState;

static void block_dirty_bitmap_add_prepare(BlkActionState *common,
                                           Error **errp)
{
    Error *local_err = NULL;
    BlockDirtyBitmapAdd *action;
    BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
                                             common, common);

    if (action_check_completion_mode(common, errp) < 0) {
(action_check_completion_mode(common, errp) < 0) { 2012 return; 2013 } 2014 2015 action = common->action->u.block_dirty_bitmap_add.data; 2016 /* AIO context taken and released within qmp_block_dirty_bitmap_add */ 2017 qmp_block_dirty_bitmap_add(action->node, action->name, 2018 action->has_granularity, action->granularity, 2019 &local_err); 2020 2021 if (!local_err) { 2022 state->prepared = true; 2023 } else { 2024 error_propagate(errp, local_err); 2025 } 2026 } 2027 2028 static void block_dirty_bitmap_add_abort(BlkActionState *common) 2029 { 2030 BlockDirtyBitmapAdd *action; 2031 BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState, 2032 common, common); 2033 2034 action = common->action->u.block_dirty_bitmap_add.data; 2035 /* Should not be able to fail: IF the bitmap was added via .prepare(), 2036 * then the node reference and bitmap name must have been valid. 2037 */ 2038 if (state->prepared) { 2039 qmp_block_dirty_bitmap_remove(action->node, action->name, &error_abort); 2040 } 2041 } 2042 2043 static void block_dirty_bitmap_clear_prepare(BlkActionState *common, 2044 Error **errp) 2045 { 2046 BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState, 2047 common, common); 2048 BlockDirtyBitmap *action; 2049 2050 if (action_check_completion_mode(common, errp) < 0) { 2051 return; 2052 } 2053 2054 action = common->action->u.block_dirty_bitmap_clear.data; 2055 state->bitmap = block_dirty_bitmap_lookup(action->node, 2056 action->name, 2057 &state->bs, 2058 &state->aio_context, 2059 errp); 2060 if (!state->bitmap) { 2061 return; 2062 } 2063 2064 if (bdrv_dirty_bitmap_frozen(state->bitmap)) { 2065 error_setg(errp, "Cannot modify a frozen bitmap"); 2066 return; 2067 } else if (!bdrv_dirty_bitmap_enabled(state->bitmap)) { 2068 error_setg(errp, "Cannot clear a disabled bitmap"); 2069 return; 2070 } 2071 2072 bdrv_clear_dirty_bitmap(state->bitmap, &state->backup); 2073 /* AioContext is released in .clean() */ 2074 } 2075 2076 static void block_dirty_bitmap_clear_abort(BlkActionState *common) 2077 { 2078 BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState, 2079 common, common); 2080 2081 bdrv_undo_clear_dirty_bitmap(state->bitmap, state->backup); 2082 } 2083 2084 static void block_dirty_bitmap_clear_commit(BlkActionState *common) 2085 { 2086 BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState, 2087 common, common); 2088 2089 hbitmap_free(state->backup); 2090 } 2091 2092 static void block_dirty_bitmap_clear_clean(BlkActionState *common) 2093 { 2094 BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState, 2095 common, common); 2096 2097 if (state->aio_context) { 2098 aio_context_release(state->aio_context); 2099 } 2100 } 2101 2102 static void abort_prepare(BlkActionState *common, Error **errp) 2103 { 2104 error_setg(errp, "Transaction aborted using Abort action"); 2105 } 2106 2107 static void abort_commit(BlkActionState *common) 2108 { 2109 g_assert_not_reached(); /* this action never succeeds */ 2110 } 2111 2112 static const BlkActionOps actions[] = { 2113 [TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT] = { 2114 .instance_size = sizeof(ExternalSnapshotState), 2115 .prepare = external_snapshot_prepare, 2116 .commit = external_snapshot_commit, 2117 .abort = external_snapshot_abort, 2118 .clean = external_snapshot_clean, 2119 }, 2120 [TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_SYNC] = { 2121 .instance_size = sizeof(ExternalSnapshotState), 2122 .prepare = external_snapshot_prepare, 2123 .commit = external_snapshot_commit, 2124 .abort = external_snapshot_abort, 2125 
.clean = external_snapshot_clean, 2126 }, 2127 [TRANSACTION_ACTION_KIND_DRIVE_BACKUP] = { 2128 .instance_size = sizeof(DriveBackupState), 2129 .prepare = drive_backup_prepare, 2130 .abort = drive_backup_abort, 2131 .clean = drive_backup_clean, 2132 }, 2133 [TRANSACTION_ACTION_KIND_BLOCKDEV_BACKUP] = { 2134 .instance_size = sizeof(BlockdevBackupState), 2135 .prepare = blockdev_backup_prepare, 2136 .abort = blockdev_backup_abort, 2137 .clean = blockdev_backup_clean, 2138 }, 2139 [TRANSACTION_ACTION_KIND_ABORT] = { 2140 .instance_size = sizeof(BlkActionState), 2141 .prepare = abort_prepare, 2142 .commit = abort_commit, 2143 }, 2144 [TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_INTERNAL_SYNC] = { 2145 .instance_size = sizeof(InternalSnapshotState), 2146 .prepare = internal_snapshot_prepare, 2147 .abort = internal_snapshot_abort, 2148 .clean = internal_snapshot_clean, 2149 }, 2150 [TRANSACTION_ACTION_KIND_BLOCK_DIRTY_BITMAP_ADD] = { 2151 .instance_size = sizeof(BlockDirtyBitmapState), 2152 .prepare = block_dirty_bitmap_add_prepare, 2153 .abort = block_dirty_bitmap_add_abort, 2154 }, 2155 [TRANSACTION_ACTION_KIND_BLOCK_DIRTY_BITMAP_CLEAR] = { 2156 .instance_size = sizeof(BlockDirtyBitmapState), 2157 .prepare = block_dirty_bitmap_clear_prepare, 2158 .commit = block_dirty_bitmap_clear_commit, 2159 .abort = block_dirty_bitmap_clear_abort, 2160 .clean = block_dirty_bitmap_clear_clean, 2161 } 2162 }; 2163 2164 /** 2165 * Allocate a TransactionProperties structure if necessary, and fill 2166 * that structure with desired defaults if they are unset. 2167 */ 2168 static TransactionProperties *get_transaction_properties( 2169 TransactionProperties *props) 2170 { 2171 if (!props) { 2172 props = g_new0(TransactionProperties, 1); 2173 } 2174 2175 if (!props->has_completion_mode) { 2176 props->has_completion_mode = true; 2177 props->completion_mode = ACTION_COMPLETION_MODE_INDIVIDUAL; 2178 } 2179 2180 return props; 2181 } 2182 2183 /* 2184 * 'Atomic' group operations. The operations are performed as a set, and if 2185 * any fail then we roll back all operations in the group. 2186 */ 2187 void qmp_transaction(TransactionActionList *dev_list, 2188 bool has_props, 2189 struct TransactionProperties *props, 2190 Error **errp) 2191 { 2192 TransactionActionList *dev_entry = dev_list; 2193 BlockJobTxn *block_job_txn = NULL; 2194 BlkActionState *state, *next; 2195 Error *local_err = NULL; 2196 2197 QSIMPLEQ_HEAD(snap_bdrv_states, BlkActionState) snap_bdrv_states; 2198 QSIMPLEQ_INIT(&snap_bdrv_states); 2199 2200 /* Does this transaction get canceled as a group on failure? 2201 * If not, we don't really need to make a BlockJobTxn. 
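* With completion-mode=grouped we do create one, and each backup action adds its block job to it, so failure or cancellation of any job cancels the rest.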
2202 */ 2203 props = get_transaction_properties(props); 2204 if (props->completion_mode != ACTION_COMPLETION_MODE_INDIVIDUAL) { 2205 block_job_txn = block_job_txn_new(); 2206 } 2207 2208 /* drain all i/o before any operations */ 2209 bdrv_drain_all(); 2210 2211 /* We don't do anything in this loop that commits us to the operations */ 2212 while (NULL != dev_entry) { 2213 TransactionAction *dev_info = NULL; 2214 const BlkActionOps *ops; 2215 2216 dev_info = dev_entry->value; 2217 dev_entry = dev_entry->next; 2218 2219 assert(dev_info->type < ARRAY_SIZE(actions)); 2220 2221 ops = &actions[dev_info->type]; 2222 assert(ops->instance_size > 0); 2223 2224 state = g_malloc0(ops->instance_size); 2225 state->ops = ops; 2226 state->action = dev_info; 2227 state->block_job_txn = block_job_txn; 2228 state->txn_props = props; 2229 QSIMPLEQ_INSERT_TAIL(&snap_bdrv_states, state, entry); 2230 2231 state->ops->prepare(state, &local_err); 2232 if (local_err) { 2233 error_propagate(errp, local_err); 2234 goto delete_and_fail; 2235 } 2236 } 2237 2238 QSIMPLEQ_FOREACH(state, &snap_bdrv_states, entry) { 2239 if (state->ops->commit) { 2240 state->ops->commit(state); 2241 } 2242 } 2243 2244 /* success */ 2245 goto exit; 2246 2247 delete_and_fail: 2248 /* failure, and it is all-or-none; roll back all operations */ 2249 QSIMPLEQ_FOREACH(state, &snap_bdrv_states, entry) { 2250 if (state->ops->abort) { 2251 state->ops->abort(state); 2252 } 2253 } 2254 exit: 2255 QSIMPLEQ_FOREACH_SAFE(state, &snap_bdrv_states, entry, next) { 2256 if (state->ops->clean) { 2257 state->ops->clean(state); 2258 } 2259 g_free(state); 2260 } 2261 if (!has_props) { 2262 qapi_free_TransactionProperties(props); 2263 } 2264 block_job_txn_unref(block_job_txn); 2265 } 2266 2267 void qmp_eject(bool has_device, const char *device, 2268 bool has_id, const char *id, 2269 bool has_force, bool force, Error **errp) 2270 { 2271 Error *local_err = NULL; 2272 int rc; 2273 2274 if (!has_force) { 2275 force = false; 2276 } 2277 2278 rc = do_open_tray(has_device ? device : NULL, 2279 has_id ? id : NULL, 2280 force, &local_err); 2281 if (rc && rc != -ENOSYS) { 2282 error_propagate(errp, local_err); 2283 return; 2284 } 2285 error_free(local_err); 2286 2287 qmp_x_blockdev_remove_medium(has_device, device, has_id, id, errp); 2288 } 2289 2290 void qmp_block_passwd(bool has_device, const char *device, 2291 bool has_node_name, const char *node_name, 2292 const char *password, Error **errp) 2293 { 2294 Error *local_err = NULL; 2295 BlockDriverState *bs; 2296 AioContext *aio_context; 2297 2298 bs = bdrv_lookup_bs(has_device ? device : NULL, 2299 has_node_name ? node_name : NULL, 2300 &local_err); 2301 if (local_err) { 2302 error_propagate(errp, local_err); 2303 return; 2304 } 2305 2306 aio_context = bdrv_get_aio_context(bs); 2307 aio_context_acquire(aio_context); 2308 2309 bdrv_add_key(bs, password, errp); 2310 2311 aio_context_release(aio_context); 2312 } 2313 2314 /* 2315 * Attempt to open the tray of @device. 2316 * If @force, ignore its tray lock. 2317 * Else, if the tray is locked, don't open it, but ask the guest to open it. 2318 * On error, store an error through @errp and return -errno. 2319 * If @device does not exist, return -ENODEV. 2320 * If it has no removable media, return -ENOTSUP. 2321 * If it has no tray, return -ENOSYS. 2322 * If the guest was asked to open the tray, return -EINPROGRESS. 2323 * Else, return 0. 
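* The QMP callers below (qmp_eject, qmp_blockdev_open_tray, qmp_blockdev_change_medium) treat -ENOSYS as non-fatal and continue.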
2324 */ 2325 static int do_open_tray(const char *blk_name, const char *qdev_id, 2326 bool force, Error **errp) 2327 { 2328 BlockBackend *blk; 2329 const char *device = qdev_id ?: blk_name; 2330 bool locked; 2331 2332 blk = qmp_get_blk(blk_name, qdev_id, errp); 2333 if (!blk) { 2334 return -ENODEV; 2335 } 2336 2337 if (!blk_dev_has_removable_media(blk)) { 2338 error_setg(errp, "Device '%s' is not removable", device); 2339 return -ENOTSUP; 2340 } 2341 2342 if (!blk_dev_has_tray(blk)) { 2343 error_setg(errp, "Device '%s' does not have a tray", device); 2344 return -ENOSYS; 2345 } 2346 2347 if (blk_dev_is_tray_open(blk)) { 2348 return 0; 2349 } 2350 2351 locked = blk_dev_is_medium_locked(blk); 2352 if (locked) { 2353 blk_dev_eject_request(blk, force); 2354 } 2355 2356 if (!locked || force) { 2357 blk_dev_change_media_cb(blk, false); 2358 } 2359 2360 if (locked && !force) { 2361 error_setg(errp, "Device '%s' is locked and force was not specified, " 2362 "wait for tray to open and try again", device); 2363 return -EINPROGRESS; 2364 } 2365 2366 return 0; 2367 } 2368 2369 void qmp_blockdev_open_tray(bool has_device, const char *device, 2370 bool has_id, const char *id, 2371 bool has_force, bool force, 2372 Error **errp) 2373 { 2374 Error *local_err = NULL; 2375 int rc; 2376 2377 if (!has_force) { 2378 force = false; 2379 } 2380 rc = do_open_tray(has_device ? device : NULL, 2381 has_id ? id : NULL, 2382 force, &local_err); 2383 if (rc && rc != -ENOSYS && rc != -EINPROGRESS) { 2384 error_propagate(errp, local_err); 2385 return; 2386 } 2387 error_free(local_err); 2388 } 2389 2390 void qmp_blockdev_close_tray(bool has_device, const char *device, 2391 bool has_id, const char *id, 2392 Error **errp) 2393 { 2394 BlockBackend *blk; 2395 2396 device = has_device ? device : NULL; 2397 id = has_id ? id : NULL; 2398 2399 blk = qmp_get_blk(device, id, errp); 2400 if (!blk) { 2401 return; 2402 } 2403 2404 if (!blk_dev_has_removable_media(blk)) { 2405 error_setg(errp, "Device '%s' is not removable", device ?: id); 2406 return; 2407 } 2408 2409 if (!blk_dev_has_tray(blk)) { 2410 /* Ignore this command on tray-less devices */ 2411 return; 2412 } 2413 2414 if (!blk_dev_is_tray_open(blk)) { 2415 return; 2416 } 2417 2418 blk_dev_change_media_cb(blk, true); 2419 } 2420 2421 void qmp_x_blockdev_remove_medium(bool has_device, const char *device, 2422 bool has_id, const char *id, Error **errp) 2423 { 2424 BlockBackend *blk; 2425 BlockDriverState *bs; 2426 AioContext *aio_context; 2427 bool has_attached_device; 2428 2429 device = has_device ? device : NULL; 2430 id = has_id ? 
id : NULL; 2431 2432 blk = qmp_get_blk(device, id, errp); 2433 if (!blk) { 2434 return; 2435 } 2436 2437 /* For BBs without a device, we can exchange the BDS tree at will */ 2438 has_attached_device = blk_get_attached_dev(blk); 2439 2440 if (has_attached_device && !blk_dev_has_removable_media(blk)) { 2441 error_setg(errp, "Device '%s' is not removable", device ?: id); 2442 return; 2443 } 2444 2445 if (has_attached_device && blk_dev_has_tray(blk) && 2446 !blk_dev_is_tray_open(blk)) 2447 { 2448 error_setg(errp, "Tray of device '%s' is not open", device ?: id); 2449 return; 2450 } 2451 2452 bs = blk_bs(blk); 2453 if (!bs) { 2454 return; 2455 } 2456 2457 aio_context = bdrv_get_aio_context(bs); 2458 aio_context_acquire(aio_context); 2459 2460 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_EJECT, errp)) { 2461 goto out; 2462 } 2463 2464 blk_remove_bs(blk); 2465 2466 if (!blk_dev_has_tray(blk)) { 2467 /* For tray-less devices, blockdev-open-tray is a no-op (or may not be 2468 * called at all); therefore, the medium needs to be ejected here. 2469 * Do it after blk_remove_bs() so blk_is_inserted(blk) returns the @load 2470 * value passed here (i.e. false). */ 2471 blk_dev_change_media_cb(blk, false); 2472 } 2473 2474 out: 2475 aio_context_release(aio_context); 2476 } 2477 2478 static void qmp_blockdev_insert_anon_medium(BlockBackend *blk, 2479 BlockDriverState *bs, Error **errp) 2480 { 2481 bool has_device; 2482 2483 /* For BBs without a device, we can exchange the BDS tree at will */ 2484 has_device = blk_get_attached_dev(blk); 2485 2486 if (has_device && !blk_dev_has_removable_media(blk)) { 2487 error_setg(errp, "Device is not removable"); 2488 return; 2489 } 2490 2491 if (has_device && blk_dev_has_tray(blk) && !blk_dev_is_tray_open(blk)) { 2492 error_setg(errp, "Tray of the device is not open"); 2493 return; 2494 } 2495 2496 if (blk_bs(blk)) { 2497 error_setg(errp, "There already is a medium in the device"); 2498 return; 2499 } 2500 2501 blk_insert_bs(blk, bs); 2502 2503 if (!blk_dev_has_tray(blk)) { 2504 /* For tray-less devices, blockdev-close-tray is a no-op (or may not be 2505 * called at all); therefore, the medium needs to be pushed into the 2506 * slot here. 2507 * Do it after blk_insert_bs() so blk_is_inserted(blk) returns the @load 2508 * value passed here (i.e. true). */ 2509 blk_dev_change_media_cb(blk, true); 2510 } 2511 } 2512 2513 void qmp_x_blockdev_insert_medium(bool has_device, const char *device, 2514 bool has_id, const char *id, 2515 const char *node_name, Error **errp) 2516 { 2517 BlockBackend *blk; 2518 BlockDriverState *bs; 2519 2520 blk = qmp_get_blk(has_device ? device : NULL, 2521 has_id ? id : NULL, 2522 errp); 2523 if (!blk) { 2524 return; 2525 } 2526 2527 bs = bdrv_find_node(node_name); 2528 if (!bs) { 2529 error_setg(errp, "Node '%s' not found", node_name); 2530 return; 2531 } 2532 2533 if (bdrv_has_blk(bs)) { 2534 error_setg(errp, "Node '%s' is already in use", node_name); 2535 return; 2536 } 2537 2538 qmp_blockdev_insert_anon_medium(blk, bs, errp); 2539 } 2540 2541 void qmp_blockdev_change_medium(bool has_device, const char *device, 2542 bool has_id, const char *id, 2543 const char *filename, 2544 bool has_format, const char *format, 2545 bool has_read_only, 2546 BlockdevChangeReadOnlyMode read_only, 2547 Error **errp) 2548 { 2549 BlockBackend *blk; 2550 BlockDriverState *medium_bs = NULL; 2551 int bdrv_flags; 2552 int rc; 2553 QDict *options = NULL; 2554 Error *err = NULL; 2555 2556 blk = qmp_get_blk(has_device ? device : NULL, 2557 has_id ? 
id : NULL, 2558 errp); 2559 if (!blk) { 2560 goto fail; 2561 } 2562 2563 if (blk_bs(blk)) { 2564 blk_update_root_state(blk); 2565 } 2566 2567 bdrv_flags = blk_get_open_flags_from_root_state(blk); 2568 bdrv_flags &= ~(BDRV_O_TEMPORARY | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING | 2569 BDRV_O_PROTOCOL); 2570 2571 if (!has_read_only) { 2572 read_only = BLOCKDEV_CHANGE_READ_ONLY_MODE_RETAIN; 2573 } 2574 2575 switch (read_only) { 2576 case BLOCKDEV_CHANGE_READ_ONLY_MODE_RETAIN: 2577 break; 2578 2579 case BLOCKDEV_CHANGE_READ_ONLY_MODE_READ_ONLY: 2580 bdrv_flags &= ~BDRV_O_RDWR; 2581 break; 2582 2583 case BLOCKDEV_CHANGE_READ_ONLY_MODE_READ_WRITE: 2584 bdrv_flags |= BDRV_O_RDWR; 2585 break; 2586 2587 default: 2588 abort(); 2589 } 2590 2591 if (has_format) { 2592 options = qdict_new(); 2593 qdict_put(options, "driver", qstring_from_str(format)); 2594 } 2595 2596 medium_bs = bdrv_open(filename, NULL, options, bdrv_flags, errp); 2597 if (!medium_bs) { 2598 goto fail; 2599 } 2600 2601 bdrv_add_key(medium_bs, NULL, &err); 2602 if (err) { 2603 error_propagate(errp, err); 2604 goto fail; 2605 } 2606 2607 rc = do_open_tray(has_device ? device : NULL, 2608 has_id ? id : NULL, 2609 false, &err); 2610 if (rc && rc != -ENOSYS) { 2611 error_propagate(errp, err); 2612 goto fail; 2613 } 2614 error_free(err); 2615 err = NULL; 2616 2617 qmp_x_blockdev_remove_medium(has_device, device, has_id, id, &err); 2618 if (err) { 2619 error_propagate(errp, err); 2620 goto fail; 2621 } 2622 2623 qmp_blockdev_insert_anon_medium(blk, medium_bs, &err); 2624 if (err) { 2625 error_propagate(errp, err); 2626 goto fail; 2627 } 2628 2629 blk_apply_root_state(blk, medium_bs); 2630 2631 qmp_blockdev_close_tray(has_device, device, has_id, id, errp); 2632 2633 fail: 2634 /* If the medium has been inserted, the device has its own reference, so 2635 * ours must be relinquished; and if it has not been inserted successfully, 2636 * the reference must be relinquished anyway */ 2637 bdrv_unref(medium_bs); 2638 } 2639 2640 /* throttling disk I/O limits */ 2641 void qmp_block_set_io_throttle(BlockIOThrottle *arg, Error **errp) 2642 { 2643 ThrottleConfig cfg; 2644 BlockDriverState *bs; 2645 BlockBackend *blk; 2646 AioContext *aio_context; 2647 2648 blk = qmp_get_blk(arg->has_device ? arg->device : NULL, 2649 arg->has_id ?
arg->id : NULL, 2650 errp); 2651 if (!blk) { 2652 return; 2653 } 2654 2655 aio_context = blk_get_aio_context(blk); 2656 aio_context_acquire(aio_context); 2657 2658 bs = blk_bs(blk); 2659 if (!bs) { 2660 error_setg(errp, "Device has no medium"); 2661 goto out; 2662 } 2663 2664 throttle_config_init(&cfg); 2665 cfg.buckets[THROTTLE_BPS_TOTAL].avg = arg->bps; 2666 cfg.buckets[THROTTLE_BPS_READ].avg = arg->bps_rd; 2667 cfg.buckets[THROTTLE_BPS_WRITE].avg = arg->bps_wr; 2668 2669 cfg.buckets[THROTTLE_OPS_TOTAL].avg = arg->iops; 2670 cfg.buckets[THROTTLE_OPS_READ].avg = arg->iops_rd; 2671 cfg.buckets[THROTTLE_OPS_WRITE].avg = arg->iops_wr; 2672 2673 if (arg->has_bps_max) { 2674 cfg.buckets[THROTTLE_BPS_TOTAL].max = arg->bps_max; 2675 } 2676 if (arg->has_bps_rd_max) { 2677 cfg.buckets[THROTTLE_BPS_READ].max = arg->bps_rd_max; 2678 } 2679 if (arg->has_bps_wr_max) { 2680 cfg.buckets[THROTTLE_BPS_WRITE].max = arg->bps_wr_max; 2681 } 2682 if (arg->has_iops_max) { 2683 cfg.buckets[THROTTLE_OPS_TOTAL].max = arg->iops_max; 2684 } 2685 if (arg->has_iops_rd_max) { 2686 cfg.buckets[THROTTLE_OPS_READ].max = arg->iops_rd_max; 2687 } 2688 if (arg->has_iops_wr_max) { 2689 cfg.buckets[THROTTLE_OPS_WRITE].max = arg->iops_wr_max; 2690 } 2691 2692 if (arg->has_bps_max_length) { 2693 cfg.buckets[THROTTLE_BPS_TOTAL].burst_length = arg->bps_max_length; 2694 } 2695 if (arg->has_bps_rd_max_length) { 2696 cfg.buckets[THROTTLE_BPS_READ].burst_length = arg->bps_rd_max_length; 2697 } 2698 if (arg->has_bps_wr_max_length) { 2699 cfg.buckets[THROTTLE_BPS_WRITE].burst_length = arg->bps_wr_max_length; 2700 } 2701 if (arg->has_iops_max_length) { 2702 cfg.buckets[THROTTLE_OPS_TOTAL].burst_length = arg->iops_max_length; 2703 } 2704 if (arg->has_iops_rd_max_length) { 2705 cfg.buckets[THROTTLE_OPS_READ].burst_length = arg->iops_rd_max_length; 2706 } 2707 if (arg->has_iops_wr_max_length) { 2708 cfg.buckets[THROTTLE_OPS_WRITE].burst_length = arg->iops_wr_max_length; 2709 } 2710 2711 if (arg->has_iops_size) { 2712 cfg.op_size = arg->iops_size; 2713 } 2714 2715 if (!throttle_is_valid(&cfg, errp)) { 2716 goto out; 2717 } 2718 2719 if (throttle_enabled(&cfg)) { 2720 /* Enable I/O limits if they're not enabled yet, otherwise 2721 * just update the throttling group. */ 2722 if (!blk_get_public(blk)->throttle_state) { 2723 blk_io_limits_enable(blk, 2724 arg->has_group ? arg->group : 2725 arg->has_device ? 
arg->device : 2726 arg->id); 2727 } else if (arg->has_group) { 2728 blk_io_limits_update_group(blk, arg->group); 2729 } 2730 /* Set the new throttling configuration */ 2731 blk_set_io_limits(blk, &cfg); 2732 } else if (blk_get_public(blk)->throttle_state) { 2733 /* If all throttling settings are set to 0, disable I/O limits */ 2734 blk_io_limits_disable(blk); 2735 } 2736 2737 out: 2738 aio_context_release(aio_context); 2739 } 2740 2741 void qmp_block_dirty_bitmap_add(const char *node, const char *name, 2742 bool has_granularity, uint32_t granularity, 2743 Error **errp) 2744 { 2745 AioContext *aio_context; 2746 BlockDriverState *bs; 2747 2748 if (!name || name[0] == '\0') { 2749 error_setg(errp, "Bitmap name cannot be empty"); 2750 return; 2751 } 2752 2753 bs = bdrv_lookup_bs(node, node, errp); 2754 if (!bs) { 2755 return; 2756 } 2757 2758 aio_context = bdrv_get_aio_context(bs); 2759 aio_context_acquire(aio_context); 2760 2761 if (has_granularity) { 2762 if (granularity < 512 || !is_power_of_2(granularity)) { 2763 error_setg(errp, "Granularity must be power of 2 " 2764 "and at least 512"); 2765 goto out; 2766 } 2767 } else { 2768 /* Default to cluster size, if available: */ 2769 granularity = bdrv_get_default_bitmap_granularity(bs); 2770 } 2771 2772 bdrv_create_dirty_bitmap(bs, granularity, name, errp); 2773 2774 out: 2775 aio_context_release(aio_context); 2776 } 2777 2778 void qmp_block_dirty_bitmap_remove(const char *node, const char *name, 2779 Error **errp) 2780 { 2781 AioContext *aio_context; 2782 BlockDriverState *bs; 2783 BdrvDirtyBitmap *bitmap; 2784 2785 bitmap = block_dirty_bitmap_lookup(node, name, &bs, &aio_context, errp); 2786 if (!bitmap || !bs) { 2787 return; 2788 } 2789 2790 if (bdrv_dirty_bitmap_frozen(bitmap)) { 2791 error_setg(errp, 2792 "Bitmap '%s' is currently frozen and cannot be removed", 2793 name); 2794 goto out; 2795 } 2796 bdrv_dirty_bitmap_make_anon(bitmap); 2797 bdrv_release_dirty_bitmap(bs, bitmap); 2798 2799 out: 2800 aio_context_release(aio_context); 2801 } 2802 2803 /** 2804 * Completely clear a bitmap, for the purposes of synchronizing a bitmap 2805 * immediately after a full backup operation. 
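* For illustration only (the node and bitmap names are made up), the corresponding QMP command looks like: { "execute": "block-dirty-bitmap-clear", "arguments": { "node": "drive0", "name": "bitmap0" } }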
2806 */ 2807 void qmp_block_dirty_bitmap_clear(const char *node, const char *name, 2808 Error **errp) 2809 { 2810 AioContext *aio_context; 2811 BdrvDirtyBitmap *bitmap; 2812 BlockDriverState *bs; 2813 2814 bitmap = block_dirty_bitmap_lookup(node, name, &bs, &aio_context, errp); 2815 if (!bitmap || !bs) { 2816 return; 2817 } 2818 2819 if (bdrv_dirty_bitmap_frozen(bitmap)) { 2820 error_setg(errp, 2821 "Bitmap '%s' is currently frozen and cannot be modified", 2822 name); 2823 goto out; 2824 } else if (!bdrv_dirty_bitmap_enabled(bitmap)) { 2825 error_setg(errp, 2826 "Bitmap '%s' is currently disabled and cannot be cleared", 2827 name); 2828 goto out; 2829 } 2830 2831 bdrv_clear_dirty_bitmap(bitmap, NULL); 2832 2833 out: 2834 aio_context_release(aio_context); 2835 } 2836 2837 void hmp_drive_del(Monitor *mon, const QDict *qdict) 2838 { 2839 const char *id = qdict_get_str(qdict, "id"); 2840 BlockBackend *blk; 2841 BlockDriverState *bs; 2842 AioContext *aio_context; 2843 Error *local_err = NULL; 2844 2845 bs = bdrv_find_node(id); 2846 if (bs) { 2847 qmp_x_blockdev_del(id, &local_err); 2848 if (local_err) { 2849 error_report_err(local_err); 2850 } 2851 return; 2852 } 2853 2854 blk = blk_by_name(id); 2855 if (!blk) { 2856 error_report("Device '%s' not found", id); 2857 return; 2858 } 2859 2860 if (!blk_legacy_dinfo(blk)) { 2861 error_report("Deleting device added with blockdev-add" 2862 " is not supported"); 2863 return; 2864 } 2865 2866 aio_context = blk_get_aio_context(blk); 2867 aio_context_acquire(aio_context); 2868 2869 bs = blk_bs(blk); 2870 if (bs) { 2871 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_DRIVE_DEL, &local_err)) { 2872 error_report_err(local_err); 2873 aio_context_release(aio_context); 2874 return; 2875 } 2876 2877 blk_remove_bs(blk); 2878 } 2879 2880 /* Make the BlockBackend and the attached BlockDriverState anonymous */ 2881 monitor_remove_blk(blk); 2882 2883 /* If this BlockBackend has a device attached to it, its refcount will be 2884 * decremented when the device is removed; otherwise we have to do so here. 2885 */ 2886 if (blk_get_attached_dev(blk)) { 2887 /* Further I/O must not pause the guest */ 2888 blk_set_on_error(blk, BLOCKDEV_ON_ERROR_REPORT, 2889 BLOCKDEV_ON_ERROR_REPORT); 2890 } else { 2891 blk_unref(blk); 2892 } 2893 2894 aio_context_release(aio_context); 2895 } 2896 2897 void qmp_block_resize(bool has_device, const char *device, 2898 bool has_node_name, const char *node_name, 2899 int64_t size, Error **errp) 2900 { 2901 Error *local_err = NULL; 2902 BlockDriverState *bs; 2903 AioContext *aio_context; 2904 int ret; 2905 2906 bs = bdrv_lookup_bs(has_device ? device : NULL, 2907 has_node_name ? 
node_name : NULL, 2908 &local_err); 2909 if (local_err) { 2910 error_propagate(errp, local_err); 2911 return; 2912 } 2913 2914 aio_context = bdrv_get_aio_context(bs); 2915 aio_context_acquire(aio_context); 2916 2917 if (!bdrv_is_first_non_filter(bs)) { 2918 error_setg(errp, QERR_FEATURE_DISABLED, "resize"); 2919 goto out; 2920 } 2921 2922 if (size < 0) { 2923 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "size", "a >0 size"); 2924 goto out; 2925 } 2926 2927 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_RESIZE, NULL)) { 2928 error_setg(errp, QERR_DEVICE_IN_USE, device); 2929 goto out; 2930 } 2931 2932 /* complete all in-flight operations before resizing the device */ 2933 bdrv_drain_all(); 2934 2935 ret = bdrv_truncate(bs, size); 2936 switch (ret) { 2937 case 0: 2938 break; 2939 case -ENOMEDIUM: 2940 error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, device); 2941 break; 2942 case -ENOTSUP: 2943 error_setg(errp, QERR_UNSUPPORTED); 2944 break; 2945 case -EACCES: 2946 error_setg(errp, "Device '%s' is read only", device); 2947 break; 2948 case -EBUSY: 2949 error_setg(errp, QERR_DEVICE_IN_USE, device); 2950 break; 2951 default: 2952 error_setg_errno(errp, -ret, "Could not resize"); 2953 break; 2954 } 2955 2956 out: 2957 aio_context_release(aio_context); 2958 } 2959 2960 static void block_job_cb(void *opaque, int ret) 2961 { 2962 /* Note that this function may be executed from another AioContext besides 2963 * the QEMU main loop. If you need to access anything that assumes the 2964 * QEMU global mutex, use a BH or introduce a mutex. 2965 */ 2966 2967 BlockDriverState *bs = opaque; 2968 const char *msg = NULL; 2969 2970 trace_block_job_cb(bs, bs->job, ret); 2971 2972 assert(bs->job); 2973 2974 if (ret < 0) { 2975 msg = strerror(-ret); 2976 } 2977 2978 if (block_job_is_cancelled(bs->job)) { 2979 block_job_event_cancelled(bs->job); 2980 } else { 2981 block_job_event_completed(bs->job, msg); 2982 } 2983 } 2984 2985 void qmp_block_stream(bool has_job_id, const char *job_id, const char *device, 2986 bool has_base, const char *base, 2987 bool has_backing_file, const char *backing_file, 2988 bool has_speed, int64_t speed, 2989 bool has_on_error, BlockdevOnError on_error, 2990 Error **errp) 2991 { 2992 BlockDriverState *bs; 2993 BlockDriverState *base_bs = NULL; 2994 AioContext *aio_context; 2995 Error *local_err = NULL; 2996 const char *base_name = NULL; 2997 2998 if (!has_on_error) { 2999 on_error = BLOCKDEV_ON_ERROR_REPORT; 3000 } 3001 3002 bs = qmp_get_root_bs(device, errp); 3003 if (!bs) { 3004 return; 3005 } 3006 3007 aio_context = bdrv_get_aio_context(bs); 3008 aio_context_acquire(aio_context); 3009 3010 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_STREAM, errp)) { 3011 goto out; 3012 } 3013 3014 if (has_base) { 3015 base_bs = bdrv_find_backing_image(bs, base); 3016 if (base_bs == NULL) { 3017 error_setg(errp, QERR_BASE_NOT_FOUND, base); 3018 goto out; 3019 } 3020 assert(bdrv_get_aio_context(base_bs) == aio_context); 3021 base_name = base; 3022 } 3023 3024 /* if we are streaming the entire chain, the result will have no backing 3025 * file, and specifying one is therefore an error */ 3026 if (base_bs == NULL && has_backing_file) { 3027 error_setg(errp, "backing file specified, but streaming the " 3028 "entire chain"); 3029 goto out; 3030 } 3031 3032 /* backing_file string overrides base bs filename */ 3033 base_name = has_backing_file ? backing_file : base_name; 3034 3035 stream_start(has_job_id ? job_id : NULL, bs, base_bs, base_name, 3036 has_speed ? 
speed : 0, on_error, block_job_cb, bs, &local_err); 3037 if (local_err) { 3038 error_propagate(errp, local_err); 3039 goto out; 3040 } 3041 3042 trace_qmp_block_stream(bs, bs->job); 3043 3044 out: 3045 aio_context_release(aio_context); 3046 } 3047 3048 void qmp_block_commit(bool has_job_id, const char *job_id, const char *device, 3049 bool has_base, const char *base, 3050 bool has_top, const char *top, 3051 bool has_backing_file, const char *backing_file, 3052 bool has_speed, int64_t speed, 3053 Error **errp) 3054 { 3055 BlockDriverState *bs; 3056 BlockDriverState *base_bs, *top_bs; 3057 AioContext *aio_context; 3058 Error *local_err = NULL; 3059 /* This will be part of the QMP command, if/when the 3060 * BlockdevOnError change for blkmirror makes it in 3061 */ 3062 BlockdevOnError on_error = BLOCKDEV_ON_ERROR_REPORT; 3063 3064 if (!has_speed) { 3065 speed = 0; 3066 } 3067 3068 /* Important Note: 3069 * libvirt relies on the DeviceNotFound error class in order to probe for 3070 * live commit feature versions; for this to work, we must make sure to 3071 * perform the device lookup before any generic errors that may occur in a 3072 * scenario in which all optional arguments are omitted. */ 3073 bs = qmp_get_root_bs(device, &local_err); 3074 if (!bs) { 3075 bs = bdrv_lookup_bs(device, device, NULL); 3076 if (!bs) { 3077 error_free(local_err); 3078 error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND, 3079 "Device '%s' not found", device); 3080 } else { 3081 error_propagate(errp, local_err); 3082 } 3083 return; 3084 } 3085 3086 aio_context = bdrv_get_aio_context(bs); 3087 aio_context_acquire(aio_context); 3088 3089 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_COMMIT_SOURCE, errp)) { 3090 goto out; 3091 } 3092 3093 /* default top_bs is the active layer */ 3094 top_bs = bs; 3095 3096 if (has_top && top) { 3097 if (strcmp(bs->filename, top) != 0) { 3098 top_bs = bdrv_find_backing_image(bs, top); 3099 } 3100 } 3101 3102 if (top_bs == NULL) { 3103 error_setg(errp, "Top image file %s not found", top ? top : "NULL"); 3104 goto out; 3105 } 3106 3107 assert(bdrv_get_aio_context(top_bs) == aio_context); 3108 3109 if (has_base && base) { 3110 base_bs = bdrv_find_backing_image(top_bs, base); 3111 } else { 3112 base_bs = bdrv_find_base(top_bs); 3113 } 3114 3115 if (base_bs == NULL) { 3116 error_setg(errp, QERR_BASE_NOT_FOUND, base ? base : "NULL"); 3117 goto out; 3118 } 3119 3120 assert(bdrv_get_aio_context(base_bs) == aio_context); 3121 3122 if (bdrv_op_is_blocked(base_bs, BLOCK_OP_TYPE_COMMIT_TARGET, errp)) { 3123 goto out; 3124 } 3125 3126 /* Do not allow attempts to commit an image into itself */ 3127 if (top_bs == base_bs) { 3128 error_setg(errp, "cannot commit an image into itself"); 3129 goto out; 3130 } 3131 3132 if (top_bs == bs) { 3133 if (has_backing_file) { 3134 error_setg(errp, "'backing-file' specified," 3135 " but 'top' is the active layer"); 3136 goto out; 3137 } 3138 commit_active_start(has_job_id ? job_id : NULL, bs, base_bs, speed, 3139 on_error, block_job_cb, bs, &local_err, false); 3140 } else { 3141 commit_start(has_job_id ? job_id : NULL, bs, base_bs, top_bs, speed, 3142 on_error, block_job_cb, bs, 3143 has_backing_file ? 
backing_file : NULL, &local_err); 3144 } 3145 if (local_err != NULL) { 3146 error_propagate(errp, local_err); 3147 goto out; 3148 } 3149 3150 out: 3151 aio_context_release(aio_context); 3152 } 3153 3154 static void do_drive_backup(DriveBackup *backup, BlockJobTxn *txn, Error **errp) 3155 { 3156 BlockDriverState *bs; 3157 BlockDriverState *target_bs; 3158 BlockDriverState *source = NULL; 3159 BdrvDirtyBitmap *bmap = NULL; 3160 AioContext *aio_context; 3161 QDict *options = NULL; 3162 Error *local_err = NULL; 3163 int flags; 3164 int64_t size; 3165 3166 if (!backup->has_speed) { 3167 backup->speed = 0; 3168 } 3169 if (!backup->has_on_source_error) { 3170 backup->on_source_error = BLOCKDEV_ON_ERROR_REPORT; 3171 } 3172 if (!backup->has_on_target_error) { 3173 backup->on_target_error = BLOCKDEV_ON_ERROR_REPORT; 3174 } 3175 if (!backup->has_mode) { 3176 backup->mode = NEW_IMAGE_MODE_ABSOLUTE_PATHS; 3177 } 3178 if (!backup->has_job_id) { 3179 backup->job_id = NULL; 3180 } 3181 if (!backup->has_compress) { 3182 backup->compress = false; 3183 } 3184 3185 bs = qmp_get_root_bs(backup->device, errp); 3186 if (!bs) { 3187 return; 3188 } 3189 3190 aio_context = bdrv_get_aio_context(bs); 3191 aio_context_acquire(aio_context); 3192 3193 if (!backup->has_format) { 3194 backup->format = backup->mode == NEW_IMAGE_MODE_EXISTING ? 3195 NULL : (char*) bs->drv->format_name; 3196 } 3197 3198 /* Early check to avoid creating target */ 3199 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_BACKUP_SOURCE, errp)) { 3200 goto out; 3201 } 3202 3203 flags = bs->open_flags | BDRV_O_RDWR; 3204 3205 /* See if we have a backing HD we can use to create our new image 3206 * on top of. */ 3207 if (backup->sync == MIRROR_SYNC_MODE_TOP) { 3208 source = backing_bs(bs); 3209 if (!source) { 3210 backup->sync = MIRROR_SYNC_MODE_FULL; 3211 } 3212 } 3213 if (backup->sync == MIRROR_SYNC_MODE_NONE) { 3214 source = bs; 3215 } 3216 3217 size = bdrv_getlength(bs); 3218 if (size < 0) { 3219 error_setg_errno(errp, -size, "bdrv_getlength failed"); 3220 goto out; 3221 } 3222 3223 if (backup->mode != NEW_IMAGE_MODE_EXISTING) { 3224 assert(backup->format); 3225 if (source) { 3226 bdrv_img_create(backup->target, backup->format, source->filename, 3227 source->drv->format_name, NULL, 3228 size, flags, &local_err, false); 3229 } else { 3230 bdrv_img_create(backup->target, backup->format, NULL, NULL, NULL, 3231 size, flags, &local_err, false); 3232 } 3233 } 3234 3235 if (local_err) { 3236 error_propagate(errp, local_err); 3237 goto out; 3238 } 3239 3240 if (backup->format) { 3241 options = qdict_new(); 3242 qdict_put(options, "driver", qstring_from_str(backup->format)); 3243 } 3244 3245 target_bs = bdrv_open(backup->target, NULL, options, flags, errp); 3246 if (!target_bs) { 3247 goto out; 3248 } 3249 3250 bdrv_set_aio_context(target_bs, aio_context); 3251 3252 if (backup->has_bitmap) { 3253 bmap = bdrv_find_dirty_bitmap(bs, backup->bitmap); 3254 if (!bmap) { 3255 error_setg(errp, "Bitmap '%s' could not be found", backup->bitmap); 3256 bdrv_unref(target_bs); 3257 goto out; 3258 } 3259 } 3260 3261 backup_start(backup->job_id, bs, target_bs, backup->speed, backup->sync, 3262 bmap, backup->compress, backup->on_source_error, 3263 backup->on_target_error, block_job_cb, bs, txn, &local_err); 3264 bdrv_unref(target_bs); 3265 if (local_err != NULL) { 3266 error_propagate(errp, local_err); 3267 goto out; 3268 } 3269 3270 out: 3271 aio_context_release(aio_context); 3272 } 3273 3274 void qmp_drive_backup(DriveBackup *arg, Error **errp) 3275 { 3276 return 
do_drive_backup(arg, NULL, errp); 3277 } 3278 3279 BlockDeviceInfoList *qmp_query_named_block_nodes(Error **errp) 3280 { 3281 return bdrv_named_nodes_list(errp); 3282 } 3283 3284 void do_blockdev_backup(BlockdevBackup *backup, BlockJobTxn *txn, Error **errp) 3285 { 3286 BlockDriverState *bs; 3287 BlockDriverState *target_bs; 3288 Error *local_err = NULL; 3289 AioContext *aio_context; 3290 3291 if (!backup->has_speed) { 3292 backup->speed = 0; 3293 } 3294 if (!backup->has_on_source_error) { 3295 backup->on_source_error = BLOCKDEV_ON_ERROR_REPORT; 3296 } 3297 if (!backup->has_on_target_error) { 3298 backup->on_target_error = BLOCKDEV_ON_ERROR_REPORT; 3299 } 3300 if (!backup->has_job_id) { 3301 backup->job_id = NULL; 3302 } 3303 if (!backup->has_compress) { 3304 backup->compress = false; 3305 } 3306 3307 bs = qmp_get_root_bs(backup->device, errp); 3308 if (!bs) { 3309 return; 3310 } 3311 3312 aio_context = bdrv_get_aio_context(bs); 3313 aio_context_acquire(aio_context); 3314 3315 target_bs = bdrv_lookup_bs(backup->target, backup->target, errp); 3316 if (!target_bs) { 3317 goto out; 3318 } 3319 3320 if (bdrv_get_aio_context(target_bs) != aio_context) { 3321 if (!bdrv_has_blk(target_bs)) { 3322 /* The target BDS is not attached, we can safely move it to another 3323 * AioContext. */ 3324 bdrv_set_aio_context(target_bs, aio_context); 3325 } else { 3326 error_setg(errp, "Target is attached to a different thread from " 3327 "source."); 3328 goto out; 3329 } 3330 } 3331 backup_start(backup->job_id, bs, target_bs, backup->speed, backup->sync, 3332 NULL, backup->compress, backup->on_source_error, 3333 backup->on_target_error, block_job_cb, bs, txn, &local_err); 3334 if (local_err != NULL) { 3335 error_propagate(errp, local_err); 3336 } 3337 out: 3338 aio_context_release(aio_context); 3339 } 3340 3341 void qmp_blockdev_backup(BlockdevBackup *arg, Error **errp) 3342 { 3343 do_blockdev_backup(arg, NULL, errp); 3344 } 3345 3346 /* Parameter check and block job starting for drive mirroring. 3347 * Caller should hold @device and @target's aio context (must be the same). 
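* This is the common back end shared by qmp_drive_mirror() and qmp_blockdev_mirror() below.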
3348 */ 3349 static void blockdev_mirror_common(const char *job_id, BlockDriverState *bs, 3350 BlockDriverState *target, 3351 bool has_replaces, const char *replaces, 3352 enum MirrorSyncMode sync, 3353 BlockMirrorBackingMode backing_mode, 3354 bool has_speed, int64_t speed, 3355 bool has_granularity, uint32_t granularity, 3356 bool has_buf_size, int64_t buf_size, 3357 bool has_on_source_error, 3358 BlockdevOnError on_source_error, 3359 bool has_on_target_error, 3360 BlockdevOnError on_target_error, 3361 bool has_unmap, bool unmap, 3362 Error **errp) 3363 { 3364 3365 if (!has_speed) { 3366 speed = 0; 3367 } 3368 if (!has_on_source_error) { 3369 on_source_error = BLOCKDEV_ON_ERROR_REPORT; 3370 } 3371 if (!has_on_target_error) { 3372 on_target_error = BLOCKDEV_ON_ERROR_REPORT; 3373 } 3374 if (!has_granularity) { 3375 granularity = 0; 3376 } 3377 if (!has_buf_size) { 3378 buf_size = 0; 3379 } 3380 if (!has_unmap) { 3381 unmap = true; 3382 } 3383 3384 if (granularity != 0 && (granularity < 512 || granularity > 1048576 * 64)) { 3385 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "granularity", 3386 "a value in range [512B, 64MB]"); 3387 return; 3388 } 3389 if (granularity & (granularity - 1)) { 3390 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "granularity", 3391 "power of 2"); 3392 return; 3393 } 3394 3395 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_MIRROR_SOURCE, errp)) { 3396 return; 3397 } 3398 if (bdrv_op_is_blocked(target, BLOCK_OP_TYPE_MIRROR_TARGET, errp)) { 3399 return; 3400 } 3401 3402 if (!bs->backing && sync == MIRROR_SYNC_MODE_TOP) { 3403 sync = MIRROR_SYNC_MODE_FULL; 3404 } 3405 3406 /* Pass the node name to replace on to mirror_start(): the coupling stays loose, 3407 * and the job can check whether the node still exists at mirror completion. 3408 */ 3409 mirror_start(job_id, bs, target, 3410 has_replaces ? replaces : NULL, 3411 speed, granularity, buf_size, sync, backing_mode, 3412 on_source_error, on_target_error, unmap, 3413 block_job_cb, bs, errp); 3414 } 3415 3416 void qmp_drive_mirror(DriveMirror *arg, Error **errp) 3417 { 3418 BlockDriverState *bs; 3419 BlockDriverState *source, *target_bs; 3420 AioContext *aio_context; 3421 BlockMirrorBackingMode backing_mode; 3422 Error *local_err = NULL; 3423 QDict *options = NULL; 3424 int flags; 3425 int64_t size; 3426 const char *format = arg->format; 3427 3428 bs = qmp_get_root_bs(arg->device, errp); 3429 if (!bs) { 3430 return; 3431 } 3432 3433 aio_context = bdrv_get_aio_context(bs); 3434 aio_context_acquire(aio_context); 3435 3436 if (!arg->has_mode) { 3437 arg->mode = NEW_IMAGE_MODE_ABSOLUTE_PATHS; 3438 } 3439 3440 if (!arg->has_format) { 3441 format = (arg->mode == NEW_IMAGE_MODE_EXISTING 3442 ?
NULL : bs->drv->format_name); 3443 } 3444 3445 flags = bs->open_flags | BDRV_O_RDWR; 3446 source = backing_bs(bs); 3447 if (!source && arg->sync == MIRROR_SYNC_MODE_TOP) { 3448 arg->sync = MIRROR_SYNC_MODE_FULL; 3449 } 3450 if (arg->sync == MIRROR_SYNC_MODE_NONE) { 3451 source = bs; 3452 } 3453 3454 size = bdrv_getlength(bs); 3455 if (size < 0) { 3456 error_setg_errno(errp, -size, "bdrv_getlength failed"); 3457 goto out; 3458 } 3459 3460 if (arg->has_replaces) { 3461 BlockDriverState *to_replace_bs; 3462 AioContext *replace_aio_context; 3463 int64_t replace_size; 3464 3465 if (!arg->has_node_name) { 3466 error_setg(errp, "a node-name must be provided when replacing a" 3467 " named node of the graph"); 3468 goto out; 3469 } 3470 3471 to_replace_bs = check_to_replace_node(bs, arg->replaces, &local_err); 3472 3473 if (!to_replace_bs) { 3474 error_propagate(errp, local_err); 3475 goto out; 3476 } 3477 3478 replace_aio_context = bdrv_get_aio_context(to_replace_bs); 3479 aio_context_acquire(replace_aio_context); 3480 replace_size = bdrv_getlength(to_replace_bs); 3481 aio_context_release(replace_aio_context); 3482 3483 if (size != replace_size) { 3484 error_setg(errp, "cannot replace image with a mirror image of " 3485 "different size"); 3486 goto out; 3487 } 3488 } 3489 3490 if (arg->mode == NEW_IMAGE_MODE_ABSOLUTE_PATHS) { 3491 backing_mode = MIRROR_SOURCE_BACKING_CHAIN; 3492 } else { 3493 backing_mode = MIRROR_OPEN_BACKING_CHAIN; 3494 } 3495 3496 if ((arg->sync == MIRROR_SYNC_MODE_FULL || !source) 3497 && arg->mode != NEW_IMAGE_MODE_EXISTING) 3498 { 3499 /* create new image w/o backing file */ 3500 assert(format); 3501 bdrv_img_create(arg->target, format, 3502 NULL, NULL, NULL, size, flags, &local_err, false); 3503 } else { 3504 switch (arg->mode) { 3505 case NEW_IMAGE_MODE_EXISTING: 3506 break; 3507 case NEW_IMAGE_MODE_ABSOLUTE_PATHS: 3508 /* create new image with backing file */ 3509 bdrv_img_create(arg->target, format, 3510 source->filename, 3511 source->drv->format_name, 3512 NULL, size, flags, &local_err, false); 3513 break; 3514 default: 3515 abort(); 3516 } 3517 } 3518 3519 if (local_err) { 3520 error_propagate(errp, local_err); 3521 goto out; 3522 } 3523 3524 options = qdict_new(); 3525 if (arg->has_node_name) { 3526 qdict_put(options, "node-name", qstring_from_str(arg->node_name)); 3527 } 3528 if (format) { 3529 qdict_put(options, "driver", qstring_from_str(format)); 3530 } 3531 3532 /* Mirroring takes care of copy-on-write using the source's backing 3533 * file. 3534 */ 3535 target_bs = bdrv_open(arg->target, NULL, options, 3536 flags | BDRV_O_NO_BACKING, errp); 3537 if (!target_bs) { 3538 goto out; 3539 } 3540 3541 bdrv_set_aio_context(target_bs, aio_context); 3542 3543 blockdev_mirror_common(arg->has_job_id ? 
arg->job_id : NULL, bs, target_bs, 3544 arg->has_replaces, arg->replaces, arg->sync, 3545 backing_mode, arg->has_speed, arg->speed, 3546 arg->has_granularity, arg->granularity, 3547 arg->has_buf_size, arg->buf_size, 3548 arg->has_on_source_error, arg->on_source_error, 3549 arg->has_on_target_error, arg->on_target_error, 3550 arg->has_unmap, arg->unmap, 3551 &local_err); 3552 bdrv_unref(target_bs); 3553 error_propagate(errp, local_err); 3554 out: 3555 aio_context_release(aio_context); 3556 } 3557 3558 void qmp_blockdev_mirror(bool has_job_id, const char *job_id, 3559 const char *device, const char *target, 3560 bool has_replaces, const char *replaces, 3561 MirrorSyncMode sync, 3562 bool has_speed, int64_t speed, 3563 bool has_granularity, uint32_t granularity, 3564 bool has_buf_size, int64_t buf_size, 3565 bool has_on_source_error, 3566 BlockdevOnError on_source_error, 3567 bool has_on_target_error, 3568 BlockdevOnError on_target_error, 3569 Error **errp) 3570 { 3571 BlockDriverState *bs; 3572 BlockDriverState *target_bs; 3573 AioContext *aio_context; 3574 BlockMirrorBackingMode backing_mode = MIRROR_LEAVE_BACKING_CHAIN; 3575 Error *local_err = NULL; 3576 3577 bs = qmp_get_root_bs(device, errp); 3578 if (!bs) { 3579 return; 3580 } 3581 3582 target_bs = bdrv_lookup_bs(target, target, errp); 3583 if (!target_bs) { 3584 return; 3585 } 3586 3587 aio_context = bdrv_get_aio_context(bs); 3588 aio_context_acquire(aio_context); 3589 3590 bdrv_set_aio_context(target_bs, aio_context); 3591 3592 blockdev_mirror_common(has_job_id ? job_id : NULL, bs, target_bs, 3593 has_replaces, replaces, sync, backing_mode, 3594 has_speed, speed, 3595 has_granularity, granularity, 3596 has_buf_size, buf_size, 3597 has_on_source_error, on_source_error, 3598 has_on_target_error, on_target_error, 3599 true, true, 3600 &local_err); 3601 error_propagate(errp, local_err); 3602 3603 aio_context_release(aio_context); 3604 } 3605 3606 /* Get a block job using its ID and acquire its AioContext */ 3607 static BlockJob *find_block_job(const char *id, AioContext **aio_context, 3608 Error **errp) 3609 { 3610 BlockJob *job; 3611 3612 assert(id != NULL); 3613 3614 *aio_context = NULL; 3615 3616 job = block_job_get(id); 3617 3618 if (!job) { 3619 error_set(errp, ERROR_CLASS_DEVICE_NOT_ACTIVE, 3620 "Block job '%s' not found", id); 3621 return NULL; 3622 } 3623 3624 *aio_context = blk_get_aio_context(job->blk); 3625 aio_context_acquire(*aio_context); 3626 3627 return job; 3628 } 3629 3630 void qmp_block_job_set_speed(const char *device, int64_t speed, Error **errp) 3631 { 3632 AioContext *aio_context; 3633 BlockJob *job = find_block_job(device, &aio_context, errp); 3634 3635 if (!job) { 3636 return; 3637 } 3638 3639 block_job_set_speed(job, speed, errp); 3640 aio_context_release(aio_context); 3641 } 3642 3643 void qmp_block_job_cancel(const char *device, 3644 bool has_force, bool force, Error **errp) 3645 { 3646 AioContext *aio_context; 3647 BlockJob *job = find_block_job(device, &aio_context, errp); 3648 3649 if (!job) { 3650 return; 3651 } 3652 3653 if (!has_force) { 3654 force = false; 3655 } 3656 3657 if (job->user_paused && !force) { 3658 error_setg(errp, "The block job for device '%s' is currently paused", 3659 device); 3660 goto out; 3661 } 3662 3663 trace_qmp_block_job_cancel(job); 3664 block_job_cancel(job); 3665 out: 3666 aio_context_release(aio_context); 3667 } 3668 3669 void qmp_block_job_pause(const char *device, Error **errp) 3670 { 3671 AioContext *aio_context; 3672 BlockJob *job = find_block_job(device, &aio_context, 
errp); 3673 3674 if (!job || job->user_paused) { 3675 return; 3676 } 3677 3678 job->user_paused = true; 3679 trace_qmp_block_job_pause(job); 3680 block_job_pause(job); 3681 aio_context_release(aio_context); 3682 } 3683 3684 void qmp_block_job_resume(const char *device, Error **errp) 3685 { 3686 AioContext *aio_context; 3687 BlockJob *job = find_block_job(device, &aio_context, errp); 3688 3689 if (!job || !job->user_paused) { 3690 return; 3691 } 3692 3693 job->user_paused = false; 3694 trace_qmp_block_job_resume(job); 3695 block_job_iostatus_reset(job); 3696 block_job_resume(job); 3697 aio_context_release(aio_context); 3698 } 3699 3700 void qmp_block_job_complete(const char *device, Error **errp) 3701 { 3702 AioContext *aio_context; 3703 BlockJob *job = find_block_job(device, &aio_context, errp); 3704 3705 if (!job) { 3706 return; 3707 } 3708 3709 trace_qmp_block_job_complete(job); 3710 block_job_complete(job, errp); 3711 aio_context_release(aio_context); 3712 } 3713 3714 void qmp_change_backing_file(const char *device, 3715 const char *image_node_name, 3716 const char *backing_file, 3717 Error **errp) 3718 { 3719 BlockDriverState *bs = NULL; 3720 AioContext *aio_context; 3721 BlockDriverState *image_bs = NULL; 3722 Error *local_err = NULL; 3723 bool ro; 3724 int open_flags; 3725 int ret; 3726 3727 bs = qmp_get_root_bs(device, errp); 3728 if (!bs) { 3729 return; 3730 } 3731 3732 aio_context = bdrv_get_aio_context(bs); 3733 aio_context_acquire(aio_context); 3734 3735 image_bs = bdrv_lookup_bs(NULL, image_node_name, &local_err); 3736 if (local_err) { 3737 error_propagate(errp, local_err); 3738 goto out; 3739 } 3740 3741 if (!image_bs) { 3742 error_setg(errp, "image file not found"); 3743 goto out; 3744 } 3745 3746 if (bdrv_find_base(image_bs) == image_bs) { 3747 error_setg(errp, "not allowing backing file change on an image " 3748 "without a backing file"); 3749 goto out; 3750 } 3751 3752 /* even though we are not necessarily operating on bs, we need it to 3753 * determine if block ops are currently prohibited on the chain */ 3754 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_CHANGE, errp)) { 3755 goto out; 3756 } 3757 3758 /* final sanity check */ 3759 if (!bdrv_chain_contains(bs, image_bs)) { 3760 error_setg(errp, "'%s' and image file are not in the same chain", 3761 device); 3762 goto out; 3763 } 3764 3765 /* if not r/w, reopen to make r/w */ 3766 open_flags = image_bs->open_flags; 3767 ro = bdrv_is_read_only(image_bs); 3768 3769 if (ro) { 3770 bdrv_reopen(image_bs, open_flags | BDRV_O_RDWR, &local_err); 3771 if (local_err) { 3772 error_propagate(errp, local_err); 3773 goto out; 3774 } 3775 } 3776 3777 ret = bdrv_change_backing_file(image_bs, backing_file, 3778 image_bs->drv ? 
image_bs->drv->format_name : ""); 3779 3780 if (ret < 0) { 3781 error_setg_errno(errp, -ret, "Could not change backing file to '%s'", 3782 backing_file); 3783 /* don't exit here, so we can try to restore open flags if 3784 * appropriate */ 3785 } 3786 3787 if (ro) { 3788 bdrv_reopen(image_bs, open_flags, &local_err); 3789 error_propagate(errp, local_err); 3790 } 3791 3792 out: 3793 aio_context_release(aio_context); 3794 } 3795 3796 void hmp_drive_add_node(Monitor *mon, const char *optstr) 3797 { 3798 QemuOpts *opts; 3799 QDict *qdict; 3800 Error *local_err = NULL; 3801 3802 opts = qemu_opts_parse_noisily(&qemu_drive_opts, optstr, false); 3803 if (!opts) { 3804 return; 3805 } 3806 3807 qdict = qemu_opts_to_qdict(opts, NULL); 3808 3809 if (!qdict_get_try_str(qdict, "node-name")) { 3810 QDECREF(qdict); 3811 error_report("'node-name' needs to be specified"); 3812 goto out; 3813 } 3814 3815 BlockDriverState *bs = bds_tree_init(qdict, &local_err); 3816 if (!bs) { 3817 error_report_err(local_err); 3818 goto out; 3819 } 3820 3821 QTAILQ_INSERT_TAIL(&monitor_bdrv_states, bs, monitor_list); 3822 3823 out: 3824 qemu_opts_del(opts); 3825 } 3826 3827 void qmp_blockdev_add(BlockdevOptions *options, Error **errp) 3828 { 3829 BlockDriverState *bs; 3830 QObject *obj; 3831 Visitor *v = qmp_output_visitor_new(&obj); 3832 QDict *qdict; 3833 Error *local_err = NULL; 3834 3835 /* TODO Sort it out in raw-posix and drive_new(): Reject aio=native with 3836 * cache.direct=false instead of silently switching to aio=threads, except 3837 * when called from drive_new(). 3838 * 3839 * For now, simply forbidding the combination for all drivers will do. */ 3840 if (options->has_aio && options->aio == BLOCKDEV_AIO_OPTIONS_NATIVE) { 3841 bool direct = options->has_cache && 3842 options->cache->has_direct && 3843 options->cache->direct; 3844 if (!direct) { 3845 error_setg(errp, "aio=native requires cache.direct=true"); 3846 goto fail; 3847 } 3848 } 3849 3850 visit_type_BlockdevOptions(v, NULL, &options, &local_err); 3851 if (local_err) { 3852 error_propagate(errp, local_err); 3853 goto fail; 3854 } 3855 3856 visit_complete(v, &obj); 3857 qdict = qobject_to_qdict(obj); 3858 3859 qdict_flatten(qdict); 3860 3861 if (!qdict_get_try_str(qdict, "node-name")) { 3862 error_setg(errp, "'node-name' must be specified for the root node"); 3863 goto fail; 3864 } 3865 3866 bs = bds_tree_init(qdict, errp); 3867 if (!bs) { 3868 goto fail; 3869 } 3870 3871 QTAILQ_INSERT_TAIL(&monitor_bdrv_states, bs, monitor_list); 3872 3873 if (bs && bdrv_key_required(bs)) { 3874 QTAILQ_REMOVE(&monitor_bdrv_states, bs, monitor_list); 3875 bdrv_unref(bs); 3876 error_setg(errp, "blockdev-add doesn't support encrypted devices"); 3877 goto fail; 3878 } 3879 3880 fail: 3881 visit_free(v); 3882 } 3883 3884 void qmp_x_blockdev_del(const char *node_name, Error **errp) 3885 { 3886 AioContext *aio_context; 3887 BlockDriverState *bs; 3888 3889 bs = bdrv_find_node(node_name); 3890 if (!bs) { 3891 error_setg(errp, "Cannot find node %s", node_name); 3892 return; 3893 } 3894 if (bdrv_has_blk(bs)) { 3895 error_setg(errp, "Node %s is in use", node_name); 3896 return; 3897 } 3898 aio_context = bdrv_get_aio_context(bs); 3899 aio_context_acquire(aio_context); 3900 3901 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_DRIVE_DEL, errp)) { 3902 goto out; 3903 } 3904 3905 if (!bs->monitor_list.tqe_prev) { 3906 error_setg(errp, "Node %s is not owned by the monitor", 3907 bs->node_name); 3908 goto out; 3909 } 3910 3911 if (bs->refcnt > 1) { 3912 error_setg(errp, "Block device %s is in use", 
3913 bdrv_get_device_or_node_name(bs)); 3914 goto out; 3915 } 3916 3917 QTAILQ_REMOVE(&monitor_bdrv_states, bs, monitor_list); 3918 bdrv_unref(bs); 3919 3920 out: 3921 aio_context_release(aio_context); 3922 } 3923 3924 static BdrvChild *bdrv_find_child(BlockDriverState *parent_bs, 3925 const char *child_name) 3926 { 3927 BdrvChild *child; 3928 3929 QLIST_FOREACH(child, &parent_bs->children, next) { 3930 if (strcmp(child->name, child_name) == 0) { 3931 return child; 3932 } 3933 } 3934 3935 return NULL; 3936 } 3937 3938 void qmp_x_blockdev_change(const char *parent, bool has_child, 3939 const char *child, bool has_node, 3940 const char *node, Error **errp) 3941 { 3942 BlockDriverState *parent_bs, *new_bs = NULL; 3943 BdrvChild *p_child; 3944 3945 parent_bs = bdrv_lookup_bs(parent, parent, errp); 3946 if (!parent_bs) { 3947 return; 3948 } 3949 3950 if (has_child == has_node) { 3951 if (has_child) { 3952 error_setg(errp, "The parameters child and node are in conflict"); 3953 } else { 3954 error_setg(errp, "Either child or node must be specified"); 3955 } 3956 return; 3957 } 3958 3959 if (has_child) { 3960 p_child = bdrv_find_child(parent_bs, child); 3961 if (!p_child) { 3962 error_setg(errp, "Node '%s' does not have child '%s'", 3963 parent, child); 3964 return; 3965 } 3966 bdrv_del_child(parent_bs, p_child, errp); 3967 } 3968 3969 if (has_node) { 3970 new_bs = bdrv_find_node(node); 3971 if (!new_bs) { 3972 error_setg(errp, "Node '%s' not found", node); 3973 return; 3974 } 3975 bdrv_add_child(parent_bs, new_bs, errp); 3976 } 3977 } 3978 3979 BlockJobInfoList *qmp_query_block_jobs(Error **errp) 3980 { 3981 BlockJobInfoList *head = NULL, **p_next = &head; 3982 BlockJob *job; 3983 3984 for (job = block_job_next(NULL); job; job = block_job_next(job)) { 3985 BlockJobInfoList *elem = g_new0(BlockJobInfoList, 1); 3986 AioContext *aio_context = blk_get_aio_context(job->blk); 3987 3988 aio_context_acquire(aio_context); 3989 elem->value = block_job_query(job); 3990 aio_context_release(aio_context); 3991 3992 *p_next = elem; 3993 p_next = &elem->next; 3994 } 3995 3996 return head; 3997 } 3998 3999 QemuOptsList qemu_common_drive_opts = { 4000 .name = "drive", 4001 .head = QTAILQ_HEAD_INITIALIZER(qemu_common_drive_opts.head), 4002 .desc = { 4003 { 4004 .name = "snapshot", 4005 .type = QEMU_OPT_BOOL, 4006 .help = "enable/disable snapshot mode", 4007 },{ 4008 .name = "discard", 4009 .type = QEMU_OPT_STRING, 4010 .help = "discard operation (ignore/off, unmap/on)", 4011 },{ 4012 .name = "aio", 4013 .type = QEMU_OPT_STRING, 4014 .help = "host AIO implementation (threads, native)", 4015 },{ 4016 .name = BDRV_OPT_CACHE_WB, 4017 .type = QEMU_OPT_BOOL, 4018 .help = "Enable writeback mode", 4019 },{ 4020 .name = "format", 4021 .type = QEMU_OPT_STRING, 4022 .help = "disk format (raw, qcow2, ...)", 4023 },{ 4024 .name = "rerror", 4025 .type = QEMU_OPT_STRING, 4026 .help = "read error action", 4027 },{ 4028 .name = "werror", 4029 .type = QEMU_OPT_STRING, 4030 .help = "write error action", 4031 },{ 4032 .name = BDRV_OPT_READ_ONLY, 4033 .type = QEMU_OPT_BOOL, 4034 .help = "open drive file as read-only", 4035 },{ 4036 .name = "throttling.iops-total", 4037 .type = QEMU_OPT_NUMBER, 4038 .help = "limit total I/O operations per second", 4039 },{ 4040 .name = "throttling.iops-read", 4041 .type = QEMU_OPT_NUMBER, 4042 .help = "limit read operations per second", 4043 },{ 4044 .name = "throttling.iops-write", 4045 .type = QEMU_OPT_NUMBER, 4046 .help = "limit write operations per second", 4047 },{ 4048 .name = 
"throttling.bps-total", 4049 .type = QEMU_OPT_NUMBER, 4050 .help = "limit total bytes per second", 4051 },{ 4052 .name = "throttling.bps-read", 4053 .type = QEMU_OPT_NUMBER, 4054 .help = "limit read bytes per second", 4055 },{ 4056 .name = "throttling.bps-write", 4057 .type = QEMU_OPT_NUMBER, 4058 .help = "limit write bytes per second", 4059 },{ 4060 .name = "throttling.iops-total-max", 4061 .type = QEMU_OPT_NUMBER, 4062 .help = "I/O operations burst", 4063 },{ 4064 .name = "throttling.iops-read-max", 4065 .type = QEMU_OPT_NUMBER, 4066 .help = "I/O operations read burst", 4067 },{ 4068 .name = "throttling.iops-write-max", 4069 .type = QEMU_OPT_NUMBER, 4070 .help = "I/O operations write burst", 4071 },{ 4072 .name = "throttling.bps-total-max", 4073 .type = QEMU_OPT_NUMBER, 4074 .help = "total bytes burst", 4075 },{ 4076 .name = "throttling.bps-read-max", 4077 .type = QEMU_OPT_NUMBER, 4078 .help = "total bytes read burst", 4079 },{ 4080 .name = "throttling.bps-write-max", 4081 .type = QEMU_OPT_NUMBER, 4082 .help = "total bytes write burst", 4083 },{ 4084 .name = "throttling.iops-total-max-length", 4085 .type = QEMU_OPT_NUMBER, 4086 .help = "length of the iops-total-max burst period, in seconds", 4087 },{ 4088 .name = "throttling.iops-read-max-length", 4089 .type = QEMU_OPT_NUMBER, 4090 .help = "length of the iops-read-max burst period, in seconds", 4091 },{ 4092 .name = "throttling.iops-write-max-length", 4093 .type = QEMU_OPT_NUMBER, 4094 .help = "length of the iops-write-max burst period, in seconds", 4095 },{ 4096 .name = "throttling.bps-total-max-length", 4097 .type = QEMU_OPT_NUMBER, 4098 .help = "length of the bps-total-max burst period, in seconds", 4099 },{ 4100 .name = "throttling.bps-read-max-length", 4101 .type = QEMU_OPT_NUMBER, 4102 .help = "length of the bps-read-max burst period, in seconds", 4103 },{ 4104 .name = "throttling.bps-write-max-length", 4105 .type = QEMU_OPT_NUMBER, 4106 .help = "length of the bps-write-max burst period, in seconds", 4107 },{ 4108 .name = "throttling.iops-size", 4109 .type = QEMU_OPT_NUMBER, 4110 .help = "when limiting by iops max size of an I/O in bytes", 4111 },{ 4112 .name = "throttling.group", 4113 .type = QEMU_OPT_STRING, 4114 .help = "name of the block throttling group", 4115 },{ 4116 .name = "copy-on-read", 4117 .type = QEMU_OPT_BOOL, 4118 .help = "copy read data from backing file into image file", 4119 },{ 4120 .name = "detect-zeroes", 4121 .type = QEMU_OPT_STRING, 4122 .help = "try to optimize zero writes (off, on, unmap)", 4123 },{ 4124 .name = "stats-account-invalid", 4125 .type = QEMU_OPT_BOOL, 4126 .help = "whether to account for invalid I/O operations " 4127 "in the statistics", 4128 },{ 4129 .name = "stats-account-failed", 4130 .type = QEMU_OPT_BOOL, 4131 .help = "whether to account for failed I/O operations " 4132 "in the statistics", 4133 }, 4134 { /* end of list */ } 4135 }, 4136 }; 4137 4138 static QemuOptsList qemu_root_bds_opts = { 4139 .name = "root-bds", 4140 .head = QTAILQ_HEAD_INITIALIZER(qemu_root_bds_opts.head), 4141 .desc = { 4142 { 4143 .name = "discard", 4144 .type = QEMU_OPT_STRING, 4145 .help = "discard operation (ignore/off, unmap/on)", 4146 },{ 4147 .name = "aio", 4148 .type = QEMU_OPT_STRING, 4149 .help = "host AIO implementation (threads, native)", 4150 },{ 4151 .name = "copy-on-read", 4152 .type = QEMU_OPT_BOOL, 4153 .help = "copy read data from backing file into image file", 4154 },{ 4155 .name = "detect-zeroes", 4156 .type = QEMU_OPT_STRING, 4157 .help = "try to optimize zero writes (off, on, unmap)", 4158 }, 
4159 { /* end of list */ } 4160 }, 4161 }; 4162 4163 QemuOptsList qemu_drive_opts = { 4164 .name = "drive", 4165 .head = QTAILQ_HEAD_INITIALIZER(qemu_drive_opts.head), 4166 .desc = { 4167 /* 4168 * no elements => accept any params 4169 * validation will happen later 4170 */ 4171 { /* end of list */ } 4172 }, 4173 }; 4174