/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "config-host.h"
#include "qemu-common.h"
#include "trace.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "qemu/module.h"
#include "qapi/qmp/qjson.h"
#include "sysemu/sysemu.h"
#include "qemu/notify.h"
#include "block/coroutine.h"
#include "block/qapi.h"
#include "qmp-commands.h"
#include "qemu/timer.h"
#include "qapi-event.h"

#ifdef CONFIG_BSD
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/queue.h>
#ifndef __DragonFly__
#include <sys/disk.h>
#endif
#endif

#ifdef _WIN32
#include <windows.h>
#endif

struct BdrvDirtyBitmap {
    HBitmap *bitmap;
    QLIST_ENTRY(BdrvDirtyBitmap) list;
};

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov);
static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov);
static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                               int64_t sector_num,
                                               QEMUIOVector *qiov,
                                               int nb_sectors,
                                               BdrvRequestFlags flags,
                                               BlockDriverCompletionFunc *cb,
                                               void *opaque,
                                               bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);

static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
    QTAILQ_HEAD_INITIALIZER(bdrv_states);

static QTAILQ_HEAD(, BlockDriverState) graph_bdrv_states =
    QTAILQ_HEAD_INITIALIZER(graph_bdrv_states);

static QLIST_HEAD(, BlockDriver) bdrv_drivers =
    QLIST_HEAD_INITIALIZER(bdrv_drivers);

/* If non-zero, use only whitelisted block drivers */
static int use_bdrv_whitelist;

#ifdef _WIN32
static int is_windows_drive_prefix(const char *filename)
{
    return (((filename[0] >= 'a' && filename[0] <= 'z') ||
             (filename[0] >= 'A' && filename[0] <= 'Z')) &&
            filename[1] == ':');
}

int is_windows_drive(const char *filename)
{
    if (is_windows_drive_prefix(filename) &&
        filename[2] == '\0')
        return 1;
    if (strstart(filename, "\\\\.\\", NULL) ||
        strstart(filename, "//./", NULL))
        return 1;
    return 0;
}
#endif

/* throttling disk I/O limits */
void bdrv_set_io_limits(BlockDriverState *bs,
                        ThrottleConfig *cfg)
{
    int i;

    throttle_config(&bs->throttle_state, cfg);

    for (i = 0; i < 2; i++) {
        qemu_co_enter_next(&bs->throttled_reqs[i]);
    }
}

/* this function drains all throttled I/O requests */
static bool bdrv_start_throttled_reqs(BlockDriverState *bs)
{
    bool drained = false;
    bool enabled = bs->io_limits_enabled;
    int i;

    bs->io_limits_enabled = false;

    for (i = 0; i < 2; i++) {
        while (qemu_co_enter_next(&bs->throttled_reqs[i])) {
            drained = true;
        }
    }

    bs->io_limits_enabled = enabled;

    return drained;
}

void bdrv_io_limits_disable(BlockDriverState *bs)
{
    bs->io_limits_enabled = false;

    bdrv_start_throttled_reqs(bs);

    throttle_destroy(&bs->throttle_state);
}

static void bdrv_throttle_read_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;
    qemu_co_enter_next(&bs->throttled_reqs[0]);
}

static void bdrv_throttle_write_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;
    qemu_co_enter_next(&bs->throttled_reqs[1]);
}

/* should be called before bdrv_set_io_limits if a limit is set */
void bdrv_io_limits_enable(BlockDriverState *bs)
{
    assert(!bs->io_limits_enabled);
    throttle_init(&bs->throttle_state,
                  bdrv_get_aio_context(bs),
                  QEMU_CLOCK_VIRTUAL,
                  bdrv_throttle_read_timer_cb,
                  bdrv_throttle_write_timer_cb,
                  bs);
    bs->io_limits_enabled = true;
}

/* This function makes an I/O wait if needed
 *
 * @bytes:    the number of bytes of the I/O
 * @is_write: is the I/O a write
 */
static void bdrv_io_limits_intercept(BlockDriverState *bs,
                                     unsigned int bytes,
                                     bool is_write)
{
    /* does this I/O have to wait? */
    bool must_wait = throttle_schedule_timer(&bs->throttle_state, is_write);

    /* if it must wait, or any request of this type is already throttled,
     * queue this I/O */
    if (must_wait ||
        !qemu_co_queue_empty(&bs->throttled_reqs[is_write])) {
        qemu_co_queue_wait(&bs->throttled_reqs[is_write]);
    }

    /* the I/O will be executed, do the accounting */
    throttle_account(&bs->throttle_state, is_write, bytes);

    /* if the next request must wait -> do nothing */
    if (throttle_schedule_timer(&bs->throttle_state, is_write)) {
        return;
    }

    /* else queue next request for execution */
    qemu_co_queue_next(&bs->throttled_reqs[is_write]);
}

size_t bdrv_opt_mem_align(BlockDriverState *bs)
{
    if (!bs || !bs->drv) {
        /* 4k should be on the safe side */
        return 4096;
    }

    return bs->bl.opt_mem_alignment;
}
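
/*
 * A minimal usage sketch (not taken from this file; "size" is a
 * placeholder): callers that need O_DIRECT-safe I/O buffers can combine
 * bdrv_opt_mem_align() with qemu_memalign():
 *
 *     size_t align = bdrv_opt_mem_align(bs);
 *     void *buf = qemu_memalign(align, size);
 *     ...
 *     qemu_vfree(buf);
 */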

/* check if the path starts with "<protocol>:" */
static int path_has_protocol(const char *path)
{
    const char *p;

#ifdef _WIN32
    if (is_windows_drive(path) ||
        is_windows_drive_prefix(path)) {
        return 0;
    }
    p = path + strcspn(path, ":/\\");
#else
    p = path + strcspn(path, ":/");
#endif

    return *p == ':';
}

int path_is_absolute(const char *path)
{
#ifdef _WIN32
    /* specific case for names like: "\\.\d:" */
    if (is_windows_drive(path) || is_windows_drive_prefix(path)) {
        return 1;
    }
    return (*path == '/' || *path == '\\');
#else
    return (*path == '/');
#endif
}

/* if filename is absolute, just copy it to dest. Otherwise, build a
   path to it by considering it relative to base_path. URLs are
   supported. */
void path_combine(char *dest, int dest_size,
                  const char *base_path,
                  const char *filename)
{
    const char *p, *p1;
    int len;

    if (dest_size <= 0)
        return;
    if (path_is_absolute(filename)) {
        pstrcpy(dest, dest_size, filename);
    } else {
        p = strchr(base_path, ':');
        if (p)
            p++;
        else
            p = base_path;
        p1 = strrchr(base_path, '/');
#ifdef _WIN32
        {
            const char *p2;
            p2 = strrchr(base_path, '\\');
            if (!p1 || p2 > p1)
                p1 = p2;
        }
#endif
        if (p1)
            p1++;
        else
            p1 = base_path;
        if (p1 > p)
            p = p1;
        len = p - base_path;
        if (len > dest_size - 1)
            len = dest_size - 1;
        memcpy(dest, base_path, len);
        dest[len] = '\0';
        pstrcat(dest, dest_size, filename);
    }
}

void bdrv_get_full_backing_filename(BlockDriverState *bs, char *dest, size_t sz)
{
    if (bs->backing_file[0] == '\0' || path_has_protocol(bs->backing_file)) {
        pstrcpy(dest, sz, bs->backing_file);
    } else {
        path_combine(dest, sz, bs->filename, bs->backing_file);
    }
}

void bdrv_register(BlockDriver *bdrv)
{
    /* Block drivers without coroutine functions need emulation */
    if (!bdrv->bdrv_co_readv) {
        bdrv->bdrv_co_readv = bdrv_co_readv_em;
        bdrv->bdrv_co_writev = bdrv_co_writev_em;

        /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
         * the block driver lacks aio we need to emulate that too.
         */
        if (!bdrv->bdrv_aio_readv) {
            /* add AIO emulation layer */
            bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
            bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
        }
    }

    QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list);
}
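
/*
 * A minimal sketch of the usual registration pattern (bdrv_foo is a
 * placeholder driver object): block drivers call bdrv_register() from a
 * module init function, e.g.
 *
 *     static void bdrv_foo_init(void)
 *     {
 *         bdrv_register(&bdrv_foo);
 *     }
 *     block_init(bdrv_foo_init);
 */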

/* create a new block device (by default it is empty) */
BlockDriverState *bdrv_new(const char *device_name, Error **errp)
{
    BlockDriverState *bs;
    int i;

    if (bdrv_find(device_name)) {
        error_setg(errp, "Device with id '%s' already exists",
                   device_name);
        return NULL;
    }
    if (bdrv_find_node(device_name)) {
        error_setg(errp, "Device with node-name '%s' already exists",
                   device_name);
        return NULL;
    }

    bs = g_malloc0(sizeof(BlockDriverState));
    QLIST_INIT(&bs->dirty_bitmaps);
    pstrcpy(bs->device_name, sizeof(bs->device_name), device_name);
    if (device_name[0] != '\0') {
        QTAILQ_INSERT_TAIL(&bdrv_states, bs, device_list);
    }
    for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
        QLIST_INIT(&bs->op_blockers[i]);
    }
    bdrv_iostatus_disable(bs);
    notifier_list_init(&bs->close_notifiers);
    notifier_with_return_list_init(&bs->before_write_notifiers);
    qemu_co_queue_init(&bs->throttled_reqs[0]);
    qemu_co_queue_init(&bs->throttled_reqs[1]);
    bs->refcnt = 1;
    bs->aio_context = qemu_get_aio_context();

    return bs;
}

void bdrv_add_close_notifier(BlockDriverState *bs, Notifier *notify)
{
    notifier_list_add(&bs->close_notifiers, notify);
}

BlockDriver *bdrv_find_format(const char *format_name)
{
    BlockDriver *drv1;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (!strcmp(drv1->format_name, format_name)) {
            return drv1;
        }
    }
    return NULL;
}

static int bdrv_is_whitelisted(BlockDriver *drv, bool read_only)
{
    static const char *whitelist_rw[] = {
        CONFIG_BDRV_RW_WHITELIST
    };
    static const char *whitelist_ro[] = {
        CONFIG_BDRV_RO_WHITELIST
    };
    const char **p;

    if (!whitelist_rw[0] && !whitelist_ro[0]) {
        return 1;               /* no whitelist, anything goes */
    }

    for (p = whitelist_rw; *p; p++) {
        if (!strcmp(drv->format_name, *p)) {
            return 1;
        }
    }
    if (read_only) {
        for (p = whitelist_ro; *p; p++) {
            if (!strcmp(drv->format_name, *p)) {
                return 1;
            }
        }
    }
    return 0;
}
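
/*
 * Note: the CONFIG_BDRV_RW_WHITELIST/CONFIG_BDRV_RO_WHITELIST macros are
 * expected to expand to NULL-terminated lists of driver-name string
 * literals (generated by configure's --block-drv-rw-whitelist and
 * --block-drv-ro-whitelist options), which is why checking
 * whitelist_rw[0]/whitelist_ro[0] suffices to detect "no whitelist".
 */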

BlockDriver *bdrv_find_whitelisted_format(const char *format_name,
                                          bool read_only)
{
    BlockDriver *drv = bdrv_find_format(format_name);
    return drv && bdrv_is_whitelisted(drv, read_only) ? drv : NULL;
}

typedef struct CreateCo {
    BlockDriver *drv;
    char *filename;
    QemuOpts *opts;
    int ret;
    Error *err;
} CreateCo;

static void coroutine_fn bdrv_create_co_entry(void *opaque)
{
    Error *local_err = NULL;
    int ret;

    CreateCo *cco = opaque;
    assert(cco->drv);

    ret = cco->drv->bdrv_create(cco->filename, cco->opts, &local_err);
    if (local_err) {
        error_propagate(&cco->err, local_err);
    }
    cco->ret = ret;
}

int bdrv_create(BlockDriver *drv, const char *filename,
                QemuOpts *opts, Error **errp)
{
    int ret;

    Coroutine *co;
    CreateCo cco = {
        .drv = drv,
        .filename = g_strdup(filename),
        .opts = opts,
        .ret = NOT_DONE,
        .err = NULL,
    };

    if (!drv->bdrv_create) {
        error_setg(errp, "Driver '%s' does not support image creation",
                   drv->format_name);
        ret = -ENOTSUP;
        goto out;
    }

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_create_co_entry(&cco);
    } else {
        co = qemu_coroutine_create(bdrv_create_co_entry);
        qemu_coroutine_enter(co, &cco);
        while (cco.ret == NOT_DONE) {
            aio_poll(qemu_get_aio_context(), true);
        }
    }

    ret = cco.ret;
    if (ret < 0) {
        if (cco.err) {
            error_propagate(errp, cco.err);
        } else {
            error_setg_errno(errp, -ret, "Could not create image");
        }
    }

out:
    g_free(cco.filename);
    return ret;
}

int bdrv_create_file(const char *filename, QemuOpts *opts, Error **errp)
{
    BlockDriver *drv;
    Error *local_err = NULL;
    int ret;

    drv = bdrv_find_protocol(filename, true);
    if (drv == NULL) {
        error_setg(errp, "Could not find protocol for file '%s'", filename);
        return -ENOENT;
    }

    ret = bdrv_create(drv, filename, opts, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
    }
    return ret;
}
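
/*
 * A minimal usage sketch for bdrv_create(), mirroring what
 * bdrv_append_temp_snapshot() does below (error handling elided; the
 * driver, filename and size are placeholders):
 *
 *     QemuOpts *opts = qemu_opts_create(drv->create_opts, NULL, 0,
 *                                       &error_abort);
 *     qemu_opt_set_number(opts, BLOCK_OPT_SIZE, size);
 *     ret = bdrv_create(drv, "/tmp/test.img", opts, &local_err);
 *     qemu_opts_del(opts);
 */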

void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BlockDriver *drv = bs->drv;
    Error *local_err = NULL;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bs->bl.opt_transfer_length = bs->file->bl.opt_transfer_length;
        bs->bl.opt_mem_alignment = bs->file->bl.opt_mem_alignment;
    } else {
        bs->bl.opt_mem_alignment = 512;
    }

    if (bs->backing_hd) {
        bdrv_refresh_limits(bs->backing_hd, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bs->bl.opt_transfer_length =
            MAX(bs->bl.opt_transfer_length,
                bs->backing_hd->bl.opt_transfer_length);
        bs->bl.opt_mem_alignment =
            MAX(bs->bl.opt_mem_alignment,
                bs->backing_hd->bl.opt_mem_alignment);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
    }
}

/*
 * Create a uniquely-named empty temporary file.
 * Return 0 upon success, otherwise a negative errno value.
 */
int get_tmp_filename(char *filename, int size)
{
#ifdef _WIN32
    char temp_dir[MAX_PATH];
    /* GetTempFileName requires that its output buffer (4th param)
       have length MAX_PATH or greater. */
    assert(size >= MAX_PATH);
    return (GetTempPath(MAX_PATH, temp_dir)
            && GetTempFileName(temp_dir, "qem", 0, filename)
            ? 0 : -GetLastError());
#else
    int fd;
    const char *tmpdir;
    tmpdir = getenv("TMPDIR");
    if (!tmpdir) {
        tmpdir = "/var/tmp";
    }
    if (snprintf(filename, size, "%s/vl.XXXXXX", tmpdir) >= size) {
        return -EOVERFLOW;
    }
    fd = mkstemp(filename);
    if (fd < 0) {
        return -errno;
    }
    if (close(fd) != 0) {
        unlink(filename);
        return -errno;
    }
    return 0;
#endif
}

/*
 * Detect host devices. By convention, /dev/cdrom[N] is always
 * recognized as a host CDROM.
 */
static BlockDriver *find_hdev_driver(const char *filename)
{
    int score_max = 0, score;
    BlockDriver *drv = NULL, *d;

    QLIST_FOREACH(d, &bdrv_drivers, list) {
        if (d->bdrv_probe_device) {
            score = d->bdrv_probe_device(filename);
            if (score > score_max) {
                score_max = score;
                drv = d;
            }
        }
    }

    return drv;
}

BlockDriver *bdrv_find_protocol(const char *filename,
                                bool allow_protocol_prefix)
{
    BlockDriver *drv1;
    char protocol[128];
    int len;
    const char *p;

    /* TODO Drivers without bdrv_file_open must be specified explicitly */

    /*
     * XXX(hch): we really should not let host device detection
     * override an explicit protocol specification, but moving this
     * later breaks access to device names with colons in them.
     * Thanks to the brain-dead persistent naming schemes on udev-
     * based Linux systems those actually are quite common.
     */
    drv1 = find_hdev_driver(filename);
    if (drv1) {
        return drv1;
    }

    if (!path_has_protocol(filename) || !allow_protocol_prefix) {
        return bdrv_find_format("file");
    }

    p = strchr(filename, ':');
    assert(p != NULL);
    len = p - filename;
    if (len > sizeof(protocol) - 1)
        len = sizeof(protocol) - 1;
    memcpy(protocol, filename, len);
    protocol[len] = '\0';
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->protocol_name &&
            !strcmp(drv1->protocol_name, protocol)) {
            return drv1;
        }
    }
    return NULL;
}
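
/*
 * For example, given "nbd://localhost:10809/export", the text before the
 * first ':' ("nbd") is extracted and compared against each registered
 * driver's protocol_name, so the nbd driver would be returned (assuming it
 * is compiled in).
 */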

static int find_image_format(BlockDriverState *bs, const char *filename,
                             BlockDriver **pdrv, Error **errp)
{
    int score, score_max;
    BlockDriver *drv1, *drv;
    uint8_t buf[2048];
    int ret = 0;

    /* Return the raw BlockDriver * to scsi-generic devices or empty drives */
    if (bs->sg || !bdrv_is_inserted(bs) || bdrv_getlength(bs) == 0) {
        drv = bdrv_find_format("raw");
        if (!drv) {
            error_setg(errp, "Could not find raw image format");
            ret = -ENOENT;
        }
        *pdrv = drv;
        return ret;
    }

    ret = bdrv_pread(bs, 0, buf, sizeof(buf));
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not read image for determining its "
                         "format");
        *pdrv = NULL;
        return ret;
    }

    score_max = 0;
    drv = NULL;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->bdrv_probe) {
            score = drv1->bdrv_probe(buf, ret, filename);
            if (score > score_max) {
                score_max = score;
                drv = drv1;
            }
        }
    }
    if (!drv) {
        error_setg(errp, "Could not determine image format: No compatible "
                   "driver found");
        ret = -ENOENT;
    }
    *pdrv = drv;
    return ret;
}

/**
 * Set the current 'total_sectors' value
 * Return 0 on success, -errno on error.
 */
static int refresh_total_sectors(BlockDriverState *bs, int64_t hint)
{
    BlockDriver *drv = bs->drv;

    /* Do not attempt drv->bdrv_getlength() on scsi-generic devices */
    if (bs->sg)
        return 0;

    /* query actual device if possible, otherwise just trust the hint */
    if (drv->bdrv_getlength) {
        int64_t length = drv->bdrv_getlength(bs);
        if (length < 0) {
            return length;
        }
        hint = DIV_ROUND_UP(length, BDRV_SECTOR_SIZE);
    }

    bs->total_sectors = hint;
    return 0;
}

/**
 * Set open flags for a given discard mode
 *
 * Return 0 on success, -1 if the discard mode was invalid.
 */
int bdrv_parse_discard_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_UNMAP;

    if (!strcmp(mode, "off") || !strcmp(mode, "ignore")) {
        /* do nothing */
    } else if (!strcmp(mode, "on") || !strcmp(mode, "unmap")) {
        *flags |= BDRV_O_UNMAP;
    } else {
        return -1;
    }

    return 0;
}

/**
 * Set open flags for a given cache mode
 *
 * Return 0 on success, -1 if the cache mode was invalid.
 */
int bdrv_parse_cache_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_CACHE_MASK;

    if (!strcmp(mode, "off") || !strcmp(mode, "none")) {
        *flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "directsync")) {
        *flags |= BDRV_O_NOCACHE;
    } else if (!strcmp(mode, "writeback")) {
        *flags |= BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "unsafe")) {
        *flags |= BDRV_O_CACHE_WB;
        *flags |= BDRV_O_NO_FLUSH;
    } else if (!strcmp(mode, "writethrough")) {
        /* this is the default */
    } else {
        return -1;
    }

    return 0;
}
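
/*
 * Summary of the resulting open flags per cache mode (derived from the
 * branches above):
 *
 *     writethrough    (default, no flags set)
 *     writeback       BDRV_O_CACHE_WB
 *     none / off      BDRV_O_NOCACHE | BDRV_O_CACHE_WB
 *     directsync      BDRV_O_NOCACHE
 *     unsafe          BDRV_O_CACHE_WB | BDRV_O_NO_FLUSH
 */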

/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have disabled it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}

/*
 * Returns the flags that a temporary snapshot should get, based on the
 * originally requested flags (the originally requested image will have flags
 * like a backing file)
 */
static int bdrv_temp_snapshot_flags(int flags)
{
    return (flags & ~BDRV_O_SNAPSHOT) | BDRV_O_TEMPORARY;
}

/*
 * Returns the flags that bs->file should get, based on the given flags for
 * the parent BDS
 */
static int bdrv_inherited_flags(int flags)
{
    /* Enable protocol handling, disable format probing for bs->file */
    flags |= BDRV_O_PROTOCOL;

    /* Our block drivers take care to send flushes and respect unmap policy,
     * so we can enable both unconditionally on lower layers. */
    flags |= BDRV_O_CACHE_WB | BDRV_O_UNMAP;

    /* Clear flags that only apply to the top layer */
    flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING | BDRV_O_COPY_ON_READ);

    return flags;
}

/*
 * Returns the flags that bs->backing_hd should get, based on the given flags
 * for the parent BDS
 */
static int bdrv_backing_flags(int flags)
{
    /* backing files are always opened read-only */
    flags &= ~(BDRV_O_RDWR | BDRV_O_COPY_ON_READ);

    /* snapshot=on is handled on the top layer */
    flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_TEMPORARY);

    return flags;
}

static int bdrv_open_flags(BlockDriverState *bs, int flags)
{
    int open_flags = flags | BDRV_O_CACHE_WB;

    /*
     * Clear flags that are internal to the block layer before opening the
     * image.
     */
    open_flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING | BDRV_O_PROTOCOL);

    /*
     * Snapshots should be writable.
     */
    if (flags & BDRV_O_TEMPORARY) {
        open_flags |= BDRV_O_RDWR;
    }

    return open_flags;
}

static void bdrv_assign_node_name(BlockDriverState *bs,
                                  const char *node_name,
                                  Error **errp)
{
    if (!node_name) {
        return;
    }

    /* empty string node name is invalid */
    if (node_name[0] == '\0') {
        error_setg(errp, "Empty node name");
        return;
    }

    /* takes care of avoiding namespace collisions with device ids */
    if (bdrv_find(node_name)) {
        error_setg(errp, "node-name=%s is conflicting with a device id",
                   node_name);
        return;
    }

    /* takes care of avoiding duplicate node names */
    if (bdrv_find_node(node_name)) {
        error_setg(errp, "Duplicate node name");
        return;
    }

    /* copy node name into the bs and insert it into the graph list */
    pstrcpy(bs->node_name, sizeof(bs->node_name), node_name);
    QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs, node_list);
}

/*
 * Common part for opening disk images and files
 *
 * Removes all processed options from *options.
 */
static int bdrv_open_common(BlockDriverState *bs, BlockDriverState *file,
    QDict *options, int flags, BlockDriver *drv, Error **errp)
{
    int ret, open_flags;
    const char *filename;
    const char *node_name = NULL;
    Error *local_err = NULL;

    assert(drv != NULL);
    assert(bs->file == NULL);
    assert(options != NULL && bs->options != options);

    if (file != NULL) {
        filename = file->filename;
    } else {
        filename = qdict_get_try_str(options, "filename");
    }

    if (drv->bdrv_needs_filename && !filename) {
        error_setg(errp, "The '%s' block driver requires a file name",
                   drv->format_name);
        return -EINVAL;
    }

    trace_bdrv_open_common(bs, filename ?: "", flags, drv->format_name);

    node_name = qdict_get_try_str(options, "node-name");
    bdrv_assign_node_name(bs, node_name, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return -EINVAL;
    }
    qdict_del(options, "node-name");

    /* bdrv_open() was called directly with a protocol driver as drv. That
     * layer is already opened, so assign it to bs (while file becomes a
     * closed BlockDriverState) and return immediately. */
    if (file != NULL && drv->bdrv_file_open) {
        bdrv_swap(file, bs);
        return 0;
    }

    bs->open_flags = flags;
    bs->guest_block_size = 512;
    bs->request_alignment = 512;
    bs->zero_beyond_eof = true;
    open_flags = bdrv_open_flags(bs, flags);
    bs->read_only = !(open_flags & BDRV_O_RDWR);
    bs->growable = !!(flags & BDRV_O_PROTOCOL);

    if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv, bs->read_only)) {
        error_setg(errp,
                   !bs->read_only && bdrv_is_whitelisted(drv, true)
                   ? "Driver '%s' can only be used for read-only devices"
                   : "Driver '%s' is not whitelisted",
                   drv->format_name);
        return -ENOTSUP;
    }

    assert(bs->copy_on_read == 0); /* bdrv_new() and bdrv_close() make it so */
    if (flags & BDRV_O_COPY_ON_READ) {
        if (!bs->read_only) {
            bdrv_enable_copy_on_read(bs);
        } else {
            error_setg(errp, "Can't use copy-on-read on read-only device");
            return -EINVAL;
        }
    }

    if (filename != NULL) {
        pstrcpy(bs->filename, sizeof(bs->filename), filename);
    } else {
        bs->filename[0] = '\0';
    }

    bs->drv = drv;
    bs->opaque = g_malloc0(drv->instance_size);

    bs->enable_write_cache = !!(flags & BDRV_O_CACHE_WB);

    /* Open the image, either directly or using a protocol */
    if (drv->bdrv_file_open) {
        assert(file == NULL);
        assert(!drv->bdrv_needs_filename || filename != NULL);
        ret = drv->bdrv_file_open(bs, options, open_flags, &local_err);
    } else {
        if (file == NULL) {
            error_setg(errp, "Can't use '%s' as a block driver for the "
                       "protocol level", drv->format_name);
            ret = -EINVAL;
            goto free_and_fail;
        }
        bs->file = file;
        ret = drv->bdrv_open(bs, options, open_flags, &local_err);
    }

    if (ret < 0) {
        if (local_err) {
            error_propagate(errp, local_err);
        } else if (bs->filename[0]) {
            error_setg_errno(errp, -ret, "Could not open '%s'", bs->filename);
        } else {
            error_setg_errno(errp, -ret, "Could not open image");
        }
        goto free_and_fail;
    }

    ret = refresh_total_sectors(bs, bs->total_sectors);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not refresh total sector count");
        goto free_and_fail;
    }

    bdrv_refresh_limits(bs, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto free_and_fail;
    }

    assert(bdrv_opt_mem_align(bs) != 0);
    assert((bs->request_alignment != 0) || bs->sg);
    return 0;

free_and_fail:
    bs->file = NULL;
    g_free(bs->opaque);
    bs->opaque = NULL;
    bs->drv = NULL;
    return ret;
}

static QDict *parse_json_filename(const char *filename, Error **errp)
{
    QObject *options_obj;
    QDict *options;
    int ret;

    ret = strstart(filename, "json:", &filename);
    assert(ret);

    options_obj = qobject_from_json(filename);
    if (!options_obj) {
        error_setg(errp, "Could not parse the JSON options");
        return NULL;
    }

    if (qobject_type(options_obj) != QTYPE_QDICT) {
        qobject_decref(options_obj);
        error_setg(errp, "Invalid JSON object given");
        return NULL;
    }

    options = qobject_to_qdict(options_obj);
    qdict_flatten(options);

    return options;
}
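
/*
 * For example (illustrative filename), the pseudo-protocol filename
 *
 *     json:{"driver": "qcow2", "file": {"filename": "test.qcow2"}}
 *
 * is parsed and flattened into an options QDict equivalent to
 *
 *     { "driver": "qcow2", "file.filename": "test.qcow2" }
 */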

/*
 * Fills in default options for opening images and converts the legacy
 * filename/flags pair to option QDict entries.
 */
static int bdrv_fill_options(QDict **options, const char **pfilename, int flags,
                             BlockDriver *drv, Error **errp)
{
    const char *filename = *pfilename;
    const char *drvname;
    bool protocol = flags & BDRV_O_PROTOCOL;
    bool parse_filename = false;
    Error *local_err = NULL;

    /* Parse json: pseudo-protocol */
    if (filename && g_str_has_prefix(filename, "json:")) {
        QDict *json_options = parse_json_filename(filename, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return -EINVAL;
        }

        /* Options given in the filename have lower priority than options
         * specified directly */
        qdict_join(*options, json_options, false);
        QDECREF(json_options);
        *pfilename = filename = NULL;
    }

    /* Fetch the file name from the options QDict if necessary */
    if (protocol && filename) {
        if (!qdict_haskey(*options, "filename")) {
            qdict_put(*options, "filename", qstring_from_str(filename));
            parse_filename = true;
        } else {
            error_setg(errp, "Can't specify 'file' and 'filename' options at "
                       "the same time");
            return -EINVAL;
        }
    }

    /* Find the right block driver */
    filename = qdict_get_try_str(*options, "filename");
    drvname = qdict_get_try_str(*options, "driver");

    if (drv) {
        if (drvname) {
            error_setg(errp, "Driver specified twice");
            return -EINVAL;
        }
        drvname = drv->format_name;
        qdict_put(*options, "driver", qstring_from_str(drvname));
    } else {
        if (!drvname && protocol) {
            if (filename) {
                drv = bdrv_find_protocol(filename, parse_filename);
                if (!drv) {
                    error_setg(errp, "Unknown protocol");
                    return -EINVAL;
                }

                drvname = drv->format_name;
                qdict_put(*options, "driver", qstring_from_str(drvname));
            } else {
                error_setg(errp, "Must specify either driver or file");
                return -EINVAL;
            }
        } else if (drvname) {
            drv = bdrv_find_format(drvname);
            if (!drv) {
                error_setg(errp, "Unknown driver '%s'", drvname);
                return -ENOENT;
            }
        }
    }

    assert(drv || !protocol);

    /* Driver-specific filename parsing */
    if (drv && drv->bdrv_parse_filename && parse_filename) {
        drv->bdrv_parse_filename(filename, *options, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return -EINVAL;
        }

        if (!drv->bdrv_needs_filename) {
            qdict_del(*options, "filename");
        }
    }

    return 0;
}

void bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd)
{
    if (bs->backing_hd) {
        assert(bs->backing_blocker);
        bdrv_op_unblock_all(bs->backing_hd, bs->backing_blocker);
    } else if (backing_hd) {
        error_setg(&bs->backing_blocker,
                   "device is used as backing hd of '%s'",
                   bs->device_name);
    }

    bs->backing_hd = backing_hd;
    if (!backing_hd) {
        error_free(bs->backing_blocker);
        bs->backing_blocker = NULL;
        goto out;
    }
    bs->open_flags &= ~BDRV_O_NO_BACKING;
    pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_hd->filename);
    pstrcpy(bs->backing_format, sizeof(bs->backing_format),
            backing_hd->drv ? backing_hd->drv->format_name : "");

    bdrv_op_block_all(bs->backing_hd, bs->backing_blocker);
    /* Otherwise we won't be able to commit due to check in bdrv_commit */
    bdrv_op_unblock(bs->backing_hd, BLOCK_OP_TYPE_COMMIT,
                    bs->backing_blocker);
out:
    bdrv_refresh_limits(bs, NULL);
}

/*
 * Opens the backing file for a BlockDriverState if not yet open
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict is transferred to this
 * function (even on failure), so if the caller intends to reuse the
 * dictionary, it needs to use QINCREF() before calling
 * bdrv_open_backing_file().
 */
int bdrv_open_backing_file(BlockDriverState *bs, QDict *options, Error **errp)
{
    char *backing_filename = g_malloc0(PATH_MAX);
    int ret = 0;
    BlockDriver *back_drv = NULL;
    BlockDriverState *backing_hd;
    Error *local_err = NULL;

    if (bs->backing_hd != NULL) {
        QDECREF(options);
        goto free_exit;
    }

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    bs->open_flags &= ~BDRV_O_NO_BACKING;
    if (qdict_haskey(options, "file.filename")) {
        backing_filename[0] = '\0';
    } else if (bs->backing_file[0] == '\0' && qdict_size(options) == 0) {
        QDECREF(options);
        goto free_exit;
    } else {
        bdrv_get_full_backing_filename(bs, backing_filename, PATH_MAX);
    }

    if (!bs->drv || !bs->drv->supports_backing) {
        ret = -EINVAL;
        error_setg(errp, "Driver doesn't support backing files");
        QDECREF(options);
        goto free_exit;
    }

    backing_hd = bdrv_new("", errp);

    if (bs->backing_format[0] != '\0') {
        back_drv = bdrv_find_format(bs->backing_format);
    }

    assert(bs->backing_hd == NULL);
    ret = bdrv_open(&backing_hd,
                    *backing_filename ? backing_filename : NULL, NULL, options,
                    bdrv_backing_flags(bs->open_flags), back_drv, &local_err);
    if (ret < 0) {
        bdrv_unref(backing_hd);
        backing_hd = NULL;
        bs->open_flags |= BDRV_O_NO_BACKING;
        error_setg(errp, "Could not open backing file: %s",
                   error_get_pretty(local_err));
        error_free(local_err);
        goto free_exit;
    }
    bdrv_set_backing_hd(bs, backing_hd);

free_exit:
    g_free(backing_filename);
    return ret;
}

/*
 * Opens a disk image whose options are given as BlockdevRef in another block
 * device's options.
 *
 * If allow_none is true, no image will be opened if filename is NULL and no
 * BlockdevRef is given. *pbs will remain unchanged and 0 will be returned.
 *
 * bdref_key specifies the key for the image's BlockdevRef in the options
 * QDict. That QDict has to be flattened; therefore, if the BlockdevRef is a
 * QDict itself, all options starting with "${bdref_key}." are considered part
 * of the BlockdevRef.
 *
 * The BlockdevRef will be removed from the options QDict.
 *
 * To conform with the behavior of bdrv_open(), *pbs has to be NULL.
 */
int bdrv_open_image(BlockDriverState **pbs, const char *filename,
                    QDict *options, const char *bdref_key, int flags,
                    bool allow_none, Error **errp)
{
    QDict *image_options;
    int ret;
    char *bdref_key_dot;
    const char *reference;

    assert(pbs);
    assert(*pbs == NULL);

    bdref_key_dot = g_strdup_printf("%s.", bdref_key);
    qdict_extract_subqdict(options, &image_options, bdref_key_dot);
    g_free(bdref_key_dot);

    reference = qdict_get_try_str(options, bdref_key);
    if (!filename && !reference && !qdict_size(image_options)) {
        if (allow_none) {
            ret = 0;
        } else {
            error_setg(errp, "A block device must be specified for \"%s\"",
                       bdref_key);
            ret = -EINVAL;
        }
        QDECREF(image_options);
        goto done;
    }

    ret = bdrv_open(pbs, filename, reference, image_options, flags, NULL, errp);

done:
    qdict_del(options, bdref_key);
    return ret;
}
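
/*
 * For example (illustrative values), with a flattened options QDict
 * containing
 *
 *     { "file.driver": "file", "file.filename": "test.img" }
 *
 * a call such as
 *
 *     bdrv_open_image(&file, NULL, options, "file", flags, false, &errp);
 *
 * extracts the "file.*" keys into a sub-dict and opens that image with it.
 */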

int bdrv_append_temp_snapshot(BlockDriverState *bs, int flags, Error **errp)
{
    /* TODO: extra byte is a hack to ensure MAX_PATH space on Windows. */
    char *tmp_filename = g_malloc0(PATH_MAX + 1);
    int64_t total_size;
    BlockDriver *bdrv_qcow2;
    QemuOpts *opts = NULL;
    QDict *snapshot_options;
    BlockDriverState *bs_snapshot;
    Error *local_err = NULL;
    int ret;

    /* if snapshot, we create a temporary backing file and open it
       instead of opening 'filename' directly */

    /* Get the required size from the image */
    total_size = bdrv_getlength(bs);
    if (total_size < 0) {
        ret = total_size;
        error_setg_errno(errp, -total_size, "Could not get image size");
        goto out;
    }

    /* Create the temporary image */
    ret = get_tmp_filename(tmp_filename, PATH_MAX + 1);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not get temporary filename");
        goto out;
    }

    bdrv_qcow2 = bdrv_find_format("qcow2");
    opts = qemu_opts_create(bdrv_qcow2->create_opts, NULL, 0,
                            &error_abort);
    qemu_opt_set_number(opts, BLOCK_OPT_SIZE, total_size);
    ret = bdrv_create(bdrv_qcow2, tmp_filename, opts, &local_err);
    qemu_opts_del(opts);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not create temporary overlay "
                         "'%s': %s", tmp_filename,
                         error_get_pretty(local_err));
        error_free(local_err);
        goto out;
    }

    /* Prepare a new options QDict for the temporary file */
    snapshot_options = qdict_new();
    qdict_put(snapshot_options, "file.driver",
              qstring_from_str("file"));
    qdict_put(snapshot_options, "file.filename",
              qstring_from_str(tmp_filename));

    bs_snapshot = bdrv_new("", &error_abort);

    ret = bdrv_open(&bs_snapshot, NULL, NULL, snapshot_options,
                    flags, bdrv_qcow2, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        goto out;
    }

    bdrv_append(bs_snapshot, bs);

out:
    g_free(tmp_filename);
    return ret;
}

/*
 * Opens a disk image (raw, qcow2, vmdk, ...)
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict belongs to the block layer
 * after the call (even on failure), so if the caller intends to reuse the
 * dictionary, it needs to use QINCREF() before calling bdrv_open.
 *
 * If *pbs is NULL, a new BDS will be created with a pointer to it stored
 * there. If it is not NULL, the referenced BDS will be reused.
 *
 * The reference parameter may be used to specify an existing block device
 * which should be opened. If specified, neither options nor a filename may be
 * given, nor can an existing BDS be reused (that is, *pbs has to be NULL).
 */
int bdrv_open(BlockDriverState **pbs, const char *filename,
              const char *reference, QDict *options, int flags,
              BlockDriver *drv, Error **errp)
{
    int ret;
    BlockDriverState *file = NULL, *bs;
    const char *drvname;
    Error *local_err = NULL;
    int snapshot_flags = 0;

    assert(pbs);

    if (reference) {
        bool options_non_empty = options ? qdict_size(options) : false;
        QDECREF(options);

        if (*pbs) {
            error_setg(errp, "Cannot reuse an existing BDS when referencing "
                       "another block device");
            return -EINVAL;
        }

        if (filename || options_non_empty) {
            error_setg(errp, "Cannot reference an existing block device with "
                       "additional options or a new filename");
            return -EINVAL;
        }

        bs = bdrv_lookup_bs(reference, reference, errp);
        if (!bs) {
            return -ENODEV;
        }
        bdrv_ref(bs);
        *pbs = bs;
        return 0;
    }

    if (*pbs) {
        bs = *pbs;
    } else {
        bs = bdrv_new("", &error_abort);
    }

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    ret = bdrv_fill_options(&options, &filename, flags, drv, &local_err);
    if (local_err) {
        goto fail;
    }

    /* Find the right image format driver */
    drv = NULL;
    drvname = qdict_get_try_str(options, "driver");
    if (drvname) {
        drv = bdrv_find_format(drvname);
        qdict_del(options, "driver");
        if (!drv) {
            error_setg(errp, "Unknown driver: '%s'", drvname);
            ret = -EINVAL;
            goto fail;
        }
    }

    assert(drvname || !(flags & BDRV_O_PROTOCOL));
    if (drv && !drv->bdrv_file_open) {
        /* If the user explicitly wants a format driver here, we'll need to add
         * another layer for the protocol in bs->file */
        flags &= ~BDRV_O_PROTOCOL;
    }

    bs->options = options;
    options = qdict_clone_shallow(options);

    /* Open image file without format layer */
    if ((flags & BDRV_O_PROTOCOL) == 0) {
        if (flags & BDRV_O_RDWR) {
            flags |= BDRV_O_ALLOW_RDWR;
        }
        if (flags & BDRV_O_SNAPSHOT) {
            snapshot_flags = bdrv_temp_snapshot_flags(flags);
            flags = bdrv_backing_flags(flags);
        }

        assert(file == NULL);
        ret = bdrv_open_image(&file, filename, options, "file",
                              bdrv_inherited_flags(flags),
                              true, &local_err);
        if (ret < 0) {
            goto fail;
        }
    }

    /* Image format probing */
    if (!drv && file) {
        ret = find_image_format(file, filename, &drv, &local_err);
        if (ret < 0) {
            goto fail;
        }
    } else if (!drv) {
        error_setg(errp, "Must specify either driver or file");
        ret = -EINVAL;
        goto fail;
    }

    /* Open the image */
    ret = bdrv_open_common(bs, file, options, flags, drv, &local_err);
    if (ret < 0) {
        goto fail;
    }

    if (file && (bs->file != file)) {
        bdrv_unref(file);
        file = NULL;
    }

    /* If there is a backing file, use it */
    if ((flags & BDRV_O_NO_BACKING) == 0) {
        QDict *backing_options;

        qdict_extract_subqdict(options, &backing_options, "backing.");
        ret = bdrv_open_backing_file(bs, backing_options, &local_err);
        if (ret < 0) {
            goto close_and_fail;
        }
    }

    /* For snapshot=on, create a temporary qcow2 overlay. bs points to the
     * temporary snapshot afterwards. */
    if (snapshot_flags) {
        ret = bdrv_append_temp_snapshot(bs, snapshot_flags, &local_err);
        if (local_err) {
            goto close_and_fail;
        }
    }

    /* Check if any unknown options were used */
    if (options && (qdict_size(options) != 0)) {
        const QDictEntry *entry = qdict_first(options);
        if (flags & BDRV_O_PROTOCOL) {
            error_setg(errp, "Block protocol '%s' doesn't support the option "
                       "'%s'", drv->format_name, entry->key);
        } else {
            error_setg(errp, "Block format '%s' used by device '%s' doesn't "
                       "support the option '%s'", drv->format_name,
                       bs->device_name, entry->key);
        }

        ret = -EINVAL;
        goto close_and_fail;
    }

    if (!bdrv_key_required(bs)) {
        bdrv_dev_change_media_cb(bs, true);
    } else if (!runstate_check(RUN_STATE_PRELAUNCH)
               && !runstate_check(RUN_STATE_INMIGRATE)
               && !runstate_check(RUN_STATE_PAUSED)) { /* HACK */
        error_setg(errp,
                   "Guest must be stopped for opening of encrypted image");
        ret = -EBUSY;
        goto close_and_fail;
    }

    QDECREF(options);
    *pbs = bs;
    return 0;

fail:
    if (file != NULL) {
        bdrv_unref(file);
    }
    QDECREF(bs->options);
    QDECREF(options);
    bs->options = NULL;
    if (!*pbs) {
        /* If *pbs is NULL, a new BDS has been created in this function and
           needs to be freed now. Otherwise, it does not need to be closed,
           since it has not really been opened yet. */
        bdrv_unref(bs);
    }
    if (local_err) {
        error_propagate(errp, local_err);
    }
    return ret;

close_and_fail:
    /* See fail path, but now the BDS has to be closed in any case */
    if (*pbs) {
        bdrv_close(bs);
    } else {
        bdrv_unref(bs);
    }
    QDECREF(options);
    if (local_err) {
        error_propagate(errp, local_err);
    }
    return ret;
}

typedef struct BlockReopenQueueEntry {
    bool prepared;
    BDRVReopenState state;
    QSIMPLEQ_ENTRY(BlockReopenQueueEntry) entry;
} BlockReopenQueueEntry;

/*
 * Adds a BlockDriverState to a simple queue for an atomic, transactional
 * reopen of multiple devices.
 *
 * bs_queue can either be an existing BlockReopenQueue that has had
 * QSIMPLEQ_INIT already performed, or it may be NULL, in which case a new
 * BlockReopenQueue will be created and initialized. This newly created
 * BlockReopenQueue should be passed back in for subsequent calls that are
 * intended to be of the same atomic 'set'.
 *
 * bs is the BlockDriverState to add to the reopen queue.
 *
 * flags contains the open flags for the associated bs
 *
 * returns a pointer to bs_queue, which is either the newly allocated
 * bs_queue, or the existing bs_queue being used.
 */
BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
                                    BlockDriverState *bs, int flags)
{
    assert(bs != NULL);

    BlockReopenQueueEntry *bs_entry;
    if (bs_queue == NULL) {
        bs_queue = g_new0(BlockReopenQueue, 1);
        QSIMPLEQ_INIT(bs_queue);
    }

    /* bdrv_open() masks this flag out */
    flags &= ~BDRV_O_PROTOCOL;

    if (bs->file) {
        bdrv_reopen_queue(bs_queue, bs->file, bdrv_inherited_flags(flags));
    }

    bs_entry = g_new0(BlockReopenQueueEntry, 1);
    QSIMPLEQ_INSERT_TAIL(bs_queue, bs_entry, entry);

    bs_entry->state.bs = bs;
    bs_entry->state.flags = flags;

    return bs_queue;
}

/*
 * Reopen multiple BlockDriverStates atomically & transactionally.
 *
 * The queue passed in (bs_queue) must have been built up previously
 * via bdrv_reopen_queue().
 *
 * Reopens all BDS specified in the queue, with the appropriate
 * flags. All devices are prepared for reopen, and failure of any
 * device will cause all device changes to be abandoned, and intermediate
 * data cleaned up.
 *
 * If all devices prepare successfully, then the changes are committed
 * to all devices.
 */
int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
{
    int ret = -1;
    BlockReopenQueueEntry *bs_entry, *next;
    Error *local_err = NULL;

    assert(bs_queue != NULL);

    bdrv_drain_all();

    QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
        if (bdrv_reopen_prepare(&bs_entry->state, bs_queue, &local_err)) {
            error_propagate(errp, local_err);
            goto cleanup;
        }
        bs_entry->prepared = true;
    }

    /* If we reach this point, we have success and just need to apply the
     * changes
     */
    QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
        bdrv_reopen_commit(&bs_entry->state);
    }

    ret = 0;

cleanup:
    QSIMPLEQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) {
        if (ret && bs_entry->prepared) {
            bdrv_reopen_abort(&bs_entry->state);
        }
        g_free(bs_entry);
    }
    g_free(bs_queue);
    return ret;
}

/* Reopen a single BlockDriverState with the specified flags. */
int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp)
{
    int ret = -1;
    Error *local_err = NULL;
    BlockReopenQueue *queue = bdrv_reopen_queue(NULL, bs, bdrv_flags);

    ret = bdrv_reopen_multiple(queue, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
    return ret;
}
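
/*
 * A minimal sketch of a multi-device reopen transaction, extending the
 * single-device pattern of bdrv_reopen() above (error handling elided;
 * bs0/bs1 and the flag values are placeholders):
 *
 *     BlockReopenQueue *queue = bdrv_reopen_queue(NULL, bs0, flags0);
 *     queue = bdrv_reopen_queue(queue, bs1, flags1);
 *     ret = bdrv_reopen_multiple(queue, &local_err);
 *
 * bdrv_reopen_multiple() frees the queue and its entries in all cases.
 */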

/*
 * Prepares a BlockDriverState for reopen. All changes are staged in the
 * 'opaque' field of the BDRVReopenState, which is allocated and used by the
 * block driver's .bdrv_reopen_prepare() implementation.
 *
 * bs is the BlockDriverState to reopen
 * flags are the new open flags
 * queue is the reopen queue
 *
 * Returns 0 on success, non-zero on error. On error errp will be set
 * as well.
 *
 * On failure, bdrv_reopen_abort() will be called to clean up any data.
 * It is the responsibility of the caller to then call abort() or
 * commit() for any other BDS that have been left in a prepare() state.
 */
int bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue,
                        Error **errp)
{
    int ret = -1;
    Error *local_err = NULL;
    BlockDriver *drv;

    assert(reopen_state != NULL);
    assert(reopen_state->bs->drv != NULL);
    drv = reopen_state->bs->drv;

    /* if we are to stay read-only, do not allow permission change
     * to r/w */
    if (!(reopen_state->bs->open_flags & BDRV_O_ALLOW_RDWR) &&
        reopen_state->flags & BDRV_O_RDWR) {
        error_set(errp, QERR_DEVICE_IS_READ_ONLY,
                  reopen_state->bs->device_name);
        goto error;
    }

    ret = bdrv_flush(reopen_state->bs);
    if (ret) {
        error_set(errp, ERROR_CLASS_GENERIC_ERROR, "Error (%s) flushing drive",
                  strerror(-ret));
        goto error;
    }

    if (drv->bdrv_reopen_prepare) {
        ret = drv->bdrv_reopen_prepare(reopen_state, queue, &local_err);
        if (ret) {
            if (local_err != NULL) {
                error_propagate(errp, local_err);
            } else {
                error_setg(errp, "failed while preparing to reopen image '%s'",
                           reopen_state->bs->filename);
            }
            goto error;
        }
    } else {
        /* It is currently mandatory to have a bdrv_reopen_prepare()
         * handler for each supported drv. */
        error_set(errp, QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED,
                  drv->format_name, reopen_state->bs->device_name,
                  "reopening of file");
        ret = -1;
        goto error;
    }

    ret = 0;

error:
    return ret;
}

/*
 * Takes the staged changes for the reopen from bdrv_reopen_prepare(), and
 * makes them final by swapping the staging BlockDriverState contents into
 * the active BlockDriverState contents.
 */
void bdrv_reopen_commit(BDRVReopenState *reopen_state)
{
    BlockDriver *drv;

    assert(reopen_state != NULL);
    drv = reopen_state->bs->drv;
    assert(drv != NULL);

    /* If there are any driver level actions to take */
    if (drv->bdrv_reopen_commit) {
        drv->bdrv_reopen_commit(reopen_state);
    }

    /* set BDS specific flags now */
    reopen_state->bs->open_flags = reopen_state->flags;
    reopen_state->bs->enable_write_cache = !!(reopen_state->flags &
                                              BDRV_O_CACHE_WB);
    reopen_state->bs->read_only = !(reopen_state->flags & BDRV_O_RDWR);

    bdrv_refresh_limits(reopen_state->bs, NULL);
}

/*
 * Abort the reopen, and delete and free the staged changes in
 * reopen_state
 */
void bdrv_reopen_abort(BDRVReopenState *reopen_state)
{
    BlockDriver *drv;

    assert(reopen_state != NULL);
    drv = reopen_state->bs->drv;
    assert(drv != NULL);

    if (drv->bdrv_reopen_abort) {
        drv->bdrv_reopen_abort(reopen_state);
    }
}

void bdrv_close(BlockDriverState *bs)
{
    if (bs->job) {
        block_job_cancel_sync(bs->job);
    }
    bdrv_drain_all(); /* complete I/O */
    bdrv_flush(bs);
    bdrv_drain_all(); /* in case flush left pending I/O */
    notifier_list_notify(&bs->close_notifiers, bs);

    if (bs->drv) {
        if (bs->backing_hd) {
            BlockDriverState *backing_hd = bs->backing_hd;
            bdrv_set_backing_hd(bs, NULL);
            bdrv_unref(backing_hd);
        }
        bs->drv->bdrv_close(bs);
        g_free(bs->opaque);
        bs->opaque = NULL;
        bs->drv = NULL;
        bs->copy_on_read = 0;
        bs->backing_file[0] = '\0';
        bs->backing_format[0] = '\0';
        bs->total_sectors = 0;
        bs->encrypted = 0;
        bs->valid_key = 0;
        bs->sg = 0;
        bs->growable = 0;
        bs->zero_beyond_eof = false;
        QDECREF(bs->options);
        bs->options = NULL;

        if (bs->file != NULL) {
            bdrv_unref(bs->file);
            bs->file = NULL;
        }
    }

    bdrv_dev_change_media_cb(bs, false);

    /* throttling disk I/O limits */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_disable(bs);
    }
}

void bdrv_close_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_close(bs);
        aio_context_release(aio_context);
    }
}

/* Check if any requests are in-flight (including throttled requests) */
static bool bdrv_requests_pending(BlockDriverState *bs)
{
    if (!QLIST_EMPTY(&bs->tracked_requests)) {
        return true;
    }
    if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) {
        return true;
    }
    if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) {
        return true;
    }
    if (bs->file && bdrv_requests_pending(bs->file)) {
        return true;
    }
    if (bs->backing_hd && bdrv_requests_pending(bs->backing_hd)) {
        return true;
    }
    return false;
}

/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * Note that completion of an asynchronous I/O operation can trigger any
 * number of other I/O operations on other devices---for example a coroutine
 * can be arbitrarily complex and a constant flow of I/O can come until the
 * coroutine is complete. Because of this, it is not possible to have a
 * function to drain a single device's I/O queue.
 */
void bdrv_drain_all(void)
{
    /* Always run first iteration so any pending completion BHs run */
    bool busy = true;
    BlockDriverState *bs;

    while (busy) {
        busy = false;

        QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
            AioContext *aio_context = bdrv_get_aio_context(bs);
            bool bs_busy;

            aio_context_acquire(aio_context);
            bdrv_flush_io_queue(bs);
            bdrv_start_throttled_reqs(bs);
            bs_busy = bdrv_requests_pending(bs);
            bs_busy |= aio_poll(aio_context, bs_busy);
            aio_context_release(aio_context);

            busy |= bs_busy;
        }
    }
}

/* make a BlockDriverState anonymous by removing it from the bdrv_states and
 * graph_bdrv_states lists. Also, clear device_name and node_name to prevent
 * a double remove. */
void bdrv_make_anon(BlockDriverState *bs)
{
    if (bs->device_name[0] != '\0') {
        QTAILQ_REMOVE(&bdrv_states, bs, device_list);
    }
    bs->device_name[0] = '\0';
    if (bs->node_name[0] != '\0') {
        QTAILQ_REMOVE(&graph_bdrv_states, bs, node_list);
    }
    bs->node_name[0] = '\0';
}

static void bdrv_rebind(BlockDriverState *bs)
{
    if (bs->drv && bs->drv->bdrv_rebind) {
        bs->drv->bdrv_rebind(bs);
    }
}

static void bdrv_move_feature_fields(BlockDriverState *bs_dest,
                                     BlockDriverState *bs_src)
{
    /* move some fields that need to stay attached to the device */

    /* dev info */
    bs_dest->dev_ops = bs_src->dev_ops;
    bs_dest->dev_opaque = bs_src->dev_opaque;
    bs_dest->dev = bs_src->dev;
    bs_dest->guest_block_size = bs_src->guest_block_size;
    bs_dest->copy_on_read = bs_src->copy_on_read;

    bs_dest->enable_write_cache = bs_src->enable_write_cache;

    /* i/o throttled req */
    memcpy(&bs_dest->throttle_state,
           &bs_src->throttle_state,
           sizeof(ThrottleState));
    bs_dest->throttled_reqs[0] = bs_src->throttled_reqs[0];
    bs_dest->throttled_reqs[1] = bs_src->throttled_reqs[1];
    bs_dest->io_limits_enabled = bs_src->io_limits_enabled;

    /* r/w error */
    bs_dest->on_read_error = bs_src->on_read_error;
    bs_dest->on_write_error = bs_src->on_write_error;

    /* i/o status */
    bs_dest->iostatus_enabled = bs_src->iostatus_enabled;
    bs_dest->iostatus = bs_src->iostatus;

    /* dirty bitmap */
    bs_dest->dirty_bitmaps = bs_src->dirty_bitmaps;

    /* reference count */
    bs_dest->refcnt = bs_src->refcnt;

    /* job */
    bs_dest->job = bs_src->job;

    /* keep the same entry in bdrv_states */
    pstrcpy(bs_dest->device_name, sizeof(bs_dest->device_name),
            bs_src->device_name);
    bs_dest->device_list = bs_src->device_list;
    memcpy(bs_dest->op_blockers, bs_src->op_blockers,
           sizeof(bs_dest->op_blockers));
}
Both bs_new and bs_old are modified. 2008 * 2009 * bs_new is required to be anonymous. 2010 * 2011 * This function does not create any image files. 2012 */ 2013 void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old) 2014 { 2015 BlockDriverState tmp; 2016 2017 /* The code needs to swap the node_name but simply swapping node_list won't 2018 * work so first remove the nodes from the graph list, do the swap then 2019 * insert them back if needed. 2020 */ 2021 if (bs_new->node_name[0] != '\0') { 2022 QTAILQ_REMOVE(&graph_bdrv_states, bs_new, node_list); 2023 } 2024 if (bs_old->node_name[0] != '\0') { 2025 QTAILQ_REMOVE(&graph_bdrv_states, bs_old, node_list); 2026 } 2027 2028 /* bs_new must be anonymous and shouldn't have anything fancy enabled */ 2029 assert(bs_new->device_name[0] == '\0'); 2030 assert(QLIST_EMPTY(&bs_new->dirty_bitmaps)); 2031 assert(bs_new->job == NULL); 2032 assert(bs_new->dev == NULL); 2033 assert(bs_new->io_limits_enabled == false); 2034 assert(!throttle_have_timer(&bs_new->throttle_state)); 2035 2036 tmp = *bs_new; 2037 *bs_new = *bs_old; 2038 *bs_old = tmp; 2039 2040 /* there are some fields that should not be swapped, move them back */ 2041 bdrv_move_feature_fields(&tmp, bs_old); 2042 bdrv_move_feature_fields(bs_old, bs_new); 2043 bdrv_move_feature_fields(bs_new, &tmp); 2044 2045 /* bs_new shouldn't be in bdrv_states even after the swap! */ 2046 assert(bs_new->device_name[0] == '\0'); 2047 2048 /* Check a few fields that should remain attached to the device */ 2049 assert(bs_new->dev == NULL); 2050 assert(bs_new->job == NULL); 2051 assert(bs_new->io_limits_enabled == false); 2052 assert(!throttle_have_timer(&bs_new->throttle_state)); 2053 2054 /* insert the nodes back into the graph node list if needed */ 2055 if (bs_new->node_name[0] != '\0') { 2056 QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs_new, node_list); 2057 } 2058 if (bs_old->node_name[0] != '\0') { 2059 QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs_old, node_list); 2060 } 2061 2062 bdrv_rebind(bs_new); 2063 bdrv_rebind(bs_old); 2064 } 2065 2066 /* 2067 * Add new bs contents at the top of an image chain while the chain is 2068 * live, while keeping required fields on the top layer. 2069 * 2070 * This will modify the BlockDriverState fields, and swap contents 2071 * between bs_new and bs_top. Both bs_new and bs_top are modified. 2072 * 2073 * bs_new is required to be anonymous. 2074 * 2075 * This function does not create any image files. 2076 */ 2077 void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top) 2078 { 2079 bdrv_swap(bs_new, bs_top); 2080 2081 /* The contents of 'tmp' will become bs_top, as we are 2082 * swapping bs_new and bs_top contents. 
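 *
 * E.g. for a live snapshot, where the guest device points at bs_top and
 * bs_new is the freshly created overlay (illustrative names, sketch
 * only, not a complete snapshot implementation):
 *
 *     bdrv_append(overlay_bs, device_bs);
 *     // device_bs keeps its pointer identity but now carries the
 *     // overlay contents; the old top image becomes its backing file
 *     // through the bdrv_set_backing_hd() call below.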
*/ 2083 bdrv_set_backing_hd(bs_top, bs_new); 2084 } 2085 2086 static void bdrv_delete(BlockDriverState *bs) 2087 { 2088 assert(!bs->dev); 2089 assert(!bs->job); 2090 assert(bdrv_op_blocker_is_empty(bs)); 2091 assert(!bs->refcnt); 2092 assert(QLIST_EMPTY(&bs->dirty_bitmaps)); 2093 2094 bdrv_close(bs); 2095 2096 /* remove from list, if necessary */ 2097 bdrv_make_anon(bs); 2098 2099 g_free(bs); 2100 } 2101 2102 int bdrv_attach_dev(BlockDriverState *bs, void *dev) 2103 /* TODO change to DeviceState *dev when all users are qdevified */ 2104 { 2105 if (bs->dev) { 2106 return -EBUSY; 2107 } 2108 bs->dev = dev; 2109 bdrv_iostatus_reset(bs); 2110 return 0; 2111 } 2112 2113 /* TODO qdevified devices don't use this, remove when devices are qdevified */ 2114 void bdrv_attach_dev_nofail(BlockDriverState *bs, void *dev) 2115 { 2116 if (bdrv_attach_dev(bs, dev) < 0) { 2117 abort(); 2118 } 2119 } 2120 2121 void bdrv_detach_dev(BlockDriverState *bs, void *dev) 2122 /* TODO change to DeviceState *dev when all users are qdevified */ 2123 { 2124 assert(bs->dev == dev); 2125 bs->dev = NULL; 2126 bs->dev_ops = NULL; 2127 bs->dev_opaque = NULL; 2128 bs->guest_block_size = 512; 2129 } 2130 2131 /* TODO change to return DeviceState * when all users are qdevified */ 2132 void *bdrv_get_attached_dev(BlockDriverState *bs) 2133 { 2134 return bs->dev; 2135 } 2136 2137 void bdrv_set_dev_ops(BlockDriverState *bs, const BlockDevOps *ops, 2138 void *opaque) 2139 { 2140 bs->dev_ops = ops; 2141 bs->dev_opaque = opaque; 2142 } 2143 2144 static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load) 2145 { 2146 if (bs->dev_ops && bs->dev_ops->change_media_cb) { 2147 bool tray_was_closed = !bdrv_dev_is_tray_open(bs); 2148 bs->dev_ops->change_media_cb(bs->dev_opaque, load); 2149 if (tray_was_closed) { 2150 /* tray open */ 2151 qapi_event_send_device_tray_moved(bdrv_get_device_name(bs), 2152 true, &error_abort); 2153 } 2154 if (load) { 2155 /* tray close */ 2156 qapi_event_send_device_tray_moved(bdrv_get_device_name(bs), 2157 false, &error_abort); 2158 } 2159 } 2160 } 2161 2162 bool bdrv_dev_has_removable_media(BlockDriverState *bs) 2163 { 2164 return !bs->dev || (bs->dev_ops && bs->dev_ops->change_media_cb); 2165 } 2166 2167 void bdrv_dev_eject_request(BlockDriverState *bs, bool force) 2168 { 2169 if (bs->dev_ops && bs->dev_ops->eject_request_cb) { 2170 bs->dev_ops->eject_request_cb(bs->dev_opaque, force); 2171 } 2172 } 2173 2174 bool bdrv_dev_is_tray_open(BlockDriverState *bs) 2175 { 2176 if (bs->dev_ops && bs->dev_ops->is_tray_open) { 2177 return bs->dev_ops->is_tray_open(bs->dev_opaque); 2178 } 2179 return false; 2180 } 2181 2182 static void bdrv_dev_resize_cb(BlockDriverState *bs) 2183 { 2184 if (bs->dev_ops && bs->dev_ops->resize_cb) { 2185 bs->dev_ops->resize_cb(bs->dev_opaque); 2186 } 2187 } 2188 2189 bool bdrv_dev_is_medium_locked(BlockDriverState *bs) 2190 { 2191 if (bs->dev_ops && bs->dev_ops->is_medium_locked) { 2192 return bs->dev_ops->is_medium_locked(bs->dev_opaque); 2193 } 2194 return false; 2195 } 2196 2197 /* 2198 * Run consistency checks on an image 2199 * 2200 * Returns 0 if the check could be completed (it doesn't mean that the image is 2201 * free of errors) or -errno when an internal error occurred. The results of the 2202 * check are stored in res. 
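 *
 * Hypothetical caller sketch (BDRV_FIX_ERRORS and the check_errors
 * field come from the BdrvCheckMode/BdrvCheckResult definitions):
 *
 *     BdrvCheckResult result;
 *     int ret = bdrv_check(bs, &result, BDRV_FIX_ERRORS);
 *     if (ret < 0 || result.check_errors) {
 *         // the check could not run, or errors remain in the image
 *     }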
2203 */ 2204 int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix) 2205 { 2206 if (bs->drv->bdrv_check == NULL) { 2207 return -ENOTSUP; 2208 } 2209 2210 memset(res, 0, sizeof(*res)); 2211 return bs->drv->bdrv_check(bs, res, fix); 2212 } 2213 2214 #define COMMIT_BUF_SECTORS 2048 2215 2216 /* commit COW file into the raw image */ 2217 int bdrv_commit(BlockDriverState *bs) 2218 { 2219 BlockDriver *drv = bs->drv; 2220 int64_t sector, total_sectors, length, backing_length; 2221 int n, ro, open_flags; 2222 int ret = 0; 2223 uint8_t *buf = NULL; 2224 char filename[PATH_MAX]; 2225 2226 if (!drv) 2227 return -ENOMEDIUM; 2228 2229 if (!bs->backing_hd) { 2230 return -ENOTSUP; 2231 } 2232 2233 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_COMMIT, NULL) || 2234 bdrv_op_is_blocked(bs->backing_hd, BLOCK_OP_TYPE_COMMIT, NULL)) { 2235 return -EBUSY; 2236 } 2237 2238 ro = bs->backing_hd->read_only; 2239 /* Use pstrcpy (not strncpy): filename must be NUL-terminated. */ 2240 pstrcpy(filename, sizeof(filename), bs->backing_hd->filename); 2241 open_flags = bs->backing_hd->open_flags; 2242 2243 if (ro) { 2244 if (bdrv_reopen(bs->backing_hd, open_flags | BDRV_O_RDWR, NULL)) { 2245 return -EACCES; 2246 } 2247 } 2248 2249 length = bdrv_getlength(bs); 2250 if (length < 0) { 2251 ret = length; 2252 goto ro_cleanup; 2253 } 2254 2255 backing_length = bdrv_getlength(bs->backing_hd); 2256 if (backing_length < 0) { 2257 ret = backing_length; 2258 goto ro_cleanup; 2259 } 2260 2261 /* If our top snapshot is larger than the backing file image, 2262 * grow the backing file image if possible. If not possible, 2263 * we must return an error */ 2264 if (length > backing_length) { 2265 ret = bdrv_truncate(bs->backing_hd, length); 2266 if (ret < 0) { 2267 goto ro_cleanup; 2268 } 2269 } 2270 2271 total_sectors = length >> BDRV_SECTOR_BITS; 2272 buf = g_malloc(COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE); 2273 2274 for (sector = 0; sector < total_sectors; sector += n) { 2275 ret = bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n); 2276 if (ret < 0) { 2277 goto ro_cleanup; 2278 } 2279 if (ret) { 2280 ret = bdrv_read(bs, sector, buf, n); 2281 if (ret < 0) { 2282 goto ro_cleanup; 2283 } 2284 2285 ret = bdrv_write(bs->backing_hd, sector, buf, n); 2286 if (ret < 0) { 2287 goto ro_cleanup; 2288 } 2289 } 2290 } 2291 2292 if (drv->bdrv_make_empty) { 2293 ret = drv->bdrv_make_empty(bs); 2294 if (ret < 0) { 2295 goto ro_cleanup; 2296 } 2297 bdrv_flush(bs); 2298 } 2299 2300 /* 2301 * Make sure all data we wrote to the backing device is actually 2302 * stable on disk. 2303 */ 2304 if (bs->backing_hd) { 2305 bdrv_flush(bs->backing_hd); 2306 } 2307 2308 ret = 0; 2309 ro_cleanup: 2310 g_free(buf); 2311 2312 if (ro) { 2313 /* ignoring error return here */ 2314 bdrv_reopen(bs->backing_hd, open_flags & ~BDRV_O_RDWR, NULL); 2315 } 2316 2317 return ret; 2318 } 2319 2320 int bdrv_commit_all(void) 2321 { 2322 BlockDriverState *bs; 2323 2324 QTAILQ_FOREACH(bs, &bdrv_states, device_list) { 2325 AioContext *aio_context = bdrv_get_aio_context(bs); 2326 2327 aio_context_acquire(aio_context); 2328 if (bs->drv && bs->backing_hd) { 2329 int ret = bdrv_commit(bs); 2330 if (ret < 0) { 2331 aio_context_release(aio_context); 2332 return ret; 2333 } 2334 } 2335 aio_context_release(aio_context); 2336 } 2337 return 0; 2338 } 2339 2340 /** 2341 * Remove an active request from the tracked requests list 2342 * 2343 * This function should be called when a tracked request is completing. 
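 *
 * Tracked requests are always used as a begin/end pair around the
 * actual I/O; the pattern used by bdrv_co_do_preadv() below is roughly:
 *
 *     tracked_request_begin(&req, bs, offset, bytes, false);
 *     ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align, qiov, flags);
 *     tracked_request_end(&req);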
2344 */ 2345 static void tracked_request_end(BdrvTrackedRequest *req) 2346 { 2347 if (req->serialising) { 2348 req->bs->serialising_in_flight--; 2349 } 2350 2351 QLIST_REMOVE(req, list); 2352 qemu_co_queue_restart_all(&req->wait_queue); 2353 } 2354 2355 /** 2356 * Add an active request to the tracked requests list 2357 */ 2358 static void tracked_request_begin(BdrvTrackedRequest *req, 2359 BlockDriverState *bs, 2360 int64_t offset, 2361 unsigned int bytes, bool is_write) 2362 { 2363 *req = (BdrvTrackedRequest){ 2364 .bs = bs, 2365 .offset = offset, 2366 .bytes = bytes, 2367 .is_write = is_write, 2368 .co = qemu_coroutine_self(), 2369 .serialising = false, 2370 .overlap_offset = offset, 2371 .overlap_bytes = bytes, 2372 }; 2373 2374 qemu_co_queue_init(&req->wait_queue); 2375 2376 QLIST_INSERT_HEAD(&bs->tracked_requests, req, list); 2377 } 2378 2379 static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align) 2380 { 2381 int64_t overlap_offset = req->offset & ~(align - 1); 2382 unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align) 2383 - overlap_offset; 2384 2385 if (!req->serialising) { 2386 req->bs->serialising_in_flight++; 2387 req->serialising = true; 2388 } 2389 2390 req->overlap_offset = MIN(req->overlap_offset, overlap_offset); 2391 req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes); 2392 } 2393 2394 /** 2395 * Round a region to cluster boundaries 2396 */ 2397 void bdrv_round_to_clusters(BlockDriverState *bs, 2398 int64_t sector_num, int nb_sectors, 2399 int64_t *cluster_sector_num, 2400 int *cluster_nb_sectors) 2401 { 2402 BlockDriverInfo bdi; 2403 2404 if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) { 2405 *cluster_sector_num = sector_num; 2406 *cluster_nb_sectors = nb_sectors; 2407 } else { 2408 int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE; 2409 *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c); 2410 *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num + 2411 nb_sectors, c); 2412 } 2413 } 2414 2415 static int bdrv_get_cluster_size(BlockDriverState *bs) 2416 { 2417 BlockDriverInfo bdi; 2418 int ret; 2419 2420 ret = bdrv_get_info(bs, &bdi); 2421 if (ret < 0 || bdi.cluster_size == 0) { 2422 return bs->request_alignment; 2423 } else { 2424 return bdi.cluster_size; 2425 } 2426 } 2427 2428 static bool tracked_request_overlaps(BdrvTrackedRequest *req, 2429 int64_t offset, unsigned int bytes) 2430 { 2431 /* aaaa bbbb */ 2432 if (offset >= req->overlap_offset + req->overlap_bytes) { 2433 return false; 2434 } 2435 /* bbbb aaaa */ 2436 if (req->overlap_offset >= offset + bytes) { 2437 return false; 2438 } 2439 return true; 2440 } 2441 2442 static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self) 2443 { 2444 BlockDriverState *bs = self->bs; 2445 BdrvTrackedRequest *req; 2446 bool retry; 2447 bool waited = false; 2448 2449 if (!bs->serialising_in_flight) { 2450 return false; 2451 } 2452 2453 do { 2454 retry = false; 2455 QLIST_FOREACH(req, &bs->tracked_requests, list) { 2456 if (req == self || (!req->serialising && !self->serialising)) { 2457 continue; 2458 } 2459 if (tracked_request_overlaps(req, self->overlap_offset, 2460 self->overlap_bytes)) 2461 { 2462 /* Hitting this means there was a reentrant request, for 2463 * example, a block driver issuing nested requests. This must 2464 * never happen since it means deadlock. 
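 *
 * (For instance, a block driver whose bdrv_co_writev issued another
 * overlapping write to the same BlockDriverState would end up back
 * here in the same coroutine, with req->co == qemu_coroutine_self().)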
2465 */ 2466 assert(qemu_coroutine_self() != req->co); 2467 2468 /* If the request is already (indirectly) waiting for us, or 2469 * will wait for us as soon as it wakes up, then just go on 2470 * (instead of producing a deadlock in the former case). */ 2471 if (!req->waiting_for) { 2472 self->waiting_for = req; 2473 qemu_co_queue_wait(&req->wait_queue); 2474 self->waiting_for = NULL; 2475 retry = true; 2476 waited = true; 2477 break; 2478 } 2479 } 2480 } 2481 } while (retry); 2482 2483 return waited; 2484 } 2485 2486 /* 2487 * Return values: 2488 * 0 - success 2489 * -EINVAL - backing format specified, but no file 2490 * -ENOSPC - can't update the backing file because no space is left in the 2491 * image file header 2492 * -ENOTSUP - format driver doesn't support changing the backing file 2493 */ 2494 int bdrv_change_backing_file(BlockDriverState *bs, 2495 const char *backing_file, const char *backing_fmt) 2496 { 2497 BlockDriver *drv = bs->drv; 2498 int ret; 2499 2500 /* Backing file format doesn't make sense without a backing file */ 2501 if (backing_fmt && !backing_file) { 2502 return -EINVAL; 2503 } 2504 2505 if (drv->bdrv_change_backing_file != NULL) { 2506 ret = drv->bdrv_change_backing_file(bs, backing_file, backing_fmt); 2507 } else { 2508 ret = -ENOTSUP; 2509 } 2510 2511 if (ret == 0) { 2512 pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: ""); 2513 pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: ""); 2514 } 2515 return ret; 2516 } 2517 2518 /* 2519 * Finds the image layer in the chain that has 'bs' as its backing file. 2520 * 2521 * active is the current topmost image. 2522 * 2523 * Returns NULL if bs is not found in active's image chain, 2524 * or if active == bs. 2525 * 2526 * Returns the bottommost base image if bs == NULL. 2527 */ 2528 BlockDriverState *bdrv_find_overlay(BlockDriverState *active, 2529 BlockDriverState *bs) 2530 { 2531 while (active && bs != active->backing_hd) { 2532 active = active->backing_hd; 2533 } 2534 2535 return active; 2536 } 2537 2538 /* Given a BDS, searches for the base layer. */ 2539 BlockDriverState *bdrv_find_base(BlockDriverState *bs) 2540 { 2541 return bdrv_find_overlay(bs, NULL); 2542 } 2543 2544 typedef struct BlkIntermediateStates { 2545 BlockDriverState *bs; 2546 QSIMPLEQ_ENTRY(BlkIntermediateStates) entry; 2547 } BlkIntermediateStates; 2548 2549 2550 /* 2551 * Drops images above 'base' up to and including 'top', and sets the image 2552 * above 'top' to have base as its backing file. 2553 * 2554 * Requires that the overlay to 'top' is opened r/w, so that the backing file 2555 * information in 'bs' can be properly updated. 2556 * 2557 * E.g., this will convert the following chain: 2558 * bottom <- base <- intermediate <- top <- active 2559 * 2560 * to 2561 * 2562 * bottom <- base <- active 2563 * 2564 * It is allowed for bottom==base, in which case it converts: 2565 * 2566 * base <- intermediate <- top <- active 2567 * 2568 * to 2569 * 2570 * base <- active 2571 * 2572 * If backing_file_str is non-NULL, it will be used when modifying top's 2573 * overlay image metadata. 
2574 * 2575 * Error conditions: 2576 * if active == top, that is considered an error 2577 * 2578 */ 2579 int bdrv_drop_intermediate(BlockDriverState *active, BlockDriverState *top, 2580 BlockDriverState *base, const char *backing_file_str) 2581 { 2582 BlockDriverState *intermediate; 2583 BlockDriverState *base_bs = NULL; 2584 BlockDriverState *new_top_bs = NULL; 2585 BlkIntermediateStates *intermediate_state, *next; 2586 int ret = -EIO; 2587 2588 QSIMPLEQ_HEAD(states_to_delete, BlkIntermediateStates) states_to_delete; 2589 QSIMPLEQ_INIT(&states_to_delete); 2590 2591 if (!top->drv || !base->drv) { 2592 goto exit; 2593 } 2594 2595 new_top_bs = bdrv_find_overlay(active, top); 2596 2597 if (new_top_bs == NULL) { 2598 /* we could not find the image above 'top', this is an error */ 2599 goto exit; 2600 } 2601 2602 /* special case of new_top_bs->backing_hd already pointing to base - nothing 2603 * to do, no intermediate images */ 2604 if (new_top_bs->backing_hd == base) { 2605 ret = 0; 2606 goto exit; 2607 } 2608 2609 intermediate = top; 2610 2611 /* now we will go down through the list, and add each BDS we find 2612 * into our deletion queue, until we hit the 'base' 2613 */ 2614 while (intermediate) { 2615 intermediate_state = g_malloc0(sizeof(BlkIntermediateStates)); 2616 intermediate_state->bs = intermediate; 2617 QSIMPLEQ_INSERT_TAIL(&states_to_delete, intermediate_state, entry); 2618 2619 if (intermediate->backing_hd == base) { 2620 base_bs = intermediate->backing_hd; 2621 break; 2622 } 2623 intermediate = intermediate->backing_hd; 2624 } 2625 if (base_bs == NULL) { 2626 /* something went wrong, we did not end at the base. safely 2627 * unravel everything, and exit with error */ 2628 goto exit; 2629 } 2630 2631 /* success - we can delete the intermediate states, and link top->base */ 2632 backing_file_str = backing_file_str ? backing_file_str : base_bs->filename; 2633 ret = bdrv_change_backing_file(new_top_bs, backing_file_str, 2634 base_bs->drv ? 
base_bs->drv->format_name : ""); 2635 if (ret) { 2636 goto exit; 2637 } 2638 bdrv_set_backing_hd(new_top_bs, base_bs); 2639 2640 QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) { 2641 /* so that bdrv_close() does not recursively close the chain */ 2642 bdrv_set_backing_hd(intermediate_state->bs, NULL); 2643 bdrv_unref(intermediate_state->bs); 2644 } 2645 ret = 0; 2646 2647 exit: 2648 QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) { 2649 g_free(intermediate_state); 2650 } 2651 return ret; 2652 } 2653 2654 2655 static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset, 2656 size_t size) 2657 { 2658 int64_t len; 2659 2660 if (size > INT_MAX) { 2661 return -EIO; 2662 } 2663 2664 if (!bdrv_is_inserted(bs)) 2665 return -ENOMEDIUM; 2666 2667 if (bs->growable) 2668 return 0; 2669 2670 len = bdrv_getlength(bs); 2671 2672 if (offset < 0) 2673 return -EIO; 2674 2675 if ((offset > len) || (len - offset < size)) 2676 return -EIO; 2677 2678 return 0; 2679 } 2680 2681 static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num, 2682 int nb_sectors) 2683 { 2684 if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) { 2685 return -EIO; 2686 } 2687 2688 return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE, 2689 nb_sectors * BDRV_SECTOR_SIZE); 2690 } 2691 2692 typedef struct RwCo { 2693 BlockDriverState *bs; 2694 int64_t offset; 2695 QEMUIOVector *qiov; 2696 bool is_write; 2697 int ret; 2698 BdrvRequestFlags flags; 2699 } RwCo; 2700 2701 static void coroutine_fn bdrv_rw_co_entry(void *opaque) 2702 { 2703 RwCo *rwco = opaque; 2704 2705 if (!rwco->is_write) { 2706 rwco->ret = bdrv_co_do_preadv(rwco->bs, rwco->offset, 2707 rwco->qiov->size, rwco->qiov, 2708 rwco->flags); 2709 } else { 2710 rwco->ret = bdrv_co_do_pwritev(rwco->bs, rwco->offset, 2711 rwco->qiov->size, rwco->qiov, 2712 rwco->flags); 2713 } 2714 } 2715 2716 /* 2717 * Process a vectored synchronous request using coroutines 2718 */ 2719 static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset, 2720 QEMUIOVector *qiov, bool is_write, 2721 BdrvRequestFlags flags) 2722 { 2723 Coroutine *co; 2724 RwCo rwco = { 2725 .bs = bs, 2726 .offset = offset, 2727 .qiov = qiov, 2728 .is_write = is_write, 2729 .ret = NOT_DONE, 2730 .flags = flags, 2731 }; 2732 2733 /** 2734 * In sync call context, when the vcpu is blocked, this throttling timer 2735 * will not fire; so the I/O throttling function has to be disabled here 2736 * if it has been enabled. 
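 *
 * Otherwise the request could be parked in throttled_reqs, and the
 * aio_poll() loop below would wait forever for a throttling timer
 * that cannot fire while the vcpu is blocked here.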
2737 */ 2738 if (bs->io_limits_enabled) { 2739 fprintf(stderr, "Disabling I/O throttling on '%s' due " 2740 "to synchronous I/O.\n", bdrv_get_device_name(bs)); 2741 bdrv_io_limits_disable(bs); 2742 } 2743 2744 if (qemu_in_coroutine()) { 2745 /* Fast-path if already in coroutine context */ 2746 bdrv_rw_co_entry(&rwco); 2747 } else { 2748 AioContext *aio_context = bdrv_get_aio_context(bs); 2749 2750 co = qemu_coroutine_create(bdrv_rw_co_entry); 2751 qemu_coroutine_enter(co, &rwco); 2752 while (rwco.ret == NOT_DONE) { 2753 aio_poll(aio_context, true); 2754 } 2755 } 2756 return rwco.ret; 2757 } 2758 2759 /* 2760 * Process a synchronous request using coroutines 2761 */ 2762 static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf, 2763 int nb_sectors, bool is_write, BdrvRequestFlags flags) 2764 { 2765 QEMUIOVector qiov; 2766 struct iovec iov = { 2767 .iov_base = (void *)buf, 2768 .iov_len = nb_sectors * BDRV_SECTOR_SIZE, 2769 }; 2770 2771 if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) { 2772 return -EINVAL; 2773 } 2774 2775 qemu_iovec_init_external(&qiov, &iov, 1); 2776 return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS, 2777 &qiov, is_write, flags); 2778 } 2779 2780 /* return < 0 if error. See bdrv_write() for the return codes */ 2781 int bdrv_read(BlockDriverState *bs, int64_t sector_num, 2782 uint8_t *buf, int nb_sectors) 2783 { 2784 return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0); 2785 } 2786 2787 /* Just like bdrv_read(), but with I/O throttling temporarily disabled */ 2788 int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num, 2789 uint8_t *buf, int nb_sectors) 2790 { 2791 bool enabled; 2792 int ret; 2793 2794 enabled = bs->io_limits_enabled; 2795 bs->io_limits_enabled = false; 2796 ret = bdrv_read(bs, sector_num, buf, nb_sectors); 2797 bs->io_limits_enabled = enabled; 2798 return ret; 2799 } 2800 2801 /* Return < 0 if error. Important errors are: 2802 -EIO generic I/O error (may happen for all errors) 2803 -ENOMEDIUM No media inserted. 2804 -EINVAL Invalid sector number or nb_sectors 2805 -EACCES Trying to write a read-only device 2806 */ 2807 int bdrv_write(BlockDriverState *bs, int64_t sector_num, 2808 const uint8_t *buf, int nb_sectors) 2809 { 2810 return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0); 2811 } 2812 2813 int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num, 2814 int nb_sectors, BdrvRequestFlags flags) 2815 { 2816 return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true, 2817 BDRV_REQ_ZERO_WRITE | flags); 2818 } 2819 2820 /* 2821 * Completely zero out a block device with the help of bdrv_write_zeroes. 2822 * The operation is sped up by checking the block status and only writing 2823 * zeroes to the device if they currently do not return zeroes. Optional 2824 * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP). 2825 * 2826 * Returns < 0 on error, 0 on success. For error codes see bdrv_write(). 
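 *
 * Illustrative use, e.g. when fully pre-zeroing a target image
 * (BDRV_REQ_MAY_UNMAP lets drivers punch holes instead of writing
 * literal zeroes):
 *
 *     ret = bdrv_make_zero(bs, BDRV_REQ_MAY_UNMAP);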
2827 */ 2828 int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags) 2829 { 2830 int64_t target_sectors, ret, nb_sectors, sector_num = 0; 2831 int n; 2832 2833 target_sectors = bdrv_nb_sectors(bs); 2834 if (target_sectors < 0) { 2835 return target_sectors; 2836 } 2837 2838 for (;;) { 2839 nb_sectors = target_sectors - sector_num; 2840 if (nb_sectors <= 0) { 2841 return 0; 2842 } 2843 if (nb_sectors > INT_MAX) { 2844 nb_sectors = INT_MAX; 2845 } 2846 ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n); 2847 if (ret < 0) { 2848 error_report("error getting block status at sector %" PRId64 ": %s", 2849 sector_num, strerror(-ret)); 2850 return ret; 2851 } 2852 if (ret & BDRV_BLOCK_ZERO) { 2853 sector_num += n; 2854 continue; 2855 } 2856 ret = bdrv_write_zeroes(bs, sector_num, n, flags); 2857 if (ret < 0) { 2858 error_report("error writing zeroes at sector %" PRId64 ": %s", 2859 sector_num, strerror(-ret)); 2860 return ret; 2861 } 2862 sector_num += n; 2863 } 2864 } 2865 2866 int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes) 2867 { 2868 QEMUIOVector qiov; 2869 struct iovec iov = { 2870 .iov_base = (void *)buf, 2871 .iov_len = bytes, 2872 }; 2873 int ret; 2874 2875 if (bytes < 0) { 2876 return -EINVAL; 2877 } 2878 2879 qemu_iovec_init_external(&qiov, &iov, 1); 2880 ret = bdrv_prwv_co(bs, offset, &qiov, false, 0); 2881 if (ret < 0) { 2882 return ret; 2883 } 2884 2885 return bytes; 2886 } 2887 2888 int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov) 2889 { 2890 int ret; 2891 2892 ret = bdrv_prwv_co(bs, offset, qiov, true, 0); 2893 if (ret < 0) { 2894 return ret; 2895 } 2896 2897 return qiov->size; 2898 } 2899 2900 int bdrv_pwrite(BlockDriverState *bs, int64_t offset, 2901 const void *buf, int bytes) 2902 { 2903 QEMUIOVector qiov; 2904 struct iovec iov = { 2905 .iov_base = (void *) buf, 2906 .iov_len = bytes, 2907 }; 2908 2909 if (bytes < 0) { 2910 return -EINVAL; 2911 } 2912 2913 qemu_iovec_init_external(&qiov, &iov, 1); 2914 return bdrv_pwritev(bs, offset, &qiov); 2915 } 2916 2917 /* 2918 * Writes to the file and ensures that no writes are reordered across this 2919 * request (acts as a barrier) 2920 * 2921 * Returns 0 on success, -errno in error cases. 2922 */ 2923 int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset, 2924 const void *buf, int count) 2925 { 2926 int ret; 2927 2928 ret = bdrv_pwrite(bs, offset, buf, count); 2929 if (ret < 0) { 2930 return ret; 2931 } 2932 2933 /* No flush needed for cache modes that already do it */ 2934 if (bs->enable_write_cache) { 2935 bdrv_flush(bs); 2936 } 2937 2938 return 0; 2939 } 2940 2941 static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs, 2942 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov) 2943 { 2944 /* Perform I/O through a temporary buffer so that users who scribble over 2945 * their read buffer while the operation is in progress do not end up 2946 * modifying the image file. This is critical for zero-copy guest I/O 2947 * where anything might happen inside guest memory. 2948 */ 2949 void *bounce_buffer; 2950 2951 BlockDriver *drv = bs->drv; 2952 struct iovec iov; 2953 QEMUIOVector bounce_qiov; 2954 int64_t cluster_sector_num; 2955 int cluster_nb_sectors; 2956 size_t skip_bytes; 2957 int ret; 2958 2959 /* Cover entire cluster so no additional backing file I/O is required when 2960 * allocating cluster in the image file. 
2961 */ 2962 bdrv_round_to_clusters(bs, sector_num, nb_sectors, 2963 &cluster_sector_num, &cluster_nb_sectors); 2964 2965 trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, 2966 cluster_sector_num, cluster_nb_sectors); 2967 2968 iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE; 2969 iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len); 2970 qemu_iovec_init_external(&bounce_qiov, &iov, 1); 2971 2972 ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors, 2973 &bounce_qiov); 2974 if (ret < 0) { 2975 goto err; 2976 } 2977 2978 if (drv->bdrv_co_write_zeroes && 2979 buffer_is_zero(bounce_buffer, iov.iov_len)) { 2980 ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num, 2981 cluster_nb_sectors, 0); 2982 } else { 2983 /* This does not change the data on the disk, it is not necessary 2984 * to flush even in cache=writethrough mode. 2985 */ 2986 ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors, 2987 &bounce_qiov); 2988 } 2989 2990 if (ret < 0) { 2991 /* It might be okay to ignore write errors for guest requests. If this 2992 * is a deliberate copy-on-read then we don't want to ignore the error. 2993 * Simply report it in all cases. 2994 */ 2995 goto err; 2996 } 2997 2998 skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE; 2999 qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes, 3000 nb_sectors * BDRV_SECTOR_SIZE); 3001 3002 err: 3003 qemu_vfree(bounce_buffer); 3004 return ret; 3005 } 3006 3007 /* 3008 * Forwards an already correctly aligned request to the BlockDriver. This 3009 * handles copy on read and zeroing after EOF; any other features must be 3010 * implemented by the caller. 3011 */ 3012 static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs, 3013 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes, 3014 int64_t align, QEMUIOVector *qiov, int flags) 3015 { 3016 BlockDriver *drv = bs->drv; 3017 int ret; 3018 3019 int64_t sector_num = offset >> BDRV_SECTOR_BITS; 3020 unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS; 3021 3022 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0); 3023 assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0); 3024 assert(!qiov || bytes == qiov->size); 3025 3026 /* Handle Copy on Read and associated serialisation */ 3027 if (flags & BDRV_REQ_COPY_ON_READ) { 3028 /* If we touch the same cluster it counts as an overlap. This 3029 * guarantees that allocating writes will be serialized and not race 3030 * with each other for the same cluster. For example, in copy-on-read 3031 * it ensures that the CoR read and write operations are atomic and 3032 * guest writes cannot interleave between them. 
*/
3033 mark_request_serialising(req, bdrv_get_cluster_size(bs));
3034 }
3035
3036 wait_serialising_requests(req);
3037
3038 if (flags & BDRV_REQ_COPY_ON_READ) {
3039 int pnum;
3040
3041 ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum);
3042 if (ret < 0) {
3043 goto out;
3044 }
3045
3046 if (!ret || pnum != nb_sectors) {
3047 ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
3048 goto out;
3049 }
3050 }
3051
3052 /* Forward the request to the BlockDriver */
3053 if (!(bs->zero_beyond_eof && bs->growable)) {
3054 ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
3055 } else {
3056 /* Read zeros after EOF of growable BDSes */
3057 int64_t total_sectors, max_nb_sectors;
3058
3059 total_sectors = bdrv_nb_sectors(bs);
3060 if (total_sectors < 0) {
3061 ret = total_sectors;
3062 goto out;
3063 }
3064
3065 max_nb_sectors = ROUND_UP(MAX(0, total_sectors - sector_num),
3066 align >> BDRV_SECTOR_BITS);
3067 if (max_nb_sectors > 0) {
3068 QEMUIOVector local_qiov;
3069 size_t local_sectors;
3070
3071 max_nb_sectors = MIN(max_nb_sectors, SIZE_MAX / BDRV_SECTOR_SIZE); /* keep byte count within size_t */
3072 local_sectors = MIN(max_nb_sectors, nb_sectors);
3073
3074 qemu_iovec_init(&local_qiov, qiov->niov);
3075 qemu_iovec_concat(&local_qiov, qiov, 0,
3076 local_sectors * BDRV_SECTOR_SIZE);
3077
3078 ret = drv->bdrv_co_readv(bs, sector_num, local_sectors,
3079 &local_qiov);
3080
3081 qemu_iovec_destroy(&local_qiov);
3082 } else {
3083 ret = 0;
3084 }
3085
3086 /* Reading beyond end of file is supposed to produce zeroes */
3087 if (ret == 0 && total_sectors < sector_num + nb_sectors) {
3088 uint64_t offset = MAX(0, total_sectors - sector_num);
3089 uint64_t bytes = (sector_num + nb_sectors - offset) *
3090 BDRV_SECTOR_SIZE;
3091 qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes);
3092 }
3093 }
3094
3095 out:
3096 return ret;
3097 }
3098
3099 /*
3100 * Handle a read request in coroutine context
3101 */
3102 static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
3103 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
3104 BdrvRequestFlags flags)
3105 {
3106 BlockDriver *drv = bs->drv;
3107 BdrvTrackedRequest req;
3108
3109 /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
3110 uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
3111 uint8_t *head_buf = NULL;
3112 uint8_t *tail_buf = NULL;
3113 QEMUIOVector local_qiov;
3114 bool use_local_qiov = false;
3115 int ret;
3116
3117 if (!drv) {
3118 return -ENOMEDIUM;
3119 }
3120 if (bdrv_check_byte_request(bs, offset, bytes)) {
3121 return -EIO;
3122 }
3123
3124 if (bs->copy_on_read) {
3125 flags |= BDRV_REQ_COPY_ON_READ;
3126 }
3127
3128 /* throttling disk I/O */
3129 if (bs->io_limits_enabled) {
3130 bdrv_io_limits_intercept(bs, bytes, false);
3131 }
3132
3133 /* Align read if necessary by padding qiov */
3134 if (offset & (align - 1)) {
3135 head_buf = qemu_blockalign(bs, align);
3136 qemu_iovec_init(&local_qiov, qiov->niov + 2);
3137 qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
3138 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3139 use_local_qiov = true;
3140
3141 bytes += offset & (align - 1);
3142 offset = offset & ~(align - 1);
3143 }
3144
3145 if ((offset + bytes) & (align - 1)) {
3146 if (!use_local_qiov) {
3147 qemu_iovec_init(&local_qiov, qiov->niov + 1);
3148 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3149 use_local_qiov = true;
3150 }
3151 tail_buf = qemu_blockalign(bs, align);
3152 qemu_iovec_add(&local_qiov, tail_buf,
3153 align - ((offset + bytes) & (align -
1)));
3154
3155 bytes = ROUND_UP(bytes, align);
3156 }
3157
3158 tracked_request_begin(&req, bs, offset, bytes, false);
3159 ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align,
3160 use_local_qiov ? &local_qiov : qiov,
3161 flags);
3162 tracked_request_end(&req);
3163
3164 if (use_local_qiov) {
3165 qemu_iovec_destroy(&local_qiov);
3166 qemu_vfree(head_buf);
3167 qemu_vfree(tail_buf);
3168 }
3169
3170 return ret;
3171 }
3172
3173 static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
3174 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
3175 BdrvRequestFlags flags)
3176 {
3177 if (nb_sectors < 0 || nb_sectors > (UINT_MAX >> BDRV_SECTOR_BITS)) {
3178 return -EINVAL;
3179 }
3180
3181 return bdrv_co_do_preadv(bs, sector_num << BDRV_SECTOR_BITS,
3182 nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
3183 }
3184
3185 int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
3186 int nb_sectors, QEMUIOVector *qiov)
3187 {
3188 trace_bdrv_co_readv(bs, sector_num, nb_sectors);
3189
3190 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
3191 }
3192
3193 int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
3194 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
3195 {
3196 trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);
3197
3198 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
3199 BDRV_REQ_COPY_ON_READ);
3200 }
3201
3202 /* if no limit is specified in the BlockLimits use a default
3203 * of 32768 512-byte sectors (16 MiB) per request.
3204 */
3205 #define MAX_WRITE_ZEROES_DEFAULT 32768
3206
3207 static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
3208 int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
3209 {
3210 BlockDriver *drv = bs->drv;
3211 QEMUIOVector qiov;
3212 struct iovec iov = {0};
3213 int ret = 0;
3214
3215 int max_write_zeroes = bs->bl.max_write_zeroes ?
3216 bs->bl.max_write_zeroes : MAX_WRITE_ZEROES_DEFAULT;
3217
3218 while (nb_sectors > 0 && !ret) {
3219 int num = nb_sectors;
3220
3221 /* Align request. Block drivers can expect the "bulk" of the request
3222 * to be aligned.
3223 */
3224 if (bs->bl.write_zeroes_alignment
3225 && num > bs->bl.write_zeroes_alignment) {
3226 if (sector_num % bs->bl.write_zeroes_alignment != 0) {
3227 /* Make a small request up to the first aligned sector. */
3228 num = bs->bl.write_zeroes_alignment;
3229 num -= sector_num % bs->bl.write_zeroes_alignment;
3230 } else if ((sector_num + num) % bs->bl.write_zeroes_alignment != 0) {
3231 /* Shorten the request to the last aligned sector. num cannot
3232 * underflow because num > bs->bl.write_zeroes_alignment.
3233 */
3234 num -= (sector_num + num) % bs->bl.write_zeroes_alignment;
3235 }
3236 }
3237
3238 /* limit request size */
3239 if (num > max_write_zeroes) {
3240 num = max_write_zeroes;
3241 }
3242
3243 ret = -ENOTSUP;
3244 /* First try the efficient write zeroes operation */
3245 if (drv->bdrv_co_write_zeroes) {
3246 ret = drv->bdrv_co_write_zeroes(bs, sector_num, num, flags);
3247 }
3248
3249 if (ret == -ENOTSUP) {
3250 /* Fall back to bounce buffer if write zeroes is unsupported */
3251 iov.iov_len = num * BDRV_SECTOR_SIZE;
3252 if (iov.iov_base == NULL) {
3253 iov.iov_base = qemu_blockalign(bs, num * BDRV_SECTOR_SIZE);
3254 memset(iov.iov_base, 0, num * BDRV_SECTOR_SIZE);
3255 }
3256 qemu_iovec_init_external(&qiov, &iov, 1);
3257
3258 ret = drv->bdrv_co_writev(bs, sector_num, num, &qiov);
3259
3260 /* Keep bounce buffer around if it is big enough for all
3261 * future requests.
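 *
 * (num can only be smaller than max_write_zeroes for an unaligned
 * head or tail chunk, or for the final chunk; a buffer that was sized
 * for a full max_write_zeroes chunk is kept and reused by every
 * following iteration.)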
3262 */ 3263 if (num < max_write_zeroes) { 3264 qemu_vfree(iov.iov_base); 3265 iov.iov_base = NULL; 3266 } 3267 } 3268 3269 sector_num += num; 3270 nb_sectors -= num; 3271 } 3272 3273 qemu_vfree(iov.iov_base); 3274 return ret; 3275 } 3276 3277 /* 3278 * Forwards an already correctly aligned write request to the BlockDriver. 3279 */ 3280 static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs, 3281 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes, 3282 QEMUIOVector *qiov, int flags) 3283 { 3284 BlockDriver *drv = bs->drv; 3285 bool waited; 3286 int ret; 3287 3288 int64_t sector_num = offset >> BDRV_SECTOR_BITS; 3289 unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS; 3290 3291 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0); 3292 assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0); 3293 assert(!qiov || bytes == qiov->size); 3294 3295 waited = wait_serialising_requests(req); 3296 assert(!waited || !req->serialising); 3297 assert(req->overlap_offset <= offset); 3298 assert(offset + bytes <= req->overlap_offset + req->overlap_bytes); 3299 3300 ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req); 3301 3302 if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF && 3303 !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_write_zeroes && 3304 qemu_iovec_is_zero(qiov)) { 3305 flags |= BDRV_REQ_ZERO_WRITE; 3306 if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) { 3307 flags |= BDRV_REQ_MAY_UNMAP; 3308 } 3309 } 3310 3311 if (ret < 0) { 3312 /* Do nothing, write notifier decided to fail this request */ 3313 } else if (flags & BDRV_REQ_ZERO_WRITE) { 3314 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_ZERO); 3315 ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors, flags); 3316 } else { 3317 BLKDBG_EVENT(bs, BLKDBG_PWRITEV); 3318 ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov); 3319 } 3320 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_DONE); 3321 3322 if (ret == 0 && !bs->enable_write_cache) { 3323 ret = bdrv_co_flush(bs); 3324 } 3325 3326 bdrv_set_dirty(bs, sector_num, nb_sectors); 3327 3328 if (bs->wr_highest_sector < sector_num + nb_sectors - 1) { 3329 bs->wr_highest_sector = sector_num + nb_sectors - 1; 3330 } 3331 if (bs->growable && ret >= 0) { 3332 bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors); 3333 } 3334 3335 return ret; 3336 } 3337 3338 /* 3339 * Handle a write request in coroutine context 3340 */ 3341 static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs, 3342 int64_t offset, unsigned int bytes, QEMUIOVector *qiov, 3343 BdrvRequestFlags flags) 3344 { 3345 BdrvTrackedRequest req; 3346 /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */ 3347 uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment); 3348 uint8_t *head_buf = NULL; 3349 uint8_t *tail_buf = NULL; 3350 QEMUIOVector local_qiov; 3351 bool use_local_qiov = false; 3352 int ret; 3353 3354 if (!bs->drv) { 3355 return -ENOMEDIUM; 3356 } 3357 if (bs->read_only) { 3358 return -EACCES; 3359 } 3360 if (bdrv_check_byte_request(bs, offset, bytes)) { 3361 return -EIO; 3362 } 3363 3364 /* throttling disk I/O */ 3365 if (bs->io_limits_enabled) { 3366 bdrv_io_limits_intercept(bs, bytes, true); 3367 } 3368 3369 /* 3370 * Align write if necessary by performing a read-modify-write cycle. 3371 * Pad qiov with the read parts and be sure to have a tracked request not 3372 * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle. 
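 *
 * Worked example (sketch, align = 512): a 100-byte write at offset 1000
 * reads the 512-byte blocks at offsets 512 and 1024 into head_buf and
 * tail_buf, splices the guest's bytes in at [1000, 1100), and writes
 * the aligned span [512, 1536) back as a single request.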
3373 */ 3374 tracked_request_begin(&req, bs, offset, bytes, true); 3375 3376 if (offset & (align - 1)) { 3377 QEMUIOVector head_qiov; 3378 struct iovec head_iov; 3379 3380 mark_request_serialising(&req, align); 3381 wait_serialising_requests(&req); 3382 3383 head_buf = qemu_blockalign(bs, align); 3384 head_iov = (struct iovec) { 3385 .iov_base = head_buf, 3386 .iov_len = align, 3387 }; 3388 qemu_iovec_init_external(&head_qiov, &head_iov, 1); 3389 3390 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_HEAD); 3391 ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align, 3392 align, &head_qiov, 0); 3393 if (ret < 0) { 3394 goto fail; 3395 } 3396 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD); 3397 3398 qemu_iovec_init(&local_qiov, qiov->niov + 2); 3399 qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1)); 3400 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); 3401 use_local_qiov = true; 3402 3403 bytes += offset & (align - 1); 3404 offset = offset & ~(align - 1); 3405 } 3406 3407 if ((offset + bytes) & (align - 1)) { 3408 QEMUIOVector tail_qiov; 3409 struct iovec tail_iov; 3410 size_t tail_bytes; 3411 bool waited; 3412 3413 mark_request_serialising(&req, align); 3414 waited = wait_serialising_requests(&req); 3415 assert(!waited || !use_local_qiov); 3416 3417 tail_buf = qemu_blockalign(bs, align); 3418 tail_iov = (struct iovec) { 3419 .iov_base = tail_buf, 3420 .iov_len = align, 3421 }; 3422 qemu_iovec_init_external(&tail_qiov, &tail_iov, 1); 3423 3424 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_TAIL); 3425 ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align, 3426 align, &tail_qiov, 0); 3427 if (ret < 0) { 3428 goto fail; 3429 } 3430 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL); 3431 3432 if (!use_local_qiov) { 3433 qemu_iovec_init(&local_qiov, qiov->niov + 1); 3434 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); 3435 use_local_qiov = true; 3436 } 3437 3438 tail_bytes = (offset + bytes) & (align - 1); 3439 qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes); 3440 3441 bytes = ROUND_UP(bytes, align); 3442 } 3443 3444 ret = bdrv_aligned_pwritev(bs, &req, offset, bytes, 3445 use_local_qiov ? 
&local_qiov : qiov,
3446 flags);
3447
3448 fail:
3449 tracked_request_end(&req);
3450
3451 if (use_local_qiov) {
3452 qemu_iovec_destroy(&local_qiov);
3453 }
3454 qemu_vfree(head_buf);
3455 qemu_vfree(tail_buf);
3456
3457 return ret;
3458 }
3459
3460 static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
3461 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
3462 BdrvRequestFlags flags)
3463 {
3464 if (nb_sectors < 0 || nb_sectors > (INT_MAX >> BDRV_SECTOR_BITS)) {
3465 return -EINVAL;
3466 }
3467
3468 return bdrv_co_do_pwritev(bs, sector_num << BDRV_SECTOR_BITS,
3469 nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
3470 }
3471
3472 int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
3473 int nb_sectors, QEMUIOVector *qiov)
3474 {
3475 trace_bdrv_co_writev(bs, sector_num, nb_sectors);
3476
3477 return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
3478 }
3479
3480 int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
3481 int64_t sector_num, int nb_sectors,
3482 BdrvRequestFlags flags)
3483 {
3484 trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors, flags);
3485
3486 if (!(bs->open_flags & BDRV_O_UNMAP)) {
3487 flags &= ~BDRV_REQ_MAY_UNMAP;
3488 }
3489
3490 return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
3491 BDRV_REQ_ZERO_WRITE | flags);
3492 }
3493
3494 /**
3495 * Truncate file to 'offset' bytes (needed only for file protocols)
3496 */
3497 int bdrv_truncate(BlockDriverState *bs, int64_t offset)
3498 {
3499 BlockDriver *drv = bs->drv;
3500 int ret;
3501 if (!drv)
3502 return -ENOMEDIUM;
3503 if (!drv->bdrv_truncate)
3504 return -ENOTSUP;
3505 if (bs->read_only)
3506 return -EACCES;
3507
3508 ret = drv->bdrv_truncate(bs, offset);
3509 if (ret == 0) {
3510 ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
3511 bdrv_dev_resize_cb(bs);
3512 }
3513 return ret;
3514 }
3515
3516 /**
3517 * Length of an allocated file in bytes. Sparse files are counted by actual
3518 * allocated space. Return < 0 if error or unknown.
3519 */
3520 int64_t bdrv_get_allocated_file_size(BlockDriverState *bs)
3521 {
3522 BlockDriver *drv = bs->drv;
3523 if (!drv) {
3524 return -ENOMEDIUM;
3525 }
3526 if (drv->bdrv_get_allocated_file_size) {
3527 return drv->bdrv_get_allocated_file_size(bs);
3528 }
3529 if (bs->file) {
3530 return bdrv_get_allocated_file_size(bs->file);
3531 }
3532 return -ENOTSUP;
3533 }
3534
3535 /**
3536 * Return number of sectors on success, -errno on error.
3537 */
3538 int64_t bdrv_nb_sectors(BlockDriverState *bs)
3539 {
3540 BlockDriver *drv = bs->drv;
3541
3542 if (!drv)
3543 return -ENOMEDIUM;
3544
3545 if (drv->has_variable_length) {
3546 int ret = refresh_total_sectors(bs, bs->total_sectors);
3547 if (ret < 0) {
3548 return ret;
3549 }
3550 }
3551 return bs->total_sectors;
3552 }
3553
3554 /**
3555 * Return length in bytes on success, -errno on error.
3556 * The length is always a multiple of BDRV_SECTOR_SIZE.
3557 */
3558 int64_t bdrv_getlength(BlockDriverState *bs)
3559 {
3560 int64_t ret = bdrv_nb_sectors(bs);
3561
3562 return ret < 0 ? ret : ret * BDRV_SECTOR_SIZE;
3563 }
3564
3565 /* return 0 as number of sectors if no device present or error */
3566 void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
3567 {
3568 int64_t nb_sectors = bdrv_nb_sectors(bs);
3569
3570 *nb_sectors_ptr = nb_sectors < 0 ?
0 : nb_sectors; 3571 } 3572 3573 void bdrv_set_on_error(BlockDriverState *bs, BlockdevOnError on_read_error, 3574 BlockdevOnError on_write_error) 3575 { 3576 bs->on_read_error = on_read_error; 3577 bs->on_write_error = on_write_error; 3578 } 3579 3580 BlockdevOnError bdrv_get_on_error(BlockDriverState *bs, bool is_read) 3581 { 3582 return is_read ? bs->on_read_error : bs->on_write_error; 3583 } 3584 3585 BlockErrorAction bdrv_get_error_action(BlockDriverState *bs, bool is_read, int error) 3586 { 3587 BlockdevOnError on_err = is_read ? bs->on_read_error : bs->on_write_error; 3588 3589 switch (on_err) { 3590 case BLOCKDEV_ON_ERROR_ENOSPC: 3591 return (error == ENOSPC) ? 3592 BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT; 3593 case BLOCKDEV_ON_ERROR_STOP: 3594 return BLOCK_ERROR_ACTION_STOP; 3595 case BLOCKDEV_ON_ERROR_REPORT: 3596 return BLOCK_ERROR_ACTION_REPORT; 3597 case BLOCKDEV_ON_ERROR_IGNORE: 3598 return BLOCK_ERROR_ACTION_IGNORE; 3599 default: 3600 abort(); 3601 } 3602 } 3603 3604 /* This is done by device models because, while the block layer knows 3605 * about the error, it does not know whether an operation comes from 3606 * the device or the block layer (from a job, for example). 3607 */ 3608 void bdrv_error_action(BlockDriverState *bs, BlockErrorAction action, 3609 bool is_read, int error) 3610 { 3611 assert(error >= 0); 3612 3613 if (action == BLOCK_ERROR_ACTION_STOP) { 3614 /* First set the iostatus, so that "info block" returns an iostatus 3615 * that matches the events raised so far (an additional error iostatus 3616 * is fine, but not a lost one). 3617 */ 3618 bdrv_iostatus_set_err(bs, error); 3619 3620 /* Then raise the request to stop the VM and the event. 3621 * qemu_system_vmstop_request_prepare has two effects. First, 3622 * it ensures that the STOP event always comes after the 3623 * BLOCK_IO_ERROR event. Second, it ensures that even if management 3624 * can observe the STOP event and do a "cont" before the STOP 3625 * event is issued, the VM will not stop. In this case, vm_start() 3626 * also ensures that the STOP/RESUME pair of events is emitted. 3627 */ 3628 qemu_system_vmstop_request_prepare(); 3629 qapi_event_send_block_io_error(bdrv_get_device_name(bs), 3630 is_read ? IO_OPERATION_TYPE_READ : 3631 IO_OPERATION_TYPE_WRITE, 3632 action, &error_abort); 3633 qemu_system_vmstop_request(RUN_STATE_IO_ERROR); 3634 } else { 3635 qapi_event_send_block_io_error(bdrv_get_device_name(bs), 3636 is_read ? 
IO_OPERATION_TYPE_READ : 3637 IO_OPERATION_TYPE_WRITE, 3638 action, &error_abort); 3639 } 3640 } 3641 3642 int bdrv_is_read_only(BlockDriverState *bs) 3643 { 3644 return bs->read_only; 3645 } 3646 3647 int bdrv_is_sg(BlockDriverState *bs) 3648 { 3649 return bs->sg; 3650 } 3651 3652 int bdrv_enable_write_cache(BlockDriverState *bs) 3653 { 3654 return bs->enable_write_cache; 3655 } 3656 3657 void bdrv_set_enable_write_cache(BlockDriverState *bs, bool wce) 3658 { 3659 bs->enable_write_cache = wce; 3660 3661 /* so a reopen() will preserve wce */ 3662 if (wce) { 3663 bs->open_flags |= BDRV_O_CACHE_WB; 3664 } else { 3665 bs->open_flags &= ~BDRV_O_CACHE_WB; 3666 } 3667 } 3668 3669 int bdrv_is_encrypted(BlockDriverState *bs) 3670 { 3671 if (bs->backing_hd && bs->backing_hd->encrypted) 3672 return 1; 3673 return bs->encrypted; 3674 } 3675 3676 int bdrv_key_required(BlockDriverState *bs) 3677 { 3678 BlockDriverState *backing_hd = bs->backing_hd; 3679 3680 if (backing_hd && backing_hd->encrypted && !backing_hd->valid_key) 3681 return 1; 3682 return (bs->encrypted && !bs->valid_key); 3683 } 3684 3685 int bdrv_set_key(BlockDriverState *bs, const char *key) 3686 { 3687 int ret; 3688 if (bs->backing_hd && bs->backing_hd->encrypted) { 3689 ret = bdrv_set_key(bs->backing_hd, key); 3690 if (ret < 0) 3691 return ret; 3692 if (!bs->encrypted) 3693 return 0; 3694 } 3695 if (!bs->encrypted) { 3696 return -EINVAL; 3697 } else if (!bs->drv || !bs->drv->bdrv_set_key) { 3698 return -ENOMEDIUM; 3699 } 3700 ret = bs->drv->bdrv_set_key(bs, key); 3701 if (ret < 0) { 3702 bs->valid_key = 0; 3703 } else if (!bs->valid_key) { 3704 bs->valid_key = 1; 3705 /* call the change callback now, we skipped it on open */ 3706 bdrv_dev_change_media_cb(bs, true); 3707 } 3708 return ret; 3709 } 3710 3711 const char *bdrv_get_format_name(BlockDriverState *bs) 3712 { 3713 return bs->drv ? bs->drv->format_name : NULL; 3714 } 3715 3716 void bdrv_iterate_format(void (*it)(void *opaque, const char *name), 3717 void *opaque) 3718 { 3719 BlockDriver *drv; 3720 int count = 0; 3721 const char **formats = NULL; 3722 3723 QLIST_FOREACH(drv, &bdrv_drivers, list) { 3724 if (drv->format_name) { 3725 bool found = false; 3726 int i = count; 3727 while (formats && i && !found) { 3728 found = !strcmp(formats[--i], drv->format_name); 3729 } 3730 3731 if (!found) { 3732 formats = g_realloc(formats, (count + 1) * sizeof(char *)); 3733 formats[count++] = drv->format_name; 3734 it(opaque, drv->format_name); 3735 } 3736 } 3737 } 3738 g_free(formats); 3739 } 3740 3741 /* This function is to find block backend bs */ 3742 BlockDriverState *bdrv_find(const char *name) 3743 { 3744 BlockDriverState *bs; 3745 3746 QTAILQ_FOREACH(bs, &bdrv_states, device_list) { 3747 if (!strcmp(name, bs->device_name)) { 3748 return bs; 3749 } 3750 } 3751 return NULL; 3752 } 3753 3754 /* This function is to find a node in the bs graph */ 3755 BlockDriverState *bdrv_find_node(const char *node_name) 3756 { 3757 BlockDriverState *bs; 3758 3759 assert(node_name); 3760 3761 QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) { 3762 if (!strcmp(node_name, bs->node_name)) { 3763 return bs; 3764 } 3765 } 3766 return NULL; 3767 } 3768 3769 /* Put this QMP function here so it can access the static graph_bdrv_states. 
*/ 3770 BlockDeviceInfoList *bdrv_named_nodes_list(void) 3771 { 3772 BlockDeviceInfoList *list, *entry; 3773 BlockDriverState *bs; 3774 3775 list = NULL; 3776 QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) { 3777 entry = g_malloc0(sizeof(*entry)); 3778 entry->value = bdrv_block_device_info(bs); 3779 entry->next = list; 3780 list = entry; 3781 } 3782 3783 return list; 3784 } 3785 3786 BlockDriverState *bdrv_lookup_bs(const char *device, 3787 const char *node_name, 3788 Error **errp) 3789 { 3790 BlockDriverState *bs = NULL; 3791 3792 if (device) { 3793 bs = bdrv_find(device); 3794 3795 if (bs) { 3796 return bs; 3797 } 3798 } 3799 3800 if (node_name) { 3801 bs = bdrv_find_node(node_name); 3802 3803 if (bs) { 3804 return bs; 3805 } 3806 } 3807 3808 error_setg(errp, "Cannot find device=%s nor node_name=%s", 3809 device ? device : "", 3810 node_name ? node_name : ""); 3811 return NULL; 3812 } 3813 3814 /* If 'base' is in the same chain as 'top', return true. Otherwise, 3815 * return false. If either argument is NULL, return false. */ 3816 bool bdrv_chain_contains(BlockDriverState *top, BlockDriverState *base) 3817 { 3818 while (top && top != base) { 3819 top = top->backing_hd; 3820 } 3821 3822 return top != NULL; 3823 } 3824 3825 BlockDriverState *bdrv_next(BlockDriverState *bs) 3826 { 3827 if (!bs) { 3828 return QTAILQ_FIRST(&bdrv_states); 3829 } 3830 return QTAILQ_NEXT(bs, device_list); 3831 } 3832 3833 void bdrv_iterate(void (*it)(void *opaque, BlockDriverState *bs), void *opaque) 3834 { 3835 BlockDriverState *bs; 3836 3837 QTAILQ_FOREACH(bs, &bdrv_states, device_list) { 3838 it(opaque, bs); 3839 } 3840 } 3841 3842 const char *bdrv_get_device_name(BlockDriverState *bs) 3843 { 3844 return bs->device_name; 3845 } 3846 3847 int bdrv_get_flags(BlockDriverState *bs) 3848 { 3849 return bs->open_flags; 3850 } 3851 3852 int bdrv_flush_all(void) 3853 { 3854 BlockDriverState *bs; 3855 int result = 0; 3856 3857 QTAILQ_FOREACH(bs, &bdrv_states, device_list) { 3858 AioContext *aio_context = bdrv_get_aio_context(bs); 3859 int ret; 3860 3861 aio_context_acquire(aio_context); 3862 ret = bdrv_flush(bs); 3863 if (ret < 0 && !result) { 3864 result = ret; 3865 } 3866 aio_context_release(aio_context); 3867 } 3868 3869 return result; 3870 } 3871 3872 int bdrv_has_zero_init_1(BlockDriverState *bs) 3873 { 3874 return 1; 3875 } 3876 3877 int bdrv_has_zero_init(BlockDriverState *bs) 3878 { 3879 assert(bs->drv); 3880 3881 /* If BS is a copy on write image, it is initialized to 3882 the contents of the base image, which may not be zeroes. 
*/ 3883 if (bs->backing_hd) { 3884 return 0; 3885 } 3886 if (bs->drv->bdrv_has_zero_init) { 3887 return bs->drv->bdrv_has_zero_init(bs); 3888 } 3889 3890 /* safe default */ 3891 return 0; 3892 } 3893 3894 bool bdrv_unallocated_blocks_are_zero(BlockDriverState *bs) 3895 { 3896 BlockDriverInfo bdi; 3897 3898 if (bs->backing_hd) { 3899 return false; 3900 } 3901 3902 if (bdrv_get_info(bs, &bdi) == 0) { 3903 return bdi.unallocated_blocks_are_zero; 3904 } 3905 3906 return false; 3907 } 3908 3909 bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs) 3910 { 3911 BlockDriverInfo bdi; 3912 3913 if (bs->backing_hd || !(bs->open_flags & BDRV_O_UNMAP)) { 3914 return false; 3915 } 3916 3917 if (bdrv_get_info(bs, &bdi) == 0) { 3918 return bdi.can_write_zeroes_with_unmap; 3919 } 3920 3921 return false; 3922 } 3923 3924 typedef struct BdrvCoGetBlockStatusData { 3925 BlockDriverState *bs; 3926 BlockDriverState *base; 3927 int64_t sector_num; 3928 int nb_sectors; 3929 int *pnum; 3930 int64_t ret; 3931 bool done; 3932 } BdrvCoGetBlockStatusData; 3933 3934 /* 3935 * Returns true iff the specified sector is present in the disk image. Drivers 3936 * not implementing the functionality are assumed to not support backing files, 3937 * hence all their sectors are reported as allocated. 3938 * 3939 * If 'sector_num' is beyond the end of the disk image the return value is 0 3940 * and 'pnum' is set to 0. 3941 * 3942 * 'pnum' is set to the number of sectors (including and immediately following 3943 * the specified sector) that are known to be in the same 3944 * allocated/unallocated state. 3945 * 3946 * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes 3947 * beyond the end of the disk image it will be clamped. 3948 */ 3949 static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs, 3950 int64_t sector_num, 3951 int nb_sectors, int *pnum) 3952 { 3953 int64_t total_sectors; 3954 int64_t n; 3955 int64_t ret, ret2; 3956 3957 total_sectors = bdrv_nb_sectors(bs); 3958 if (total_sectors < 0) { 3959 return total_sectors; 3960 } 3961 3962 if (sector_num >= total_sectors) { 3963 *pnum = 0; 3964 return 0; 3965 } 3966 3967 n = total_sectors - sector_num; 3968 if (n < nb_sectors) { 3969 nb_sectors = n; 3970 } 3971 3972 if (!bs->drv->bdrv_co_get_block_status) { 3973 *pnum = nb_sectors; 3974 ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED; 3975 if (bs->drv->protocol_name) { 3976 ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE); 3977 } 3978 return ret; 3979 } 3980 3981 ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum); 3982 if (ret < 0) { 3983 *pnum = 0; 3984 return ret; 3985 } 3986 3987 if (ret & BDRV_BLOCK_RAW) { 3988 assert(ret & BDRV_BLOCK_OFFSET_VALID); 3989 return bdrv_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS, 3990 *pnum, pnum); 3991 } 3992 3993 if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) { 3994 ret |= BDRV_BLOCK_ALLOCATED; 3995 } 3996 3997 if (!(ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO)) { 3998 if (bdrv_unallocated_blocks_are_zero(bs)) { 3999 ret |= BDRV_BLOCK_ZERO; 4000 } else if (bs->backing_hd) { 4001 BlockDriverState *bs2 = bs->backing_hd; 4002 int64_t nb_sectors2 = bdrv_nb_sectors(bs2); 4003 if (nb_sectors2 >= 0 && sector_num >= nb_sectors2) { 4004 ret |= BDRV_BLOCK_ZERO; 4005 } 4006 } 4007 } 4008 4009 if (bs->file && 4010 (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) && 4011 (ret & BDRV_BLOCK_OFFSET_VALID)) { 4012 ret2 = bdrv_co_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS, 4013 *pnum, pnum); 4014 if (ret2 
4025 /* Coroutine wrapper for bdrv_get_block_status() */
4026 static void coroutine_fn bdrv_get_block_status_co_entry(void *opaque)
4027 {
4028 BdrvCoGetBlockStatusData *data = opaque;
4029 BlockDriverState *bs = data->bs;
4030
4031 data->ret = bdrv_co_get_block_status(bs, data->sector_num, data->nb_sectors,
4032 data->pnum);
4033 data->done = true;
4034 }
4035
4036 /*
4037 * Synchronous wrapper around bdrv_co_get_block_status().
4038 *
4039 * See bdrv_co_get_block_status() for details.
4040 */
4041 int64_t bdrv_get_block_status(BlockDriverState *bs, int64_t sector_num,
4042 int nb_sectors, int *pnum)
4043 {
4044 Coroutine *co;
4045 BdrvCoGetBlockStatusData data = {
4046 .bs = bs,
4047 .sector_num = sector_num,
4048 .nb_sectors = nb_sectors,
4049 .pnum = pnum,
4050 .done = false,
4051 };
4052
4053 if (qemu_in_coroutine()) {
4054 /* Fast-path if already in coroutine context */
4055 bdrv_get_block_status_co_entry(&data);
4056 } else {
4057 AioContext *aio_context = bdrv_get_aio_context(bs);
4058
4059 co = qemu_coroutine_create(bdrv_get_block_status_co_entry);
4060 qemu_coroutine_enter(co, &data);
4061 while (!data.done) {
4062 aio_poll(aio_context, true);
4063 }
4064 }
4065 return data.ret;
4066 }
4067
4068 int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
4069 int nb_sectors, int *pnum)
4070 {
4071 int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum);
4072 if (ret < 0) {
4073 return ret;
4074 }
4075 return !!(ret & BDRV_BLOCK_ALLOCATED);
4076 }
4077
4078 /*
4079 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
4080 *
4081 * Return true if the given sector is allocated in any image between
4082 * BASE and TOP (inclusive). BASE can be NULL to check if the given
4083 * sector is allocated in any image of the chain. Return false otherwise.
4084 *
4085 * 'pnum' is set to the number of sectors (including and immediately following
4086 * the specified sector) that are known to be in the same
4087 * allocated/unallocated state.
4088 *
4089 */
4090 int bdrv_is_allocated_above(BlockDriverState *top,
4091 BlockDriverState *base,
4092 int64_t sector_num,
4093 int nb_sectors, int *pnum)
4094 {
4095 BlockDriverState *intermediate;
4096 int ret, n = nb_sectors;
4097
4098 intermediate = top;
4099 while (intermediate && intermediate != base) {
4100 int pnum_inter;
4101 ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
4102 &pnum_inter);
4103 if (ret < 0) {
4104 return ret;
4105 } else if (ret) {
4106 *pnum = pnum_inter;
4107 return 1;
4108 }
4109
4110 /*
4111 * [sector_num, nb_sectors] is unallocated on top but intermediate
4112 * might have
4113 *
4114 * [sector_num+x, nb_sectors] allocated.
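 * For example, if TOP is unallocated throughout [8, 16) while INTER1 has
 * [12, 16) allocated, a query for [8, 16) returns 0 with *pnum = 4, and
 * the caller resumes the scan at sector 12.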
4115 */ 4116 if (n > pnum_inter && 4117 (intermediate == top || 4118 sector_num + pnum_inter < intermediate->total_sectors)) { 4119 n = pnum_inter; 4120 } 4121 4122 intermediate = intermediate->backing_hd; 4123 } 4124 4125 *pnum = n; 4126 return 0; 4127 } 4128 4129 const char *bdrv_get_encrypted_filename(BlockDriverState *bs) 4130 { 4131 if (bs->backing_hd && bs->backing_hd->encrypted) 4132 return bs->backing_file; 4133 else if (bs->encrypted) 4134 return bs->filename; 4135 else 4136 return NULL; 4137 } 4138 4139 void bdrv_get_backing_filename(BlockDriverState *bs, 4140 char *filename, int filename_size) 4141 { 4142 pstrcpy(filename, filename_size, bs->backing_file); 4143 } 4144 4145 int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num, 4146 const uint8_t *buf, int nb_sectors) 4147 { 4148 BlockDriver *drv = bs->drv; 4149 if (!drv) 4150 return -ENOMEDIUM; 4151 if (!drv->bdrv_write_compressed) 4152 return -ENOTSUP; 4153 if (bdrv_check_request(bs, sector_num, nb_sectors)) 4154 return -EIO; 4155 4156 assert(QLIST_EMPTY(&bs->dirty_bitmaps)); 4157 4158 return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors); 4159 } 4160 4161 int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi) 4162 { 4163 BlockDriver *drv = bs->drv; 4164 if (!drv) 4165 return -ENOMEDIUM; 4166 if (!drv->bdrv_get_info) 4167 return -ENOTSUP; 4168 memset(bdi, 0, sizeof(*bdi)); 4169 return drv->bdrv_get_info(bs, bdi); 4170 } 4171 4172 ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs) 4173 { 4174 BlockDriver *drv = bs->drv; 4175 if (drv && drv->bdrv_get_specific_info) { 4176 return drv->bdrv_get_specific_info(bs); 4177 } 4178 return NULL; 4179 } 4180 4181 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf, 4182 int64_t pos, int size) 4183 { 4184 QEMUIOVector qiov; 4185 struct iovec iov = { 4186 .iov_base = (void *) buf, 4187 .iov_len = size, 4188 }; 4189 4190 qemu_iovec_init_external(&qiov, &iov, 1); 4191 return bdrv_writev_vmstate(bs, &qiov, pos); 4192 } 4193 4194 int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) 4195 { 4196 BlockDriver *drv = bs->drv; 4197 4198 if (!drv) { 4199 return -ENOMEDIUM; 4200 } else if (drv->bdrv_save_vmstate) { 4201 return drv->bdrv_save_vmstate(bs, qiov, pos); 4202 } else if (bs->file) { 4203 return bdrv_writev_vmstate(bs->file, qiov, pos); 4204 } 4205 4206 return -ENOTSUP; 4207 } 4208 4209 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf, 4210 int64_t pos, int size) 4211 { 4212 BlockDriver *drv = bs->drv; 4213 if (!drv) 4214 return -ENOMEDIUM; 4215 if (drv->bdrv_load_vmstate) 4216 return drv->bdrv_load_vmstate(bs, buf, pos, size); 4217 if (bs->file) 4218 return bdrv_load_vmstate(bs->file, buf, pos, size); 4219 return -ENOTSUP; 4220 } 4221 4222 void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event) 4223 { 4224 if (!bs || !bs->drv || !bs->drv->bdrv_debug_event) { 4225 return; 4226 } 4227 4228 bs->drv->bdrv_debug_event(bs, event); 4229 } 4230 4231 int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event, 4232 const char *tag) 4233 { 4234 while (bs && bs->drv && !bs->drv->bdrv_debug_breakpoint) { 4235 bs = bs->file; 4236 } 4237 4238 if (bs && bs->drv && bs->drv->bdrv_debug_breakpoint) { 4239 return bs->drv->bdrv_debug_breakpoint(bs, event, tag); 4240 } 4241 4242 return -ENOTSUP; 4243 } 4244 4245 int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag) 4246 { 4247 while (bs && bs->drv && !bs->drv->bdrv_debug_remove_breakpoint) { 4248 bs = bs->file; 4249 } 4250 4251 if (bs && 
bs->drv && bs->drv->bdrv_debug_remove_breakpoint) { 4252 return bs->drv->bdrv_debug_remove_breakpoint(bs, tag); 4253 } 4254 4255 return -ENOTSUP; 4256 } 4257 4258 int bdrv_debug_resume(BlockDriverState *bs, const char *tag) 4259 { 4260 while (bs && (!bs->drv || !bs->drv->bdrv_debug_resume)) { 4261 bs = bs->file; 4262 } 4263 4264 if (bs && bs->drv && bs->drv->bdrv_debug_resume) { 4265 return bs->drv->bdrv_debug_resume(bs, tag); 4266 } 4267 4268 return -ENOTSUP; 4269 } 4270 4271 bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag) 4272 { 4273 while (bs && bs->drv && !bs->drv->bdrv_debug_is_suspended) { 4274 bs = bs->file; 4275 } 4276 4277 if (bs && bs->drv && bs->drv->bdrv_debug_is_suspended) { 4278 return bs->drv->bdrv_debug_is_suspended(bs, tag); 4279 } 4280 4281 return false; 4282 } 4283 4284 int bdrv_is_snapshot(BlockDriverState *bs) 4285 { 4286 return !!(bs->open_flags & BDRV_O_SNAPSHOT); 4287 } 4288 4289 /* backing_file can either be relative, or absolute, or a protocol. If it is 4290 * relative, it must be relative to the chain. So, passing in bs->filename 4291 * from a BDS as backing_file should not be done, as that may be relative to 4292 * the CWD rather than the chain. */ 4293 BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs, 4294 const char *backing_file) 4295 { 4296 char *filename_full = NULL; 4297 char *backing_file_full = NULL; 4298 char *filename_tmp = NULL; 4299 int is_protocol = 0; 4300 BlockDriverState *curr_bs = NULL; 4301 BlockDriverState *retval = NULL; 4302 4303 if (!bs || !bs->drv || !backing_file) { 4304 return NULL; 4305 } 4306 4307 filename_full = g_malloc(PATH_MAX); 4308 backing_file_full = g_malloc(PATH_MAX); 4309 filename_tmp = g_malloc(PATH_MAX); 4310 4311 is_protocol = path_has_protocol(backing_file); 4312 4313 for (curr_bs = bs; curr_bs->backing_hd; curr_bs = curr_bs->backing_hd) { 4314 4315 /* If either of the filename paths is actually a protocol, then 4316 * compare unmodified paths; otherwise make paths relative */ 4317 if (is_protocol || path_has_protocol(curr_bs->backing_file)) { 4318 if (strcmp(backing_file, curr_bs->backing_file) == 0) { 4319 retval = curr_bs->backing_hd; 4320 break; 4321 } 4322 } else { 4323 /* If not an absolute filename path, make it relative to the current 4324 * image's filename path */ 4325 path_combine(filename_tmp, PATH_MAX, curr_bs->filename, 4326 backing_file); 4327 4328 /* We are going to compare absolute pathnames */ 4329 if (!realpath(filename_tmp, filename_full)) { 4330 continue; 4331 } 4332 4333 /* We need to make sure the backing filename we are comparing against 4334 * is relative to the current image filename (or absolute) */ 4335 path_combine(filename_tmp, PATH_MAX, curr_bs->filename, 4336 curr_bs->backing_file); 4337 4338 if (!realpath(filename_tmp, backing_file_full)) { 4339 continue; 4340 } 4341 4342 if (strcmp(backing_file_full, filename_full) == 0) { 4343 retval = curr_bs->backing_hd; 4344 break; 4345 } 4346 } 4347 } 4348 4349 g_free(filename_full); 4350 g_free(backing_file_full); 4351 g_free(filename_tmp); 4352 return retval; 4353 } 4354 4355 int bdrv_get_backing_file_depth(BlockDriverState *bs) 4356 { 4357 if (!bs->drv) { 4358 return 0; 4359 } 4360 4361 if (!bs->backing_hd) { 4362 return 0; 4363 } 4364 4365 return 1 + bdrv_get_backing_file_depth(bs->backing_hd); 4366 } 4367 4368 /**************************************************************/ 4369 /* async I/Os */ 4370 4371 BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num, 4372 QEMUIOVector *qiov, int 
nb_sectors, 4373 BlockDriverCompletionFunc *cb, void *opaque) 4374 { 4375 trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque); 4376 4377 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0, 4378 cb, opaque, false); 4379 } 4380 4381 BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num, 4382 QEMUIOVector *qiov, int nb_sectors, 4383 BlockDriverCompletionFunc *cb, void *opaque) 4384 { 4385 trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque); 4386 4387 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0, 4388 cb, opaque, true); 4389 } 4390 4391 BlockDriverAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs, 4392 int64_t sector_num, int nb_sectors, BdrvRequestFlags flags, 4393 BlockDriverCompletionFunc *cb, void *opaque) 4394 { 4395 trace_bdrv_aio_write_zeroes(bs, sector_num, nb_sectors, flags, opaque); 4396 4397 return bdrv_co_aio_rw_vector(bs, sector_num, NULL, nb_sectors, 4398 BDRV_REQ_ZERO_WRITE | flags, 4399 cb, opaque, true); 4400 } 4401 4402 4403 typedef struct MultiwriteCB { 4404 int error; 4405 int num_requests; 4406 int num_callbacks; 4407 struct { 4408 BlockDriverCompletionFunc *cb; 4409 void *opaque; 4410 QEMUIOVector *free_qiov; 4411 } callbacks[]; 4412 } MultiwriteCB; 4413 4414 static void multiwrite_user_cb(MultiwriteCB *mcb) 4415 { 4416 int i; 4417 4418 for (i = 0; i < mcb->num_callbacks; i++) { 4419 mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error); 4420 if (mcb->callbacks[i].free_qiov) { 4421 qemu_iovec_destroy(mcb->callbacks[i].free_qiov); 4422 } 4423 g_free(mcb->callbacks[i].free_qiov); 4424 } 4425 } 4426 4427 static void multiwrite_cb(void *opaque, int ret) 4428 { 4429 MultiwriteCB *mcb = opaque; 4430 4431 trace_multiwrite_cb(mcb, ret); 4432 4433 if (ret < 0 && !mcb->error) { 4434 mcb->error = ret; 4435 } 4436 4437 mcb->num_requests--; 4438 if (mcb->num_requests == 0) { 4439 multiwrite_user_cb(mcb); 4440 g_free(mcb); 4441 } 4442 } 4443 4444 static int multiwrite_req_compare(const void *a, const void *b) 4445 { 4446 const BlockRequest *req1 = a, *req2 = b; 4447 4448 /* 4449 * Note that we can't simply subtract req2->sector from req1->sector 4450 * here as that could overflow the return value. 4451 */ 4452 if (req1->sector > req2->sector) { 4453 return 1; 4454 } else if (req1->sector < req2->sector) { 4455 return -1; 4456 } else { 4457 return 0; 4458 } 4459 } 4460 4461 /* 4462 * Takes a bunch of requests and tries to merge them. Returns the number of 4463 * requests that remain after merging. 4464 */ 4465 static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs, 4466 int num_reqs, MultiwriteCB *mcb) 4467 { 4468 int i, outidx; 4469 4470 // Sort requests by start sector 4471 qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare); 4472 4473 // Check if adjacent requests touch the same clusters. If so, combine them, 4474 // filling up gaps with zero sectors. 4475 outidx = 0; 4476 for (i = 1; i < num_reqs; i++) { 4477 int merge = 0; 4478 int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors; 4479 4480 // Handle exactly sequential writes and overlapping writes. 4481 if (reqs[i].sector <= oldreq_last) { 4482 merge = 1; 4483 } 4484 4485 if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) { 4486 merge = 0; 4487 } 4488 4489 if (merge) { 4490 size_t size; 4491 QEMUIOVector *qiov = g_malloc0(sizeof(*qiov)); 4492 qemu_iovec_init(qiov, 4493 reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1); 4494 4495 // Add the first request to the merged one. 
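// (We only get here when the merge test above passed: reqs[i].sector <=
// oldreq_last, so the two sector ranges touch or overlap and the merged
// qiov describes one contiguous range.)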
4496 // If the requests are overlapping, drop the last sectors of the first request.
4497 size = (reqs[i].sector - reqs[outidx].sector) << 9;
4498 qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size);
4499
4500 // We should not need to add any zeros between the two requests
4501 assert(reqs[i].sector <= oldreq_last);
4502
4503 // Add the second request
4504 qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size);
4505
4506 reqs[outidx].nb_sectors = qiov->size >> 9;
4507 reqs[outidx].qiov = qiov;
4508
4509 mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
4510 } else {
4511 outidx++;
4512 reqs[outidx].sector = reqs[i].sector;
4513 reqs[outidx].nb_sectors = reqs[i].nb_sectors;
4514 reqs[outidx].qiov = reqs[i].qiov;
4515 }
4516 }
4517
4518 return outidx + 1;
4519 }
4520
4521 /*
4522 * Submit multiple AIO write requests at once.
4523 *
4524 * On success, the function returns 0 and all requests in the reqs array have
4525 * been submitted. In the error case this function returns -1, and the
4526 * requests may or may not have been submitted yet. In particular, this means
4527 * that the callback will be called for some of the requests, for others it
4528 * won't. The caller must check the error field of the BlockRequest to wait
4529 * for the right callbacks (if error != 0, no callback will be called).
4530 *
4531 * The implementation may modify the contents of the reqs array, e.g. to merge
4532 * requests. However, the fields opaque and error are left unmodified as they
4533 * are used to signal failure for a single request to the caller.
4534 */
4535 int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
4536 {
4537 MultiwriteCB *mcb;
4538 int i;
4539
4540 /* don't submit writes if we don't have a medium */
4541 if (bs->drv == NULL) {
4542 for (i = 0; i < num_reqs; i++) {
4543 reqs[i].error = -ENOMEDIUM;
4544 }
4545 return -1;
4546 }
4547
4548 if (num_reqs == 0) {
4549 return 0;
4550 }
4551
4552 // Create MultiwriteCB structure
4553 mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
4554 mcb->num_requests = 0;
4555 mcb->num_callbacks = num_reqs;
4556
4557 for (i = 0; i < num_reqs; i++) {
4558 mcb->callbacks[i].cb = reqs[i].cb;
4559 mcb->callbacks[i].opaque = reqs[i].opaque;
4560 }
4561
4562 // Check for mergeable requests
4563 num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);
4564
4565 trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);
4566
4567 /* Run the aio requests.
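 * (Each request completes through multiwrite_cb(), which latches the first
 * error and runs the guest callbacks once num_requests drops to zero.)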
*/ 4568 mcb->num_requests = num_reqs; 4569 for (i = 0; i < num_reqs; i++) { 4570 bdrv_co_aio_rw_vector(bs, reqs[i].sector, reqs[i].qiov, 4571 reqs[i].nb_sectors, reqs[i].flags, 4572 multiwrite_cb, mcb, 4573 true); 4574 } 4575 4576 return 0; 4577 } 4578 4579 void bdrv_aio_cancel(BlockDriverAIOCB *acb) 4580 { 4581 acb->aiocb_info->cancel(acb); 4582 } 4583 4584 /**************************************************************/ 4585 /* async block device emulation */ 4586 4587 typedef struct BlockDriverAIOCBSync { 4588 BlockDriverAIOCB common; 4589 QEMUBH *bh; 4590 int ret; 4591 /* vector translation state */ 4592 QEMUIOVector *qiov; 4593 uint8_t *bounce; 4594 int is_write; 4595 } BlockDriverAIOCBSync; 4596 4597 static void bdrv_aio_cancel_em(BlockDriverAIOCB *blockacb) 4598 { 4599 BlockDriverAIOCBSync *acb = 4600 container_of(blockacb, BlockDriverAIOCBSync, common); 4601 qemu_bh_delete(acb->bh); 4602 acb->bh = NULL; 4603 qemu_aio_release(acb); 4604 } 4605 4606 static const AIOCBInfo bdrv_em_aiocb_info = { 4607 .aiocb_size = sizeof(BlockDriverAIOCBSync), 4608 .cancel = bdrv_aio_cancel_em, 4609 }; 4610 4611 static void bdrv_aio_bh_cb(void *opaque) 4612 { 4613 BlockDriverAIOCBSync *acb = opaque; 4614 4615 if (!acb->is_write) 4616 qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size); 4617 qemu_vfree(acb->bounce); 4618 acb->common.cb(acb->common.opaque, acb->ret); 4619 qemu_bh_delete(acb->bh); 4620 acb->bh = NULL; 4621 qemu_aio_release(acb); 4622 } 4623 4624 static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs, 4625 int64_t sector_num, 4626 QEMUIOVector *qiov, 4627 int nb_sectors, 4628 BlockDriverCompletionFunc *cb, 4629 void *opaque, 4630 int is_write) 4631 4632 { 4633 BlockDriverAIOCBSync *acb; 4634 4635 acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque); 4636 acb->is_write = is_write; 4637 acb->qiov = qiov; 4638 acb->bounce = qemu_blockalign(bs, qiov->size); 4639 acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_aio_bh_cb, acb); 4640 4641 if (is_write) { 4642 qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size); 4643 acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors); 4644 } else { 4645 acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors); 4646 } 4647 4648 qemu_bh_schedule(acb->bh); 4649 4650 return &acb->common; 4651 } 4652 4653 static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs, 4654 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, 4655 BlockDriverCompletionFunc *cb, void *opaque) 4656 { 4657 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0); 4658 } 4659 4660 static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs, 4661 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, 4662 BlockDriverCompletionFunc *cb, void *opaque) 4663 { 4664 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1); 4665 } 4666 4667 4668 typedef struct BlockDriverAIOCBCoroutine { 4669 BlockDriverAIOCB common; 4670 BlockRequest req; 4671 bool is_write; 4672 bool *done; 4673 QEMUBH* bh; 4674 } BlockDriverAIOCBCoroutine; 4675 4676 static void bdrv_aio_co_cancel_em(BlockDriverAIOCB *blockacb) 4677 { 4678 AioContext *aio_context = bdrv_get_aio_context(blockacb->bs); 4679 BlockDriverAIOCBCoroutine *acb = 4680 container_of(blockacb, BlockDriverAIOCBCoroutine, common); 4681 bool done = false; 4682 4683 acb->done = &done; 4684 while (!done) { 4685 aio_poll(aio_context, true); 4686 } 4687 } 4688 4689 static const AIOCBInfo bdrv_em_co_aiocb_info = { 4690 .aiocb_size = 
sizeof(BlockDriverAIOCBCoroutine), 4691 .cancel = bdrv_aio_co_cancel_em, 4692 }; 4693 4694 static void bdrv_co_em_bh(void *opaque) 4695 { 4696 BlockDriverAIOCBCoroutine *acb = opaque; 4697 4698 acb->common.cb(acb->common.opaque, acb->req.error); 4699 4700 if (acb->done) { 4701 *acb->done = true; 4702 } 4703 4704 qemu_bh_delete(acb->bh); 4705 qemu_aio_release(acb); 4706 } 4707 4708 /* Invoke bdrv_co_do_readv/bdrv_co_do_writev */ 4709 static void coroutine_fn bdrv_co_do_rw(void *opaque) 4710 { 4711 BlockDriverAIOCBCoroutine *acb = opaque; 4712 BlockDriverState *bs = acb->common.bs; 4713 4714 if (!acb->is_write) { 4715 acb->req.error = bdrv_co_do_readv(bs, acb->req.sector, 4716 acb->req.nb_sectors, acb->req.qiov, acb->req.flags); 4717 } else { 4718 acb->req.error = bdrv_co_do_writev(bs, acb->req.sector, 4719 acb->req.nb_sectors, acb->req.qiov, acb->req.flags); 4720 } 4721 4722 acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb); 4723 qemu_bh_schedule(acb->bh); 4724 } 4725 4726 static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs, 4727 int64_t sector_num, 4728 QEMUIOVector *qiov, 4729 int nb_sectors, 4730 BdrvRequestFlags flags, 4731 BlockDriverCompletionFunc *cb, 4732 void *opaque, 4733 bool is_write) 4734 { 4735 Coroutine *co; 4736 BlockDriverAIOCBCoroutine *acb; 4737 4738 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque); 4739 acb->req.sector = sector_num; 4740 acb->req.nb_sectors = nb_sectors; 4741 acb->req.qiov = qiov; 4742 acb->req.flags = flags; 4743 acb->is_write = is_write; 4744 acb->done = NULL; 4745 4746 co = qemu_coroutine_create(bdrv_co_do_rw); 4747 qemu_coroutine_enter(co, acb); 4748 4749 return &acb->common; 4750 } 4751 4752 static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque) 4753 { 4754 BlockDriverAIOCBCoroutine *acb = opaque; 4755 BlockDriverState *bs = acb->common.bs; 4756 4757 acb->req.error = bdrv_co_flush(bs); 4758 acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb); 4759 qemu_bh_schedule(acb->bh); 4760 } 4761 4762 BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs, 4763 BlockDriverCompletionFunc *cb, void *opaque) 4764 { 4765 trace_bdrv_aio_flush(bs, opaque); 4766 4767 Coroutine *co; 4768 BlockDriverAIOCBCoroutine *acb; 4769 4770 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque); 4771 acb->done = NULL; 4772 4773 co = qemu_coroutine_create(bdrv_aio_flush_co_entry); 4774 qemu_coroutine_enter(co, acb); 4775 4776 return &acb->common; 4777 } 4778 4779 static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque) 4780 { 4781 BlockDriverAIOCBCoroutine *acb = opaque; 4782 BlockDriverState *bs = acb->common.bs; 4783 4784 acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors); 4785 acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb); 4786 qemu_bh_schedule(acb->bh); 4787 } 4788 4789 BlockDriverAIOCB *bdrv_aio_discard(BlockDriverState *bs, 4790 int64_t sector_num, int nb_sectors, 4791 BlockDriverCompletionFunc *cb, void *opaque) 4792 { 4793 Coroutine *co; 4794 BlockDriverAIOCBCoroutine *acb; 4795 4796 trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque); 4797 4798 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque); 4799 acb->req.sector = sector_num; 4800 acb->req.nb_sectors = nb_sectors; 4801 acb->done = NULL; 4802 co = qemu_coroutine_create(bdrv_aio_discard_co_entry); 4803 qemu_coroutine_enter(co, acb); 4804 4805 return &acb->common; 4806 } 4807 4808 void bdrv_init(void) 4809 { 4810 module_call_init(MODULE_INIT_BLOCK); 4811 } 4812 4813 void 
bdrv_init_with_whitelist(void) 4814 { 4815 use_bdrv_whitelist = 1; 4816 bdrv_init(); 4817 } 4818 4819 void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs, 4820 BlockDriverCompletionFunc *cb, void *opaque) 4821 { 4822 BlockDriverAIOCB *acb; 4823 4824 acb = g_slice_alloc(aiocb_info->aiocb_size); 4825 acb->aiocb_info = aiocb_info; 4826 acb->bs = bs; 4827 acb->cb = cb; 4828 acb->opaque = opaque; 4829 return acb; 4830 } 4831 4832 void qemu_aio_release(void *p) 4833 { 4834 BlockDriverAIOCB *acb = p; 4835 g_slice_free1(acb->aiocb_info->aiocb_size, acb); 4836 } 4837 4838 /**************************************************************/ 4839 /* Coroutine block device emulation */ 4840 4841 typedef struct CoroutineIOCompletion { 4842 Coroutine *coroutine; 4843 int ret; 4844 } CoroutineIOCompletion; 4845 4846 static void bdrv_co_io_em_complete(void *opaque, int ret) 4847 { 4848 CoroutineIOCompletion *co = opaque; 4849 4850 co->ret = ret; 4851 qemu_coroutine_enter(co->coroutine, NULL); 4852 } 4853 4854 static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num, 4855 int nb_sectors, QEMUIOVector *iov, 4856 bool is_write) 4857 { 4858 CoroutineIOCompletion co = { 4859 .coroutine = qemu_coroutine_self(), 4860 }; 4861 BlockDriverAIOCB *acb; 4862 4863 if (is_write) { 4864 acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors, 4865 bdrv_co_io_em_complete, &co); 4866 } else { 4867 acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors, 4868 bdrv_co_io_em_complete, &co); 4869 } 4870 4871 trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb); 4872 if (!acb) { 4873 return -EIO; 4874 } 4875 qemu_coroutine_yield(); 4876 4877 return co.ret; 4878 } 4879 4880 static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs, 4881 int64_t sector_num, int nb_sectors, 4882 QEMUIOVector *iov) 4883 { 4884 return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false); 4885 } 4886 4887 static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs, 4888 int64_t sector_num, int nb_sectors, 4889 QEMUIOVector *iov) 4890 { 4891 return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true); 4892 } 4893 4894 static void coroutine_fn bdrv_flush_co_entry(void *opaque) 4895 { 4896 RwCo *rwco = opaque; 4897 4898 rwco->ret = bdrv_co_flush(rwco->bs); 4899 } 4900 4901 int coroutine_fn bdrv_co_flush(BlockDriverState *bs) 4902 { 4903 int ret; 4904 4905 if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) { 4906 return 0; 4907 } 4908 4909 /* Write back cached data to the OS even with cache=unsafe */ 4910 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS); 4911 if (bs->drv->bdrv_co_flush_to_os) { 4912 ret = bs->drv->bdrv_co_flush_to_os(bs); 4913 if (ret < 0) { 4914 return ret; 4915 } 4916 } 4917 4918 /* But don't actually force it to the disk with cache=unsafe */ 4919 if (bs->open_flags & BDRV_O_NO_FLUSH) { 4920 goto flush_parent; 4921 } 4922 4923 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK); 4924 if (bs->drv->bdrv_co_flush_to_disk) { 4925 ret = bs->drv->bdrv_co_flush_to_disk(bs); 4926 } else if (bs->drv->bdrv_aio_flush) { 4927 BlockDriverAIOCB *acb; 4928 CoroutineIOCompletion co = { 4929 .coroutine = qemu_coroutine_self(), 4930 }; 4931 4932 acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co); 4933 if (acb == NULL) { 4934 ret = -EIO; 4935 } else { 4936 qemu_coroutine_yield(); 4937 ret = co.ret; 4938 } 4939 } else { 4940 /* 4941 * Some block drivers always operate in either writethrough or unsafe 4942 * mode and don't support bdrv_flush therefore. 
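 * (nbd is one example: whether its flush reaches stable storage is decided
 * by the remote server.)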
Usually qemu doesn't 4943 * know how the server works (because the behaviour is hardcoded or 4944 * depends on server-side configuration), so we can't ensure that 4945 * everything is safe on disk. Returning an error doesn't work because 4946 * that would break guests even if the server operates in writethrough 4947 * mode. 4948 * 4949 * Let's hope the user knows what he's doing. 4950 */ 4951 ret = 0; 4952 } 4953 if (ret < 0) { 4954 return ret; 4955 } 4956 4957 /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH 4958 * in the case of cache=unsafe, so there are no useless flushes. 4959 */ 4960 flush_parent: 4961 return bdrv_co_flush(bs->file); 4962 } 4963 4964 void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp) 4965 { 4966 Error *local_err = NULL; 4967 int ret; 4968 4969 if (!bs->drv) { 4970 return; 4971 } 4972 4973 if (bs->drv->bdrv_invalidate_cache) { 4974 bs->drv->bdrv_invalidate_cache(bs, &local_err); 4975 } else if (bs->file) { 4976 bdrv_invalidate_cache(bs->file, &local_err); 4977 } 4978 if (local_err) { 4979 error_propagate(errp, local_err); 4980 return; 4981 } 4982 4983 ret = refresh_total_sectors(bs, bs->total_sectors); 4984 if (ret < 0) { 4985 error_setg_errno(errp, -ret, "Could not refresh total sector count"); 4986 return; 4987 } 4988 } 4989 4990 void bdrv_invalidate_cache_all(Error **errp) 4991 { 4992 BlockDriverState *bs; 4993 Error *local_err = NULL; 4994 4995 QTAILQ_FOREACH(bs, &bdrv_states, device_list) { 4996 AioContext *aio_context = bdrv_get_aio_context(bs); 4997 4998 aio_context_acquire(aio_context); 4999 bdrv_invalidate_cache(bs, &local_err); 5000 aio_context_release(aio_context); 5001 if (local_err) { 5002 error_propagate(errp, local_err); 5003 return; 5004 } 5005 } 5006 } 5007 5008 void bdrv_clear_incoming_migration_all(void) 5009 { 5010 BlockDriverState *bs; 5011 5012 QTAILQ_FOREACH(bs, &bdrv_states, device_list) { 5013 AioContext *aio_context = bdrv_get_aio_context(bs); 5014 5015 aio_context_acquire(aio_context); 5016 bs->open_flags = bs->open_flags & ~(BDRV_O_INCOMING); 5017 aio_context_release(aio_context); 5018 } 5019 } 5020 5021 int bdrv_flush(BlockDriverState *bs) 5022 { 5023 Coroutine *co; 5024 RwCo rwco = { 5025 .bs = bs, 5026 .ret = NOT_DONE, 5027 }; 5028 5029 if (qemu_in_coroutine()) { 5030 /* Fast-path if already in coroutine context */ 5031 bdrv_flush_co_entry(&rwco); 5032 } else { 5033 AioContext *aio_context = bdrv_get_aio_context(bs); 5034 5035 co = qemu_coroutine_create(bdrv_flush_co_entry); 5036 qemu_coroutine_enter(co, &rwco); 5037 while (rwco.ret == NOT_DONE) { 5038 aio_poll(aio_context, true); 5039 } 5040 } 5041 5042 return rwco.ret; 5043 } 5044 5045 typedef struct DiscardCo { 5046 BlockDriverState *bs; 5047 int64_t sector_num; 5048 int nb_sectors; 5049 int ret; 5050 } DiscardCo; 5051 static void coroutine_fn bdrv_discard_co_entry(void *opaque) 5052 { 5053 DiscardCo *rwco = opaque; 5054 5055 rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors); 5056 } 5057 5058 /* if no limit is specified in the BlockLimits use a default 5059 * of 32768 512-byte sectors (16 MiB) per request. 
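 * For example, with no driver-provided limit a 1 GiB discard request
 * (2097152 sectors) is issued as 64 chunks of 32768 sectors each.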
5060 */
5061 #define MAX_DISCARD_DEFAULT 32768
5062
5063 int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
5064 int nb_sectors)
5065 {
5066 int max_discard;
5067
5068 if (!bs->drv) {
5069 return -ENOMEDIUM;
5070 } else if (bdrv_check_request(bs, sector_num, nb_sectors)) {
5071 return -EIO;
5072 } else if (bs->read_only) {
5073 return -EROFS;
5074 }
5075
5076 bdrv_reset_dirty(bs, sector_num, nb_sectors);
5077
5078 /* Do nothing if disabled. */
5079 if (!(bs->open_flags & BDRV_O_UNMAP)) {
5080 return 0;
5081 }
5082
5083 if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) {
5084 return 0;
5085 }
5086
5087 max_discard = bs->bl.max_discard ? bs->bl.max_discard : MAX_DISCARD_DEFAULT;
5088 while (nb_sectors > 0) {
5089 int ret;
5090 int num = nb_sectors;
5091
5092 /* align request */
5093 if (bs->bl.discard_alignment &&
5094 num >= bs->bl.discard_alignment &&
5095 sector_num % bs->bl.discard_alignment) {
5096 if (num > bs->bl.discard_alignment) {
5097 num = bs->bl.discard_alignment;
5098 }
5099 num -= sector_num % bs->bl.discard_alignment;
5100 }
5101
5102 /* limit request size */
5103 if (num > max_discard) {
5104 num = max_discard;
5105 }
5106
5107 if (bs->drv->bdrv_co_discard) {
5108 ret = bs->drv->bdrv_co_discard(bs, sector_num, num);
5109 } else {
5110 BlockDriverAIOCB *acb;
5111 CoroutineIOCompletion co = {
5112 .coroutine = qemu_coroutine_self(),
5113 };
5114
5115 acb = bs->drv->bdrv_aio_discard(bs, sector_num, num,
5116 bdrv_co_io_em_complete, &co);
5117 if (acb == NULL) {
5118 return -EIO;
5119 } else {
5120 qemu_coroutine_yield();
5121 ret = co.ret;
5122 }
5123 }
5124 if (ret && ret != -ENOTSUP) {
5125 return ret;
5126 }
5127
5128 sector_num += num;
5129 nb_sectors -= num;
5130 }
5131 return 0;
5132 }
5133
5134 int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
5135 {
5136 Coroutine *co;
5137 DiscardCo rwco = {
5138 .bs = bs,
5139 .sector_num = sector_num,
5140 .nb_sectors = nb_sectors,
5141 .ret = NOT_DONE,
5142 };
5143
5144 if (qemu_in_coroutine()) {
5145 /* Fast-path if already in coroutine context */
5146 bdrv_discard_co_entry(&rwco);
5147 } else {
5148 AioContext *aio_context = bdrv_get_aio_context(bs);
5149
5150 co = qemu_coroutine_create(bdrv_discard_co_entry);
5151 qemu_coroutine_enter(co, &rwco);
5152 while (rwco.ret == NOT_DONE) {
5153 aio_poll(aio_context, true);
5154 }
5155 }
5156
5157 return rwco.ret;
5158 }
5159
5160 /**************************************************************/
5161 /* removable device support */
5162
5163 /**
5164 * Return TRUE if the media is present
5165 */
5166 int bdrv_is_inserted(BlockDriverState *bs)
5167 {
5168 BlockDriver *drv = bs->drv;
5169
5170 if (!drv)
5171 return 0;
5172 if (!drv->bdrv_is_inserted)
5173 return 1;
5174 return drv->bdrv_is_inserted(bs);
5175 }
5176
5177 /**
5178 * Return whether the media changed since the last call to this
5179 * function, or -ENOTSUP if we don't know. Most drivers don't know.
5180 */
5181 int bdrv_media_changed(BlockDriverState *bs)
5182 {
5183 BlockDriver *drv = bs->drv;
5184
5185 if (drv && drv->bdrv_media_changed) {
5186 return drv->bdrv_media_changed(bs);
5187 }
5188 return -ENOTSUP;
5189 }
5190
5191 /**
5192 * If eject_flag is TRUE, eject the media.
Otherwise, close the tray 5193 */ 5194 void bdrv_eject(BlockDriverState *bs, bool eject_flag) 5195 { 5196 BlockDriver *drv = bs->drv; 5197 5198 if (drv && drv->bdrv_eject) { 5199 drv->bdrv_eject(bs, eject_flag); 5200 } 5201 5202 if (bs->device_name[0] != '\0') { 5203 qapi_event_send_device_tray_moved(bdrv_get_device_name(bs), 5204 eject_flag, &error_abort); 5205 } 5206 } 5207 5208 /** 5209 * Lock or unlock the media (if it is locked, the user won't be able 5210 * to eject it manually). 5211 */ 5212 void bdrv_lock_medium(BlockDriverState *bs, bool locked) 5213 { 5214 BlockDriver *drv = bs->drv; 5215 5216 trace_bdrv_lock_medium(bs, locked); 5217 5218 if (drv && drv->bdrv_lock_medium) { 5219 drv->bdrv_lock_medium(bs, locked); 5220 } 5221 } 5222 5223 /* needed for generic scsi interface */ 5224 5225 int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf) 5226 { 5227 BlockDriver *drv = bs->drv; 5228 5229 if (drv && drv->bdrv_ioctl) 5230 return drv->bdrv_ioctl(bs, req, buf); 5231 return -ENOTSUP; 5232 } 5233 5234 BlockDriverAIOCB *bdrv_aio_ioctl(BlockDriverState *bs, 5235 unsigned long int req, void *buf, 5236 BlockDriverCompletionFunc *cb, void *opaque) 5237 { 5238 BlockDriver *drv = bs->drv; 5239 5240 if (drv && drv->bdrv_aio_ioctl) 5241 return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque); 5242 return NULL; 5243 } 5244 5245 void bdrv_set_guest_block_size(BlockDriverState *bs, int align) 5246 { 5247 bs->guest_block_size = align; 5248 } 5249 5250 void *qemu_blockalign(BlockDriverState *bs, size_t size) 5251 { 5252 return qemu_memalign(bdrv_opt_mem_align(bs), size); 5253 } 5254 5255 /* 5256 * Check if all memory in this vector is sector aligned. 5257 */ 5258 bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov) 5259 { 5260 int i; 5261 size_t alignment = bdrv_opt_mem_align(bs); 5262 5263 for (i = 0; i < qiov->niov; i++) { 5264 if ((uintptr_t) qiov->iov[i].iov_base % alignment) { 5265 return false; 5266 } 5267 if (qiov->iov[i].iov_len % alignment) { 5268 return false; 5269 } 5270 } 5271 5272 return true; 5273 } 5274 5275 BdrvDirtyBitmap *bdrv_create_dirty_bitmap(BlockDriverState *bs, int granularity, 5276 Error **errp) 5277 { 5278 int64_t bitmap_size; 5279 BdrvDirtyBitmap *bitmap; 5280 5281 assert((granularity & (granularity - 1)) == 0); 5282 5283 granularity >>= BDRV_SECTOR_BITS; 5284 assert(granularity); 5285 bitmap_size = bdrv_nb_sectors(bs); 5286 if (bitmap_size < 0) { 5287 error_setg_errno(errp, -bitmap_size, "could not get length of device"); 5288 errno = -bitmap_size; 5289 return NULL; 5290 } 5291 bitmap = g_malloc0(sizeof(BdrvDirtyBitmap)); 5292 bitmap->bitmap = hbitmap_alloc(bitmap_size, ffs(granularity) - 1); 5293 QLIST_INSERT_HEAD(&bs->dirty_bitmaps, bitmap, list); 5294 return bitmap; 5295 } 5296 5297 void bdrv_release_dirty_bitmap(BlockDriverState *bs, BdrvDirtyBitmap *bitmap) 5298 { 5299 BdrvDirtyBitmap *bm, *next; 5300 QLIST_FOREACH_SAFE(bm, &bs->dirty_bitmaps, list, next) { 5301 if (bm == bitmap) { 5302 QLIST_REMOVE(bitmap, list); 5303 hbitmap_free(bitmap->bitmap); 5304 g_free(bitmap); 5305 return; 5306 } 5307 } 5308 } 5309 5310 BlockDirtyInfoList *bdrv_query_dirty_bitmaps(BlockDriverState *bs) 5311 { 5312 BdrvDirtyBitmap *bm; 5313 BlockDirtyInfoList *list = NULL; 5314 BlockDirtyInfoList **plist = &list; 5315 5316 QLIST_FOREACH(bm, &bs->dirty_bitmaps, list) { 5317 BlockDirtyInfo *info = g_malloc0(sizeof(BlockDirtyInfo)); 5318 BlockDirtyInfoList *entry = g_malloc0(sizeof(BlockDirtyInfoList)); 5319 info->count = bdrv_get_dirty_count(bs, bm); 5320 
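/* The bitmap stores its granularity as log2(sectors per bit); shifting
 * BDRV_SECTOR_SIZE by it converts that back to a byte count for QMP. */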
info->granularity = 5321 ((int64_t) BDRV_SECTOR_SIZE << hbitmap_granularity(bm->bitmap)); 5322 entry->value = info; 5323 *plist = entry; 5324 plist = &entry->next; 5325 } 5326 5327 return list; 5328 } 5329 5330 int bdrv_get_dirty(BlockDriverState *bs, BdrvDirtyBitmap *bitmap, int64_t sector) 5331 { 5332 if (bitmap) { 5333 return hbitmap_get(bitmap->bitmap, sector); 5334 } else { 5335 return 0; 5336 } 5337 } 5338 5339 void bdrv_dirty_iter_init(BlockDriverState *bs, 5340 BdrvDirtyBitmap *bitmap, HBitmapIter *hbi) 5341 { 5342 hbitmap_iter_init(hbi, bitmap->bitmap, 0); 5343 } 5344 5345 void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector, 5346 int nr_sectors) 5347 { 5348 BdrvDirtyBitmap *bitmap; 5349 QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) { 5350 hbitmap_set(bitmap->bitmap, cur_sector, nr_sectors); 5351 } 5352 } 5353 5354 void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector, int nr_sectors) 5355 { 5356 BdrvDirtyBitmap *bitmap; 5357 QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) { 5358 hbitmap_reset(bitmap->bitmap, cur_sector, nr_sectors); 5359 } 5360 } 5361 5362 int64_t bdrv_get_dirty_count(BlockDriverState *bs, BdrvDirtyBitmap *bitmap) 5363 { 5364 return hbitmap_count(bitmap->bitmap); 5365 } 5366 5367 /* Get a reference to bs */ 5368 void bdrv_ref(BlockDriverState *bs) 5369 { 5370 bs->refcnt++; 5371 } 5372 5373 /* Release a previously grabbed reference to bs. 5374 * If after releasing, reference count is zero, the BlockDriverState is 5375 * deleted. */ 5376 void bdrv_unref(BlockDriverState *bs) 5377 { 5378 assert(bs->refcnt > 0); 5379 if (--bs->refcnt == 0) { 5380 bdrv_delete(bs); 5381 } 5382 } 5383 5384 struct BdrvOpBlocker { 5385 Error *reason; 5386 QLIST_ENTRY(BdrvOpBlocker) list; 5387 }; 5388 5389 bool bdrv_op_is_blocked(BlockDriverState *bs, BlockOpType op, Error **errp) 5390 { 5391 BdrvOpBlocker *blocker; 5392 assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX); 5393 if (!QLIST_EMPTY(&bs->op_blockers[op])) { 5394 blocker = QLIST_FIRST(&bs->op_blockers[op]); 5395 if (errp) { 5396 error_setg(errp, "Device '%s' is busy: %s", 5397 bs->device_name, error_get_pretty(blocker->reason)); 5398 } 5399 return true; 5400 } 5401 return false; 5402 } 5403 5404 void bdrv_op_block(BlockDriverState *bs, BlockOpType op, Error *reason) 5405 { 5406 BdrvOpBlocker *blocker; 5407 assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX); 5408 5409 blocker = g_malloc0(sizeof(BdrvOpBlocker)); 5410 blocker->reason = reason; 5411 QLIST_INSERT_HEAD(&bs->op_blockers[op], blocker, list); 5412 } 5413 5414 void bdrv_op_unblock(BlockDriverState *bs, BlockOpType op, Error *reason) 5415 { 5416 BdrvOpBlocker *blocker, *next; 5417 assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX); 5418 QLIST_FOREACH_SAFE(blocker, &bs->op_blockers[op], list, next) { 5419 if (blocker->reason == reason) { 5420 QLIST_REMOVE(blocker, list); 5421 g_free(blocker); 5422 } 5423 } 5424 } 5425 5426 void bdrv_op_block_all(BlockDriverState *bs, Error *reason) 5427 { 5428 int i; 5429 for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) { 5430 bdrv_op_block(bs, i, reason); 5431 } 5432 } 5433 5434 void bdrv_op_unblock_all(BlockDriverState *bs, Error *reason) 5435 { 5436 int i; 5437 for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) { 5438 bdrv_op_unblock(bs, i, reason); 5439 } 5440 } 5441 5442 bool bdrv_op_blocker_is_empty(BlockDriverState *bs) 5443 { 5444 int i; 5445 5446 for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) { 5447 if (!QLIST_EMPTY(&bs->op_blockers[i])) { 5448 return false; 5449 } 5450 } 5451 return true; 5452 } 5453 5454 void bdrv_iostatus_enable(BlockDriverState 
*bs) 5455 { 5456 bs->iostatus_enabled = true; 5457 bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK; 5458 } 5459 5460 /* The I/O status is only enabled if the drive explicitly 5461 * enables it _and_ the VM is configured to stop on errors */ 5462 bool bdrv_iostatus_is_enabled(const BlockDriverState *bs) 5463 { 5464 return (bs->iostatus_enabled && 5465 (bs->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC || 5466 bs->on_write_error == BLOCKDEV_ON_ERROR_STOP || 5467 bs->on_read_error == BLOCKDEV_ON_ERROR_STOP)); 5468 } 5469 5470 void bdrv_iostatus_disable(BlockDriverState *bs) 5471 { 5472 bs->iostatus_enabled = false; 5473 } 5474 5475 void bdrv_iostatus_reset(BlockDriverState *bs) 5476 { 5477 if (bdrv_iostatus_is_enabled(bs)) { 5478 bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK; 5479 if (bs->job) { 5480 block_job_iostatus_reset(bs->job); 5481 } 5482 } 5483 } 5484 5485 void bdrv_iostatus_set_err(BlockDriverState *bs, int error) 5486 { 5487 assert(bdrv_iostatus_is_enabled(bs)); 5488 if (bs->iostatus == BLOCK_DEVICE_IO_STATUS_OK) { 5489 bs->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE : 5490 BLOCK_DEVICE_IO_STATUS_FAILED; 5491 } 5492 } 5493 5494 void 5495 bdrv_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie, int64_t bytes, 5496 enum BlockAcctType type) 5497 { 5498 assert(type < BDRV_MAX_IOTYPE); 5499 5500 cookie->bytes = bytes; 5501 cookie->start_time_ns = get_clock(); 5502 cookie->type = type; 5503 } 5504 5505 void 5506 bdrv_acct_done(BlockDriverState *bs, BlockAcctCookie *cookie) 5507 { 5508 assert(cookie->type < BDRV_MAX_IOTYPE); 5509 5510 bs->nr_bytes[cookie->type] += cookie->bytes; 5511 bs->nr_ops[cookie->type]++; 5512 bs->total_time_ns[cookie->type] += get_clock() - cookie->start_time_ns; 5513 } 5514 5515 void bdrv_img_create(const char *filename, const char *fmt, 5516 const char *base_filename, const char *base_fmt, 5517 char *options, uint64_t img_size, int flags, 5518 Error **errp, bool quiet) 5519 { 5520 QemuOptsList *create_opts = NULL; 5521 QemuOpts *opts = NULL; 5522 const char *backing_fmt, *backing_file; 5523 int64_t size; 5524 BlockDriver *drv, *proto_drv; 5525 BlockDriver *backing_drv = NULL; 5526 Error *local_err = NULL; 5527 int ret = 0; 5528 5529 /* Find driver and parse its options */ 5530 drv = bdrv_find_format(fmt); 5531 if (!drv) { 5532 error_setg(errp, "Unknown file format '%s'", fmt); 5533 return; 5534 } 5535 5536 proto_drv = bdrv_find_protocol(filename, true); 5537 if (!proto_drv) { 5538 error_setg(errp, "Unknown protocol '%s'", filename); 5539 return; 5540 } 5541 5542 create_opts = qemu_opts_append(create_opts, drv->create_opts); 5543 create_opts = qemu_opts_append(create_opts, proto_drv->create_opts); 5544 5545 /* Create parameter list with default values */ 5546 opts = qemu_opts_create(create_opts, NULL, 0, &error_abort); 5547 qemu_opt_set_number(opts, BLOCK_OPT_SIZE, img_size); 5548 5549 /* Parse -o options */ 5550 if (options) { 5551 if (qemu_opts_do_parse(opts, options, NULL) != 0) { 5552 error_setg(errp, "Invalid options for file format '%s'", fmt); 5553 goto out; 5554 } 5555 } 5556 5557 if (base_filename) { 5558 if (qemu_opt_set(opts, BLOCK_OPT_BACKING_FILE, base_filename)) { 5559 error_setg(errp, "Backing file not supported for file format '%s'", 5560 fmt); 5561 goto out; 5562 } 5563 } 5564 5565 if (base_fmt) { 5566 if (qemu_opt_set(opts, BLOCK_OPT_BACKING_FMT, base_fmt)) { 5567 error_setg(errp, "Backing file format not supported for file " 5568 "format '%s'", fmt); 5569 goto out; 5570 } 5571 } 5572 5573 backing_file = qemu_opt_get(opts, 
BLOCK_OPT_BACKING_FILE);
5574 if (backing_file) {
5575 if (!strcmp(filename, backing_file)) {
5576 error_setg(errp, "Trying to create an image with the "
5577 "same filename as the backing file");
5578 goto out;
5579 }
5580 }
5581
5582 backing_fmt = qemu_opt_get(opts, BLOCK_OPT_BACKING_FMT);
5583 if (backing_fmt) {
5584 backing_drv = bdrv_find_format(backing_fmt);
5585 if (!backing_drv) {
5586 error_setg(errp, "Unknown backing file format '%s'",
5587 backing_fmt);
5588 goto out;
5589 }
5590 }
5591
5592 /* The size for the image must always be specified, with one exception:
5593 * if we are using a backing file, we can obtain the size from there. */
5594 size = qemu_opt_get_size(opts, BLOCK_OPT_SIZE, 0);
5595 if (size == -1) {
5596 if (backing_file) {
5597 BlockDriverState *bs;
5598 int64_t backing_size;
5599 int back_flags;
5600
5601 /* backing files always opened read-only */
5602 back_flags =
5603 flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);
5604
5605 bs = NULL;
5606 ret = bdrv_open(&bs, backing_file, NULL, NULL, back_flags,
5607 backing_drv, &local_err);
5608 if (ret < 0) {
5609 error_setg_errno(errp, -ret, "Could not open '%s': %s",
5610 backing_file,
5611 error_get_pretty(local_err));
5612 error_free(local_err);
5613 local_err = NULL;
5614 goto out;
5615 }
5616 backing_size = bdrv_getlength(bs);
5617 if (backing_size < 0) {
5618 error_setg_errno(errp, -backing_size, "Could not get size of '%s'",
5619 backing_file);
5620 bdrv_unref(bs);
5621 goto out;
5622 }
5623
5624 qemu_opt_set_number(opts, BLOCK_OPT_SIZE, backing_size);
5625
5626 bdrv_unref(bs);
5627 } else {
5628 error_setg(errp, "Image creation needs a size parameter");
5629 goto out;
5630 }
5631 }
5632
5633 if (!quiet) {
5634 printf("Formatting '%s', fmt=%s ", filename, fmt);
5635 qemu_opts_print(opts);
5636 puts("");
5637 }
5638
5639 ret = bdrv_create(drv, filename, opts, &local_err);
5640
5641 if (ret == -EFBIG) {
5642 /* This is generally a better message than whatever the driver would
5643 * deliver (especially because of the cluster_size_hint), since that
5644 * is most probably not much different from "image too large".
*/
5645 const char *cluster_size_hint = "";
5646 if (qemu_opt_get_size(opts, BLOCK_OPT_CLUSTER_SIZE, 0)) {
5647 cluster_size_hint = " (try using a larger cluster size)";
5648 }
5649 error_setg(errp, "The image size is too large for file format '%s'"
5650 "%s", fmt, cluster_size_hint);
5651 error_free(local_err);
5652 local_err = NULL;
5653 }
5654
5655 out:
5656 qemu_opts_del(opts);
5657 qemu_opts_free(create_opts);
5658 if (local_err) {
5659 error_propagate(errp, local_err);
5660 }
5661 }
5662
5663 AioContext *bdrv_get_aio_context(BlockDriverState *bs)
5664 {
5665 return bs->aio_context;
5666 }
5667
5668 void bdrv_detach_aio_context(BlockDriverState *bs)
5669 {
5670 if (!bs->drv) {
5671 return;
5672 }
5673
5674 if (bs->io_limits_enabled) {
5675 throttle_detach_aio_context(&bs->throttle_state);
5676 }
5677 if (bs->drv->bdrv_detach_aio_context) {
5678 bs->drv->bdrv_detach_aio_context(bs);
5679 }
5680 if (bs->file) {
5681 bdrv_detach_aio_context(bs->file);
5682 }
5683 if (bs->backing_hd) {
5684 bdrv_detach_aio_context(bs->backing_hd);
5685 }
5686
5687 bs->aio_context = NULL;
5688 }
5689
5690 void bdrv_attach_aio_context(BlockDriverState *bs,
5691 AioContext *new_context)
5692 {
5693 if (!bs->drv) {
5694 return;
5695 }
5696
5697 bs->aio_context = new_context;
5698
5699 if (bs->backing_hd) {
5700 bdrv_attach_aio_context(bs->backing_hd, new_context);
5701 }
5702 if (bs->file) {
5703 bdrv_attach_aio_context(bs->file, new_context);
5704 }
5705 if (bs->drv->bdrv_attach_aio_context) {
5706 bs->drv->bdrv_attach_aio_context(bs, new_context);
5707 }
5708 if (bs->io_limits_enabled) {
5709 throttle_attach_aio_context(&bs->throttle_state, new_context);
5710 }
5711 }
5712
5713 void bdrv_set_aio_context(BlockDriverState *bs, AioContext *new_context)
5714 {
5715 bdrv_drain_all(); /* ensure there are no in-flight requests */
5716
5717 bdrv_detach_aio_context(bs);
5718
5719 /* This function executes in the old AioContext so acquire the new one in
5720 * case it runs in a different thread.
5721 */
5722 aio_context_acquire(new_context);
5723 bdrv_attach_aio_context(bs, new_context);
5724 aio_context_release(new_context);
5725 }
5726
5727 void bdrv_add_before_write_notifier(BlockDriverState *bs,
5728 NotifierWithReturn *notifier)
5729 {
5730 notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
5731 }
5732
5733 int bdrv_amend_options(BlockDriverState *bs, QemuOpts *opts)
5734 {
5735 if (!bs->drv->bdrv_amend_options) {
5736 return -ENOTSUP;
5737 }
5738 return bs->drv->bdrv_amend_options(bs, opts);
5739 }
5740
5741 /* This function will be called by the bdrv_recurse_is_first_non_filter method
5742 * of block filters and by bdrv_is_first_non_filter.
5743 * It is used to test if the given bs is the candidate, or to recurse further
5744 * into the node graph.
5745 */
5746 bool bdrv_recurse_is_first_non_filter(BlockDriverState *bs,
5747 BlockDriverState *candidate)
5748 {
5749 /* return false if basic checks fail */
5750 if (!bs || !bs->drv) {
5751 return false;
5752 }
5753
5754 /* the code reached a non-filter driver -> check if the bs is
5755 * the same as the candidate. It's the recursion termination condition.
5756 */
5757 if (!bs->drv->is_filter) {
5758 return bs == candidate;
5759 }
5760 /* Down this path the driver is a block filter driver */
5761
5762 /* If the block filter defines the recursion method, use it to recurse down
5763 * the node graph.
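 * (A filter with more than one child, e.g. quorum, implements this hook to
 * pick which of its children the recursion should continue through.)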
5764 */
5765 if (bs->drv->bdrv_recurse_is_first_non_filter) {
5766 return bs->drv->bdrv_recurse_is_first_non_filter(bs, candidate);
5767 }
5768
5769 /* the driver is a block filter but does not allow recursing -> return false
5770 */
5771 return false;
5772 }
5773
5774 /* This function checks if the candidate is the first non-filter bs down its
5775 * bs chain. Since we don't have pointers to parents it explores all bs chains
5776 * from the top. Some filters can choose not to pass down the recursion.
5777 */
5778 bool bdrv_is_first_non_filter(BlockDriverState *candidate)
5779 {
5780 BlockDriverState *bs;
5781
5782 /* walk down the bs forest recursively */
5783 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
5784 bool perm;
5785
5786 /* try to recurse in this top level bs */
5787 perm = bdrv_recurse_is_first_non_filter(bs, candidate);
5788
5789 /* candidate is the first non filter */
5790 if (perm) {
5791 return true;
5792 }
5793 }
5794
5795 return false;
5796 }
5797
5798 BlockDriverState *check_to_replace_node(const char *node_name, Error **errp)
5799 {
5800 BlockDriverState *to_replace_bs = bdrv_find_node(node_name);
5801 if (!to_replace_bs) {
5802 error_setg(errp, "Node name '%s' not found", node_name);
5803 return NULL;
5804 }
5805
5806 if (bdrv_op_is_blocked(to_replace_bs, BLOCK_OP_TYPE_REPLACE, errp)) {
5807 return NULL;
5808 }
5809
5810 /* We don't want an arbitrary node of the BDS chain to be replaced, only the
5811 * topmost non-filter, in order to prevent data corruption.
5812 * Another benefit is that this test excludes backing files, which are
5813 * blocked by the backing blockers.
5814 */
5815 if (!bdrv_is_first_non_filter(to_replace_bs)) {
5816 error_setg(errp, "Only top most non filter can be replaced");
5817 return NULL;
5818 }
5819
5820 return to_replace_bs;
5821 }
5822
5823 void bdrv_io_plug(BlockDriverState *bs)
5824 {
5825 BlockDriver *drv = bs->drv;
5826 if (drv && drv->bdrv_io_plug) {
5827 drv->bdrv_io_plug(bs);
5828 } else if (bs->file) {
5829 bdrv_io_plug(bs->file);
5830 }
5831 }
5832
5833 void bdrv_io_unplug(BlockDriverState *bs)
5834 {
5835 BlockDriver *drv = bs->drv;
5836 if (drv && drv->bdrv_io_unplug) {
5837 drv->bdrv_io_unplug(bs);
5838 } else if (bs->file) {
5839 bdrv_io_unplug(bs->file);
5840 }
5841 }
5842
5843 void bdrv_flush_io_queue(BlockDriverState *bs)
5844 {
5845 BlockDriver *drv = bs->drv;
5846 if (drv && drv->bdrv_flush_io_queue) {
5847 drv->bdrv_flush_io_queue(bs);
5848 } else if (bs->file) {
5849 bdrv_flush_io_queue(bs->file);
5850 }
5851 }
5852
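/*
 * Example (illustrative sketch only): callers that issue several requests
 * back to back can bracket them with bdrv_io_plug()/bdrv_io_unplug() so
 * that drivers implementing the hooks may submit them as one batch.  The
 * req[] array and completion_cb are invented for the example.
 *
 *     bdrv_io_plug(bs);
 *     for (i = 0; i < n; i++) {
 *         bdrv_aio_writev(bs, req[i].sector, req[i].qiov,
 *                         req[i].nb_sectors, completion_cb, req[i].opaque);
 *     }
 *     bdrv_io_unplug(bs);
 */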