1 /* 2 * QEMU System Emulator block driver 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a copy 7 * of this software and associated documentation files (the "Software"), to deal 8 * in the Software without restriction, including without limitation the rights 9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 * copies of the Software, and to permit persons to whom the Software is 11 * furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 * THE SOFTWARE. 23 */ 24 #include "config-host.h" 25 #include "qemu-common.h" 26 #include "trace.h" 27 #include "monitor/monitor.h" 28 #include "block/block_int.h" 29 #include "block/blockjob.h" 30 #include "qemu/module.h" 31 #include "qapi/qmp/qjson.h" 32 #include "sysemu/sysemu.h" 33 #include "qemu/notify.h" 34 #include "block/coroutine.h" 35 #include "block/qapi.h" 36 #include "qmp-commands.h" 37 #include "qemu/timer.h" 38 39 #ifdef CONFIG_BSD 40 #include <sys/types.h> 41 #include <sys/stat.h> 42 #include <sys/ioctl.h> 43 #include <sys/queue.h> 44 #ifndef __DragonFly__ 45 #include <sys/disk.h> 46 #endif 47 #endif 48 49 #ifdef _WIN32 50 #include <windows.h> 51 #endif 52 53 struct BdrvDirtyBitmap { 54 HBitmap *bitmap; 55 QLIST_ENTRY(BdrvDirtyBitmap) list; 56 }; 57 58 #define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */ 59 60 static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load); 61 static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs, 62 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, 63 BlockDriverCompletionFunc *cb, void *opaque); 64 static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs, 65 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, 66 BlockDriverCompletionFunc *cb, void *opaque); 67 static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs, 68 int64_t sector_num, int nb_sectors, 69 QEMUIOVector *iov); 70 static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs, 71 int64_t sector_num, int nb_sectors, 72 QEMUIOVector *iov); 73 static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs, 74 int64_t offset, unsigned int bytes, QEMUIOVector *qiov, 75 BdrvRequestFlags flags); 76 static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs, 77 int64_t offset, unsigned int bytes, QEMUIOVector *qiov, 78 BdrvRequestFlags flags); 79 static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs, 80 int64_t sector_num, 81 QEMUIOVector *qiov, 82 int nb_sectors, 83 BdrvRequestFlags flags, 84 BlockDriverCompletionFunc *cb, 85 void *opaque, 86 bool is_write); 87 static void coroutine_fn bdrv_co_do_rw(void *opaque); 88 static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs, 89 int64_t sector_num, int nb_sectors, BdrvRequestFlags flags); 90 91 static QTAILQ_HEAD(, BlockDriverState) bdrv_states = 92 QTAILQ_HEAD_INITIALIZER(bdrv_states); 93 94 static QTAILQ_HEAD(, BlockDriverState) graph_bdrv_states = 95 QTAILQ_HEAD_INITIALIZER(graph_bdrv_states); 96 97 static QLIST_HEAD(, BlockDriver) bdrv_drivers = 98 QLIST_HEAD_INITIALIZER(bdrv_drivers); 99 100 /* If non-zero, use only whitelisted block drivers */ 101 static int use_bdrv_whitelist; 102 103 #ifdef _WIN32 104 static int is_windows_drive_prefix(const char *filename) 105 { 106 return (((filename[0] >= 'a' && filename[0] <= 'z') || 107 (filename[0] >= 'A' && filename[0] <= 'Z')) && 108 filename[1] == ':'); 109 } 110 111 int is_windows_drive(const char *filename) 112 { 113 if (is_windows_drive_prefix(filename) && 114 filename[2] == '\0') 115 return 1; 116 if (strstart(filename, "\\\\.\\", NULL) || 117 strstart(filename, "//./", NULL)) 118 return 1; 119 return 0; 120 } 121 #endif 122 123 /* throttling disk I/O limits */ 124 void bdrv_set_io_limits(BlockDriverState *bs, 125 ThrottleConfig *cfg) 126 { 127 int i; 128 129 throttle_config(&bs->throttle_state, cfg); 130 131 for (i = 0; i < 2; i++) { 132 qemu_co_enter_next(&bs->throttled_reqs[i]); 133 } 134 } 135 136 /* this function drain all the throttled IOs */ 137 static bool bdrv_start_throttled_reqs(BlockDriverState *bs) 138 { 139 bool drained = false; 140 bool enabled = bs->io_limits_enabled; 141 int i; 142 143 bs->io_limits_enabled = false; 144 145 for (i = 0; i < 2; i++) { 146 while (qemu_co_enter_next(&bs->throttled_reqs[i])) { 147 drained = true; 148 } 149 } 150 151 bs->io_limits_enabled = enabled; 152 153 return drained; 154 } 155 156 void bdrv_io_limits_disable(BlockDriverState *bs) 157 { 158 bs->io_limits_enabled = false; 159 160 bdrv_start_throttled_reqs(bs); 161 162 throttle_destroy(&bs->throttle_state); 163 } 164 165 static void bdrv_throttle_read_timer_cb(void *opaque) 166 { 167 BlockDriverState *bs = opaque; 168 qemu_co_enter_next(&bs->throttled_reqs[0]); 169 } 170 171 static void bdrv_throttle_write_timer_cb(void *opaque) 172 { 173 BlockDriverState *bs = opaque; 174 qemu_co_enter_next(&bs->throttled_reqs[1]); 175 } 176 177 /* should be called before bdrv_set_io_limits if a limit is set */ 178 void bdrv_io_limits_enable(BlockDriverState *bs) 179 { 180 assert(!bs->io_limits_enabled); 181 throttle_init(&bs->throttle_state, 182 QEMU_CLOCK_VIRTUAL, 183 bdrv_throttle_read_timer_cb, 184 bdrv_throttle_write_timer_cb, 185 bs); 186 bs->io_limits_enabled = true; 187 } 188 189 /* This function makes an IO wait if needed 190 * 191 * @nb_sectors: the number of sectors of the IO 192 * @is_write: is the IO a write 193 */ 194 static void bdrv_io_limits_intercept(BlockDriverState *bs, 195 unsigned int bytes, 196 bool is_write) 197 { 198 /* does this io must wait */ 199 bool must_wait = throttle_schedule_timer(&bs->throttle_state, is_write); 200 201 /* if must wait or any request of this type throttled queue the IO */ 202 if (must_wait || 203 !qemu_co_queue_empty(&bs->throttled_reqs[is_write])) { 204 qemu_co_queue_wait(&bs->throttled_reqs[is_write]); 205 } 206 207 /* the IO will be executed, do the accounting */ 208 throttle_account(&bs->throttle_state, is_write, bytes); 209 210 211 /* if the next request must wait -> do nothing */ 212 if (throttle_schedule_timer(&bs->throttle_state, is_write)) { 213 return; 214 } 215 216 /* else queue next request for execution */ 217 qemu_co_queue_next(&bs->throttled_reqs[is_write]); 218 } 219 220 size_t bdrv_opt_mem_align(BlockDriverState *bs) 221 { 222 if (!bs || !bs->drv) { 223 /* 4k should be on the safe side */ 224 return 4096; 225 } 226 227 return bs->bl.opt_mem_alignment; 228 } 229 230 /* check if the path starts with "<protocol>:" */ 231 static int path_has_protocol(const char *path) 232 { 233 const char *p; 234 235 #ifdef _WIN32 236 if (is_windows_drive(path) || 237 is_windows_drive_prefix(path)) { 238 return 0; 239 } 240 p = path + strcspn(path, ":/\\"); 241 #else 242 p = path + strcspn(path, ":/"); 243 #endif 244 245 return *p == ':'; 246 } 247 248 int path_is_absolute(const char *path) 249 { 250 #ifdef _WIN32 251 /* specific case for names like: "\\.\d:" */ 252 if (is_windows_drive(path) || is_windows_drive_prefix(path)) { 253 return 1; 254 } 255 return (*path == '/' || *path == '\\'); 256 #else 257 return (*path == '/'); 258 #endif 259 } 260 261 /* if filename is absolute, just copy it to dest. Otherwise, build a 262 path to it by considering it is relative to base_path. URL are 263 supported. */ 264 void path_combine(char *dest, int dest_size, 265 const char *base_path, 266 const char *filename) 267 { 268 const char *p, *p1; 269 int len; 270 271 if (dest_size <= 0) 272 return; 273 if (path_is_absolute(filename)) { 274 pstrcpy(dest, dest_size, filename); 275 } else { 276 p = strchr(base_path, ':'); 277 if (p) 278 p++; 279 else 280 p = base_path; 281 p1 = strrchr(base_path, '/'); 282 #ifdef _WIN32 283 { 284 const char *p2; 285 p2 = strrchr(base_path, '\\'); 286 if (!p1 || p2 > p1) 287 p1 = p2; 288 } 289 #endif 290 if (p1) 291 p1++; 292 else 293 p1 = base_path; 294 if (p1 > p) 295 p = p1; 296 len = p - base_path; 297 if (len > dest_size - 1) 298 len = dest_size - 1; 299 memcpy(dest, base_path, len); 300 dest[len] = '\0'; 301 pstrcat(dest, dest_size, filename); 302 } 303 } 304 305 void bdrv_get_full_backing_filename(BlockDriverState *bs, char *dest, size_t sz) 306 { 307 if (bs->backing_file[0] == '\0' || path_has_protocol(bs->backing_file)) { 308 pstrcpy(dest, sz, bs->backing_file); 309 } else { 310 path_combine(dest, sz, bs->filename, bs->backing_file); 311 } 312 } 313 314 void bdrv_register(BlockDriver *bdrv) 315 { 316 /* Block drivers without coroutine functions need emulation */ 317 if (!bdrv->bdrv_co_readv) { 318 bdrv->bdrv_co_readv = bdrv_co_readv_em; 319 bdrv->bdrv_co_writev = bdrv_co_writev_em; 320 321 /* bdrv_co_readv_em()/brdv_co_writev_em() work in terms of aio, so if 322 * the block driver lacks aio we need to emulate that too. 323 */ 324 if (!bdrv->bdrv_aio_readv) { 325 /* add AIO emulation layer */ 326 bdrv->bdrv_aio_readv = bdrv_aio_readv_em; 327 bdrv->bdrv_aio_writev = bdrv_aio_writev_em; 328 } 329 } 330 331 QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list); 332 } 333 334 /* create a new block device (by default it is empty) */ 335 BlockDriverState *bdrv_new(const char *device_name) 336 { 337 BlockDriverState *bs; 338 339 bs = g_malloc0(sizeof(BlockDriverState)); 340 QLIST_INIT(&bs->dirty_bitmaps); 341 pstrcpy(bs->device_name, sizeof(bs->device_name), device_name); 342 if (device_name[0] != '\0') { 343 QTAILQ_INSERT_TAIL(&bdrv_states, bs, device_list); 344 } 345 bdrv_iostatus_disable(bs); 346 notifier_list_init(&bs->close_notifiers); 347 notifier_with_return_list_init(&bs->before_write_notifiers); 348 qemu_co_queue_init(&bs->throttled_reqs[0]); 349 qemu_co_queue_init(&bs->throttled_reqs[1]); 350 bs->refcnt = 1; 351 352 return bs; 353 } 354 355 void bdrv_add_close_notifier(BlockDriverState *bs, Notifier *notify) 356 { 357 notifier_list_add(&bs->close_notifiers, notify); 358 } 359 360 BlockDriver *bdrv_find_format(const char *format_name) 361 { 362 BlockDriver *drv1; 363 QLIST_FOREACH(drv1, &bdrv_drivers, list) { 364 if (!strcmp(drv1->format_name, format_name)) { 365 return drv1; 366 } 367 } 368 return NULL; 369 } 370 371 static int bdrv_is_whitelisted(BlockDriver *drv, bool read_only) 372 { 373 static const char *whitelist_rw[] = { 374 CONFIG_BDRV_RW_WHITELIST 375 }; 376 static const char *whitelist_ro[] = { 377 CONFIG_BDRV_RO_WHITELIST 378 }; 379 const char **p; 380 381 if (!whitelist_rw[0] && !whitelist_ro[0]) { 382 return 1; /* no whitelist, anything goes */ 383 } 384 385 for (p = whitelist_rw; *p; p++) { 386 if (!strcmp(drv->format_name, *p)) { 387 return 1; 388 } 389 } 390 if (read_only) { 391 for (p = whitelist_ro; *p; p++) { 392 if (!strcmp(drv->format_name, *p)) { 393 return 1; 394 } 395 } 396 } 397 return 0; 398 } 399 400 BlockDriver *bdrv_find_whitelisted_format(const char *format_name, 401 bool read_only) 402 { 403 BlockDriver *drv = bdrv_find_format(format_name); 404 return drv && bdrv_is_whitelisted(drv, read_only) ? drv : NULL; 405 } 406 407 typedef struct CreateCo { 408 BlockDriver *drv; 409 char *filename; 410 QEMUOptionParameter *options; 411 int ret; 412 Error *err; 413 } CreateCo; 414 415 static void coroutine_fn bdrv_create_co_entry(void *opaque) 416 { 417 Error *local_err = NULL; 418 int ret; 419 420 CreateCo *cco = opaque; 421 assert(cco->drv); 422 423 ret = cco->drv->bdrv_create(cco->filename, cco->options, &local_err); 424 if (local_err) { 425 error_propagate(&cco->err, local_err); 426 } 427 cco->ret = ret; 428 } 429 430 int bdrv_create(BlockDriver *drv, const char* filename, 431 QEMUOptionParameter *options, Error **errp) 432 { 433 int ret; 434 435 Coroutine *co; 436 CreateCo cco = { 437 .drv = drv, 438 .filename = g_strdup(filename), 439 .options = options, 440 .ret = NOT_DONE, 441 .err = NULL, 442 }; 443 444 if (!drv->bdrv_create) { 445 error_setg(errp, "Driver '%s' does not support image creation", drv->format_name); 446 ret = -ENOTSUP; 447 goto out; 448 } 449 450 if (qemu_in_coroutine()) { 451 /* Fast-path if already in coroutine context */ 452 bdrv_create_co_entry(&cco); 453 } else { 454 co = qemu_coroutine_create(bdrv_create_co_entry); 455 qemu_coroutine_enter(co, &cco); 456 while (cco.ret == NOT_DONE) { 457 qemu_aio_wait(); 458 } 459 } 460 461 ret = cco.ret; 462 if (ret < 0) { 463 if (cco.err) { 464 error_propagate(errp, cco.err); 465 } else { 466 error_setg_errno(errp, -ret, "Could not create image"); 467 } 468 } 469 470 out: 471 g_free(cco.filename); 472 return ret; 473 } 474 475 int bdrv_create_file(const char* filename, QEMUOptionParameter *options, 476 Error **errp) 477 { 478 BlockDriver *drv; 479 Error *local_err = NULL; 480 int ret; 481 482 drv = bdrv_find_protocol(filename, true); 483 if (drv == NULL) { 484 error_setg(errp, "Could not find protocol for file '%s'", filename); 485 return -ENOENT; 486 } 487 488 ret = bdrv_create(drv, filename, options, &local_err); 489 if (local_err) { 490 error_propagate(errp, local_err); 491 } 492 return ret; 493 } 494 495 int bdrv_refresh_limits(BlockDriverState *bs) 496 { 497 BlockDriver *drv = bs->drv; 498 499 memset(&bs->bl, 0, sizeof(bs->bl)); 500 501 if (!drv) { 502 return 0; 503 } 504 505 /* Take some limits from the children as a default */ 506 if (bs->file) { 507 bdrv_refresh_limits(bs->file); 508 bs->bl.opt_transfer_length = bs->file->bl.opt_transfer_length; 509 bs->bl.opt_mem_alignment = bs->file->bl.opt_mem_alignment; 510 } else { 511 bs->bl.opt_mem_alignment = 512; 512 } 513 514 if (bs->backing_hd) { 515 bdrv_refresh_limits(bs->backing_hd); 516 bs->bl.opt_transfer_length = 517 MAX(bs->bl.opt_transfer_length, 518 bs->backing_hd->bl.opt_transfer_length); 519 bs->bl.opt_mem_alignment = 520 MAX(bs->bl.opt_mem_alignment, 521 bs->backing_hd->bl.opt_mem_alignment); 522 } 523 524 /* Then let the driver override it */ 525 if (drv->bdrv_refresh_limits) { 526 return drv->bdrv_refresh_limits(bs); 527 } 528 529 return 0; 530 } 531 532 /* 533 * Create a uniquely-named empty temporary file. 534 * Return 0 upon success, otherwise a negative errno value. 535 */ 536 int get_tmp_filename(char *filename, int size) 537 { 538 #ifdef _WIN32 539 char temp_dir[MAX_PATH]; 540 /* GetTempFileName requires that its output buffer (4th param) 541 have length MAX_PATH or greater. */ 542 assert(size >= MAX_PATH); 543 return (GetTempPath(MAX_PATH, temp_dir) 544 && GetTempFileName(temp_dir, "qem", 0, filename) 545 ? 0 : -GetLastError()); 546 #else 547 int fd; 548 const char *tmpdir; 549 tmpdir = getenv("TMPDIR"); 550 if (!tmpdir) { 551 tmpdir = "/var/tmp"; 552 } 553 if (snprintf(filename, size, "%s/vl.XXXXXX", tmpdir) >= size) { 554 return -EOVERFLOW; 555 } 556 fd = mkstemp(filename); 557 if (fd < 0) { 558 return -errno; 559 } 560 if (close(fd) != 0) { 561 unlink(filename); 562 return -errno; 563 } 564 return 0; 565 #endif 566 } 567 568 /* 569 * Detect host devices. By convention, /dev/cdrom[N] is always 570 * recognized as a host CDROM. 571 */ 572 static BlockDriver *find_hdev_driver(const char *filename) 573 { 574 int score_max = 0, score; 575 BlockDriver *drv = NULL, *d; 576 577 QLIST_FOREACH(d, &bdrv_drivers, list) { 578 if (d->bdrv_probe_device) { 579 score = d->bdrv_probe_device(filename); 580 if (score > score_max) { 581 score_max = score; 582 drv = d; 583 } 584 } 585 } 586 587 return drv; 588 } 589 590 BlockDriver *bdrv_find_protocol(const char *filename, 591 bool allow_protocol_prefix) 592 { 593 BlockDriver *drv1; 594 char protocol[128]; 595 int len; 596 const char *p; 597 598 /* TODO Drivers without bdrv_file_open must be specified explicitly */ 599 600 /* 601 * XXX(hch): we really should not let host device detection 602 * override an explicit protocol specification, but moving this 603 * later breaks access to device names with colons in them. 604 * Thanks to the brain-dead persistent naming schemes on udev- 605 * based Linux systems those actually are quite common. 606 */ 607 drv1 = find_hdev_driver(filename); 608 if (drv1) { 609 return drv1; 610 } 611 612 if (!path_has_protocol(filename) || !allow_protocol_prefix) { 613 return bdrv_find_format("file"); 614 } 615 616 p = strchr(filename, ':'); 617 assert(p != NULL); 618 len = p - filename; 619 if (len > sizeof(protocol) - 1) 620 len = sizeof(protocol) - 1; 621 memcpy(protocol, filename, len); 622 protocol[len] = '\0'; 623 QLIST_FOREACH(drv1, &bdrv_drivers, list) { 624 if (drv1->protocol_name && 625 !strcmp(drv1->protocol_name, protocol)) { 626 return drv1; 627 } 628 } 629 return NULL; 630 } 631 632 static int find_image_format(BlockDriverState *bs, const char *filename, 633 BlockDriver **pdrv, Error **errp) 634 { 635 int score, score_max; 636 BlockDriver *drv1, *drv; 637 uint8_t buf[2048]; 638 int ret = 0; 639 640 /* Return the raw BlockDriver * to scsi-generic devices or empty drives */ 641 if (bs->sg || !bdrv_is_inserted(bs) || bdrv_getlength(bs) == 0) { 642 drv = bdrv_find_format("raw"); 643 if (!drv) { 644 error_setg(errp, "Could not find raw image format"); 645 ret = -ENOENT; 646 } 647 *pdrv = drv; 648 return ret; 649 } 650 651 ret = bdrv_pread(bs, 0, buf, sizeof(buf)); 652 if (ret < 0) { 653 error_setg_errno(errp, -ret, "Could not read image for determining its " 654 "format"); 655 *pdrv = NULL; 656 return ret; 657 } 658 659 score_max = 0; 660 drv = NULL; 661 QLIST_FOREACH(drv1, &bdrv_drivers, list) { 662 if (drv1->bdrv_probe) { 663 score = drv1->bdrv_probe(buf, ret, filename); 664 if (score > score_max) { 665 score_max = score; 666 drv = drv1; 667 } 668 } 669 } 670 if (!drv) { 671 error_setg(errp, "Could not determine image format: No compatible " 672 "driver found"); 673 ret = -ENOENT; 674 } 675 *pdrv = drv; 676 return ret; 677 } 678 679 /** 680 * Set the current 'total_sectors' value 681 */ 682 static int refresh_total_sectors(BlockDriverState *bs, int64_t hint) 683 { 684 BlockDriver *drv = bs->drv; 685 686 /* Do not attempt drv->bdrv_getlength() on scsi-generic devices */ 687 if (bs->sg) 688 return 0; 689 690 /* query actual device if possible, otherwise just trust the hint */ 691 if (drv->bdrv_getlength) { 692 int64_t length = drv->bdrv_getlength(bs); 693 if (length < 0) { 694 return length; 695 } 696 hint = DIV_ROUND_UP(length, BDRV_SECTOR_SIZE); 697 } 698 699 bs->total_sectors = hint; 700 return 0; 701 } 702 703 /** 704 * Set open flags for a given discard mode 705 * 706 * Return 0 on success, -1 if the discard mode was invalid. 707 */ 708 int bdrv_parse_discard_flags(const char *mode, int *flags) 709 { 710 *flags &= ~BDRV_O_UNMAP; 711 712 if (!strcmp(mode, "off") || !strcmp(mode, "ignore")) { 713 /* do nothing */ 714 } else if (!strcmp(mode, "on") || !strcmp(mode, "unmap")) { 715 *flags |= BDRV_O_UNMAP; 716 } else { 717 return -1; 718 } 719 720 return 0; 721 } 722 723 /** 724 * Set open flags for a given cache mode 725 * 726 * Return 0 on success, -1 if the cache mode was invalid. 727 */ 728 int bdrv_parse_cache_flags(const char *mode, int *flags) 729 { 730 *flags &= ~BDRV_O_CACHE_MASK; 731 732 if (!strcmp(mode, "off") || !strcmp(mode, "none")) { 733 *flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB; 734 } else if (!strcmp(mode, "directsync")) { 735 *flags |= BDRV_O_NOCACHE; 736 } else if (!strcmp(mode, "writeback")) { 737 *flags |= BDRV_O_CACHE_WB; 738 } else if (!strcmp(mode, "unsafe")) { 739 *flags |= BDRV_O_CACHE_WB; 740 *flags |= BDRV_O_NO_FLUSH; 741 } else if (!strcmp(mode, "writethrough")) { 742 /* this is the default */ 743 } else { 744 return -1; 745 } 746 747 return 0; 748 } 749 750 /** 751 * The copy-on-read flag is actually a reference count so multiple users may 752 * use the feature without worrying about clobbering its previous state. 753 * Copy-on-read stays enabled until all users have called to disable it. 754 */ 755 void bdrv_enable_copy_on_read(BlockDriverState *bs) 756 { 757 bs->copy_on_read++; 758 } 759 760 void bdrv_disable_copy_on_read(BlockDriverState *bs) 761 { 762 assert(bs->copy_on_read > 0); 763 bs->copy_on_read--; 764 } 765 766 static int bdrv_open_flags(BlockDriverState *bs, int flags) 767 { 768 int open_flags = flags | BDRV_O_CACHE_WB; 769 770 /* 771 * Clear flags that are internal to the block layer before opening the 772 * image. 773 */ 774 open_flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING); 775 776 /* 777 * Snapshots should be writable. 778 */ 779 if (bs->is_temporary) { 780 open_flags |= BDRV_O_RDWR; 781 } 782 783 return open_flags; 784 } 785 786 static int bdrv_assign_node_name(BlockDriverState *bs, 787 const char *node_name, 788 Error **errp) 789 { 790 if (!node_name) { 791 return 0; 792 } 793 794 /* empty string node name is invalid */ 795 if (node_name[0] == '\0') { 796 error_setg(errp, "Empty node name"); 797 return -EINVAL; 798 } 799 800 /* takes care of avoiding namespaces collisions */ 801 if (bdrv_find(node_name)) { 802 error_setg(errp, "node-name=%s is conflicting with a device id", 803 node_name); 804 return -EINVAL; 805 } 806 807 /* takes care of avoiding duplicates node names */ 808 if (bdrv_find_node(node_name)) { 809 error_setg(errp, "Duplicate node name"); 810 return -EINVAL; 811 } 812 813 /* copy node name into the bs and insert it into the graph list */ 814 pstrcpy(bs->node_name, sizeof(bs->node_name), node_name); 815 QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs, node_list); 816 817 return 0; 818 } 819 820 /* 821 * Common part for opening disk images and files 822 * 823 * Removes all processed options from *options. 824 */ 825 static int bdrv_open_common(BlockDriverState *bs, BlockDriverState *file, 826 QDict *options, int flags, BlockDriver *drv, Error **errp) 827 { 828 int ret, open_flags; 829 const char *filename; 830 const char *node_name = NULL; 831 Error *local_err = NULL; 832 833 assert(drv != NULL); 834 assert(bs->file == NULL); 835 assert(options != NULL && bs->options != options); 836 837 if (file != NULL) { 838 filename = file->filename; 839 } else { 840 filename = qdict_get_try_str(options, "filename"); 841 } 842 843 if (drv->bdrv_needs_filename && !filename) { 844 error_setg(errp, "The '%s' block driver requires a file name", 845 drv->format_name); 846 return -EINVAL; 847 } 848 849 trace_bdrv_open_common(bs, filename ?: "", flags, drv->format_name); 850 851 node_name = qdict_get_try_str(options, "node-name"); 852 ret = bdrv_assign_node_name(bs, node_name, errp); 853 if (ret < 0) { 854 return ret; 855 } 856 qdict_del(options, "node-name"); 857 858 /* bdrv_open() with directly using a protocol as drv. This layer is already 859 * opened, so assign it to bs (while file becomes a closed BlockDriverState) 860 * and return immediately. */ 861 if (file != NULL && drv->bdrv_file_open) { 862 bdrv_swap(file, bs); 863 return 0; 864 } 865 866 bs->open_flags = flags; 867 bs->guest_block_size = 512; 868 bs->request_alignment = 512; 869 bs->zero_beyond_eof = true; 870 open_flags = bdrv_open_flags(bs, flags); 871 bs->read_only = !(open_flags & BDRV_O_RDWR); 872 873 if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv, bs->read_only)) { 874 error_setg(errp, 875 !bs->read_only && bdrv_is_whitelisted(drv, true) 876 ? "Driver '%s' can only be used for read-only devices" 877 : "Driver '%s' is not whitelisted", 878 drv->format_name); 879 return -ENOTSUP; 880 } 881 882 assert(bs->copy_on_read == 0); /* bdrv_new() and bdrv_close() make it so */ 883 if (flags & BDRV_O_COPY_ON_READ) { 884 if (!bs->read_only) { 885 bdrv_enable_copy_on_read(bs); 886 } else { 887 error_setg(errp, "Can't use copy-on-read on read-only device"); 888 return -EINVAL; 889 } 890 } 891 892 if (filename != NULL) { 893 pstrcpy(bs->filename, sizeof(bs->filename), filename); 894 } else { 895 bs->filename[0] = '\0'; 896 } 897 898 bs->drv = drv; 899 bs->opaque = g_malloc0(drv->instance_size); 900 901 bs->enable_write_cache = !!(flags & BDRV_O_CACHE_WB); 902 903 /* Open the image, either directly or using a protocol */ 904 if (drv->bdrv_file_open) { 905 assert(file == NULL); 906 assert(!drv->bdrv_needs_filename || filename != NULL); 907 ret = drv->bdrv_file_open(bs, options, open_flags, &local_err); 908 } else { 909 if (file == NULL) { 910 error_setg(errp, "Can't use '%s' as a block driver for the " 911 "protocol level", drv->format_name); 912 ret = -EINVAL; 913 goto free_and_fail; 914 } 915 bs->file = file; 916 ret = drv->bdrv_open(bs, options, open_flags, &local_err); 917 } 918 919 if (ret < 0) { 920 if (local_err) { 921 error_propagate(errp, local_err); 922 } else if (bs->filename[0]) { 923 error_setg_errno(errp, -ret, "Could not open '%s'", bs->filename); 924 } else { 925 error_setg_errno(errp, -ret, "Could not open image"); 926 } 927 goto free_and_fail; 928 } 929 930 ret = refresh_total_sectors(bs, bs->total_sectors); 931 if (ret < 0) { 932 error_setg_errno(errp, -ret, "Could not refresh total sector count"); 933 goto free_and_fail; 934 } 935 936 bdrv_refresh_limits(bs); 937 assert(bdrv_opt_mem_align(bs) != 0); 938 assert((bs->request_alignment != 0) || bs->sg); 939 940 #ifndef _WIN32 941 if (bs->is_temporary) { 942 assert(bs->filename[0] != '\0'); 943 unlink(bs->filename); 944 } 945 #endif 946 return 0; 947 948 free_and_fail: 949 bs->file = NULL; 950 g_free(bs->opaque); 951 bs->opaque = NULL; 952 bs->drv = NULL; 953 return ret; 954 } 955 956 /* 957 * Opens a file using a protocol (file, host_device, nbd, ...) 958 * 959 * options is an indirect pointer to a QDict of options to pass to the block 960 * drivers, or pointer to NULL for an empty set of options. If this function 961 * takes ownership of the QDict reference, it will set *options to NULL; 962 * otherwise, it will contain unused/unrecognized options after this function 963 * returns. Then, the caller is responsible for freeing it. If it intends to 964 * reuse the QDict, QINCREF() should be called beforehand. 965 */ 966 static int bdrv_file_open(BlockDriverState *bs, const char *filename, 967 QDict **options, int flags, Error **errp) 968 { 969 BlockDriver *drv; 970 const char *drvname; 971 bool allow_protocol_prefix = false; 972 Error *local_err = NULL; 973 int ret; 974 975 /* Fetch the file name from the options QDict if necessary */ 976 if (!filename) { 977 filename = qdict_get_try_str(*options, "filename"); 978 } else if (filename && !qdict_haskey(*options, "filename")) { 979 qdict_put(*options, "filename", qstring_from_str(filename)); 980 allow_protocol_prefix = true; 981 } else { 982 error_setg(errp, "Can't specify 'file' and 'filename' options at the " 983 "same time"); 984 ret = -EINVAL; 985 goto fail; 986 } 987 988 /* Find the right block driver */ 989 drvname = qdict_get_try_str(*options, "driver"); 990 if (drvname) { 991 drv = bdrv_find_format(drvname); 992 if (!drv) { 993 error_setg(errp, "Unknown driver '%s'", drvname); 994 } 995 qdict_del(*options, "driver"); 996 } else if (filename) { 997 drv = bdrv_find_protocol(filename, allow_protocol_prefix); 998 if (!drv) { 999 error_setg(errp, "Unknown protocol"); 1000 } 1001 } else { 1002 error_setg(errp, "Must specify either driver or file"); 1003 drv = NULL; 1004 } 1005 1006 if (!drv) { 1007 /* errp has been set already */ 1008 ret = -ENOENT; 1009 goto fail; 1010 } 1011 1012 /* Parse the filename and open it */ 1013 if (drv->bdrv_parse_filename && filename) { 1014 drv->bdrv_parse_filename(filename, *options, &local_err); 1015 if (local_err) { 1016 error_propagate(errp, local_err); 1017 ret = -EINVAL; 1018 goto fail; 1019 } 1020 1021 if (!drv->bdrv_needs_filename) { 1022 qdict_del(*options, "filename"); 1023 } else { 1024 filename = qdict_get_str(*options, "filename"); 1025 } 1026 } 1027 1028 if (!drv->bdrv_file_open) { 1029 ret = bdrv_open(&bs, filename, NULL, *options, flags, drv, &local_err); 1030 *options = NULL; 1031 } else { 1032 ret = bdrv_open_common(bs, NULL, *options, flags, drv, &local_err); 1033 } 1034 if (ret < 0) { 1035 error_propagate(errp, local_err); 1036 goto fail; 1037 } 1038 1039 bs->growable = 1; 1040 return 0; 1041 1042 fail: 1043 return ret; 1044 } 1045 1046 /* 1047 * Opens the backing file for a BlockDriverState if not yet open 1048 * 1049 * options is a QDict of options to pass to the block drivers, or NULL for an 1050 * empty set of options. The reference to the QDict is transferred to this 1051 * function (even on failure), so if the caller intends to reuse the dictionary, 1052 * it needs to use QINCREF() before calling bdrv_file_open. 1053 */ 1054 int bdrv_open_backing_file(BlockDriverState *bs, QDict *options, Error **errp) 1055 { 1056 char backing_filename[PATH_MAX]; 1057 int back_flags, ret; 1058 BlockDriver *back_drv = NULL; 1059 Error *local_err = NULL; 1060 1061 if (bs->backing_hd != NULL) { 1062 QDECREF(options); 1063 return 0; 1064 } 1065 1066 /* NULL means an empty set of options */ 1067 if (options == NULL) { 1068 options = qdict_new(); 1069 } 1070 1071 bs->open_flags &= ~BDRV_O_NO_BACKING; 1072 if (qdict_haskey(options, "file.filename")) { 1073 backing_filename[0] = '\0'; 1074 } else if (bs->backing_file[0] == '\0' && qdict_size(options) == 0) { 1075 QDECREF(options); 1076 return 0; 1077 } else { 1078 bdrv_get_full_backing_filename(bs, backing_filename, 1079 sizeof(backing_filename)); 1080 } 1081 1082 if (bs->backing_format[0] != '\0') { 1083 back_drv = bdrv_find_format(bs->backing_format); 1084 } 1085 1086 /* backing files always opened read-only */ 1087 back_flags = bs->open_flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | 1088 BDRV_O_COPY_ON_READ); 1089 1090 assert(bs->backing_hd == NULL); 1091 ret = bdrv_open(&bs->backing_hd, 1092 *backing_filename ? backing_filename : NULL, NULL, options, 1093 back_flags, back_drv, &local_err); 1094 if (ret < 0) { 1095 bs->backing_hd = NULL; 1096 bs->open_flags |= BDRV_O_NO_BACKING; 1097 error_setg(errp, "Could not open backing file: %s", 1098 error_get_pretty(local_err)); 1099 error_free(local_err); 1100 return ret; 1101 } 1102 1103 if (bs->backing_hd->file) { 1104 pstrcpy(bs->backing_file, sizeof(bs->backing_file), 1105 bs->backing_hd->file->filename); 1106 } 1107 1108 /* Recalculate the BlockLimits with the backing file */ 1109 bdrv_refresh_limits(bs); 1110 1111 return 0; 1112 } 1113 1114 /* 1115 * Opens a disk image whose options are given as BlockdevRef in another block 1116 * device's options. 1117 * 1118 * If allow_none is true, no image will be opened if filename is false and no 1119 * BlockdevRef is given. *pbs will remain unchanged and 0 will be returned. 1120 * 1121 * bdrev_key specifies the key for the image's BlockdevRef in the options QDict. 1122 * That QDict has to be flattened; therefore, if the BlockdevRef is a QDict 1123 * itself, all options starting with "${bdref_key}." are considered part of the 1124 * BlockdevRef. 1125 * 1126 * The BlockdevRef will be removed from the options QDict. 1127 * 1128 * To conform with the behavior of bdrv_open(), *pbs has to be NULL. 1129 */ 1130 int bdrv_open_image(BlockDriverState **pbs, const char *filename, 1131 QDict *options, const char *bdref_key, int flags, 1132 bool allow_none, Error **errp) 1133 { 1134 QDict *image_options; 1135 int ret; 1136 char *bdref_key_dot; 1137 const char *reference; 1138 1139 assert(pbs); 1140 assert(*pbs == NULL); 1141 1142 bdref_key_dot = g_strdup_printf("%s.", bdref_key); 1143 qdict_extract_subqdict(options, &image_options, bdref_key_dot); 1144 g_free(bdref_key_dot); 1145 1146 reference = qdict_get_try_str(options, bdref_key); 1147 if (!filename && !reference && !qdict_size(image_options)) { 1148 if (allow_none) { 1149 ret = 0; 1150 } else { 1151 error_setg(errp, "A block device must be specified for \"%s\"", 1152 bdref_key); 1153 ret = -EINVAL; 1154 } 1155 goto done; 1156 } 1157 1158 ret = bdrv_open(pbs, filename, reference, image_options, flags, NULL, errp); 1159 1160 done: 1161 qdict_del(options, bdref_key); 1162 return ret; 1163 } 1164 1165 /* 1166 * Opens a disk image (raw, qcow2, vmdk, ...) 1167 * 1168 * options is a QDict of options to pass to the block drivers, or NULL for an 1169 * empty set of options. The reference to the QDict belongs to the block layer 1170 * after the call (even on failure), so if the caller intends to reuse the 1171 * dictionary, it needs to use QINCREF() before calling bdrv_open. 1172 * 1173 * If *pbs is NULL, a new BDS will be created with a pointer to it stored there. 1174 * If it is not NULL, the referenced BDS will be reused. 1175 * 1176 * The reference parameter may be used to specify an existing block device which 1177 * should be opened. If specified, neither options nor a filename may be given, 1178 * nor can an existing BDS be reused (that is, *pbs has to be NULL). 1179 */ 1180 int bdrv_open(BlockDriverState **pbs, const char *filename, 1181 const char *reference, QDict *options, int flags, 1182 BlockDriver *drv, Error **errp) 1183 { 1184 int ret; 1185 /* TODO: extra byte is a hack to ensure MAX_PATH space on Windows. */ 1186 char tmp_filename[PATH_MAX + 1]; 1187 BlockDriverState *file = NULL, *bs; 1188 const char *drvname; 1189 Error *local_err = NULL; 1190 1191 assert(pbs); 1192 1193 if (reference) { 1194 bool options_non_empty = options ? qdict_size(options) : false; 1195 QDECREF(options); 1196 1197 if (*pbs) { 1198 error_setg(errp, "Cannot reuse an existing BDS when referencing " 1199 "another block device"); 1200 return -EINVAL; 1201 } 1202 1203 if (filename || options_non_empty) { 1204 error_setg(errp, "Cannot reference an existing block device with " 1205 "additional options or a new filename"); 1206 return -EINVAL; 1207 } 1208 1209 bs = bdrv_lookup_bs(reference, reference, errp); 1210 if (!bs) { 1211 return -ENODEV; 1212 } 1213 bdrv_ref(bs); 1214 *pbs = bs; 1215 return 0; 1216 } 1217 1218 if (*pbs) { 1219 bs = *pbs; 1220 } else { 1221 bs = bdrv_new(""); 1222 } 1223 1224 /* NULL means an empty set of options */ 1225 if (options == NULL) { 1226 options = qdict_new(); 1227 } 1228 1229 bs->options = options; 1230 options = qdict_clone_shallow(options); 1231 1232 if (flags & BDRV_O_PROTOCOL) { 1233 assert(!drv); 1234 ret = bdrv_file_open(bs, filename, &options, flags & ~BDRV_O_PROTOCOL, 1235 &local_err); 1236 if (!ret) { 1237 drv = bs->drv; 1238 goto done; 1239 } else if (bs->drv) { 1240 goto close_and_fail; 1241 } else { 1242 goto fail; 1243 } 1244 } 1245 1246 /* For snapshot=on, create a temporary qcow2 overlay */ 1247 if (flags & BDRV_O_SNAPSHOT) { 1248 BlockDriverState *bs1; 1249 int64_t total_size; 1250 BlockDriver *bdrv_qcow2; 1251 QEMUOptionParameter *create_options; 1252 QDict *snapshot_options; 1253 1254 /* if snapshot, we create a temporary backing file and open it 1255 instead of opening 'filename' directly */ 1256 1257 /* Get the required size from the image */ 1258 QINCREF(options); 1259 bs1 = NULL; 1260 ret = bdrv_open(&bs1, filename, NULL, options, BDRV_O_NO_BACKING, 1261 drv, &local_err); 1262 if (ret < 0) { 1263 goto fail; 1264 } 1265 total_size = bdrv_getlength(bs1) & BDRV_SECTOR_MASK; 1266 1267 bdrv_unref(bs1); 1268 1269 /* Create the temporary image */ 1270 ret = get_tmp_filename(tmp_filename, sizeof(tmp_filename)); 1271 if (ret < 0) { 1272 error_setg_errno(errp, -ret, "Could not get temporary filename"); 1273 goto fail; 1274 } 1275 1276 bdrv_qcow2 = bdrv_find_format("qcow2"); 1277 create_options = parse_option_parameters("", bdrv_qcow2->create_options, 1278 NULL); 1279 1280 set_option_parameter_int(create_options, BLOCK_OPT_SIZE, total_size); 1281 1282 ret = bdrv_create(bdrv_qcow2, tmp_filename, create_options, &local_err); 1283 free_option_parameters(create_options); 1284 if (ret < 0) { 1285 error_setg_errno(errp, -ret, "Could not create temporary overlay " 1286 "'%s': %s", tmp_filename, 1287 error_get_pretty(local_err)); 1288 error_free(local_err); 1289 local_err = NULL; 1290 goto fail; 1291 } 1292 1293 /* Prepare a new options QDict for the temporary file, where user 1294 * options refer to the backing file */ 1295 if (filename) { 1296 qdict_put(options, "file.filename", qstring_from_str(filename)); 1297 } 1298 if (drv) { 1299 qdict_put(options, "driver", qstring_from_str(drv->format_name)); 1300 } 1301 1302 snapshot_options = qdict_new(); 1303 qdict_put(snapshot_options, "backing", options); 1304 qdict_flatten(snapshot_options); 1305 1306 bs->options = snapshot_options; 1307 options = qdict_clone_shallow(bs->options); 1308 1309 filename = tmp_filename; 1310 drv = bdrv_qcow2; 1311 bs->is_temporary = 1; 1312 } 1313 1314 /* Open image file without format layer */ 1315 if (flags & BDRV_O_RDWR) { 1316 flags |= BDRV_O_ALLOW_RDWR; 1317 } 1318 1319 assert(file == NULL); 1320 ret = bdrv_open_image(&file, filename, options, "file", 1321 bdrv_open_flags(bs, flags | BDRV_O_UNMAP) | 1322 BDRV_O_PROTOCOL, true, &local_err); 1323 if (ret < 0) { 1324 goto unlink_and_fail; 1325 } 1326 1327 /* Find the right image format driver */ 1328 drvname = qdict_get_try_str(options, "driver"); 1329 if (drvname) { 1330 drv = bdrv_find_format(drvname); 1331 qdict_del(options, "driver"); 1332 if (!drv) { 1333 error_setg(errp, "Invalid driver: '%s'", drvname); 1334 ret = -EINVAL; 1335 goto unlink_and_fail; 1336 } 1337 } 1338 1339 if (!drv) { 1340 if (file) { 1341 ret = find_image_format(file, filename, &drv, &local_err); 1342 } else { 1343 error_setg(errp, "Must specify either driver or file"); 1344 ret = -EINVAL; 1345 goto unlink_and_fail; 1346 } 1347 } 1348 1349 if (!drv) { 1350 goto unlink_and_fail; 1351 } 1352 1353 /* Open the image */ 1354 ret = bdrv_open_common(bs, file, options, flags, drv, &local_err); 1355 if (ret < 0) { 1356 goto unlink_and_fail; 1357 } 1358 1359 if (file && (bs->file != file)) { 1360 bdrv_unref(file); 1361 file = NULL; 1362 } 1363 1364 /* If there is a backing file, use it */ 1365 if ((flags & BDRV_O_NO_BACKING) == 0) { 1366 QDict *backing_options; 1367 1368 qdict_extract_subqdict(options, &backing_options, "backing."); 1369 ret = bdrv_open_backing_file(bs, backing_options, &local_err); 1370 if (ret < 0) { 1371 goto close_and_fail; 1372 } 1373 } 1374 1375 done: 1376 /* Check if any unknown options were used */ 1377 if (options && (qdict_size(options) != 0)) { 1378 const QDictEntry *entry = qdict_first(options); 1379 if (flags & BDRV_O_PROTOCOL) { 1380 error_setg(errp, "Block protocol '%s' doesn't support the option " 1381 "'%s'", drv->format_name, entry->key); 1382 } else { 1383 error_setg(errp, "Block format '%s' used by device '%s' doesn't " 1384 "support the option '%s'", drv->format_name, 1385 bs->device_name, entry->key); 1386 } 1387 1388 ret = -EINVAL; 1389 goto close_and_fail; 1390 } 1391 1392 if (!bdrv_key_required(bs)) { 1393 bdrv_dev_change_media_cb(bs, true); 1394 } else if (!runstate_check(RUN_STATE_PRELAUNCH) 1395 && !runstate_check(RUN_STATE_INMIGRATE) 1396 && !runstate_check(RUN_STATE_PAUSED)) { /* HACK */ 1397 error_setg(errp, 1398 "Guest must be stopped for opening of encrypted image"); 1399 ret = -EBUSY; 1400 goto close_and_fail; 1401 } 1402 1403 QDECREF(options); 1404 *pbs = bs; 1405 return 0; 1406 1407 unlink_and_fail: 1408 if (file != NULL) { 1409 bdrv_unref(file); 1410 } 1411 if (bs->is_temporary) { 1412 unlink(filename); 1413 } 1414 fail: 1415 QDECREF(bs->options); 1416 QDECREF(options); 1417 bs->options = NULL; 1418 if (!*pbs) { 1419 /* If *pbs is NULL, a new BDS has been created in this function and 1420 needs to be freed now. Otherwise, it does not need to be closed, 1421 since it has not really been opened yet. */ 1422 bdrv_unref(bs); 1423 } 1424 if (local_err) { 1425 error_propagate(errp, local_err); 1426 } 1427 return ret; 1428 1429 close_and_fail: 1430 /* See fail path, but now the BDS has to be always closed */ 1431 if (*pbs) { 1432 bdrv_close(bs); 1433 } else { 1434 bdrv_unref(bs); 1435 } 1436 QDECREF(options); 1437 if (local_err) { 1438 error_propagate(errp, local_err); 1439 } 1440 return ret; 1441 } 1442 1443 typedef struct BlockReopenQueueEntry { 1444 bool prepared; 1445 BDRVReopenState state; 1446 QSIMPLEQ_ENTRY(BlockReopenQueueEntry) entry; 1447 } BlockReopenQueueEntry; 1448 1449 /* 1450 * Adds a BlockDriverState to a simple queue for an atomic, transactional 1451 * reopen of multiple devices. 1452 * 1453 * bs_queue can either be an existing BlockReopenQueue that has had QSIMPLE_INIT 1454 * already performed, or alternatively may be NULL a new BlockReopenQueue will 1455 * be created and initialized. This newly created BlockReopenQueue should be 1456 * passed back in for subsequent calls that are intended to be of the same 1457 * atomic 'set'. 1458 * 1459 * bs is the BlockDriverState to add to the reopen queue. 1460 * 1461 * flags contains the open flags for the associated bs 1462 * 1463 * returns a pointer to bs_queue, which is either the newly allocated 1464 * bs_queue, or the existing bs_queue being used. 1465 * 1466 */ 1467 BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue, 1468 BlockDriverState *bs, int flags) 1469 { 1470 assert(bs != NULL); 1471 1472 BlockReopenQueueEntry *bs_entry; 1473 if (bs_queue == NULL) { 1474 bs_queue = g_new0(BlockReopenQueue, 1); 1475 QSIMPLEQ_INIT(bs_queue); 1476 } 1477 1478 if (bs->file) { 1479 bdrv_reopen_queue(bs_queue, bs->file, flags); 1480 } 1481 1482 bs_entry = g_new0(BlockReopenQueueEntry, 1); 1483 QSIMPLEQ_INSERT_TAIL(bs_queue, bs_entry, entry); 1484 1485 bs_entry->state.bs = bs; 1486 bs_entry->state.flags = flags; 1487 1488 return bs_queue; 1489 } 1490 1491 /* 1492 * Reopen multiple BlockDriverStates atomically & transactionally. 1493 * 1494 * The queue passed in (bs_queue) must have been built up previous 1495 * via bdrv_reopen_queue(). 1496 * 1497 * Reopens all BDS specified in the queue, with the appropriate 1498 * flags. All devices are prepared for reopen, and failure of any 1499 * device will cause all device changes to be abandonded, and intermediate 1500 * data cleaned up. 1501 * 1502 * If all devices prepare successfully, then the changes are committed 1503 * to all devices. 1504 * 1505 */ 1506 int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp) 1507 { 1508 int ret = -1; 1509 BlockReopenQueueEntry *bs_entry, *next; 1510 Error *local_err = NULL; 1511 1512 assert(bs_queue != NULL); 1513 1514 bdrv_drain_all(); 1515 1516 QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) { 1517 if (bdrv_reopen_prepare(&bs_entry->state, bs_queue, &local_err)) { 1518 error_propagate(errp, local_err); 1519 goto cleanup; 1520 } 1521 bs_entry->prepared = true; 1522 } 1523 1524 /* If we reach this point, we have success and just need to apply the 1525 * changes 1526 */ 1527 QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) { 1528 bdrv_reopen_commit(&bs_entry->state); 1529 } 1530 1531 ret = 0; 1532 1533 cleanup: 1534 QSIMPLEQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) { 1535 if (ret && bs_entry->prepared) { 1536 bdrv_reopen_abort(&bs_entry->state); 1537 } 1538 g_free(bs_entry); 1539 } 1540 g_free(bs_queue); 1541 return ret; 1542 } 1543 1544 1545 /* Reopen a single BlockDriverState with the specified flags. */ 1546 int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp) 1547 { 1548 int ret = -1; 1549 Error *local_err = NULL; 1550 BlockReopenQueue *queue = bdrv_reopen_queue(NULL, bs, bdrv_flags); 1551 1552 ret = bdrv_reopen_multiple(queue, &local_err); 1553 if (local_err != NULL) { 1554 error_propagate(errp, local_err); 1555 } 1556 return ret; 1557 } 1558 1559 1560 /* 1561 * Prepares a BlockDriverState for reopen. All changes are staged in the 1562 * 'opaque' field of the BDRVReopenState, which is used and allocated by 1563 * the block driver layer .bdrv_reopen_prepare() 1564 * 1565 * bs is the BlockDriverState to reopen 1566 * flags are the new open flags 1567 * queue is the reopen queue 1568 * 1569 * Returns 0 on success, non-zero on error. On error errp will be set 1570 * as well. 1571 * 1572 * On failure, bdrv_reopen_abort() will be called to clean up any data. 1573 * It is the responsibility of the caller to then call the abort() or 1574 * commit() for any other BDS that have been left in a prepare() state 1575 * 1576 */ 1577 int bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue, 1578 Error **errp) 1579 { 1580 int ret = -1; 1581 Error *local_err = NULL; 1582 BlockDriver *drv; 1583 1584 assert(reopen_state != NULL); 1585 assert(reopen_state->bs->drv != NULL); 1586 drv = reopen_state->bs->drv; 1587 1588 /* if we are to stay read-only, do not allow permission change 1589 * to r/w */ 1590 if (!(reopen_state->bs->open_flags & BDRV_O_ALLOW_RDWR) && 1591 reopen_state->flags & BDRV_O_RDWR) { 1592 error_set(errp, QERR_DEVICE_IS_READ_ONLY, 1593 reopen_state->bs->device_name); 1594 goto error; 1595 } 1596 1597 1598 ret = bdrv_flush(reopen_state->bs); 1599 if (ret) { 1600 error_set(errp, ERROR_CLASS_GENERIC_ERROR, "Error (%s) flushing drive", 1601 strerror(-ret)); 1602 goto error; 1603 } 1604 1605 if (drv->bdrv_reopen_prepare) { 1606 ret = drv->bdrv_reopen_prepare(reopen_state, queue, &local_err); 1607 if (ret) { 1608 if (local_err != NULL) { 1609 error_propagate(errp, local_err); 1610 } else { 1611 error_setg(errp, "failed while preparing to reopen image '%s'", 1612 reopen_state->bs->filename); 1613 } 1614 goto error; 1615 } 1616 } else { 1617 /* It is currently mandatory to have a bdrv_reopen_prepare() 1618 * handler for each supported drv. */ 1619 error_set(errp, QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED, 1620 drv->format_name, reopen_state->bs->device_name, 1621 "reopening of file"); 1622 ret = -1; 1623 goto error; 1624 } 1625 1626 ret = 0; 1627 1628 error: 1629 return ret; 1630 } 1631 1632 /* 1633 * Takes the staged changes for the reopen from bdrv_reopen_prepare(), and 1634 * makes them final by swapping the staging BlockDriverState contents into 1635 * the active BlockDriverState contents. 1636 */ 1637 void bdrv_reopen_commit(BDRVReopenState *reopen_state) 1638 { 1639 BlockDriver *drv; 1640 1641 assert(reopen_state != NULL); 1642 drv = reopen_state->bs->drv; 1643 assert(drv != NULL); 1644 1645 /* If there are any driver level actions to take */ 1646 if (drv->bdrv_reopen_commit) { 1647 drv->bdrv_reopen_commit(reopen_state); 1648 } 1649 1650 /* set BDS specific flags now */ 1651 reopen_state->bs->open_flags = reopen_state->flags; 1652 reopen_state->bs->enable_write_cache = !!(reopen_state->flags & 1653 BDRV_O_CACHE_WB); 1654 reopen_state->bs->read_only = !(reopen_state->flags & BDRV_O_RDWR); 1655 1656 bdrv_refresh_limits(reopen_state->bs); 1657 } 1658 1659 /* 1660 * Abort the reopen, and delete and free the staged changes in 1661 * reopen_state 1662 */ 1663 void bdrv_reopen_abort(BDRVReopenState *reopen_state) 1664 { 1665 BlockDriver *drv; 1666 1667 assert(reopen_state != NULL); 1668 drv = reopen_state->bs->drv; 1669 assert(drv != NULL); 1670 1671 if (drv->bdrv_reopen_abort) { 1672 drv->bdrv_reopen_abort(reopen_state); 1673 } 1674 } 1675 1676 1677 void bdrv_close(BlockDriverState *bs) 1678 { 1679 if (bs->job) { 1680 block_job_cancel_sync(bs->job); 1681 } 1682 bdrv_drain_all(); /* complete I/O */ 1683 bdrv_flush(bs); 1684 bdrv_drain_all(); /* in case flush left pending I/O */ 1685 notifier_list_notify(&bs->close_notifiers, bs); 1686 1687 if (bs->drv) { 1688 if (bs->backing_hd) { 1689 bdrv_unref(bs->backing_hd); 1690 bs->backing_hd = NULL; 1691 } 1692 bs->drv->bdrv_close(bs); 1693 g_free(bs->opaque); 1694 #ifdef _WIN32 1695 if (bs->is_temporary) { 1696 unlink(bs->filename); 1697 } 1698 #endif 1699 bs->opaque = NULL; 1700 bs->drv = NULL; 1701 bs->copy_on_read = 0; 1702 bs->backing_file[0] = '\0'; 1703 bs->backing_format[0] = '\0'; 1704 bs->total_sectors = 0; 1705 bs->encrypted = 0; 1706 bs->valid_key = 0; 1707 bs->sg = 0; 1708 bs->growable = 0; 1709 bs->zero_beyond_eof = false; 1710 QDECREF(bs->options); 1711 bs->options = NULL; 1712 1713 if (bs->file != NULL) { 1714 bdrv_unref(bs->file); 1715 bs->file = NULL; 1716 } 1717 } 1718 1719 bdrv_dev_change_media_cb(bs, false); 1720 1721 /*throttling disk I/O limits*/ 1722 if (bs->io_limits_enabled) { 1723 bdrv_io_limits_disable(bs); 1724 } 1725 } 1726 1727 void bdrv_close_all(void) 1728 { 1729 BlockDriverState *bs; 1730 1731 QTAILQ_FOREACH(bs, &bdrv_states, device_list) { 1732 bdrv_close(bs); 1733 } 1734 } 1735 1736 /* Check if any requests are in-flight (including throttled requests) */ 1737 static bool bdrv_requests_pending(BlockDriverState *bs) 1738 { 1739 if (!QLIST_EMPTY(&bs->tracked_requests)) { 1740 return true; 1741 } 1742 if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) { 1743 return true; 1744 } 1745 if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) { 1746 return true; 1747 } 1748 if (bs->file && bdrv_requests_pending(bs->file)) { 1749 return true; 1750 } 1751 if (bs->backing_hd && bdrv_requests_pending(bs->backing_hd)) { 1752 return true; 1753 } 1754 return false; 1755 } 1756 1757 static bool bdrv_requests_pending_all(void) 1758 { 1759 BlockDriverState *bs; 1760 QTAILQ_FOREACH(bs, &bdrv_states, device_list) { 1761 if (bdrv_requests_pending(bs)) { 1762 return true; 1763 } 1764 } 1765 return false; 1766 } 1767 1768 /* 1769 * Wait for pending requests to complete across all BlockDriverStates 1770 * 1771 * This function does not flush data to disk, use bdrv_flush_all() for that 1772 * after calling this function. 1773 * 1774 * Note that completion of an asynchronous I/O operation can trigger any 1775 * number of other I/O operations on other devices---for example a coroutine 1776 * can be arbitrarily complex and a constant flow of I/O can come until the 1777 * coroutine is complete. Because of this, it is not possible to have a 1778 * function to drain a single device's I/O queue. 1779 */ 1780 void bdrv_drain_all(void) 1781 { 1782 /* Always run first iteration so any pending completion BHs run */ 1783 bool busy = true; 1784 BlockDriverState *bs; 1785 1786 while (busy) { 1787 QTAILQ_FOREACH(bs, &bdrv_states, device_list) { 1788 bdrv_start_throttled_reqs(bs); 1789 } 1790 1791 busy = bdrv_requests_pending_all(); 1792 busy |= aio_poll(qemu_get_aio_context(), busy); 1793 } 1794 } 1795 1796 /* make a BlockDriverState anonymous by removing from bdrv_state and 1797 * graph_bdrv_state list. 1798 Also, NULL terminate the device_name to prevent double remove */ 1799 void bdrv_make_anon(BlockDriverState *bs) 1800 { 1801 if (bs->device_name[0] != '\0') { 1802 QTAILQ_REMOVE(&bdrv_states, bs, device_list); 1803 } 1804 bs->device_name[0] = '\0'; 1805 if (bs->node_name[0] != '\0') { 1806 QTAILQ_REMOVE(&graph_bdrv_states, bs, node_list); 1807 } 1808 bs->node_name[0] = '\0'; 1809 } 1810 1811 static void bdrv_rebind(BlockDriverState *bs) 1812 { 1813 if (bs->drv && bs->drv->bdrv_rebind) { 1814 bs->drv->bdrv_rebind(bs); 1815 } 1816 } 1817 1818 static void bdrv_move_feature_fields(BlockDriverState *bs_dest, 1819 BlockDriverState *bs_src) 1820 { 1821 /* move some fields that need to stay attached to the device */ 1822 bs_dest->open_flags = bs_src->open_flags; 1823 1824 /* dev info */ 1825 bs_dest->dev_ops = bs_src->dev_ops; 1826 bs_dest->dev_opaque = bs_src->dev_opaque; 1827 bs_dest->dev = bs_src->dev; 1828 bs_dest->guest_block_size = bs_src->guest_block_size; 1829 bs_dest->copy_on_read = bs_src->copy_on_read; 1830 1831 bs_dest->enable_write_cache = bs_src->enable_write_cache; 1832 1833 /* i/o throttled req */ 1834 memcpy(&bs_dest->throttle_state, 1835 &bs_src->throttle_state, 1836 sizeof(ThrottleState)); 1837 bs_dest->throttled_reqs[0] = bs_src->throttled_reqs[0]; 1838 bs_dest->throttled_reqs[1] = bs_src->throttled_reqs[1]; 1839 bs_dest->io_limits_enabled = bs_src->io_limits_enabled; 1840 1841 /* r/w error */ 1842 bs_dest->on_read_error = bs_src->on_read_error; 1843 bs_dest->on_write_error = bs_src->on_write_error; 1844 1845 /* i/o status */ 1846 bs_dest->iostatus_enabled = bs_src->iostatus_enabled; 1847 bs_dest->iostatus = bs_src->iostatus; 1848 1849 /* dirty bitmap */ 1850 bs_dest->dirty_bitmaps = bs_src->dirty_bitmaps; 1851 1852 /* reference count */ 1853 bs_dest->refcnt = bs_src->refcnt; 1854 1855 /* job */ 1856 bs_dest->in_use = bs_src->in_use; 1857 bs_dest->job = bs_src->job; 1858 1859 /* keep the same entry in bdrv_states */ 1860 pstrcpy(bs_dest->device_name, sizeof(bs_dest->device_name), 1861 bs_src->device_name); 1862 bs_dest->device_list = bs_src->device_list; 1863 } 1864 1865 /* 1866 * Swap bs contents for two image chains while they are live, 1867 * while keeping required fields on the BlockDriverState that is 1868 * actually attached to a device. 1869 * 1870 * This will modify the BlockDriverState fields, and swap contents 1871 * between bs_new and bs_old. Both bs_new and bs_old are modified. 1872 * 1873 * bs_new is required to be anonymous. 1874 * 1875 * This function does not create any image files. 1876 */ 1877 void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old) 1878 { 1879 BlockDriverState tmp; 1880 1881 /* The code needs to swap the node_name but simply swapping node_list won't 1882 * work so first remove the nodes from the graph list, do the swap then 1883 * insert them back if needed. 1884 */ 1885 if (bs_new->node_name[0] != '\0') { 1886 QTAILQ_REMOVE(&graph_bdrv_states, bs_new, node_list); 1887 } 1888 if (bs_old->node_name[0] != '\0') { 1889 QTAILQ_REMOVE(&graph_bdrv_states, bs_old, node_list); 1890 } 1891 1892 /* bs_new must be anonymous and shouldn't have anything fancy enabled */ 1893 assert(bs_new->device_name[0] == '\0'); 1894 assert(QLIST_EMPTY(&bs_new->dirty_bitmaps)); 1895 assert(bs_new->job == NULL); 1896 assert(bs_new->dev == NULL); 1897 assert(bs_new->in_use == 0); 1898 assert(bs_new->io_limits_enabled == false); 1899 assert(!throttle_have_timer(&bs_new->throttle_state)); 1900 1901 tmp = *bs_new; 1902 *bs_new = *bs_old; 1903 *bs_old = tmp; 1904 1905 /* there are some fields that should not be swapped, move them back */ 1906 bdrv_move_feature_fields(&tmp, bs_old); 1907 bdrv_move_feature_fields(bs_old, bs_new); 1908 bdrv_move_feature_fields(bs_new, &tmp); 1909 1910 /* bs_new shouldn't be in bdrv_states even after the swap! */ 1911 assert(bs_new->device_name[0] == '\0'); 1912 1913 /* Check a few fields that should remain attached to the device */ 1914 assert(bs_new->dev == NULL); 1915 assert(bs_new->job == NULL); 1916 assert(bs_new->in_use == 0); 1917 assert(bs_new->io_limits_enabled == false); 1918 assert(!throttle_have_timer(&bs_new->throttle_state)); 1919 1920 /* insert the nodes back into the graph node list if needed */ 1921 if (bs_new->node_name[0] != '\0') { 1922 QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs_new, node_list); 1923 } 1924 if (bs_old->node_name[0] != '\0') { 1925 QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs_old, node_list); 1926 } 1927 1928 bdrv_rebind(bs_new); 1929 bdrv_rebind(bs_old); 1930 } 1931 1932 /* 1933 * Add new bs contents at the top of an image chain while the chain is 1934 * live, while keeping required fields on the top layer. 1935 * 1936 * This will modify the BlockDriverState fields, and swap contents 1937 * between bs_new and bs_top. Both bs_new and bs_top are modified. 1938 * 1939 * bs_new is required to be anonymous. 1940 * 1941 * This function does not create any image files. 1942 */ 1943 void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top) 1944 { 1945 bdrv_swap(bs_new, bs_top); 1946 1947 /* The contents of 'tmp' will become bs_top, as we are 1948 * swapping bs_new and bs_top contents. */ 1949 bs_top->backing_hd = bs_new; 1950 bs_top->open_flags &= ~BDRV_O_NO_BACKING; 1951 pstrcpy(bs_top->backing_file, sizeof(bs_top->backing_file), 1952 bs_new->filename); 1953 pstrcpy(bs_top->backing_format, sizeof(bs_top->backing_format), 1954 bs_new->drv ? bs_new->drv->format_name : ""); 1955 } 1956 1957 static void bdrv_delete(BlockDriverState *bs) 1958 { 1959 assert(!bs->dev); 1960 assert(!bs->job); 1961 assert(!bs->in_use); 1962 assert(!bs->refcnt); 1963 assert(QLIST_EMPTY(&bs->dirty_bitmaps)); 1964 1965 bdrv_close(bs); 1966 1967 /* remove from list, if necessary */ 1968 bdrv_make_anon(bs); 1969 1970 g_free(bs); 1971 } 1972 1973 int bdrv_attach_dev(BlockDriverState *bs, void *dev) 1974 /* TODO change to DeviceState *dev when all users are qdevified */ 1975 { 1976 if (bs->dev) { 1977 return -EBUSY; 1978 } 1979 bs->dev = dev; 1980 bdrv_iostatus_reset(bs); 1981 return 0; 1982 } 1983 1984 /* TODO qdevified devices don't use this, remove when devices are qdevified */ 1985 void bdrv_attach_dev_nofail(BlockDriverState *bs, void *dev) 1986 { 1987 if (bdrv_attach_dev(bs, dev) < 0) { 1988 abort(); 1989 } 1990 } 1991 1992 void bdrv_detach_dev(BlockDriverState *bs, void *dev) 1993 /* TODO change to DeviceState *dev when all users are qdevified */ 1994 { 1995 assert(bs->dev == dev); 1996 bs->dev = NULL; 1997 bs->dev_ops = NULL; 1998 bs->dev_opaque = NULL; 1999 bs->guest_block_size = 512; 2000 } 2001 2002 /* TODO change to return DeviceState * when all users are qdevified */ 2003 void *bdrv_get_attached_dev(BlockDriverState *bs) 2004 { 2005 return bs->dev; 2006 } 2007 2008 void bdrv_set_dev_ops(BlockDriverState *bs, const BlockDevOps *ops, 2009 void *opaque) 2010 { 2011 bs->dev_ops = ops; 2012 bs->dev_opaque = opaque; 2013 } 2014 2015 void bdrv_emit_qmp_error_event(const BlockDriverState *bdrv, 2016 enum MonitorEvent ev, 2017 BlockErrorAction action, bool is_read) 2018 { 2019 QObject *data; 2020 const char *action_str; 2021 2022 switch (action) { 2023 case BDRV_ACTION_REPORT: 2024 action_str = "report"; 2025 break; 2026 case BDRV_ACTION_IGNORE: 2027 action_str = "ignore"; 2028 break; 2029 case BDRV_ACTION_STOP: 2030 action_str = "stop"; 2031 break; 2032 default: 2033 abort(); 2034 } 2035 2036 data = qobject_from_jsonf("{ 'device': %s, 'action': %s, 'operation': %s }", 2037 bdrv->device_name, 2038 action_str, 2039 is_read ? "read" : "write"); 2040 monitor_protocol_event(ev, data); 2041 2042 qobject_decref(data); 2043 } 2044 2045 static void bdrv_emit_qmp_eject_event(BlockDriverState *bs, bool ejected) 2046 { 2047 QObject *data; 2048 2049 data = qobject_from_jsonf("{ 'device': %s, 'tray-open': %i }", 2050 bdrv_get_device_name(bs), ejected); 2051 monitor_protocol_event(QEVENT_DEVICE_TRAY_MOVED, data); 2052 2053 qobject_decref(data); 2054 } 2055 2056 static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load) 2057 { 2058 if (bs->dev_ops && bs->dev_ops->change_media_cb) { 2059 bool tray_was_closed = !bdrv_dev_is_tray_open(bs); 2060 bs->dev_ops->change_media_cb(bs->dev_opaque, load); 2061 if (tray_was_closed) { 2062 /* tray open */ 2063 bdrv_emit_qmp_eject_event(bs, true); 2064 } 2065 if (load) { 2066 /* tray close */ 2067 bdrv_emit_qmp_eject_event(bs, false); 2068 } 2069 } 2070 } 2071 2072 bool bdrv_dev_has_removable_media(BlockDriverState *bs) 2073 { 2074 return !bs->dev || (bs->dev_ops && bs->dev_ops->change_media_cb); 2075 } 2076 2077 void bdrv_dev_eject_request(BlockDriverState *bs, bool force) 2078 { 2079 if (bs->dev_ops && bs->dev_ops->eject_request_cb) { 2080 bs->dev_ops->eject_request_cb(bs->dev_opaque, force); 2081 } 2082 } 2083 2084 bool bdrv_dev_is_tray_open(BlockDriverState *bs) 2085 { 2086 if (bs->dev_ops && bs->dev_ops->is_tray_open) { 2087 return bs->dev_ops->is_tray_open(bs->dev_opaque); 2088 } 2089 return false; 2090 } 2091 2092 static void bdrv_dev_resize_cb(BlockDriverState *bs) 2093 { 2094 if (bs->dev_ops && bs->dev_ops->resize_cb) { 2095 bs->dev_ops->resize_cb(bs->dev_opaque); 2096 } 2097 } 2098 2099 bool bdrv_dev_is_medium_locked(BlockDriverState *bs) 2100 { 2101 if (bs->dev_ops && bs->dev_ops->is_medium_locked) { 2102 return bs->dev_ops->is_medium_locked(bs->dev_opaque); 2103 } 2104 return false; 2105 } 2106 2107 /* 2108 * Run consistency checks on an image 2109 * 2110 * Returns 0 if the check could be completed (it doesn't mean that the image is 2111 * free of errors) or -errno when an internal error occurred. The results of the 2112 * check are stored in res. 2113 */ 2114 int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix) 2115 { 2116 if (bs->drv->bdrv_check == NULL) { 2117 return -ENOTSUP; 2118 } 2119 2120 memset(res, 0, sizeof(*res)); 2121 return bs->drv->bdrv_check(bs, res, fix); 2122 } 2123 2124 #define COMMIT_BUF_SECTORS 2048 2125 2126 /* commit COW file into the raw image */ 2127 int bdrv_commit(BlockDriverState *bs) 2128 { 2129 BlockDriver *drv = bs->drv; 2130 int64_t sector, total_sectors, length, backing_length; 2131 int n, ro, open_flags; 2132 int ret = 0; 2133 uint8_t *buf = NULL; 2134 char filename[PATH_MAX]; 2135 2136 if (!drv) 2137 return -ENOMEDIUM; 2138 2139 if (!bs->backing_hd) { 2140 return -ENOTSUP; 2141 } 2142 2143 if (bdrv_in_use(bs) || bdrv_in_use(bs->backing_hd)) { 2144 return -EBUSY; 2145 } 2146 2147 ro = bs->backing_hd->read_only; 2148 /* Use pstrcpy (not strncpy): filename must be NUL-terminated. */ 2149 pstrcpy(filename, sizeof(filename), bs->backing_hd->filename); 2150 open_flags = bs->backing_hd->open_flags; 2151 2152 if (ro) { 2153 if (bdrv_reopen(bs->backing_hd, open_flags | BDRV_O_RDWR, NULL)) { 2154 return -EACCES; 2155 } 2156 } 2157 2158 length = bdrv_getlength(bs); 2159 if (length < 0) { 2160 ret = length; 2161 goto ro_cleanup; 2162 } 2163 2164 backing_length = bdrv_getlength(bs->backing_hd); 2165 if (backing_length < 0) { 2166 ret = backing_length; 2167 goto ro_cleanup; 2168 } 2169 2170 /* If our top snapshot is larger than the backing file image, 2171 * grow the backing file image if possible. If not possible, 2172 * we must return an error */ 2173 if (length > backing_length) { 2174 ret = bdrv_truncate(bs->backing_hd, length); 2175 if (ret < 0) { 2176 goto ro_cleanup; 2177 } 2178 } 2179 2180 total_sectors = length >> BDRV_SECTOR_BITS; 2181 buf = g_malloc(COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE); 2182 2183 for (sector = 0; sector < total_sectors; sector += n) { 2184 ret = bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n); 2185 if (ret < 0) { 2186 goto ro_cleanup; 2187 } 2188 if (ret) { 2189 ret = bdrv_read(bs, sector, buf, n); 2190 if (ret < 0) { 2191 goto ro_cleanup; 2192 } 2193 2194 ret = bdrv_write(bs->backing_hd, sector, buf, n); 2195 if (ret < 0) { 2196 goto ro_cleanup; 2197 } 2198 } 2199 } 2200 2201 if (drv->bdrv_make_empty) { 2202 ret = drv->bdrv_make_empty(bs); 2203 if (ret < 0) { 2204 goto ro_cleanup; 2205 } 2206 bdrv_flush(bs); 2207 } 2208 2209 /* 2210 * Make sure all data we wrote to the backing device is actually 2211 * stable on disk. 2212 */ 2213 if (bs->backing_hd) { 2214 bdrv_flush(bs->backing_hd); 2215 } 2216 2217 ret = 0; 2218 ro_cleanup: 2219 g_free(buf); 2220 2221 if (ro) { 2222 /* ignoring error return here */ 2223 bdrv_reopen(bs->backing_hd, open_flags & ~BDRV_O_RDWR, NULL); 2224 } 2225 2226 return ret; 2227 } 2228 2229 int bdrv_commit_all(void) 2230 { 2231 BlockDriverState *bs; 2232 2233 QTAILQ_FOREACH(bs, &bdrv_states, device_list) { 2234 if (bs->drv && bs->backing_hd) { 2235 int ret = bdrv_commit(bs); 2236 if (ret < 0) { 2237 return ret; 2238 } 2239 } 2240 } 2241 return 0; 2242 } 2243 2244 /** 2245 * Remove an active request from the tracked requests list 2246 * 2247 * This function should be called when a tracked request is completing. 2248 */ 2249 static void tracked_request_end(BdrvTrackedRequest *req) 2250 { 2251 if (req->serialising) { 2252 req->bs->serialising_in_flight--; 2253 } 2254 2255 QLIST_REMOVE(req, list); 2256 qemu_co_queue_restart_all(&req->wait_queue); 2257 } 2258 2259 /** 2260 * Add an active request to the tracked requests list 2261 */ 2262 static void tracked_request_begin(BdrvTrackedRequest *req, 2263 BlockDriverState *bs, 2264 int64_t offset, 2265 unsigned int bytes, bool is_write) 2266 { 2267 *req = (BdrvTrackedRequest){ 2268 .bs = bs, 2269 .offset = offset, 2270 .bytes = bytes, 2271 .is_write = is_write, 2272 .co = qemu_coroutine_self(), 2273 .serialising = false, 2274 .overlap_offset = offset, 2275 .overlap_bytes = bytes, 2276 }; 2277 2278 qemu_co_queue_init(&req->wait_queue); 2279 2280 QLIST_INSERT_HEAD(&bs->tracked_requests, req, list); 2281 } 2282 2283 static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align) 2284 { 2285 int64_t overlap_offset = req->offset & ~(align - 1); 2286 unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align) 2287 - overlap_offset; 2288 2289 if (!req->serialising) { 2290 req->bs->serialising_in_flight++; 2291 req->serialising = true; 2292 } 2293 2294 req->overlap_offset = MIN(req->overlap_offset, overlap_offset); 2295 req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes); 2296 } 2297 2298 /** 2299 * Round a region to cluster boundaries 2300 */ 2301 void bdrv_round_to_clusters(BlockDriverState *bs, 2302 int64_t sector_num, int nb_sectors, 2303 int64_t *cluster_sector_num, 2304 int *cluster_nb_sectors) 2305 { 2306 BlockDriverInfo bdi; 2307 2308 if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) { 2309 *cluster_sector_num = sector_num; 2310 *cluster_nb_sectors = nb_sectors; 2311 } else { 2312 int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE; 2313 *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c); 2314 *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num + 2315 nb_sectors, c); 2316 } 2317 } 2318 2319 static int bdrv_get_cluster_size(BlockDriverState *bs) 2320 { 2321 BlockDriverInfo bdi; 2322 int ret; 2323 2324 ret = bdrv_get_info(bs, &bdi); 2325 if (ret < 0 || bdi.cluster_size == 0) { 2326 return bs->request_alignment; 2327 } else { 2328 return bdi.cluster_size; 2329 } 2330 } 2331 2332 static bool tracked_request_overlaps(BdrvTrackedRequest *req, 2333 int64_t offset, unsigned int bytes) 2334 { 2335 /* aaaa bbbb */ 2336 if (offset >= req->overlap_offset + req->overlap_bytes) { 2337 return false; 2338 } 2339 /* bbbb aaaa */ 2340 if (req->overlap_offset >= offset + bytes) { 2341 return false; 2342 } 2343 return true; 2344 } 2345 2346 static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self) 2347 { 2348 BlockDriverState *bs = self->bs; 2349 BdrvTrackedRequest *req; 2350 bool retry; 2351 bool waited = false; 2352 2353 if (!bs->serialising_in_flight) { 2354 return false; 2355 } 2356 2357 do { 2358 retry = false; 2359 QLIST_FOREACH(req, &bs->tracked_requests, list) { 2360 if (req == self || (!req->serialising && !self->serialising)) { 2361 continue; 2362 } 2363 if (tracked_request_overlaps(req, self->overlap_offset, 2364 self->overlap_bytes)) 2365 { 2366 /* Hitting this means there was a reentrant request, for 2367 * example, a block driver issuing nested requests. This must 2368 * never happen since it means deadlock. 2369 */ 2370 assert(qemu_coroutine_self() != req->co); 2371 2372 /* If the request is already (indirectly) waiting for us, or 2373 * will wait for us as soon as it wakes up, then just go on 2374 * (instead of producing a deadlock in the former case). */ 2375 if (!req->waiting_for) { 2376 self->waiting_for = req; 2377 qemu_co_queue_wait(&req->wait_queue); 2378 self->waiting_for = NULL; 2379 retry = true; 2380 waited = true; 2381 break; 2382 } 2383 } 2384 } 2385 } while (retry); 2386 2387 return waited; 2388 } 2389 2390 /* 2391 * Return values: 2392 * 0 - success 2393 * -EINVAL - backing format specified, but no file 2394 * -ENOSPC - can't update the backing file because no space is left in the 2395 * image file header 2396 * -ENOTSUP - format driver doesn't support changing the backing file 2397 */ 2398 int bdrv_change_backing_file(BlockDriverState *bs, 2399 const char *backing_file, const char *backing_fmt) 2400 { 2401 BlockDriver *drv = bs->drv; 2402 int ret; 2403 2404 /* Backing file format doesn't make sense without a backing file */ 2405 if (backing_fmt && !backing_file) { 2406 return -EINVAL; 2407 } 2408 2409 if (drv->bdrv_change_backing_file != NULL) { 2410 ret = drv->bdrv_change_backing_file(bs, backing_file, backing_fmt); 2411 } else { 2412 ret = -ENOTSUP; 2413 } 2414 2415 if (ret == 0) { 2416 pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: ""); 2417 pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: ""); 2418 } 2419 return ret; 2420 } 2421 2422 /* 2423 * Finds the image layer in the chain that has 'bs' as its backing file. 2424 * 2425 * active is the current topmost image. 2426 * 2427 * Returns NULL if bs is not found in active's image chain, 2428 * or if active == bs. 2429 */ 2430 BlockDriverState *bdrv_find_overlay(BlockDriverState *active, 2431 BlockDriverState *bs) 2432 { 2433 BlockDriverState *overlay = NULL; 2434 BlockDriverState *intermediate; 2435 2436 assert(active != NULL); 2437 assert(bs != NULL); 2438 2439 /* if bs is the same as active, then by definition it has no overlay 2440 */ 2441 if (active == bs) { 2442 return NULL; 2443 } 2444 2445 intermediate = active; 2446 while (intermediate->backing_hd) { 2447 if (intermediate->backing_hd == bs) { 2448 overlay = intermediate; 2449 break; 2450 } 2451 intermediate = intermediate->backing_hd; 2452 } 2453 2454 return overlay; 2455 } 2456 2457 typedef struct BlkIntermediateStates { 2458 BlockDriverState *bs; 2459 QSIMPLEQ_ENTRY(BlkIntermediateStates) entry; 2460 } BlkIntermediateStates; 2461 2462 2463 /* 2464 * Drops images above 'base' up to and including 'top', and sets the image 2465 * above 'top' to have base as its backing file. 2466 * 2467 * Requires that the overlay to 'top' is opened r/w, so that the backing file 2468 * information in 'bs' can be properly updated. 2469 * 2470 * E.g., this will convert the following chain: 2471 * bottom <- base <- intermediate <- top <- active 2472 * 2473 * to 2474 * 2475 * bottom <- base <- active 2476 * 2477 * It is allowed for bottom==base, in which case it converts: 2478 * 2479 * base <- intermediate <- top <- active 2480 * 2481 * to 2482 * 2483 * base <- active 2484 * 2485 * Error conditions: 2486 * if active == top, that is considered an error 2487 * 2488 */ 2489 int bdrv_drop_intermediate(BlockDriverState *active, BlockDriverState *top, 2490 BlockDriverState *base) 2491 { 2492 BlockDriverState *intermediate; 2493 BlockDriverState *base_bs = NULL; 2494 BlockDriverState *new_top_bs = NULL; 2495 BlkIntermediateStates *intermediate_state, *next; 2496 int ret = -EIO; 2497 2498 QSIMPLEQ_HEAD(states_to_delete, BlkIntermediateStates) states_to_delete; 2499 QSIMPLEQ_INIT(&states_to_delete); 2500 2501 if (!top->drv || !base->drv) { 2502 goto exit; 2503 } 2504 2505 new_top_bs = bdrv_find_overlay(active, top); 2506 2507 if (new_top_bs == NULL) { 2508 /* we could not find the image above 'top', this is an error */ 2509 goto exit; 2510 } 2511 2512 /* special case of new_top_bs->backing_hd already pointing to base - nothing 2513 * to do, no intermediate images */ 2514 if (new_top_bs->backing_hd == base) { 2515 ret = 0; 2516 goto exit; 2517 } 2518 2519 intermediate = top; 2520 2521 /* now we will go down through the list, and add each BDS we find 2522 * into our deletion queue, until we hit the 'base' 2523 */ 2524 while (intermediate) { 2525 intermediate_state = g_malloc0(sizeof(BlkIntermediateStates)); 2526 intermediate_state->bs = intermediate; 2527 QSIMPLEQ_INSERT_TAIL(&states_to_delete, intermediate_state, entry); 2528 2529 if (intermediate->backing_hd == base) { 2530 base_bs = intermediate->backing_hd; 2531 break; 2532 } 2533 intermediate = intermediate->backing_hd; 2534 } 2535 if (base_bs == NULL) { 2536 /* something went wrong, we did not end at the base. safely 2537 * unravel everything, and exit with error */ 2538 goto exit; 2539 } 2540 2541 /* success - we can delete the intermediate states, and link top->base */ 2542 ret = bdrv_change_backing_file(new_top_bs, base_bs->filename, 2543 base_bs->drv ? base_bs->drv->format_name : ""); 2544 if (ret) { 2545 goto exit; 2546 } 2547 new_top_bs->backing_hd = base_bs; 2548 2549 bdrv_refresh_limits(new_top_bs); 2550 2551 QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) { 2552 /* so that bdrv_close() does not recursively close the chain */ 2553 intermediate_state->bs->backing_hd = NULL; 2554 bdrv_unref(intermediate_state->bs); 2555 } 2556 ret = 0; 2557 2558 exit: 2559 QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) { 2560 g_free(intermediate_state); 2561 } 2562 return ret; 2563 } 2564 2565 2566 static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset, 2567 size_t size) 2568 { 2569 int64_t len; 2570 2571 if (!bdrv_is_inserted(bs)) 2572 return -ENOMEDIUM; 2573 2574 if (bs->growable) 2575 return 0; 2576 2577 len = bdrv_getlength(bs); 2578 2579 if (offset < 0) 2580 return -EIO; 2581 2582 if ((offset > len) || (len - offset < size)) 2583 return -EIO; 2584 2585 return 0; 2586 } 2587 2588 static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num, 2589 int nb_sectors) 2590 { 2591 return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE, 2592 nb_sectors * BDRV_SECTOR_SIZE); 2593 } 2594 2595 typedef struct RwCo { 2596 BlockDriverState *bs; 2597 int64_t offset; 2598 QEMUIOVector *qiov; 2599 bool is_write; 2600 int ret; 2601 BdrvRequestFlags flags; 2602 } RwCo; 2603 2604 static void coroutine_fn bdrv_rw_co_entry(void *opaque) 2605 { 2606 RwCo *rwco = opaque; 2607 2608 if (!rwco->is_write) { 2609 rwco->ret = bdrv_co_do_preadv(rwco->bs, rwco->offset, 2610 rwco->qiov->size, rwco->qiov, 2611 rwco->flags); 2612 } else { 2613 rwco->ret = bdrv_co_do_pwritev(rwco->bs, rwco->offset, 2614 rwco->qiov->size, rwco->qiov, 2615 rwco->flags); 2616 } 2617 } 2618 2619 /* 2620 * Process a vectored synchronous request using coroutines 2621 */ 2622 static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset, 2623 QEMUIOVector *qiov, bool is_write, 2624 BdrvRequestFlags flags) 2625 { 2626 Coroutine *co; 2627 RwCo rwco = { 2628 .bs = bs, 2629 .offset = offset, 2630 .qiov = qiov, 2631 .is_write = is_write, 2632 .ret = NOT_DONE, 2633 .flags = flags, 2634 }; 2635 2636 /** 2637 * In sync call context, when the vcpu is blocked, this throttling timer 2638 * will not fire; so the I/O throttling function has to be disabled here 2639 * if it has been enabled. 2640 */ 2641 if (bs->io_limits_enabled) { 2642 fprintf(stderr, "Disabling I/O throttling on '%s' due " 2643 "to synchronous I/O.\n", bdrv_get_device_name(bs)); 2644 bdrv_io_limits_disable(bs); 2645 } 2646 2647 if (qemu_in_coroutine()) { 2648 /* Fast-path if already in coroutine context */ 2649 bdrv_rw_co_entry(&rwco); 2650 } else { 2651 co = qemu_coroutine_create(bdrv_rw_co_entry); 2652 qemu_coroutine_enter(co, &rwco); 2653 while (rwco.ret == NOT_DONE) { 2654 qemu_aio_wait(); 2655 } 2656 } 2657 return rwco.ret; 2658 } 2659 2660 /* 2661 * Process a synchronous request using coroutines 2662 */ 2663 static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf, 2664 int nb_sectors, bool is_write, BdrvRequestFlags flags) 2665 { 2666 QEMUIOVector qiov; 2667 struct iovec iov = { 2668 .iov_base = (void *)buf, 2669 .iov_len = nb_sectors * BDRV_SECTOR_SIZE, 2670 }; 2671 2672 qemu_iovec_init_external(&qiov, &iov, 1); 2673 return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS, 2674 &qiov, is_write, flags); 2675 } 2676 2677 /* return < 0 if error. See bdrv_write() for the return codes */ 2678 int bdrv_read(BlockDriverState *bs, int64_t sector_num, 2679 uint8_t *buf, int nb_sectors) 2680 { 2681 return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0); 2682 } 2683 2684 /* Just like bdrv_read(), but with I/O throttling temporarily disabled */ 2685 int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num, 2686 uint8_t *buf, int nb_sectors) 2687 { 2688 bool enabled; 2689 int ret; 2690 2691 enabled = bs->io_limits_enabled; 2692 bs->io_limits_enabled = false; 2693 ret = bdrv_read(bs, sector_num, buf, nb_sectors); 2694 bs->io_limits_enabled = enabled; 2695 return ret; 2696 } 2697 2698 /* Return < 0 if error. Important errors are: 2699 -EIO generic I/O error (may happen for all errors) 2700 -ENOMEDIUM No media inserted. 2701 -EINVAL Invalid sector number or nb_sectors 2702 -EACCES Trying to write a read-only device 2703 */ 2704 int bdrv_write(BlockDriverState *bs, int64_t sector_num, 2705 const uint8_t *buf, int nb_sectors) 2706 { 2707 return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0); 2708 } 2709 2710 int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num, 2711 int nb_sectors, BdrvRequestFlags flags) 2712 { 2713 return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true, 2714 BDRV_REQ_ZERO_WRITE | flags); 2715 } 2716 2717 /* 2718 * Completely zero out a block device with the help of bdrv_write_zeroes. 2719 * The operation is sped up by checking the block status and only writing 2720 * zeroes to the device if they currently do not return zeroes. Optional 2721 * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP). 2722 * 2723 * Returns < 0 on error, 0 on success. For error codes see bdrv_write(). 2724 */ 2725 int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags) 2726 { 2727 int64_t target_size = bdrv_getlength(bs) / BDRV_SECTOR_SIZE; 2728 int64_t ret, nb_sectors, sector_num = 0; 2729 int n; 2730 2731 for (;;) { 2732 nb_sectors = target_size - sector_num; 2733 if (nb_sectors <= 0) { 2734 return 0; 2735 } 2736 if (nb_sectors > INT_MAX) { 2737 nb_sectors = INT_MAX; 2738 } 2739 ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n); 2740 if (ret < 0) { 2741 error_report("error getting block status at sector %" PRId64 ": %s", 2742 sector_num, strerror(-ret)); 2743 return ret; 2744 } 2745 if (ret & BDRV_BLOCK_ZERO) { 2746 sector_num += n; 2747 continue; 2748 } 2749 ret = bdrv_write_zeroes(bs, sector_num, n, flags); 2750 if (ret < 0) { 2751 error_report("error writing zeroes at sector %" PRId64 ": %s", 2752 sector_num, strerror(-ret)); 2753 return ret; 2754 } 2755 sector_num += n; 2756 } 2757 } 2758 2759 int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes) 2760 { 2761 QEMUIOVector qiov; 2762 struct iovec iov = { 2763 .iov_base = (void *)buf, 2764 .iov_len = bytes, 2765 }; 2766 int ret; 2767 2768 if (bytes < 0) { 2769 return -EINVAL; 2770 } 2771 2772 qemu_iovec_init_external(&qiov, &iov, 1); 2773 ret = bdrv_prwv_co(bs, offset, &qiov, false, 0); 2774 if (ret < 0) { 2775 return ret; 2776 } 2777 2778 return bytes; 2779 } 2780 2781 int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov) 2782 { 2783 int ret; 2784 2785 ret = bdrv_prwv_co(bs, offset, qiov, true, 0); 2786 if (ret < 0) { 2787 return ret; 2788 } 2789 2790 return qiov->size; 2791 } 2792 2793 int bdrv_pwrite(BlockDriverState *bs, int64_t offset, 2794 const void *buf, int bytes) 2795 { 2796 QEMUIOVector qiov; 2797 struct iovec iov = { 2798 .iov_base = (void *) buf, 2799 .iov_len = bytes, 2800 }; 2801 2802 if (bytes < 0) { 2803 return -EINVAL; 2804 } 2805 2806 qemu_iovec_init_external(&qiov, &iov, 1); 2807 return bdrv_pwritev(bs, offset, &qiov); 2808 } 2809 2810 /* 2811 * Writes to the file and ensures that no writes are reordered across this 2812 * request (acts as a barrier) 2813 * 2814 * Returns 0 on success, -errno in error cases. 2815 */ 2816 int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset, 2817 const void *buf, int count) 2818 { 2819 int ret; 2820 2821 ret = bdrv_pwrite(bs, offset, buf, count); 2822 if (ret < 0) { 2823 return ret; 2824 } 2825 2826 /* No flush needed for cache modes that already do it */ 2827 if (bs->enable_write_cache) { 2828 bdrv_flush(bs); 2829 } 2830 2831 return 0; 2832 } 2833 2834 static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs, 2835 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov) 2836 { 2837 /* Perform I/O through a temporary buffer so that users who scribble over 2838 * their read buffer while the operation is in progress do not end up 2839 * modifying the image file. This is critical for zero-copy guest I/O 2840 * where anything might happen inside guest memory. 2841 */ 2842 void *bounce_buffer; 2843 2844 BlockDriver *drv = bs->drv; 2845 struct iovec iov; 2846 QEMUIOVector bounce_qiov; 2847 int64_t cluster_sector_num; 2848 int cluster_nb_sectors; 2849 size_t skip_bytes; 2850 int ret; 2851 2852 /* Cover entire cluster so no additional backing file I/O is required when 2853 * allocating cluster in the image file. 2854 */ 2855 bdrv_round_to_clusters(bs, sector_num, nb_sectors, 2856 &cluster_sector_num, &cluster_nb_sectors); 2857 2858 trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, 2859 cluster_sector_num, cluster_nb_sectors); 2860 2861 iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE; 2862 iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len); 2863 qemu_iovec_init_external(&bounce_qiov, &iov, 1); 2864 2865 ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors, 2866 &bounce_qiov); 2867 if (ret < 0) { 2868 goto err; 2869 } 2870 2871 if (drv->bdrv_co_write_zeroes && 2872 buffer_is_zero(bounce_buffer, iov.iov_len)) { 2873 ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num, 2874 cluster_nb_sectors, 0); 2875 } else { 2876 /* This does not change the data on the disk, it is not necessary 2877 * to flush even in cache=writethrough mode. 2878 */ 2879 ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors, 2880 &bounce_qiov); 2881 } 2882 2883 if (ret < 0) { 2884 /* It might be okay to ignore write errors for guest requests. If this 2885 * is a deliberate copy-on-read then we don't want to ignore the error. 2886 * Simply report it in all cases. 2887 */ 2888 goto err; 2889 } 2890 2891 skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE; 2892 qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes, 2893 nb_sectors * BDRV_SECTOR_SIZE); 2894 2895 err: 2896 qemu_vfree(bounce_buffer); 2897 return ret; 2898 } 2899 2900 /* 2901 * Forwards an already correctly aligned request to the BlockDriver. This 2902 * handles copy on read and zeroing after EOF; any other features must be 2903 * implemented by the caller. 2904 */ 2905 static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs, 2906 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes, 2907 int64_t align, QEMUIOVector *qiov, int flags) 2908 { 2909 BlockDriver *drv = bs->drv; 2910 int ret; 2911 2912 int64_t sector_num = offset >> BDRV_SECTOR_BITS; 2913 unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS; 2914 2915 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0); 2916 assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0); 2917 2918 /* Handle Copy on Read and associated serialisation */ 2919 if (flags & BDRV_REQ_COPY_ON_READ) { 2920 /* If we touch the same cluster it counts as an overlap. This 2921 * guarantees that allocating writes will be serialized and not race 2922 * with each other for the same cluster. For example, in copy-on-read 2923 * it ensures that the CoR read and write operations are atomic and 2924 * guest writes cannot interleave between them. */ 2925 mark_request_serialising(req, bdrv_get_cluster_size(bs)); 2926 } 2927 2928 wait_serialising_requests(req); 2929 2930 if (flags & BDRV_REQ_COPY_ON_READ) { 2931 int pnum; 2932 2933 ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum); 2934 if (ret < 0) { 2935 goto out; 2936 } 2937 2938 if (!ret || pnum != nb_sectors) { 2939 ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov); 2940 goto out; 2941 } 2942 } 2943 2944 /* Forward the request to the BlockDriver */ 2945 if (!(bs->zero_beyond_eof && bs->growable)) { 2946 ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov); 2947 } else { 2948 /* Read zeros after EOF of growable BDSes */ 2949 int64_t len, total_sectors, max_nb_sectors; 2950 2951 len = bdrv_getlength(bs); 2952 if (len < 0) { 2953 ret = len; 2954 goto out; 2955 } 2956 2957 total_sectors = DIV_ROUND_UP(len, BDRV_SECTOR_SIZE); 2958 max_nb_sectors = ROUND_UP(MAX(0, total_sectors - sector_num), 2959 align >> BDRV_SECTOR_BITS); 2960 if (max_nb_sectors > 0) { 2961 ret = drv->bdrv_co_readv(bs, sector_num, 2962 MIN(nb_sectors, max_nb_sectors), qiov); 2963 } else { 2964 ret = 0; 2965 } 2966 2967 /* Reading beyond end of file is supposed to produce zeroes */ 2968 if (ret == 0 && total_sectors < sector_num + nb_sectors) { 2969 uint64_t offset = MAX(0, total_sectors - sector_num); 2970 uint64_t bytes = (sector_num + nb_sectors - offset) * 2971 BDRV_SECTOR_SIZE; 2972 qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes); 2973 } 2974 } 2975 2976 out: 2977 return ret; 2978 } 2979 2980 /* 2981 * Handle a read request in coroutine context 2982 */ 2983 static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs, 2984 int64_t offset, unsigned int bytes, QEMUIOVector *qiov, 2985 BdrvRequestFlags flags) 2986 { 2987 BlockDriver *drv = bs->drv; 2988 BdrvTrackedRequest req; 2989 2990 /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */ 2991 uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment); 2992 uint8_t *head_buf = NULL; 2993 uint8_t *tail_buf = NULL; 2994 QEMUIOVector local_qiov; 2995 bool use_local_qiov = false; 2996 int ret; 2997 2998 if (!drv) { 2999 return -ENOMEDIUM; 3000 } 3001 if (bdrv_check_byte_request(bs, offset, bytes)) { 3002 return -EIO; 3003 } 3004 3005 if (bs->copy_on_read) { 3006 flags |= BDRV_REQ_COPY_ON_READ; 3007 } 3008 3009 /* throttling disk I/O */ 3010 if (bs->io_limits_enabled) { 3011 bdrv_io_limits_intercept(bs, bytes, false); 3012 } 3013 3014 /* Align read if necessary by padding qiov */ 3015 if (offset & (align - 1)) { 3016 head_buf = qemu_blockalign(bs, align); 3017 qemu_iovec_init(&local_qiov, qiov->niov + 2); 3018 qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1)); 3019 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); 3020 use_local_qiov = true; 3021 3022 bytes += offset & (align - 1); 3023 offset = offset & ~(align - 1); 3024 } 3025 3026 if ((offset + bytes) & (align - 1)) { 3027 if (!use_local_qiov) { 3028 qemu_iovec_init(&local_qiov, qiov->niov + 1); 3029 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); 3030 use_local_qiov = true; 3031 } 3032 tail_buf = qemu_blockalign(bs, align); 3033 qemu_iovec_add(&local_qiov, tail_buf, 3034 align - ((offset + bytes) & (align - 1))); 3035 3036 bytes = ROUND_UP(bytes, align); 3037 } 3038 3039 tracked_request_begin(&req, bs, offset, bytes, false); 3040 ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align, 3041 use_local_qiov ? &local_qiov : qiov, 3042 flags); 3043 tracked_request_end(&req); 3044 3045 if (use_local_qiov) { 3046 qemu_iovec_destroy(&local_qiov); 3047 qemu_vfree(head_buf); 3048 qemu_vfree(tail_buf); 3049 } 3050 3051 return ret; 3052 } 3053 3054 static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs, 3055 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, 3056 BdrvRequestFlags flags) 3057 { 3058 if (nb_sectors < 0 || nb_sectors > (UINT_MAX >> BDRV_SECTOR_BITS)) { 3059 return -EINVAL; 3060 } 3061 3062 return bdrv_co_do_preadv(bs, sector_num << BDRV_SECTOR_BITS, 3063 nb_sectors << BDRV_SECTOR_BITS, qiov, flags); 3064 } 3065 3066 int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num, 3067 int nb_sectors, QEMUIOVector *qiov) 3068 { 3069 trace_bdrv_co_readv(bs, sector_num, nb_sectors); 3070 3071 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0); 3072 } 3073 3074 int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs, 3075 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov) 3076 { 3077 trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors); 3078 3079 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 3080 BDRV_REQ_COPY_ON_READ); 3081 } 3082 3083 /* if no limit is specified in the BlockLimits use a default 3084 * of 32768 512-byte sectors (16 MiB) per request. 3085 */ 3086 #define MAX_WRITE_ZEROES_DEFAULT 32768 3087 3088 static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs, 3089 int64_t sector_num, int nb_sectors, BdrvRequestFlags flags) 3090 { 3091 BlockDriver *drv = bs->drv; 3092 QEMUIOVector qiov; 3093 struct iovec iov = {0}; 3094 int ret = 0; 3095 3096 int max_write_zeroes = bs->bl.max_write_zeroes ? 3097 bs->bl.max_write_zeroes : MAX_WRITE_ZEROES_DEFAULT; 3098 3099 while (nb_sectors > 0 && !ret) { 3100 int num = nb_sectors; 3101 3102 /* Align request. Block drivers can expect the "bulk" of the request 3103 * to be aligned. 3104 */ 3105 if (bs->bl.write_zeroes_alignment 3106 && num > bs->bl.write_zeroes_alignment) { 3107 if (sector_num % bs->bl.write_zeroes_alignment != 0) { 3108 /* Make a small request up to the first aligned sector. */ 3109 num = bs->bl.write_zeroes_alignment; 3110 num -= sector_num % bs->bl.write_zeroes_alignment; 3111 } else if ((sector_num + num) % bs->bl.write_zeroes_alignment != 0) { 3112 /* Shorten the request to the last aligned sector. num cannot 3113 * underflow because num > bs->bl.write_zeroes_alignment. 3114 */ 3115 num -= (sector_num + num) % bs->bl.write_zeroes_alignment; 3116 } 3117 } 3118 3119 /* limit request size */ 3120 if (num > max_write_zeroes) { 3121 num = max_write_zeroes; 3122 } 3123 3124 ret = -ENOTSUP; 3125 /* First try the efficient write zeroes operation */ 3126 if (drv->bdrv_co_write_zeroes) { 3127 ret = drv->bdrv_co_write_zeroes(bs, sector_num, num, flags); 3128 } 3129 3130 if (ret == -ENOTSUP) { 3131 /* Fall back to bounce buffer if write zeroes is unsupported */ 3132 iov.iov_len = num * BDRV_SECTOR_SIZE; 3133 if (iov.iov_base == NULL) { 3134 iov.iov_base = qemu_blockalign(bs, num * BDRV_SECTOR_SIZE); 3135 memset(iov.iov_base, 0, num * BDRV_SECTOR_SIZE); 3136 } 3137 qemu_iovec_init_external(&qiov, &iov, 1); 3138 3139 ret = drv->bdrv_co_writev(bs, sector_num, num, &qiov); 3140 3141 /* Keep bounce buffer around if it is big enough for all 3142 * all future requests. 3143 */ 3144 if (num < max_write_zeroes) { 3145 qemu_vfree(iov.iov_base); 3146 iov.iov_base = NULL; 3147 } 3148 } 3149 3150 sector_num += num; 3151 nb_sectors -= num; 3152 } 3153 3154 qemu_vfree(iov.iov_base); 3155 return ret; 3156 } 3157 3158 /* 3159 * Forwards an already correctly aligned write request to the BlockDriver. 3160 */ 3161 static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs, 3162 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes, 3163 QEMUIOVector *qiov, int flags) 3164 { 3165 BlockDriver *drv = bs->drv; 3166 bool waited; 3167 int ret; 3168 3169 int64_t sector_num = offset >> BDRV_SECTOR_BITS; 3170 unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS; 3171 3172 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0); 3173 assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0); 3174 3175 waited = wait_serialising_requests(req); 3176 assert(!waited || !req->serialising); 3177 assert(req->overlap_offset <= offset); 3178 assert(offset + bytes <= req->overlap_offset + req->overlap_bytes); 3179 3180 ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req); 3181 3182 if (ret < 0) { 3183 /* Do nothing, write notifier decided to fail this request */ 3184 } else if (flags & BDRV_REQ_ZERO_WRITE) { 3185 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_ZERO); 3186 ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors, flags); 3187 } else { 3188 BLKDBG_EVENT(bs, BLKDBG_PWRITEV); 3189 ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov); 3190 } 3191 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_DONE); 3192 3193 if (ret == 0 && !bs->enable_write_cache) { 3194 ret = bdrv_co_flush(bs); 3195 } 3196 3197 bdrv_set_dirty(bs, sector_num, nb_sectors); 3198 3199 if (bs->wr_highest_sector < sector_num + nb_sectors - 1) { 3200 bs->wr_highest_sector = sector_num + nb_sectors - 1; 3201 } 3202 if (bs->growable && ret >= 0) { 3203 bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors); 3204 } 3205 3206 return ret; 3207 } 3208 3209 /* 3210 * Handle a write request in coroutine context 3211 */ 3212 static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs, 3213 int64_t offset, unsigned int bytes, QEMUIOVector *qiov, 3214 BdrvRequestFlags flags) 3215 { 3216 BdrvTrackedRequest req; 3217 /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */ 3218 uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment); 3219 uint8_t *head_buf = NULL; 3220 uint8_t *tail_buf = NULL; 3221 QEMUIOVector local_qiov; 3222 bool use_local_qiov = false; 3223 int ret; 3224 3225 if (!bs->drv) { 3226 return -ENOMEDIUM; 3227 } 3228 if (bs->read_only) { 3229 return -EACCES; 3230 } 3231 if (bdrv_check_byte_request(bs, offset, bytes)) { 3232 return -EIO; 3233 } 3234 3235 /* throttling disk I/O */ 3236 if (bs->io_limits_enabled) { 3237 bdrv_io_limits_intercept(bs, bytes, true); 3238 } 3239 3240 /* 3241 * Align write if necessary by performing a read-modify-write cycle. 3242 * Pad qiov with the read parts and be sure to have a tracked request not 3243 * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle. 3244 */ 3245 tracked_request_begin(&req, bs, offset, bytes, true); 3246 3247 if (offset & (align - 1)) { 3248 QEMUIOVector head_qiov; 3249 struct iovec head_iov; 3250 3251 mark_request_serialising(&req, align); 3252 wait_serialising_requests(&req); 3253 3254 head_buf = qemu_blockalign(bs, align); 3255 head_iov = (struct iovec) { 3256 .iov_base = head_buf, 3257 .iov_len = align, 3258 }; 3259 qemu_iovec_init_external(&head_qiov, &head_iov, 1); 3260 3261 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_HEAD); 3262 ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align, 3263 align, &head_qiov, 0); 3264 if (ret < 0) { 3265 goto fail; 3266 } 3267 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD); 3268 3269 qemu_iovec_init(&local_qiov, qiov->niov + 2); 3270 qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1)); 3271 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); 3272 use_local_qiov = true; 3273 3274 bytes += offset & (align - 1); 3275 offset = offset & ~(align - 1); 3276 } 3277 3278 if ((offset + bytes) & (align - 1)) { 3279 QEMUIOVector tail_qiov; 3280 struct iovec tail_iov; 3281 size_t tail_bytes; 3282 bool waited; 3283 3284 mark_request_serialising(&req, align); 3285 waited = wait_serialising_requests(&req); 3286 assert(!waited || !use_local_qiov); 3287 3288 tail_buf = qemu_blockalign(bs, align); 3289 tail_iov = (struct iovec) { 3290 .iov_base = tail_buf, 3291 .iov_len = align, 3292 }; 3293 qemu_iovec_init_external(&tail_qiov, &tail_iov, 1); 3294 3295 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_TAIL); 3296 ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align, 3297 align, &tail_qiov, 0); 3298 if (ret < 0) { 3299 goto fail; 3300 } 3301 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL); 3302 3303 if (!use_local_qiov) { 3304 qemu_iovec_init(&local_qiov, qiov->niov + 1); 3305 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); 3306 use_local_qiov = true; 3307 } 3308 3309 tail_bytes = (offset + bytes) & (align - 1); 3310 qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes); 3311 3312 bytes = ROUND_UP(bytes, align); 3313 } 3314 3315 ret = bdrv_aligned_pwritev(bs, &req, offset, bytes, 3316 use_local_qiov ? &local_qiov : qiov, 3317 flags); 3318 3319 fail: 3320 tracked_request_end(&req); 3321 3322 if (use_local_qiov) { 3323 qemu_iovec_destroy(&local_qiov); 3324 } 3325 qemu_vfree(head_buf); 3326 qemu_vfree(tail_buf); 3327 3328 return ret; 3329 } 3330 3331 static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs, 3332 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, 3333 BdrvRequestFlags flags) 3334 { 3335 if (nb_sectors < 0 || nb_sectors > (INT_MAX >> BDRV_SECTOR_BITS)) { 3336 return -EINVAL; 3337 } 3338 3339 return bdrv_co_do_pwritev(bs, sector_num << BDRV_SECTOR_BITS, 3340 nb_sectors << BDRV_SECTOR_BITS, qiov, flags); 3341 } 3342 3343 int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num, 3344 int nb_sectors, QEMUIOVector *qiov) 3345 { 3346 trace_bdrv_co_writev(bs, sector_num, nb_sectors); 3347 3348 return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0); 3349 } 3350 3351 int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs, 3352 int64_t sector_num, int nb_sectors, 3353 BdrvRequestFlags flags) 3354 { 3355 trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors, flags); 3356 3357 if (!(bs->open_flags & BDRV_O_UNMAP)) { 3358 flags &= ~BDRV_REQ_MAY_UNMAP; 3359 } 3360 3361 return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL, 3362 BDRV_REQ_ZERO_WRITE | flags); 3363 } 3364 3365 /** 3366 * Truncate file to 'offset' bytes (needed only for file protocols) 3367 */ 3368 int bdrv_truncate(BlockDriverState *bs, int64_t offset) 3369 { 3370 BlockDriver *drv = bs->drv; 3371 int ret; 3372 if (!drv) 3373 return -ENOMEDIUM; 3374 if (!drv->bdrv_truncate) 3375 return -ENOTSUP; 3376 if (bs->read_only) 3377 return -EACCES; 3378 if (bdrv_in_use(bs)) 3379 return -EBUSY; 3380 ret = drv->bdrv_truncate(bs, offset); 3381 if (ret == 0) { 3382 ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS); 3383 bdrv_dev_resize_cb(bs); 3384 } 3385 return ret; 3386 } 3387 3388 /** 3389 * Length of a allocated file in bytes. Sparse files are counted by actual 3390 * allocated space. Return < 0 if error or unknown. 3391 */ 3392 int64_t bdrv_get_allocated_file_size(BlockDriverState *bs) 3393 { 3394 BlockDriver *drv = bs->drv; 3395 if (!drv) { 3396 return -ENOMEDIUM; 3397 } 3398 if (drv->bdrv_get_allocated_file_size) { 3399 return drv->bdrv_get_allocated_file_size(bs); 3400 } 3401 if (bs->file) { 3402 return bdrv_get_allocated_file_size(bs->file); 3403 } 3404 return -ENOTSUP; 3405 } 3406 3407 /** 3408 * Length of a file in bytes. Return < 0 if error or unknown. 3409 */ 3410 int64_t bdrv_getlength(BlockDriverState *bs) 3411 { 3412 BlockDriver *drv = bs->drv; 3413 if (!drv) 3414 return -ENOMEDIUM; 3415 3416 if (drv->has_variable_length) { 3417 int ret = refresh_total_sectors(bs, bs->total_sectors); 3418 if (ret < 0) { 3419 return ret; 3420 } 3421 } 3422 return bs->total_sectors * BDRV_SECTOR_SIZE; 3423 } 3424 3425 /* return 0 as number of sectors if no device present or error */ 3426 void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr) 3427 { 3428 int64_t length; 3429 length = bdrv_getlength(bs); 3430 if (length < 0) 3431 length = 0; 3432 else 3433 length = length >> BDRV_SECTOR_BITS; 3434 *nb_sectors_ptr = length; 3435 } 3436 3437 void bdrv_set_on_error(BlockDriverState *bs, BlockdevOnError on_read_error, 3438 BlockdevOnError on_write_error) 3439 { 3440 bs->on_read_error = on_read_error; 3441 bs->on_write_error = on_write_error; 3442 } 3443 3444 BlockdevOnError bdrv_get_on_error(BlockDriverState *bs, bool is_read) 3445 { 3446 return is_read ? bs->on_read_error : bs->on_write_error; 3447 } 3448 3449 BlockErrorAction bdrv_get_error_action(BlockDriverState *bs, bool is_read, int error) 3450 { 3451 BlockdevOnError on_err = is_read ? bs->on_read_error : bs->on_write_error; 3452 3453 switch (on_err) { 3454 case BLOCKDEV_ON_ERROR_ENOSPC: 3455 return (error == ENOSPC) ? BDRV_ACTION_STOP : BDRV_ACTION_REPORT; 3456 case BLOCKDEV_ON_ERROR_STOP: 3457 return BDRV_ACTION_STOP; 3458 case BLOCKDEV_ON_ERROR_REPORT: 3459 return BDRV_ACTION_REPORT; 3460 case BLOCKDEV_ON_ERROR_IGNORE: 3461 return BDRV_ACTION_IGNORE; 3462 default: 3463 abort(); 3464 } 3465 } 3466 3467 /* This is done by device models because, while the block layer knows 3468 * about the error, it does not know whether an operation comes from 3469 * the device or the block layer (from a job, for example). 3470 */ 3471 void bdrv_error_action(BlockDriverState *bs, BlockErrorAction action, 3472 bool is_read, int error) 3473 { 3474 assert(error >= 0); 3475 bdrv_emit_qmp_error_event(bs, QEVENT_BLOCK_IO_ERROR, action, is_read); 3476 if (action == BDRV_ACTION_STOP) { 3477 vm_stop(RUN_STATE_IO_ERROR); 3478 bdrv_iostatus_set_err(bs, error); 3479 } 3480 } 3481 3482 int bdrv_is_read_only(BlockDriverState *bs) 3483 { 3484 return bs->read_only; 3485 } 3486 3487 int bdrv_is_sg(BlockDriverState *bs) 3488 { 3489 return bs->sg; 3490 } 3491 3492 int bdrv_enable_write_cache(BlockDriverState *bs) 3493 { 3494 return bs->enable_write_cache; 3495 } 3496 3497 void bdrv_set_enable_write_cache(BlockDriverState *bs, bool wce) 3498 { 3499 bs->enable_write_cache = wce; 3500 3501 /* so a reopen() will preserve wce */ 3502 if (wce) { 3503 bs->open_flags |= BDRV_O_CACHE_WB; 3504 } else { 3505 bs->open_flags &= ~BDRV_O_CACHE_WB; 3506 } 3507 } 3508 3509 int bdrv_is_encrypted(BlockDriverState *bs) 3510 { 3511 if (bs->backing_hd && bs->backing_hd->encrypted) 3512 return 1; 3513 return bs->encrypted; 3514 } 3515 3516 int bdrv_key_required(BlockDriverState *bs) 3517 { 3518 BlockDriverState *backing_hd = bs->backing_hd; 3519 3520 if (backing_hd && backing_hd->encrypted && !backing_hd->valid_key) 3521 return 1; 3522 return (bs->encrypted && !bs->valid_key); 3523 } 3524 3525 int bdrv_set_key(BlockDriverState *bs, const char *key) 3526 { 3527 int ret; 3528 if (bs->backing_hd && bs->backing_hd->encrypted) { 3529 ret = bdrv_set_key(bs->backing_hd, key); 3530 if (ret < 0) 3531 return ret; 3532 if (!bs->encrypted) 3533 return 0; 3534 } 3535 if (!bs->encrypted) { 3536 return -EINVAL; 3537 } else if (!bs->drv || !bs->drv->bdrv_set_key) { 3538 return -ENOMEDIUM; 3539 } 3540 ret = bs->drv->bdrv_set_key(bs, key); 3541 if (ret < 0) { 3542 bs->valid_key = 0; 3543 } else if (!bs->valid_key) { 3544 bs->valid_key = 1; 3545 /* call the change callback now, we skipped it on open */ 3546 bdrv_dev_change_media_cb(bs, true); 3547 } 3548 return ret; 3549 } 3550 3551 const char *bdrv_get_format_name(BlockDriverState *bs) 3552 { 3553 return bs->drv ? bs->drv->format_name : NULL; 3554 } 3555 3556 void bdrv_iterate_format(void (*it)(void *opaque, const char *name), 3557 void *opaque) 3558 { 3559 BlockDriver *drv; 3560 3561 QLIST_FOREACH(drv, &bdrv_drivers, list) { 3562 it(opaque, drv->format_name); 3563 } 3564 } 3565 3566 /* This function is to find block backend bs */ 3567 BlockDriverState *bdrv_find(const char *name) 3568 { 3569 BlockDriverState *bs; 3570 3571 QTAILQ_FOREACH(bs, &bdrv_states, device_list) { 3572 if (!strcmp(name, bs->device_name)) { 3573 return bs; 3574 } 3575 } 3576 return NULL; 3577 } 3578 3579 /* This function is to find a node in the bs graph */ 3580 BlockDriverState *bdrv_find_node(const char *node_name) 3581 { 3582 BlockDriverState *bs; 3583 3584 assert(node_name); 3585 3586 QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) { 3587 if (!strcmp(node_name, bs->node_name)) { 3588 return bs; 3589 } 3590 } 3591 return NULL; 3592 } 3593 3594 /* Put this QMP function here so it can access the static graph_bdrv_states. */ 3595 BlockDeviceInfoList *bdrv_named_nodes_list(void) 3596 { 3597 BlockDeviceInfoList *list, *entry; 3598 BlockDriverState *bs; 3599 3600 list = NULL; 3601 QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) { 3602 entry = g_malloc0(sizeof(*entry)); 3603 entry->value = bdrv_block_device_info(bs); 3604 entry->next = list; 3605 list = entry; 3606 } 3607 3608 return list; 3609 } 3610 3611 BlockDriverState *bdrv_lookup_bs(const char *device, 3612 const char *node_name, 3613 Error **errp) 3614 { 3615 BlockDriverState *bs = NULL; 3616 3617 if (device) { 3618 bs = bdrv_find(device); 3619 3620 if (bs) { 3621 return bs; 3622 } 3623 } 3624 3625 if (node_name) { 3626 bs = bdrv_find_node(node_name); 3627 3628 if (bs) { 3629 return bs; 3630 } 3631 } 3632 3633 error_setg(errp, "Cannot find device=%s nor node_name=%s", 3634 device ? device : "", 3635 node_name ? node_name : ""); 3636 return NULL; 3637 } 3638 3639 BlockDriverState *bdrv_next(BlockDriverState *bs) 3640 { 3641 if (!bs) { 3642 return QTAILQ_FIRST(&bdrv_states); 3643 } 3644 return QTAILQ_NEXT(bs, device_list); 3645 } 3646 3647 void bdrv_iterate(void (*it)(void *opaque, BlockDriverState *bs), void *opaque) 3648 { 3649 BlockDriverState *bs; 3650 3651 QTAILQ_FOREACH(bs, &bdrv_states, device_list) { 3652 it(opaque, bs); 3653 } 3654 } 3655 3656 const char *bdrv_get_device_name(BlockDriverState *bs) 3657 { 3658 return bs->device_name; 3659 } 3660 3661 int bdrv_get_flags(BlockDriverState *bs) 3662 { 3663 return bs->open_flags; 3664 } 3665 3666 int bdrv_flush_all(void) 3667 { 3668 BlockDriverState *bs; 3669 int result = 0; 3670 3671 QTAILQ_FOREACH(bs, &bdrv_states, device_list) { 3672 int ret = bdrv_flush(bs); 3673 if (ret < 0 && !result) { 3674 result = ret; 3675 } 3676 } 3677 3678 return result; 3679 } 3680 3681 int bdrv_has_zero_init_1(BlockDriverState *bs) 3682 { 3683 return 1; 3684 } 3685 3686 int bdrv_has_zero_init(BlockDriverState *bs) 3687 { 3688 assert(bs->drv); 3689 3690 /* If BS is a copy on write image, it is initialized to 3691 the contents of the base image, which may not be zeroes. */ 3692 if (bs->backing_hd) { 3693 return 0; 3694 } 3695 if (bs->drv->bdrv_has_zero_init) { 3696 return bs->drv->bdrv_has_zero_init(bs); 3697 } 3698 3699 /* safe default */ 3700 return 0; 3701 } 3702 3703 bool bdrv_unallocated_blocks_are_zero(BlockDriverState *bs) 3704 { 3705 BlockDriverInfo bdi; 3706 3707 if (bs->backing_hd) { 3708 return false; 3709 } 3710 3711 if (bdrv_get_info(bs, &bdi) == 0) { 3712 return bdi.unallocated_blocks_are_zero; 3713 } 3714 3715 return false; 3716 } 3717 3718 bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs) 3719 { 3720 BlockDriverInfo bdi; 3721 3722 if (bs->backing_hd || !(bs->open_flags & BDRV_O_UNMAP)) { 3723 return false; 3724 } 3725 3726 if (bdrv_get_info(bs, &bdi) == 0) { 3727 return bdi.can_write_zeroes_with_unmap; 3728 } 3729 3730 return false; 3731 } 3732 3733 typedef struct BdrvCoGetBlockStatusData { 3734 BlockDriverState *bs; 3735 BlockDriverState *base; 3736 int64_t sector_num; 3737 int nb_sectors; 3738 int *pnum; 3739 int64_t ret; 3740 bool done; 3741 } BdrvCoGetBlockStatusData; 3742 3743 /* 3744 * Returns true iff the specified sector is present in the disk image. Drivers 3745 * not implementing the functionality are assumed to not support backing files, 3746 * hence all their sectors are reported as allocated. 3747 * 3748 * If 'sector_num' is beyond the end of the disk image the return value is 0 3749 * and 'pnum' is set to 0. 3750 * 3751 * 'pnum' is set to the number of sectors (including and immediately following 3752 * the specified sector) that are known to be in the same 3753 * allocated/unallocated state. 3754 * 3755 * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes 3756 * beyond the end of the disk image it will be clamped. 3757 */ 3758 static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs, 3759 int64_t sector_num, 3760 int nb_sectors, int *pnum) 3761 { 3762 int64_t length; 3763 int64_t n; 3764 int64_t ret, ret2; 3765 3766 length = bdrv_getlength(bs); 3767 if (length < 0) { 3768 return length; 3769 } 3770 3771 if (sector_num >= (length >> BDRV_SECTOR_BITS)) { 3772 *pnum = 0; 3773 return 0; 3774 } 3775 3776 n = bs->total_sectors - sector_num; 3777 if (n < nb_sectors) { 3778 nb_sectors = n; 3779 } 3780 3781 if (!bs->drv->bdrv_co_get_block_status) { 3782 *pnum = nb_sectors; 3783 ret = BDRV_BLOCK_DATA; 3784 if (bs->drv->protocol_name) { 3785 ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE); 3786 } 3787 return ret; 3788 } 3789 3790 ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum); 3791 if (ret < 0) { 3792 *pnum = 0; 3793 return ret; 3794 } 3795 3796 if (ret & BDRV_BLOCK_RAW) { 3797 assert(ret & BDRV_BLOCK_OFFSET_VALID); 3798 return bdrv_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS, 3799 *pnum, pnum); 3800 } 3801 3802 if (!(ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO)) { 3803 if (bdrv_unallocated_blocks_are_zero(bs)) { 3804 ret |= BDRV_BLOCK_ZERO; 3805 } else if (bs->backing_hd) { 3806 BlockDriverState *bs2 = bs->backing_hd; 3807 int64_t length2 = bdrv_getlength(bs2); 3808 if (length2 >= 0 && sector_num >= (length2 >> BDRV_SECTOR_BITS)) { 3809 ret |= BDRV_BLOCK_ZERO; 3810 } 3811 } 3812 } 3813 3814 if (bs->file && 3815 (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) && 3816 (ret & BDRV_BLOCK_OFFSET_VALID)) { 3817 ret2 = bdrv_co_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS, 3818 *pnum, pnum); 3819 if (ret2 >= 0) { 3820 /* Ignore errors. This is just providing extra information, it 3821 * is useful but not necessary. 3822 */ 3823 ret |= (ret2 & BDRV_BLOCK_ZERO); 3824 } 3825 } 3826 3827 return ret; 3828 } 3829 3830 /* Coroutine wrapper for bdrv_get_block_status() */ 3831 static void coroutine_fn bdrv_get_block_status_co_entry(void *opaque) 3832 { 3833 BdrvCoGetBlockStatusData *data = opaque; 3834 BlockDriverState *bs = data->bs; 3835 3836 data->ret = bdrv_co_get_block_status(bs, data->sector_num, data->nb_sectors, 3837 data->pnum); 3838 data->done = true; 3839 } 3840 3841 /* 3842 * Synchronous wrapper around bdrv_co_get_block_status(). 3843 * 3844 * See bdrv_co_get_block_status() for details. 3845 */ 3846 int64_t bdrv_get_block_status(BlockDriverState *bs, int64_t sector_num, 3847 int nb_sectors, int *pnum) 3848 { 3849 Coroutine *co; 3850 BdrvCoGetBlockStatusData data = { 3851 .bs = bs, 3852 .sector_num = sector_num, 3853 .nb_sectors = nb_sectors, 3854 .pnum = pnum, 3855 .done = false, 3856 }; 3857 3858 if (qemu_in_coroutine()) { 3859 /* Fast-path if already in coroutine context */ 3860 bdrv_get_block_status_co_entry(&data); 3861 } else { 3862 co = qemu_coroutine_create(bdrv_get_block_status_co_entry); 3863 qemu_coroutine_enter(co, &data); 3864 while (!data.done) { 3865 qemu_aio_wait(); 3866 } 3867 } 3868 return data.ret; 3869 } 3870 3871 int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, 3872 int nb_sectors, int *pnum) 3873 { 3874 int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum); 3875 if (ret < 0) { 3876 return ret; 3877 } 3878 return 3879 (ret & BDRV_BLOCK_DATA) || 3880 ((ret & BDRV_BLOCK_ZERO) && !bdrv_has_zero_init(bs)); 3881 } 3882 3883 /* 3884 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP] 3885 * 3886 * Return true if the given sector is allocated in any image between 3887 * BASE and TOP (inclusive). BASE can be NULL to check if the given 3888 * sector is allocated in any image of the chain. Return false otherwise. 3889 * 3890 * 'pnum' is set to the number of sectors (including and immediately following 3891 * the specified sector) that are known to be in the same 3892 * allocated/unallocated state. 3893 * 3894 */ 3895 int bdrv_is_allocated_above(BlockDriverState *top, 3896 BlockDriverState *base, 3897 int64_t sector_num, 3898 int nb_sectors, int *pnum) 3899 { 3900 BlockDriverState *intermediate; 3901 int ret, n = nb_sectors; 3902 3903 intermediate = top; 3904 while (intermediate && intermediate != base) { 3905 int pnum_inter; 3906 ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors, 3907 &pnum_inter); 3908 if (ret < 0) { 3909 return ret; 3910 } else if (ret) { 3911 *pnum = pnum_inter; 3912 return 1; 3913 } 3914 3915 /* 3916 * [sector_num, nb_sectors] is unallocated on top but intermediate 3917 * might have 3918 * 3919 * [sector_num+x, nr_sectors] allocated. 3920 */ 3921 if (n > pnum_inter && 3922 (intermediate == top || 3923 sector_num + pnum_inter < intermediate->total_sectors)) { 3924 n = pnum_inter; 3925 } 3926 3927 intermediate = intermediate->backing_hd; 3928 } 3929 3930 *pnum = n; 3931 return 0; 3932 } 3933 3934 const char *bdrv_get_encrypted_filename(BlockDriverState *bs) 3935 { 3936 if (bs->backing_hd && bs->backing_hd->encrypted) 3937 return bs->backing_file; 3938 else if (bs->encrypted) 3939 return bs->filename; 3940 else 3941 return NULL; 3942 } 3943 3944 void bdrv_get_backing_filename(BlockDriverState *bs, 3945 char *filename, int filename_size) 3946 { 3947 pstrcpy(filename, filename_size, bs->backing_file); 3948 } 3949 3950 int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num, 3951 const uint8_t *buf, int nb_sectors) 3952 { 3953 BlockDriver *drv = bs->drv; 3954 if (!drv) 3955 return -ENOMEDIUM; 3956 if (!drv->bdrv_write_compressed) 3957 return -ENOTSUP; 3958 if (bdrv_check_request(bs, sector_num, nb_sectors)) 3959 return -EIO; 3960 3961 assert(QLIST_EMPTY(&bs->dirty_bitmaps)); 3962 3963 return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors); 3964 } 3965 3966 int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi) 3967 { 3968 BlockDriver *drv = bs->drv; 3969 if (!drv) 3970 return -ENOMEDIUM; 3971 if (!drv->bdrv_get_info) 3972 return -ENOTSUP; 3973 memset(bdi, 0, sizeof(*bdi)); 3974 return drv->bdrv_get_info(bs, bdi); 3975 } 3976 3977 ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs) 3978 { 3979 BlockDriver *drv = bs->drv; 3980 if (drv && drv->bdrv_get_specific_info) { 3981 return drv->bdrv_get_specific_info(bs); 3982 } 3983 return NULL; 3984 } 3985 3986 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf, 3987 int64_t pos, int size) 3988 { 3989 QEMUIOVector qiov; 3990 struct iovec iov = { 3991 .iov_base = (void *) buf, 3992 .iov_len = size, 3993 }; 3994 3995 qemu_iovec_init_external(&qiov, &iov, 1); 3996 return bdrv_writev_vmstate(bs, &qiov, pos); 3997 } 3998 3999 int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) 4000 { 4001 BlockDriver *drv = bs->drv; 4002 4003 if (!drv) { 4004 return -ENOMEDIUM; 4005 } else if (drv->bdrv_save_vmstate) { 4006 return drv->bdrv_save_vmstate(bs, qiov, pos); 4007 } else if (bs->file) { 4008 return bdrv_writev_vmstate(bs->file, qiov, pos); 4009 } 4010 4011 return -ENOTSUP; 4012 } 4013 4014 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf, 4015 int64_t pos, int size) 4016 { 4017 BlockDriver *drv = bs->drv; 4018 if (!drv) 4019 return -ENOMEDIUM; 4020 if (drv->bdrv_load_vmstate) 4021 return drv->bdrv_load_vmstate(bs, buf, pos, size); 4022 if (bs->file) 4023 return bdrv_load_vmstate(bs->file, buf, pos, size); 4024 return -ENOTSUP; 4025 } 4026 4027 void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event) 4028 { 4029 if (!bs || !bs->drv || !bs->drv->bdrv_debug_event) { 4030 return; 4031 } 4032 4033 bs->drv->bdrv_debug_event(bs, event); 4034 } 4035 4036 int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event, 4037 const char *tag) 4038 { 4039 while (bs && bs->drv && !bs->drv->bdrv_debug_breakpoint) { 4040 bs = bs->file; 4041 } 4042 4043 if (bs && bs->drv && bs->drv->bdrv_debug_breakpoint) { 4044 return bs->drv->bdrv_debug_breakpoint(bs, event, tag); 4045 } 4046 4047 return -ENOTSUP; 4048 } 4049 4050 int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag) 4051 { 4052 while (bs && bs->drv && !bs->drv->bdrv_debug_remove_breakpoint) { 4053 bs = bs->file; 4054 } 4055 4056 if (bs && bs->drv && bs->drv->bdrv_debug_remove_breakpoint) { 4057 return bs->drv->bdrv_debug_remove_breakpoint(bs, tag); 4058 } 4059 4060 return -ENOTSUP; 4061 } 4062 4063 int bdrv_debug_resume(BlockDriverState *bs, const char *tag) 4064 { 4065 while (bs && (!bs->drv || !bs->drv->bdrv_debug_resume)) { 4066 bs = bs->file; 4067 } 4068 4069 if (bs && bs->drv && bs->drv->bdrv_debug_resume) { 4070 return bs->drv->bdrv_debug_resume(bs, tag); 4071 } 4072 4073 return -ENOTSUP; 4074 } 4075 4076 bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag) 4077 { 4078 while (bs && bs->drv && !bs->drv->bdrv_debug_is_suspended) { 4079 bs = bs->file; 4080 } 4081 4082 if (bs && bs->drv && bs->drv->bdrv_debug_is_suspended) { 4083 return bs->drv->bdrv_debug_is_suspended(bs, tag); 4084 } 4085 4086 return false; 4087 } 4088 4089 int bdrv_is_snapshot(BlockDriverState *bs) 4090 { 4091 return !!(bs->open_flags & BDRV_O_SNAPSHOT); 4092 } 4093 4094 /* backing_file can either be relative, or absolute, or a protocol. If it is 4095 * relative, it must be relative to the chain. So, passing in bs->filename 4096 * from a BDS as backing_file should not be done, as that may be relative to 4097 * the CWD rather than the chain. */ 4098 BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs, 4099 const char *backing_file) 4100 { 4101 char *filename_full = NULL; 4102 char *backing_file_full = NULL; 4103 char *filename_tmp = NULL; 4104 int is_protocol = 0; 4105 BlockDriverState *curr_bs = NULL; 4106 BlockDriverState *retval = NULL; 4107 4108 if (!bs || !bs->drv || !backing_file) { 4109 return NULL; 4110 } 4111 4112 filename_full = g_malloc(PATH_MAX); 4113 backing_file_full = g_malloc(PATH_MAX); 4114 filename_tmp = g_malloc(PATH_MAX); 4115 4116 is_protocol = path_has_protocol(backing_file); 4117 4118 for (curr_bs = bs; curr_bs->backing_hd; curr_bs = curr_bs->backing_hd) { 4119 4120 /* If either of the filename paths is actually a protocol, then 4121 * compare unmodified paths; otherwise make paths relative */ 4122 if (is_protocol || path_has_protocol(curr_bs->backing_file)) { 4123 if (strcmp(backing_file, curr_bs->backing_file) == 0) { 4124 retval = curr_bs->backing_hd; 4125 break; 4126 } 4127 } else { 4128 /* If not an absolute filename path, make it relative to the current 4129 * image's filename path */ 4130 path_combine(filename_tmp, PATH_MAX, curr_bs->filename, 4131 backing_file); 4132 4133 /* We are going to compare absolute pathnames */ 4134 if (!realpath(filename_tmp, filename_full)) { 4135 continue; 4136 } 4137 4138 /* We need to make sure the backing filename we are comparing against 4139 * is relative to the current image filename (or absolute) */ 4140 path_combine(filename_tmp, PATH_MAX, curr_bs->filename, 4141 curr_bs->backing_file); 4142 4143 if (!realpath(filename_tmp, backing_file_full)) { 4144 continue; 4145 } 4146 4147 if (strcmp(backing_file_full, filename_full) == 0) { 4148 retval = curr_bs->backing_hd; 4149 break; 4150 } 4151 } 4152 } 4153 4154 g_free(filename_full); 4155 g_free(backing_file_full); 4156 g_free(filename_tmp); 4157 return retval; 4158 } 4159 4160 int bdrv_get_backing_file_depth(BlockDriverState *bs) 4161 { 4162 if (!bs->drv) { 4163 return 0; 4164 } 4165 4166 if (!bs->backing_hd) { 4167 return 0; 4168 } 4169 4170 return 1 + bdrv_get_backing_file_depth(bs->backing_hd); 4171 } 4172 4173 BlockDriverState *bdrv_find_base(BlockDriverState *bs) 4174 { 4175 BlockDriverState *curr_bs = NULL; 4176 4177 if (!bs) { 4178 return NULL; 4179 } 4180 4181 curr_bs = bs; 4182 4183 while (curr_bs->backing_hd) { 4184 curr_bs = curr_bs->backing_hd; 4185 } 4186 return curr_bs; 4187 } 4188 4189 /**************************************************************/ 4190 /* async I/Os */ 4191 4192 BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num, 4193 QEMUIOVector *qiov, int nb_sectors, 4194 BlockDriverCompletionFunc *cb, void *opaque) 4195 { 4196 trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque); 4197 4198 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0, 4199 cb, opaque, false); 4200 } 4201 4202 BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num, 4203 QEMUIOVector *qiov, int nb_sectors, 4204 BlockDriverCompletionFunc *cb, void *opaque) 4205 { 4206 trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque); 4207 4208 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0, 4209 cb, opaque, true); 4210 } 4211 4212 BlockDriverAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs, 4213 int64_t sector_num, int nb_sectors, BdrvRequestFlags flags, 4214 BlockDriverCompletionFunc *cb, void *opaque) 4215 { 4216 trace_bdrv_aio_write_zeroes(bs, sector_num, nb_sectors, flags, opaque); 4217 4218 return bdrv_co_aio_rw_vector(bs, sector_num, NULL, nb_sectors, 4219 BDRV_REQ_ZERO_WRITE | flags, 4220 cb, opaque, true); 4221 } 4222 4223 4224 typedef struct MultiwriteCB { 4225 int error; 4226 int num_requests; 4227 int num_callbacks; 4228 struct { 4229 BlockDriverCompletionFunc *cb; 4230 void *opaque; 4231 QEMUIOVector *free_qiov; 4232 } callbacks[]; 4233 } MultiwriteCB; 4234 4235 static void multiwrite_user_cb(MultiwriteCB *mcb) 4236 { 4237 int i; 4238 4239 for (i = 0; i < mcb->num_callbacks; i++) { 4240 mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error); 4241 if (mcb->callbacks[i].free_qiov) { 4242 qemu_iovec_destroy(mcb->callbacks[i].free_qiov); 4243 } 4244 g_free(mcb->callbacks[i].free_qiov); 4245 } 4246 } 4247 4248 static void multiwrite_cb(void *opaque, int ret) 4249 { 4250 MultiwriteCB *mcb = opaque; 4251 4252 trace_multiwrite_cb(mcb, ret); 4253 4254 if (ret < 0 && !mcb->error) { 4255 mcb->error = ret; 4256 } 4257 4258 mcb->num_requests--; 4259 if (mcb->num_requests == 0) { 4260 multiwrite_user_cb(mcb); 4261 g_free(mcb); 4262 } 4263 } 4264 4265 static int multiwrite_req_compare(const void *a, const void *b) 4266 { 4267 const BlockRequest *req1 = a, *req2 = b; 4268 4269 /* 4270 * Note that we can't simply subtract req2->sector from req1->sector 4271 * here as that could overflow the return value. 4272 */ 4273 if (req1->sector > req2->sector) { 4274 return 1; 4275 } else if (req1->sector < req2->sector) { 4276 return -1; 4277 } else { 4278 return 0; 4279 } 4280 } 4281 4282 /* 4283 * Takes a bunch of requests and tries to merge them. Returns the number of 4284 * requests that remain after merging. 4285 */ 4286 static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs, 4287 int num_reqs, MultiwriteCB *mcb) 4288 { 4289 int i, outidx; 4290 4291 // Sort requests by start sector 4292 qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare); 4293 4294 // Check if adjacent requests touch the same clusters. If so, combine them, 4295 // filling up gaps with zero sectors. 4296 outidx = 0; 4297 for (i = 1; i < num_reqs; i++) { 4298 int merge = 0; 4299 int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors; 4300 4301 // Handle exactly sequential writes and overlapping writes. 4302 if (reqs[i].sector <= oldreq_last) { 4303 merge = 1; 4304 } 4305 4306 if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) { 4307 merge = 0; 4308 } 4309 4310 if (merge) { 4311 size_t size; 4312 QEMUIOVector *qiov = g_malloc0(sizeof(*qiov)); 4313 qemu_iovec_init(qiov, 4314 reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1); 4315 4316 // Add the first request to the merged one. If the requests are 4317 // overlapping, drop the last sectors of the first request. 4318 size = (reqs[i].sector - reqs[outidx].sector) << 9; 4319 qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size); 4320 4321 // We should need to add any zeros between the two requests 4322 assert (reqs[i].sector <= oldreq_last); 4323 4324 // Add the second request 4325 qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size); 4326 4327 reqs[outidx].nb_sectors = qiov->size >> 9; 4328 reqs[outidx].qiov = qiov; 4329 4330 mcb->callbacks[i].free_qiov = reqs[outidx].qiov; 4331 } else { 4332 outidx++; 4333 reqs[outidx].sector = reqs[i].sector; 4334 reqs[outidx].nb_sectors = reqs[i].nb_sectors; 4335 reqs[outidx].qiov = reqs[i].qiov; 4336 } 4337 } 4338 4339 return outidx + 1; 4340 } 4341 4342 /* 4343 * Submit multiple AIO write requests at once. 4344 * 4345 * On success, the function returns 0 and all requests in the reqs array have 4346 * been submitted. In error case this function returns -1, and any of the 4347 * requests may or may not be submitted yet. In particular, this means that the 4348 * callback will be called for some of the requests, for others it won't. The 4349 * caller must check the error field of the BlockRequest to wait for the right 4350 * callbacks (if error != 0, no callback will be called). 4351 * 4352 * The implementation may modify the contents of the reqs array, e.g. to merge 4353 * requests. However, the fields opaque and error are left unmodified as they 4354 * are used to signal failure for a single request to the caller. 4355 */ 4356 int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs) 4357 { 4358 MultiwriteCB *mcb; 4359 int i; 4360 4361 /* don't submit writes if we don't have a medium */ 4362 if (bs->drv == NULL) { 4363 for (i = 0; i < num_reqs; i++) { 4364 reqs[i].error = -ENOMEDIUM; 4365 } 4366 return -1; 4367 } 4368 4369 if (num_reqs == 0) { 4370 return 0; 4371 } 4372 4373 // Create MultiwriteCB structure 4374 mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks)); 4375 mcb->num_requests = 0; 4376 mcb->num_callbacks = num_reqs; 4377 4378 for (i = 0; i < num_reqs; i++) { 4379 mcb->callbacks[i].cb = reqs[i].cb; 4380 mcb->callbacks[i].opaque = reqs[i].opaque; 4381 } 4382 4383 // Check for mergable requests 4384 num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb); 4385 4386 trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs); 4387 4388 /* Run the aio requests. */ 4389 mcb->num_requests = num_reqs; 4390 for (i = 0; i < num_reqs; i++) { 4391 bdrv_co_aio_rw_vector(bs, reqs[i].sector, reqs[i].qiov, 4392 reqs[i].nb_sectors, reqs[i].flags, 4393 multiwrite_cb, mcb, 4394 true); 4395 } 4396 4397 return 0; 4398 } 4399 4400 void bdrv_aio_cancel(BlockDriverAIOCB *acb) 4401 { 4402 acb->aiocb_info->cancel(acb); 4403 } 4404 4405 /**************************************************************/ 4406 /* async block device emulation */ 4407 4408 typedef struct BlockDriverAIOCBSync { 4409 BlockDriverAIOCB common; 4410 QEMUBH *bh; 4411 int ret; 4412 /* vector translation state */ 4413 QEMUIOVector *qiov; 4414 uint8_t *bounce; 4415 int is_write; 4416 } BlockDriverAIOCBSync; 4417 4418 static void bdrv_aio_cancel_em(BlockDriverAIOCB *blockacb) 4419 { 4420 BlockDriverAIOCBSync *acb = 4421 container_of(blockacb, BlockDriverAIOCBSync, common); 4422 qemu_bh_delete(acb->bh); 4423 acb->bh = NULL; 4424 qemu_aio_release(acb); 4425 } 4426 4427 static const AIOCBInfo bdrv_em_aiocb_info = { 4428 .aiocb_size = sizeof(BlockDriverAIOCBSync), 4429 .cancel = bdrv_aio_cancel_em, 4430 }; 4431 4432 static void bdrv_aio_bh_cb(void *opaque) 4433 { 4434 BlockDriverAIOCBSync *acb = opaque; 4435 4436 if (!acb->is_write) 4437 qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size); 4438 qemu_vfree(acb->bounce); 4439 acb->common.cb(acb->common.opaque, acb->ret); 4440 qemu_bh_delete(acb->bh); 4441 acb->bh = NULL; 4442 qemu_aio_release(acb); 4443 } 4444 4445 static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs, 4446 int64_t sector_num, 4447 QEMUIOVector *qiov, 4448 int nb_sectors, 4449 BlockDriverCompletionFunc *cb, 4450 void *opaque, 4451 int is_write) 4452 4453 { 4454 BlockDriverAIOCBSync *acb; 4455 4456 acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque); 4457 acb->is_write = is_write; 4458 acb->qiov = qiov; 4459 acb->bounce = qemu_blockalign(bs, qiov->size); 4460 acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb); 4461 4462 if (is_write) { 4463 qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size); 4464 acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors); 4465 } else { 4466 acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors); 4467 } 4468 4469 qemu_bh_schedule(acb->bh); 4470 4471 return &acb->common; 4472 } 4473 4474 static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs, 4475 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, 4476 BlockDriverCompletionFunc *cb, void *opaque) 4477 { 4478 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0); 4479 } 4480 4481 static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs, 4482 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, 4483 BlockDriverCompletionFunc *cb, void *opaque) 4484 { 4485 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1); 4486 } 4487 4488 4489 typedef struct BlockDriverAIOCBCoroutine { 4490 BlockDriverAIOCB common; 4491 BlockRequest req; 4492 bool is_write; 4493 bool *done; 4494 QEMUBH* bh; 4495 } BlockDriverAIOCBCoroutine; 4496 4497 static void bdrv_aio_co_cancel_em(BlockDriverAIOCB *blockacb) 4498 { 4499 BlockDriverAIOCBCoroutine *acb = 4500 container_of(blockacb, BlockDriverAIOCBCoroutine, common); 4501 bool done = false; 4502 4503 acb->done = &done; 4504 while (!done) { 4505 qemu_aio_wait(); 4506 } 4507 } 4508 4509 static const AIOCBInfo bdrv_em_co_aiocb_info = { 4510 .aiocb_size = sizeof(BlockDriverAIOCBCoroutine), 4511 .cancel = bdrv_aio_co_cancel_em, 4512 }; 4513 4514 static void bdrv_co_em_bh(void *opaque) 4515 { 4516 BlockDriverAIOCBCoroutine *acb = opaque; 4517 4518 acb->common.cb(acb->common.opaque, acb->req.error); 4519 4520 if (acb->done) { 4521 *acb->done = true; 4522 } 4523 4524 qemu_bh_delete(acb->bh); 4525 qemu_aio_release(acb); 4526 } 4527 4528 /* Invoke bdrv_co_do_readv/bdrv_co_do_writev */ 4529 static void coroutine_fn bdrv_co_do_rw(void *opaque) 4530 { 4531 BlockDriverAIOCBCoroutine *acb = opaque; 4532 BlockDriverState *bs = acb->common.bs; 4533 4534 if (!acb->is_write) { 4535 acb->req.error = bdrv_co_do_readv(bs, acb->req.sector, 4536 acb->req.nb_sectors, acb->req.qiov, acb->req.flags); 4537 } else { 4538 acb->req.error = bdrv_co_do_writev(bs, acb->req.sector, 4539 acb->req.nb_sectors, acb->req.qiov, acb->req.flags); 4540 } 4541 4542 acb->bh = qemu_bh_new(bdrv_co_em_bh, acb); 4543 qemu_bh_schedule(acb->bh); 4544 } 4545 4546 static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs, 4547 int64_t sector_num, 4548 QEMUIOVector *qiov, 4549 int nb_sectors, 4550 BdrvRequestFlags flags, 4551 BlockDriverCompletionFunc *cb, 4552 void *opaque, 4553 bool is_write) 4554 { 4555 Coroutine *co; 4556 BlockDriverAIOCBCoroutine *acb; 4557 4558 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque); 4559 acb->req.sector = sector_num; 4560 acb->req.nb_sectors = nb_sectors; 4561 acb->req.qiov = qiov; 4562 acb->req.flags = flags; 4563 acb->is_write = is_write; 4564 acb->done = NULL; 4565 4566 co = qemu_coroutine_create(bdrv_co_do_rw); 4567 qemu_coroutine_enter(co, acb); 4568 4569 return &acb->common; 4570 } 4571 4572 static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque) 4573 { 4574 BlockDriverAIOCBCoroutine *acb = opaque; 4575 BlockDriverState *bs = acb->common.bs; 4576 4577 acb->req.error = bdrv_co_flush(bs); 4578 acb->bh = qemu_bh_new(bdrv_co_em_bh, acb); 4579 qemu_bh_schedule(acb->bh); 4580 } 4581 4582 BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs, 4583 BlockDriverCompletionFunc *cb, void *opaque) 4584 { 4585 trace_bdrv_aio_flush(bs, opaque); 4586 4587 Coroutine *co; 4588 BlockDriverAIOCBCoroutine *acb; 4589 4590 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque); 4591 acb->done = NULL; 4592 4593 co = qemu_coroutine_create(bdrv_aio_flush_co_entry); 4594 qemu_coroutine_enter(co, acb); 4595 4596 return &acb->common; 4597 } 4598 4599 static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque) 4600 { 4601 BlockDriverAIOCBCoroutine *acb = opaque; 4602 BlockDriverState *bs = acb->common.bs; 4603 4604 acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors); 4605 acb->bh = qemu_bh_new(bdrv_co_em_bh, acb); 4606 qemu_bh_schedule(acb->bh); 4607 } 4608 4609 BlockDriverAIOCB *bdrv_aio_discard(BlockDriverState *bs, 4610 int64_t sector_num, int nb_sectors, 4611 BlockDriverCompletionFunc *cb, void *opaque) 4612 { 4613 Coroutine *co; 4614 BlockDriverAIOCBCoroutine *acb; 4615 4616 trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque); 4617 4618 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque); 4619 acb->req.sector = sector_num; 4620 acb->req.nb_sectors = nb_sectors; 4621 acb->done = NULL; 4622 co = qemu_coroutine_create(bdrv_aio_discard_co_entry); 4623 qemu_coroutine_enter(co, acb); 4624 4625 return &acb->common; 4626 } 4627 4628 void bdrv_init(void) 4629 { 4630 module_call_init(MODULE_INIT_BLOCK); 4631 } 4632 4633 void bdrv_init_with_whitelist(void) 4634 { 4635 use_bdrv_whitelist = 1; 4636 bdrv_init(); 4637 } 4638 4639 void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs, 4640 BlockDriverCompletionFunc *cb, void *opaque) 4641 { 4642 BlockDriverAIOCB *acb; 4643 4644 acb = g_slice_alloc(aiocb_info->aiocb_size); 4645 acb->aiocb_info = aiocb_info; 4646 acb->bs = bs; 4647 acb->cb = cb; 4648 acb->opaque = opaque; 4649 return acb; 4650 } 4651 4652 void qemu_aio_release(void *p) 4653 { 4654 BlockDriverAIOCB *acb = p; 4655 g_slice_free1(acb->aiocb_info->aiocb_size, acb); 4656 } 4657 4658 /**************************************************************/ 4659 /* Coroutine block device emulation */ 4660 4661 typedef struct CoroutineIOCompletion { 4662 Coroutine *coroutine; 4663 int ret; 4664 } CoroutineIOCompletion; 4665 4666 static void bdrv_co_io_em_complete(void *opaque, int ret) 4667 { 4668 CoroutineIOCompletion *co = opaque; 4669 4670 co->ret = ret; 4671 qemu_coroutine_enter(co->coroutine, NULL); 4672 } 4673 4674 static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num, 4675 int nb_sectors, QEMUIOVector *iov, 4676 bool is_write) 4677 { 4678 CoroutineIOCompletion co = { 4679 .coroutine = qemu_coroutine_self(), 4680 }; 4681 BlockDriverAIOCB *acb; 4682 4683 if (is_write) { 4684 acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors, 4685 bdrv_co_io_em_complete, &co); 4686 } else { 4687 acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors, 4688 bdrv_co_io_em_complete, &co); 4689 } 4690 4691 trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb); 4692 if (!acb) { 4693 return -EIO; 4694 } 4695 qemu_coroutine_yield(); 4696 4697 return co.ret; 4698 } 4699 4700 static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs, 4701 int64_t sector_num, int nb_sectors, 4702 QEMUIOVector *iov) 4703 { 4704 return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false); 4705 } 4706 4707 static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs, 4708 int64_t sector_num, int nb_sectors, 4709 QEMUIOVector *iov) 4710 { 4711 return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true); 4712 } 4713 4714 static void coroutine_fn bdrv_flush_co_entry(void *opaque) 4715 { 4716 RwCo *rwco = opaque; 4717 4718 rwco->ret = bdrv_co_flush(rwco->bs); 4719 } 4720 4721 int coroutine_fn bdrv_co_flush(BlockDriverState *bs) 4722 { 4723 int ret; 4724 4725 if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) { 4726 return 0; 4727 } 4728 4729 /* Write back cached data to the OS even with cache=unsafe */ 4730 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS); 4731 if (bs->drv->bdrv_co_flush_to_os) { 4732 ret = bs->drv->bdrv_co_flush_to_os(bs); 4733 if (ret < 0) { 4734 return ret; 4735 } 4736 } 4737 4738 /* But don't actually force it to the disk with cache=unsafe */ 4739 if (bs->open_flags & BDRV_O_NO_FLUSH) { 4740 goto flush_parent; 4741 } 4742 4743 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK); 4744 if (bs->drv->bdrv_co_flush_to_disk) { 4745 ret = bs->drv->bdrv_co_flush_to_disk(bs); 4746 } else if (bs->drv->bdrv_aio_flush) { 4747 BlockDriverAIOCB *acb; 4748 CoroutineIOCompletion co = { 4749 .coroutine = qemu_coroutine_self(), 4750 }; 4751 4752 acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co); 4753 if (acb == NULL) { 4754 ret = -EIO; 4755 } else { 4756 qemu_coroutine_yield(); 4757 ret = co.ret; 4758 } 4759 } else { 4760 /* 4761 * Some block drivers always operate in either writethrough or unsafe 4762 * mode and don't support bdrv_flush therefore. Usually qemu doesn't 4763 * know how the server works (because the behaviour is hardcoded or 4764 * depends on server-side configuration), so we can't ensure that 4765 * everything is safe on disk. Returning an error doesn't work because 4766 * that would break guests even if the server operates in writethrough 4767 * mode. 4768 * 4769 * Let's hope the user knows what he's doing. 4770 */ 4771 ret = 0; 4772 } 4773 if (ret < 0) { 4774 return ret; 4775 } 4776 4777 /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH 4778 * in the case of cache=unsafe, so there are no useless flushes. 4779 */ 4780 flush_parent: 4781 return bdrv_co_flush(bs->file); 4782 } 4783 4784 void bdrv_invalidate_cache(BlockDriverState *bs) 4785 { 4786 if (!bs->drv) { 4787 return; 4788 } 4789 4790 if (bs->drv->bdrv_invalidate_cache) { 4791 bs->drv->bdrv_invalidate_cache(bs); 4792 } else if (bs->file) { 4793 bdrv_invalidate_cache(bs->file); 4794 } 4795 4796 refresh_total_sectors(bs, bs->total_sectors); 4797 } 4798 4799 void bdrv_invalidate_cache_all(void) 4800 { 4801 BlockDriverState *bs; 4802 4803 QTAILQ_FOREACH(bs, &bdrv_states, device_list) { 4804 bdrv_invalidate_cache(bs); 4805 } 4806 } 4807 4808 void bdrv_clear_incoming_migration_all(void) 4809 { 4810 BlockDriverState *bs; 4811 4812 QTAILQ_FOREACH(bs, &bdrv_states, device_list) { 4813 bs->open_flags = bs->open_flags & ~(BDRV_O_INCOMING); 4814 } 4815 } 4816 4817 int bdrv_flush(BlockDriverState *bs) 4818 { 4819 Coroutine *co; 4820 RwCo rwco = { 4821 .bs = bs, 4822 .ret = NOT_DONE, 4823 }; 4824 4825 if (qemu_in_coroutine()) { 4826 /* Fast-path if already in coroutine context */ 4827 bdrv_flush_co_entry(&rwco); 4828 } else { 4829 co = qemu_coroutine_create(bdrv_flush_co_entry); 4830 qemu_coroutine_enter(co, &rwco); 4831 while (rwco.ret == NOT_DONE) { 4832 qemu_aio_wait(); 4833 } 4834 } 4835 4836 return rwco.ret; 4837 } 4838 4839 typedef struct DiscardCo { 4840 BlockDriverState *bs; 4841 int64_t sector_num; 4842 int nb_sectors; 4843 int ret; 4844 } DiscardCo; 4845 static void coroutine_fn bdrv_discard_co_entry(void *opaque) 4846 { 4847 DiscardCo *rwco = opaque; 4848 4849 rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors); 4850 } 4851 4852 /* if no limit is specified in the BlockLimits use a default 4853 * of 32768 512-byte sectors (16 MiB) per request. 4854 */ 4855 #define MAX_DISCARD_DEFAULT 32768 4856 4857 int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num, 4858 int nb_sectors) 4859 { 4860 int max_discard; 4861 4862 if (!bs->drv) { 4863 return -ENOMEDIUM; 4864 } else if (bdrv_check_request(bs, sector_num, nb_sectors)) { 4865 return -EIO; 4866 } else if (bs->read_only) { 4867 return -EROFS; 4868 } 4869 4870 bdrv_reset_dirty(bs, sector_num, nb_sectors); 4871 4872 /* Do nothing if disabled. */ 4873 if (!(bs->open_flags & BDRV_O_UNMAP)) { 4874 return 0; 4875 } 4876 4877 if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) { 4878 return 0; 4879 } 4880 4881 max_discard = bs->bl.max_discard ? bs->bl.max_discard : MAX_DISCARD_DEFAULT; 4882 while (nb_sectors > 0) { 4883 int ret; 4884 int num = nb_sectors; 4885 4886 /* align request */ 4887 if (bs->bl.discard_alignment && 4888 num >= bs->bl.discard_alignment && 4889 sector_num % bs->bl.discard_alignment) { 4890 if (num > bs->bl.discard_alignment) { 4891 num = bs->bl.discard_alignment; 4892 } 4893 num -= sector_num % bs->bl.discard_alignment; 4894 } 4895 4896 /* limit request size */ 4897 if (num > max_discard) { 4898 num = max_discard; 4899 } 4900 4901 if (bs->drv->bdrv_co_discard) { 4902 ret = bs->drv->bdrv_co_discard(bs, sector_num, num); 4903 } else { 4904 BlockDriverAIOCB *acb; 4905 CoroutineIOCompletion co = { 4906 .coroutine = qemu_coroutine_self(), 4907 }; 4908 4909 acb = bs->drv->bdrv_aio_discard(bs, sector_num, nb_sectors, 4910 bdrv_co_io_em_complete, &co); 4911 if (acb == NULL) { 4912 return -EIO; 4913 } else { 4914 qemu_coroutine_yield(); 4915 ret = co.ret; 4916 } 4917 } 4918 if (ret && ret != -ENOTSUP) { 4919 return ret; 4920 } 4921 4922 sector_num += num; 4923 nb_sectors -= num; 4924 } 4925 return 0; 4926 } 4927 4928 int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors) 4929 { 4930 Coroutine *co; 4931 DiscardCo rwco = { 4932 .bs = bs, 4933 .sector_num = sector_num, 4934 .nb_sectors = nb_sectors, 4935 .ret = NOT_DONE, 4936 }; 4937 4938 if (qemu_in_coroutine()) { 4939 /* Fast-path if already in coroutine context */ 4940 bdrv_discard_co_entry(&rwco); 4941 } else { 4942 co = qemu_coroutine_create(bdrv_discard_co_entry); 4943 qemu_coroutine_enter(co, &rwco); 4944 while (rwco.ret == NOT_DONE) { 4945 qemu_aio_wait(); 4946 } 4947 } 4948 4949 return rwco.ret; 4950 } 4951 4952 /**************************************************************/ 4953 /* removable device support */ 4954 4955 /** 4956 * Return TRUE if the media is present 4957 */ 4958 int bdrv_is_inserted(BlockDriverState *bs) 4959 { 4960 BlockDriver *drv = bs->drv; 4961 4962 if (!drv) 4963 return 0; 4964 if (!drv->bdrv_is_inserted) 4965 return 1; 4966 return drv->bdrv_is_inserted(bs); 4967 } 4968 4969 /** 4970 * Return whether the media changed since the last call to this 4971 * function, or -ENOTSUP if we don't know. Most drivers don't know. 4972 */ 4973 int bdrv_media_changed(BlockDriverState *bs) 4974 { 4975 BlockDriver *drv = bs->drv; 4976 4977 if (drv && drv->bdrv_media_changed) { 4978 return drv->bdrv_media_changed(bs); 4979 } 4980 return -ENOTSUP; 4981 } 4982 4983 /** 4984 * If eject_flag is TRUE, eject the media. Otherwise, close the tray 4985 */ 4986 void bdrv_eject(BlockDriverState *bs, bool eject_flag) 4987 { 4988 BlockDriver *drv = bs->drv; 4989 4990 if (drv && drv->bdrv_eject) { 4991 drv->bdrv_eject(bs, eject_flag); 4992 } 4993 4994 if (bs->device_name[0] != '\0') { 4995 bdrv_emit_qmp_eject_event(bs, eject_flag); 4996 } 4997 } 4998 4999 /** 5000 * Lock or unlock the media (if it is locked, the user won't be able 5001 * to eject it manually). 5002 */ 5003 void bdrv_lock_medium(BlockDriverState *bs, bool locked) 5004 { 5005 BlockDriver *drv = bs->drv; 5006 5007 trace_bdrv_lock_medium(bs, locked); 5008 5009 if (drv && drv->bdrv_lock_medium) { 5010 drv->bdrv_lock_medium(bs, locked); 5011 } 5012 } 5013 5014 /* needed for generic scsi interface */ 5015 5016 int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf) 5017 { 5018 BlockDriver *drv = bs->drv; 5019 5020 if (drv && drv->bdrv_ioctl) 5021 return drv->bdrv_ioctl(bs, req, buf); 5022 return -ENOTSUP; 5023 } 5024 5025 BlockDriverAIOCB *bdrv_aio_ioctl(BlockDriverState *bs, 5026 unsigned long int req, void *buf, 5027 BlockDriverCompletionFunc *cb, void *opaque) 5028 { 5029 BlockDriver *drv = bs->drv; 5030 5031 if (drv && drv->bdrv_aio_ioctl) 5032 return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque); 5033 return NULL; 5034 } 5035 5036 void bdrv_set_guest_block_size(BlockDriverState *bs, int align) 5037 { 5038 bs->guest_block_size = align; 5039 } 5040 5041 void *qemu_blockalign(BlockDriverState *bs, size_t size) 5042 { 5043 return qemu_memalign(bdrv_opt_mem_align(bs), size); 5044 } 5045 5046 /* 5047 * Check if all memory in this vector is sector aligned. 5048 */ 5049 bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov) 5050 { 5051 int i; 5052 size_t alignment = bdrv_opt_mem_align(bs); 5053 5054 for (i = 0; i < qiov->niov; i++) { 5055 if ((uintptr_t) qiov->iov[i].iov_base % alignment) { 5056 return false; 5057 } 5058 if (qiov->iov[i].iov_len % alignment) { 5059 return false; 5060 } 5061 } 5062 5063 return true; 5064 } 5065 5066 BdrvDirtyBitmap *bdrv_create_dirty_bitmap(BlockDriverState *bs, int granularity) 5067 { 5068 int64_t bitmap_size; 5069 BdrvDirtyBitmap *bitmap; 5070 5071 assert((granularity & (granularity - 1)) == 0); 5072 5073 granularity >>= BDRV_SECTOR_BITS; 5074 assert(granularity); 5075 bitmap_size = (bdrv_getlength(bs) >> BDRV_SECTOR_BITS); 5076 bitmap = g_malloc0(sizeof(BdrvDirtyBitmap)); 5077 bitmap->bitmap = hbitmap_alloc(bitmap_size, ffs(granularity) - 1); 5078 QLIST_INSERT_HEAD(&bs->dirty_bitmaps, bitmap, list); 5079 return bitmap; 5080 } 5081 5082 void bdrv_release_dirty_bitmap(BlockDriverState *bs, BdrvDirtyBitmap *bitmap) 5083 { 5084 BdrvDirtyBitmap *bm, *next; 5085 QLIST_FOREACH_SAFE(bm, &bs->dirty_bitmaps, list, next) { 5086 if (bm == bitmap) { 5087 QLIST_REMOVE(bitmap, list); 5088 hbitmap_free(bitmap->bitmap); 5089 g_free(bitmap); 5090 return; 5091 } 5092 } 5093 } 5094 5095 BlockDirtyInfoList *bdrv_query_dirty_bitmaps(BlockDriverState *bs) 5096 { 5097 BdrvDirtyBitmap *bm; 5098 BlockDirtyInfoList *list = NULL; 5099 BlockDirtyInfoList **plist = &list; 5100 5101 QLIST_FOREACH(bm, &bs->dirty_bitmaps, list) { 5102 BlockDirtyInfo *info = g_malloc0(sizeof(BlockDirtyInfo)); 5103 BlockDirtyInfoList *entry = g_malloc0(sizeof(BlockDirtyInfoList)); 5104 info->count = bdrv_get_dirty_count(bs, bm); 5105 info->granularity = 5106 ((int64_t) BDRV_SECTOR_SIZE << hbitmap_granularity(bm->bitmap)); 5107 entry->value = info; 5108 *plist = entry; 5109 plist = &entry->next; 5110 } 5111 5112 return list; 5113 } 5114 5115 int bdrv_get_dirty(BlockDriverState *bs, BdrvDirtyBitmap *bitmap, int64_t sector) 5116 { 5117 if (bitmap) { 5118 return hbitmap_get(bitmap->bitmap, sector); 5119 } else { 5120 return 0; 5121 } 5122 } 5123 5124 void bdrv_dirty_iter_init(BlockDriverState *bs, 5125 BdrvDirtyBitmap *bitmap, HBitmapIter *hbi) 5126 { 5127 hbitmap_iter_init(hbi, bitmap->bitmap, 0); 5128 } 5129 5130 void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector, 5131 int nr_sectors) 5132 { 5133 BdrvDirtyBitmap *bitmap; 5134 QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) { 5135 hbitmap_set(bitmap->bitmap, cur_sector, nr_sectors); 5136 } 5137 } 5138 5139 void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector, int nr_sectors) 5140 { 5141 BdrvDirtyBitmap *bitmap; 5142 QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) { 5143 hbitmap_reset(bitmap->bitmap, cur_sector, nr_sectors); 5144 } 5145 } 5146 5147 int64_t bdrv_get_dirty_count(BlockDriverState *bs, BdrvDirtyBitmap *bitmap) 5148 { 5149 return hbitmap_count(bitmap->bitmap); 5150 } 5151 5152 /* Get a reference to bs */ 5153 void bdrv_ref(BlockDriverState *bs) 5154 { 5155 bs->refcnt++; 5156 } 5157 5158 /* Release a previously grabbed reference to bs. 5159 * If after releasing, reference count is zero, the BlockDriverState is 5160 * deleted. */ 5161 void bdrv_unref(BlockDriverState *bs) 5162 { 5163 assert(bs->refcnt > 0); 5164 if (--bs->refcnt == 0) { 5165 bdrv_delete(bs); 5166 } 5167 } 5168 5169 void bdrv_set_in_use(BlockDriverState *bs, int in_use) 5170 { 5171 assert(bs->in_use != in_use); 5172 bs->in_use = in_use; 5173 } 5174 5175 int bdrv_in_use(BlockDriverState *bs) 5176 { 5177 return bs->in_use; 5178 } 5179 5180 void bdrv_iostatus_enable(BlockDriverState *bs) 5181 { 5182 bs->iostatus_enabled = true; 5183 bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK; 5184 } 5185 5186 /* The I/O status is only enabled if the drive explicitly 5187 * enables it _and_ the VM is configured to stop on errors */ 5188 bool bdrv_iostatus_is_enabled(const BlockDriverState *bs) 5189 { 5190 return (bs->iostatus_enabled && 5191 (bs->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC || 5192 bs->on_write_error == BLOCKDEV_ON_ERROR_STOP || 5193 bs->on_read_error == BLOCKDEV_ON_ERROR_STOP)); 5194 } 5195 5196 void bdrv_iostatus_disable(BlockDriverState *bs) 5197 { 5198 bs->iostatus_enabled = false; 5199 } 5200 5201 void bdrv_iostatus_reset(BlockDriverState *bs) 5202 { 5203 if (bdrv_iostatus_is_enabled(bs)) { 5204 bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK; 5205 if (bs->job) { 5206 block_job_iostatus_reset(bs->job); 5207 } 5208 } 5209 } 5210 5211 void bdrv_iostatus_set_err(BlockDriverState *bs, int error) 5212 { 5213 assert(bdrv_iostatus_is_enabled(bs)); 5214 if (bs->iostatus == BLOCK_DEVICE_IO_STATUS_OK) { 5215 bs->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE : 5216 BLOCK_DEVICE_IO_STATUS_FAILED; 5217 } 5218 } 5219 5220 void 5221 bdrv_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie, int64_t bytes, 5222 enum BlockAcctType type) 5223 { 5224 assert(type < BDRV_MAX_IOTYPE); 5225 5226 cookie->bytes = bytes; 5227 cookie->start_time_ns = get_clock(); 5228 cookie->type = type; 5229 } 5230 5231 void 5232 bdrv_acct_done(BlockDriverState *bs, BlockAcctCookie *cookie) 5233 { 5234 assert(cookie->type < BDRV_MAX_IOTYPE); 5235 5236 bs->nr_bytes[cookie->type] += cookie->bytes; 5237 bs->nr_ops[cookie->type]++; 5238 bs->total_time_ns[cookie->type] += get_clock() - cookie->start_time_ns; 5239 } 5240 5241 void bdrv_img_create(const char *filename, const char *fmt, 5242 const char *base_filename, const char *base_fmt, 5243 char *options, uint64_t img_size, int flags, 5244 Error **errp, bool quiet) 5245 { 5246 QEMUOptionParameter *param = NULL, *create_options = NULL; 5247 QEMUOptionParameter *backing_fmt, *backing_file, *size; 5248 BlockDriver *drv, *proto_drv; 5249 BlockDriver *backing_drv = NULL; 5250 Error *local_err = NULL; 5251 int ret = 0; 5252 5253 /* Find driver and parse its options */ 5254 drv = bdrv_find_format(fmt); 5255 if (!drv) { 5256 error_setg(errp, "Unknown file format '%s'", fmt); 5257 return; 5258 } 5259 5260 proto_drv = bdrv_find_protocol(filename, true); 5261 if (!proto_drv) { 5262 error_setg(errp, "Unknown protocol '%s'", filename); 5263 return; 5264 } 5265 5266 create_options = append_option_parameters(create_options, 5267 drv->create_options); 5268 create_options = append_option_parameters(create_options, 5269 proto_drv->create_options); 5270 5271 /* Create parameter list with default values */ 5272 param = parse_option_parameters("", create_options, param); 5273 5274 set_option_parameter_int(param, BLOCK_OPT_SIZE, img_size); 5275 5276 /* Parse -o options */ 5277 if (options) { 5278 param = parse_option_parameters(options, create_options, param); 5279 if (param == NULL) { 5280 error_setg(errp, "Invalid options for file format '%s'.", fmt); 5281 goto out; 5282 } 5283 } 5284 5285 if (base_filename) { 5286 if (set_option_parameter(param, BLOCK_OPT_BACKING_FILE, 5287 base_filename)) { 5288 error_setg(errp, "Backing file not supported for file format '%s'", 5289 fmt); 5290 goto out; 5291 } 5292 } 5293 5294 if (base_fmt) { 5295 if (set_option_parameter(param, BLOCK_OPT_BACKING_FMT, base_fmt)) { 5296 error_setg(errp, "Backing file format not supported for file " 5297 "format '%s'", fmt); 5298 goto out; 5299 } 5300 } 5301 5302 backing_file = get_option_parameter(param, BLOCK_OPT_BACKING_FILE); 5303 if (backing_file && backing_file->value.s) { 5304 if (!strcmp(filename, backing_file->value.s)) { 5305 error_setg(errp, "Error: Trying to create an image with the " 5306 "same filename as the backing file"); 5307 goto out; 5308 } 5309 } 5310 5311 backing_fmt = get_option_parameter(param, BLOCK_OPT_BACKING_FMT); 5312 if (backing_fmt && backing_fmt->value.s) { 5313 backing_drv = bdrv_find_format(backing_fmt->value.s); 5314 if (!backing_drv) { 5315 error_setg(errp, "Unknown backing file format '%s'", 5316 backing_fmt->value.s); 5317 goto out; 5318 } 5319 } 5320 5321 // The size for the image must always be specified, with one exception: 5322 // If we are using a backing file, we can obtain the size from there 5323 size = get_option_parameter(param, BLOCK_OPT_SIZE); 5324 if (size && size->value.n == -1) { 5325 if (backing_file && backing_file->value.s) { 5326 BlockDriverState *bs; 5327 uint64_t size; 5328 char buf[32]; 5329 int back_flags; 5330 5331 /* backing files always opened read-only */ 5332 back_flags = 5333 flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING); 5334 5335 bs = NULL; 5336 ret = bdrv_open(&bs, backing_file->value.s, NULL, NULL, back_flags, 5337 backing_drv, &local_err); 5338 if (ret < 0) { 5339 error_setg_errno(errp, -ret, "Could not open '%s': %s", 5340 backing_file->value.s, 5341 error_get_pretty(local_err)); 5342 error_free(local_err); 5343 local_err = NULL; 5344 goto out; 5345 } 5346 bdrv_get_geometry(bs, &size); 5347 size *= 512; 5348 5349 snprintf(buf, sizeof(buf), "%" PRId64, size); 5350 set_option_parameter(param, BLOCK_OPT_SIZE, buf); 5351 5352 bdrv_unref(bs); 5353 } else { 5354 error_setg(errp, "Image creation needs a size parameter"); 5355 goto out; 5356 } 5357 } 5358 5359 if (!quiet) { 5360 printf("Formatting '%s', fmt=%s ", filename, fmt); 5361 print_option_parameters(param); 5362 puts(""); 5363 } 5364 ret = bdrv_create(drv, filename, param, &local_err); 5365 if (ret == -EFBIG) { 5366 /* This is generally a better message than whatever the driver would 5367 * deliver (especially because of the cluster_size_hint), since that 5368 * is most probably not much different from "image too large". */ 5369 const char *cluster_size_hint = ""; 5370 if (get_option_parameter(create_options, BLOCK_OPT_CLUSTER_SIZE)) { 5371 cluster_size_hint = " (try using a larger cluster size)"; 5372 } 5373 error_setg(errp, "The image size is too large for file format '%s'" 5374 "%s", fmt, cluster_size_hint); 5375 error_free(local_err); 5376 local_err = NULL; 5377 } 5378 5379 out: 5380 free_option_parameters(create_options); 5381 free_option_parameters(param); 5382 5383 if (local_err) { 5384 error_propagate(errp, local_err); 5385 } 5386 } 5387 5388 AioContext *bdrv_get_aio_context(BlockDriverState *bs) 5389 { 5390 /* Currently BlockDriverState always uses the main loop AioContext */ 5391 return qemu_get_aio_context(); 5392 } 5393 5394 void bdrv_add_before_write_notifier(BlockDriverState *bs, 5395 NotifierWithReturn *notifier) 5396 { 5397 notifier_with_return_list_add(&bs->before_write_notifiers, notifier); 5398 } 5399 5400 int bdrv_amend_options(BlockDriverState *bs, QEMUOptionParameter *options) 5401 { 5402 if (bs->drv->bdrv_amend_options == NULL) { 5403 return -ENOTSUP; 5404 } 5405 return bs->drv->bdrv_amend_options(bs, options); 5406 } 5407 5408 /* This function will be called by the bdrv_recurse_is_first_non_filter method 5409 * of block filter and by bdrv_is_first_non_filter. 5410 * It is used to test if the given bs is the candidate or recurse more in the 5411 * node graph. 5412 */ 5413 bool bdrv_recurse_is_first_non_filter(BlockDriverState *bs, 5414 BlockDriverState *candidate) 5415 { 5416 /* return false if basic checks fails */ 5417 if (!bs || !bs->drv) { 5418 return false; 5419 } 5420 5421 /* the code reached a non block filter driver -> check if the bs is 5422 * the same as the candidate. It's the recursion termination condition. 5423 */ 5424 if (!bs->drv->is_filter) { 5425 return bs == candidate; 5426 } 5427 /* Down this path the driver is a block filter driver */ 5428 5429 /* If the block filter recursion method is defined use it to recurse down 5430 * the node graph. 5431 */ 5432 if (bs->drv->bdrv_recurse_is_first_non_filter) { 5433 return bs->drv->bdrv_recurse_is_first_non_filter(bs, candidate); 5434 } 5435 5436 /* the driver is a block filter but don't allow to recurse -> return false 5437 */ 5438 return false; 5439 } 5440 5441 /* This function checks if the candidate is the first non filter bs down it's 5442 * bs chain. Since we don't have pointers to parents it explore all bs chains 5443 * from the top. Some filters can choose not to pass down the recursion. 5444 */ 5445 bool bdrv_is_first_non_filter(BlockDriverState *candidate) 5446 { 5447 BlockDriverState *bs; 5448 5449 /* walk down the bs forest recursively */ 5450 QTAILQ_FOREACH(bs, &bdrv_states, device_list) { 5451 bool perm; 5452 5453 /* try to recurse in this top level bs */ 5454 perm = bdrv_recurse_is_first_non_filter(bs, candidate); 5455 5456 /* candidate is the first non filter */ 5457 if (perm) { 5458 return true; 5459 } 5460 } 5461 5462 return false; 5463 } 5464