1 /* 2 * QEMU System Emulator block driver 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a copy 7 * of this software and associated documentation files (the "Software"), to deal 8 * in the Software without restriction, including without limitation the rights 9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 * copies of the Software, and to permit persons to whom the Software is 11 * furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 * THE SOFTWARE. 23 */ 24 #include "config-host.h" 25 #include "qemu-common.h" 26 #include "trace.h" 27 #include "monitor/monitor.h" 28 #include "block/block_int.h" 29 #include "block/blockjob.h" 30 #include "qemu/module.h" 31 #include "qapi/qmp/qjson.h" 32 #include "sysemu/sysemu.h" 33 #include "qemu/notify.h" 34 #include "block/coroutine.h" 35 #include "qmp-commands.h" 36 #include "qemu/timer.h" 37 38 #ifdef CONFIG_BSD 39 #include <sys/types.h> 40 #include <sys/stat.h> 41 #include <sys/ioctl.h> 42 #include <sys/queue.h> 43 #ifndef __DragonFly__ 44 #include <sys/disk.h> 45 #endif 46 #endif 47 48 #ifdef _WIN32 49 #include <windows.h> 50 #endif 51 52 #define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */ 53 54 typedef enum { 55 BDRV_REQ_COPY_ON_READ = 0x1, 56 BDRV_REQ_ZERO_WRITE = 0x2, 57 } BdrvRequestFlags; 58 59 static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load); 60 static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs, 61 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, 62 BlockDriverCompletionFunc *cb, void *opaque); 63 static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs, 64 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, 65 BlockDriverCompletionFunc *cb, void *opaque); 66 static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs, 67 int64_t sector_num, int nb_sectors, 68 QEMUIOVector *iov); 69 static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs, 70 int64_t sector_num, int nb_sectors, 71 QEMUIOVector *iov); 72 static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs, 73 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, 74 BdrvRequestFlags flags); 75 static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs, 76 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, 77 BdrvRequestFlags flags); 78 static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs, 79 int64_t sector_num, 80 QEMUIOVector *qiov, 81 int nb_sectors, 82 BlockDriverCompletionFunc *cb, 83 void *opaque, 84 bool is_write); 85 static void coroutine_fn bdrv_co_do_rw(void *opaque); 86 static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs, 87 int64_t sector_num, int nb_sectors); 88 89 static QTAILQ_HEAD(, BlockDriverState) bdrv_states = 90 QTAILQ_HEAD_INITIALIZER(bdrv_states); 91 92 static QLIST_HEAD(, BlockDriver) bdrv_drivers = 93 
QLIST_HEAD_INITIALIZER(bdrv_drivers); 94 95 /* If non-zero, use only whitelisted block drivers */ 96 static int use_bdrv_whitelist; 97 98 #ifdef _WIN32 99 static int is_windows_drive_prefix(const char *filename) 100 { 101 return (((filename[0] >= 'a' && filename[0] <= 'z') || 102 (filename[0] >= 'A' && filename[0] <= 'Z')) && 103 filename[1] == ':'); 104 } 105 106 int is_windows_drive(const char *filename) 107 { 108 if (is_windows_drive_prefix(filename) && 109 filename[2] == '\0') 110 return 1; 111 if (strstart(filename, "\\\\.\\", NULL) || 112 strstart(filename, "//./", NULL)) 113 return 1; 114 return 0; 115 } 116 #endif 117 118 /* throttling disk I/O limits */ 119 void bdrv_set_io_limits(BlockDriverState *bs, 120 ThrottleConfig *cfg) 121 { 122 int i; 123 124 throttle_config(&bs->throttle_state, cfg); 125 126 for (i = 0; i < 2; i++) { 127 qemu_co_enter_next(&bs->throttled_reqs[i]); 128 } 129 } 130 131 /* this function drain all the throttled IOs */ 132 static bool bdrv_start_throttled_reqs(BlockDriverState *bs) 133 { 134 bool drained = false; 135 bool enabled = bs->io_limits_enabled; 136 int i; 137 138 bs->io_limits_enabled = false; 139 140 for (i = 0; i < 2; i++) { 141 while (qemu_co_enter_next(&bs->throttled_reqs[i])) { 142 drained = true; 143 } 144 } 145 146 bs->io_limits_enabled = enabled; 147 148 return drained; 149 } 150 151 void bdrv_io_limits_disable(BlockDriverState *bs) 152 { 153 bs->io_limits_enabled = false; 154 155 bdrv_start_throttled_reqs(bs); 156 157 throttle_destroy(&bs->throttle_state); 158 } 159 160 static void bdrv_throttle_read_timer_cb(void *opaque) 161 { 162 BlockDriverState *bs = opaque; 163 qemu_co_enter_next(&bs->throttled_reqs[0]); 164 } 165 166 static void bdrv_throttle_write_timer_cb(void *opaque) 167 { 168 BlockDriverState *bs = opaque; 169 qemu_co_enter_next(&bs->throttled_reqs[1]); 170 } 171 172 /* should be called before bdrv_set_io_limits if a limit is set */ 173 void bdrv_io_limits_enable(BlockDriverState *bs) 174 { 175 assert(!bs->io_limits_enabled); 176 throttle_init(&bs->throttle_state, 177 QEMU_CLOCK_VIRTUAL, 178 bdrv_throttle_read_timer_cb, 179 bdrv_throttle_write_timer_cb, 180 bs); 181 bs->io_limits_enabled = true; 182 } 183 184 /* This function makes an IO wait if needed 185 * 186 * @nb_sectors: the number of sectors of the IO 187 * @is_write: is the IO a write 188 */ 189 static void bdrv_io_limits_intercept(BlockDriverState *bs, 190 int nb_sectors, 191 bool is_write) 192 { 193 /* does this io must wait */ 194 bool must_wait = throttle_schedule_timer(&bs->throttle_state, is_write); 195 196 /* if must wait or any request of this type throttled queue the IO */ 197 if (must_wait || 198 !qemu_co_queue_empty(&bs->throttled_reqs[is_write])) { 199 qemu_co_queue_wait(&bs->throttled_reqs[is_write]); 200 } 201 202 /* the IO will be executed, do the accounting */ 203 throttle_account(&bs->throttle_state, 204 is_write, 205 nb_sectors * BDRV_SECTOR_SIZE); 206 207 /* if the next request must wait -> do nothing */ 208 if (throttle_schedule_timer(&bs->throttle_state, is_write)) { 209 return; 210 } 211 212 /* else queue next request for execution */ 213 qemu_co_queue_next(&bs->throttled_reqs[is_write]); 214 } 215 216 /* check if the path starts with "<protocol>:" */ 217 static int path_has_protocol(const char *path) 218 { 219 const char *p; 220 221 #ifdef _WIN32 222 if (is_windows_drive(path) || 223 is_windows_drive_prefix(path)) { 224 return 0; 225 } 226 p = path + strcspn(path, ":/\\"); 227 #else 228 p = path + strcspn(path, ":/"); 229 #endif 230 231 return *p 
== ':';
}

int path_is_absolute(const char *path)
{
#ifdef _WIN32
    /* specific case for names like: "\\.\d:" */
    if (is_windows_drive(path) || is_windows_drive_prefix(path)) {
        return 1;
    }
    return (*path == '/' || *path == '\\');
#else
    return (*path == '/');
#endif
}

/* if filename is absolute, just copy it to dest. Otherwise, build a
   path to it by considering it relative to base_path. URLs are
   supported. */
void path_combine(char *dest, int dest_size,
                  const char *base_path,
                  const char *filename)
{
    const char *p, *p1;
    int len;

    if (dest_size <= 0)
        return;
    if (path_is_absolute(filename)) {
        pstrcpy(dest, dest_size, filename);
    } else {
        p = strchr(base_path, ':');
        if (p)
            p++;
        else
            p = base_path;
        p1 = strrchr(base_path, '/');
#ifdef _WIN32
        {
            const char *p2;
            p2 = strrchr(base_path, '\\');
            if (!p1 || p2 > p1)
                p1 = p2;
        }
#endif
        if (p1)
            p1++;
        else
            p1 = base_path;
        if (p1 > p)
            p = p1;
        len = p - base_path;
        if (len > dest_size - 1)
            len = dest_size - 1;
        memcpy(dest, base_path, len);
        dest[len] = '\0';
        pstrcat(dest, dest_size, filename);
    }
}

void bdrv_get_full_backing_filename(BlockDriverState *bs, char *dest, size_t sz)
{
    if (bs->backing_file[0] == '\0' || path_has_protocol(bs->backing_file)) {
        pstrcpy(dest, sz, bs->backing_file);
    } else {
        path_combine(dest, sz, bs->filename, bs->backing_file);
    }
}

void bdrv_register(BlockDriver *bdrv)
{
    /* Block drivers without coroutine functions need emulation */
    if (!bdrv->bdrv_co_readv) {
        bdrv->bdrv_co_readv = bdrv_co_readv_em;
        bdrv->bdrv_co_writev = bdrv_co_writev_em;

        /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
         * the block driver lacks aio we need to emulate that too.
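         *
         * For instance, a hypothetical AIO-only driver (the "foo" name and
         * its callbacks are invented for illustration) gains working
         * bdrv_co_readv/bdrv_co_writev entry points simply by registering:
         *
         *     static BlockDriver bdrv_foo = {
         *         .format_name     = "foo",
         *         .instance_size   = sizeof(BDRVFooState),
         *         .bdrv_file_open  = foo_open,
         *         .bdrv_aio_readv  = foo_aio_readv,
         *         .bdrv_aio_writev = foo_aio_writev,
         *         .bdrv_close      = foo_close,
         *     };
         *
         *     static void bdrv_foo_init(void)
         *     {
         *         bdrv_register(&bdrv_foo);
         *     }
         *     block_init(bdrv_foo_init);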
309 */ 310 if (!bdrv->bdrv_aio_readv) { 311 /* add AIO emulation layer */ 312 bdrv->bdrv_aio_readv = bdrv_aio_readv_em; 313 bdrv->bdrv_aio_writev = bdrv_aio_writev_em; 314 } 315 } 316 317 QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list); 318 } 319 320 /* create a new block device (by default it is empty) */ 321 BlockDriverState *bdrv_new(const char *device_name) 322 { 323 BlockDriverState *bs; 324 325 bs = g_malloc0(sizeof(BlockDriverState)); 326 pstrcpy(bs->device_name, sizeof(bs->device_name), device_name); 327 if (device_name[0] != '\0') { 328 QTAILQ_INSERT_TAIL(&bdrv_states, bs, list); 329 } 330 bdrv_iostatus_disable(bs); 331 notifier_list_init(&bs->close_notifiers); 332 notifier_with_return_list_init(&bs->before_write_notifiers); 333 qemu_co_queue_init(&bs->throttled_reqs[0]); 334 qemu_co_queue_init(&bs->throttled_reqs[1]); 335 bs->refcnt = 1; 336 337 return bs; 338 } 339 340 void bdrv_add_close_notifier(BlockDriverState *bs, Notifier *notify) 341 { 342 notifier_list_add(&bs->close_notifiers, notify); 343 } 344 345 BlockDriver *bdrv_find_format(const char *format_name) 346 { 347 BlockDriver *drv1; 348 QLIST_FOREACH(drv1, &bdrv_drivers, list) { 349 if (!strcmp(drv1->format_name, format_name)) { 350 return drv1; 351 } 352 } 353 return NULL; 354 } 355 356 static int bdrv_is_whitelisted(BlockDriver *drv, bool read_only) 357 { 358 static const char *whitelist_rw[] = { 359 CONFIG_BDRV_RW_WHITELIST 360 }; 361 static const char *whitelist_ro[] = { 362 CONFIG_BDRV_RO_WHITELIST 363 }; 364 const char **p; 365 366 if (!whitelist_rw[0] && !whitelist_ro[0]) { 367 return 1; /* no whitelist, anything goes */ 368 } 369 370 for (p = whitelist_rw; *p; p++) { 371 if (!strcmp(drv->format_name, *p)) { 372 return 1; 373 } 374 } 375 if (read_only) { 376 for (p = whitelist_ro; *p; p++) { 377 if (!strcmp(drv->format_name, *p)) { 378 return 1; 379 } 380 } 381 } 382 return 0; 383 } 384 385 BlockDriver *bdrv_find_whitelisted_format(const char *format_name, 386 bool read_only) 387 { 388 BlockDriver *drv = bdrv_find_format(format_name); 389 return drv && bdrv_is_whitelisted(drv, read_only) ? drv : NULL; 390 } 391 392 typedef struct CreateCo { 393 BlockDriver *drv; 394 char *filename; 395 QEMUOptionParameter *options; 396 int ret; 397 } CreateCo; 398 399 static void coroutine_fn bdrv_create_co_entry(void *opaque) 400 { 401 CreateCo *cco = opaque; 402 assert(cco->drv); 403 404 cco->ret = cco->drv->bdrv_create(cco->filename, cco->options); 405 } 406 407 int bdrv_create(BlockDriver *drv, const char* filename, 408 QEMUOptionParameter *options) 409 { 410 int ret; 411 412 Coroutine *co; 413 CreateCo cco = { 414 .drv = drv, 415 .filename = g_strdup(filename), 416 .options = options, 417 .ret = NOT_DONE, 418 }; 419 420 if (!drv->bdrv_create) { 421 ret = -ENOTSUP; 422 goto out; 423 } 424 425 if (qemu_in_coroutine()) { 426 /* Fast-path if already in coroutine context */ 427 bdrv_create_co_entry(&cco); 428 } else { 429 co = qemu_coroutine_create(bdrv_create_co_entry); 430 qemu_coroutine_enter(co, &cco); 431 while (cco.ret == NOT_DONE) { 432 qemu_aio_wait(); 433 } 434 } 435 436 ret = cco.ret; 437 438 out: 439 g_free(cco.filename); 440 return ret; 441 } 442 443 int bdrv_create_file(const char* filename, QEMUOptionParameter *options) 444 { 445 BlockDriver *drv; 446 447 drv = bdrv_find_protocol(filename, true); 448 if (drv == NULL) { 449 return -ENOENT; 450 } 451 452 return bdrv_create(drv, filename, options); 453 } 454 455 /* 456 * Create a uniquely-named empty temporary file. 
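 *
 * A typical call (this is essentially what the snapshot=on path in
 * bdrv_open() below does):
 *
 *     char tmp_filename[PATH_MAX + 1];
 *     int ret = get_tmp_filename(tmp_filename, sizeof(tmp_filename));
 *     if (ret < 0) {
 *         return ret;
 *     }
 *
 * On success the caller is responsible for unlinking the file again.
 *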
457 * Return 0 upon success, otherwise a negative errno value. 458 */ 459 int get_tmp_filename(char *filename, int size) 460 { 461 #ifdef _WIN32 462 char temp_dir[MAX_PATH]; 463 /* GetTempFileName requires that its output buffer (4th param) 464 have length MAX_PATH or greater. */ 465 assert(size >= MAX_PATH); 466 return (GetTempPath(MAX_PATH, temp_dir) 467 && GetTempFileName(temp_dir, "qem", 0, filename) 468 ? 0 : -GetLastError()); 469 #else 470 int fd; 471 const char *tmpdir; 472 tmpdir = getenv("TMPDIR"); 473 if (!tmpdir) 474 tmpdir = "/tmp"; 475 if (snprintf(filename, size, "%s/vl.XXXXXX", tmpdir) >= size) { 476 return -EOVERFLOW; 477 } 478 fd = mkstemp(filename); 479 if (fd < 0) { 480 return -errno; 481 } 482 if (close(fd) != 0) { 483 unlink(filename); 484 return -errno; 485 } 486 return 0; 487 #endif 488 } 489 490 /* 491 * Detect host devices. By convention, /dev/cdrom[N] is always 492 * recognized as a host CDROM. 493 */ 494 static BlockDriver *find_hdev_driver(const char *filename) 495 { 496 int score_max = 0, score; 497 BlockDriver *drv = NULL, *d; 498 499 QLIST_FOREACH(d, &bdrv_drivers, list) { 500 if (d->bdrv_probe_device) { 501 score = d->bdrv_probe_device(filename); 502 if (score > score_max) { 503 score_max = score; 504 drv = d; 505 } 506 } 507 } 508 509 return drv; 510 } 511 512 BlockDriver *bdrv_find_protocol(const char *filename, 513 bool allow_protocol_prefix) 514 { 515 BlockDriver *drv1; 516 char protocol[128]; 517 int len; 518 const char *p; 519 520 /* TODO Drivers without bdrv_file_open must be specified explicitly */ 521 522 /* 523 * XXX(hch): we really should not let host device detection 524 * override an explicit protocol specification, but moving this 525 * later breaks access to device names with colons in them. 526 * Thanks to the brain-dead persistent naming schemes on udev- 527 * based Linux systems those actually are quite common. 
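 *
 * For example, "/dev/cdrom" is claimed by the host CD-ROM probe even though
 * it carries no protocol prefix, while something like "nbd:localhost:10809"
 * only reaches the protocol-prefix parsing below because no device probe
 * claims it first.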
528 */ 529 drv1 = find_hdev_driver(filename); 530 if (drv1) { 531 return drv1; 532 } 533 534 if (!path_has_protocol(filename) || !allow_protocol_prefix) { 535 return bdrv_find_format("file"); 536 } 537 538 p = strchr(filename, ':'); 539 assert(p != NULL); 540 len = p - filename; 541 if (len > sizeof(protocol) - 1) 542 len = sizeof(protocol) - 1; 543 memcpy(protocol, filename, len); 544 protocol[len] = '\0'; 545 QLIST_FOREACH(drv1, &bdrv_drivers, list) { 546 if (drv1->protocol_name && 547 !strcmp(drv1->protocol_name, protocol)) { 548 return drv1; 549 } 550 } 551 return NULL; 552 } 553 554 static int find_image_format(BlockDriverState *bs, const char *filename, 555 BlockDriver **pdrv) 556 { 557 int score, score_max; 558 BlockDriver *drv1, *drv; 559 uint8_t buf[2048]; 560 int ret = 0; 561 562 /* Return the raw BlockDriver * to scsi-generic devices or empty drives */ 563 if (bs->sg || !bdrv_is_inserted(bs) || bdrv_getlength(bs) == 0) { 564 drv = bdrv_find_format("raw"); 565 if (!drv) { 566 ret = -ENOENT; 567 } 568 *pdrv = drv; 569 return ret; 570 } 571 572 ret = bdrv_pread(bs, 0, buf, sizeof(buf)); 573 if (ret < 0) { 574 *pdrv = NULL; 575 return ret; 576 } 577 578 score_max = 0; 579 drv = NULL; 580 QLIST_FOREACH(drv1, &bdrv_drivers, list) { 581 if (drv1->bdrv_probe) { 582 score = drv1->bdrv_probe(buf, ret, filename); 583 if (score > score_max) { 584 score_max = score; 585 drv = drv1; 586 } 587 } 588 } 589 if (!drv) { 590 ret = -ENOENT; 591 } 592 *pdrv = drv; 593 return ret; 594 } 595 596 /** 597 * Set the current 'total_sectors' value 598 */ 599 static int refresh_total_sectors(BlockDriverState *bs, int64_t hint) 600 { 601 BlockDriver *drv = bs->drv; 602 603 /* Do not attempt drv->bdrv_getlength() on scsi-generic devices */ 604 if (bs->sg) 605 return 0; 606 607 /* query actual device if possible, otherwise just trust the hint */ 608 if (drv->bdrv_getlength) { 609 int64_t length = drv->bdrv_getlength(bs); 610 if (length < 0) { 611 return length; 612 } 613 hint = length >> BDRV_SECTOR_BITS; 614 } 615 616 bs->total_sectors = hint; 617 return 0; 618 } 619 620 /** 621 * Set open flags for a given discard mode 622 * 623 * Return 0 on success, -1 if the discard mode was invalid. 624 */ 625 int bdrv_parse_discard_flags(const char *mode, int *flags) 626 { 627 *flags &= ~BDRV_O_UNMAP; 628 629 if (!strcmp(mode, "off") || !strcmp(mode, "ignore")) { 630 /* do nothing */ 631 } else if (!strcmp(mode, "on") || !strcmp(mode, "unmap")) { 632 *flags |= BDRV_O_UNMAP; 633 } else { 634 return -1; 635 } 636 637 return 0; 638 } 639 640 /** 641 * Set open flags for a given cache mode 642 * 643 * Return 0 on success, -1 if the cache mode was invalid. 644 */ 645 int bdrv_parse_cache_flags(const char *mode, int *flags) 646 { 647 *flags &= ~BDRV_O_CACHE_MASK; 648 649 if (!strcmp(mode, "off") || !strcmp(mode, "none")) { 650 *flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB; 651 } else if (!strcmp(mode, "directsync")) { 652 *flags |= BDRV_O_NOCACHE; 653 } else if (!strcmp(mode, "writeback")) { 654 *flags |= BDRV_O_CACHE_WB; 655 } else if (!strcmp(mode, "unsafe")) { 656 *flags |= BDRV_O_CACHE_WB; 657 *flags |= BDRV_O_NO_FLUSH; 658 } else if (!strcmp(mode, "writethrough")) { 659 /* this is the default */ 660 } else { 661 return -1; 662 } 663 664 return 0; 665 } 666 667 /** 668 * The copy-on-read flag is actually a reference count so multiple users may 669 * use the feature without worrying about clobbering its previous state. 670 * Copy-on-read stays enabled until all users have called to disable it. 
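 *
 * A minimal sketch of the intended pairing (the image streaming job, for
 * instance, enables it only for the duration of the job):
 *
 *     bdrv_enable_copy_on_read(bs);
 *     ... do work that benefits from copy-on-read ...
 *     bdrv_disable_copy_on_read(bs);
 *
 * Two independent users doing this concurrently are safe: copy-on-read only
 * switches off again once both have called the disable function.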
671 */ 672 void bdrv_enable_copy_on_read(BlockDriverState *bs) 673 { 674 bs->copy_on_read++; 675 } 676 677 void bdrv_disable_copy_on_read(BlockDriverState *bs) 678 { 679 assert(bs->copy_on_read > 0); 680 bs->copy_on_read--; 681 } 682 683 static int bdrv_open_flags(BlockDriverState *bs, int flags) 684 { 685 int open_flags = flags | BDRV_O_CACHE_WB; 686 687 /* 688 * Clear flags that are internal to the block layer before opening the 689 * image. 690 */ 691 open_flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING); 692 693 /* 694 * Snapshots should be writable. 695 */ 696 if (bs->is_temporary) { 697 open_flags |= BDRV_O_RDWR; 698 } 699 700 return open_flags; 701 } 702 703 /* 704 * Common part for opening disk images and files 705 * 706 * Removes all processed options from *options. 707 */ 708 static int bdrv_open_common(BlockDriverState *bs, BlockDriverState *file, 709 QDict *options, int flags, BlockDriver *drv) 710 { 711 int ret, open_flags; 712 const char *filename; 713 714 assert(drv != NULL); 715 assert(bs->file == NULL); 716 assert(options != NULL && bs->options != options); 717 718 if (file != NULL) { 719 filename = file->filename; 720 } else { 721 filename = qdict_get_try_str(options, "filename"); 722 } 723 724 trace_bdrv_open_common(bs, filename ?: "", flags, drv->format_name); 725 726 /* bdrv_open() with directly using a protocol as drv. This layer is already 727 * opened, so assign it to bs (while file becomes a closed BlockDriverState) 728 * and return immediately. */ 729 if (file != NULL && drv->bdrv_file_open) { 730 bdrv_swap(file, bs); 731 return 0; 732 } 733 734 bs->open_flags = flags; 735 bs->buffer_alignment = 512; 736 bs->zero_beyond_eof = true; 737 open_flags = bdrv_open_flags(bs, flags); 738 bs->read_only = !(open_flags & BDRV_O_RDWR); 739 740 if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv, bs->read_only)) { 741 return -ENOTSUP; 742 } 743 744 assert(bs->copy_on_read == 0); /* bdrv_new() and bdrv_close() make it so */ 745 if (!bs->read_only && (flags & BDRV_O_COPY_ON_READ)) { 746 bdrv_enable_copy_on_read(bs); 747 } 748 749 if (filename != NULL) { 750 pstrcpy(bs->filename, sizeof(bs->filename), filename); 751 } else { 752 bs->filename[0] = '\0'; 753 } 754 755 bs->drv = drv; 756 bs->opaque = g_malloc0(drv->instance_size); 757 758 bs->enable_write_cache = !!(flags & BDRV_O_CACHE_WB); 759 760 /* Open the image, either directly or using a protocol */ 761 if (drv->bdrv_file_open) { 762 assert(file == NULL); 763 assert(drv->bdrv_parse_filename || filename != NULL); 764 ret = drv->bdrv_file_open(bs, options, open_flags); 765 } else { 766 if (file == NULL) { 767 qerror_report(ERROR_CLASS_GENERIC_ERROR, "Can't use '%s' as a " 768 "block driver for the protocol level", 769 drv->format_name); 770 ret = -EINVAL; 771 goto free_and_fail; 772 } 773 bs->file = file; 774 ret = drv->bdrv_open(bs, options, open_flags); 775 } 776 777 if (ret < 0) { 778 goto free_and_fail; 779 } 780 781 ret = refresh_total_sectors(bs, bs->total_sectors); 782 if (ret < 0) { 783 goto free_and_fail; 784 } 785 786 #ifndef _WIN32 787 if (bs->is_temporary) { 788 assert(filename != NULL); 789 unlink(filename); 790 } 791 #endif 792 return 0; 793 794 free_and_fail: 795 bs->file = NULL; 796 g_free(bs->opaque); 797 bs->opaque = NULL; 798 bs->drv = NULL; 799 return ret; 800 } 801 802 /* 803 * Opens a file using a protocol (file, host_device, nbd, ...) 804 * 805 * options is a QDict of options to pass to the block drivers, or NULL for an 806 * empty set of options. 
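 *
 * As an illustration only (the file name and flags here are made up), a
 * caller could open a local file through the "file" protocol driver with:
 *
 *     QDict *opts = qdict_new();
 *     BlockDriverState *bs;
 *     int ret;
 *
 *     qdict_put(opts, "driver", qstring_from_str("file"));
 *     ret = bdrv_file_open(&bs, "/tmp/test.img", opts, BDRV_O_RDWR);
 *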
The reference to the QDict belongs to the block layer 807 * after the call (even on failure), so if the caller intends to reuse the 808 * dictionary, it needs to use QINCREF() before calling bdrv_file_open. 809 */ 810 int bdrv_file_open(BlockDriverState **pbs, const char *filename, 811 QDict *options, int flags) 812 { 813 BlockDriverState *bs; 814 BlockDriver *drv; 815 const char *drvname; 816 bool allow_protocol_prefix = false; 817 int ret; 818 819 /* NULL means an empty set of options */ 820 if (options == NULL) { 821 options = qdict_new(); 822 } 823 824 bs = bdrv_new(""); 825 bs->options = options; 826 options = qdict_clone_shallow(options); 827 828 /* Fetch the file name from the options QDict if necessary */ 829 if (!filename) { 830 filename = qdict_get_try_str(options, "filename"); 831 } else if (filename && !qdict_haskey(options, "filename")) { 832 qdict_put(options, "filename", qstring_from_str(filename)); 833 allow_protocol_prefix = true; 834 } else { 835 qerror_report(ERROR_CLASS_GENERIC_ERROR, "Can't specify 'file' and " 836 "'filename' options at the same time"); 837 ret = -EINVAL; 838 goto fail; 839 } 840 841 /* Find the right block driver */ 842 drvname = qdict_get_try_str(options, "driver"); 843 if (drvname) { 844 drv = bdrv_find_whitelisted_format(drvname, !(flags & BDRV_O_RDWR)); 845 qdict_del(options, "driver"); 846 } else if (filename) { 847 drv = bdrv_find_protocol(filename, allow_protocol_prefix); 848 if (!drv) { 849 qerror_report(ERROR_CLASS_GENERIC_ERROR, "Unknown protocol"); 850 } 851 } else { 852 qerror_report(ERROR_CLASS_GENERIC_ERROR, 853 "Must specify either driver or file"); 854 drv = NULL; 855 } 856 857 if (!drv) { 858 ret = -ENOENT; 859 goto fail; 860 } 861 862 /* Parse the filename and open it */ 863 if (drv->bdrv_parse_filename && filename) { 864 Error *local_err = NULL; 865 drv->bdrv_parse_filename(filename, options, &local_err); 866 if (error_is_set(&local_err)) { 867 qerror_report_err(local_err); 868 error_free(local_err); 869 ret = -EINVAL; 870 goto fail; 871 } 872 qdict_del(options, "filename"); 873 } else if (!drv->bdrv_parse_filename && !filename) { 874 qerror_report(ERROR_CLASS_GENERIC_ERROR, 875 "The '%s' block driver requires a file name", 876 drv->format_name); 877 ret = -EINVAL; 878 goto fail; 879 } 880 881 ret = bdrv_open_common(bs, NULL, options, flags, drv); 882 if (ret < 0) { 883 goto fail; 884 } 885 886 /* Check if any unknown options were used */ 887 if (qdict_size(options) != 0) { 888 const QDictEntry *entry = qdict_first(options); 889 qerror_report(ERROR_CLASS_GENERIC_ERROR, "Block protocol '%s' doesn't " 890 "support the option '%s'", 891 drv->format_name, entry->key); 892 ret = -EINVAL; 893 goto fail; 894 } 895 QDECREF(options); 896 897 bs->growable = 1; 898 *pbs = bs; 899 return 0; 900 901 fail: 902 QDECREF(options); 903 if (!bs->drv) { 904 QDECREF(bs->options); 905 } 906 bdrv_unref(bs); 907 return ret; 908 } 909 910 /* 911 * Opens the backing file for a BlockDriverState if not yet open 912 * 913 * options is a QDict of options to pass to the block drivers, or NULL for an 914 * empty set of options. The reference to the QDict is transferred to this 915 * function (even on failure), so if the caller intends to reuse the dictionary, 916 * it needs to use QINCREF() before calling bdrv_file_open. 
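 *
 * A sketch of the reuse pattern described above:
 *
 *     QINCREF(backing_options);
 *     ret = bdrv_open_backing_file(bs, backing_options);
 *     ... backing_options is still a valid object here, although the
 *     ... block layer may have consumed entries from it
 *     QDECREF(backing_options);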
917 */ 918 int bdrv_open_backing_file(BlockDriverState *bs, QDict *options) 919 { 920 char backing_filename[PATH_MAX]; 921 int back_flags, ret; 922 BlockDriver *back_drv = NULL; 923 924 if (bs->backing_hd != NULL) { 925 QDECREF(options); 926 return 0; 927 } 928 929 /* NULL means an empty set of options */ 930 if (options == NULL) { 931 options = qdict_new(); 932 } 933 934 bs->open_flags &= ~BDRV_O_NO_BACKING; 935 if (qdict_haskey(options, "file.filename")) { 936 backing_filename[0] = '\0'; 937 } else if (bs->backing_file[0] == '\0' && qdict_size(options) == 0) { 938 QDECREF(options); 939 return 0; 940 } 941 942 bs->backing_hd = bdrv_new(""); 943 bdrv_get_full_backing_filename(bs, backing_filename, 944 sizeof(backing_filename)); 945 946 if (bs->backing_format[0] != '\0') { 947 back_drv = bdrv_find_format(bs->backing_format); 948 } 949 950 /* backing files always opened read-only */ 951 back_flags = bs->open_flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT); 952 953 ret = bdrv_open(bs->backing_hd, 954 *backing_filename ? backing_filename : NULL, options, 955 back_flags, back_drv); 956 if (ret < 0) { 957 bdrv_unref(bs->backing_hd); 958 bs->backing_hd = NULL; 959 bs->open_flags |= BDRV_O_NO_BACKING; 960 return ret; 961 } 962 return 0; 963 } 964 965 static void extract_subqdict(QDict *src, QDict **dst, const char *start) 966 { 967 const QDictEntry *entry, *next; 968 const char *p; 969 970 *dst = qdict_new(); 971 entry = qdict_first(src); 972 973 while (entry != NULL) { 974 next = qdict_next(src, entry); 975 if (strstart(entry->key, start, &p)) { 976 qobject_incref(entry->value); 977 qdict_put_obj(*dst, p, entry->value); 978 qdict_del(src, entry->key); 979 } 980 entry = next; 981 } 982 } 983 984 /* 985 * Opens a disk image (raw, qcow2, vmdk, ...) 986 * 987 * options is a QDict of options to pass to the block drivers, or NULL for an 988 * empty set of options. The reference to the QDict belongs to the block layer 989 * after the call (even on failure), so if the caller intends to reuse the 990 * dictionary, it needs to use QINCREF() before calling bdrv_open. 991 */ 992 int bdrv_open(BlockDriverState *bs, const char *filename, QDict *options, 993 int flags, BlockDriver *drv) 994 { 995 int ret; 996 /* TODO: extra byte is a hack to ensure MAX_PATH space on Windows. 
*/ 997 char tmp_filename[PATH_MAX + 1]; 998 BlockDriverState *file = NULL; 999 QDict *file_options = NULL; 1000 const char *drvname; 1001 1002 /* NULL means an empty set of options */ 1003 if (options == NULL) { 1004 options = qdict_new(); 1005 } 1006 1007 bs->options = options; 1008 options = qdict_clone_shallow(options); 1009 1010 /* For snapshot=on, create a temporary qcow2 overlay */ 1011 if (flags & BDRV_O_SNAPSHOT) { 1012 BlockDriverState *bs1; 1013 int64_t total_size; 1014 BlockDriver *bdrv_qcow2; 1015 QEMUOptionParameter *create_options; 1016 char backing_filename[PATH_MAX]; 1017 1018 if (qdict_size(options) != 0) { 1019 error_report("Can't use snapshot=on with driver-specific options"); 1020 ret = -EINVAL; 1021 goto fail; 1022 } 1023 assert(filename != NULL); 1024 1025 /* if snapshot, we create a temporary backing file and open it 1026 instead of opening 'filename' directly */ 1027 1028 /* if there is a backing file, use it */ 1029 bs1 = bdrv_new(""); 1030 ret = bdrv_open(bs1, filename, NULL, 0, drv); 1031 if (ret < 0) { 1032 bdrv_unref(bs1); 1033 goto fail; 1034 } 1035 total_size = bdrv_getlength(bs1) & BDRV_SECTOR_MASK; 1036 1037 bdrv_unref(bs1); 1038 1039 ret = get_tmp_filename(tmp_filename, sizeof(tmp_filename)); 1040 if (ret < 0) { 1041 goto fail; 1042 } 1043 1044 /* Real path is meaningless for protocols */ 1045 if (path_has_protocol(filename)) { 1046 snprintf(backing_filename, sizeof(backing_filename), 1047 "%s", filename); 1048 } else if (!realpath(filename, backing_filename)) { 1049 ret = -errno; 1050 goto fail; 1051 } 1052 1053 bdrv_qcow2 = bdrv_find_format("qcow2"); 1054 create_options = parse_option_parameters("", bdrv_qcow2->create_options, 1055 NULL); 1056 1057 set_option_parameter_int(create_options, BLOCK_OPT_SIZE, total_size); 1058 set_option_parameter(create_options, BLOCK_OPT_BACKING_FILE, 1059 backing_filename); 1060 if (drv) { 1061 set_option_parameter(create_options, BLOCK_OPT_BACKING_FMT, 1062 drv->format_name); 1063 } 1064 1065 ret = bdrv_create(bdrv_qcow2, tmp_filename, create_options); 1066 free_option_parameters(create_options); 1067 if (ret < 0) { 1068 goto fail; 1069 } 1070 1071 filename = tmp_filename; 1072 drv = bdrv_qcow2; 1073 bs->is_temporary = 1; 1074 } 1075 1076 /* Open image file without format layer */ 1077 if (flags & BDRV_O_RDWR) { 1078 flags |= BDRV_O_ALLOW_RDWR; 1079 } 1080 1081 extract_subqdict(options, &file_options, "file."); 1082 1083 ret = bdrv_file_open(&file, filename, file_options, 1084 bdrv_open_flags(bs, flags | BDRV_O_UNMAP)); 1085 if (ret < 0) { 1086 goto fail; 1087 } 1088 1089 /* Find the right image format driver */ 1090 drvname = qdict_get_try_str(options, "driver"); 1091 if (drvname) { 1092 drv = bdrv_find_whitelisted_format(drvname, !(flags & BDRV_O_RDWR)); 1093 qdict_del(options, "driver"); 1094 } 1095 1096 if (!drv) { 1097 ret = find_image_format(file, filename, &drv); 1098 } 1099 1100 if (!drv) { 1101 goto unlink_and_fail; 1102 } 1103 1104 /* Open the image */ 1105 ret = bdrv_open_common(bs, file, options, flags, drv); 1106 if (ret < 0) { 1107 goto unlink_and_fail; 1108 } 1109 1110 if (bs->file != file) { 1111 bdrv_unref(file); 1112 file = NULL; 1113 } 1114 1115 /* If there is a backing file, use it */ 1116 if ((flags & BDRV_O_NO_BACKING) == 0) { 1117 QDict *backing_options; 1118 1119 extract_subqdict(options, &backing_options, "backing."); 1120 ret = bdrv_open_backing_file(bs, backing_options); 1121 if (ret < 0) { 1122 goto close_and_fail; 1123 } 1124 } 1125 1126 /* Check if any unknown options were used */ 1127 if 
(qdict_size(options) != 0) {
        const QDictEntry *entry = qdict_first(options);
        qerror_report(ERROR_CLASS_GENERIC_ERROR, "Block format '%s' used by "
                      "device '%s' doesn't support the option '%s'",
                      drv->format_name, bs->device_name, entry->key);

        ret = -EINVAL;
        goto close_and_fail;
    }
    QDECREF(options);

    if (!bdrv_key_required(bs)) {
        bdrv_dev_change_media_cb(bs, true);
    }

    return 0;

unlink_and_fail:
    if (file != NULL) {
        bdrv_unref(file);
    }
    if (bs->is_temporary) {
        unlink(filename);
    }
fail:
    QDECREF(bs->options);
    QDECREF(options);
    bs->options = NULL;
    return ret;

close_and_fail:
    bdrv_close(bs);
    QDECREF(options);
    return ret;
}

typedef struct BlockReopenQueueEntry {
    bool prepared;
    BDRVReopenState state;
    QSIMPLEQ_ENTRY(BlockReopenQueueEntry) entry;
} BlockReopenQueueEntry;

/*
 * Adds a BlockDriverState to a simple queue for an atomic, transactional
 * reopen of multiple devices.
 *
 * bs_queue can either be an existing BlockReopenQueue that has had
 * QSIMPLEQ_INIT already performed, or alternatively may be NULL, in which
 * case a new BlockReopenQueue will be created and initialized. This newly
 * created BlockReopenQueue should be passed back in for subsequent calls
 * that are intended to be of the same atomic 'set'.
 *
 * bs is the BlockDriverState to add to the reopen queue.
 *
 * flags contains the open flags for the associated bs
 *
 * returns a pointer to bs_queue, which is either the newly allocated
 * bs_queue, or the existing bs_queue being used.
 *
 */
BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
                                    BlockDriverState *bs, int flags)
{
    assert(bs != NULL);

    BlockReopenQueueEntry *bs_entry;
    if (bs_queue == NULL) {
        bs_queue = g_new0(BlockReopenQueue, 1);
        QSIMPLEQ_INIT(bs_queue);
    }

    if (bs->file) {
        bdrv_reopen_queue(bs_queue, bs->file, flags);
    }

    bs_entry = g_new0(BlockReopenQueueEntry, 1);
    QSIMPLEQ_INSERT_TAIL(bs_queue, bs_entry, entry);

    bs_entry->state.bs = bs;
    bs_entry->state.flags = flags;

    return bs_queue;
}

/*
 * Reopen multiple BlockDriverStates atomically & transactionally.
 *
 * The queue passed in (bs_queue) must have been built up previously
 * via bdrv_reopen_queue().
 *
 * Reopens all BDS specified in the queue, with the appropriate
 * flags.  All devices are prepared for reopen, and failure of any
 * device will cause all device changes to be abandoned, and intermediate
 * data cleaned up.
 *
 * If all devices prepare successfully, then the changes are committed
 * to all devices.
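 *
 * Typical usage, and essentially what bdrv_reopen() below does for a single
 * device (bs0/bs1 and their flags stand in for whatever is being reopened):
 *
 *     Error *local_err = NULL;
 *     BlockReopenQueue *queue = NULL;
 *
 *     queue = bdrv_reopen_queue(queue, bs0, flags0);
 *     queue = bdrv_reopen_queue(queue, bs1, flags1);
 *     if (bdrv_reopen_multiple(queue, &local_err) < 0) {
 *         ... no device was reopened; local_err describes the failure ...
 *     }
 *
 * The queue itself is freed by bdrv_reopen_multiple(), so the caller must
 * not reuse it afterwards.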
1224 * 1225 */ 1226 int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp) 1227 { 1228 int ret = -1; 1229 BlockReopenQueueEntry *bs_entry, *next; 1230 Error *local_err = NULL; 1231 1232 assert(bs_queue != NULL); 1233 1234 bdrv_drain_all(); 1235 1236 QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) { 1237 if (bdrv_reopen_prepare(&bs_entry->state, bs_queue, &local_err)) { 1238 error_propagate(errp, local_err); 1239 goto cleanup; 1240 } 1241 bs_entry->prepared = true; 1242 } 1243 1244 /* If we reach this point, we have success and just need to apply the 1245 * changes 1246 */ 1247 QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) { 1248 bdrv_reopen_commit(&bs_entry->state); 1249 } 1250 1251 ret = 0; 1252 1253 cleanup: 1254 QSIMPLEQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) { 1255 if (ret && bs_entry->prepared) { 1256 bdrv_reopen_abort(&bs_entry->state); 1257 } 1258 g_free(bs_entry); 1259 } 1260 g_free(bs_queue); 1261 return ret; 1262 } 1263 1264 1265 /* Reopen a single BlockDriverState with the specified flags. */ 1266 int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp) 1267 { 1268 int ret = -1; 1269 Error *local_err = NULL; 1270 BlockReopenQueue *queue = bdrv_reopen_queue(NULL, bs, bdrv_flags); 1271 1272 ret = bdrv_reopen_multiple(queue, &local_err); 1273 if (local_err != NULL) { 1274 error_propagate(errp, local_err); 1275 } 1276 return ret; 1277 } 1278 1279 1280 /* 1281 * Prepares a BlockDriverState for reopen. All changes are staged in the 1282 * 'opaque' field of the BDRVReopenState, which is used and allocated by 1283 * the block driver layer .bdrv_reopen_prepare() 1284 * 1285 * bs is the BlockDriverState to reopen 1286 * flags are the new open flags 1287 * queue is the reopen queue 1288 * 1289 * Returns 0 on success, non-zero on error. On error errp will be set 1290 * as well. 1291 * 1292 * On failure, bdrv_reopen_abort() will be called to clean up any data. 1293 * It is the responsibility of the caller to then call the abort() or 1294 * commit() for any other BDS that have been left in a prepare() state 1295 * 1296 */ 1297 int bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue, 1298 Error **errp) 1299 { 1300 int ret = -1; 1301 Error *local_err = NULL; 1302 BlockDriver *drv; 1303 1304 assert(reopen_state != NULL); 1305 assert(reopen_state->bs->drv != NULL); 1306 drv = reopen_state->bs->drv; 1307 1308 /* if we are to stay read-only, do not allow permission change 1309 * to r/w */ 1310 if (!(reopen_state->bs->open_flags & BDRV_O_ALLOW_RDWR) && 1311 reopen_state->flags & BDRV_O_RDWR) { 1312 error_set(errp, QERR_DEVICE_IS_READ_ONLY, 1313 reopen_state->bs->device_name); 1314 goto error; 1315 } 1316 1317 1318 ret = bdrv_flush(reopen_state->bs); 1319 if (ret) { 1320 error_set(errp, ERROR_CLASS_GENERIC_ERROR, "Error (%s) flushing drive", 1321 strerror(-ret)); 1322 goto error; 1323 } 1324 1325 if (drv->bdrv_reopen_prepare) { 1326 ret = drv->bdrv_reopen_prepare(reopen_state, queue, &local_err); 1327 if (ret) { 1328 if (local_err != NULL) { 1329 error_propagate(errp, local_err); 1330 } else { 1331 error_setg(errp, "failed while preparing to reopen image '%s'", 1332 reopen_state->bs->filename); 1333 } 1334 goto error; 1335 } 1336 } else { 1337 /* It is currently mandatory to have a bdrv_reopen_prepare() 1338 * handler for each supported drv. 
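         *
         * Even a format with nothing to stage must provide a trivial
         * handler, roughly (a sketch; "foo" is a made-up driver name):
         *
         *     static int foo_reopen_prepare(BDRVReopenState *state,
         *                                   BlockReopenQueue *queue,
         *                                   Error **errp)
         *     {
         *         return 0;
         *     }
         *
         * and hook it up via .bdrv_reopen_prepare in its BlockDriver.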
*/ 1339 error_set(errp, QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED, 1340 drv->format_name, reopen_state->bs->device_name, 1341 "reopening of file"); 1342 ret = -1; 1343 goto error; 1344 } 1345 1346 ret = 0; 1347 1348 error: 1349 return ret; 1350 } 1351 1352 /* 1353 * Takes the staged changes for the reopen from bdrv_reopen_prepare(), and 1354 * makes them final by swapping the staging BlockDriverState contents into 1355 * the active BlockDriverState contents. 1356 */ 1357 void bdrv_reopen_commit(BDRVReopenState *reopen_state) 1358 { 1359 BlockDriver *drv; 1360 1361 assert(reopen_state != NULL); 1362 drv = reopen_state->bs->drv; 1363 assert(drv != NULL); 1364 1365 /* If there are any driver level actions to take */ 1366 if (drv->bdrv_reopen_commit) { 1367 drv->bdrv_reopen_commit(reopen_state); 1368 } 1369 1370 /* set BDS specific flags now */ 1371 reopen_state->bs->open_flags = reopen_state->flags; 1372 reopen_state->bs->enable_write_cache = !!(reopen_state->flags & 1373 BDRV_O_CACHE_WB); 1374 reopen_state->bs->read_only = !(reopen_state->flags & BDRV_O_RDWR); 1375 } 1376 1377 /* 1378 * Abort the reopen, and delete and free the staged changes in 1379 * reopen_state 1380 */ 1381 void bdrv_reopen_abort(BDRVReopenState *reopen_state) 1382 { 1383 BlockDriver *drv; 1384 1385 assert(reopen_state != NULL); 1386 drv = reopen_state->bs->drv; 1387 assert(drv != NULL); 1388 1389 if (drv->bdrv_reopen_abort) { 1390 drv->bdrv_reopen_abort(reopen_state); 1391 } 1392 } 1393 1394 1395 void bdrv_close(BlockDriverState *bs) 1396 { 1397 if (bs->job) { 1398 block_job_cancel_sync(bs->job); 1399 } 1400 bdrv_drain_all(); /* complete I/O */ 1401 bdrv_flush(bs); 1402 bdrv_drain_all(); /* in case flush left pending I/O */ 1403 notifier_list_notify(&bs->close_notifiers, bs); 1404 1405 if (bs->drv) { 1406 if (bs->backing_hd) { 1407 bdrv_unref(bs->backing_hd); 1408 bs->backing_hd = NULL; 1409 } 1410 bs->drv->bdrv_close(bs); 1411 g_free(bs->opaque); 1412 #ifdef _WIN32 1413 if (bs->is_temporary) { 1414 unlink(bs->filename); 1415 } 1416 #endif 1417 bs->opaque = NULL; 1418 bs->drv = NULL; 1419 bs->copy_on_read = 0; 1420 bs->backing_file[0] = '\0'; 1421 bs->backing_format[0] = '\0'; 1422 bs->total_sectors = 0; 1423 bs->encrypted = 0; 1424 bs->valid_key = 0; 1425 bs->sg = 0; 1426 bs->growable = 0; 1427 bs->zero_beyond_eof = false; 1428 QDECREF(bs->options); 1429 bs->options = NULL; 1430 1431 if (bs->file != NULL) { 1432 bdrv_unref(bs->file); 1433 bs->file = NULL; 1434 } 1435 } 1436 1437 bdrv_dev_change_media_cb(bs, false); 1438 1439 /*throttling disk I/O limits*/ 1440 if (bs->io_limits_enabled) { 1441 bdrv_io_limits_disable(bs); 1442 } 1443 } 1444 1445 void bdrv_close_all(void) 1446 { 1447 BlockDriverState *bs; 1448 1449 QTAILQ_FOREACH(bs, &bdrv_states, list) { 1450 bdrv_close(bs); 1451 } 1452 } 1453 1454 /* Check if any requests are in-flight (including throttled requests) */ 1455 static bool bdrv_requests_pending(BlockDriverState *bs) 1456 { 1457 if (!QLIST_EMPTY(&bs->tracked_requests)) { 1458 return true; 1459 } 1460 if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) { 1461 return true; 1462 } 1463 if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) { 1464 return true; 1465 } 1466 if (bs->file && bdrv_requests_pending(bs->file)) { 1467 return true; 1468 } 1469 if (bs->backing_hd && bdrv_requests_pending(bs->backing_hd)) { 1470 return true; 1471 } 1472 return false; 1473 } 1474 1475 static bool bdrv_requests_pending_all(void) 1476 { 1477 BlockDriverState *bs; 1478 QTAILQ_FOREACH(bs, &bdrv_states, list) { 1479 if 
(bdrv_requests_pending(bs)) { 1480 return true; 1481 } 1482 } 1483 return false; 1484 } 1485 1486 /* 1487 * Wait for pending requests to complete across all BlockDriverStates 1488 * 1489 * This function does not flush data to disk, use bdrv_flush_all() for that 1490 * after calling this function. 1491 * 1492 * Note that completion of an asynchronous I/O operation can trigger any 1493 * number of other I/O operations on other devices---for example a coroutine 1494 * can be arbitrarily complex and a constant flow of I/O can come until the 1495 * coroutine is complete. Because of this, it is not possible to have a 1496 * function to drain a single device's I/O queue. 1497 */ 1498 void bdrv_drain_all(void) 1499 { 1500 /* Always run first iteration so any pending completion BHs run */ 1501 bool busy = true; 1502 BlockDriverState *bs; 1503 1504 while (busy) { 1505 /* FIXME: We do not have timer support here, so this is effectively 1506 * a busy wait. 1507 */ 1508 QTAILQ_FOREACH(bs, &bdrv_states, list) { 1509 if (bdrv_start_throttled_reqs(bs)) { 1510 busy = true; 1511 } 1512 } 1513 1514 busy = bdrv_requests_pending_all(); 1515 busy |= aio_poll(qemu_get_aio_context(), busy); 1516 } 1517 } 1518 1519 /* make a BlockDriverState anonymous by removing from bdrv_state list. 1520 Also, NULL terminate the device_name to prevent double remove */ 1521 void bdrv_make_anon(BlockDriverState *bs) 1522 { 1523 if (bs->device_name[0] != '\0') { 1524 QTAILQ_REMOVE(&bdrv_states, bs, list); 1525 } 1526 bs->device_name[0] = '\0'; 1527 } 1528 1529 static void bdrv_rebind(BlockDriverState *bs) 1530 { 1531 if (bs->drv && bs->drv->bdrv_rebind) { 1532 bs->drv->bdrv_rebind(bs); 1533 } 1534 } 1535 1536 static void bdrv_move_feature_fields(BlockDriverState *bs_dest, 1537 BlockDriverState *bs_src) 1538 { 1539 /* move some fields that need to stay attached to the device */ 1540 bs_dest->open_flags = bs_src->open_flags; 1541 1542 /* dev info */ 1543 bs_dest->dev_ops = bs_src->dev_ops; 1544 bs_dest->dev_opaque = bs_src->dev_opaque; 1545 bs_dest->dev = bs_src->dev; 1546 bs_dest->buffer_alignment = bs_src->buffer_alignment; 1547 bs_dest->copy_on_read = bs_src->copy_on_read; 1548 1549 bs_dest->enable_write_cache = bs_src->enable_write_cache; 1550 1551 /* i/o throttled req */ 1552 memcpy(&bs_dest->throttle_state, 1553 &bs_src->throttle_state, 1554 sizeof(ThrottleState)); 1555 bs_dest->throttled_reqs[0] = bs_src->throttled_reqs[0]; 1556 bs_dest->throttled_reqs[1] = bs_src->throttled_reqs[1]; 1557 bs_dest->io_limits_enabled = bs_src->io_limits_enabled; 1558 1559 /* r/w error */ 1560 bs_dest->on_read_error = bs_src->on_read_error; 1561 bs_dest->on_write_error = bs_src->on_write_error; 1562 1563 /* i/o status */ 1564 bs_dest->iostatus_enabled = bs_src->iostatus_enabled; 1565 bs_dest->iostatus = bs_src->iostatus; 1566 1567 /* dirty bitmap */ 1568 bs_dest->dirty_bitmap = bs_src->dirty_bitmap; 1569 1570 /* reference count */ 1571 bs_dest->refcnt = bs_src->refcnt; 1572 1573 /* job */ 1574 bs_dest->in_use = bs_src->in_use; 1575 bs_dest->job = bs_src->job; 1576 1577 /* keep the same entry in bdrv_states */ 1578 pstrcpy(bs_dest->device_name, sizeof(bs_dest->device_name), 1579 bs_src->device_name); 1580 bs_dest->list = bs_src->list; 1581 } 1582 1583 /* 1584 * Swap bs contents for two image chains while they are live, 1585 * while keeping required fields on the BlockDriverState that is 1586 * actually attached to a device. 1587 * 1588 * This will modify the BlockDriverState fields, and swap contents 1589 * between bs_new and bs_old. 
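 *
 * The usual way in is bdrv_append() below; taking an external snapshot, for
 * example, conceptually looks like this (a sketch with invented names and no
 * error handling):
 *
 *     BlockDriverState *overlay = bdrv_new("");
 *     bdrv_open(overlay, overlay_filename, NULL, flags,
 *               bdrv_find_format("qcow2"));
 *     bdrv_append(overlay, bs);
 *
 * after which the guest-visible 'bs' presents the new overlay with the old
 * contents as its backing file.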
Both bs_new and bs_old are modified. 1590 * 1591 * bs_new is required to be anonymous. 1592 * 1593 * This function does not create any image files. 1594 */ 1595 void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old) 1596 { 1597 BlockDriverState tmp; 1598 1599 /* bs_new must be anonymous and shouldn't have anything fancy enabled */ 1600 assert(bs_new->device_name[0] == '\0'); 1601 assert(bs_new->dirty_bitmap == NULL); 1602 assert(bs_new->job == NULL); 1603 assert(bs_new->dev == NULL); 1604 assert(bs_new->in_use == 0); 1605 assert(bs_new->io_limits_enabled == false); 1606 assert(!throttle_have_timer(&bs_new->throttle_state)); 1607 1608 tmp = *bs_new; 1609 *bs_new = *bs_old; 1610 *bs_old = tmp; 1611 1612 /* there are some fields that should not be swapped, move them back */ 1613 bdrv_move_feature_fields(&tmp, bs_old); 1614 bdrv_move_feature_fields(bs_old, bs_new); 1615 bdrv_move_feature_fields(bs_new, &tmp); 1616 1617 /* bs_new shouldn't be in bdrv_states even after the swap! */ 1618 assert(bs_new->device_name[0] == '\0'); 1619 1620 /* Check a few fields that should remain attached to the device */ 1621 assert(bs_new->dev == NULL); 1622 assert(bs_new->job == NULL); 1623 assert(bs_new->in_use == 0); 1624 assert(bs_new->io_limits_enabled == false); 1625 assert(!throttle_have_timer(&bs_new->throttle_state)); 1626 1627 bdrv_rebind(bs_new); 1628 bdrv_rebind(bs_old); 1629 } 1630 1631 /* 1632 * Add new bs contents at the top of an image chain while the chain is 1633 * live, while keeping required fields on the top layer. 1634 * 1635 * This will modify the BlockDriverState fields, and swap contents 1636 * between bs_new and bs_top. Both bs_new and bs_top are modified. 1637 * 1638 * bs_new is required to be anonymous. 1639 * 1640 * This function does not create any image files. 1641 */ 1642 void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top) 1643 { 1644 bdrv_swap(bs_new, bs_top); 1645 1646 /* The contents of 'tmp' will become bs_top, as we are 1647 * swapping bs_new and bs_top contents. */ 1648 bs_top->backing_hd = bs_new; 1649 bs_top->open_flags &= ~BDRV_O_NO_BACKING; 1650 pstrcpy(bs_top->backing_file, sizeof(bs_top->backing_file), 1651 bs_new->filename); 1652 pstrcpy(bs_top->backing_format, sizeof(bs_top->backing_format), 1653 bs_new->drv ? 
bs_new->drv->format_name : ""); 1654 } 1655 1656 static void bdrv_delete(BlockDriverState *bs) 1657 { 1658 assert(!bs->dev); 1659 assert(!bs->job); 1660 assert(!bs->in_use); 1661 assert(!bs->refcnt); 1662 1663 bdrv_close(bs); 1664 1665 /* remove from list, if necessary */ 1666 bdrv_make_anon(bs); 1667 1668 g_free(bs); 1669 } 1670 1671 int bdrv_attach_dev(BlockDriverState *bs, void *dev) 1672 /* TODO change to DeviceState *dev when all users are qdevified */ 1673 { 1674 if (bs->dev) { 1675 return -EBUSY; 1676 } 1677 bs->dev = dev; 1678 bdrv_iostatus_reset(bs); 1679 return 0; 1680 } 1681 1682 /* TODO qdevified devices don't use this, remove when devices are qdevified */ 1683 void bdrv_attach_dev_nofail(BlockDriverState *bs, void *dev) 1684 { 1685 if (bdrv_attach_dev(bs, dev) < 0) { 1686 abort(); 1687 } 1688 } 1689 1690 void bdrv_detach_dev(BlockDriverState *bs, void *dev) 1691 /* TODO change to DeviceState *dev when all users are qdevified */ 1692 { 1693 assert(bs->dev == dev); 1694 bs->dev = NULL; 1695 bs->dev_ops = NULL; 1696 bs->dev_opaque = NULL; 1697 bs->buffer_alignment = 512; 1698 } 1699 1700 /* TODO change to return DeviceState * when all users are qdevified */ 1701 void *bdrv_get_attached_dev(BlockDriverState *bs) 1702 { 1703 return bs->dev; 1704 } 1705 1706 void bdrv_set_dev_ops(BlockDriverState *bs, const BlockDevOps *ops, 1707 void *opaque) 1708 { 1709 bs->dev_ops = ops; 1710 bs->dev_opaque = opaque; 1711 } 1712 1713 void bdrv_emit_qmp_error_event(const BlockDriverState *bdrv, 1714 enum MonitorEvent ev, 1715 BlockErrorAction action, bool is_read) 1716 { 1717 QObject *data; 1718 const char *action_str; 1719 1720 switch (action) { 1721 case BDRV_ACTION_REPORT: 1722 action_str = "report"; 1723 break; 1724 case BDRV_ACTION_IGNORE: 1725 action_str = "ignore"; 1726 break; 1727 case BDRV_ACTION_STOP: 1728 action_str = "stop"; 1729 break; 1730 default: 1731 abort(); 1732 } 1733 1734 data = qobject_from_jsonf("{ 'device': %s, 'action': %s, 'operation': %s }", 1735 bdrv->device_name, 1736 action_str, 1737 is_read ? 
"read" : "write"); 1738 monitor_protocol_event(ev, data); 1739 1740 qobject_decref(data); 1741 } 1742 1743 static void bdrv_emit_qmp_eject_event(BlockDriverState *bs, bool ejected) 1744 { 1745 QObject *data; 1746 1747 data = qobject_from_jsonf("{ 'device': %s, 'tray-open': %i }", 1748 bdrv_get_device_name(bs), ejected); 1749 monitor_protocol_event(QEVENT_DEVICE_TRAY_MOVED, data); 1750 1751 qobject_decref(data); 1752 } 1753 1754 static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load) 1755 { 1756 if (bs->dev_ops && bs->dev_ops->change_media_cb) { 1757 bool tray_was_closed = !bdrv_dev_is_tray_open(bs); 1758 bs->dev_ops->change_media_cb(bs->dev_opaque, load); 1759 if (tray_was_closed) { 1760 /* tray open */ 1761 bdrv_emit_qmp_eject_event(bs, true); 1762 } 1763 if (load) { 1764 /* tray close */ 1765 bdrv_emit_qmp_eject_event(bs, false); 1766 } 1767 } 1768 } 1769 1770 bool bdrv_dev_has_removable_media(BlockDriverState *bs) 1771 { 1772 return !bs->dev || (bs->dev_ops && bs->dev_ops->change_media_cb); 1773 } 1774 1775 void bdrv_dev_eject_request(BlockDriverState *bs, bool force) 1776 { 1777 if (bs->dev_ops && bs->dev_ops->eject_request_cb) { 1778 bs->dev_ops->eject_request_cb(bs->dev_opaque, force); 1779 } 1780 } 1781 1782 bool bdrv_dev_is_tray_open(BlockDriverState *bs) 1783 { 1784 if (bs->dev_ops && bs->dev_ops->is_tray_open) { 1785 return bs->dev_ops->is_tray_open(bs->dev_opaque); 1786 } 1787 return false; 1788 } 1789 1790 static void bdrv_dev_resize_cb(BlockDriverState *bs) 1791 { 1792 if (bs->dev_ops && bs->dev_ops->resize_cb) { 1793 bs->dev_ops->resize_cb(bs->dev_opaque); 1794 } 1795 } 1796 1797 bool bdrv_dev_is_medium_locked(BlockDriverState *bs) 1798 { 1799 if (bs->dev_ops && bs->dev_ops->is_medium_locked) { 1800 return bs->dev_ops->is_medium_locked(bs->dev_opaque); 1801 } 1802 return false; 1803 } 1804 1805 /* 1806 * Run consistency checks on an image 1807 * 1808 * Returns 0 if the check could be completed (it doesn't mean that the image is 1809 * free of errors) or -errno when an internal error occurred. The results of the 1810 * check are stored in res. 1811 */ 1812 int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix) 1813 { 1814 if (bs->drv->bdrv_check == NULL) { 1815 return -ENOTSUP; 1816 } 1817 1818 memset(res, 0, sizeof(*res)); 1819 return bs->drv->bdrv_check(bs, res, fix); 1820 } 1821 1822 #define COMMIT_BUF_SECTORS 2048 1823 1824 /* commit COW file into the raw image */ 1825 int bdrv_commit(BlockDriverState *bs) 1826 { 1827 BlockDriver *drv = bs->drv; 1828 int64_t sector, total_sectors; 1829 int n, ro, open_flags; 1830 int ret = 0; 1831 uint8_t *buf; 1832 char filename[PATH_MAX]; 1833 1834 if (!drv) 1835 return -ENOMEDIUM; 1836 1837 if (!bs->backing_hd) { 1838 return -ENOTSUP; 1839 } 1840 1841 if (bdrv_in_use(bs) || bdrv_in_use(bs->backing_hd)) { 1842 return -EBUSY; 1843 } 1844 1845 ro = bs->backing_hd->read_only; 1846 /* Use pstrcpy (not strncpy): filename must be NUL-terminated. 
*/ 1847 pstrcpy(filename, sizeof(filename), bs->backing_hd->filename); 1848 open_flags = bs->backing_hd->open_flags; 1849 1850 if (ro) { 1851 if (bdrv_reopen(bs->backing_hd, open_flags | BDRV_O_RDWR, NULL)) { 1852 return -EACCES; 1853 } 1854 } 1855 1856 total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS; 1857 buf = g_malloc(COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE); 1858 1859 for (sector = 0; sector < total_sectors; sector += n) { 1860 ret = bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n); 1861 if (ret < 0) { 1862 goto ro_cleanup; 1863 } 1864 if (ret) { 1865 if (bdrv_read(bs, sector, buf, n) != 0) { 1866 ret = -EIO; 1867 goto ro_cleanup; 1868 } 1869 1870 if (bdrv_write(bs->backing_hd, sector, buf, n) != 0) { 1871 ret = -EIO; 1872 goto ro_cleanup; 1873 } 1874 } 1875 } 1876 1877 if (drv->bdrv_make_empty) { 1878 ret = drv->bdrv_make_empty(bs); 1879 bdrv_flush(bs); 1880 } 1881 1882 /* 1883 * Make sure all data we wrote to the backing device is actually 1884 * stable on disk. 1885 */ 1886 if (bs->backing_hd) 1887 bdrv_flush(bs->backing_hd); 1888 1889 ro_cleanup: 1890 g_free(buf); 1891 1892 if (ro) { 1893 /* ignoring error return here */ 1894 bdrv_reopen(bs->backing_hd, open_flags & ~BDRV_O_RDWR, NULL); 1895 } 1896 1897 return ret; 1898 } 1899 1900 int bdrv_commit_all(void) 1901 { 1902 BlockDriverState *bs; 1903 1904 QTAILQ_FOREACH(bs, &bdrv_states, list) { 1905 if (bs->drv && bs->backing_hd) { 1906 int ret = bdrv_commit(bs); 1907 if (ret < 0) { 1908 return ret; 1909 } 1910 } 1911 } 1912 return 0; 1913 } 1914 1915 /** 1916 * Remove an active request from the tracked requests list 1917 * 1918 * This function should be called when a tracked request is completing. 1919 */ 1920 static void tracked_request_end(BdrvTrackedRequest *req) 1921 { 1922 QLIST_REMOVE(req, list); 1923 qemu_co_queue_restart_all(&req->wait_queue); 1924 } 1925 1926 /** 1927 * Add an active request to the tracked requests list 1928 */ 1929 static void tracked_request_begin(BdrvTrackedRequest *req, 1930 BlockDriverState *bs, 1931 int64_t sector_num, 1932 int nb_sectors, bool is_write) 1933 { 1934 *req = (BdrvTrackedRequest){ 1935 .bs = bs, 1936 .sector_num = sector_num, 1937 .nb_sectors = nb_sectors, 1938 .is_write = is_write, 1939 .co = qemu_coroutine_self(), 1940 }; 1941 1942 qemu_co_queue_init(&req->wait_queue); 1943 1944 QLIST_INSERT_HEAD(&bs->tracked_requests, req, list); 1945 } 1946 1947 /** 1948 * Round a region to cluster boundaries 1949 */ 1950 void bdrv_round_to_clusters(BlockDriverState *bs, 1951 int64_t sector_num, int nb_sectors, 1952 int64_t *cluster_sector_num, 1953 int *cluster_nb_sectors) 1954 { 1955 BlockDriverInfo bdi; 1956 1957 if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) { 1958 *cluster_sector_num = sector_num; 1959 *cluster_nb_sectors = nb_sectors; 1960 } else { 1961 int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE; 1962 *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c); 1963 *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num + 1964 nb_sectors, c); 1965 } 1966 } 1967 1968 static bool tracked_request_overlaps(BdrvTrackedRequest *req, 1969 int64_t sector_num, int nb_sectors) { 1970 /* aaaa bbbb */ 1971 if (sector_num >= req->sector_num + req->nb_sectors) { 1972 return false; 1973 } 1974 /* bbbb aaaa */ 1975 if (req->sector_num >= sector_num + nb_sectors) { 1976 return false; 1977 } 1978 return true; 1979 } 1980 1981 static void coroutine_fn wait_for_overlapping_requests(BlockDriverState *bs, 1982 int64_t sector_num, int nb_sectors) 1983 { 1984 BdrvTrackedRequest *req; 
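    /*
     * Worked example of the rounding done below: with a 64k cluster
     * (128 sectors), a request covering sectors [130, 140) is widened to the
     * cluster-aligned range [128, 256), so any other request touching that
     * cluster counts as overlapping and is serialized against this one.
     */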
1985 int64_t cluster_sector_num; 1986 int cluster_nb_sectors; 1987 bool retry; 1988 1989 /* If we touch the same cluster it counts as an overlap. This guarantees 1990 * that allocating writes will be serialized and not race with each other 1991 * for the same cluster. For example, in copy-on-read it ensures that the 1992 * CoR read and write operations are atomic and guest writes cannot 1993 * interleave between them. 1994 */ 1995 bdrv_round_to_clusters(bs, sector_num, nb_sectors, 1996 &cluster_sector_num, &cluster_nb_sectors); 1997 1998 do { 1999 retry = false; 2000 QLIST_FOREACH(req, &bs->tracked_requests, list) { 2001 if (tracked_request_overlaps(req, cluster_sector_num, 2002 cluster_nb_sectors)) { 2003 /* Hitting this means there was a reentrant request, for 2004 * example, a block driver issuing nested requests. This must 2005 * never happen since it means deadlock. 2006 */ 2007 assert(qemu_coroutine_self() != req->co); 2008 2009 qemu_co_queue_wait(&req->wait_queue); 2010 retry = true; 2011 break; 2012 } 2013 } 2014 } while (retry); 2015 } 2016 2017 /* 2018 * Return values: 2019 * 0 - success 2020 * -EINVAL - backing format specified, but no file 2021 * -ENOSPC - can't update the backing file because no space is left in the 2022 * image file header 2023 * -ENOTSUP - format driver doesn't support changing the backing file 2024 */ 2025 int bdrv_change_backing_file(BlockDriverState *bs, 2026 const char *backing_file, const char *backing_fmt) 2027 { 2028 BlockDriver *drv = bs->drv; 2029 int ret; 2030 2031 /* Backing file format doesn't make sense without a backing file */ 2032 if (backing_fmt && !backing_file) { 2033 return -EINVAL; 2034 } 2035 2036 if (drv->bdrv_change_backing_file != NULL) { 2037 ret = drv->bdrv_change_backing_file(bs, backing_file, backing_fmt); 2038 } else { 2039 ret = -ENOTSUP; 2040 } 2041 2042 if (ret == 0) { 2043 pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: ""); 2044 pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: ""); 2045 } 2046 return ret; 2047 } 2048 2049 /* 2050 * Finds the image layer in the chain that has 'bs' as its backing file. 2051 * 2052 * active is the current topmost image. 2053 * 2054 * Returns NULL if bs is not found in active's image chain, 2055 * or if active == bs. 2056 */ 2057 BlockDriverState *bdrv_find_overlay(BlockDriverState *active, 2058 BlockDriverState *bs) 2059 { 2060 BlockDriverState *overlay = NULL; 2061 BlockDriverState *intermediate; 2062 2063 assert(active != NULL); 2064 assert(bs != NULL); 2065 2066 /* if bs is the same as active, then by definition it has no overlay 2067 */ 2068 if (active == bs) { 2069 return NULL; 2070 } 2071 2072 intermediate = active; 2073 while (intermediate->backing_hd) { 2074 if (intermediate->backing_hd == bs) { 2075 overlay = intermediate; 2076 break; 2077 } 2078 intermediate = intermediate->backing_hd; 2079 } 2080 2081 return overlay; 2082 } 2083 2084 typedef struct BlkIntermediateStates { 2085 BlockDriverState *bs; 2086 QSIMPLEQ_ENTRY(BlkIntermediateStates) entry; 2087 } BlkIntermediateStates; 2088 2089 2090 /* 2091 * Drops images above 'base' up to and including 'top', and sets the image 2092 * above 'top' to have base as its backing file. 2093 * 2094 * Requires that the overlay to 'top' is opened r/w, so that the backing file 2095 * information in 'bs' can be properly updated. 
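 *
 * The live commit job, for instance, copies the data down first and only
 * then calls this, roughly (a sketch):
 *
 *     ret = bdrv_drop_intermediate(active, top, base);
 *     if (ret < 0) {
 *         ... the backing chain is left unmodified ...
 *     }
 *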
2096 * 2097 * E.g., this will convert the following chain: 2098 * bottom <- base <- intermediate <- top <- active 2099 * 2100 * to 2101 * 2102 * bottom <- base <- active 2103 * 2104 * It is allowed for bottom==base, in which case it converts: 2105 * 2106 * base <- intermediate <- top <- active 2107 * 2108 * to 2109 * 2110 * base <- active 2111 * 2112 * Error conditions: 2113 * if active == top, that is considered an error 2114 * 2115 */ 2116 int bdrv_drop_intermediate(BlockDriverState *active, BlockDriverState *top, 2117 BlockDriverState *base) 2118 { 2119 BlockDriverState *intermediate; 2120 BlockDriverState *base_bs = NULL; 2121 BlockDriverState *new_top_bs = NULL; 2122 BlkIntermediateStates *intermediate_state, *next; 2123 int ret = -EIO; 2124 2125 QSIMPLEQ_HEAD(states_to_delete, BlkIntermediateStates) states_to_delete; 2126 QSIMPLEQ_INIT(&states_to_delete); 2127 2128 if (!top->drv || !base->drv) { 2129 goto exit; 2130 } 2131 2132 new_top_bs = bdrv_find_overlay(active, top); 2133 2134 if (new_top_bs == NULL) { 2135 /* we could not find the image above 'top', this is an error */ 2136 goto exit; 2137 } 2138 2139 /* special case of new_top_bs->backing_hd already pointing to base - nothing 2140 * to do, no intermediate images */ 2141 if (new_top_bs->backing_hd == base) { 2142 ret = 0; 2143 goto exit; 2144 } 2145 2146 intermediate = top; 2147 2148 /* now we will go down through the list, and add each BDS we find 2149 * into our deletion queue, until we hit the 'base' 2150 */ 2151 while (intermediate) { 2152 intermediate_state = g_malloc0(sizeof(BlkIntermediateStates)); 2153 intermediate_state->bs = intermediate; 2154 QSIMPLEQ_INSERT_TAIL(&states_to_delete, intermediate_state, entry); 2155 2156 if (intermediate->backing_hd == base) { 2157 base_bs = intermediate->backing_hd; 2158 break; 2159 } 2160 intermediate = intermediate->backing_hd; 2161 } 2162 if (base_bs == NULL) { 2163 /* something went wrong, we did not end at the base. safely 2164 * unravel everything, and exit with error */ 2165 goto exit; 2166 } 2167 2168 /* success - we can delete the intermediate states, and link top->base */ 2169 ret = bdrv_change_backing_file(new_top_bs, base_bs->filename, 2170 base_bs->drv ? 
base_bs->drv->format_name : ""); 2171 if (ret) { 2172 goto exit; 2173 } 2174 new_top_bs->backing_hd = base_bs; 2175 2176 2177 QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) { 2178 /* so that bdrv_close() does not recursively close the chain */ 2179 intermediate_state->bs->backing_hd = NULL; 2180 bdrv_unref(intermediate_state->bs); 2181 } 2182 ret = 0; 2183 2184 exit: 2185 QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) { 2186 g_free(intermediate_state); 2187 } 2188 return ret; 2189 } 2190 2191 2192 static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset, 2193 size_t size) 2194 { 2195 int64_t len; 2196 2197 if (!bdrv_is_inserted(bs)) 2198 return -ENOMEDIUM; 2199 2200 if (bs->growable) 2201 return 0; 2202 2203 len = bdrv_getlength(bs); 2204 2205 if (offset < 0) 2206 return -EIO; 2207 2208 if ((offset > len) || (len - offset < size)) 2209 return -EIO; 2210 2211 return 0; 2212 } 2213 2214 static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num, 2215 int nb_sectors) 2216 { 2217 return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE, 2218 nb_sectors * BDRV_SECTOR_SIZE); 2219 } 2220 2221 typedef struct RwCo { 2222 BlockDriverState *bs; 2223 int64_t sector_num; 2224 int nb_sectors; 2225 QEMUIOVector *qiov; 2226 bool is_write; 2227 int ret; 2228 BdrvRequestFlags flags; 2229 } RwCo; 2230 2231 static void coroutine_fn bdrv_rw_co_entry(void *opaque) 2232 { 2233 RwCo *rwco = opaque; 2234 2235 if (!rwco->is_write) { 2236 rwco->ret = bdrv_co_do_readv(rwco->bs, rwco->sector_num, 2237 rwco->nb_sectors, rwco->qiov, 2238 rwco->flags); 2239 } else { 2240 rwco->ret = bdrv_co_do_writev(rwco->bs, rwco->sector_num, 2241 rwco->nb_sectors, rwco->qiov, 2242 rwco->flags); 2243 } 2244 } 2245 2246 /* 2247 * Process a vectored synchronous request using coroutines 2248 */ 2249 static int bdrv_rwv_co(BlockDriverState *bs, int64_t sector_num, 2250 QEMUIOVector *qiov, bool is_write, 2251 BdrvRequestFlags flags) 2252 { 2253 Coroutine *co; 2254 RwCo rwco = { 2255 .bs = bs, 2256 .sector_num = sector_num, 2257 .nb_sectors = qiov->size >> BDRV_SECTOR_BITS, 2258 .qiov = qiov, 2259 .is_write = is_write, 2260 .ret = NOT_DONE, 2261 .flags = flags, 2262 }; 2263 assert((qiov->size & (BDRV_SECTOR_SIZE - 1)) == 0); 2264 2265 /** 2266 * In sync call context, when the vcpu is blocked, this throttling timer 2267 * will not fire; so the I/O throttling function has to be disabled here 2268 * if it has been enabled. 2269 */ 2270 if (bs->io_limits_enabled) { 2271 fprintf(stderr, "Disabling I/O throttling on '%s' due " 2272 "to synchronous I/O.\n", bdrv_get_device_name(bs)); 2273 bdrv_io_limits_disable(bs); 2274 } 2275 2276 if (qemu_in_coroutine()) { 2277 /* Fast-path if already in coroutine context */ 2278 bdrv_rw_co_entry(&rwco); 2279 } else { 2280 co = qemu_coroutine_create(bdrv_rw_co_entry); 2281 qemu_coroutine_enter(co, &rwco); 2282 while (rwco.ret == NOT_DONE) { 2283 qemu_aio_wait(); 2284 } 2285 } 2286 return rwco.ret; 2287 } 2288 2289 /* 2290 * Process a synchronous request using coroutines 2291 */ 2292 static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf, 2293 int nb_sectors, bool is_write, BdrvRequestFlags flags) 2294 { 2295 QEMUIOVector qiov; 2296 struct iovec iov = { 2297 .iov_base = (void *)buf, 2298 .iov_len = nb_sectors * BDRV_SECTOR_SIZE, 2299 }; 2300 2301 qemu_iovec_init_external(&qiov, &iov, 1); 2302 return bdrv_rwv_co(bs, sector_num, &qiov, is_write, flags); 2303 } 2304 2305 /* return < 0 if error. 
See bdrv_write() for the return codes */ 2306 int bdrv_read(BlockDriverState *bs, int64_t sector_num, 2307 uint8_t *buf, int nb_sectors) 2308 { 2309 return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0); 2310 } 2311 2312 /* Just like bdrv_read(), but with I/O throttling temporarily disabled */ 2313 int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num, 2314 uint8_t *buf, int nb_sectors) 2315 { 2316 bool enabled; 2317 int ret; 2318 2319 enabled = bs->io_limits_enabled; 2320 bs->io_limits_enabled = false; 2321 ret = bdrv_read(bs, sector_num, buf, nb_sectors); 2322 bs->io_limits_enabled = enabled; 2323 return ret; 2324 } 2325 2326 /* Return < 0 if error. Important errors are: 2327 -EIO generic I/O error (may happen for all errors) 2328 -ENOMEDIUM No media inserted. 2329 -EINVAL Invalid sector number or nb_sectors 2330 -EACCES Trying to write a read-only device 2331 */ 2332 int bdrv_write(BlockDriverState *bs, int64_t sector_num, 2333 const uint8_t *buf, int nb_sectors) 2334 { 2335 return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0); 2336 } 2337 2338 int bdrv_writev(BlockDriverState *bs, int64_t sector_num, QEMUIOVector *qiov) 2339 { 2340 return bdrv_rwv_co(bs, sector_num, qiov, true, 0); 2341 } 2342 2343 int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num, int nb_sectors) 2344 { 2345 return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true, 2346 BDRV_REQ_ZERO_WRITE); 2347 } 2348 2349 int bdrv_pread(BlockDriverState *bs, int64_t offset, 2350 void *buf, int count1) 2351 { 2352 uint8_t tmp_buf[BDRV_SECTOR_SIZE]; 2353 int len, nb_sectors, count; 2354 int64_t sector_num; 2355 int ret; 2356 2357 count = count1; 2358 /* first read to align to sector start */ 2359 len = (BDRV_SECTOR_SIZE - offset) & (BDRV_SECTOR_SIZE - 1); 2360 if (len > count) 2361 len = count; 2362 sector_num = offset >> BDRV_SECTOR_BITS; 2363 if (len > 0) { 2364 if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0) 2365 return ret; 2366 memcpy(buf, tmp_buf + (offset & (BDRV_SECTOR_SIZE - 1)), len); 2367 count -= len; 2368 if (count == 0) 2369 return count1; 2370 sector_num++; 2371 buf += len; 2372 } 2373 2374 /* read the sectors "in place" */ 2375 nb_sectors = count >> BDRV_SECTOR_BITS; 2376 if (nb_sectors > 0) { 2377 if ((ret = bdrv_read(bs, sector_num, buf, nb_sectors)) < 0) 2378 return ret; 2379 sector_num += nb_sectors; 2380 len = nb_sectors << BDRV_SECTOR_BITS; 2381 buf += len; 2382 count -= len; 2383 } 2384 2385 /* add data from the last sector */ 2386 if (count > 0) { 2387 if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0) 2388 return ret; 2389 memcpy(buf, tmp_buf, count); 2390 } 2391 return count1; 2392 } 2393 2394 int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov) 2395 { 2396 uint8_t tmp_buf[BDRV_SECTOR_SIZE]; 2397 int len, nb_sectors, count; 2398 int64_t sector_num; 2399 int ret; 2400 2401 count = qiov->size; 2402 2403 /* first write to align to sector start */ 2404 len = (BDRV_SECTOR_SIZE - offset) & (BDRV_SECTOR_SIZE - 1); 2405 if (len > count) 2406 len = count; 2407 sector_num = offset >> BDRV_SECTOR_BITS; 2408 if (len > 0) { 2409 if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0) 2410 return ret; 2411 qemu_iovec_to_buf(qiov, 0, tmp_buf + (offset & (BDRV_SECTOR_SIZE - 1)), 2412 len); 2413 if ((ret = bdrv_write(bs, sector_num, tmp_buf, 1)) < 0) 2414 return ret; 2415 count -= len; 2416 if (count == 0) 2417 return qiov->size; 2418 sector_num++; 2419 } 2420 2421 /* write the sectors "in place" */ 2422 nb_sectors = count >> BDRV_SECTOR_BITS; 
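    /* The sector-aligned middle of the request below is submitted through a
     * temporary QEMUIOVector whose entries alias the caller's buffers
     * (qemu_iovec_concat() copies iovec entries, not data), so only the
     * unaligned head and tail go through the bounce buffer. */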
2423 if (nb_sectors > 0) { 2424 QEMUIOVector qiov_inplace; 2425 2426 qemu_iovec_init(&qiov_inplace, qiov->niov); 2427 qemu_iovec_concat(&qiov_inplace, qiov, len, 2428 nb_sectors << BDRV_SECTOR_BITS); 2429 ret = bdrv_writev(bs, sector_num, &qiov_inplace); 2430 qemu_iovec_destroy(&qiov_inplace); 2431 if (ret < 0) { 2432 return ret; 2433 } 2434 2435 sector_num += nb_sectors; 2436 len = nb_sectors << BDRV_SECTOR_BITS; 2437 count -= len; 2438 } 2439 2440 /* add data from the last sector */ 2441 if (count > 0) { 2442 if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0) 2443 return ret; 2444 qemu_iovec_to_buf(qiov, qiov->size - count, tmp_buf, count); 2445 if ((ret = bdrv_write(bs, sector_num, tmp_buf, 1)) < 0) 2446 return ret; 2447 } 2448 return qiov->size; 2449 } 2450 2451 int bdrv_pwrite(BlockDriverState *bs, int64_t offset, 2452 const void *buf, int count1) 2453 { 2454 QEMUIOVector qiov; 2455 struct iovec iov = { 2456 .iov_base = (void *) buf, 2457 .iov_len = count1, 2458 }; 2459 2460 qemu_iovec_init_external(&qiov, &iov, 1); 2461 return bdrv_pwritev(bs, offset, &qiov); 2462 } 2463 2464 /* 2465 * Writes to the file and ensures that no writes are reordered across this 2466 * request (acts as a barrier) 2467 * 2468 * Returns 0 on success, -errno in error cases. 2469 */ 2470 int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset, 2471 const void *buf, int count) 2472 { 2473 int ret; 2474 2475 ret = bdrv_pwrite(bs, offset, buf, count); 2476 if (ret < 0) { 2477 return ret; 2478 } 2479 2480 /* No flush needed for cache modes that already do it */ 2481 if (bs->enable_write_cache) { 2482 bdrv_flush(bs); 2483 } 2484 2485 return 0; 2486 } 2487 2488 static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs, 2489 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov) 2490 { 2491 /* Perform I/O through a temporary buffer so that users who scribble over 2492 * their read buffer while the operation is in progress do not end up 2493 * modifying the image file. This is critical for zero-copy guest I/O 2494 * where anything might happen inside guest memory. 2495 */ 2496 void *bounce_buffer; 2497 2498 BlockDriver *drv = bs->drv; 2499 struct iovec iov; 2500 QEMUIOVector bounce_qiov; 2501 int64_t cluster_sector_num; 2502 int cluster_nb_sectors; 2503 size_t skip_bytes; 2504 int ret; 2505 2506 /* Cover entire cluster so no additional backing file I/O is required when 2507 * allocating cluster in the image file. 2508 */ 2509 bdrv_round_to_clusters(bs, sector_num, nb_sectors, 2510 &cluster_sector_num, &cluster_nb_sectors); 2511 2512 trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, 2513 cluster_sector_num, cluster_nb_sectors); 2514 2515 iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE; 2516 iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len); 2517 qemu_iovec_init_external(&bounce_qiov, &iov, 1); 2518 2519 ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors, 2520 &bounce_qiov); 2521 if (ret < 0) { 2522 goto err; 2523 } 2524 2525 if (drv->bdrv_co_write_zeroes && 2526 buffer_is_zero(bounce_buffer, iov.iov_len)) { 2527 ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num, 2528 cluster_nb_sectors); 2529 } else { 2530 /* This does not change the data on the disk, it is not necessary 2531 * to flush even in cache=writethrough mode. 2532 */ 2533 ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors, 2534 &bounce_qiov); 2535 } 2536 2537 if (ret < 0) { 2538 /* It might be okay to ignore write errors for guest requests. 
If this 2539 * is a deliberate copy-on-read then we don't want to ignore the error. 2540 * Simply report it in all cases. 2541 */ 2542 goto err; 2543 } 2544 2545 skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE; 2546 qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes, 2547 nb_sectors * BDRV_SECTOR_SIZE); 2548 2549 err: 2550 qemu_vfree(bounce_buffer); 2551 return ret; 2552 } 2553 2554 /* 2555 * Handle a read request in coroutine context 2556 */ 2557 static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs, 2558 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, 2559 BdrvRequestFlags flags) 2560 { 2561 BlockDriver *drv = bs->drv; 2562 BdrvTrackedRequest req; 2563 int ret; 2564 2565 if (!drv) { 2566 return -ENOMEDIUM; 2567 } 2568 if (bdrv_check_request(bs, sector_num, nb_sectors)) { 2569 return -EIO; 2570 } 2571 2572 if (bs->copy_on_read) { 2573 flags |= BDRV_REQ_COPY_ON_READ; 2574 } 2575 if (flags & BDRV_REQ_COPY_ON_READ) { 2576 bs->copy_on_read_in_flight++; 2577 } 2578 2579 if (bs->copy_on_read_in_flight) { 2580 wait_for_overlapping_requests(bs, sector_num, nb_sectors); 2581 } 2582 2583 /* throttling disk I/O */ 2584 if (bs->io_limits_enabled) { 2585 bdrv_io_limits_intercept(bs, nb_sectors, false); 2586 } 2587 2588 tracked_request_begin(&req, bs, sector_num, nb_sectors, false); 2589 2590 if (flags & BDRV_REQ_COPY_ON_READ) { 2591 int pnum; 2592 2593 ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum); 2594 if (ret < 0) { 2595 goto out; 2596 } 2597 2598 if (!ret || pnum != nb_sectors) { 2599 ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov); 2600 goto out; 2601 } 2602 } 2603 2604 if (!(bs->zero_beyond_eof && bs->growable)) { 2605 ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov); 2606 } else { 2607 /* Read zeros after EOF of growable BDSes */ 2608 int64_t len, total_sectors, max_nb_sectors; 2609 2610 len = bdrv_getlength(bs); 2611 if (len < 0) { 2612 ret = len; 2613 goto out; 2614 } 2615 2616 total_sectors = len >> BDRV_SECTOR_BITS; 2617 max_nb_sectors = MAX(0, total_sectors - sector_num); 2618 if (max_nb_sectors > 0) { 2619 ret = drv->bdrv_co_readv(bs, sector_num, 2620 MIN(nb_sectors, max_nb_sectors), qiov); 2621 } else { 2622 ret = 0; 2623 } 2624 2625 /* Reading beyond end of file is supposed to produce zeroes */ 2626 if (ret == 0 && total_sectors < sector_num + nb_sectors) { 2627 uint64_t offset = MAX(0, total_sectors - sector_num); 2628 uint64_t bytes = (sector_num + nb_sectors - offset) * 2629 BDRV_SECTOR_SIZE; 2630 qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes); 2631 } 2632 } 2633 2634 out: 2635 tracked_request_end(&req); 2636 2637 if (flags & BDRV_REQ_COPY_ON_READ) { 2638 bs->copy_on_read_in_flight--; 2639 } 2640 2641 return ret; 2642 } 2643 2644 int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num, 2645 int nb_sectors, QEMUIOVector *qiov) 2646 { 2647 trace_bdrv_co_readv(bs, sector_num, nb_sectors); 2648 2649 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0); 2650 } 2651 2652 int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs, 2653 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov) 2654 { 2655 trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors); 2656 2657 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 2658 BDRV_REQ_COPY_ON_READ); 2659 } 2660 2661 static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs, 2662 int64_t sector_num, int nb_sectors) 2663 { 2664 BlockDriver *drv = bs->drv; 2665 QEMUIOVector qiov; 2666 struct iovec iov; 2667 int 
ret; 2668 2669 /* TODO Emulate only part of misaligned requests instead of letting block 2670 * drivers return -ENOTSUP and emulate everything */ 2671 2672 /* First try the efficient write zeroes operation */ 2673 if (drv->bdrv_co_write_zeroes) { 2674 ret = drv->bdrv_co_write_zeroes(bs, sector_num, nb_sectors); 2675 if (ret != -ENOTSUP) { 2676 return ret; 2677 } 2678 } 2679 2680 /* Fall back to bounce buffer if write zeroes is unsupported */ 2681 iov.iov_len = nb_sectors * BDRV_SECTOR_SIZE; 2682 iov.iov_base = qemu_blockalign(bs, iov.iov_len); 2683 memset(iov.iov_base, 0, iov.iov_len); 2684 qemu_iovec_init_external(&qiov, &iov, 1); 2685 2686 ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, &qiov); 2687 2688 qemu_vfree(iov.iov_base); 2689 return ret; 2690 } 2691 2692 /* 2693 * Handle a write request in coroutine context 2694 */ 2695 static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs, 2696 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, 2697 BdrvRequestFlags flags) 2698 { 2699 BlockDriver *drv = bs->drv; 2700 BdrvTrackedRequest req; 2701 int ret; 2702 2703 if (!bs->drv) { 2704 return -ENOMEDIUM; 2705 } 2706 if (bs->read_only) { 2707 return -EACCES; 2708 } 2709 if (bdrv_check_request(bs, sector_num, nb_sectors)) { 2710 return -EIO; 2711 } 2712 2713 if (bs->copy_on_read_in_flight) { 2714 wait_for_overlapping_requests(bs, sector_num, nb_sectors); 2715 } 2716 2717 /* throttling disk I/O */ 2718 if (bs->io_limits_enabled) { 2719 bdrv_io_limits_intercept(bs, nb_sectors, true); 2720 } 2721 2722 tracked_request_begin(&req, bs, sector_num, nb_sectors, true); 2723 2724 ret = notifier_with_return_list_notify(&bs->before_write_notifiers, &req); 2725 2726 if (ret < 0) { 2727 /* Do nothing, write notifier decided to fail this request */ 2728 } else if (flags & BDRV_REQ_ZERO_WRITE) { 2729 ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors); 2730 } else { 2731 ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov); 2732 } 2733 2734 if (ret == 0 && !bs->enable_write_cache) { 2735 ret = bdrv_co_flush(bs); 2736 } 2737 2738 if (bs->dirty_bitmap) { 2739 bdrv_set_dirty(bs, sector_num, nb_sectors); 2740 } 2741 2742 if (bs->wr_highest_sector < sector_num + nb_sectors - 1) { 2743 bs->wr_highest_sector = sector_num + nb_sectors - 1; 2744 } 2745 if (bs->growable && ret >= 0) { 2746 bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors); 2747 } 2748 2749 tracked_request_end(&req); 2750 2751 return ret; 2752 } 2753 2754 int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num, 2755 int nb_sectors, QEMUIOVector *qiov) 2756 { 2757 trace_bdrv_co_writev(bs, sector_num, nb_sectors); 2758 2759 return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0); 2760 } 2761 2762 int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs, 2763 int64_t sector_num, int nb_sectors) 2764 { 2765 trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors); 2766 2767 return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL, 2768 BDRV_REQ_ZERO_WRITE); 2769 } 2770 2771 /** 2772 * Truncate file to 'offset' bytes (needed only for file protocols) 2773 */ 2774 int bdrv_truncate(BlockDriverState *bs, int64_t offset) 2775 { 2776 BlockDriver *drv = bs->drv; 2777 int ret; 2778 if (!drv) 2779 return -ENOMEDIUM; 2780 if (!drv->bdrv_truncate) 2781 return -ENOTSUP; 2782 if (bs->read_only) 2783 return -EACCES; 2784 if (bdrv_in_use(bs)) 2785 return -EBUSY; 2786 ret = drv->bdrv_truncate(bs, offset); 2787 if (ret == 0) { 2788 ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS); 2789 
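        /* Tell the attached device model about the new size so it can react
         * (for example, a guest-visible capacity change). */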
bdrv_dev_resize_cb(bs); 2790 } 2791 return ret; 2792 } 2793 2794 /** 2795 * Length of an allocated file in bytes. Sparse files are counted by actual 2796 * allocated space. Return < 0 if error or unknown. 2797 */ 2798 int64_t bdrv_get_allocated_file_size(BlockDriverState *bs) 2799 { 2800 BlockDriver *drv = bs->drv; 2801 if (!drv) { 2802 return -ENOMEDIUM; 2803 } 2804 if (drv->bdrv_get_allocated_file_size) { 2805 return drv->bdrv_get_allocated_file_size(bs); 2806 } 2807 if (bs->file) { 2808 return bdrv_get_allocated_file_size(bs->file); 2809 } 2810 return -ENOTSUP; 2811 } 2812 2813 /** 2814 * Length of a file in bytes. Return < 0 if error or unknown. 2815 */ 2816 int64_t bdrv_getlength(BlockDriverState *bs) 2817 { 2818 BlockDriver *drv = bs->drv; 2819 if (!drv) 2820 return -ENOMEDIUM; 2821 2822 if (bdrv_dev_has_removable_media(bs)) { 2823 if (drv->bdrv_getlength) { 2824 return drv->bdrv_getlength(bs); 2825 } 2826 } 2827 return bs->total_sectors * BDRV_SECTOR_SIZE; 2828 } 2829 2830 /* return 0 as number of sectors if no device present or error */ 2831 void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr) 2832 { 2833 int64_t length; 2834 length = bdrv_getlength(bs); 2835 if (length < 0) 2836 length = 0; 2837 else 2838 length = length >> BDRV_SECTOR_BITS; 2839 *nb_sectors_ptr = length; 2840 } 2841 2842 void bdrv_set_on_error(BlockDriverState *bs, BlockdevOnError on_read_error, 2843 BlockdevOnError on_write_error) 2844 { 2845 bs->on_read_error = on_read_error; 2846 bs->on_write_error = on_write_error; 2847 } 2848 2849 BlockdevOnError bdrv_get_on_error(BlockDriverState *bs, bool is_read) 2850 { 2851 return is_read ? bs->on_read_error : bs->on_write_error; 2852 } 2853 2854 BlockErrorAction bdrv_get_error_action(BlockDriverState *bs, bool is_read, int error) 2855 { 2856 BlockdevOnError on_err = is_read ? bs->on_read_error : bs->on_write_error; 2857 2858 switch (on_err) { 2859 case BLOCKDEV_ON_ERROR_ENOSPC: 2860 return (error == ENOSPC) ? BDRV_ACTION_STOP : BDRV_ACTION_REPORT; 2861 case BLOCKDEV_ON_ERROR_STOP: 2862 return BDRV_ACTION_STOP; 2863 case BLOCKDEV_ON_ERROR_REPORT: 2864 return BDRV_ACTION_REPORT; 2865 case BLOCKDEV_ON_ERROR_IGNORE: 2866 return BDRV_ACTION_IGNORE; 2867 default: 2868 abort(); 2869 } 2870 } 2871 2872 /* This is done by device models because, while the block layer knows 2873 * about the error, it does not know whether an operation comes from 2874 * the device or the block layer (from a job, for example).
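 *
 * As a purely illustrative sketch (the real call sites live in the device
 * models, not in this file), a hypothetical completion handler holding a
 * negative errno 'ret' might do:
 *
 *     BlockErrorAction action = bdrv_get_error_action(bs, is_read, -ret);
 *     if (action == BDRV_ACTION_STOP) {
 *         ... queue the request so it can be retried after the VM resumes ...
 *     } else if (action == BDRV_ACTION_REPORT) {
 *         ... complete the request with an error ...
 *     }
 *     bdrv_error_action(bs, action, is_read, -ret);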
2875 */ 2876 void bdrv_error_action(BlockDriverState *bs, BlockErrorAction action, 2877 bool is_read, int error) 2878 { 2879 assert(error >= 0); 2880 bdrv_emit_qmp_error_event(bs, QEVENT_BLOCK_IO_ERROR, action, is_read); 2881 if (action == BDRV_ACTION_STOP) { 2882 vm_stop(RUN_STATE_IO_ERROR); 2883 bdrv_iostatus_set_err(bs, error); 2884 } 2885 } 2886 2887 int bdrv_is_read_only(BlockDriverState *bs) 2888 { 2889 return bs->read_only; 2890 } 2891 2892 int bdrv_is_sg(BlockDriverState *bs) 2893 { 2894 return bs->sg; 2895 } 2896 2897 int bdrv_enable_write_cache(BlockDriverState *bs) 2898 { 2899 return bs->enable_write_cache; 2900 } 2901 2902 void bdrv_set_enable_write_cache(BlockDriverState *bs, bool wce) 2903 { 2904 bs->enable_write_cache = wce; 2905 2906 /* so a reopen() will preserve wce */ 2907 if (wce) { 2908 bs->open_flags |= BDRV_O_CACHE_WB; 2909 } else { 2910 bs->open_flags &= ~BDRV_O_CACHE_WB; 2911 } 2912 } 2913 2914 int bdrv_is_encrypted(BlockDriverState *bs) 2915 { 2916 if (bs->backing_hd && bs->backing_hd->encrypted) 2917 return 1; 2918 return bs->encrypted; 2919 } 2920 2921 int bdrv_key_required(BlockDriverState *bs) 2922 { 2923 BlockDriverState *backing_hd = bs->backing_hd; 2924 2925 if (backing_hd && backing_hd->encrypted && !backing_hd->valid_key) 2926 return 1; 2927 return (bs->encrypted && !bs->valid_key); 2928 } 2929 2930 int bdrv_set_key(BlockDriverState *bs, const char *key) 2931 { 2932 int ret; 2933 if (bs->backing_hd && bs->backing_hd->encrypted) { 2934 ret = bdrv_set_key(bs->backing_hd, key); 2935 if (ret < 0) 2936 return ret; 2937 if (!bs->encrypted) 2938 return 0; 2939 } 2940 if (!bs->encrypted) { 2941 return -EINVAL; 2942 } else if (!bs->drv || !bs->drv->bdrv_set_key) { 2943 return -ENOMEDIUM; 2944 } 2945 ret = bs->drv->bdrv_set_key(bs, key); 2946 if (ret < 0) { 2947 bs->valid_key = 0; 2948 } else if (!bs->valid_key) { 2949 bs->valid_key = 1; 2950 /* call the change callback now, we skipped it on open */ 2951 bdrv_dev_change_media_cb(bs, true); 2952 } 2953 return ret; 2954 } 2955 2956 const char *bdrv_get_format_name(BlockDriverState *bs) 2957 { 2958 return bs->drv ? 
bs->drv->format_name : NULL; 2959 } 2960 2961 void bdrv_iterate_format(void (*it)(void *opaque, const char *name), 2962 void *opaque) 2963 { 2964 BlockDriver *drv; 2965 2966 QLIST_FOREACH(drv, &bdrv_drivers, list) { 2967 it(opaque, drv->format_name); 2968 } 2969 } 2970 2971 BlockDriverState *bdrv_find(const char *name) 2972 { 2973 BlockDriverState *bs; 2974 2975 QTAILQ_FOREACH(bs, &bdrv_states, list) { 2976 if (!strcmp(name, bs->device_name)) { 2977 return bs; 2978 } 2979 } 2980 return NULL; 2981 } 2982 2983 BlockDriverState *bdrv_next(BlockDriverState *bs) 2984 { 2985 if (!bs) { 2986 return QTAILQ_FIRST(&bdrv_states); 2987 } 2988 return QTAILQ_NEXT(bs, list); 2989 } 2990 2991 void bdrv_iterate(void (*it)(void *opaque, BlockDriverState *bs), void *opaque) 2992 { 2993 BlockDriverState *bs; 2994 2995 QTAILQ_FOREACH(bs, &bdrv_states, list) { 2996 it(opaque, bs); 2997 } 2998 } 2999 3000 const char *bdrv_get_device_name(BlockDriverState *bs) 3001 { 3002 return bs->device_name; 3003 } 3004 3005 int bdrv_get_flags(BlockDriverState *bs) 3006 { 3007 return bs->open_flags; 3008 } 3009 3010 int bdrv_flush_all(void) 3011 { 3012 BlockDriverState *bs; 3013 int result = 0; 3014 3015 QTAILQ_FOREACH(bs, &bdrv_states, list) { 3016 int ret = bdrv_flush(bs); 3017 if (ret < 0 && !result) { 3018 result = ret; 3019 } 3020 } 3021 3022 return result; 3023 } 3024 3025 int bdrv_has_zero_init_1(BlockDriverState *bs) 3026 { 3027 return 1; 3028 } 3029 3030 int bdrv_has_zero_init(BlockDriverState *bs) 3031 { 3032 assert(bs->drv); 3033 3034 /* If BS is a copy on write image, it is initialized to 3035 the contents of the base image, which may not be zeroes. */ 3036 if (bs->backing_hd) { 3037 return 0; 3038 } 3039 if (bs->drv->bdrv_has_zero_init) { 3040 return bs->drv->bdrv_has_zero_init(bs); 3041 } 3042 3043 /* safe default */ 3044 return 0; 3045 } 3046 3047 typedef struct BdrvCoGetBlockStatusData { 3048 BlockDriverState *bs; 3049 BlockDriverState *base; 3050 int64_t sector_num; 3051 int nb_sectors; 3052 int *pnum; 3053 int64_t ret; 3054 bool done; 3055 } BdrvCoGetBlockStatusData; 3056 3057 /* 3058 * Returns true iff the specified sector is present in the disk image. Drivers 3059 * not implementing the functionality are assumed to not support backing files, 3060 * hence all their sectors are reported as allocated. 3061 * 3062 * If 'sector_num' is beyond the end of the disk image the return value is 0 3063 * and 'pnum' is set to 0. 3064 * 3065 * 'pnum' is set to the number of sectors (including and immediately following 3066 * the specified sector) that are known to be in the same 3067 * allocated/unallocated state. 3068 * 3069 * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes 3070 * beyond the end of the disk image it will be clamped. 
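 *
 * A hedged illustration (not part of this file): a caller using the
 * synchronous wrapper bdrv_get_block_status() could inspect a range with
 *
 *     int pnum;
 *     int64_t st = bdrv_get_block_status(bs, sector_num, nb_sectors, &pnum);
 *     if (st >= 0 && (st & BDRV_BLOCK_ZERO)) {
 *         ... the next 'pnum' sectors read as zeroes ...
 *     }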
3071 */ 3072 static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs, 3073 int64_t sector_num, 3074 int nb_sectors, int *pnum) 3075 { 3076 int64_t length; 3077 int64_t n; 3078 int64_t ret, ret2; 3079 3080 length = bdrv_getlength(bs); 3081 if (length < 0) { 3082 return length; 3083 } 3084 3085 if (sector_num >= (length >> BDRV_SECTOR_BITS)) { 3086 *pnum = 0; 3087 return 0; 3088 } 3089 3090 n = bs->total_sectors - sector_num; 3091 if (n < nb_sectors) { 3092 nb_sectors = n; 3093 } 3094 3095 if (!bs->drv->bdrv_co_get_block_status) { 3096 *pnum = nb_sectors; 3097 ret = BDRV_BLOCK_DATA; 3098 if (bs->drv->protocol_name) { 3099 ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE); 3100 } 3101 return ret; 3102 } 3103 3104 ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum); 3105 if (ret < 0) { 3106 return ret; 3107 } 3108 3109 if (!(ret & BDRV_BLOCK_DATA)) { 3110 if (bdrv_has_zero_init(bs)) { 3111 ret |= BDRV_BLOCK_ZERO; 3112 } else { 3113 BlockDriverState *bs2 = bs->backing_hd; 3114 int64_t length2 = bdrv_getlength(bs2); 3115 if (length2 >= 0 && sector_num >= (length2 >> BDRV_SECTOR_BITS)) { 3116 ret |= BDRV_BLOCK_ZERO; 3117 } 3118 } 3119 } 3120 3121 if (bs->file && 3122 (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) && 3123 (ret & BDRV_BLOCK_OFFSET_VALID)) { 3124 ret2 = bdrv_co_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS, 3125 *pnum, pnum); 3126 if (ret2 >= 0) { 3127 /* Ignore errors. This is just providing extra information, it 3128 * is useful but not necessary. 3129 */ 3130 ret |= (ret2 & BDRV_BLOCK_ZERO); 3131 } 3132 } 3133 3134 return ret; 3135 } 3136 3137 /* Coroutine wrapper for bdrv_get_block_status() */ 3138 static void coroutine_fn bdrv_get_block_status_co_entry(void *opaque) 3139 { 3140 BdrvCoGetBlockStatusData *data = opaque; 3141 BlockDriverState *bs = data->bs; 3142 3143 data->ret = bdrv_co_get_block_status(bs, data->sector_num, data->nb_sectors, 3144 data->pnum); 3145 data->done = true; 3146 } 3147 3148 /* 3149 * Synchronous wrapper around bdrv_co_get_block_status(). 3150 * 3151 * See bdrv_co_get_block_status() for details. 3152 */ 3153 int64_t bdrv_get_block_status(BlockDriverState *bs, int64_t sector_num, 3154 int nb_sectors, int *pnum) 3155 { 3156 Coroutine *co; 3157 BdrvCoGetBlockStatusData data = { 3158 .bs = bs, 3159 .sector_num = sector_num, 3160 .nb_sectors = nb_sectors, 3161 .pnum = pnum, 3162 .done = false, 3163 }; 3164 3165 if (qemu_in_coroutine()) { 3166 /* Fast-path if already in coroutine context */ 3167 bdrv_get_block_status_co_entry(&data); 3168 } else { 3169 co = qemu_coroutine_create(bdrv_get_block_status_co_entry); 3170 qemu_coroutine_enter(co, &data); 3171 while (!data.done) { 3172 qemu_aio_wait(); 3173 } 3174 } 3175 return data.ret; 3176 } 3177 3178 int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, 3179 int nb_sectors, int *pnum) 3180 { 3181 int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum); 3182 if (ret < 0) { 3183 return ret; 3184 } 3185 return 3186 (ret & BDRV_BLOCK_DATA) || 3187 ((ret & BDRV_BLOCK_ZERO) && !bdrv_has_zero_init(bs)); 3188 } 3189 3190 /* 3191 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP] 3192 * 3193 * Return true if the given sector is allocated in any image between 3194 * BASE and TOP (inclusive). BASE can be NULL to check if the given 3195 * sector is allocated in any image of the chain. Return false otherwise. 
3196 * 3197 * 'pnum' is set to the number of sectors (including and immediately following 3198 * the specified sector) that are known to be in the same 3199 * allocated/unallocated state. 3200 * 3201 */ 3202 int bdrv_is_allocated_above(BlockDriverState *top, 3203 BlockDriverState *base, 3204 int64_t sector_num, 3205 int nb_sectors, int *pnum) 3206 { 3207 BlockDriverState *intermediate; 3208 int ret, n = nb_sectors; 3209 3210 intermediate = top; 3211 while (intermediate && intermediate != base) { 3212 int pnum_inter; 3213 ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors, 3214 &pnum_inter); 3215 if (ret < 0) { 3216 return ret; 3217 } else if (ret) { 3218 *pnum = pnum_inter; 3219 return 1; 3220 } 3221 3222 /* 3223 * [sector_num, nb_sectors] is unallocated on top but intermediate 3224 * might have 3225 * 3226 * [sector_num+x, nr_sectors] allocated. 3227 */ 3228 if (n > pnum_inter && 3229 (intermediate == top || 3230 sector_num + pnum_inter < intermediate->total_sectors)) { 3231 n = pnum_inter; 3232 } 3233 3234 intermediate = intermediate->backing_hd; 3235 } 3236 3237 *pnum = n; 3238 return 0; 3239 } 3240 3241 const char *bdrv_get_encrypted_filename(BlockDriverState *bs) 3242 { 3243 if (bs->backing_hd && bs->backing_hd->encrypted) 3244 return bs->backing_file; 3245 else if (bs->encrypted) 3246 return bs->filename; 3247 else 3248 return NULL; 3249 } 3250 3251 void bdrv_get_backing_filename(BlockDriverState *bs, 3252 char *filename, int filename_size) 3253 { 3254 pstrcpy(filename, filename_size, bs->backing_file); 3255 } 3256 3257 int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num, 3258 const uint8_t *buf, int nb_sectors) 3259 { 3260 BlockDriver *drv = bs->drv; 3261 if (!drv) 3262 return -ENOMEDIUM; 3263 if (!drv->bdrv_write_compressed) 3264 return -ENOTSUP; 3265 if (bdrv_check_request(bs, sector_num, nb_sectors)) 3266 return -EIO; 3267 3268 assert(!bs->dirty_bitmap); 3269 3270 return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors); 3271 } 3272 3273 int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi) 3274 { 3275 BlockDriver *drv = bs->drv; 3276 if (!drv) 3277 return -ENOMEDIUM; 3278 if (!drv->bdrv_get_info) 3279 return -ENOTSUP; 3280 memset(bdi, 0, sizeof(*bdi)); 3281 return drv->bdrv_get_info(bs, bdi); 3282 } 3283 3284 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf, 3285 int64_t pos, int size) 3286 { 3287 QEMUIOVector qiov; 3288 struct iovec iov = { 3289 .iov_base = (void *) buf, 3290 .iov_len = size, 3291 }; 3292 3293 qemu_iovec_init_external(&qiov, &iov, 1); 3294 return bdrv_writev_vmstate(bs, &qiov, pos); 3295 } 3296 3297 int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) 3298 { 3299 BlockDriver *drv = bs->drv; 3300 3301 if (!drv) { 3302 return -ENOMEDIUM; 3303 } else if (drv->bdrv_save_vmstate) { 3304 return drv->bdrv_save_vmstate(bs, qiov, pos); 3305 } else if (bs->file) { 3306 return bdrv_writev_vmstate(bs->file, qiov, pos); 3307 } 3308 3309 return -ENOTSUP; 3310 } 3311 3312 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf, 3313 int64_t pos, int size) 3314 { 3315 BlockDriver *drv = bs->drv; 3316 if (!drv) 3317 return -ENOMEDIUM; 3318 if (drv->bdrv_load_vmstate) 3319 return drv->bdrv_load_vmstate(bs, buf, pos, size); 3320 if (bs->file) 3321 return bdrv_load_vmstate(bs->file, buf, pos, size); 3322 return -ENOTSUP; 3323 } 3324 3325 void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event) 3326 { 3327 if (!bs || !bs->drv || !bs->drv->bdrv_debug_event) { 3328 return; 3329 } 3330 3331 
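    /* Forward the event to the driver's debug hook; for example, the blkdebug
     * driver matches these events against its rules to inject errors or
     * suspend requests at well-defined points. */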
bs->drv->bdrv_debug_event(bs, event); 3332 } 3333 3334 int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event, 3335 const char *tag) 3336 { 3337 while (bs && bs->drv && !bs->drv->bdrv_debug_breakpoint) { 3338 bs = bs->file; 3339 } 3340 3341 if (bs && bs->drv && bs->drv->bdrv_debug_breakpoint) { 3342 return bs->drv->bdrv_debug_breakpoint(bs, event, tag); 3343 } 3344 3345 return -ENOTSUP; 3346 } 3347 3348 int bdrv_debug_resume(BlockDriverState *bs, const char *tag) 3349 { 3350 while (bs && bs->drv && !bs->drv->bdrv_debug_resume) { 3351 bs = bs->file; 3352 } 3353 3354 if (bs && bs->drv && bs->drv->bdrv_debug_resume) { 3355 return bs->drv->bdrv_debug_resume(bs, tag); 3356 } 3357 3358 return -ENOTSUP; 3359 } 3360 3361 bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag) 3362 { 3363 while (bs && bs->drv && !bs->drv->bdrv_debug_is_suspended) { 3364 bs = bs->file; 3365 } 3366 3367 if (bs && bs->drv && bs->drv->bdrv_debug_is_suspended) { 3368 return bs->drv->bdrv_debug_is_suspended(bs, tag); 3369 } 3370 3371 return false; 3372 } 3373 3374 int bdrv_is_snapshot(BlockDriverState *bs) 3375 { 3376 return !!(bs->open_flags & BDRV_O_SNAPSHOT); 3377 } 3378 3379 /* backing_file can either be relative, or absolute, or a protocol. If it is 3380 * relative, it must be relative to the chain. So, passing in bs->filename 3381 * from a BDS as backing_file should not be done, as that may be relative to 3382 * the CWD rather than the chain. */ 3383 BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs, 3384 const char *backing_file) 3385 { 3386 char *filename_full = NULL; 3387 char *backing_file_full = NULL; 3388 char *filename_tmp = NULL; 3389 int is_protocol = 0; 3390 BlockDriverState *curr_bs = NULL; 3391 BlockDriverState *retval = NULL; 3392 3393 if (!bs || !bs->drv || !backing_file) { 3394 return NULL; 3395 } 3396 3397 filename_full = g_malloc(PATH_MAX); 3398 backing_file_full = g_malloc(PATH_MAX); 3399 filename_tmp = g_malloc(PATH_MAX); 3400 3401 is_protocol = path_has_protocol(backing_file); 3402 3403 for (curr_bs = bs; curr_bs->backing_hd; curr_bs = curr_bs->backing_hd) { 3404 3405 /* If either of the filename paths is actually a protocol, then 3406 * compare unmodified paths; otherwise make paths relative */ 3407 if (is_protocol || path_has_protocol(curr_bs->backing_file)) { 3408 if (strcmp(backing_file, curr_bs->backing_file) == 0) { 3409 retval = curr_bs->backing_hd; 3410 break; 3411 } 3412 } else { 3413 /* If not an absolute filename path, make it relative to the current 3414 * image's filename path */ 3415 path_combine(filename_tmp, PATH_MAX, curr_bs->filename, 3416 backing_file); 3417 3418 /* We are going to compare absolute pathnames */ 3419 if (!realpath(filename_tmp, filename_full)) { 3420 continue; 3421 } 3422 3423 /* We need to make sure the backing filename we are comparing against 3424 * is relative to the current image filename (or absolute) */ 3425 path_combine(filename_tmp, PATH_MAX, curr_bs->filename, 3426 curr_bs->backing_file); 3427 3428 if (!realpath(filename_tmp, backing_file_full)) { 3429 continue; 3430 } 3431 3432 if (strcmp(backing_file_full, filename_full) == 0) { 3433 retval = curr_bs->backing_hd; 3434 break; 3435 } 3436 } 3437 } 3438 3439 g_free(filename_full); 3440 g_free(backing_file_full); 3441 g_free(filename_tmp); 3442 return retval; 3443 } 3444 3445 int bdrv_get_backing_file_depth(BlockDriverState *bs) 3446 { 3447 if (!bs->drv) { 3448 return 0; 3449 } 3450 3451 if (!bs->backing_hd) { 3452 return 0; 3453 } 3454 3455 return 1 + 
bdrv_get_backing_file_depth(bs->backing_hd); 3456 } 3457 3458 BlockDriverState *bdrv_find_base(BlockDriverState *bs) 3459 { 3460 BlockDriverState *curr_bs = NULL; 3461 3462 if (!bs) { 3463 return NULL; 3464 } 3465 3466 curr_bs = bs; 3467 3468 while (curr_bs->backing_hd) { 3469 curr_bs = curr_bs->backing_hd; 3470 } 3471 return curr_bs; 3472 } 3473 3474 /**************************************************************/ 3475 /* async I/Os */ 3476 3477 BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num, 3478 QEMUIOVector *qiov, int nb_sectors, 3479 BlockDriverCompletionFunc *cb, void *opaque) 3480 { 3481 trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque); 3482 3483 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 3484 cb, opaque, false); 3485 } 3486 3487 BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num, 3488 QEMUIOVector *qiov, int nb_sectors, 3489 BlockDriverCompletionFunc *cb, void *opaque) 3490 { 3491 trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque); 3492 3493 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 3494 cb, opaque, true); 3495 } 3496 3497 3498 typedef struct MultiwriteCB { 3499 int error; 3500 int num_requests; 3501 int num_callbacks; 3502 struct { 3503 BlockDriverCompletionFunc *cb; 3504 void *opaque; 3505 QEMUIOVector *free_qiov; 3506 } callbacks[]; 3507 } MultiwriteCB; 3508 3509 static void multiwrite_user_cb(MultiwriteCB *mcb) 3510 { 3511 int i; 3512 3513 for (i = 0; i < mcb->num_callbacks; i++) { 3514 mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error); 3515 if (mcb->callbacks[i].free_qiov) { 3516 qemu_iovec_destroy(mcb->callbacks[i].free_qiov); 3517 } 3518 g_free(mcb->callbacks[i].free_qiov); 3519 } 3520 } 3521 3522 static void multiwrite_cb(void *opaque, int ret) 3523 { 3524 MultiwriteCB *mcb = opaque; 3525 3526 trace_multiwrite_cb(mcb, ret); 3527 3528 if (ret < 0 && !mcb->error) { 3529 mcb->error = ret; 3530 } 3531 3532 mcb->num_requests--; 3533 if (mcb->num_requests == 0) { 3534 multiwrite_user_cb(mcb); 3535 g_free(mcb); 3536 } 3537 } 3538 3539 static int multiwrite_req_compare(const void *a, const void *b) 3540 { 3541 const BlockRequest *req1 = a, *req2 = b; 3542 3543 /* 3544 * Note that we can't simply subtract req2->sector from req1->sector 3545 * here as that could overflow the return value. 3546 */ 3547 if (req1->sector > req2->sector) { 3548 return 1; 3549 } else if (req1->sector < req2->sector) { 3550 return -1; 3551 } else { 3552 return 0; 3553 } 3554 } 3555 3556 /* 3557 * Takes a bunch of requests and tries to merge them. Returns the number of 3558 * requests that remain after merging. 3559 */ 3560 static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs, 3561 int num_reqs, MultiwriteCB *mcb) 3562 { 3563 int i, outidx; 3564 3565 // Sort requests by start sector 3566 qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare); 3567 3568 // Check if adjacent requests touch the same clusters. If so, combine them, 3569 // filling up gaps with zero sectors. 3570 outidx = 0; 3571 for (i = 1; i < num_reqs; i++) { 3572 int merge = 0; 3573 int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors; 3574 3575 // Handle exactly sequential writes and overlapping writes. 
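        // Illustrative example (not taken from the original comments): a
        // request for sectors [0, 8) followed by one for [8, 16) becomes a
        // single [0, 16) write; [0, 8) followed by [4, 12) is merged as well,
        // with the overlapping tail of the first request dropped in favour
        // of the second. Only exactly sequential or overlapping requests are
        // merged; requests separated by a gap are left untouched.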
if (reqs[i].sector <= oldreq_last) { 3577 merge = 1; 3578 } 3579 3580 if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) { 3581 merge = 0; 3582 } 3583 3584 if (merge) { 3585 size_t size; 3586 QEMUIOVector *qiov = g_malloc0(sizeof(*qiov)); 3587 qemu_iovec_init(qiov, 3588 reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1); 3589 3590 // Add the first request to the merged one. If the requests are 3591 // overlapping, drop the last sectors of the first request. 3592 size = (reqs[i].sector - reqs[outidx].sector) << 9; 3593 qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size); 3594 3595 // We should not need to add any zeros between the two requests 3596 assert (reqs[i].sector <= oldreq_last); 3597 3598 // Add the second request 3599 qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size); 3600 3601 reqs[outidx].nb_sectors = qiov->size >> 9; 3602 reqs[outidx].qiov = qiov; 3603 3604 mcb->callbacks[i].free_qiov = reqs[outidx].qiov; 3605 } else { 3606 outidx++; 3607 reqs[outidx].sector = reqs[i].sector; 3608 reqs[outidx].nb_sectors = reqs[i].nb_sectors; 3609 reqs[outidx].qiov = reqs[i].qiov; 3610 } 3611 } 3612 3613 return outidx + 1; 3614 } 3615 3616 /* 3617 * Submit multiple AIO write requests at once. 3618 * 3619 * On success, the function returns 0 and all requests in the reqs array have 3620 * been submitted. In the error case this function returns -1, and any of the 3621 * requests may or may not be submitted yet. In particular, this means that the 3622 * callback will be called for some of the requests, for others it won't. The 3623 * caller must check the error field of the BlockRequest to wait for the right 3624 * callbacks (if error != 0, no callback will be called). 3625 * 3626 * The implementation may modify the contents of the reqs array, e.g. to merge 3627 * requests. However, the fields opaque and error are left unmodified as they 3628 * are used to signal failure for a single request to the caller. 3629 */ 3630 int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs) 3631 { 3632 MultiwriteCB *mcb; 3633 int i; 3634 3635 /* don't submit writes if we don't have a medium */ 3636 if (bs->drv == NULL) { 3637 for (i = 0; i < num_reqs; i++) { 3638 reqs[i].error = -ENOMEDIUM; 3639 } 3640 return -1; 3641 } 3642 3643 if (num_reqs == 0) { 3644 return 0; 3645 } 3646 3647 // Create MultiwriteCB structure 3648 mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks)); 3649 mcb->num_requests = 0; 3650 mcb->num_callbacks = num_reqs; 3651 3652 for (i = 0; i < num_reqs; i++) { 3653 mcb->callbacks[i].cb = reqs[i].cb; 3654 mcb->callbacks[i].opaque = reqs[i].opaque; 3655 } 3656 3657 // Check for mergeable requests 3658 num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb); 3659 3660 trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs); 3661 3662 /* Run the aio requests.
*/ 3663 mcb->num_requests = num_reqs; 3664 for (i = 0; i < num_reqs; i++) { 3665 bdrv_aio_writev(bs, reqs[i].sector, reqs[i].qiov, 3666 reqs[i].nb_sectors, multiwrite_cb, mcb); 3667 } 3668 3669 return 0; 3670 } 3671 3672 void bdrv_aio_cancel(BlockDriverAIOCB *acb) 3673 { 3674 acb->aiocb_info->cancel(acb); 3675 } 3676 3677 /**************************************************************/ 3678 /* async block device emulation */ 3679 3680 typedef struct BlockDriverAIOCBSync { 3681 BlockDriverAIOCB common; 3682 QEMUBH *bh; 3683 int ret; 3684 /* vector translation state */ 3685 QEMUIOVector *qiov; 3686 uint8_t *bounce; 3687 int is_write; 3688 } BlockDriverAIOCBSync; 3689 3690 static void bdrv_aio_cancel_em(BlockDriverAIOCB *blockacb) 3691 { 3692 BlockDriverAIOCBSync *acb = 3693 container_of(blockacb, BlockDriverAIOCBSync, common); 3694 qemu_bh_delete(acb->bh); 3695 acb->bh = NULL; 3696 qemu_aio_release(acb); 3697 } 3698 3699 static const AIOCBInfo bdrv_em_aiocb_info = { 3700 .aiocb_size = sizeof(BlockDriverAIOCBSync), 3701 .cancel = bdrv_aio_cancel_em, 3702 }; 3703 3704 static void bdrv_aio_bh_cb(void *opaque) 3705 { 3706 BlockDriverAIOCBSync *acb = opaque; 3707 3708 if (!acb->is_write) 3709 qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size); 3710 qemu_vfree(acb->bounce); 3711 acb->common.cb(acb->common.opaque, acb->ret); 3712 qemu_bh_delete(acb->bh); 3713 acb->bh = NULL; 3714 qemu_aio_release(acb); 3715 } 3716 3717 static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs, 3718 int64_t sector_num, 3719 QEMUIOVector *qiov, 3720 int nb_sectors, 3721 BlockDriverCompletionFunc *cb, 3722 void *opaque, 3723 int is_write) 3724 3725 { 3726 BlockDriverAIOCBSync *acb; 3727 3728 acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque); 3729 acb->is_write = is_write; 3730 acb->qiov = qiov; 3731 acb->bounce = qemu_blockalign(bs, qiov->size); 3732 acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb); 3733 3734 if (is_write) { 3735 qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size); 3736 acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors); 3737 } else { 3738 acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors); 3739 } 3740 3741 qemu_bh_schedule(acb->bh); 3742 3743 return &acb->common; 3744 } 3745 3746 static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs, 3747 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, 3748 BlockDriverCompletionFunc *cb, void *opaque) 3749 { 3750 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0); 3751 } 3752 3753 static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs, 3754 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, 3755 BlockDriverCompletionFunc *cb, void *opaque) 3756 { 3757 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1); 3758 } 3759 3760 3761 typedef struct BlockDriverAIOCBCoroutine { 3762 BlockDriverAIOCB common; 3763 BlockRequest req; 3764 bool is_write; 3765 bool *done; 3766 QEMUBH* bh; 3767 } BlockDriverAIOCBCoroutine; 3768 3769 static void bdrv_aio_co_cancel_em(BlockDriverAIOCB *blockacb) 3770 { 3771 BlockDriverAIOCBCoroutine *acb = 3772 container_of(blockacb, BlockDriverAIOCBCoroutine, common); 3773 bool done = false; 3774 3775 acb->done = &done; 3776 while (!done) { 3777 qemu_aio_wait(); 3778 } 3779 } 3780 3781 static const AIOCBInfo bdrv_em_co_aiocb_info = { 3782 .aiocb_size = sizeof(BlockDriverAIOCBCoroutine), 3783 .cancel = bdrv_aio_co_cancel_em, 3784 }; 3785 3786 static void bdrv_co_em_bh(void *opaque) 3787 { 3788 
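    /* Completion bottom half: deliver the result to the caller's callback,
     * signal a pending cancellation waiter if there is one, then release the
     * AIOCB. */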
BlockDriverAIOCBCoroutine *acb = opaque; 3789 3790 acb->common.cb(acb->common.opaque, acb->req.error); 3791 3792 if (acb->done) { 3793 *acb->done = true; 3794 } 3795 3796 qemu_bh_delete(acb->bh); 3797 qemu_aio_release(acb); 3798 } 3799 3800 /* Invoke bdrv_co_do_readv/bdrv_co_do_writev */ 3801 static void coroutine_fn bdrv_co_do_rw(void *opaque) 3802 { 3803 BlockDriverAIOCBCoroutine *acb = opaque; 3804 BlockDriverState *bs = acb->common.bs; 3805 3806 if (!acb->is_write) { 3807 acb->req.error = bdrv_co_do_readv(bs, acb->req.sector, 3808 acb->req.nb_sectors, acb->req.qiov, 0); 3809 } else { 3810 acb->req.error = bdrv_co_do_writev(bs, acb->req.sector, 3811 acb->req.nb_sectors, acb->req.qiov, 0); 3812 } 3813 3814 acb->bh = qemu_bh_new(bdrv_co_em_bh, acb); 3815 qemu_bh_schedule(acb->bh); 3816 } 3817 3818 static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs, 3819 int64_t sector_num, 3820 QEMUIOVector *qiov, 3821 int nb_sectors, 3822 BlockDriverCompletionFunc *cb, 3823 void *opaque, 3824 bool is_write) 3825 { 3826 Coroutine *co; 3827 BlockDriverAIOCBCoroutine *acb; 3828 3829 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque); 3830 acb->req.sector = sector_num; 3831 acb->req.nb_sectors = nb_sectors; 3832 acb->req.qiov = qiov; 3833 acb->is_write = is_write; 3834 acb->done = NULL; 3835 3836 co = qemu_coroutine_create(bdrv_co_do_rw); 3837 qemu_coroutine_enter(co, acb); 3838 3839 return &acb->common; 3840 } 3841 3842 static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque) 3843 { 3844 BlockDriverAIOCBCoroutine *acb = opaque; 3845 BlockDriverState *bs = acb->common.bs; 3846 3847 acb->req.error = bdrv_co_flush(bs); 3848 acb->bh = qemu_bh_new(bdrv_co_em_bh, acb); 3849 qemu_bh_schedule(acb->bh); 3850 } 3851 3852 BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs, 3853 BlockDriverCompletionFunc *cb, void *opaque) 3854 { 3855 trace_bdrv_aio_flush(bs, opaque); 3856 3857 Coroutine *co; 3858 BlockDriverAIOCBCoroutine *acb; 3859 3860 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque); 3861 acb->done = NULL; 3862 3863 co = qemu_coroutine_create(bdrv_aio_flush_co_entry); 3864 qemu_coroutine_enter(co, acb); 3865 3866 return &acb->common; 3867 } 3868 3869 static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque) 3870 { 3871 BlockDriverAIOCBCoroutine *acb = opaque; 3872 BlockDriverState *bs = acb->common.bs; 3873 3874 acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors); 3875 acb->bh = qemu_bh_new(bdrv_co_em_bh, acb); 3876 qemu_bh_schedule(acb->bh); 3877 } 3878 3879 BlockDriverAIOCB *bdrv_aio_discard(BlockDriverState *bs, 3880 int64_t sector_num, int nb_sectors, 3881 BlockDriverCompletionFunc *cb, void *opaque) 3882 { 3883 Coroutine *co; 3884 BlockDriverAIOCBCoroutine *acb; 3885 3886 trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque); 3887 3888 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque); 3889 acb->req.sector = sector_num; 3890 acb->req.nb_sectors = nb_sectors; 3891 acb->done = NULL; 3892 co = qemu_coroutine_create(bdrv_aio_discard_co_entry); 3893 qemu_coroutine_enter(co, acb); 3894 3895 return &acb->common; 3896 } 3897 3898 void bdrv_init(void) 3899 { 3900 module_call_init(MODULE_INIT_BLOCK); 3901 } 3902 3903 void bdrv_init_with_whitelist(void) 3904 { 3905 use_bdrv_whitelist = 1; 3906 bdrv_init(); 3907 } 3908 3909 void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs, 3910 BlockDriverCompletionFunc *cb, void *opaque) 3911 { 3912 BlockDriverAIOCB *acb; 3913 3914 acb = g_slice_alloc(aiocb_info->aiocb_size); 3915 
acb->aiocb_info = aiocb_info; 3916 acb->bs = bs; 3917 acb->cb = cb; 3918 acb->opaque = opaque; 3919 return acb; 3920 } 3921 3922 void qemu_aio_release(void *p) 3923 { 3924 BlockDriverAIOCB *acb = p; 3925 g_slice_free1(acb->aiocb_info->aiocb_size, acb); 3926 } 3927 3928 /**************************************************************/ 3929 /* Coroutine block device emulation */ 3930 3931 typedef struct CoroutineIOCompletion { 3932 Coroutine *coroutine; 3933 int ret; 3934 } CoroutineIOCompletion; 3935 3936 static void bdrv_co_io_em_complete(void *opaque, int ret) 3937 { 3938 CoroutineIOCompletion *co = opaque; 3939 3940 co->ret = ret; 3941 qemu_coroutine_enter(co->coroutine, NULL); 3942 } 3943 3944 static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num, 3945 int nb_sectors, QEMUIOVector *iov, 3946 bool is_write) 3947 { 3948 CoroutineIOCompletion co = { 3949 .coroutine = qemu_coroutine_self(), 3950 }; 3951 BlockDriverAIOCB *acb; 3952 3953 if (is_write) { 3954 acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors, 3955 bdrv_co_io_em_complete, &co); 3956 } else { 3957 acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors, 3958 bdrv_co_io_em_complete, &co); 3959 } 3960 3961 trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb); 3962 if (!acb) { 3963 return -EIO; 3964 } 3965 qemu_coroutine_yield(); 3966 3967 return co.ret; 3968 } 3969 3970 static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs, 3971 int64_t sector_num, int nb_sectors, 3972 QEMUIOVector *iov) 3973 { 3974 return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false); 3975 } 3976 3977 static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs, 3978 int64_t sector_num, int nb_sectors, 3979 QEMUIOVector *iov) 3980 { 3981 return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true); 3982 } 3983 3984 static void coroutine_fn bdrv_flush_co_entry(void *opaque) 3985 { 3986 RwCo *rwco = opaque; 3987 3988 rwco->ret = bdrv_co_flush(rwco->bs); 3989 } 3990 3991 int coroutine_fn bdrv_co_flush(BlockDriverState *bs) 3992 { 3993 int ret; 3994 3995 if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) { 3996 return 0; 3997 } 3998 3999 /* Write back cached data to the OS even with cache=unsafe */ 4000 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS); 4001 if (bs->drv->bdrv_co_flush_to_os) { 4002 ret = bs->drv->bdrv_co_flush_to_os(bs); 4003 if (ret < 0) { 4004 return ret; 4005 } 4006 } 4007 4008 /* But don't actually force it to the disk with cache=unsafe */ 4009 if (bs->open_flags & BDRV_O_NO_FLUSH) { 4010 goto flush_parent; 4011 } 4012 4013 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK); 4014 if (bs->drv->bdrv_co_flush_to_disk) { 4015 ret = bs->drv->bdrv_co_flush_to_disk(bs); 4016 } else if (bs->drv->bdrv_aio_flush) { 4017 BlockDriverAIOCB *acb; 4018 CoroutineIOCompletion co = { 4019 .coroutine = qemu_coroutine_self(), 4020 }; 4021 4022 acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co); 4023 if (acb == NULL) { 4024 ret = -EIO; 4025 } else { 4026 qemu_coroutine_yield(); 4027 ret = co.ret; 4028 } 4029 } else { 4030 /* 4031 * Some block drivers always operate in either writethrough or unsafe 4032 * mode and don't support bdrv_flush therefore. Usually qemu doesn't 4033 * know how the server works (because the behaviour is hardcoded or 4034 * depends on server-side configuration), so we can't ensure that 4035 * everything is safe on disk. Returning an error doesn't work because 4036 * that would break guests even if the server operates in writethrough 4037 * mode. 
4038 * 4039 * Let's hope the user knows what he's doing. 4040 */ 4041 ret = 0; 4042 } 4043 if (ret < 0) { 4044 return ret; 4045 } 4046 4047 /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH 4048 * in the case of cache=unsafe, so there are no useless flushes. 4049 */ 4050 flush_parent: 4051 return bdrv_co_flush(bs->file); 4052 } 4053 4054 void bdrv_invalidate_cache(BlockDriverState *bs) 4055 { 4056 if (bs->drv && bs->drv->bdrv_invalidate_cache) { 4057 bs->drv->bdrv_invalidate_cache(bs); 4058 } 4059 } 4060 4061 void bdrv_invalidate_cache_all(void) 4062 { 4063 BlockDriverState *bs; 4064 4065 QTAILQ_FOREACH(bs, &bdrv_states, list) { 4066 bdrv_invalidate_cache(bs); 4067 } 4068 } 4069 4070 void bdrv_clear_incoming_migration_all(void) 4071 { 4072 BlockDriverState *bs; 4073 4074 QTAILQ_FOREACH(bs, &bdrv_states, list) { 4075 bs->open_flags = bs->open_flags & ~(BDRV_O_INCOMING); 4076 } 4077 } 4078 4079 int bdrv_flush(BlockDriverState *bs) 4080 { 4081 Coroutine *co; 4082 RwCo rwco = { 4083 .bs = bs, 4084 .ret = NOT_DONE, 4085 }; 4086 4087 if (qemu_in_coroutine()) { 4088 /* Fast-path if already in coroutine context */ 4089 bdrv_flush_co_entry(&rwco); 4090 } else { 4091 co = qemu_coroutine_create(bdrv_flush_co_entry); 4092 qemu_coroutine_enter(co, &rwco); 4093 while (rwco.ret == NOT_DONE) { 4094 qemu_aio_wait(); 4095 } 4096 } 4097 4098 return rwco.ret; 4099 } 4100 4101 static void coroutine_fn bdrv_discard_co_entry(void *opaque) 4102 { 4103 RwCo *rwco = opaque; 4104 4105 rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors); 4106 } 4107 4108 int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num, 4109 int nb_sectors) 4110 { 4111 if (!bs->drv) { 4112 return -ENOMEDIUM; 4113 } else if (bdrv_check_request(bs, sector_num, nb_sectors)) { 4114 return -EIO; 4115 } else if (bs->read_only) { 4116 return -EROFS; 4117 } 4118 4119 if (bs->dirty_bitmap) { 4120 bdrv_reset_dirty(bs, sector_num, nb_sectors); 4121 } 4122 4123 /* Do nothing if disabled. 
*/ 4124 if (!(bs->open_flags & BDRV_O_UNMAP)) { 4125 return 0; 4126 } 4127 4128 if (bs->drv->bdrv_co_discard) { 4129 return bs->drv->bdrv_co_discard(bs, sector_num, nb_sectors); 4130 } else if (bs->drv->bdrv_aio_discard) { 4131 BlockDriverAIOCB *acb; 4132 CoroutineIOCompletion co = { 4133 .coroutine = qemu_coroutine_self(), 4134 }; 4135 4136 acb = bs->drv->bdrv_aio_discard(bs, sector_num, nb_sectors, 4137 bdrv_co_io_em_complete, &co); 4138 if (acb == NULL) { 4139 return -EIO; 4140 } else { 4141 qemu_coroutine_yield(); 4142 return co.ret; 4143 } 4144 } else { 4145 return 0; 4146 } 4147 } 4148 4149 int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors) 4150 { 4151 Coroutine *co; 4152 RwCo rwco = { 4153 .bs = bs, 4154 .sector_num = sector_num, 4155 .nb_sectors = nb_sectors, 4156 .ret = NOT_DONE, 4157 }; 4158 4159 if (qemu_in_coroutine()) { 4160 /* Fast-path if already in coroutine context */ 4161 bdrv_discard_co_entry(&rwco); 4162 } else { 4163 co = qemu_coroutine_create(bdrv_discard_co_entry); 4164 qemu_coroutine_enter(co, &rwco); 4165 while (rwco.ret == NOT_DONE) { 4166 qemu_aio_wait(); 4167 } 4168 } 4169 4170 return rwco.ret; 4171 } 4172 4173 /**************************************************************/ 4174 /* removable device support */ 4175 4176 /** 4177 * Return TRUE if the media is present 4178 */ 4179 int bdrv_is_inserted(BlockDriverState *bs) 4180 { 4181 BlockDriver *drv = bs->drv; 4182 4183 if (!drv) 4184 return 0; 4185 if (!drv->bdrv_is_inserted) 4186 return 1; 4187 return drv->bdrv_is_inserted(bs); 4188 } 4189 4190 /** 4191 * Return whether the media changed since the last call to this 4192 * function, or -ENOTSUP if we don't know. Most drivers don't know. 4193 */ 4194 int bdrv_media_changed(BlockDriverState *bs) 4195 { 4196 BlockDriver *drv = bs->drv; 4197 4198 if (drv && drv->bdrv_media_changed) { 4199 return drv->bdrv_media_changed(bs); 4200 } 4201 return -ENOTSUP; 4202 } 4203 4204 /** 4205 * If eject_flag is TRUE, eject the media. Otherwise, close the tray 4206 */ 4207 void bdrv_eject(BlockDriverState *bs, bool eject_flag) 4208 { 4209 BlockDriver *drv = bs->drv; 4210 4211 if (drv && drv->bdrv_eject) { 4212 drv->bdrv_eject(bs, eject_flag); 4213 } 4214 4215 if (bs->device_name[0] != '\0') { 4216 bdrv_emit_qmp_eject_event(bs, eject_flag); 4217 } 4218 } 4219 4220 /** 4221 * Lock or unlock the media (if it is locked, the user won't be able 4222 * to eject it manually). 4223 */ 4224 void bdrv_lock_medium(BlockDriverState *bs, bool locked) 4225 { 4226 BlockDriver *drv = bs->drv; 4227 4228 trace_bdrv_lock_medium(bs, locked); 4229 4230 if (drv && drv->bdrv_lock_medium) { 4231 drv->bdrv_lock_medium(bs, locked); 4232 } 4233 } 4234 4235 /* needed for generic scsi interface */ 4236 4237 int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf) 4238 { 4239 BlockDriver *drv = bs->drv; 4240 4241 if (drv && drv->bdrv_ioctl) 4242 return drv->bdrv_ioctl(bs, req, buf); 4243 return -ENOTSUP; 4244 } 4245 4246 BlockDriverAIOCB *bdrv_aio_ioctl(BlockDriverState *bs, 4247 unsigned long int req, void *buf, 4248 BlockDriverCompletionFunc *cb, void *opaque) 4249 { 4250 BlockDriver *drv = bs->drv; 4251 4252 if (drv && drv->bdrv_aio_ioctl) 4253 return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque); 4254 return NULL; 4255 } 4256 4257 void bdrv_set_buffer_alignment(BlockDriverState *bs, int align) 4258 { 4259 bs->buffer_alignment = align; 4260 } 4261 4262 void *qemu_blockalign(BlockDriverState *bs, size_t size) 4263 { 4264 return qemu_memalign((bs && bs->buffer_alignment) ? 
void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    return qemu_memalign((bs && bs->buffer_alignment) ?
                         bs->buffer_alignment : 512, size);
}

/*
 * Check if all memory in this vector is sector aligned.
 */
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
{
    int i;

    for (i = 0; i < qiov->niov; i++) {
        if ((uintptr_t) qiov->iov[i].iov_base % bs->buffer_alignment) {
            return false;
        }
    }

    return true;
}

void bdrv_set_dirty_tracking(BlockDriverState *bs, int granularity)
{
    int64_t bitmap_size;

    assert((granularity & (granularity - 1)) == 0);

    if (granularity) {
        granularity >>= BDRV_SECTOR_BITS;
        assert(!bs->dirty_bitmap);
        bitmap_size = (bdrv_getlength(bs) >> BDRV_SECTOR_BITS);
        bs->dirty_bitmap = hbitmap_alloc(bitmap_size, ffs(granularity) - 1);
    } else {
        if (bs->dirty_bitmap) {
            hbitmap_free(bs->dirty_bitmap);
            bs->dirty_bitmap = NULL;
        }
    }
}

int bdrv_get_dirty(BlockDriverState *bs, int64_t sector)
{
    if (bs->dirty_bitmap) {
        return hbitmap_get(bs->dirty_bitmap, sector);
    } else {
        return 0;
    }
}

void bdrv_dirty_iter_init(BlockDriverState *bs, HBitmapIter *hbi)
{
    hbitmap_iter_init(hbi, bs->dirty_bitmap, 0);
}

void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector,
                    int nr_sectors)
{
    hbitmap_set(bs->dirty_bitmap, cur_sector, nr_sectors);
}

void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector,
                      int nr_sectors)
{
    hbitmap_reset(bs->dirty_bitmap, cur_sector, nr_sectors);
}

int64_t bdrv_get_dirty_count(BlockDriverState *bs)
{
    if (bs->dirty_bitmap) {
        return hbitmap_count(bs->dirty_bitmap);
    } else {
        return 0;
    }
}

/* Get a reference to bs */
void bdrv_ref(BlockDriverState *bs)
{
    bs->refcnt++;
}

/* Release a previously grabbed reference to bs.
 * If after releasing, reference count is zero, the BlockDriverState is
 * deleted. */
void bdrv_unref(BlockDriverState *bs)
{
    assert(bs->refcnt > 0);
    if (--bs->refcnt == 0) {
        bdrv_delete(bs);
    }
}

void bdrv_set_in_use(BlockDriverState *bs, int in_use)
{
    assert(bs->in_use != in_use);
    bs->in_use = in_use;
}

int bdrv_in_use(BlockDriverState *bs)
{
    return bs->in_use;
}

void bdrv_iostatus_enable(BlockDriverState *bs)
{
    bs->iostatus_enabled = true;
    bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

/* The I/O status is only enabled if the drive explicitly
 * enables it _and_ the VM is configured to stop on errors */
bool bdrv_iostatus_is_enabled(const BlockDriverState *bs)
{
    return (bs->iostatus_enabled &&
           (bs->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
            bs->on_write_error == BLOCKDEV_ON_ERROR_STOP   ||
            bs->on_read_error == BLOCKDEV_ON_ERROR_STOP));
}

void bdrv_iostatus_disable(BlockDriverState *bs)
{
    bs->iostatus_enabled = false;
}

void bdrv_iostatus_reset(BlockDriverState *bs)
{
    if (bdrv_iostatus_is_enabled(bs)) {
        bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
        if (bs->job) {
            block_job_iostatus_reset(bs->job);
        }
    }
}

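/*
 * Record an I/O error in the device's I/O status.  Only the first error is
 * kept: ENOSPC maps to BLOCK_DEVICE_IO_STATUS_NOSPACE, anything else to
 * BLOCK_DEVICE_IO_STATUS_FAILED.  Must only be called while the I/O status
 * is enabled (see bdrv_iostatus_is_enabled()).
 */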
void bdrv_iostatus_set_err(BlockDriverState *bs, int error)
{
    assert(bdrv_iostatus_is_enabled(bs));
    if (bs->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        bs->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                         BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}

void
bdrv_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie, int64_t bytes,
        enum BlockAcctType type)
{
    assert(type < BDRV_MAX_IOTYPE);

    cookie->bytes = bytes;
    cookie->start_time_ns = get_clock();
    cookie->type = type;
}

void
bdrv_acct_done(BlockDriverState *bs, BlockAcctCookie *cookie)
{
    assert(cookie->type < BDRV_MAX_IOTYPE);

    bs->nr_bytes[cookie->type] += cookie->bytes;
    bs->nr_ops[cookie->type]++;
    bs->total_time_ns[cookie->type] += get_clock() - cookie->start_time_ns;
}

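/*
 * Create a new image file.  @options is the "-o" style option string for
 * @fmt; the image size comes from @img_size unless a backing file is given,
 * in which case it may be taken from the backing file instead.  Errors are
 * reported through @errp.
 */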
void bdrv_img_create(const char *filename, const char *fmt,
                     const char *base_filename, const char *base_fmt,
                     char *options, uint64_t img_size, int flags,
                     Error **errp, bool quiet)
{
    QEMUOptionParameter *param = NULL, *create_options = NULL;
    QEMUOptionParameter *backing_fmt, *backing_file, *size;
    BlockDriverState *bs = NULL;
    BlockDriver *drv, *proto_drv;
    BlockDriver *backing_drv = NULL;
    int ret = 0;

    /* Find driver and parse its options */
    drv = bdrv_find_format(fmt);
    if (!drv) {
        error_setg(errp, "Unknown file format '%s'", fmt);
        return;
    }

    proto_drv = bdrv_find_protocol(filename, true);
    if (!proto_drv) {
        error_setg(errp, "Unknown protocol '%s'", filename);
        return;
    }

    create_options = append_option_parameters(create_options,
                                              drv->create_options);
    create_options = append_option_parameters(create_options,
                                              proto_drv->create_options);

    /* Create parameter list with default values */
    param = parse_option_parameters("", create_options, param);

    set_option_parameter_int(param, BLOCK_OPT_SIZE, img_size);

    /* Parse -o options */
    if (options) {
        param = parse_option_parameters(options, create_options, param);
        if (param == NULL) {
            error_setg(errp, "Invalid options for file format '%s'.", fmt);
            goto out;
        }
    }

    if (base_filename) {
        if (set_option_parameter(param, BLOCK_OPT_BACKING_FILE,
                                 base_filename)) {
            error_setg(errp, "Backing file not supported for file format '%s'",
                       fmt);
            goto out;
        }
    }

    if (base_fmt) {
        if (set_option_parameter(param, BLOCK_OPT_BACKING_FMT, base_fmt)) {
            error_setg(errp, "Backing file format not supported for file "
                             "format '%s'", fmt);
            goto out;
        }
    }

    backing_file = get_option_parameter(param, BLOCK_OPT_BACKING_FILE);
    if (backing_file && backing_file->value.s) {
        if (!strcmp(filename, backing_file->value.s)) {
            error_setg(errp, "Error: Trying to create an image with the "
                             "same filename as the backing file");
            goto out;
        }
    }

    backing_fmt = get_option_parameter(param, BLOCK_OPT_BACKING_FMT);
    if (backing_fmt && backing_fmt->value.s) {
        backing_drv = bdrv_find_format(backing_fmt->value.s);
        if (!backing_drv) {
            error_setg(errp, "Unknown backing file format '%s'",
                       backing_fmt->value.s);
            goto out;
        }
    }

    // The size for the image must always be specified, with one exception:
    // If we are using a backing file, we can obtain the size from there
    size = get_option_parameter(param, BLOCK_OPT_SIZE);
    if (size && size->value.n == -1) {
        if (backing_file && backing_file->value.s) {
            uint64_t size;
            char buf[32];
            int back_flags;

            /* backing files always opened read-only */
            back_flags =
                flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);

            bs = bdrv_new("");

            ret = bdrv_open(bs, backing_file->value.s, NULL, back_flags,
                            backing_drv);
            if (ret < 0) {
                error_setg_errno(errp, -ret, "Could not open '%s'",
                                 backing_file->value.s);
                goto out;
            }
            bdrv_get_geometry(bs, &size);
            size *= 512;

            snprintf(buf, sizeof(buf), "%" PRId64, size);
            set_option_parameter(param, BLOCK_OPT_SIZE, buf);
        } else {
            error_setg(errp, "Image creation needs a size parameter");
            goto out;
        }
    }

    if (!quiet) {
        printf("Formatting '%s', fmt=%s ", filename, fmt);
        print_option_parameters(param);
        puts("");
    }
    ret = bdrv_create(drv, filename, param);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            error_setg(errp, "Formatting or formatting option not supported "
                             "for file format '%s'", fmt);
        } else if (ret == -EFBIG) {
            const char *cluster_size_hint = "";
            if (get_option_parameter(create_options, BLOCK_OPT_CLUSTER_SIZE)) {
                cluster_size_hint = " (try using a larger cluster size)";
            }
            error_setg(errp, "The image size is too large for file format "
                             "'%s'%s", fmt, cluster_size_hint);
        } else {
            error_setg(errp, "%s: error while creating %s: %s", filename, fmt,
                       strerror(-ret));
        }
    }

out:
    free_option_parameters(create_options);
    free_option_parameters(param);

    if (bs) {
        bdrv_unref(bs);
    }
}

AioContext *bdrv_get_aio_context(BlockDriverState *bs)
{
    /* Currently BlockDriverState always uses the main loop AioContext */
    return qemu_get_aio_context();
}

void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier)
{
    notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
}