/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "config-host.h"
#include "qemu-common.h"
#include "trace.h"
#include "monitor.h"
#include "block_int.h"
#include "module.h"
#include "qjson.h"
#include "qemu-coroutine.h"
#include "qmp-commands.h"
#include "qemu-timer.h"

#ifdef CONFIG_BSD
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/queue.h>
#ifndef __DragonFly__
#include <sys/disk.h>
#endif
#endif

#ifdef _WIN32
#include <windows.h>
#endif

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

typedef enum {
    BDRV_REQ_COPY_ON_READ = 0x1,
    BDRV_REQ_ZERO_WRITE   = 0x2,
} BdrvRequestFlags;

static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov);
static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov);
static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                               int64_t sector_num,
                                               QEMUIOVector *qiov,
                                               int nb_sectors,
                                               BlockDriverCompletionFunc *cb,
                                               void *opaque,
                                               bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors);

static bool bdrv_exceed_bps_limits(BlockDriverState *bs, int nb_sectors,
        bool is_write, double elapsed_time, uint64_t *wait);
static bool bdrv_exceed_iops_limits(BlockDriverState *bs, bool is_write,
        double elapsed_time, uint64_t *wait);
static bool bdrv_exceed_io_limits(BlockDriverState *bs, int nb_sectors,
        bool is_write, int64_t *wait);

static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
    QTAILQ_HEAD_INITIALIZER(bdrv_states);

static QLIST_HEAD(, BlockDriver) bdrv_drivers =
    QLIST_HEAD_INITIALIZER(bdrv_drivers);

/* The device to use for VM snapshots */
static BlockDriverState *bs_snapshots;

/* If non-zero, use only whitelisted block drivers */
static int use_bdrv_whitelist;

#ifdef _WIN32
static int is_windows_drive_prefix(const char *filename)
{
    return (((filename[0] >= 'a' && filename[0] <= 'z') ||
             (filename[0] >= 'A' && filename[0] <= 'Z')) &&
            filename[1] == ':');
}

int is_windows_drive(const char *filename)
{
    if (is_windows_drive_prefix(filename) &&
        filename[2] == '\0')
        return 1;
    if (strstart(filename, "\\\\.\\", NULL) ||
        strstart(filename, "//./", NULL))
        return 1;
    return 0;
}
#endif

/* throttling disk I/O limits */
void bdrv_io_limits_disable(BlockDriverState *bs)
{
    bs->io_limits_enabled = false;

    while (qemu_co_queue_next(&bs->throttled_reqs));

    if (bs->block_timer) {
        qemu_del_timer(bs->block_timer);
        qemu_free_timer(bs->block_timer);
        bs->block_timer = NULL;
    }

    bs->slice_start = 0;
    bs->slice_end = 0;
    bs->slice_time = 0;
    memset(&bs->io_base, 0, sizeof(bs->io_base));
}

static void bdrv_block_timer(void *opaque)
{
    BlockDriverState *bs = opaque;

    qemu_co_queue_next(&bs->throttled_reqs);
}

void bdrv_io_limits_enable(BlockDriverState *bs)
{
    qemu_co_queue_init(&bs->throttled_reqs);
    bs->block_timer = qemu_new_timer_ns(vm_clock, bdrv_block_timer, bs);
    bs->slice_time = 5 * BLOCK_IO_SLICE_TIME;
    bs->slice_start = qemu_get_clock_ns(vm_clock);
    bs->slice_end = bs->slice_start + bs->slice_time;
    memset(&bs->io_base, 0, sizeof(bs->io_base));
    bs->io_limits_enabled = true;
}

bool bdrv_io_limits_enabled(BlockDriverState *bs)
{
    BlockIOLimit *io_limits = &bs->io_limits;
    return io_limits->bps[BLOCK_IO_LIMIT_READ]
         || io_limits->bps[BLOCK_IO_LIMIT_WRITE]
         || io_limits->bps[BLOCK_IO_LIMIT_TOTAL]
         || io_limits->iops[BLOCK_IO_LIMIT_READ]
         || io_limits->iops[BLOCK_IO_LIMIT_WRITE]
         || io_limits->iops[BLOCK_IO_LIMIT_TOTAL];
}
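
/*
 * Illustrative sketch only (the variable names below are hypothetical): a
 * caller such as the drive setup code can fill a BlockIOLimit and hand it to
 * bdrv_set_io_limits() further down in this file; throttling then becomes
 * active once any limit is non-zero, mirroring what bdrv_open() does.
 *
 *     BlockIOLimit limits = { {0}, {0} };
 *     limits.bps[BLOCK_IO_LIMIT_TOTAL]  = 10 * 1024 * 1024;  // 10 MB/s total
 *     limits.iops[BLOCK_IO_LIMIT_WRITE] = 200;               // 200 write ops/s
 *     bdrv_set_io_limits(bs, &limits);
 *     if (bs->io_limits_enabled) {
 *         bdrv_io_limits_enable(bs);
 *     }
 */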

static void bdrv_io_limits_intercept(BlockDriverState *bs,
                                     bool is_write, int nb_sectors)
{
    int64_t wait_time = -1;

    if (!qemu_co_queue_empty(&bs->throttled_reqs)) {
        qemu_co_queue_wait(&bs->throttled_reqs);
    }

    /* Requests are meant to be serviced in FIFO order.  The next throttled
     * request will not be dequeued until the current request is allowed to be
     * serviced.  So if the current request still exceeds the limits, it is
     * re-inserted at the head of the queue, and all requests that follow it
     * remain in the throttled_reqs queue.
     */

    while (bdrv_exceed_io_limits(bs, nb_sectors, is_write, &wait_time)) {
        qemu_mod_timer(bs->block_timer,
                       wait_time + qemu_get_clock_ns(vm_clock));
        qemu_co_queue_wait_insert_head(&bs->throttled_reqs);
    }

    qemu_co_queue_next(&bs->throttled_reqs);
}

/* check if the path starts with "<protocol>:" */
static int path_has_protocol(const char *path)
{
#ifdef _WIN32
    if (is_windows_drive(path) ||
        is_windows_drive_prefix(path)) {
        return 0;
    }
#endif

    return strchr(path, ':') != NULL;
}

int path_is_absolute(const char *path)
{
    const char *p;
#ifdef _WIN32
    /* specific case for names like: "\\.\d:" */
    if (*path == '/' || *path == '\\')
        return 1;
#endif
    p = strchr(path, ':');
    if (p)
        p++;
    else
        p = path;
#ifdef _WIN32
    return (*p == '/' || *p == '\\');
#else
    return (*p == '/');
#endif
}

/* if filename is absolute, just copy it to dest. Otherwise, build a
   path to it by considering it is relative to base_path. URLs are
   supported. */
void path_combine(char *dest, int dest_size,
                  const char *base_path,
                  const char *filename)
{
    const char *p, *p1;
    int len;

    if (dest_size <= 0)
        return;
    if (path_is_absolute(filename)) {
        pstrcpy(dest, dest_size, filename);
    } else {
        p = strchr(base_path, ':');
        if (p)
            p++;
        else
            p = base_path;
        p1 = strrchr(base_path, '/');
#ifdef _WIN32
        {
            const char *p2;
            p2 = strrchr(base_path, '\\');
            if (!p1 || p2 > p1)
                p1 = p2;
        }
#endif
        if (p1)
            p1++;
        else
            p1 = base_path;
        if (p1 > p)
            p = p1;
        len = p - base_path;
        if (len > dest_size - 1)
            len = dest_size - 1;
        memcpy(dest, base_path, len);
        dest[len] = '\0';
        pstrcat(dest, dest_size, filename);
    }
}

void bdrv_register(BlockDriver *bdrv)
{
    /* Block drivers without coroutine functions need emulation */
    if (!bdrv->bdrv_co_readv) {
        bdrv->bdrv_co_readv = bdrv_co_readv_em;
        bdrv->bdrv_co_writev = bdrv_co_writev_em;

        /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
         * the block driver lacks aio we need to emulate that too.
         */
        if (!bdrv->bdrv_aio_readv) {
            /* add AIO emulation layer */
            bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
            bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
        }
    }

    QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list);
}

/* create a new block device (by default it is empty) */
BlockDriverState *bdrv_new(const char *device_name)
{
    BlockDriverState *bs;

    bs = g_malloc0(sizeof(BlockDriverState));
    pstrcpy(bs->device_name, sizeof(bs->device_name), device_name);
    if (device_name[0] != '\0') {
        QTAILQ_INSERT_TAIL(&bdrv_states, bs, list);
    }
    bdrv_iostatus_disable(bs);
    return bs;
}

BlockDriver *bdrv_find_format(const char *format_name)
{
    BlockDriver *drv1;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (!strcmp(drv1->format_name, format_name)) {
            return drv1;
        }
    }
    return NULL;
}

static int bdrv_is_whitelisted(BlockDriver *drv)
{
    static const char *whitelist[] = {
        CONFIG_BDRV_WHITELIST
    };
    const char **p;

    if (!whitelist[0])
        return 1;               /* no whitelist, anything goes */

    for (p = whitelist; *p; p++) {
        if (!strcmp(drv->format_name, *p)) {
            return 1;
        }
    }
    return 0;
}

BlockDriver *bdrv_find_whitelisted_format(const char *format_name)
{
    BlockDriver *drv = bdrv_find_format(format_name);
    return drv && bdrv_is_whitelisted(drv) ? drv : NULL;
}

typedef struct CreateCo {
    BlockDriver *drv;
    char *filename;
    QEMUOptionParameter *options;
    int ret;
} CreateCo;

static void coroutine_fn bdrv_create_co_entry(void *opaque)
{
    CreateCo *cco = opaque;
    assert(cco->drv);

    cco->ret = cco->drv->bdrv_create(cco->filename, cco->options);
}

int bdrv_create(BlockDriver *drv, const char* filename,
    QEMUOptionParameter *options)
{
    int ret;

    Coroutine *co;
    CreateCo cco = {
        .drv = drv,
        .filename = g_strdup(filename),
        .options = options,
        .ret = NOT_DONE,
    };

    if (!drv->bdrv_create) {
        return -ENOTSUP;
    }

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_create_co_entry(&cco);
    } else {
        co = qemu_coroutine_create(bdrv_create_co_entry);
        qemu_coroutine_enter(co, &cco);
        while (cco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }

    ret = cco.ret;
    g_free(cco.filename);

    return ret;
}

int bdrv_create_file(const char* filename, QEMUOptionParameter *options)
{
    BlockDriver *drv;

    drv = bdrv_find_protocol(filename);
    if (drv == NULL) {
        return -ENOENT;
    }

    return bdrv_create(drv, filename, options);
}

#ifdef _WIN32
void get_tmp_filename(char *filename, int size)
{
    char temp_dir[MAX_PATH];

    GetTempPath(MAX_PATH, temp_dir);
    GetTempFileName(temp_dir, "qem", 0, filename);
}
#else
void get_tmp_filename(char *filename, int size)
{
    int fd;
    const char *tmpdir;
    /* XXX: race condition possible */
    tmpdir = getenv("TMPDIR");
    if (!tmpdir)
        tmpdir = "/tmp";
    snprintf(filename, size, "%s/vl.XXXXXX", tmpdir);
    fd = mkstemp(filename);
    close(fd);
}
#endif
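
/*
 * Illustrative sketch (hypothetical caller, mirroring the BDRV_O_SNAPSHOT path
 * in bdrv_open() further down): creating an empty qcow2 image goes through the
 * format driver's create_options.
 *
 *     BlockDriver *drv = bdrv_find_format("qcow2");
 *     QEMUOptionParameter *opts =
 *         parse_option_parameters("", drv->create_options, NULL);
 *     set_option_parameter_int(opts, BLOCK_OPT_SIZE, 1024 * 1024 * 1024);
 *     ret = bdrv_create(drv, "test.qcow2", opts);
 *     free_option_parameters(opts);
 *
 * bdrv_create_file() above is the protocol-level variant: it resolves the
 * protocol driver from the filename instead of taking a format driver.
 */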

/*
 * Detect host devices. By convention, /dev/cdrom[N] is always
 * recognized as a host CDROM.
 */
static BlockDriver *find_hdev_driver(const char *filename)
{
    int score_max = 0, score;
    BlockDriver *drv = NULL, *d;

    QLIST_FOREACH(d, &bdrv_drivers, list) {
        if (d->bdrv_probe_device) {
            score = d->bdrv_probe_device(filename);
            if (score > score_max) {
                score_max = score;
                drv = d;
            }
        }
    }

    return drv;
}

BlockDriver *bdrv_find_protocol(const char *filename)
{
    BlockDriver *drv1;
    char protocol[128];
    int len;
    const char *p;

    /* TODO Drivers without bdrv_file_open must be specified explicitly */

    /*
     * XXX(hch): we really should not let host device detection
     * override an explicit protocol specification, but moving this
     * later breaks access to device names with colons in them.
     * Thanks to the brain-dead persistent naming schemes on udev-
     * based Linux systems those actually are quite common.
     */
    drv1 = find_hdev_driver(filename);
    if (drv1) {
        return drv1;
    }

    if (!path_has_protocol(filename)) {
        return bdrv_find_format("file");
    }
    p = strchr(filename, ':');
    assert(p != NULL);
    len = p - filename;
    if (len > sizeof(protocol) - 1)
        len = sizeof(protocol) - 1;
    memcpy(protocol, filename, len);
    protocol[len] = '\0';
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->protocol_name &&
            !strcmp(drv1->protocol_name, protocol)) {
            return drv1;
        }
    }
    return NULL;
}

static int find_image_format(const char *filename, BlockDriver **pdrv)
{
    int ret, score, score_max;
    BlockDriver *drv1, *drv;
    uint8_t buf[2048];
    BlockDriverState *bs;

    ret = bdrv_file_open(&bs, filename, 0);
    if (ret < 0) {
        *pdrv = NULL;
        return ret;
    }

    /* Return the raw BlockDriver * to scsi-generic devices or empty drives */
    if (bs->sg || !bdrv_is_inserted(bs)) {
        bdrv_delete(bs);
        drv = bdrv_find_format("raw");
        if (!drv) {
            ret = -ENOENT;
        }
        *pdrv = drv;
        return ret;
    }

    ret = bdrv_pread(bs, 0, buf, sizeof(buf));
    bdrv_delete(bs);
    if (ret < 0) {
        *pdrv = NULL;
        return ret;
    }

    score_max = 0;
    drv = NULL;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->bdrv_probe) {
            score = drv1->bdrv_probe(buf, ret, filename);
            if (score > score_max) {
                score_max = score;
                drv = drv1;
            }
        }
    }
    if (!drv) {
        ret = -ENOENT;
    }
    *pdrv = drv;
    return ret;
}

/**
 * Set the current 'total_sectors' value
 */
static int refresh_total_sectors(BlockDriverState *bs, int64_t hint)
{
    BlockDriver *drv = bs->drv;

    /* Do not attempt drv->bdrv_getlength() on scsi-generic devices */
    if (bs->sg)
        return 0;

    /* query actual device if possible, otherwise just trust the hint */
    if (drv->bdrv_getlength) {
        int64_t length = drv->bdrv_getlength(bs);
        if (length < 0) {
            return length;
        }
        hint = length >> BDRV_SECTOR_BITS;
    }

    bs->total_sectors = hint;
    return 0;
}

/**
 * Set open flags for a given cache mode
 *
 * Return 0 on success, -1 if the cache mode was invalid.
 */
int bdrv_parse_cache_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_CACHE_MASK;

    if (!strcmp(mode, "off") || !strcmp(mode, "none")) {
        *flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "directsync")) {
        *flags |= BDRV_O_NOCACHE;
    } else if (!strcmp(mode, "writeback")) {
        *flags |= BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "unsafe")) {
        *flags |= BDRV_O_CACHE_WB;
        *flags |= BDRV_O_NO_FLUSH;
    } else if (!strcmp(mode, "writethrough")) {
        /* this is the default */
    } else {
        return -1;
    }

    return 0;
}

/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}

/*
 * Common part for opening disk images and files
 */
static int bdrv_open_common(BlockDriverState *bs, const char *filename,
    int flags, BlockDriver *drv)
{
    int ret, open_flags;

    assert(drv != NULL);

    trace_bdrv_open_common(bs, filename, flags, drv->format_name);

    bs->file = NULL;
    bs->total_sectors = 0;
    bs->encrypted = 0;
    bs->valid_key = 0;
    bs->sg = 0;
    bs->open_flags = flags;
    bs->growable = 0;
    bs->buffer_alignment = 512;

    assert(bs->copy_on_read == 0); /* bdrv_new() and bdrv_close() make it so */
    if ((flags & BDRV_O_RDWR) && (flags & BDRV_O_COPY_ON_READ)) {
        bdrv_enable_copy_on_read(bs);
    }

    pstrcpy(bs->filename, sizeof(bs->filename), filename);
    bs->backing_file[0] = '\0';

    if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv)) {
        return -ENOTSUP;
    }

    bs->drv = drv;
    bs->opaque = g_malloc0(drv->instance_size);

    bs->enable_write_cache = !!(flags & BDRV_O_CACHE_WB);

    /*
     * Clear flags that are internal to the block layer before opening the
     * image.
     */
    open_flags = flags & ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);

    /*
     * Snapshots should be writable.
     */
    if (bs->is_temporary) {
        open_flags |= BDRV_O_RDWR;
    }

    bs->keep_read_only = bs->read_only = !(open_flags & BDRV_O_RDWR);

    /* Open the image, either directly or using a protocol */
    if (drv->bdrv_file_open) {
        ret = drv->bdrv_file_open(bs, filename, open_flags);
    } else {
        ret = bdrv_file_open(&bs->file, filename, open_flags);
        if (ret >= 0) {
            ret = drv->bdrv_open(bs, open_flags);
        }
    }

    if (ret < 0) {
        goto free_and_fail;
    }

    ret = refresh_total_sectors(bs, bs->total_sectors);
    if (ret < 0) {
        goto free_and_fail;
    }

#ifndef _WIN32
    if (bs->is_temporary) {
        unlink(filename);
    }
#endif
    return 0;

free_and_fail:
    if (bs->file) {
        bdrv_delete(bs->file);
        bs->file = NULL;
    }
    g_free(bs->opaque);
    bs->opaque = NULL;
    bs->drv = NULL;
    return ret;
}
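
/*
 * Illustrative sketch only (hypothetical caller and filenames): the usual
 * pattern is to resolve a cache mode with bdrv_parse_cache_flags() and then
 * open the image, letting the format be probed when no driver is given.
 *
 *     BlockDriverState *bs = bdrv_new("ide0-hd0");
 *     int flags = 0;
 *     if (bdrv_parse_cache_flags("writeback", &flags) < 0) {
 *         // invalid cache= option
 *     }
 *     if (bdrv_open(bs, "disk.qcow2", flags, NULL) < 0) {  // NULL: probe format
 *         bdrv_delete(bs);
 *     }
 */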

/*
 * Opens a file using a protocol (file, host_device, nbd, ...)
 */
int bdrv_file_open(BlockDriverState **pbs, const char *filename, int flags)
{
    BlockDriverState *bs;
    BlockDriver *drv;
    int ret;

    drv = bdrv_find_protocol(filename);
    if (!drv) {
        return -ENOENT;
    }

    bs = bdrv_new("");
    ret = bdrv_open_common(bs, filename, flags, drv);
    if (ret < 0) {
        bdrv_delete(bs);
        return ret;
    }
    bs->growable = 1;
    *pbs = bs;
    return 0;
}

/*
 * Opens a disk image (raw, qcow2, vmdk, ...)
 */
int bdrv_open(BlockDriverState *bs, const char *filename, int flags,
              BlockDriver *drv)
{
    int ret;
    char tmp_filename[PATH_MAX];

    if (flags & BDRV_O_SNAPSHOT) {
        BlockDriverState *bs1;
        int64_t total_size;
        int is_protocol = 0;
        BlockDriver *bdrv_qcow2;
        QEMUOptionParameter *options;
        char backing_filename[PATH_MAX];

        /* if snapshot, we create a temporary backing file and open it
           instead of opening 'filename' directly */

        /* if there is a backing file, use it */
        bs1 = bdrv_new("");
        ret = bdrv_open(bs1, filename, 0, drv);
        if (ret < 0) {
            bdrv_delete(bs1);
            return ret;
        }
        total_size = bdrv_getlength(bs1) & BDRV_SECTOR_MASK;

        if (bs1->drv && bs1->drv->protocol_name)
            is_protocol = 1;

        bdrv_delete(bs1);

        get_tmp_filename(tmp_filename, sizeof(tmp_filename));

        /* Real path is meaningless for protocols */
        if (is_protocol)
            snprintf(backing_filename, sizeof(backing_filename),
                     "%s", filename);
        else if (!realpath(filename, backing_filename))
            return -errno;

        bdrv_qcow2 = bdrv_find_format("qcow2");
        options = parse_option_parameters("", bdrv_qcow2->create_options, NULL);

        set_option_parameter_int(options, BLOCK_OPT_SIZE, total_size);
        set_option_parameter(options, BLOCK_OPT_BACKING_FILE, backing_filename);
        if (drv) {
            set_option_parameter(options, BLOCK_OPT_BACKING_FMT,
                drv->format_name);
        }

        ret = bdrv_create(bdrv_qcow2, tmp_filename, options);
        free_option_parameters(options);
        if (ret < 0) {
            return ret;
        }

        filename = tmp_filename;
        drv = bdrv_qcow2;
        bs->is_temporary = 1;
    }

    /* Find the right image format driver */
    if (!drv) {
        ret = find_image_format(filename, &drv);
    }

    if (!drv) {
        goto unlink_and_fail;
    }

    /* Open the image */
    ret = bdrv_open_common(bs, filename, flags, drv);
    if (ret < 0) {
        goto unlink_and_fail;
    }

    /* If there is a backing file, use it */
    if ((flags & BDRV_O_NO_BACKING) == 0 && bs->backing_file[0] != '\0') {
        char backing_filename[PATH_MAX];
        int back_flags;
        BlockDriver *back_drv = NULL;

        bs->backing_hd = bdrv_new("");

        if (path_has_protocol(bs->backing_file)) {
            pstrcpy(backing_filename, sizeof(backing_filename),
                    bs->backing_file);
        } else {
            path_combine(backing_filename, sizeof(backing_filename),
                         filename, bs->backing_file);
        }

        if (bs->backing_format[0] != '\0') {
            back_drv = bdrv_find_format(bs->backing_format);
        }

        /* backing files always opened read-only */
        back_flags =
            flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);

        ret = bdrv_open(bs->backing_hd, backing_filename, back_flags, back_drv);
        if (ret < 0) {
            bdrv_close(bs);
            return ret;
        }
        if (bs->is_temporary) {
            bs->backing_hd->keep_read_only = !(flags & BDRV_O_RDWR);
        } else {
            /* base image inherits from "parent" */
            bs->backing_hd->keep_read_only = bs->keep_read_only;
        }
    }

    if (!bdrv_key_required(bs)) {
        bdrv_dev_change_media_cb(bs, true);
    }

    /* throttling disk I/O limits */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_enable(bs);
    }

    return 0;

unlink_and_fail:
    if (bs->is_temporary) {
        unlink(filename);
    }
    return ret;
}

void bdrv_close(BlockDriverState *bs)
{
    bdrv_flush(bs);
    if (bs->drv) {
        if (bs->job) {
            block_job_cancel_sync(bs->job);
        }
        bdrv_drain_all();

        if (bs == bs_snapshots) {
            bs_snapshots = NULL;
        }
        if (bs->backing_hd) {
            bdrv_delete(bs->backing_hd);
            bs->backing_hd = NULL;
        }
        bs->drv->bdrv_close(bs);
        g_free(bs->opaque);
#ifdef _WIN32
        if (bs->is_temporary) {
            unlink(bs->filename);
        }
#endif
        bs->opaque = NULL;
        bs->drv = NULL;
        bs->copy_on_read = 0;

        if (bs->file != NULL) {
            bdrv_close(bs->file);
        }

        bdrv_dev_change_media_cb(bs, false);
    }

    /* throttling disk I/O limits */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_disable(bs);
    }
}

void bdrv_close_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        bdrv_close(bs);
    }
}

/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * Note that completion of an asynchronous I/O operation can trigger any
 * number of other I/O operations on other devices---for example a coroutine
 * can be arbitrarily complex and a constant flow of I/O can come until the
 * coroutine is complete.  Because of this, it is not possible to have a
 * function to drain a single device's I/O queue.
 */
void bdrv_drain_all(void)
{
    BlockDriverState *bs;
    bool busy;

    do {
        busy = qemu_aio_wait();

        /* FIXME: We do not have timer support here, so this is effectively
         * a busy wait.
         */
        QTAILQ_FOREACH(bs, &bdrv_states, list) {
            if (!qemu_co_queue_empty(&bs->throttled_reqs)) {
                qemu_co_queue_restart_all(&bs->throttled_reqs);
                busy = true;
            }
        }
    } while (busy);

    /* If requests are still pending there is a bug somewhere */
    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        assert(QLIST_EMPTY(&bs->tracked_requests));
        assert(qemu_co_queue_empty(&bs->throttled_reqs));
    }
}

/* make a BlockDriverState anonymous by removing from bdrv_state list.
   Also, NULL terminate the device_name to prevent double remove */
void bdrv_make_anon(BlockDriverState *bs)
{
    if (bs->device_name[0] != '\0') {
        QTAILQ_REMOVE(&bdrv_states, bs, list);
    }
    bs->device_name[0] = '\0';
}

/*
 * Add new bs contents at the top of an image chain while the chain is
 * live, while keeping required fields on the top layer.
 *
 * This will modify the BlockDriverState fields, and swap contents
 * between bs_new and bs_top. Both bs_new and bs_top are modified.
 *
 * bs_new is required to be anonymous.
 *
 * This function does not create any image files.
 */
void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top)
{
    BlockDriverState tmp;

    /* bs_new must be anonymous */
    assert(bs_new->device_name[0] == '\0');

    tmp = *bs_new;

    /* there are some fields that need to stay on the top layer: */

    /* dev info */
    tmp.dev_ops = bs_top->dev_ops;
    tmp.dev_opaque = bs_top->dev_opaque;
    tmp.dev = bs_top->dev;
    tmp.buffer_alignment = bs_top->buffer_alignment;
    tmp.copy_on_read = bs_top->copy_on_read;

    /* i/o timing parameters */
    tmp.slice_time = bs_top->slice_time;
    tmp.slice_start = bs_top->slice_start;
    tmp.slice_end = bs_top->slice_end;
    tmp.io_limits = bs_top->io_limits;
    tmp.io_base = bs_top->io_base;
    tmp.throttled_reqs = bs_top->throttled_reqs;
    tmp.block_timer = bs_top->block_timer;
    tmp.io_limits_enabled = bs_top->io_limits_enabled;

    /* geometry */
    tmp.cyls = bs_top->cyls;
    tmp.heads = bs_top->heads;
    tmp.secs = bs_top->secs;
    tmp.translation = bs_top->translation;

    /* r/w error */
    tmp.on_read_error = bs_top->on_read_error;
    tmp.on_write_error = bs_top->on_write_error;

    /* i/o status */
    tmp.iostatus_enabled = bs_top->iostatus_enabled;
    tmp.iostatus = bs_top->iostatus;

    /* keep the same entry in bdrv_states */
    pstrcpy(tmp.device_name, sizeof(tmp.device_name), bs_top->device_name);
    tmp.list = bs_top->list;

    /* The contents of 'tmp' will become bs_top, as we are
     * swapping bs_new and bs_top contents. */
    tmp.backing_hd = bs_new;
    pstrcpy(tmp.backing_file, sizeof(tmp.backing_file), bs_top->filename);
    bdrv_get_format(bs_top, tmp.backing_format, sizeof(tmp.backing_format));

    /* swap contents of the fixed new bs and the current top */
    *bs_new = *bs_top;
    *bs_top = tmp;

    /* device_name[] was carried over from the old bs_top.  bs_new
     * shouldn't be in bdrv_states, so we need to make device_name[]
     * reflect the anonymity of bs_new
     */
    bs_new->device_name[0] = '\0';

    /* clear the copied fields in the new backing file */
    bdrv_detach_dev(bs_new, bs_new->dev);

    qemu_co_queue_init(&bs_new->throttled_reqs);
    memset(&bs_new->io_base, 0, sizeof(bs_new->io_base));
    memset(&bs_new->io_limits, 0, sizeof(bs_new->io_limits));
    bdrv_iostatus_disable(bs_new);

    /* we don't use bdrv_io_limits_disable() for this, because we don't want
     * to affect or delete the block_timer, as it has been moved to bs_top */
    bs_new->io_limits_enabled = false;
    bs_new->block_timer = NULL;
    bs_new->slice_time = 0;
    bs_new->slice_start = 0;
    bs_new->slice_end = 0;
}

void bdrv_delete(BlockDriverState *bs)
{
    assert(!bs->dev);
    assert(!bs->job);
    assert(!bs->in_use);

    /* remove from list, if necessary */
    bdrv_make_anon(bs);

    bdrv_close(bs);
    if (bs->file != NULL) {
        bdrv_delete(bs->file);
    }

    assert(bs != bs_snapshots);
    g_free(bs);
}

int bdrv_attach_dev(BlockDriverState *bs, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    if (bs->dev) {
        return -EBUSY;
    }
    bs->dev = dev;
    bdrv_iostatus_reset(bs);
    return 0;
}

/* TODO qdevified devices don't use this, remove when devices are qdevified */
void bdrv_attach_dev_nofail(BlockDriverState *bs, void *dev)
{
    if (bdrv_attach_dev(bs, dev) < 0) {
        abort();
    }
}

void bdrv_detach_dev(BlockDriverState *bs, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    assert(bs->dev == dev);
    bs->dev = NULL;
    bs->dev_ops = NULL;
    bs->dev_opaque = NULL;
    bs->buffer_alignment = 512;
}

/* TODO change to return DeviceState * when all users are qdevified */
void *bdrv_get_attached_dev(BlockDriverState *bs)
{
    return bs->dev;
}

void bdrv_set_dev_ops(BlockDriverState *bs, const BlockDevOps *ops,
                      void *opaque)
{
    bs->dev_ops = ops;
    bs->dev_opaque = opaque;
    if (bdrv_dev_has_removable_media(bs) && bs == bs_snapshots) {
        bs_snapshots = NULL;
    }
}

void bdrv_emit_qmp_error_event(const BlockDriverState *bdrv,
                               BlockQMPEventAction action, int is_read)
{
    QObject *data;
    const char *action_str;

    switch (action) {
    case BDRV_ACTION_REPORT:
        action_str = "report";
        break;
    case BDRV_ACTION_IGNORE:
        action_str = "ignore";
        break;
    case BDRV_ACTION_STOP:
        action_str = "stop";
        break;
    default:
        abort();
    }

    data = qobject_from_jsonf("{ 'device': %s, 'action': %s, 'operation': %s }",
                              bdrv->device_name,
                              action_str,
                              is_read ? "read" : "write");
    monitor_protocol_event(QEVENT_BLOCK_IO_ERROR, data);

    qobject_decref(data);
}

static void bdrv_emit_qmp_eject_event(BlockDriverState *bs, bool ejected)
{
    QObject *data;

    data = qobject_from_jsonf("{ 'device': %s, 'tray-open': %i }",
                              bdrv_get_device_name(bs), ejected);
    monitor_protocol_event(QEVENT_DEVICE_TRAY_MOVED, data);

    qobject_decref(data);
}

static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load)
{
    if (bs->dev_ops && bs->dev_ops->change_media_cb) {
        bool tray_was_closed = !bdrv_dev_is_tray_open(bs);
        bs->dev_ops->change_media_cb(bs->dev_opaque, load);
        if (tray_was_closed) {
            /* tray open */
            bdrv_emit_qmp_eject_event(bs, true);
        }
        if (load) {
            /* tray close */
            bdrv_emit_qmp_eject_event(bs, false);
        }
    }
}

bool bdrv_dev_has_removable_media(BlockDriverState *bs)
{
    return !bs->dev || (bs->dev_ops && bs->dev_ops->change_media_cb);
}

void bdrv_dev_eject_request(BlockDriverState *bs, bool force)
{
    if (bs->dev_ops && bs->dev_ops->eject_request_cb) {
        bs->dev_ops->eject_request_cb(bs->dev_opaque, force);
    }
}

bool bdrv_dev_is_tray_open(BlockDriverState *bs)
{
    if (bs->dev_ops && bs->dev_ops->is_tray_open) {
        return bs->dev_ops->is_tray_open(bs->dev_opaque);
    }
    return false;
}

static void bdrv_dev_resize_cb(BlockDriverState *bs)
{
    if (bs->dev_ops && bs->dev_ops->resize_cb) {
        bs->dev_ops->resize_cb(bs->dev_opaque);
    }
}

bool bdrv_dev_is_medium_locked(BlockDriverState *bs)
{
    if (bs->dev_ops && bs->dev_ops->is_medium_locked) {
        return bs->dev_ops->is_medium_locked(bs->dev_opaque);
    }
    return false;
}

/*
 * Run consistency checks on an image
 *
 * Returns 0 if the check could be completed (it doesn't mean that the image is
 * free of errors) or -errno when an internal error occurred. The results of the
 * check are stored in res.
 */
int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res)
{
    if (bs->drv->bdrv_check == NULL) {
        return -ENOTSUP;
    }

    memset(res, 0, sizeof(*res));
    return bs->drv->bdrv_check(bs, res);
}

#define COMMIT_BUF_SECTORS 2048

/* commit COW file into the raw image */
int bdrv_commit(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    BlockDriver *backing_drv;
    int64_t sector, total_sectors;
    int n, ro, open_flags;
    int ret = 0, rw_ret = 0;
    uint8_t *buf;
    char filename[1024];
    BlockDriverState *bs_rw, *bs_ro;

    if (!drv)
        return -ENOMEDIUM;

    if (!bs->backing_hd) {
        return -ENOTSUP;
    }

    if (bs->backing_hd->keep_read_only) {
        return -EACCES;
    }

    if (bdrv_in_use(bs) || bdrv_in_use(bs->backing_hd)) {
        return -EBUSY;
    }

    backing_drv = bs->backing_hd->drv;
    ro = bs->backing_hd->read_only;
    strncpy(filename, bs->backing_hd->filename, sizeof(filename));
    open_flags = bs->backing_hd->open_flags;

    if (ro) {
        /* re-open as RW */
        bdrv_delete(bs->backing_hd);
        bs->backing_hd = NULL;
        bs_rw = bdrv_new("");
        rw_ret = bdrv_open(bs_rw, filename, open_flags | BDRV_O_RDWR,
                           backing_drv);
        if (rw_ret < 0) {
            bdrv_delete(bs_rw);
            /* try to re-open read-only */
            bs_ro = bdrv_new("");
            ret = bdrv_open(bs_ro, filename, open_flags & ~BDRV_O_RDWR,
                            backing_drv);
            if (ret < 0) {
                bdrv_delete(bs_ro);
                /* drive not functional anymore */
                bs->drv = NULL;
                return ret;
            }
            bs->backing_hd = bs_ro;
            return rw_ret;
        }
        bs->backing_hd = bs_rw;
    }

    total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
    buf = g_malloc(COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE);

    for (sector = 0; sector < total_sectors; sector += n) {
        if (bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n)) {

            if (bdrv_read(bs, sector, buf, n) != 0) {
                ret = -EIO;
                goto ro_cleanup;
            }

            if (bdrv_write(bs->backing_hd, sector, buf, n) != 0) {
                ret = -EIO;
                goto ro_cleanup;
            }
        }
    }

    if (drv->bdrv_make_empty) {
        ret = drv->bdrv_make_empty(bs);
        bdrv_flush(bs);
    }

    /*
     * Make sure all data we wrote to the backing device is actually
     * stable on disk.
     */
    if (bs->backing_hd)
        bdrv_flush(bs->backing_hd);

ro_cleanup:
    g_free(buf);

    if (ro) {
        /* re-open as RO */
        bdrv_delete(bs->backing_hd);
        bs->backing_hd = NULL;
        bs_ro = bdrv_new("");
        ret = bdrv_open(bs_ro, filename, open_flags & ~BDRV_O_RDWR,
                        backing_drv);
        if (ret < 0) {
            bdrv_delete(bs_ro);
            /* drive not functional anymore */
            bs->drv = NULL;
            return ret;
        }
        bs->backing_hd = bs_ro;
        bs->backing_hd->keep_read_only = 0;
    }

    return ret;
}

int bdrv_commit_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        int ret = bdrv_commit(bs);
        if (ret < 0) {
            return ret;
        }
    }
    return 0;
}

struct BdrvTrackedRequest {
    BlockDriverState *bs;
    int64_t sector_num;
    int nb_sectors;
    bool is_write;
    QLIST_ENTRY(BdrvTrackedRequest) list;
    Coroutine *co; /* owner, used for deadlock detection */
    CoQueue wait_queue; /* coroutines blocked on this request */
};

/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}

/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t sector_num,
                                  int nb_sectors, bool is_write)
{
    *req = (BdrvTrackedRequest){
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .is_write = is_write,
        .co = qemu_coroutine_self(),
    };

    qemu_co_queue_init(&req->wait_queue);

    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
}

/**
 * Round a region to cluster boundaries
 */
static void round_to_clusters(BlockDriverState *bs,
                              int64_t sector_num, int nb_sectors,
                              int64_t *cluster_sector_num,
                              int *cluster_nb_sectors)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_sector_num = sector_num;
        *cluster_nb_sectors = nb_sectors;
    } else {
        int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
        *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
        *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
                                            nb_sectors, c);
    }
}

static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t sector_num, int nb_sectors) {
    /*        aaaa   bbbb */
    if (sector_num >= req->sector_num + req->nb_sectors) {
        return false;
    }
    /* bbbb   aaaa        */
    if (req->sector_num >= sector_num + nb_sectors) {
        return false;
    }
    return true;
}
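
/*
 * Worked example for round_to_clusters() above (illustrative numbers, assuming
 * a 64 KiB cluster size, i.e. c = 65536 / 512 = 128 sectors):
 *
 *     sector_num = 130, nb_sectors = 4
 *     cluster_sector_num = QEMU_ALIGN_DOWN(130, 128)         = 128
 *     cluster_nb_sectors = QEMU_ALIGN_UP(130 - 128 + 4, 128) = 128
 *
 * A 4-sector request is widened to the whole 128-sector cluster that contains
 * it, which is what the copy-on-read and overlap-serialization code below
 * relies on.
 */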

static void coroutine_fn wait_for_overlapping_requests(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors)
{
    BdrvTrackedRequest *req;
    int64_t cluster_sector_num;
    int cluster_nb_sectors;
    bool retry;

    /* If we touch the same cluster it counts as an overlap.  This guarantees
     * that allocating writes will be serialized and not race with each other
     * for the same cluster.  For example, in copy-on-read it ensures that the
     * CoR read and write operations are atomic and guest writes cannot
     * interleave between them.
     */
    round_to_clusters(bs, sector_num, nb_sectors,
                      &cluster_sector_num, &cluster_nb_sectors);

    do {
        retry = false;
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (tracked_request_overlaps(req, cluster_sector_num,
                                         cluster_nb_sectors)) {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests.  This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                qemu_co_queue_wait(&req->wait_queue);
                retry = true;
                break;
            }
        }
    } while (retry);
}

/*
 * Return values:
 * 0        - success
 * -EINVAL  - backing format specified, but no file
 * -ENOSPC  - can't update the backing file because no space is left in the
 *            image file header
 * -ENOTSUP - format driver doesn't support changing the backing file
 */
int bdrv_change_backing_file(BlockDriverState *bs,
    const char *backing_file, const char *backing_fmt)
{
    BlockDriver *drv = bs->drv;
    int ret;

    /* Backing file format doesn't make sense without a backing file */
    if (backing_fmt && !backing_file) {
        return -EINVAL;
    }

    if (drv->bdrv_change_backing_file != NULL) {
        ret = drv->bdrv_change_backing_file(bs, backing_file, backing_fmt);
    } else {
        ret = -ENOTSUP;
    }

    if (ret == 0) {
        pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: "");
        pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: "");
    }
    return ret;
}

static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                   size_t size)
{
    int64_t len;

    if (!bdrv_is_inserted(bs))
        return -ENOMEDIUM;

    if (bs->growable)
        return 0;

    len = bdrv_getlength(bs);

    if (offset < 0)
        return -EIO;

    if ((offset > len) || (len - offset < size))
        return -EIO;

    return 0;
}

static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors)
{
    return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
                                   nb_sectors * BDRV_SECTOR_SIZE);
}

typedef struct RwCo {
    BlockDriverState *bs;
    int64_t sector_num;
    int nb_sectors;
    QEMUIOVector *qiov;
    bool is_write;
    int ret;
} RwCo;

static void coroutine_fn bdrv_rw_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    if (!rwco->is_write) {
        rwco->ret = bdrv_co_do_readv(rwco->bs, rwco->sector_num,
                                     rwco->nb_sectors, rwco->qiov, 0);
    } else {
        rwco->ret = bdrv_co_do_writev(rwco->bs, rwco->sector_num,
                                      rwco->nb_sectors, rwco->qiov, 0);
    }
}

/*
 * Process a synchronous request using coroutines
 */
static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
                      int nb_sectors, bool is_write)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
    };
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .qiov = &qiov,
        .is_write = is_write,
        .ret = NOT_DONE,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);

    /**
     * In sync call context, when the vcpu is blocked, this throttling timer
     * will not fire; so the I/O throttling function has to be disabled here
     * if it has been enabled.
     */
    if (bs->io_limits_enabled) {
        fprintf(stderr, "Disabling I/O throttling on '%s' due "
                        "to synchronous I/O.\n", bdrv_get_device_name(bs));
        bdrv_io_limits_disable(bs);
    }

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_rw_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_rw_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }
    return rwco.ret;
}

/* return < 0 if error. See bdrv_write() for the return codes */
int bdrv_read(BlockDriverState *bs, int64_t sector_num,
              uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false);
}

static void set_dirty_bitmap(BlockDriverState *bs, int64_t sector_num,
                             int nb_sectors, int dirty)
{
    int64_t start, end;
    unsigned long val, idx, bit;

    start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
    end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;

    for (; start <= end; start++) {
        idx = start / (sizeof(unsigned long) * 8);
        bit = start % (sizeof(unsigned long) * 8);
        val = bs->dirty_bitmap[idx];
        if (dirty) {
            if (!(val & (1UL << bit))) {
                bs->dirty_count++;
                val |= 1UL << bit;
            }
        } else {
            if (val & (1UL << bit)) {
                bs->dirty_count--;
                val &= ~(1UL << bit);
            }
        }
        bs->dirty_bitmap[idx] = val;
    }
}

/* Return < 0 if error. Important errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   No media inserted.
  -EINVAL      Invalid sector number or nb_sectors
  -EACCES      Trying to write a read-only device
*/
int bdrv_write(BlockDriverState *bs, int64_t sector_num,
               const uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true);
}

int bdrv_pread(BlockDriverState *bs, int64_t offset,
               void *buf, int count1)
{
    uint8_t tmp_buf[BDRV_SECTOR_SIZE];
    int len, nb_sectors, count;
    int64_t sector_num;
    int ret;

    count = count1;
    /* first read to align to sector start */
    len = (BDRV_SECTOR_SIZE - offset) & (BDRV_SECTOR_SIZE - 1);
    if (len > count)
        len = count;
    sector_num = offset >> BDRV_SECTOR_BITS;
    if (len > 0) {
        if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
        memcpy(buf, tmp_buf + (offset & (BDRV_SECTOR_SIZE - 1)), len);
        count -= len;
        if (count == 0)
            return count1;
        sector_num++;
        buf += len;
    }

    /* read the sectors "in place" */
    nb_sectors = count >> BDRV_SECTOR_BITS;
    if (nb_sectors > 0) {
        if ((ret = bdrv_read(bs, sector_num, buf, nb_sectors)) < 0)
            return ret;
        sector_num += nb_sectors;
        len = nb_sectors << BDRV_SECTOR_BITS;
        buf += len;
        count -= len;
    }

    /* add data from the last sector */
    if (count > 0) {
        if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
        memcpy(buf, tmp_buf, count);
    }
    return count1;
}
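
/*
 * Worked example for the alignment handling above (illustrative numbers): a
 * bdrv_pread() of count1 = 1200 bytes at offset = 1000, with 512-byte sectors,
 * is split into
 *
 *     head: 24 bytes copied out of tmp_buf (sector 1, byte offsets 1000..1023)
 *     body: 2 whole sectors (1024 bytes) read directly into buf
 *     tail: 152 bytes copied out of tmp_buf from the following sector
 *
 * bdrv_pwrite() below performs the same split, but read-modify-writes the
 * partial head and tail sectors.
 */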

int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
                const void *buf, int count1)
{
    uint8_t tmp_buf[BDRV_SECTOR_SIZE];
    int len, nb_sectors, count;
    int64_t sector_num;
    int ret;

    count = count1;
    /* first write to align to sector start */
    len = (BDRV_SECTOR_SIZE - offset) & (BDRV_SECTOR_SIZE - 1);
    if (len > count)
        len = count;
    sector_num = offset >> BDRV_SECTOR_BITS;
    if (len > 0) {
        if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
        memcpy(tmp_buf + (offset & (BDRV_SECTOR_SIZE - 1)), buf, len);
        if ((ret = bdrv_write(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
        count -= len;
        if (count == 0)
            return count1;
        sector_num++;
        buf += len;
    }

    /* write the sectors "in place" */
    nb_sectors = count >> BDRV_SECTOR_BITS;
    if (nb_sectors > 0) {
        if ((ret = bdrv_write(bs, sector_num, buf, nb_sectors)) < 0)
            return ret;
        sector_num += nb_sectors;
        len = nb_sectors << BDRV_SECTOR_BITS;
        buf += len;
        count -= len;
    }

    /* add data from the last sector */
    if (count > 0) {
        if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
        memcpy(tmp_buf, buf, count);
        if ((ret = bdrv_write(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
    }
    return count1;
}

/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
    const void *buf, int count)
{
    int ret;

    ret = bdrv_pwrite(bs, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    /* No flush needed for cache modes that use O_DSYNC */
    if ((bs->open_flags & BDRV_O_CACHE_WB) != 0) {
        bdrv_flush(bs);
    }

    return 0;
}

static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file.  This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer;

    BlockDriver *drv = bs->drv;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    int64_t cluster_sector_num;
    int cluster_nb_sectors;
    size_t skip_bytes;
    int ret;

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file.
     */
    round_to_clusters(bs, sector_num, nb_sectors,
                      &cluster_sector_num, &cluster_nb_sectors);

    trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
                                   cluster_sector_num, cluster_nb_sectors);

    iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
    iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len);
    qemu_iovec_init_external(&bounce_qiov, &iov, 1);

    ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
                             &bounce_qiov);
    if (ret < 0) {
        goto err;
    }

    if (drv->bdrv_co_write_zeroes &&
        buffer_is_zero(bounce_buffer, iov.iov_len)) {
        ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
                                      cluster_nb_sectors);
    } else {
        ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
                                  &bounce_qiov);
    }

    if (ret < 0) {
        /* It might be okay to ignore write errors for guest requests.  If this
         * is a deliberate copy-on-read then we don't want to ignore the error.
         * Simply report it in all cases.
         */
        goto err;
    }

    skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
    qemu_iovec_from_buffer(qiov, bounce_buffer + skip_bytes,
                           nb_sectors * BDRV_SECTOR_SIZE);

err:
    qemu_vfree(bounce_buffer);
    return ret;
}

/*
 * Handle a read request in coroutine context
 */
static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }
    if (bdrv_check_request(bs, sector_num, nb_sectors)) {
        return -EIO;
    }

    /* throttling disk read I/O */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_intercept(bs, false, nb_sectors);
    }

    if (bs->copy_on_read) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }
    if (flags & BDRV_REQ_COPY_ON_READ) {
        bs->copy_on_read_in_flight++;
    }

    if (bs->copy_on_read_in_flight) {
        wait_for_overlapping_requests(bs, sector_num, nb_sectors);
    }

    tracked_request_begin(&req, bs, sector_num, nb_sectors, false);

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int pnum;

        ret = bdrv_co_is_allocated(bs, sector_num, nb_sectors, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != nb_sectors) {
            ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
            goto out;
        }
    }

    ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);

out:
    tracked_request_end(&req);

    if (flags & BDRV_REQ_COPY_ON_READ) {
        bs->copy_on_read_in_flight--;
    }

    return ret;
}

int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
    int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_readv(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
}

int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
                            BDRV_REQ_COPY_ON_READ);
}

static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    struct iovec iov;
    int ret;

    /* TODO Emulate only part of misaligned requests instead of letting block
     * drivers return -ENOTSUP and emulate everything */

    /* First try the efficient write zeroes operation */
    if (drv->bdrv_co_write_zeroes) {
        ret = drv->bdrv_co_write_zeroes(bs, sector_num, nb_sectors);
        if (ret != -ENOTSUP) {
            return ret;
        }
    }

    /* Fall back to bounce buffer if write zeroes is unsupported */
    iov.iov_len = nb_sectors * BDRV_SECTOR_SIZE;
    iov.iov_base = qemu_blockalign(bs, iov.iov_len);
    memset(iov.iov_base, 0, iov.iov_len);
    qemu_iovec_init_external(&qiov, &iov, 1);

    ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, &qiov);

    qemu_vfree(iov.iov_base);
    return ret;
}

/*
 * Handle a write request in coroutine context
 */
static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;
    int ret;

    if (!bs->drv) {
        return -ENOMEDIUM;
    }
    if (bs->read_only) {
        return -EACCES;
    }
    if (bdrv_check_request(bs, sector_num, nb_sectors)) {
        return -EIO;
    }

    /* throttling disk write I/O */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_intercept(bs, true, nb_sectors);
    }

    if (bs->copy_on_read_in_flight) {
        wait_for_overlapping_requests(bs, sector_num, nb_sectors);
    }

    tracked_request_begin(&req, bs, sector_num, nb_sectors, true);

    if (flags & BDRV_REQ_ZERO_WRITE) {
        ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors);
    } else {
        ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
    }

    if (bs->dirty_bitmap) {
        set_dirty_bitmap(bs, sector_num, nb_sectors, 1);
    }

    if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {
        bs->wr_highest_sector = sector_num + nb_sectors - 1;
    }

    tracked_request_end(&req);

    return ret;
}

int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
    int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_writev(bs, sector_num, nb_sectors);

    return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
}

int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
                                      int64_t sector_num, int nb_sectors)
{
    trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors);

    return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
                             BDRV_REQ_ZERO_WRITE);
}

/**
 * Truncate file to 'offset' bytes (needed only for file protocols)
 */
int bdrv_truncate(BlockDriverState *bs, int64_t offset)
{
    BlockDriver *drv = bs->drv;
    int ret;
    if (!drv)
        return -ENOMEDIUM;
    if (!drv->bdrv_truncate)
        return -ENOTSUP;
    if (bs->read_only)
        return -EACCES;
    if (bdrv_in_use(bs))
        return -EBUSY;
    ret = drv->bdrv_truncate(bs, offset);
    if (ret == 0) {
        ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
        bdrv_dev_resize_cb(bs);
    }
    return ret;
}

/**
 * Length of an allocated file in bytes. Sparse files are counted by actual
 * allocated space. Return < 0 if error or unknown.
 */
int64_t bdrv_get_allocated_file_size(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (!drv) {
        return -ENOMEDIUM;
    }
    if (drv->bdrv_get_allocated_file_size) {
        return drv->bdrv_get_allocated_file_size(bs);
    }
    if (bs->file) {
        return bdrv_get_allocated_file_size(bs->file);
    }
    return -ENOTSUP;
}

/**
 * Length of a file in bytes. Return < 0 if error or unknown.
 */
int64_t bdrv_getlength(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (!drv)
        return -ENOMEDIUM;

    if (bs->growable || bdrv_dev_has_removable_media(bs)) {
        if (drv->bdrv_getlength) {
            return drv->bdrv_getlength(bs);
        }
    }
    return bs->total_sectors * BDRV_SECTOR_SIZE;
}

/* return 0 as number of sectors if no device present or error */
void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
{
    int64_t length;
    length = bdrv_getlength(bs);
    if (length < 0)
        length = 0;
    else
        length = length >> BDRV_SECTOR_BITS;
    *nb_sectors_ptr = length;
}

struct partition {
    uint8_t boot_ind;           /* 0x80 - active */
    uint8_t head;               /* starting head */
    uint8_t sector;             /* starting sector */
    uint8_t cyl;                /* starting cylinder */
    uint8_t sys_ind;            /* What partition type */
    uint8_t end_head;           /* end head */
    uint8_t end_sector;         /* end sector */
    uint8_t end_cyl;            /* end cylinder */
    uint32_t start_sect;        /* starting sector counting from 0 */
    uint32_t nr_sects;          /* nr of sectors in partition */
} QEMU_PACKED;
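
/*
 * Worked example for guess_disk_lchs() below (illustrative values): if the
 * last used MBR partition entry ends at end_head = 15 and end_sector = 63 on
 * a 1,032,192-sector disk, the code derives
 *
 *     heads     = end_head + 1         = 16
 *     sectors   = end_sector & 63      = 63
 *     cylinders = nb_sectors / (16*63) = 1024
 *
 * which falls inside the accepted 1..16383 cylinder range, so the guess is
 * returned as the logical geometry.
 */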
2087 */ 2088 enabled = bs->io_limits_enabled; 2089 bs->io_limits_enabled = false; 2090 ret = bdrv_read(bs, 0, buf, 1); 2091 bs->io_limits_enabled = enabled; 2092 if (ret < 0) 2093 return -1; 2094 /* test msdos magic */ 2095 if (buf[510] != 0x55 || buf[511] != 0xaa) 2096 return -1; 2097 for(i = 0; i < 4; i++) { 2098 p = ((struct partition *)(buf + 0x1be)) + i; 2099 nr_sects = le32_to_cpu(p->nr_sects); 2100 if (nr_sects && p->end_head) { 2101 /* We make the assumption that the partition terminates on 2102 a cylinder boundary */ 2103 heads = p->end_head + 1; 2104 sectors = p->end_sector & 63; 2105 if (sectors == 0) 2106 continue; 2107 cylinders = nb_sectors / (heads * sectors); 2108 if (cylinders < 1 || cylinders > 16383) 2109 continue; 2110 *pheads = heads; 2111 *psectors = sectors; 2112 *pcylinders = cylinders; 2113 #if 0 2114 printf("guessed geometry: LCHS=%d %d %d\n", 2115 cylinders, heads, sectors); 2116 #endif 2117 return 0; 2118 } 2119 } 2120 return -1; 2121 } 2122 2123 void bdrv_guess_geometry(BlockDriverState *bs, int *pcyls, int *pheads, int *psecs) 2124 { 2125 int translation, lba_detected = 0; 2126 int cylinders, heads, secs; 2127 uint64_t nb_sectors; 2128 2129 /* if a geometry hint is available, use it */ 2130 bdrv_get_geometry(bs, &nb_sectors); 2131 bdrv_get_geometry_hint(bs, &cylinders, &heads, &secs); 2132 translation = bdrv_get_translation_hint(bs); 2133 if (cylinders != 0) { 2134 *pcyls = cylinders; 2135 *pheads = heads; 2136 *psecs = secs; 2137 } else { 2138 if (guess_disk_lchs(bs, &cylinders, &heads, &secs) == 0) { 2139 if (heads > 16) { 2140 /* if heads > 16, it means that a BIOS LBA 2141 translation was active, so the default 2142 hardware geometry is OK */ 2143 lba_detected = 1; 2144 goto default_geometry; 2145 } else { 2146 *pcyls = cylinders; 2147 *pheads = heads; 2148 *psecs = secs; 2149 /* disable any translation to be in sync with 2150 the logical geometry */ 2151 if (translation == BIOS_ATA_TRANSLATION_AUTO) { 2152 bdrv_set_translation_hint(bs, 2153 BIOS_ATA_TRANSLATION_NONE); 2154 } 2155 } 2156 } else { 2157 default_geometry: 2158 /* if no geometry, use a standard physical disk geometry */ 2159 cylinders = nb_sectors / (16 * 63); 2160 2161 if (cylinders > 16383) 2162 cylinders = 16383; 2163 else if (cylinders < 2) 2164 cylinders = 2; 2165 *pcyls = cylinders; 2166 *pheads = 16; 2167 *psecs = 63; 2168 if ((lba_detected == 1) && (translation == BIOS_ATA_TRANSLATION_AUTO)) { 2169 if ((*pcyls * *pheads) <= 131072) { 2170 bdrv_set_translation_hint(bs, 2171 BIOS_ATA_TRANSLATION_LARGE); 2172 } else { 2173 bdrv_set_translation_hint(bs, 2174 BIOS_ATA_TRANSLATION_LBA); 2175 } 2176 } 2177 } 2178 bdrv_set_geometry_hint(bs, *pcyls, *pheads, *psecs); 2179 } 2180 } 2181 2182 void bdrv_set_geometry_hint(BlockDriverState *bs, 2183 int cyls, int heads, int secs) 2184 { 2185 bs->cyls = cyls; 2186 bs->heads = heads; 2187 bs->secs = secs; 2188 } 2189 2190 void bdrv_set_translation_hint(BlockDriverState *bs, int translation) 2191 { 2192 bs->translation = translation; 2193 } 2194 2195 void bdrv_get_geometry_hint(BlockDriverState *bs, 2196 int *pcyls, int *pheads, int *psecs) 2197 { 2198 *pcyls = bs->cyls; 2199 *pheads = bs->heads; 2200 *psecs = bs->secs; 2201 } 2202 2203 /* throttling disk io limits */ 2204 void bdrv_set_io_limits(BlockDriverState *bs, 2205 BlockIOLimit *io_limits) 2206 { 2207 bs->io_limits = *io_limits; 2208 bs->io_limits_enabled = bdrv_io_limits_enabled(bs); 2209 } 2210 2211 /* Recognize floppy formats */ 2212 typedef struct FDFormat { 2213 FDriveType drive; 2214 
uint8_t last_sect; 2215 uint8_t max_track; 2216 uint8_t max_head; 2217 FDriveRate rate; 2218 } FDFormat; 2219 2220 static const FDFormat fd_formats[] = { 2221 /* First entry is default format */ 2222 /* 1.44 MB 3"1/2 floppy disks */ 2223 { FDRIVE_DRV_144, 18, 80, 1, FDRIVE_RATE_500K, }, 2224 { FDRIVE_DRV_144, 20, 80, 1, FDRIVE_RATE_500K, }, 2225 { FDRIVE_DRV_144, 21, 80, 1, FDRIVE_RATE_500K, }, 2226 { FDRIVE_DRV_144, 21, 82, 1, FDRIVE_RATE_500K, }, 2227 { FDRIVE_DRV_144, 21, 83, 1, FDRIVE_RATE_500K, }, 2228 { FDRIVE_DRV_144, 22, 80, 1, FDRIVE_RATE_500K, }, 2229 { FDRIVE_DRV_144, 23, 80, 1, FDRIVE_RATE_500K, }, 2230 { FDRIVE_DRV_144, 24, 80, 1, FDRIVE_RATE_500K, }, 2231 /* 2.88 MB 3"1/2 floppy disks */ 2232 { FDRIVE_DRV_288, 36, 80, 1, FDRIVE_RATE_1M, }, 2233 { FDRIVE_DRV_288, 39, 80, 1, FDRIVE_RATE_1M, }, 2234 { FDRIVE_DRV_288, 40, 80, 1, FDRIVE_RATE_1M, }, 2235 { FDRIVE_DRV_288, 44, 80, 1, FDRIVE_RATE_1M, }, 2236 { FDRIVE_DRV_288, 48, 80, 1, FDRIVE_RATE_1M, }, 2237 /* 720 kB 3"1/2 floppy disks */ 2238 { FDRIVE_DRV_144, 9, 80, 1, FDRIVE_RATE_250K, }, 2239 { FDRIVE_DRV_144, 10, 80, 1, FDRIVE_RATE_250K, }, 2240 { FDRIVE_DRV_144, 10, 82, 1, FDRIVE_RATE_250K, }, 2241 { FDRIVE_DRV_144, 10, 83, 1, FDRIVE_RATE_250K, }, 2242 { FDRIVE_DRV_144, 13, 80, 1, FDRIVE_RATE_250K, }, 2243 { FDRIVE_DRV_144, 14, 80, 1, FDRIVE_RATE_250K, }, 2244 /* 1.2 MB 5"1/4 floppy disks */ 2245 { FDRIVE_DRV_120, 15, 80, 1, FDRIVE_RATE_500K, }, 2246 { FDRIVE_DRV_120, 18, 80, 1, FDRIVE_RATE_500K, }, 2247 { FDRIVE_DRV_120, 18, 82, 1, FDRIVE_RATE_500K, }, 2248 { FDRIVE_DRV_120, 18, 83, 1, FDRIVE_RATE_500K, }, 2249 { FDRIVE_DRV_120, 20, 80, 1, FDRIVE_RATE_500K, }, 2250 /* 720 kB 5"1/4 floppy disks */ 2251 { FDRIVE_DRV_120, 9, 80, 1, FDRIVE_RATE_250K, }, 2252 { FDRIVE_DRV_120, 11, 80, 1, FDRIVE_RATE_250K, }, 2253 /* 360 kB 5"1/4 floppy disks */ 2254 { FDRIVE_DRV_120, 9, 40, 1, FDRIVE_RATE_300K, }, 2255 { FDRIVE_DRV_120, 9, 40, 0, FDRIVE_RATE_300K, }, 2256 { FDRIVE_DRV_120, 10, 41, 1, FDRIVE_RATE_300K, }, 2257 { FDRIVE_DRV_120, 10, 42, 1, FDRIVE_RATE_300K, }, 2258 /* 320 kB 5"1/4 floppy disks */ 2259 { FDRIVE_DRV_120, 8, 40, 1, FDRIVE_RATE_250K, }, 2260 { FDRIVE_DRV_120, 8, 40, 0, FDRIVE_RATE_250K, }, 2261 /* 360 kB must match 5"1/4 better than 3"1/2... 
*/ 2262 { FDRIVE_DRV_144, 9, 80, 0, FDRIVE_RATE_250K, }, 2263 /* end */ 2264 { FDRIVE_DRV_NONE, -1, -1, 0, 0, }, 2265 }; 2266 2267 void bdrv_get_floppy_geometry_hint(BlockDriverState *bs, int *nb_heads, 2268 int *max_track, int *last_sect, 2269 FDriveType drive_in, FDriveType *drive, 2270 FDriveRate *rate) 2271 { 2272 const FDFormat *parse; 2273 uint64_t nb_sectors, size; 2274 int i, first_match, match; 2275 2276 bdrv_get_geometry_hint(bs, nb_heads, max_track, last_sect); 2277 if (*nb_heads != 0 && *max_track != 0 && *last_sect != 0) { 2278 /* User defined disk */ 2279 *rate = FDRIVE_RATE_500K; 2280 } else { 2281 bdrv_get_geometry(bs, &nb_sectors); 2282 match = -1; 2283 first_match = -1; 2284 for (i = 0; ; i++) { 2285 parse = &fd_formats[i]; 2286 if (parse->drive == FDRIVE_DRV_NONE) { 2287 break; 2288 } 2289 if (drive_in == parse->drive || 2290 drive_in == FDRIVE_DRV_NONE) { 2291 size = (parse->max_head + 1) * parse->max_track * 2292 parse->last_sect; 2293 if (nb_sectors == size) { 2294 match = i; 2295 break; 2296 } 2297 if (first_match == -1) { 2298 first_match = i; 2299 } 2300 } 2301 } 2302 if (match == -1) { 2303 if (first_match == -1) { 2304 match = 1; 2305 } else { 2306 match = first_match; 2307 } 2308 parse = &fd_formats[match]; 2309 } 2310 *nb_heads = parse->max_head + 1; 2311 *max_track = parse->max_track; 2312 *last_sect = parse->last_sect; 2313 *drive = parse->drive; 2314 *rate = parse->rate; 2315 } 2316 } 2317 2318 int bdrv_get_translation_hint(BlockDriverState *bs) 2319 { 2320 return bs->translation; 2321 } 2322 2323 void bdrv_set_on_error(BlockDriverState *bs, BlockErrorAction on_read_error, 2324 BlockErrorAction on_write_error) 2325 { 2326 bs->on_read_error = on_read_error; 2327 bs->on_write_error = on_write_error; 2328 } 2329 2330 BlockErrorAction bdrv_get_on_error(BlockDriverState *bs, int is_read) 2331 { 2332 return is_read ? 
bs->on_read_error : bs->on_write_error; 2333 } 2334 2335 int bdrv_is_read_only(BlockDriverState *bs) 2336 { 2337 return bs->read_only; 2338 } 2339 2340 int bdrv_is_sg(BlockDriverState *bs) 2341 { 2342 return bs->sg; 2343 } 2344 2345 int bdrv_enable_write_cache(BlockDriverState *bs) 2346 { 2347 return bs->enable_write_cache; 2348 } 2349 2350 int bdrv_is_encrypted(BlockDriverState *bs) 2351 { 2352 if (bs->backing_hd && bs->backing_hd->encrypted) 2353 return 1; 2354 return bs->encrypted; 2355 } 2356 2357 int bdrv_key_required(BlockDriverState *bs) 2358 { 2359 BlockDriverState *backing_hd = bs->backing_hd; 2360 2361 if (backing_hd && backing_hd->encrypted && !backing_hd->valid_key) 2362 return 1; 2363 return (bs->encrypted && !bs->valid_key); 2364 } 2365 2366 int bdrv_set_key(BlockDriverState *bs, const char *key) 2367 { 2368 int ret; 2369 if (bs->backing_hd && bs->backing_hd->encrypted) { 2370 ret = bdrv_set_key(bs->backing_hd, key); 2371 if (ret < 0) 2372 return ret; 2373 if (!bs->encrypted) 2374 return 0; 2375 } 2376 if (!bs->encrypted) { 2377 return -EINVAL; 2378 } else if (!bs->drv || !bs->drv->bdrv_set_key) { 2379 return -ENOMEDIUM; 2380 } 2381 ret = bs->drv->bdrv_set_key(bs, key); 2382 if (ret < 0) { 2383 bs->valid_key = 0; 2384 } else if (!bs->valid_key) { 2385 bs->valid_key = 1; 2386 /* call the change callback now, we skipped it on open */ 2387 bdrv_dev_change_media_cb(bs, true); 2388 } 2389 return ret; 2390 } 2391 2392 void bdrv_get_format(BlockDriverState *bs, char *buf, int buf_size) 2393 { 2394 if (!bs->drv) { 2395 buf[0] = '\0'; 2396 } else { 2397 pstrcpy(buf, buf_size, bs->drv->format_name); 2398 } 2399 } 2400 2401 void bdrv_iterate_format(void (*it)(void *opaque, const char *name), 2402 void *opaque) 2403 { 2404 BlockDriver *drv; 2405 2406 QLIST_FOREACH(drv, &bdrv_drivers, list) { 2407 it(opaque, drv->format_name); 2408 } 2409 } 2410 2411 BlockDriverState *bdrv_find(const char *name) 2412 { 2413 BlockDriverState *bs; 2414 2415 QTAILQ_FOREACH(bs, &bdrv_states, list) { 2416 if (!strcmp(name, bs->device_name)) { 2417 return bs; 2418 } 2419 } 2420 return NULL; 2421 } 2422 2423 BlockDriverState *bdrv_next(BlockDriverState *bs) 2424 { 2425 if (!bs) { 2426 return QTAILQ_FIRST(&bdrv_states); 2427 } 2428 return QTAILQ_NEXT(bs, list); 2429 } 2430 2431 void bdrv_iterate(void (*it)(void *opaque, BlockDriverState *bs), void *opaque) 2432 { 2433 BlockDriverState *bs; 2434 2435 QTAILQ_FOREACH(bs, &bdrv_states, list) { 2436 it(opaque, bs); 2437 } 2438 } 2439 2440 const char *bdrv_get_device_name(BlockDriverState *bs) 2441 { 2442 return bs->device_name; 2443 } 2444 2445 void bdrv_flush_all(void) 2446 { 2447 BlockDriverState *bs; 2448 2449 QTAILQ_FOREACH(bs, &bdrv_states, list) { 2450 bdrv_flush(bs); 2451 } 2452 } 2453 2454 int bdrv_has_zero_init(BlockDriverState *bs) 2455 { 2456 assert(bs->drv); 2457 2458 if (bs->drv->bdrv_has_zero_init) { 2459 return bs->drv->bdrv_has_zero_init(bs); 2460 } 2461 2462 return 1; 2463 } 2464 2465 typedef struct BdrvCoIsAllocatedData { 2466 BlockDriverState *bs; 2467 int64_t sector_num; 2468 int nb_sectors; 2469 int *pnum; 2470 int ret; 2471 bool done; 2472 } BdrvCoIsAllocatedData; 2473 2474 /* 2475 * Returns true iff the specified sector is present in the disk image. Drivers 2476 * not implementing the functionality are assumed to not support backing files, 2477 * hence all their sectors are reported as allocated. 2478 * 2479 * If 'sector_num' is beyond the end of the disk image the return value is 0 2480 * and 'pnum' is set to 0. 
2481 * 2482 * 'pnum' is set to the number of sectors (including and immediately following 2483 * the specified sector) that are known to be in the same 2484 * allocated/unallocated state. 2485 * 2486 * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes 2487 * beyond the end of the disk image it will be clamped. 2488 */ 2489 int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t sector_num, 2490 int nb_sectors, int *pnum) 2491 { 2492 int64_t n; 2493 2494 if (sector_num >= bs->total_sectors) { 2495 *pnum = 0; 2496 return 0; 2497 } 2498 2499 n = bs->total_sectors - sector_num; 2500 if (n < nb_sectors) { 2501 nb_sectors = n; 2502 } 2503 2504 if (!bs->drv->bdrv_co_is_allocated) { 2505 *pnum = nb_sectors; 2506 return 1; 2507 } 2508 2509 return bs->drv->bdrv_co_is_allocated(bs, sector_num, nb_sectors, pnum); 2510 } 2511 2512 /* Coroutine wrapper for bdrv_is_allocated() */ 2513 static void coroutine_fn bdrv_is_allocated_co_entry(void *opaque) 2514 { 2515 BdrvCoIsAllocatedData *data = opaque; 2516 BlockDriverState *bs = data->bs; 2517 2518 data->ret = bdrv_co_is_allocated(bs, data->sector_num, data->nb_sectors, 2519 data->pnum); 2520 data->done = true; 2521 } 2522 2523 /* 2524 * Synchronous wrapper around bdrv_co_is_allocated(). 2525 * 2526 * See bdrv_co_is_allocated() for details. 2527 */ 2528 int bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, int nb_sectors, 2529 int *pnum) 2530 { 2531 Coroutine *co; 2532 BdrvCoIsAllocatedData data = { 2533 .bs = bs, 2534 .sector_num = sector_num, 2535 .nb_sectors = nb_sectors, 2536 .pnum = pnum, 2537 .done = false, 2538 }; 2539 2540 co = qemu_coroutine_create(bdrv_is_allocated_co_entry); 2541 qemu_coroutine_enter(co, &data); 2542 while (!data.done) { 2543 qemu_aio_wait(); 2544 } 2545 return data.ret; 2546 } 2547 2548 BlockInfoList *qmp_query_block(Error **errp) 2549 { 2550 BlockInfoList *head = NULL, *cur_item = NULL; 2551 BlockDriverState *bs; 2552 2553 QTAILQ_FOREACH(bs, &bdrv_states, list) { 2554 BlockInfoList *info = g_malloc0(sizeof(*info)); 2555 2556 info->value = g_malloc0(sizeof(*info->value)); 2557 info->value->device = g_strdup(bs->device_name); 2558 info->value->type = g_strdup("unknown"); 2559 info->value->locked = bdrv_dev_is_medium_locked(bs); 2560 info->value->removable = bdrv_dev_has_removable_media(bs); 2561 2562 if (bdrv_dev_has_removable_media(bs)) { 2563 info->value->has_tray_open = true; 2564 info->value->tray_open = bdrv_dev_is_tray_open(bs); 2565 } 2566 2567 if (bdrv_iostatus_is_enabled(bs)) { 2568 info->value->has_io_status = true; 2569 info->value->io_status = bs->iostatus; 2570 } 2571 2572 if (bs->drv) { 2573 info->value->has_inserted = true; 2574 info->value->inserted = g_malloc0(sizeof(*info->value->inserted)); 2575 info->value->inserted->file = g_strdup(bs->filename); 2576 info->value->inserted->ro = bs->read_only; 2577 info->value->inserted->drv = g_strdup(bs->drv->format_name); 2578 info->value->inserted->encrypted = bs->encrypted; 2579 if (bs->backing_file[0]) { 2580 info->value->inserted->has_backing_file = true; 2581 info->value->inserted->backing_file = g_strdup(bs->backing_file); 2582 } 2583 2584 if (bs->io_limits_enabled) { 2585 info->value->inserted->bps = 2586 bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL]; 2587 info->value->inserted->bps_rd = 2588 bs->io_limits.bps[BLOCK_IO_LIMIT_READ]; 2589 info->value->inserted->bps_wr = 2590 bs->io_limits.bps[BLOCK_IO_LIMIT_WRITE]; 2591 info->value->inserted->iops = 2592 bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL]; 2593 
info->value->inserted->iops_rd = 2594 bs->io_limits.iops[BLOCK_IO_LIMIT_READ]; 2595 info->value->inserted->iops_wr = 2596 bs->io_limits.iops[BLOCK_IO_LIMIT_WRITE]; 2597 } 2598 } 2599 2600 /* XXX: waiting for the qapi to support GSList */ 2601 if (!cur_item) { 2602 head = cur_item = info; 2603 } else { 2604 cur_item->next = info; 2605 cur_item = info; 2606 } 2607 } 2608 2609 return head; 2610 } 2611 2612 /* Consider exposing this as a full fledged QMP command */ 2613 static BlockStats *qmp_query_blockstat(const BlockDriverState *bs, Error **errp) 2614 { 2615 BlockStats *s; 2616 2617 s = g_malloc0(sizeof(*s)); 2618 2619 if (bs->device_name[0]) { 2620 s->has_device = true; 2621 s->device = g_strdup(bs->device_name); 2622 } 2623 2624 s->stats = g_malloc0(sizeof(*s->stats)); 2625 s->stats->rd_bytes = bs->nr_bytes[BDRV_ACCT_READ]; 2626 s->stats->wr_bytes = bs->nr_bytes[BDRV_ACCT_WRITE]; 2627 s->stats->rd_operations = bs->nr_ops[BDRV_ACCT_READ]; 2628 s->stats->wr_operations = bs->nr_ops[BDRV_ACCT_WRITE]; 2629 s->stats->wr_highest_offset = bs->wr_highest_sector * BDRV_SECTOR_SIZE; 2630 s->stats->flush_operations = bs->nr_ops[BDRV_ACCT_FLUSH]; 2631 s->stats->wr_total_time_ns = bs->total_time_ns[BDRV_ACCT_WRITE]; 2632 s->stats->rd_total_time_ns = bs->total_time_ns[BDRV_ACCT_READ]; 2633 s->stats->flush_total_time_ns = bs->total_time_ns[BDRV_ACCT_FLUSH]; 2634 2635 if (bs->file) { 2636 s->has_parent = true; 2637 s->parent = qmp_query_blockstat(bs->file, NULL); 2638 } 2639 2640 return s; 2641 } 2642 2643 BlockStatsList *qmp_query_blockstats(Error **errp) 2644 { 2645 BlockStatsList *head = NULL, *cur_item = NULL; 2646 BlockDriverState *bs; 2647 2648 QTAILQ_FOREACH(bs, &bdrv_states, list) { 2649 BlockStatsList *info = g_malloc0(sizeof(*info)); 2650 info->value = qmp_query_blockstat(bs, NULL); 2651 2652 /* XXX: waiting for the qapi to support GSList */ 2653 if (!cur_item) { 2654 head = cur_item = info; 2655 } else { 2656 cur_item->next = info; 2657 cur_item = info; 2658 } 2659 } 2660 2661 return head; 2662 } 2663 2664 const char *bdrv_get_encrypted_filename(BlockDriverState *bs) 2665 { 2666 if (bs->backing_hd && bs->backing_hd->encrypted) 2667 return bs->backing_file; 2668 else if (bs->encrypted) 2669 return bs->filename; 2670 else 2671 return NULL; 2672 } 2673 2674 void bdrv_get_backing_filename(BlockDriverState *bs, 2675 char *filename, int filename_size) 2676 { 2677 pstrcpy(filename, filename_size, bs->backing_file); 2678 } 2679 2680 int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num, 2681 const uint8_t *buf, int nb_sectors) 2682 { 2683 BlockDriver *drv = bs->drv; 2684 if (!drv) 2685 return -ENOMEDIUM; 2686 if (!drv->bdrv_write_compressed) 2687 return -ENOTSUP; 2688 if (bdrv_check_request(bs, sector_num, nb_sectors)) 2689 return -EIO; 2690 2691 if (bs->dirty_bitmap) { 2692 set_dirty_bitmap(bs, sector_num, nb_sectors, 1); 2693 } 2694 2695 return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors); 2696 } 2697 2698 int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi) 2699 { 2700 BlockDriver *drv = bs->drv; 2701 if (!drv) 2702 return -ENOMEDIUM; 2703 if (!drv->bdrv_get_info) 2704 return -ENOTSUP; 2705 memset(bdi, 0, sizeof(*bdi)); 2706 return drv->bdrv_get_info(bs, bdi); 2707 } 2708 2709 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf, 2710 int64_t pos, int size) 2711 { 2712 BlockDriver *drv = bs->drv; 2713 if (!drv) 2714 return -ENOMEDIUM; 2715 if (drv->bdrv_save_vmstate) 2716 return drv->bdrv_save_vmstate(bs, buf, pos, size); 2717 if (bs->file) 2718 return 
bdrv_save_vmstate(bs->file, buf, pos, size); 2719 return -ENOTSUP; 2720 } 2721 2722 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf, 2723 int64_t pos, int size) 2724 { 2725 BlockDriver *drv = bs->drv; 2726 if (!drv) 2727 return -ENOMEDIUM; 2728 if (drv->bdrv_load_vmstate) 2729 return drv->bdrv_load_vmstate(bs, buf, pos, size); 2730 if (bs->file) 2731 return bdrv_load_vmstate(bs->file, buf, pos, size); 2732 return -ENOTSUP; 2733 } 2734 2735 void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event) 2736 { 2737 BlockDriver *drv = bs->drv; 2738 2739 if (!drv || !drv->bdrv_debug_event) { 2740 return; 2741 } 2742 2743 return drv->bdrv_debug_event(bs, event); 2744 2745 } 2746 2747 /**************************************************************/ 2748 /* handling of snapshots */ 2749 2750 int bdrv_can_snapshot(BlockDriverState *bs) 2751 { 2752 BlockDriver *drv = bs->drv; 2753 if (!drv || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) { 2754 return 0; 2755 } 2756 2757 if (!drv->bdrv_snapshot_create) { 2758 if (bs->file != NULL) { 2759 return bdrv_can_snapshot(bs->file); 2760 } 2761 return 0; 2762 } 2763 2764 return 1; 2765 } 2766 2767 int bdrv_is_snapshot(BlockDriverState *bs) 2768 { 2769 return !!(bs->open_flags & BDRV_O_SNAPSHOT); 2770 } 2771 2772 BlockDriverState *bdrv_snapshots(void) 2773 { 2774 BlockDriverState *bs; 2775 2776 if (bs_snapshots) { 2777 return bs_snapshots; 2778 } 2779 2780 bs = NULL; 2781 while ((bs = bdrv_next(bs))) { 2782 if (bdrv_can_snapshot(bs)) { 2783 bs_snapshots = bs; 2784 return bs; 2785 } 2786 } 2787 return NULL; 2788 } 2789 2790 int bdrv_snapshot_create(BlockDriverState *bs, 2791 QEMUSnapshotInfo *sn_info) 2792 { 2793 BlockDriver *drv = bs->drv; 2794 if (!drv) 2795 return -ENOMEDIUM; 2796 if (drv->bdrv_snapshot_create) 2797 return drv->bdrv_snapshot_create(bs, sn_info); 2798 if (bs->file) 2799 return bdrv_snapshot_create(bs->file, sn_info); 2800 return -ENOTSUP; 2801 } 2802 2803 int bdrv_snapshot_goto(BlockDriverState *bs, 2804 const char *snapshot_id) 2805 { 2806 BlockDriver *drv = bs->drv; 2807 int ret, open_ret; 2808 2809 if (!drv) 2810 return -ENOMEDIUM; 2811 if (drv->bdrv_snapshot_goto) 2812 return drv->bdrv_snapshot_goto(bs, snapshot_id); 2813 2814 if (bs->file) { 2815 drv->bdrv_close(bs); 2816 ret = bdrv_snapshot_goto(bs->file, snapshot_id); 2817 open_ret = drv->bdrv_open(bs, bs->open_flags); 2818 if (open_ret < 0) { 2819 bdrv_delete(bs->file); 2820 bs->drv = NULL; 2821 return open_ret; 2822 } 2823 return ret; 2824 } 2825 2826 return -ENOTSUP; 2827 } 2828 2829 int bdrv_snapshot_delete(BlockDriverState *bs, const char *snapshot_id) 2830 { 2831 BlockDriver *drv = bs->drv; 2832 if (!drv) 2833 return -ENOMEDIUM; 2834 if (drv->bdrv_snapshot_delete) 2835 return drv->bdrv_snapshot_delete(bs, snapshot_id); 2836 if (bs->file) 2837 return bdrv_snapshot_delete(bs->file, snapshot_id); 2838 return -ENOTSUP; 2839 } 2840 2841 int bdrv_snapshot_list(BlockDriverState *bs, 2842 QEMUSnapshotInfo **psn_info) 2843 { 2844 BlockDriver *drv = bs->drv; 2845 if (!drv) 2846 return -ENOMEDIUM; 2847 if (drv->bdrv_snapshot_list) 2848 return drv->bdrv_snapshot_list(bs, psn_info); 2849 if (bs->file) 2850 return bdrv_snapshot_list(bs->file, psn_info); 2851 return -ENOTSUP; 2852 } 2853 2854 int bdrv_snapshot_load_tmp(BlockDriverState *bs, 2855 const char *snapshot_name) 2856 { 2857 BlockDriver *drv = bs->drv; 2858 if (!drv) { 2859 return -ENOMEDIUM; 2860 } 2861 if (!bs->read_only) { 2862 return -EINVAL; 2863 } 2864 if (drv->bdrv_snapshot_load_tmp) { 2865 return 
drv->bdrv_snapshot_load_tmp(bs, snapshot_name); 2866 } 2867 return -ENOTSUP; 2868 } 2869 2870 BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs, 2871 const char *backing_file) 2872 { 2873 if (!bs->drv) { 2874 return NULL; 2875 } 2876 2877 if (bs->backing_hd) { 2878 if (strcmp(bs->backing_file, backing_file) == 0) { 2879 return bs->backing_hd; 2880 } else { 2881 return bdrv_find_backing_image(bs->backing_hd, backing_file); 2882 } 2883 } 2884 2885 return NULL; 2886 } 2887 2888 #define NB_SUFFIXES 4 2889 2890 char *get_human_readable_size(char *buf, int buf_size, int64_t size) 2891 { 2892 static const char suffixes[NB_SUFFIXES] = "KMGT"; 2893 int64_t base; 2894 int i; 2895 2896 if (size <= 999) { 2897 snprintf(buf, buf_size, "%" PRId64, size); 2898 } else { 2899 base = 1024; 2900 for(i = 0; i < NB_SUFFIXES; i++) { 2901 if (size < (10 * base)) { 2902 snprintf(buf, buf_size, "%0.1f%c", 2903 (double)size / base, 2904 suffixes[i]); 2905 break; 2906 } else if (size < (1000 * base) || i == (NB_SUFFIXES - 1)) { 2907 snprintf(buf, buf_size, "%" PRId64 "%c", 2908 ((size + (base >> 1)) / base), 2909 suffixes[i]); 2910 break; 2911 } 2912 base = base * 1024; 2913 } 2914 } 2915 return buf; 2916 } 2917 2918 char *bdrv_snapshot_dump(char *buf, int buf_size, QEMUSnapshotInfo *sn) 2919 { 2920 char buf1[128], date_buf[128], clock_buf[128]; 2921 #ifdef _WIN32 2922 struct tm *ptm; 2923 #else 2924 struct tm tm; 2925 #endif 2926 time_t ti; 2927 int64_t secs; 2928 2929 if (!sn) { 2930 snprintf(buf, buf_size, 2931 "%-10s%-20s%7s%20s%15s", 2932 "ID", "TAG", "VM SIZE", "DATE", "VM CLOCK"); 2933 } else { 2934 ti = sn->date_sec; 2935 #ifdef _WIN32 2936 ptm = localtime(&ti); 2937 strftime(date_buf, sizeof(date_buf), 2938 "%Y-%m-%d %H:%M:%S", ptm); 2939 #else 2940 localtime_r(&ti, &tm); 2941 strftime(date_buf, sizeof(date_buf), 2942 "%Y-%m-%d %H:%M:%S", &tm); 2943 #endif 2944 secs = sn->vm_clock_nsec / 1000000000; 2945 snprintf(clock_buf, sizeof(clock_buf), 2946 "%02d:%02d:%02d.%03d", 2947 (int)(secs / 3600), 2948 (int)((secs / 60) % 60), 2949 (int)(secs % 60), 2950 (int)((sn->vm_clock_nsec / 1000000) % 1000)); 2951 snprintf(buf, buf_size, 2952 "%-10s%-20s%7s%20s%15s", 2953 sn->id_str, sn->name, 2954 get_human_readable_size(buf1, sizeof(buf1), sn->vm_state_size), 2955 date_buf, 2956 clock_buf); 2957 } 2958 return buf; 2959 } 2960 2961 /**************************************************************/ 2962 /* async I/Os */ 2963 2964 BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num, 2965 QEMUIOVector *qiov, int nb_sectors, 2966 BlockDriverCompletionFunc *cb, void *opaque) 2967 { 2968 trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque); 2969 2970 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 2971 cb, opaque, false); 2972 } 2973 2974 BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num, 2975 QEMUIOVector *qiov, int nb_sectors, 2976 BlockDriverCompletionFunc *cb, void *opaque) 2977 { 2978 trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque); 2979 2980 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 2981 cb, opaque, true); 2982 } 2983 2984 2985 typedef struct MultiwriteCB { 2986 int error; 2987 int num_requests; 2988 int num_callbacks; 2989 struct { 2990 BlockDriverCompletionFunc *cb; 2991 void *opaque; 2992 QEMUIOVector *free_qiov; 2993 } callbacks[]; 2994 } MultiwriteCB; 2995 2996 static void multiwrite_user_cb(MultiwriteCB *mcb) 2997 { 2998 int i; 2999 3000 for (i = 0; i < mcb->num_callbacks; i++) { 3001 
mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error); 3002 if (mcb->callbacks[i].free_qiov) { 3003 qemu_iovec_destroy(mcb->callbacks[i].free_qiov); 3004 } 3005 g_free(mcb->callbacks[i].free_qiov); 3006 } 3007 } 3008 3009 static void multiwrite_cb(void *opaque, int ret) 3010 { 3011 MultiwriteCB *mcb = opaque; 3012 3013 trace_multiwrite_cb(mcb, ret); 3014 3015 if (ret < 0 && !mcb->error) { 3016 mcb->error = ret; 3017 } 3018 3019 mcb->num_requests--; 3020 if (mcb->num_requests == 0) { 3021 multiwrite_user_cb(mcb); 3022 g_free(mcb); 3023 } 3024 } 3025 3026 static int multiwrite_req_compare(const void *a, const void *b) 3027 { 3028 const BlockRequest *req1 = a, *req2 = b; 3029 3030 /* 3031 * Note that we can't simply subtract req2->sector from req1->sector 3032 * here as that could overflow the return value. 3033 */ 3034 if (req1->sector > req2->sector) { 3035 return 1; 3036 } else if (req1->sector < req2->sector) { 3037 return -1; 3038 } else { 3039 return 0; 3040 } 3041 } 3042 3043 /* 3044 * Takes a bunch of requests and tries to merge them. Returns the number of 3045 * requests that remain after merging. 3046 */ 3047 static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs, 3048 int num_reqs, MultiwriteCB *mcb) 3049 { 3050 int i, outidx; 3051 3052 // Sort requests by start sector 3053 qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare); 3054 3055 // Check whether adjacent requests are sequential or overlapping. If so, 3056 // combine them into a single request. 3057 outidx = 0; 3058 for (i = 1; i < num_reqs; i++) { 3059 int merge = 0; 3060 int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors; 3061 3062 // Handle exactly sequential writes and overlapping writes. 3063 if (reqs[i].sector <= oldreq_last) { 3064 merge = 1; 3065 } 3066 3067 if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) { 3068 merge = 0; 3069 } 3070 3071 if (merge) { 3072 size_t size; 3073 QEMUIOVector *qiov = g_malloc0(sizeof(*qiov)); 3074 qemu_iovec_init(qiov, 3075 reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1); 3076 3077 // Add the first request to the merged one. If the requests are 3078 // overlapping, drop the last sectors of the first request. 3079 size = (reqs[i].sector - reqs[outidx].sector) << 9; 3080 qemu_iovec_concat(qiov, reqs[outidx].qiov, size); 3081 3082 // We should not need to add any zeros between the two requests 3083 assert (reqs[i].sector <= oldreq_last); 3084 3085 // Add the second request 3086 qemu_iovec_concat(qiov, reqs[i].qiov, reqs[i].qiov->size); 3087 3088 reqs[outidx].nb_sectors = qiov->size >> 9; 3089 reqs[outidx].qiov = qiov; 3090 3091 mcb->callbacks[i].free_qiov = reqs[outidx].qiov; 3092 } else { 3093 outidx++; 3094 reqs[outidx].sector = reqs[i].sector; 3095 reqs[outidx].nb_sectors = reqs[i].nb_sectors; 3096 reqs[outidx].qiov = reqs[i].qiov; 3097 } 3098 } 3099 3100 return outidx + 1; 3101 } 3102 3103 /* 3104 * Submit multiple AIO write requests at once. 3105 * 3106 * On success, the function returns 0 and all requests in the reqs array have 3107 * been submitted. In the error case, this function returns -1, and any of the 3108 * requests may or may not be submitted yet. In particular, this means that the 3109 * callback will be called for some of the requests but not for others. The 3110 * caller must check the error field of the BlockRequest to wait for the right 3111 * callbacks (if error != 0, no callback will be called). 3112 * 3113 * The implementation may modify the contents of the reqs array, e.g. to merge 3114 * requests.
However, the fields opaque and error are left unmodified as they 3115 * are used to signal failure for a single request to the caller. 3116 */ 3117 int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs) 3118 { 3119 MultiwriteCB *mcb; 3120 int i; 3121 3122 /* don't submit writes if we don't have a medium */ 3123 if (bs->drv == NULL) { 3124 for (i = 0; i < num_reqs; i++) { 3125 reqs[i].error = -ENOMEDIUM; 3126 } 3127 return -1; 3128 } 3129 3130 if (num_reqs == 0) { 3131 return 0; 3132 } 3133 3134 // Create MultiwriteCB structure 3135 mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks)); 3136 mcb->num_requests = 0; 3137 mcb->num_callbacks = num_reqs; 3138 3139 for (i = 0; i < num_reqs; i++) { 3140 mcb->callbacks[i].cb = reqs[i].cb; 3141 mcb->callbacks[i].opaque = reqs[i].opaque; 3142 } 3143 3144 // Check for mergeable requests 3145 num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb); 3146 3147 trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs); 3148 3149 /* Run the aio requests. */ 3150 mcb->num_requests = num_reqs; 3151 for (i = 0; i < num_reqs; i++) { 3152 bdrv_aio_writev(bs, reqs[i].sector, reqs[i].qiov, 3153 reqs[i].nb_sectors, multiwrite_cb, mcb); 3154 } 3155 3156 return 0; 3157 } 3158 3159 void bdrv_aio_cancel(BlockDriverAIOCB *acb) 3160 { 3161 acb->pool->cancel(acb); 3162 } 3163 3164 /* block I/O throttling */ 3165 static bool bdrv_exceed_bps_limits(BlockDriverState *bs, int nb_sectors, 3166 bool is_write, double elapsed_time, uint64_t *wait) 3167 { 3168 uint64_t bps_limit = 0; 3169 double bytes_limit, bytes_base, bytes_res; 3170 double slice_time, wait_time; 3171 3172 if (bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL]) { 3173 bps_limit = bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL]; 3174 } else if (bs->io_limits.bps[is_write]) { 3175 bps_limit = bs->io_limits.bps[is_write]; 3176 } else { 3177 if (wait) { 3178 *wait = 0; 3179 } 3180 3181 return false; 3182 } 3183 3184 slice_time = bs->slice_end - bs->slice_start; 3185 slice_time /= (NANOSECONDS_PER_SECOND); 3186 bytes_limit = bps_limit * slice_time; 3187 bytes_base = bs->nr_bytes[is_write] - bs->io_base.bytes[is_write]; 3188 if (bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL]) { 3189 bytes_base += bs->nr_bytes[!is_write] - bs->io_base.bytes[!is_write]; 3190 } 3191 3192 /* bytes_base: the number of bytes already read/written, taken from 3193 * the history statistics. 3194 * bytes_res: the remaining bytes of data which need to be read/written. 3195 * (bytes_base + bytes_res) / bps_limit: used to calculate 3196 * the total time for completing reading/writing all data. 3197 */ 3198 bytes_res = (unsigned) nb_sectors * BDRV_SECTOR_SIZE; 3199 3200 if (bytes_base + bytes_res <= bytes_limit) { 3201 if (wait) { 3202 *wait = 0; 3203 } 3204 3205 return false; 3206 } 3207 3208 /* Calc approx time to dispatch */ 3209 wait_time = (bytes_base + bytes_res) / bps_limit - elapsed_time; 3210 3211 /* When the I/O rate at runtime exceeds the limits, 3212 * bs->slice_end needs to be extended so that the current statistics 3213 * are kept until the timer fires; the extension is increased and tuned 3214 * based on experimental results.
3215 */ 3216 bs->slice_time = wait_time * BLOCK_IO_SLICE_TIME * 10; 3217 bs->slice_end += bs->slice_time - 3 * BLOCK_IO_SLICE_TIME; 3218 if (wait) { 3219 *wait = wait_time * BLOCK_IO_SLICE_TIME * 10; 3220 } 3221 3222 return true; 3223 } 3224 3225 static bool bdrv_exceed_iops_limits(BlockDriverState *bs, bool is_write, 3226 double elapsed_time, uint64_t *wait) 3227 { 3228 uint64_t iops_limit = 0; 3229 double ios_limit, ios_base; 3230 double slice_time, wait_time; 3231 3232 if (bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL]) { 3233 iops_limit = bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL]; 3234 } else if (bs->io_limits.iops[is_write]) { 3235 iops_limit = bs->io_limits.iops[is_write]; 3236 } else { 3237 if (wait) { 3238 *wait = 0; 3239 } 3240 3241 return false; 3242 } 3243 3244 slice_time = bs->slice_end - bs->slice_start; 3245 slice_time /= (NANOSECONDS_PER_SECOND); 3246 ios_limit = iops_limit * slice_time; 3247 ios_base = bs->nr_ops[is_write] - bs->io_base.ios[is_write]; 3248 if (bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL]) { 3249 ios_base += bs->nr_ops[!is_write] - bs->io_base.ios[!is_write]; 3250 } 3251 3252 if (ios_base + 1 <= ios_limit) { 3253 if (wait) { 3254 *wait = 0; 3255 } 3256 3257 return false; 3258 } 3259 3260 /* Calc approx time to dispatch */ 3261 wait_time = (ios_base + 1) / iops_limit; 3262 if (wait_time > elapsed_time) { 3263 wait_time = wait_time - elapsed_time; 3264 } else { 3265 wait_time = 0; 3266 } 3267 3268 bs->slice_time = wait_time * BLOCK_IO_SLICE_TIME * 10; 3269 bs->slice_end += bs->slice_time - 3 * BLOCK_IO_SLICE_TIME; 3270 if (wait) { 3271 *wait = wait_time * BLOCK_IO_SLICE_TIME * 10; 3272 } 3273 3274 return true; 3275 } 3276 3277 static bool bdrv_exceed_io_limits(BlockDriverState *bs, int nb_sectors, 3278 bool is_write, int64_t *wait) 3279 { 3280 int64_t now, max_wait; 3281 uint64_t bps_wait = 0, iops_wait = 0; 3282 double elapsed_time; 3283 int bps_ret, iops_ret; 3284 3285 now = qemu_get_clock_ns(vm_clock); 3286 if ((bs->slice_start < now) 3287 && (bs->slice_end > now)) { 3288 bs->slice_end = now + bs->slice_time; 3289 } else { 3290 bs->slice_time = 5 * BLOCK_IO_SLICE_TIME; 3291 bs->slice_start = now; 3292 bs->slice_end = now + bs->slice_time; 3293 3294 bs->io_base.bytes[is_write] = bs->nr_bytes[is_write]; 3295 bs->io_base.bytes[!is_write] = bs->nr_bytes[!is_write]; 3296 3297 bs->io_base.ios[is_write] = bs->nr_ops[is_write]; 3298 bs->io_base.ios[!is_write] = bs->nr_ops[!is_write]; 3299 } 3300 3301 elapsed_time = now - bs->slice_start; 3302 elapsed_time /= (NANOSECONDS_PER_SECOND); 3303 3304 bps_ret = bdrv_exceed_bps_limits(bs, nb_sectors, 3305 is_write, elapsed_time, &bps_wait); 3306 iops_ret = bdrv_exceed_iops_limits(bs, is_write, 3307 elapsed_time, &iops_wait); 3308 if (bps_ret || iops_ret) { 3309 max_wait = bps_wait > iops_wait ? 
bps_wait : iops_wait; 3310 if (wait) { 3311 *wait = max_wait; 3312 } 3313 3314 now = qemu_get_clock_ns(vm_clock); 3315 if (bs->slice_end < now + max_wait) { 3316 bs->slice_end = now + max_wait; 3317 } 3318 3319 return true; 3320 } 3321 3322 if (wait) { 3323 *wait = 0; 3324 } 3325 3326 return false; 3327 } 3328 3329 /**************************************************************/ 3330 /* async block device emulation */ 3331 3332 typedef struct BlockDriverAIOCBSync { 3333 BlockDriverAIOCB common; 3334 QEMUBH *bh; 3335 int ret; 3336 /* vector translation state */ 3337 QEMUIOVector *qiov; 3338 uint8_t *bounce; 3339 int is_write; 3340 } BlockDriverAIOCBSync; 3341 3342 static void bdrv_aio_cancel_em(BlockDriverAIOCB *blockacb) 3343 { 3344 BlockDriverAIOCBSync *acb = 3345 container_of(blockacb, BlockDriverAIOCBSync, common); 3346 qemu_bh_delete(acb->bh); 3347 acb->bh = NULL; 3348 qemu_aio_release(acb); 3349 } 3350 3351 static AIOPool bdrv_em_aio_pool = { 3352 .aiocb_size = sizeof(BlockDriverAIOCBSync), 3353 .cancel = bdrv_aio_cancel_em, 3354 }; 3355 3356 static void bdrv_aio_bh_cb(void *opaque) 3357 { 3358 BlockDriverAIOCBSync *acb = opaque; 3359 3360 if (!acb->is_write) 3361 qemu_iovec_from_buffer(acb->qiov, acb->bounce, acb->qiov->size); 3362 qemu_vfree(acb->bounce); 3363 acb->common.cb(acb->common.opaque, acb->ret); 3364 qemu_bh_delete(acb->bh); 3365 acb->bh = NULL; 3366 qemu_aio_release(acb); 3367 } 3368 3369 static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs, 3370 int64_t sector_num, 3371 QEMUIOVector *qiov, 3372 int nb_sectors, 3373 BlockDriverCompletionFunc *cb, 3374 void *opaque, 3375 int is_write) 3376 3377 { 3378 BlockDriverAIOCBSync *acb; 3379 3380 acb = qemu_aio_get(&bdrv_em_aio_pool, bs, cb, opaque); 3381 acb->is_write = is_write; 3382 acb->qiov = qiov; 3383 acb->bounce = qemu_blockalign(bs, qiov->size); 3384 acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb); 3385 3386 if (is_write) { 3387 qemu_iovec_to_buffer(acb->qiov, acb->bounce); 3388 acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors); 3389 } else { 3390 acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors); 3391 } 3392 3393 qemu_bh_schedule(acb->bh); 3394 3395 return &acb->common; 3396 } 3397 3398 static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs, 3399 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, 3400 BlockDriverCompletionFunc *cb, void *opaque) 3401 { 3402 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0); 3403 } 3404 3405 static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs, 3406 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, 3407 BlockDriverCompletionFunc *cb, void *opaque) 3408 { 3409 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1); 3410 } 3411 3412 3413 typedef struct BlockDriverAIOCBCoroutine { 3414 BlockDriverAIOCB common; 3415 BlockRequest req; 3416 bool is_write; 3417 QEMUBH* bh; 3418 } BlockDriverAIOCBCoroutine; 3419 3420 static void bdrv_aio_co_cancel_em(BlockDriverAIOCB *blockacb) 3421 { 3422 qemu_aio_flush(); 3423 } 3424 3425 static AIOPool bdrv_em_co_aio_pool = { 3426 .aiocb_size = sizeof(BlockDriverAIOCBCoroutine), 3427 .cancel = bdrv_aio_co_cancel_em, 3428 }; 3429 3430 static void bdrv_co_em_bh(void *opaque) 3431 { 3432 BlockDriverAIOCBCoroutine *acb = opaque; 3433 3434 acb->common.cb(acb->common.opaque, acb->req.error); 3435 qemu_bh_delete(acb->bh); 3436 qemu_aio_release(acb); 3437 } 3438 3439 /* Invoke bdrv_co_do_readv/bdrv_co_do_writev */ 3440 static void coroutine_fn 
bdrv_co_do_rw(void *opaque) 3441 { 3442 BlockDriverAIOCBCoroutine *acb = opaque; 3443 BlockDriverState *bs = acb->common.bs; 3444 3445 if (!acb->is_write) { 3446 acb->req.error = bdrv_co_do_readv(bs, acb->req.sector, 3447 acb->req.nb_sectors, acb->req.qiov, 0); 3448 } else { 3449 acb->req.error = bdrv_co_do_writev(bs, acb->req.sector, 3450 acb->req.nb_sectors, acb->req.qiov, 0); 3451 } 3452 3453 acb->bh = qemu_bh_new(bdrv_co_em_bh, acb); 3454 qemu_bh_schedule(acb->bh); 3455 } 3456 3457 static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs, 3458 int64_t sector_num, 3459 QEMUIOVector *qiov, 3460 int nb_sectors, 3461 BlockDriverCompletionFunc *cb, 3462 void *opaque, 3463 bool is_write) 3464 { 3465 Coroutine *co; 3466 BlockDriverAIOCBCoroutine *acb; 3467 3468 acb = qemu_aio_get(&bdrv_em_co_aio_pool, bs, cb, opaque); 3469 acb->req.sector = sector_num; 3470 acb->req.nb_sectors = nb_sectors; 3471 acb->req.qiov = qiov; 3472 acb->is_write = is_write; 3473 3474 co = qemu_coroutine_create(bdrv_co_do_rw); 3475 qemu_coroutine_enter(co, acb); 3476 3477 return &acb->common; 3478 } 3479 3480 static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque) 3481 { 3482 BlockDriverAIOCBCoroutine *acb = opaque; 3483 BlockDriverState *bs = acb->common.bs; 3484 3485 acb->req.error = bdrv_co_flush(bs); 3486 acb->bh = qemu_bh_new(bdrv_co_em_bh, acb); 3487 qemu_bh_schedule(acb->bh); 3488 } 3489 3490 BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs, 3491 BlockDriverCompletionFunc *cb, void *opaque) 3492 { 3493 trace_bdrv_aio_flush(bs, opaque); 3494 3495 Coroutine *co; 3496 BlockDriverAIOCBCoroutine *acb; 3497 3498 acb = qemu_aio_get(&bdrv_em_co_aio_pool, bs, cb, opaque); 3499 co = qemu_coroutine_create(bdrv_aio_flush_co_entry); 3500 qemu_coroutine_enter(co, acb); 3501 3502 return &acb->common; 3503 } 3504 3505 static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque) 3506 { 3507 BlockDriverAIOCBCoroutine *acb = opaque; 3508 BlockDriverState *bs = acb->common.bs; 3509 3510 acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors); 3511 acb->bh = qemu_bh_new(bdrv_co_em_bh, acb); 3512 qemu_bh_schedule(acb->bh); 3513 } 3514 3515 BlockDriverAIOCB *bdrv_aio_discard(BlockDriverState *bs, 3516 int64_t sector_num, int nb_sectors, 3517 BlockDriverCompletionFunc *cb, void *opaque) 3518 { 3519 Coroutine *co; 3520 BlockDriverAIOCBCoroutine *acb; 3521 3522 trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque); 3523 3524 acb = qemu_aio_get(&bdrv_em_co_aio_pool, bs, cb, opaque); 3525 acb->req.sector = sector_num; 3526 acb->req.nb_sectors = nb_sectors; 3527 co = qemu_coroutine_create(bdrv_aio_discard_co_entry); 3528 qemu_coroutine_enter(co, acb); 3529 3530 return &acb->common; 3531 } 3532 3533 void bdrv_init(void) 3534 { 3535 module_call_init(MODULE_INIT_BLOCK); 3536 } 3537 3538 void bdrv_init_with_whitelist(void) 3539 { 3540 use_bdrv_whitelist = 1; 3541 bdrv_init(); 3542 } 3543 3544 void *qemu_aio_get(AIOPool *pool, BlockDriverState *bs, 3545 BlockDriverCompletionFunc *cb, void *opaque) 3546 { 3547 BlockDriverAIOCB *acb; 3548 3549 if (pool->free_aiocb) { 3550 acb = pool->free_aiocb; 3551 pool->free_aiocb = acb->next; 3552 } else { 3553 acb = g_malloc0(pool->aiocb_size); 3554 acb->pool = pool; 3555 } 3556 acb->bs = bs; 3557 acb->cb = cb; 3558 acb->opaque = opaque; 3559 return acb; 3560 } 3561 3562 void qemu_aio_release(void *p) 3563 { 3564 BlockDriverAIOCB *acb = (BlockDriverAIOCB *)p; 3565 AIOPool *pool = acb->pool; 3566 acb->next = pool->free_aiocb; 3567 pool->free_aiocb = acb; 3568 } 
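/*
 * Usage sketch for the AIOCB pool helpers above (an illustrative, editor-added
 * example, not code used by this file; MyAIOCB, my_aio_pool, my_aio_cancel and
 * my_submit_request are hypothetical names).  A driver embeds BlockDriverAIOCB
 * as the first member of its own AIOCB type, registers an AIOPool with a
 * matching aiocb_size and cancel callback, and then obtains and releases
 * control blocks through qemu_aio_get()/qemu_aio_release(), in the same way
 * BlockDriverAIOCBSync and BlockDriverAIOCBCoroutine are handled here:
 *
 *     typedef struct MyAIOCB {
 *         BlockDriverAIOCB common;    // must be the first member
 *         int ret;                    // driver-specific state follows
 *     } MyAIOCB;
 *
 *     static void my_aio_cancel(BlockDriverAIOCB *blockacb)
 *     {
 *         MyAIOCB *acb = container_of(blockacb, MyAIOCB, common);
 *         // stop the in-flight request, then give the AIOCB back to the pool
 *         qemu_aio_release(acb);
 *     }
 *
 *     static AIOPool my_aio_pool = {
 *         .aiocb_size = sizeof(MyAIOCB),
 *         .cancel     = my_aio_cancel,
 *     };
 *
 *     static BlockDriverAIOCB *my_submit_request(BlockDriverState *bs,
 *                                                BlockDriverCompletionFunc *cb,
 *                                                void *opaque)
 *     {
 *         MyAIOCB *acb = qemu_aio_get(&my_aio_pool, bs, cb, opaque);
 *         // kick off the asynchronous work; on completion call
 *         // acb->common.cb(acb->common.opaque, ret) and qemu_aio_release(acb)
 *         return &acb->common;
 *     }
 */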
3569 3570 /**************************************************************/ 3571 /* Coroutine block device emulation */ 3572 3573 typedef struct CoroutineIOCompletion { 3574 Coroutine *coroutine; 3575 int ret; 3576 } CoroutineIOCompletion; 3577 3578 static void bdrv_co_io_em_complete(void *opaque, int ret) 3579 { 3580 CoroutineIOCompletion *co = opaque; 3581 3582 co->ret = ret; 3583 qemu_coroutine_enter(co->coroutine, NULL); 3584 } 3585 3586 static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num, 3587 int nb_sectors, QEMUIOVector *iov, 3588 bool is_write) 3589 { 3590 CoroutineIOCompletion co = { 3591 .coroutine = qemu_coroutine_self(), 3592 }; 3593 BlockDriverAIOCB *acb; 3594 3595 if (is_write) { 3596 acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors, 3597 bdrv_co_io_em_complete, &co); 3598 } else { 3599 acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors, 3600 bdrv_co_io_em_complete, &co); 3601 } 3602 3603 trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb); 3604 if (!acb) { 3605 return -EIO; 3606 } 3607 qemu_coroutine_yield(); 3608 3609 return co.ret; 3610 } 3611 3612 static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs, 3613 int64_t sector_num, int nb_sectors, 3614 QEMUIOVector *iov) 3615 { 3616 return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false); 3617 } 3618 3619 static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs, 3620 int64_t sector_num, int nb_sectors, 3621 QEMUIOVector *iov) 3622 { 3623 return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true); 3624 } 3625 3626 static void coroutine_fn bdrv_flush_co_entry(void *opaque) 3627 { 3628 RwCo *rwco = opaque; 3629 3630 rwco->ret = bdrv_co_flush(rwco->bs); 3631 } 3632 3633 int coroutine_fn bdrv_co_flush(BlockDriverState *bs) 3634 { 3635 int ret; 3636 3637 if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) { 3638 return 0; 3639 } 3640 3641 /* Write back cached data to the OS even with cache=unsafe */ 3642 if (bs->drv->bdrv_co_flush_to_os) { 3643 ret = bs->drv->bdrv_co_flush_to_os(bs); 3644 if (ret < 0) { 3645 return ret; 3646 } 3647 } 3648 3649 /* But don't actually force it to the disk with cache=unsafe */ 3650 if (bs->open_flags & BDRV_O_NO_FLUSH) { 3651 return 0; 3652 } 3653 3654 if (bs->drv->bdrv_co_flush_to_disk) { 3655 ret = bs->drv->bdrv_co_flush_to_disk(bs); 3656 } else if (bs->drv->bdrv_aio_flush) { 3657 BlockDriverAIOCB *acb; 3658 CoroutineIOCompletion co = { 3659 .coroutine = qemu_coroutine_self(), 3660 }; 3661 3662 acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co); 3663 if (acb == NULL) { 3664 ret = -EIO; 3665 } else { 3666 qemu_coroutine_yield(); 3667 ret = co.ret; 3668 } 3669 } else { 3670 /* 3671 * Some block drivers always operate in either writethrough or unsafe 3672 * mode and don't support bdrv_flush therefore. Usually qemu doesn't 3673 * know how the server works (because the behaviour is hardcoded or 3674 * depends on server-side configuration), so we can't ensure that 3675 * everything is safe on disk. Returning an error doesn't work because 3676 * that would break guests even if the server operates in writethrough 3677 * mode. 3678 * 3679 * Let's hope the user knows what he's doing. 3680 */ 3681 ret = 0; 3682 } 3683 if (ret < 0) { 3684 return ret; 3685 } 3686 3687 /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH 3688 * in the case of cache=unsafe, so there are no useless flushes. 
3689 */ 3690 return bdrv_co_flush(bs->file); 3691 } 3692 3693 void bdrv_invalidate_cache(BlockDriverState *bs) 3694 { 3695 if (bs->drv && bs->drv->bdrv_invalidate_cache) { 3696 bs->drv->bdrv_invalidate_cache(bs); 3697 } 3698 } 3699 3700 void bdrv_invalidate_cache_all(void) 3701 { 3702 BlockDriverState *bs; 3703 3704 QTAILQ_FOREACH(bs, &bdrv_states, list) { 3705 bdrv_invalidate_cache(bs); 3706 } 3707 } 3708 3709 void bdrv_clear_incoming_migration_all(void) 3710 { 3711 BlockDriverState *bs; 3712 3713 QTAILQ_FOREACH(bs, &bdrv_states, list) { 3714 bs->open_flags = bs->open_flags & ~(BDRV_O_INCOMING); 3715 } 3716 } 3717 3718 int bdrv_flush(BlockDriverState *bs) 3719 { 3720 Coroutine *co; 3721 RwCo rwco = { 3722 .bs = bs, 3723 .ret = NOT_DONE, 3724 }; 3725 3726 if (qemu_in_coroutine()) { 3727 /* Fast-path if already in coroutine context */ 3728 bdrv_flush_co_entry(&rwco); 3729 } else { 3730 co = qemu_coroutine_create(bdrv_flush_co_entry); 3731 qemu_coroutine_enter(co, &rwco); 3732 while (rwco.ret == NOT_DONE) { 3733 qemu_aio_wait(); 3734 } 3735 } 3736 3737 return rwco.ret; 3738 } 3739 3740 static void coroutine_fn bdrv_discard_co_entry(void *opaque) 3741 { 3742 RwCo *rwco = opaque; 3743 3744 rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors); 3745 } 3746 3747 int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num, 3748 int nb_sectors) 3749 { 3750 if (!bs->drv) { 3751 return -ENOMEDIUM; 3752 } else if (bdrv_check_request(bs, sector_num, nb_sectors)) { 3753 return -EIO; 3754 } else if (bs->read_only) { 3755 return -EROFS; 3756 } else if (bs->drv->bdrv_co_discard) { 3757 return bs->drv->bdrv_co_discard(bs, sector_num, nb_sectors); 3758 } else if (bs->drv->bdrv_aio_discard) { 3759 BlockDriverAIOCB *acb; 3760 CoroutineIOCompletion co = { 3761 .coroutine = qemu_coroutine_self(), 3762 }; 3763 3764 acb = bs->drv->bdrv_aio_discard(bs, sector_num, nb_sectors, 3765 bdrv_co_io_em_complete, &co); 3766 if (acb == NULL) { 3767 return -EIO; 3768 } else { 3769 qemu_coroutine_yield(); 3770 return co.ret; 3771 } 3772 } else { 3773 return 0; 3774 } 3775 } 3776 3777 int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors) 3778 { 3779 Coroutine *co; 3780 RwCo rwco = { 3781 .bs = bs, 3782 .sector_num = sector_num, 3783 .nb_sectors = nb_sectors, 3784 .ret = NOT_DONE, 3785 }; 3786 3787 if (qemu_in_coroutine()) { 3788 /* Fast-path if already in coroutine context */ 3789 bdrv_discard_co_entry(&rwco); 3790 } else { 3791 co = qemu_coroutine_create(bdrv_discard_co_entry); 3792 qemu_coroutine_enter(co, &rwco); 3793 while (rwco.ret == NOT_DONE) { 3794 qemu_aio_wait(); 3795 } 3796 } 3797 3798 return rwco.ret; 3799 } 3800 3801 /**************************************************************/ 3802 /* removable device support */ 3803 3804 /** 3805 * Return TRUE if the media is present 3806 */ 3807 int bdrv_is_inserted(BlockDriverState *bs) 3808 { 3809 BlockDriver *drv = bs->drv; 3810 3811 if (!drv) 3812 return 0; 3813 if (!drv->bdrv_is_inserted) 3814 return 1; 3815 return drv->bdrv_is_inserted(bs); 3816 } 3817 3818 /** 3819 * Return whether the media changed since the last call to this 3820 * function, or -ENOTSUP if we don't know. Most drivers don't know. 3821 */ 3822 int bdrv_media_changed(BlockDriverState *bs) 3823 { 3824 BlockDriver *drv = bs->drv; 3825 3826 if (drv && drv->bdrv_media_changed) { 3827 return drv->bdrv_media_changed(bs); 3828 } 3829 return -ENOTSUP; 3830 } 3831 3832 /** 3833 * If eject_flag is TRUE, eject the media. 
Otherwise, close the tray 3834 */ 3835 void bdrv_eject(BlockDriverState *bs, bool eject_flag) 3836 { 3837 BlockDriver *drv = bs->drv; 3838 3839 if (drv && drv->bdrv_eject) { 3840 drv->bdrv_eject(bs, eject_flag); 3841 } 3842 3843 if (bs->device_name[0] != '\0') { 3844 bdrv_emit_qmp_eject_event(bs, eject_flag); 3845 } 3846 } 3847 3848 /** 3849 * Lock or unlock the media (if it is locked, the user won't be able 3850 * to eject it manually). 3851 */ 3852 void bdrv_lock_medium(BlockDriverState *bs, bool locked) 3853 { 3854 BlockDriver *drv = bs->drv; 3855 3856 trace_bdrv_lock_medium(bs, locked); 3857 3858 if (drv && drv->bdrv_lock_medium) { 3859 drv->bdrv_lock_medium(bs, locked); 3860 } 3861 } 3862 3863 /* needed for generic scsi interface */ 3864 3865 int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf) 3866 { 3867 BlockDriver *drv = bs->drv; 3868 3869 if (drv && drv->bdrv_ioctl) 3870 return drv->bdrv_ioctl(bs, req, buf); 3871 return -ENOTSUP; 3872 } 3873 3874 BlockDriverAIOCB *bdrv_aio_ioctl(BlockDriverState *bs, 3875 unsigned long int req, void *buf, 3876 BlockDriverCompletionFunc *cb, void *opaque) 3877 { 3878 BlockDriver *drv = bs->drv; 3879 3880 if (drv && drv->bdrv_aio_ioctl) 3881 return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque); 3882 return NULL; 3883 } 3884 3885 void bdrv_set_buffer_alignment(BlockDriverState *bs, int align) 3886 { 3887 bs->buffer_alignment = align; 3888 } 3889 3890 void *qemu_blockalign(BlockDriverState *bs, size_t size) 3891 { 3892 return qemu_memalign((bs && bs->buffer_alignment) ? bs->buffer_alignment : 512, size); 3893 } 3894 3895 void bdrv_set_dirty_tracking(BlockDriverState *bs, int enable) 3896 { 3897 int64_t bitmap_size; 3898 3899 bs->dirty_count = 0; 3900 if (enable) { 3901 if (!bs->dirty_bitmap) { 3902 bitmap_size = (bdrv_getlength(bs) >> BDRV_SECTOR_BITS) + 3903 BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1; 3904 bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * 8; 3905 3906 bs->dirty_bitmap = g_malloc0(bitmap_size); 3907 } 3908 } else { 3909 if (bs->dirty_bitmap) { 3910 g_free(bs->dirty_bitmap); 3911 bs->dirty_bitmap = NULL; 3912 } 3913 } 3914 } 3915 3916 int bdrv_get_dirty(BlockDriverState *bs, int64_t sector) 3917 { 3918 int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK; 3919 3920 if (bs->dirty_bitmap && 3921 (sector << BDRV_SECTOR_BITS) < bdrv_getlength(bs)) { 3922 return !!(bs->dirty_bitmap[chunk / (sizeof(unsigned long) * 8)] & 3923 (1UL << (chunk % (sizeof(unsigned long) * 8)))); 3924 } else { 3925 return 0; 3926 } 3927 } 3928 3929 void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector, 3930 int nr_sectors) 3931 { 3932 set_dirty_bitmap(bs, cur_sector, nr_sectors, 0); 3933 } 3934 3935 int64_t bdrv_get_dirty_count(BlockDriverState *bs) 3936 { 3937 return bs->dirty_count; 3938 } 3939 3940 void bdrv_set_in_use(BlockDriverState *bs, int in_use) 3941 { 3942 assert(bs->in_use != in_use); 3943 bs->in_use = in_use; 3944 } 3945 3946 int bdrv_in_use(BlockDriverState *bs) 3947 { 3948 return bs->in_use; 3949 } 3950 3951 void bdrv_iostatus_enable(BlockDriverState *bs) 3952 { 3953 bs->iostatus_enabled = true; 3954 bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK; 3955 } 3956 3957 /* The I/O status is only enabled if the drive explicitly 3958 * enables it _and_ the VM is configured to stop on errors */ 3959 bool bdrv_iostatus_is_enabled(const BlockDriverState *bs) 3960 { 3961 return (bs->iostatus_enabled && 3962 (bs->on_write_error == BLOCK_ERR_STOP_ENOSPC || 3963 bs->on_write_error == BLOCK_ERR_STOP_ANY || 3964 bs->on_read_error == 
BLOCK_ERR_STOP_ANY)); 3965 } 3966 3967 void bdrv_iostatus_disable(BlockDriverState *bs) 3968 { 3969 bs->iostatus_enabled = false; 3970 } 3971 3972 void bdrv_iostatus_reset(BlockDriverState *bs) 3973 { 3974 if (bdrv_iostatus_is_enabled(bs)) { 3975 bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK; 3976 } 3977 } 3978 3979 /* XXX: Today this is set by device models because it makes the implementation 3980 quite simple. However, the block layer knows about the error, so it's 3981 possible to implement this without device models being involved */ 3982 void bdrv_iostatus_set_err(BlockDriverState *bs, int error) 3983 { 3984 if (bdrv_iostatus_is_enabled(bs) && 3985 bs->iostatus == BLOCK_DEVICE_IO_STATUS_OK) { 3986 assert(error >= 0); 3987 bs->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE : 3988 BLOCK_DEVICE_IO_STATUS_FAILED; 3989 } 3990 } 3991 3992 void 3993 bdrv_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie, int64_t bytes, 3994 enum BlockAcctType type) 3995 { 3996 assert(type < BDRV_MAX_IOTYPE); 3997 3998 cookie->bytes = bytes; 3999 cookie->start_time_ns = get_clock(); 4000 cookie->type = type; 4001 } 4002 4003 void 4004 bdrv_acct_done(BlockDriverState *bs, BlockAcctCookie *cookie) 4005 { 4006 assert(cookie->type < BDRV_MAX_IOTYPE); 4007 4008 bs->nr_bytes[cookie->type] += cookie->bytes; 4009 bs->nr_ops[cookie->type]++; 4010 bs->total_time_ns[cookie->type] += get_clock() - cookie->start_time_ns; 4011 } 4012 4013 int bdrv_img_create(const char *filename, const char *fmt, 4014 const char *base_filename, const char *base_fmt, 4015 char *options, uint64_t img_size, int flags) 4016 { 4017 QEMUOptionParameter *param = NULL, *create_options = NULL; 4018 QEMUOptionParameter *backing_fmt, *backing_file, *size; 4019 BlockDriverState *bs = NULL; 4020 BlockDriver *drv, *proto_drv; 4021 BlockDriver *backing_drv = NULL; 4022 int ret = 0; 4023 4024 /* Find driver and parse its options */ 4025 drv = bdrv_find_format(fmt); 4026 if (!drv) { 4027 error_report("Unknown file format '%s'", fmt); 4028 ret = -EINVAL; 4029 goto out; 4030 } 4031 4032 proto_drv = bdrv_find_protocol(filename); 4033 if (!proto_drv) { 4034 error_report("Unknown protocol '%s'", filename); 4035 ret = -EINVAL; 4036 goto out; 4037 } 4038 4039 create_options = append_option_parameters(create_options, 4040 drv->create_options); 4041 create_options = append_option_parameters(create_options, 4042 proto_drv->create_options); 4043 4044 /* Create parameter list with default values */ 4045 param = parse_option_parameters("", create_options, param); 4046 4047 set_option_parameter_int(param, BLOCK_OPT_SIZE, img_size); 4048 4049 /* Parse -o options */ 4050 if (options) { 4051 param = parse_option_parameters(options, create_options, param); 4052 if (param == NULL) { 4053 error_report("Invalid options for file format '%s'.", fmt); 4054 ret = -EINVAL; 4055 goto out; 4056 } 4057 } 4058 4059 if (base_filename) { 4060 if (set_option_parameter(param, BLOCK_OPT_BACKING_FILE, 4061 base_filename)) { 4062 error_report("Backing file not supported for file format '%s'", 4063 fmt); 4064 ret = -EINVAL; 4065 goto out; 4066 } 4067 } 4068 4069 if (base_fmt) { 4070 if (set_option_parameter(param, BLOCK_OPT_BACKING_FMT, base_fmt)) { 4071 error_report("Backing file format not supported for file " 4072 "format '%s'", fmt); 4073 ret = -EINVAL; 4074 goto out; 4075 } 4076 } 4077 4078 backing_file = get_option_parameter(param, BLOCK_OPT_BACKING_FILE); 4079 if (backing_file && backing_file->value.s) { 4080 if (!strcmp(filename, backing_file->value.s)) { 4081 
error_report("Error: Trying to create an image with the " 4082 "same filename as the backing file"); 4083 ret = -EINVAL; 4084 goto out; 4085 } 4086 } 4087 4088 backing_fmt = get_option_parameter(param, BLOCK_OPT_BACKING_FMT); 4089 if (backing_fmt && backing_fmt->value.s) { 4090 backing_drv = bdrv_find_format(backing_fmt->value.s); 4091 if (!backing_drv) { 4092 error_report("Unknown backing file format '%s'", 4093 backing_fmt->value.s); 4094 ret = -EINVAL; 4095 goto out; 4096 } 4097 } 4098 4099 // The size for the image must always be specified, with one exception: 4100 // If we are using a backing file, we can obtain the size from there 4101 size = get_option_parameter(param, BLOCK_OPT_SIZE); 4102 if (size && size->value.n == -1) { 4103 if (backing_file && backing_file->value.s) { 4104 uint64_t size; 4105 char buf[32]; 4106 4107 bs = bdrv_new(""); 4108 4109 ret = bdrv_open(bs, backing_file->value.s, flags, backing_drv); 4110 if (ret < 0) { 4111 error_report("Could not open '%s'", backing_file->value.s); 4112 goto out; 4113 } 4114 bdrv_get_geometry(bs, &size); 4115 size *= 512; 4116 4117 snprintf(buf, sizeof(buf), "%" PRId64, size); 4118 set_option_parameter(param, BLOCK_OPT_SIZE, buf); 4119 } else { 4120 error_report("Image creation needs a size parameter"); 4121 ret = -EINVAL; 4122 goto out; 4123 } 4124 } 4125 4126 printf("Formatting '%s', fmt=%s ", filename, fmt); 4127 print_option_parameters(param); 4128 puts(""); 4129 4130 ret = bdrv_create(drv, filename, param); 4131 4132 if (ret < 0) { 4133 if (ret == -ENOTSUP) { 4134 error_report("Formatting or formatting option not supported for " 4135 "file format '%s'", fmt); 4136 } else if (ret == -EFBIG) { 4137 error_report("The image size is too large for file format '%s'", 4138 fmt); 4139 } else { 4140 error_report("%s: error while creating %s: %s", filename, fmt, 4141 strerror(-ret)); 4142 } 4143 } 4144 4145 out: 4146 free_option_parameters(create_options); 4147 free_option_parameters(param); 4148 4149 if (bs) { 4150 bdrv_delete(bs); 4151 } 4152 4153 return ret; 4154 } 4155 4156 void *block_job_create(const BlockJobType *job_type, BlockDriverState *bs, 4157 int64_t speed, BlockDriverCompletionFunc *cb, 4158 void *opaque, Error **errp) 4159 { 4160 BlockJob *job; 4161 4162 if (bs->job || bdrv_in_use(bs)) { 4163 error_set(errp, QERR_DEVICE_IN_USE, bdrv_get_device_name(bs)); 4164 return NULL; 4165 } 4166 bdrv_set_in_use(bs, 1); 4167 4168 job = g_malloc0(job_type->instance_size); 4169 job->job_type = job_type; 4170 job->bs = bs; 4171 job->cb = cb; 4172 job->opaque = opaque; 4173 bs->job = job; 4174 4175 /* Only set speed when necessary to avoid NotSupported error */ 4176 if (speed != 0) { 4177 Error *local_err = NULL; 4178 4179 block_job_set_speed(job, speed, &local_err); 4180 if (error_is_set(&local_err)) { 4181 bs->job = NULL; 4182 g_free(job); 4183 bdrv_set_in_use(bs, 0); 4184 error_propagate(errp, local_err); 4185 return NULL; 4186 } 4187 } 4188 return job; 4189 } 4190 4191 void block_job_complete(BlockJob *job, int ret) 4192 { 4193 BlockDriverState *bs = job->bs; 4194 4195 assert(bs->job == job); 4196 job->cb(job->opaque, ret); 4197 bs->job = NULL; 4198 g_free(job); 4199 bdrv_set_in_use(bs, 0); 4200 } 4201 4202 void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp) 4203 { 4204 Error *local_err = NULL; 4205 4206 if (!job->job_type->set_speed) { 4207 error_set(errp, QERR_NOT_SUPPORTED); 4208 return; 4209 } 4210 job->job_type->set_speed(job, speed, &local_err); 4211 if (error_is_set(&local_err)) { 4212 error_propagate(errp, 
local_err); 4213 return; 4214 } 4215 4216 job->speed = speed; 4217 } 4218 4219 void block_job_cancel(BlockJob *job) 4220 { 4221 job->cancelled = true; 4222 } 4223 4224 bool block_job_is_cancelled(BlockJob *job) 4225 { 4226 return job->cancelled; 4227 } 4228 4229 void block_job_cancel_sync(BlockJob *job) 4230 { 4231 BlockDriverState *bs = job->bs; 4232 4233 assert(bs->job == job); 4234 block_job_cancel(job); 4235 while (bs->job != NULL && bs->job->busy) { 4236 qemu_aio_wait(); 4237 } 4238 } 4239
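/*
 * Usage sketch for the block job helpers above (an illustrative, editor-added
 * example, not code used by this file; my_job_type and my_job_run are
 * hypothetical names).  A job implementation allocates its state with
 * block_job_create(), which also marks the BlockDriverState as in use,
 * periodically polls block_job_is_cancelled() from its coroutine, and finally
 * reports its result through block_job_complete(), which releases the device
 * again via bdrv_set_in_use(bs, 0):
 *
 *     static BlockJobType my_job_type = {
 *         .instance_size = sizeof(BlockJob),
 *     };
 *
 *     static void coroutine_fn my_job_run(void *opaque)
 *     {
 *         BlockJob *job = opaque;
 *         int ret = 0;
 *
 *         while (!block_job_is_cancelled(job)) {
 *             // perform one unit of work on job->bs; set ret and break on error
 *             break;
 *         }
 *         block_job_complete(job, ret);
 *     }
 *
 *     // creation, e.g. from a monitor/QMP command handler:
 *     //     BlockJob *job = block_job_create(&my_job_type, bs, 0, cb, opaque, errp);
 *     //     if (job) {
 *     //         enter a coroutine that runs my_job_run(job);
 *     //     }
 */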