/*
 * QEMU Block backends
 *
 * Copyright (C) 2014 Red Hat, Inc.
 *
 * Authors:
 *  Markus Armbruster <armbru@redhat.com>,
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1
 * or later.  See the COPYING.LIB file in the top-level directory.
 */

#include "sysemu/block-backend.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "block/throttle-groups.h"
#include "sysemu/blockdev.h"
#include "sysemu/sysemu.h"
#include "qapi-event.h"

/* Number of coroutines to reserve per attached device model */
#define COROUTINE_POOL_RESERVATION 64

static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb);

struct BlockBackend {
    char *name;
    int refcnt;
    BlockDriverState *bs;
    DriveInfo *legacy_dinfo;    /* null unless created by drive_new() */
    QTAILQ_ENTRY(BlockBackend) link; /* for blk_backends */

    void *dev;                  /* attached device model, if any */
    /* TODO change to DeviceState when all users are qdevified */
    const BlockDevOps *dev_ops;
    void *dev_opaque;

    /* the block size for which the guest device expects atomicity */
    int guest_block_size;

    /* If the BDS tree is removed, some of its options are stored here (which
     * can be used to restore those options in the new BDS on insert) */
    BlockBackendRootState root_state;

    /* I/O stats (display with "info blockstats"). */
    BlockAcctStats stats;

    BlockdevOnError on_read_error, on_write_error;
    bool iostatus_enabled;
    BlockDeviceIoStatus iostatus;
};

typedef struct BlockBackendAIOCB {
    BlockAIOCB common;
    QEMUBH *bh;
    BlockBackend *blk;
    int ret;
} BlockBackendAIOCB;

static const AIOCBInfo block_backend_aiocb_info = {
    .get_aio_context = blk_aiocb_get_aio_context,
    .aiocb_size = sizeof(BlockBackendAIOCB),
};

static void drive_info_del(DriveInfo *dinfo);

/* All the BlockBackends (except for hidden ones) */
static QTAILQ_HEAD(, BlockBackend) blk_backends =
    QTAILQ_HEAD_INITIALIZER(blk_backends);

/*
 * Create a new BlockBackend with @name, with a reference count of one.
 * @name must not be null or empty.
 * Fail if a BlockBackend with this name already exists.
 * Store an error through @errp on failure, unless it's null.
 * Return the new BlockBackend on success, null on failure.
 */
BlockBackend *blk_new(const char *name, Error **errp)
{
    BlockBackend *blk;

    assert(name && name[0]);
    if (!id_wellformed(name)) {
        error_setg(errp, "Invalid device name");
        return NULL;
    }
    if (blk_by_name(name)) {
        error_setg(errp, "Device with id '%s' already exists", name);
        return NULL;
    }
    if (bdrv_find_node(name)) {
        error_setg(errp,
                   "Device name '%s' conflicts with an existing node name",
                   name);
        return NULL;
    }

    blk = g_new0(BlockBackend, 1);
    blk->name = g_strdup(name);
    blk->refcnt = 1;
    QTAILQ_INSERT_TAIL(&blk_backends, blk, link);
    return blk;
}

/*
 * Create a new BlockBackend with a new BlockDriverState attached.
 * Otherwise just like blk_new(), which see.
 */
BlockBackend *blk_new_with_bs(const char *name, Error **errp)
{
    BlockBackend *blk;
    BlockDriverState *bs;

    blk = blk_new(name, errp);
    if (!blk) {
        return NULL;
    }

    bs = bdrv_new_root();
    blk->bs = bs;
    bs->blk = blk;
    return blk;
}
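/*
 * A minimal creation sketch (hypothetical caller, not part of this file):
 * blk_new() stores an Error through @errp on failure and returns null,
 * so the caller owns one reference on success and drops it when done.
 *
 *     Error *local_err = NULL;
 *     BlockBackend *blk = blk_new("drive0", &local_err);
 *
 *     if (!blk) {
 *         error_report_err(local_err);
 *         return;
 *     }
 *     ... use blk ...
 *     blk_unref(blk);
 */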
/*
 * Calls blk_new_with_bs() and then calls bdrv_open() on the BlockDriverState.
 *
 * Just as with bdrv_open(), after having called this function the reference to
 * @options belongs to the block layer (even on failure).
 *
 * TODO: Remove @filename and @flags; it should be possible to specify a whole
 * BDS tree just by specifying the @options QDict (or @reference,
 * alternatively).  At the time of adding this function, this is not possible,
 * though, so callers of this function have to be able to specify @filename and
 * @flags.
 */
BlockBackend *blk_new_open(const char *name, const char *filename,
                           const char *reference, QDict *options, int flags,
                           Error **errp)
{
    BlockBackend *blk;
    int ret;

    blk = blk_new_with_bs(name, errp);
    if (!blk) {
        QDECREF(options);
        return NULL;
    }

    ret = bdrv_open(&blk->bs, filename, reference, options, flags, errp);
    if (ret < 0) {
        blk_unref(blk);
        return NULL;
    }

    return blk;
}

static void blk_delete(BlockBackend *blk)
{
    assert(!blk->refcnt);
    assert(!blk->dev);
    if (blk->bs) {
        assert(blk->bs->blk == blk);
        blk->bs->blk = NULL;
        bdrv_unref(blk->bs);
        blk->bs = NULL;
    }
    if (blk->root_state.throttle_state) {
        g_free(blk->root_state.throttle_group);
        throttle_group_unref(blk->root_state.throttle_state);
    }
    /* Avoid double-remove after blk_hide_on_behalf_of_hmp_drive_del() */
    if (blk->name[0]) {
        QTAILQ_REMOVE(&blk_backends, blk, link);
    }
    g_free(blk->name);
    drive_info_del(blk->legacy_dinfo);
    g_free(blk);
}

static void drive_info_del(DriveInfo *dinfo)
{
    if (!dinfo) {
        return;
    }
    qemu_opts_del(dinfo->opts);
    g_free(dinfo->serial);
    g_free(dinfo);
}

/*
 * Increment @blk's reference count.
 * @blk must not be null.
 */
void blk_ref(BlockBackend *blk)
{
    blk->refcnt++;
}

/*
 * Decrement @blk's reference count.
 * If this drops it to zero, destroy @blk.
 * For convenience, do nothing if @blk is null.
 */
void blk_unref(BlockBackend *blk)
{
    if (blk) {
        assert(blk->refcnt > 0);
        if (!--blk->refcnt) {
            blk_delete(blk);
        }
    }
}

/*
 * Return the BlockBackend after @blk.
 * If @blk is null, return the first one.
 * Else, return @blk's next sibling, which may be null.
 *
 * To iterate over all BlockBackends, do
 * for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
 *     ...
 * }
 */
BlockBackend *blk_next(BlockBackend *blk)
{
    return blk ? QTAILQ_NEXT(blk, link) : QTAILQ_FIRST(&blk_backends);
}

/*
 * Return @blk's name, a non-null string.
 * Wart: the name is empty iff @blk has been hidden with
 * blk_hide_on_behalf_of_hmp_drive_del().
 */
const char *blk_name(BlockBackend *blk)
{
    return blk->name;
}

/*
 * Return the BlockBackend with name @name if it exists, else null.
 * @name must not be null.
 */
BlockBackend *blk_by_name(const char *name)
{
    BlockBackend *blk;

    assert(name);
    QTAILQ_FOREACH(blk, &blk_backends, link) {
        if (!strcmp(name, blk->name)) {
            return blk;
        }
    }
    return NULL;
}
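/*
 * Open sketch (hypothetical caller; the name and path are made-up):
 * note that blk_new_open() consumes @options even on failure, so the
 * caller must not QDECREF() it afterwards.
 *
 *     Error *local_err = NULL;
 *     BlockBackend *blk = blk_new_open("disk0", "/tmp/test.qcow2", NULL,
 *                                      NULL, BDRV_O_RDWR, &local_err);
 *
 *     if (!blk) {
 *         error_report_err(local_err);
 *     }
 */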
/*
 * Return the BlockDriverState attached to @blk if any, else null.
 */
BlockDriverState *blk_bs(BlockBackend *blk)
{
    return blk->bs;
}

/*
 * Changes the BlockDriverState attached to @blk
 */
void blk_set_bs(BlockBackend *blk, BlockDriverState *bs)
{
    bdrv_ref(bs);

    if (blk->bs) {
        blk->bs->blk = NULL;
        bdrv_unref(blk->bs);
    }
    assert(bs->blk == NULL);

    blk->bs = bs;
    bs->blk = blk;
}

/*
 * Return @blk's DriveInfo if any, else null.
 */
DriveInfo *blk_legacy_dinfo(BlockBackend *blk)
{
    return blk->legacy_dinfo;
}

/*
 * Set @blk's DriveInfo to @dinfo, and return it.
 * @blk must not have a DriveInfo set already.
 * No other BlockBackend may have the same DriveInfo set.
 */
DriveInfo *blk_set_legacy_dinfo(BlockBackend *blk, DriveInfo *dinfo)
{
    assert(!blk->legacy_dinfo);
    return blk->legacy_dinfo = dinfo;
}

/*
 * Return the BlockBackend with DriveInfo @dinfo.
 * It must exist.
 */
BlockBackend *blk_by_legacy_dinfo(DriveInfo *dinfo)
{
    BlockBackend *blk;

    QTAILQ_FOREACH(blk, &blk_backends, link) {
        if (blk->legacy_dinfo == dinfo) {
            return blk;
        }
    }
    abort();
}

/*
 * Hide @blk.
 * @blk must not have been hidden already.
 * Make attached BlockDriverState, if any, anonymous.
 * Once hidden, @blk is invisible to all functions that don't receive
 * it as argument.  For example, blk_by_name() won't return it.
 * Strictly for use by do_drive_del().
 * TODO get rid of it!
 */
void blk_hide_on_behalf_of_hmp_drive_del(BlockBackend *blk)
{
    QTAILQ_REMOVE(&blk_backends, blk, link);
    blk->name[0] = 0;
    if (blk->bs) {
        bdrv_make_anon(blk->bs);
    }
}

/*
 * Associates a new BlockDriverState with @blk.
 */
void blk_insert_bs(BlockBackend *blk, BlockDriverState *bs)
{
    assert(!blk->bs && !bs->blk);
    bdrv_ref(bs);
    blk->bs = bs;
    bs->blk = blk;
}

/*
 * Attach device model @dev to @blk.
 * Return 0 on success, -EBUSY when a device model is attached already.
 */
int blk_attach_dev(BlockBackend *blk, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    if (blk->dev) {
        return -EBUSY;
    }
    blk_ref(blk);
    blk->dev = dev;
    blk_iostatus_reset(blk);
    return 0;
}

/*
 * Attach device model @dev to @blk.
 * @blk must not have a device model attached already.
 * TODO qdevified devices don't use this, remove when devices are qdevified
 */
void blk_attach_dev_nofail(BlockBackend *blk, void *dev)
{
    if (blk_attach_dev(blk, dev) < 0) {
        abort();
    }
}

/*
 * Detach device model @dev from @blk.
 * @dev must be currently attached to @blk.
 */
void blk_detach_dev(BlockBackend *blk, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    assert(blk->dev == dev);
    blk->dev = NULL;
    blk->dev_ops = NULL;
    blk->dev_opaque = NULL;
    blk->guest_block_size = 512;
    blk_unref(blk);
}

/*
 * Return the device model attached to @blk if any, else null.
 */
void *blk_get_attached_dev(BlockBackend *blk)
/* TODO change to return DeviceState * when all users are qdevified */
{
    return blk->dev;
}
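/*
 * Attach/detach sketch (hypothetical device model "mydev"): the backend
 * holds an extra reference while a device model is attached, dropped
 * again by blk_detach_dev().
 *
 *     if (blk_attach_dev(blk, mydev) < 0) {
 *         ... another device model is attached already (-EBUSY) ...
 *     }
 *     ...
 *     blk_detach_dev(blk, mydev);
 */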
/*
 * Set @blk's device model callbacks to @ops.
 * @opaque is the opaque argument to pass to the callbacks.
 * This is for use by device models.
 */
void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops,
                     void *opaque)
{
    blk->dev_ops = ops;
    blk->dev_opaque = opaque;
}

/*
 * Notify @blk's attached device model of media change.
 * If @load is true, notify of media load.
 * Else, notify of media eject.
 * Also send DEVICE_TRAY_MOVED events as appropriate.
 */
void blk_dev_change_media_cb(BlockBackend *blk, bool load)
{
    if (blk->dev_ops && blk->dev_ops->change_media_cb) {
        bool tray_was_closed = !blk_dev_is_tray_open(blk);

        blk->dev_ops->change_media_cb(blk->dev_opaque, load);
        if (tray_was_closed) {
            /* tray open */
            qapi_event_send_device_tray_moved(blk_name(blk),
                                              true, &error_abort);
        }
        if (load) {
            /* tray close */
            qapi_event_send_device_tray_moved(blk_name(blk),
                                              false, &error_abort);
        }
    }
}

/*
 * Does @blk's attached device model have removable media?
 * %true if no device model is attached.
 */
bool blk_dev_has_removable_media(BlockBackend *blk)
{
    return !blk->dev || (blk->dev_ops && blk->dev_ops->change_media_cb);
}

/*
 * Notify @blk's attached device model of a media eject request.
 * If @force is true, the medium is about to be yanked out forcefully.
 */
void blk_dev_eject_request(BlockBackend *blk, bool force)
{
    if (blk->dev_ops && blk->dev_ops->eject_request_cb) {
        blk->dev_ops->eject_request_cb(blk->dev_opaque, force);
    }
}

/*
 * Does @blk's attached device model have a tray, and is it open?
 */
bool blk_dev_is_tray_open(BlockBackend *blk)
{
    if (blk->dev_ops && blk->dev_ops->is_tray_open) {
        return blk->dev_ops->is_tray_open(blk->dev_opaque);
    }
    return false;
}

/*
 * Does @blk's attached device model have the medium locked?
 * %false if the device model has no such lock.
 */
bool blk_dev_is_medium_locked(BlockBackend *blk)
{
    if (blk->dev_ops && blk->dev_ops->is_medium_locked) {
        return blk->dev_ops->is_medium_locked(blk->dev_opaque);
    }
    return false;
}
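/*
 * Wiring sketch (hypothetical mydev_* callbacks): a device model with a
 * tray typically registers change_media_cb and is_tray_open so the
 * helpers above can query it and emit DEVICE_TRAY_MOVED events.
 *
 *     static const BlockDevOps mydev_block_ops = {
 *         .change_media_cb  = mydev_change_media_cb,
 *         .eject_request_cb = mydev_eject_request_cb,
 *         .is_tray_open     = mydev_is_tray_open,
 *     };
 *
 *     blk_set_dev_ops(blk, &mydev_block_ops, mydev);
 */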
/*
 * Notify @blk's attached device model of a backend size change.
 */
void blk_dev_resize_cb(BlockBackend *blk)
{
    if (blk->dev_ops && blk->dev_ops->resize_cb) {
        blk->dev_ops->resize_cb(blk->dev_opaque);
    }
}

void blk_iostatus_enable(BlockBackend *blk)
{
    blk->iostatus_enabled = true;
    blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

/* The I/O status is only enabled if the drive explicitly
 * enables it _and_ the VM is configured to stop on errors */
bool blk_iostatus_is_enabled(const BlockBackend *blk)
{
    return (blk->iostatus_enabled &&
            (blk->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
             blk->on_write_error == BLOCKDEV_ON_ERROR_STOP   ||
             blk->on_read_error == BLOCKDEV_ON_ERROR_STOP));
}

BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk)
{
    return blk->iostatus;
}

void blk_iostatus_disable(BlockBackend *blk)
{
    blk->iostatus_enabled = false;
}

void blk_iostatus_reset(BlockBackend *blk)
{
    if (blk_iostatus_is_enabled(blk)) {
        blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
        if (blk->bs && blk->bs->job) {
            block_job_iostatus_reset(blk->bs->job);
        }
    }
}

void blk_iostatus_set_err(BlockBackend *blk, int error)
{
    assert(blk_iostatus_is_enabled(blk));
    if (blk->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        blk->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}

static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
                                  size_t size)
{
    int64_t len;

    if (size > INT_MAX) {
        return -EIO;
    }

    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    len = blk_getlength(blk);
    if (len < 0) {
        return len;
    }

    if (offset < 0) {
        return -EIO;
    }

    if (offset > len || len - offset < size) {
        return -EIO;
    }

    return 0;
}

static int blk_check_request(BlockBackend *blk, int64_t sector_num,
                             int nb_sectors)
{
    if (sector_num < 0 || sector_num > INT64_MAX / BDRV_SECTOR_SIZE) {
        return -EIO;
    }

    if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
        return -EIO;
    }

    return blk_check_byte_request(blk, sector_num * BDRV_SECTOR_SIZE,
                                  nb_sectors * BDRV_SECTOR_SIZE);
}

int blk_read(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
             int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_read(blk->bs, sector_num, buf, nb_sectors);
}

int blk_read_unthrottled(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
                         int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_read_unthrottled(blk->bs, sector_num, buf, nb_sectors);
}

int blk_write(BlockBackend *blk, int64_t sector_num, const uint8_t *buf,
              int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_write(blk->bs, sector_num, buf, nb_sectors);
}

int blk_write_zeroes(BlockBackend *blk, int64_t sector_num,
                     int nb_sectors, BdrvRequestFlags flags)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_write_zeroes(blk->bs, sector_num, nb_sectors, flags);
}
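/*
 * Synchronous I/O sketch (hypothetical caller): every helper above runs
 * the same pre-check, so callers see a single error path.
 *
 *     uint8_t buf[BDRV_SECTOR_SIZE];
 *     int ret = blk_read(blk, 0, buf, 1);
 *
 *     if (ret < 0) {
 *         ... -ENOMEDIUM (no medium or tray open) or -EIO (bad range) ...
 *     }
 */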
static void error_callback_bh(void *opaque)
{
    struct BlockBackendAIOCB *acb = opaque;
    qemu_bh_delete(acb->bh);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_aio_unref(acb);
}

static BlockAIOCB *abort_aio_request(BlockBackend *blk, BlockCompletionFunc *cb,
                                     void *opaque, int ret)
{
    struct BlockBackendAIOCB *acb;
    QEMUBH *bh;

    acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque);
    acb->blk = blk;
    acb->ret = ret;

    bh = aio_bh_new(blk_get_aio_context(blk), error_callback_bh, acb);
    acb->bh = bh;
    qemu_bh_schedule(bh);

    return &acb->common;
}

BlockAIOCB *blk_aio_write_zeroes(BlockBackend *blk, int64_t sector_num,
                                 int nb_sectors, BdrvRequestFlags flags,
                                 BlockCompletionFunc *cb, void *opaque)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return abort_aio_request(blk, cb, opaque, ret);
    }

    return bdrv_aio_write_zeroes(blk->bs, sector_num, nb_sectors, flags,
                                 cb, opaque);
}

int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count)
{
    int ret = blk_check_byte_request(blk, offset, count);
    if (ret < 0) {
        return ret;
    }

    return bdrv_pread(blk->bs, offset, buf, count);
}

int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count)
{
    int ret = blk_check_byte_request(blk, offset, count);
    if (ret < 0) {
        return ret;
    }

    return bdrv_pwrite(blk->bs, offset, buf, count);
}

int64_t blk_getlength(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_getlength(blk->bs);
}

void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr)
{
    if (!blk->bs) {
        *nb_sectors_ptr = 0;
    } else {
        bdrv_get_geometry(blk->bs, nb_sectors_ptr);
    }
}

int64_t blk_nb_sectors(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_nb_sectors(blk->bs);
}

BlockAIOCB *blk_aio_readv(BlockBackend *blk, int64_t sector_num,
                          QEMUIOVector *iov, int nb_sectors,
                          BlockCompletionFunc *cb, void *opaque)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return abort_aio_request(blk, cb, opaque, ret);
    }

    return bdrv_aio_readv(blk->bs, sector_num, iov, nb_sectors, cb, opaque);
}

BlockAIOCB *blk_aio_writev(BlockBackend *blk, int64_t sector_num,
                           QEMUIOVector *iov, int nb_sectors,
                           BlockCompletionFunc *cb, void *opaque)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return abort_aio_request(blk, cb, opaque, ret);
    }

    return bdrv_aio_writev(blk->bs, sector_num, iov, nb_sectors, cb, opaque);
}

BlockAIOCB *blk_aio_flush(BlockBackend *blk,
                          BlockCompletionFunc *cb, void *opaque)
{
    if (!blk_is_available(blk)) {
        return abort_aio_request(blk, cb, opaque, -ENOMEDIUM);
    }

    return bdrv_aio_flush(blk->bs, cb, opaque);
}

BlockAIOCB *blk_aio_discard(BlockBackend *blk,
                            int64_t sector_num, int nb_sectors,
                            BlockCompletionFunc *cb, void *opaque)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return abort_aio_request(blk, cb, opaque, ret);
    }

    return bdrv_aio_discard(blk->bs, sector_num, nb_sectors, cb, opaque);
}

void blk_aio_cancel(BlockAIOCB *acb)
{
    bdrv_aio_cancel(acb);
}
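/*
 * AIO sketch (hypothetical callback): on a failed pre-check,
 * abort_aio_request() above still invokes the callback, from a bottom
 * half, with the error code in @ret, so callers need only one
 * completion path.
 *
 *     static void my_read_cb(void *opaque, int ret)
 *     {
 *         if (ret < 0) {
 *             ... handle error ...
 *         }
 *     }
 *
 *     blk_aio_readv(blk, sector_num, &qiov, nb_sectors, my_read_cb, NULL);
 */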
void blk_aio_cancel_async(BlockAIOCB *acb)
{
    bdrv_aio_cancel_async(acb);
}

int blk_aio_multiwrite(BlockBackend *blk, BlockRequest *reqs, int num_reqs)
{
    int i, ret;

    for (i = 0; i < num_reqs; i++) {
        ret = blk_check_request(blk, reqs[i].sector, reqs[i].nb_sectors);
        if (ret < 0) {
            return ret;
        }
    }

    return bdrv_aio_multiwrite(blk->bs, reqs, num_reqs);
}

int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_ioctl(blk->bs, req, buf);
}

BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
                          BlockCompletionFunc *cb, void *opaque)
{
    if (!blk_is_available(blk)) {
        return abort_aio_request(blk, cb, opaque, -ENOMEDIUM);
    }

    return bdrv_aio_ioctl(blk->bs, req, buf, cb, opaque);
}

int blk_co_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_co_discard(blk->bs, sector_num, nb_sectors);
}

int blk_co_flush(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_co_flush(blk->bs);
}

int blk_flush(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_flush(blk->bs);
}

int blk_flush_all(void)
{
    return bdrv_flush_all();
}

void blk_drain(BlockBackend *blk)
{
    if (blk->bs) {
        bdrv_drain(blk->bs);
    }
}

void blk_drain_all(void)
{
    bdrv_drain_all();
}

void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error,
                      BlockdevOnError on_write_error)
{
    blk->on_read_error = on_read_error;
    blk->on_write_error = on_write_error;
}

BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read)
{
    return is_read ? blk->on_read_error : blk->on_write_error;
}

BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read,
                                      int error)
{
    BlockdevOnError on_err = blk_get_on_error(blk, is_read);

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
        return (error == ENOSPC) ?
               BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_STOP:
        return BLOCK_ERROR_ACTION_STOP;
    case BLOCKDEV_ON_ERROR_REPORT:
        return BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_IGNORE:
        return BLOCK_ERROR_ACTION_IGNORE;
    default:
        abort();
    }
}

static void send_qmp_error_event(BlockBackend *blk,
                                 BlockErrorAction action,
                                 bool is_read, int error)
{
    IoOperationType optype;

    optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
    qapi_event_send_block_io_error(blk_name(blk), optype, action,
                                   blk_iostatus_is_enabled(blk),
                                   error == ENOSPC, strerror(error),
                                   &error_abort);
}
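/*
 * Error handling sketch (hypothetical device model, with a negative
 * errno in @ret): look up the configured action, then report it via
 * blk_error_action() below; both helpers take a positive errno value.
 *
 *     BlockErrorAction action = blk_get_error_action(blk, is_read, -ret);
 *
 *     if (action == BLOCK_ERROR_ACTION_STOP) {
 *         ... queue the request so it can be retried after "cont" ...
 *     }
 *     blk_error_action(blk, action, is_read, -ret);
 */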
/* This is done by device models because, while the block layer knows
 * about the error, it does not know whether an operation comes from
 * the device or the block layer (from a job, for example).
 */
void blk_error_action(BlockBackend *blk, BlockErrorAction action,
                      bool is_read, int error)
{
    assert(error >= 0);

    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* First set the iostatus, so that "info block" returns an iostatus
         * that matches the events raised so far (an additional error iostatus
         * is fine, but not a lost one).
         */
        blk_iostatus_set_err(blk, error);

        /* Then raise the request to stop the VM and the event.
         * qemu_system_vmstop_request_prepare has two effects.  First,
         * it ensures that the STOP event always comes after the
         * BLOCK_IO_ERROR event.  Second, it ensures that even if management
         * can observe the STOP event and do a "cont" before the STOP
         * event is issued, the VM will not stop.  In this case, vm_start()
         * also ensures that the STOP/RESUME pair of events is emitted.
         */
        qemu_system_vmstop_request_prepare();
        send_qmp_error_event(blk, action, is_read, error);
        qemu_system_vmstop_request(RUN_STATE_IO_ERROR);
    } else {
        send_qmp_error_event(blk, action, is_read, error);
    }
}

int blk_is_read_only(BlockBackend *blk)
{
    if (blk->bs) {
        return bdrv_is_read_only(blk->bs);
    } else {
        return blk->root_state.read_only;
    }
}

int blk_is_sg(BlockBackend *blk)
{
    if (!blk->bs) {
        return 0;
    }

    return bdrv_is_sg(blk->bs);
}

int blk_enable_write_cache(BlockBackend *blk)
{
    if (blk->bs) {
        return bdrv_enable_write_cache(blk->bs);
    } else {
        return !!(blk->root_state.open_flags & BDRV_O_CACHE_WB);
    }
}

void blk_set_enable_write_cache(BlockBackend *blk, bool wce)
{
    if (blk->bs) {
        bdrv_set_enable_write_cache(blk->bs, wce);
    } else {
        if (wce) {
            blk->root_state.open_flags |= BDRV_O_CACHE_WB;
        } else {
            blk->root_state.open_flags &= ~BDRV_O_CACHE_WB;
        }
    }
}

void blk_invalidate_cache(BlockBackend *blk, Error **errp)
{
    if (!blk->bs) {
        error_setg(errp, "Device '%s' has no medium", blk->name);
        return;
    }

    bdrv_invalidate_cache(blk->bs, errp);
}

bool blk_is_inserted(BlockBackend *blk)
{
    return blk->bs && bdrv_is_inserted(blk->bs);
}

bool blk_is_available(BlockBackend *blk)
{
    return blk_is_inserted(blk) && !blk_dev_is_tray_open(blk);
}

void blk_lock_medium(BlockBackend *blk, bool locked)
{
    if (blk->bs) {
        bdrv_lock_medium(blk->bs, locked);
    }
}

void blk_eject(BlockBackend *blk, bool eject_flag)
{
    if (blk->bs) {
        bdrv_eject(blk->bs, eject_flag);
    }
}

int blk_get_flags(BlockBackend *blk)
{
    if (blk->bs) {
        return bdrv_get_flags(blk->bs);
    } else {
        return blk->root_state.open_flags;
    }
}

int blk_get_max_transfer_length(BlockBackend *blk)
{
    if (blk->bs) {
        return blk->bs->bl.max_transfer_length;
    } else {
        return 0;
    }
}

void blk_set_guest_block_size(BlockBackend *blk, int align)
{
    blk->guest_block_size = align;
}
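/*
 * Fallback sketch: the accessors above work whether or not a BDS is
 * attached; without one they read or update root_state, so settings
 * survive until the next medium is inserted.
 *
 *     blk_set_enable_write_cache(blk, true);
 *     assert(blk_enable_write_cache(blk));
 */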
void *blk_blockalign(BlockBackend *blk, size_t size)
{
    return qemu_blockalign(blk ? blk->bs : NULL, size);
}

bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp)
{
    if (!blk->bs) {
        return false;
    }

    return bdrv_op_is_blocked(blk->bs, op, errp);
}

void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason)
{
    if (blk->bs) {
        bdrv_op_unblock(blk->bs, op, reason);
    }
}

void blk_op_block_all(BlockBackend *blk, Error *reason)
{
    if (blk->bs) {
        bdrv_op_block_all(blk->bs, reason);
    }
}

void blk_op_unblock_all(BlockBackend *blk, Error *reason)
{
    if (blk->bs) {
        bdrv_op_unblock_all(blk->bs, reason);
    }
}

AioContext *blk_get_aio_context(BlockBackend *blk)
{
    if (blk->bs) {
        return bdrv_get_aio_context(blk->bs);
    } else {
        return qemu_get_aio_context();
    }
}

static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb)
{
    BlockBackendAIOCB *blk_acb = DO_UPCAST(BlockBackendAIOCB, common, acb);
    return blk_get_aio_context(blk_acb->blk);
}

void blk_set_aio_context(BlockBackend *blk, AioContext *new_context)
{
    if (blk->bs) {
        bdrv_set_aio_context(blk->bs, new_context);
    }
}

void blk_add_aio_context_notifier(BlockBackend *blk,
        void (*attached_aio_context)(AioContext *new_context, void *opaque),
        void (*detach_aio_context)(void *opaque), void *opaque)
{
    if (blk->bs) {
        bdrv_add_aio_context_notifier(blk->bs, attached_aio_context,
                                      detach_aio_context, opaque);
    }
}

void blk_remove_aio_context_notifier(BlockBackend *blk,
                                     void (*attached_aio_context)(AioContext *,
                                                                  void *),
                                     void (*detach_aio_context)(void *),
                                     void *opaque)
{
    if (blk->bs) {
        bdrv_remove_aio_context_notifier(blk->bs, attached_aio_context,
                                         detach_aio_context, opaque);
    }
}

void blk_add_close_notifier(BlockBackend *blk, Notifier *notify)
{
    if (blk->bs) {
        bdrv_add_close_notifier(blk->bs, notify);
    }
}

void blk_io_plug(BlockBackend *blk)
{
    if (blk->bs) {
        bdrv_io_plug(blk->bs);
    }
}

void blk_io_unplug(BlockBackend *blk)
{
    if (blk->bs) {
        bdrv_io_unplug(blk->bs);
    }
}

BlockAcctStats *blk_get_stats(BlockBackend *blk)
{
    return &blk->stats;
}

void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
                  BlockCompletionFunc *cb, void *opaque)
{
    return qemu_aio_get(aiocb_info, blk_bs(blk), cb, opaque);
}

int coroutine_fn blk_co_write_zeroes(BlockBackend *blk, int64_t sector_num,
                                     int nb_sectors, BdrvRequestFlags flags)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_co_write_zeroes(blk->bs, sector_num, nb_sectors, flags);
}

int blk_write_compressed(BlockBackend *blk, int64_t sector_num,
                         const uint8_t *buf, int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_write_compressed(blk->bs, sector_num, buf, nb_sectors);
}

int blk_truncate(BlockBackend *blk, int64_t offset)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_truncate(blk->bs, offset);
}
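/*
 * Notifier sketch (hypothetical callbacks): a user that must follow the
 * backend across AioContext moves registers and later unregisters a
 * matching pair:
 *
 *     static void my_attached(AioContext *new_ctx, void *opaque) { ... }
 *     static void my_detach(void *opaque) { ... }
 *
 *     blk_add_aio_context_notifier(blk, my_attached, my_detach, me);
 *     ...
 *     blk_remove_aio_context_notifier(blk, my_attached, my_detach, me);
 */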
int blk_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_discard(blk->bs, sector_num, nb_sectors);
}

int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
                     int64_t pos, int size)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_save_vmstate(blk->bs, buf, pos, size);
}

int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_load_vmstate(blk->bs, buf, pos, size);
}

int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_probe_blocksizes(blk->bs, bsz);
}

int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_probe_geometry(blk->bs, geo);
}

/*
 * Updates the BlockBackendRootState object with data from the currently
 * attached BlockDriverState.
 */
void blk_update_root_state(BlockBackend *blk)
{
    assert(blk->bs);

    blk->root_state.open_flags = blk->bs->open_flags;
    blk->root_state.read_only = blk->bs->read_only;
    blk->root_state.detect_zeroes = blk->bs->detect_zeroes;

    if (blk->root_state.throttle_group) {
        g_free(blk->root_state.throttle_group);
        throttle_group_unref(blk->root_state.throttle_state);
    }
    if (blk->bs->throttle_state) {
        const char *name = throttle_group_get_name(blk->bs);
        blk->root_state.throttle_group = g_strdup(name);
        blk->root_state.throttle_state = throttle_group_incref(name);
    } else {
        blk->root_state.throttle_group = NULL;
        blk->root_state.throttle_state = NULL;
    }
}

BlockBackendRootState *blk_get_root_state(BlockBackend *blk)
{
    return &blk->root_state;
}
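/*
 * Root state sketch (hypothetical caller): before tearing down the BDS
 * tree, snapshot its options so a later insert can restore them:
 *
 *     blk_update_root_state(blk);
 *     ... detach/destroy the BDS tree ...
 *     BlockBackendRootState *rs = blk_get_root_state(blk);
 *     ... apply rs->open_flags etc. to the new BDS ...
 */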