/*
 * QEMU Block backends
 *
 * Copyright (C) 2014 Red Hat, Inc.
 *
 * Authors:
 *  Markus Armbruster <armbru@redhat.com>,
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1
 * or later.  See the COPYING.LIB file in the top-level directory.
 */

#include "sysemu/block-backend.h"
#include "block/block_int.h"
#include "sysemu/blockdev.h"
#include "qapi-event.h"

/* Number of coroutines to reserve per attached device model */
#define COROUTINE_POOL_RESERVATION 64

struct BlockBackend {
    char *name;
    int refcnt;
    BlockDriverState *bs;
    DriveInfo *legacy_dinfo;    /* null unless created by drive_new() */
    QTAILQ_ENTRY(BlockBackend) link; /* for blk_backends */

    void *dev;                  /* attached device model, if any */
    /* TODO change to DeviceState when all users are qdevified */
    const BlockDevOps *dev_ops;
    void *dev_opaque;
};

typedef struct BlockBackendAIOCB {
    BlockAIOCB common;
    QEMUBH *bh;
    int ret;
} BlockBackendAIOCB;

static const AIOCBInfo block_backend_aiocb_info = {
    .aiocb_size = sizeof(BlockBackendAIOCB),
};

static void drive_info_del(DriveInfo *dinfo);

/* All the BlockBackends (except for hidden ones) */
static QTAILQ_HEAD(, BlockBackend) blk_backends =
    QTAILQ_HEAD_INITIALIZER(blk_backends);

/*
 * Create a new BlockBackend with @name, with a reference count of one.
 * @name must not be null or empty.
 * Fail if a BlockBackend with this name already exists.
 * Store an error through @errp on failure, unless it's null.
 * Return the new BlockBackend on success, null on failure.
 */
BlockBackend *blk_new(const char *name, Error **errp)
{
    BlockBackend *blk;

    assert(name && name[0]);
    if (!id_wellformed(name)) {
        error_setg(errp, "Invalid device name");
        return NULL;
    }
    if (blk_by_name(name)) {
        error_setg(errp, "Device with id '%s' already exists", name);
        return NULL;
    }
    if (bdrv_find_node(name)) {
        error_setg(errp,
                   "Device name '%s' conflicts with an existing node name",
                   name);
        return NULL;
    }

    blk = g_new0(BlockBackend, 1);
    blk->name = g_strdup(name);
    blk->refcnt = 1;
    QTAILQ_INSERT_TAIL(&blk_backends, blk, link);
    return blk;
}

/*
 * Create a new BlockBackend with a new BlockDriverState attached.
 * Otherwise just like blk_new(), which see.
 */
BlockBackend *blk_new_with_bs(const char *name, Error **errp)
{
    BlockBackend *blk;
    BlockDriverState *bs;

    blk = blk_new(name, errp);
    if (!blk) {
        return NULL;
    }

    bs = bdrv_new_root();
    blk->bs = bs;
    bs->blk = blk;
    return blk;
}
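/*
 * A minimal usage sketch (illustrative comment only, not compiled):
 * create a named backend and drop the creator's reference when done.
 * The name "ide0-hd0" is made up and error handling is abbreviated.
 *
 *     Error *local_err = NULL;
 *     BlockBackend *blk = blk_new_with_bs("ide0-hd0", &local_err);
 *
 *     if (!blk) {
 *         error_report_err(local_err);
 *         return;
 *     }
 *     ... hand blk to a device model, which takes its own reference ...
 *     blk_unref(blk);
 */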
/*
 * Calls blk_new_with_bs() and then calls bdrv_open() on the BlockDriverState.
 *
 * Just as with bdrv_open(), after having called this function the reference to
 * @options belongs to the block layer (even on failure).
 *
 * TODO: Remove @filename and @flags; it should be possible to specify a whole
 * BDS tree just by specifying the @options QDict (or @reference,
 * alternatively).  At the time of adding this function, this is not possible,
 * though, so callers of this function have to be able to specify @filename
 * and @flags.
 */
BlockBackend *blk_new_open(const char *name, const char *filename,
                           const char *reference, QDict *options, int flags,
                           Error **errp)
{
    BlockBackend *blk;
    int ret;

    blk = blk_new_with_bs(name, errp);
    if (!blk) {
        QDECREF(options);
        return NULL;
    }

    ret = bdrv_open(&blk->bs, filename, reference, options, flags, NULL, errp);
    if (ret < 0) {
        blk_unref(blk);
        return NULL;
    }

    return blk;
}
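/*
 * A usage sketch for blk_new_open() (illustrative comment only; the
 * file name, driver and flags are made up).  Note that the @options
 * reference is consumed even on failure, so the caller must not
 * QDECREF() it afterwards:
 *
 *     QDict *options = qdict_new();
 *     qdict_put(options, "driver", qstring_from_str("qcow2"));
 *     blk = blk_new_open("drive0", "/tmp/test.qcow2", NULL, options,
 *                        BDRV_O_RDWR, &local_err);
 */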
static void blk_delete(BlockBackend *blk)
{
    assert(!blk->refcnt);
    assert(!blk->dev);
    if (blk->bs) {
        assert(blk->bs->blk == blk);
        blk->bs->blk = NULL;
        bdrv_unref(blk->bs);
        blk->bs = NULL;
    }
    /* Avoid double-remove after blk_hide_on_behalf_of_hmp_drive_del() */
    if (blk->name[0]) {
        QTAILQ_REMOVE(&blk_backends, blk, link);
    }
    g_free(blk->name);
    drive_info_del(blk->legacy_dinfo);
    g_free(blk);
}

static void drive_info_del(DriveInfo *dinfo)
{
    if (!dinfo) {
        return;
    }
    qemu_opts_del(dinfo->opts);
    g_free(dinfo->serial);
    g_free(dinfo);
}

/*
 * Increment @blk's reference count.
 * @blk must not be null.
 */
void blk_ref(BlockBackend *blk)
{
    blk->refcnt++;
}

/*
 * Decrement @blk's reference count.
 * If this drops it to zero, destroy @blk.
 * For convenience, do nothing if @blk is null.
 */
void blk_unref(BlockBackend *blk)
{
    if (blk) {
        assert(blk->refcnt > 0);
        if (!--blk->refcnt) {
            blk_delete(blk);
        }
    }
}

/*
 * Return the BlockBackend after @blk.
 * If @blk is null, return the first one.
 * Else, return @blk's next sibling, which may be null.
 *
 * To iterate over all BlockBackends, do
 * for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
 *     ...
 * }
 */
BlockBackend *blk_next(BlockBackend *blk)
{
    return blk ? QTAILQ_NEXT(blk, link) : QTAILQ_FIRST(&blk_backends);
}

/*
 * Return @blk's name, a non-null string.
 * Wart: the name is empty iff @blk has been hidden with
 * blk_hide_on_behalf_of_hmp_drive_del().
 */
const char *blk_name(BlockBackend *blk)
{
    return blk->name;
}

/*
 * Return the BlockBackend with name @name if it exists, else null.
 * @name must not be null.
 */
BlockBackend *blk_by_name(const char *name)
{
    BlockBackend *blk;

    assert(name);
    QTAILQ_FOREACH(blk, &blk_backends, link) {
        if (!strcmp(name, blk->name)) {
            return blk;
        }
    }
    return NULL;
}

/*
 * Return the BlockDriverState attached to @blk if any, else null.
 */
BlockDriverState *blk_bs(BlockBackend *blk)
{
    return blk->bs;
}

/*
 * Return @blk's DriveInfo if any, else null.
 */
DriveInfo *blk_legacy_dinfo(BlockBackend *blk)
{
    return blk->legacy_dinfo;
}

/*
 * Set @blk's DriveInfo to @dinfo, and return it.
 * @blk must not have a DriveInfo set already.
 * No other BlockBackend may have the same DriveInfo set.
 */
DriveInfo *blk_set_legacy_dinfo(BlockBackend *blk, DriveInfo *dinfo)
{
    assert(!blk->legacy_dinfo);
    return blk->legacy_dinfo = dinfo;
}

/*
 * Return the BlockBackend with DriveInfo @dinfo.
 * It must exist.
 */
BlockBackend *blk_by_legacy_dinfo(DriveInfo *dinfo)
{
    BlockBackend *blk;

    QTAILQ_FOREACH(blk, &blk_backends, link) {
        if (blk->legacy_dinfo == dinfo) {
            return blk;
        }
    }
    abort();
}

/*
 * Hide @blk.
 * @blk must not have been hidden already.
 * Make attached BlockDriverState, if any, anonymous.
 * Once hidden, @blk is invisible to all functions that don't receive
 * it as argument.  For example, blk_by_name() won't return it.
 * Strictly for use by do_drive_del().
 * TODO get rid of it!
 */
void blk_hide_on_behalf_of_hmp_drive_del(BlockBackend *blk)
{
    QTAILQ_REMOVE(&blk_backends, blk, link);
    blk->name[0] = 0;
    if (blk->bs) {
        bdrv_make_anon(blk->bs);
    }
}

/*
 * Attach device model @dev to @blk.
 * Return 0 on success, -EBUSY when a device model is attached already.
 */
int blk_attach_dev(BlockBackend *blk, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    if (blk->dev) {
        return -EBUSY;
    }
    blk_ref(blk);
    blk->dev = dev;
    bdrv_iostatus_reset(blk->bs);
    return 0;
}

/*
 * Attach device model @dev to @blk.
 * @blk must not have a device model attached already.
 * TODO qdevified devices don't use this, remove when devices are qdevified
 */
void blk_attach_dev_nofail(BlockBackend *blk, void *dev)
{
    if (blk_attach_dev(blk, dev) < 0) {
        abort();
    }
}

/*
 * Detach device model @dev from @blk.
 * @dev must be currently attached to @blk.
 */
void blk_detach_dev(BlockBackend *blk, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    assert(blk->dev == dev);
    blk->dev = NULL;
    blk->dev_ops = NULL;
    blk->dev_opaque = NULL;
    bdrv_set_guest_block_size(blk->bs, 512);
    blk_unref(blk);
}

/*
 * Return the device model attached to @blk if any, else null.
 */
void *blk_get_attached_dev(BlockBackend *blk)
/* TODO change to return DeviceState * when all users are qdevified */
{
    return blk->dev;
}

/*
 * Set @blk's device model callbacks to @ops.
 * @opaque is the opaque argument to pass to the callbacks.
 * This is for use by device models.
 */
void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops,
                     void *opaque)
{
    blk->dev_ops = ops;
    blk->dev_opaque = opaque;
}
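/*
 * A sketch of how a device model might register callbacks with
 * blk_set_dev_ops() (illustrative comment only; MyDevice and the
 * my_dev_* callbacks are hypothetical):
 *
 *     static const BlockDevOps my_dev_block_ops = {
 *         .change_media_cb  = my_dev_change_media_cb,
 *         .eject_request_cb = my_dev_eject_request_cb,
 *         .is_tray_open     = my_dev_is_tray_open,
 *         .resize_cb        = my_dev_resize_cb,
 *     };
 *
 *     MyDevice *dev = ...;
 *     blk_attach_dev_nofail(blk, dev);
 *     blk_set_dev_ops(blk, &my_dev_block_ops, dev);
 */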
/*
 * Notify @blk's attached device model of media change.
 * If @load is true, notify of media load.
 * Else, notify of media eject.
 * Also send DEVICE_TRAY_MOVED events as appropriate.
 */
void blk_dev_change_media_cb(BlockBackend *blk, bool load)
{
    if (blk->dev_ops && blk->dev_ops->change_media_cb) {
        bool tray_was_closed = !blk_dev_is_tray_open(blk);

        blk->dev_ops->change_media_cb(blk->dev_opaque, load);
        if (tray_was_closed) {
            /* tray open */
            qapi_event_send_device_tray_moved(blk_name(blk),
                                              true, &error_abort);
        }
        if (load) {
            /* tray close */
            qapi_event_send_device_tray_moved(blk_name(blk),
                                              false, &error_abort);
        }
    }
}

/*
 * Does @blk's attached device model have removable media?
 * %true if no device model is attached.
 */
bool blk_dev_has_removable_media(BlockBackend *blk)
{
    return !blk->dev || (blk->dev_ops && blk->dev_ops->change_media_cb);
}

/*
 * Notify @blk's attached device model of a media eject request.
 * If @force is true, the medium is about to be yanked out forcefully.
 */
void blk_dev_eject_request(BlockBackend *blk, bool force)
{
    if (blk->dev_ops && blk->dev_ops->eject_request_cb) {
        blk->dev_ops->eject_request_cb(blk->dev_opaque, force);
    }
}

/*
 * Does @blk's attached device model have a tray, and is it open?
 */
bool blk_dev_is_tray_open(BlockBackend *blk)
{
    if (blk->dev_ops && blk->dev_ops->is_tray_open) {
        return blk->dev_ops->is_tray_open(blk->dev_opaque);
    }
    return false;
}

/*
 * Does @blk's attached device model have the medium locked?
 * %false if the device model has no such lock.
 */
bool blk_dev_is_medium_locked(BlockBackend *blk)
{
    if (blk->dev_ops && blk->dev_ops->is_medium_locked) {
        return blk->dev_ops->is_medium_locked(blk->dev_opaque);
    }
    return false;
}

/*
 * Notify @blk's attached device model of a backend size change.
 */
void blk_dev_resize_cb(BlockBackend *blk)
{
    if (blk->dev_ops && blk->dev_ops->resize_cb) {
        blk->dev_ops->resize_cb(blk->dev_opaque);
    }
}

void blk_iostatus_enable(BlockBackend *blk)
{
    bdrv_iostatus_enable(blk->bs);
}

static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
                                  size_t size)
{
    int64_t len;

    if (size > INT_MAX) {
        return -EIO;
    }

    if (!blk_is_inserted(blk)) {
        return -ENOMEDIUM;
    }

    len = blk_getlength(blk);
    if (len < 0) {
        return len;
    }

    if (offset < 0) {
        return -EIO;
    }

    if (offset > len || len - offset < size) {
        return -EIO;
    }

    return 0;
}

static int blk_check_request(BlockBackend *blk, int64_t sector_num,
                             int nb_sectors)
{
    if (sector_num < 0 || sector_num > INT64_MAX / BDRV_SECTOR_SIZE) {
        return -EIO;
    }

    if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
        return -EIO;
    }

    return blk_check_byte_request(blk, sector_num * BDRV_SECTOR_SIZE,
                                  nb_sectors * BDRV_SECTOR_SIZE);
}
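/*
 * Worked example for the checks above (comment only): with a 1 MiB
 * backend (len = 1048576), a request at offset = 1048064 for
 * size = 1024 bytes is rejected because len - offset = 512 < 1024.
 * Writing the test as "len - offset < size" rather than
 * "offset + size > len" avoids integer overflow for large offsets.
 */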
int blk_read(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
             int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_read(blk->bs, sector_num, buf, nb_sectors);
}

int blk_read_unthrottled(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
                         int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_read_unthrottled(blk->bs, sector_num, buf, nb_sectors);
}

int blk_write(BlockBackend *blk, int64_t sector_num, const uint8_t *buf,
              int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_write(blk->bs, sector_num, buf, nb_sectors);
}

int blk_write_zeroes(BlockBackend *blk, int64_t sector_num,
                     int nb_sectors, BdrvRequestFlags flags)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_write_zeroes(blk->bs, sector_num, nb_sectors, flags);
}

static void error_callback_bh(void *opaque)
{
    struct BlockBackendAIOCB *acb = opaque;
    qemu_bh_delete(acb->bh);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_aio_unref(acb);
}

static BlockAIOCB *abort_aio_request(BlockBackend *blk,
                                     BlockCompletionFunc *cb,
                                     void *opaque, int ret)
{
    struct BlockBackendAIOCB *acb;
    QEMUBH *bh;

    acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque);
    acb->ret = ret;

    bh = aio_bh_new(blk_get_aio_context(blk), error_callback_bh, acb);
    acb->bh = bh;
    qemu_bh_schedule(bh);

    return &acb->common;
}

BlockAIOCB *blk_aio_write_zeroes(BlockBackend *blk, int64_t sector_num,
                                 int nb_sectors, BdrvRequestFlags flags,
                                 BlockCompletionFunc *cb, void *opaque)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return abort_aio_request(blk, cb, opaque, ret);
    }

    return bdrv_aio_write_zeroes(blk->bs, sector_num, nb_sectors, flags,
                                 cb, opaque);
}

int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count)
{
    int ret = blk_check_byte_request(blk, offset, count);
    if (ret < 0) {
        return ret;
    }

    return bdrv_pread(blk->bs, offset, buf, count);
}

int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count)
{
    int ret = blk_check_byte_request(blk, offset, count);
    if (ret < 0) {
        return ret;
    }

    return bdrv_pwrite(blk->bs, offset, buf, count);
}

int64_t blk_getlength(BlockBackend *blk)
{
    return bdrv_getlength(blk->bs);
}

void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr)
{
    bdrv_get_geometry(blk->bs, nb_sectors_ptr);
}

int64_t blk_nb_sectors(BlockBackend *blk)
{
    return bdrv_nb_sectors(blk->bs);
}

BlockAIOCB *blk_aio_readv(BlockBackend *blk, int64_t sector_num,
                          QEMUIOVector *iov, int nb_sectors,
                          BlockCompletionFunc *cb, void *opaque)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return abort_aio_request(blk, cb, opaque, ret);
    }

    return bdrv_aio_readv(blk->bs, sector_num, iov, nb_sectors, cb, opaque);
}

BlockAIOCB *blk_aio_writev(BlockBackend *blk, int64_t sector_num,
                           QEMUIOVector *iov, int nb_sectors,
                           BlockCompletionFunc *cb, void *opaque)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return abort_aio_request(blk, cb, opaque, ret);
    }

    return bdrv_aio_writev(blk->bs, sector_num, iov, nb_sectors, cb, opaque);
}
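/*
 * A sketch of asynchronous I/O from a caller's point of view
 * (illustrative comment only; my_complete and MyState are
 * hypothetical):
 *
 *     static void my_complete(void *opaque, int ret)
 *     {
 *         MyState *s = opaque;
 *         ... ret is 0 on success, a negative errno on failure ...
 *     }
 *
 *     qemu_iovec_init_external(&qiov, &iov, 1);
 *     blk_aio_readv(blk, sector_num, &qiov, nb_sectors, my_complete, s);
 *
 * When the request fails its precondition check, the callback still
 * fires, from the bottom half scheduled by abort_aio_request() above,
 * so callers need no separate synchronous error path.
 */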
BlockAIOCB *blk_aio_flush(BlockBackend *blk,
                          BlockCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_flush(blk->bs, cb, opaque);
}

BlockAIOCB *blk_aio_discard(BlockBackend *blk,
                            int64_t sector_num, int nb_sectors,
                            BlockCompletionFunc *cb, void *opaque)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return abort_aio_request(blk, cb, opaque, ret);
    }

    return bdrv_aio_discard(blk->bs, sector_num, nb_sectors, cb, opaque);
}

void blk_aio_cancel(BlockAIOCB *acb)
{
    bdrv_aio_cancel(acb);
}

void blk_aio_cancel_async(BlockAIOCB *acb)
{
    bdrv_aio_cancel_async(acb);
}

int blk_aio_multiwrite(BlockBackend *blk, BlockRequest *reqs, int num_reqs)
{
    int i, ret;

    for (i = 0; i < num_reqs; i++) {
        ret = blk_check_request(blk, reqs[i].sector, reqs[i].nb_sectors);
        if (ret < 0) {
            return ret;
        }
    }

    return bdrv_aio_multiwrite(blk->bs, reqs, num_reqs);
}

int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
    return bdrv_ioctl(blk->bs, req, buf);
}

BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
                          BlockCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_ioctl(blk->bs, req, buf, cb, opaque);
}

int blk_co_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_co_discard(blk->bs, sector_num, nb_sectors);
}

int blk_co_flush(BlockBackend *blk)
{
    return bdrv_co_flush(blk->bs);
}

int blk_flush(BlockBackend *blk)
{
    return bdrv_flush(blk->bs);
}

int blk_flush_all(void)
{
    return bdrv_flush_all();
}

void blk_drain_all(void)
{
    bdrv_drain_all();
}

BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read)
{
    return bdrv_get_on_error(blk->bs, is_read);
}

BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read,
                                      int error)
{
    return bdrv_get_error_action(blk->bs, is_read, error);
}

void blk_error_action(BlockBackend *blk, BlockErrorAction action,
                      bool is_read, int error)
{
    bdrv_error_action(blk->bs, action, is_read, error);
}
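/*
 * A sketch of how a device model typically consumes the error action
 * helpers above (illustrative comment only; the function is
 * hypothetical, and @ret is the negative errno from a failed request):
 *
 *     static bool my_dev_handle_rw_error(BlockBackend *blk, int ret,
 *                                        bool is_read)
 *     {
 *         BlockErrorAction action = blk_get_error_action(blk, is_read,
 *                                                        -ret);
 *         if (action == BLOCK_ERROR_ACTION_STOP) {
 *             ... queue the request so it can be retried on resume ...
 *         }
 *         blk_error_action(blk, action, is_read, -ret);
 *         return action != BLOCK_ERROR_ACTION_IGNORE;
 *     }
 */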
int blk_is_read_only(BlockBackend *blk)
{
    return bdrv_is_read_only(blk->bs);
}

int blk_is_sg(BlockBackend *blk)
{
    return bdrv_is_sg(blk->bs);
}

int blk_enable_write_cache(BlockBackend *blk)
{
    return bdrv_enable_write_cache(blk->bs);
}

void blk_set_enable_write_cache(BlockBackend *blk, bool wce)
{
    bdrv_set_enable_write_cache(blk->bs, wce);
}

void blk_invalidate_cache(BlockBackend *blk, Error **errp)
{
    bdrv_invalidate_cache(blk->bs, errp);
}

int blk_is_inserted(BlockBackend *blk)
{
    return bdrv_is_inserted(blk->bs);
}

void blk_lock_medium(BlockBackend *blk, bool locked)
{
    bdrv_lock_medium(blk->bs, locked);
}

void blk_eject(BlockBackend *blk, bool eject_flag)
{
    bdrv_eject(blk->bs, eject_flag);
}

int blk_get_flags(BlockBackend *blk)
{
    return bdrv_get_flags(blk->bs);
}

int blk_get_max_transfer_length(BlockBackend *blk)
{
    return blk->bs->bl.max_transfer_length;
}

void blk_set_guest_block_size(BlockBackend *blk, int align)
{
    bdrv_set_guest_block_size(blk->bs, align);
}

void *blk_blockalign(BlockBackend *blk, size_t size)
{
    return qemu_blockalign(blk ? blk->bs : NULL, size);
}

bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp)
{
    return bdrv_op_is_blocked(blk->bs, op, errp);
}

void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason)
{
    bdrv_op_unblock(blk->bs, op, reason);
}

void blk_op_block_all(BlockBackend *blk, Error *reason)
{
    bdrv_op_block_all(blk->bs, reason);
}

void blk_op_unblock_all(BlockBackend *blk, Error *reason)
{
    bdrv_op_unblock_all(blk->bs, reason);
}

AioContext *blk_get_aio_context(BlockBackend *blk)
{
    return bdrv_get_aio_context(blk->bs);
}

void blk_set_aio_context(BlockBackend *blk, AioContext *new_context)
{
    bdrv_set_aio_context(blk->bs, new_context);
}

void blk_add_aio_context_notifier(BlockBackend *blk,
        void (*attached_aio_context)(AioContext *new_context, void *opaque),
        void (*detach_aio_context)(void *opaque), void *opaque)
{
    bdrv_add_aio_context_notifier(blk->bs, attached_aio_context,
                                  detach_aio_context, opaque);
}

void blk_remove_aio_context_notifier(BlockBackend *blk,
                                     void (*attached_aio_context)(AioContext *,
                                                                  void *),
                                     void (*detach_aio_context)(void *),
                                     void *opaque)
{
    bdrv_remove_aio_context_notifier(blk->bs, attached_aio_context,
                                     detach_aio_context, opaque);
}

void blk_add_close_notifier(BlockBackend *blk, Notifier *notify)
{
    bdrv_add_close_notifier(blk->bs, notify);
}

void blk_io_plug(BlockBackend *blk)
{
    bdrv_io_plug(blk->bs);
}

void blk_io_unplug(BlockBackend *blk)
{
    bdrv_io_unplug(blk->bs);
}

BlockAcctStats *blk_get_stats(BlockBackend *blk)
{
    return bdrv_get_stats(blk->bs);
}

void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
                  BlockCompletionFunc *cb, void *opaque)
{
    return qemu_aio_get(aiocb_info, blk_bs(blk), cb, opaque);
}

int coroutine_fn blk_co_write_zeroes(BlockBackend *blk, int64_t sector_num,
                                     int nb_sectors, BdrvRequestFlags flags)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_co_write_zeroes(blk->bs, sector_num, nb_sectors, flags);
}

int blk_write_compressed(BlockBackend *blk, int64_t sector_num,
                         const uint8_t *buf, int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_write_compressed(blk->bs, sector_num, buf, nb_sectors);
}

int blk_truncate(BlockBackend *blk, int64_t offset)
{
    return bdrv_truncate(blk->bs, offset);
}

int blk_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_discard(blk->bs, sector_num, nb_sectors);
}

int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
                     int64_t pos, int size)
{
    return bdrv_save_vmstate(blk->bs, buf, pos, size);
}

int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size)
{
    return bdrv_load_vmstate(blk->bs, buf, pos, size);
}

int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz)
{
    return bdrv_probe_blocksizes(blk->bs, bsz);
}

int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo)
{
    return bdrv_probe_geometry(blk->bs, geo);
}
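/*
 * A closing usage sketch for the AioContext notifiers above
 * (illustrative comment only; MyState and the my_* callbacks are
 * hypothetical).  A user that caches the backend's AioContext can keep
 * its cache coherent like this:
 *
 *     static void my_attached_aio_context(AioContext *new_context,
 *                                         void *opaque)
 *     {
 *         MyState *s = opaque;
 *         s->ctx = new_context;
 *     }
 *
 *     static void my_detach_aio_context(void *opaque)
 *     {
 *         MyState *s = opaque;
 *         s->ctx = NULL;
 *     }
 *
 *     blk_add_aio_context_notifier(blk, my_attached_aio_context,
 *                                  my_detach_aio_context, s);
 */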