1 /* 2 * QEMU Block backends 3 * 4 * Copyright (C) 2014-2016 Red Hat, Inc. 5 * 6 * Authors: 7 * Markus Armbruster <armbru@redhat.com>, 8 * 9 * This work is licensed under the terms of the GNU LGPL, version 2.1 10 * or later. See the COPYING.LIB file in the top-level directory. 11 */ 12 13 #include "qemu/osdep.h" 14 #include "sysemu/block-backend.h" 15 #include "block/block_int.h" 16 #include "block/blockjob.h" 17 #include "block/throttle-groups.h" 18 #include "sysemu/blockdev.h" 19 #include "sysemu/sysemu.h" 20 #include "qapi/error.h" 21 #include "qapi/qapi-events-block.h" 22 #include "qemu/id.h" 23 #include "qemu/option.h" 24 #include "trace.h" 25 #include "migration/misc.h" 26 27 /* Number of coroutines to reserve per attached device model */ 28 #define COROUTINE_POOL_RESERVATION 64 29 30 #define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */ 31 32 static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb); 33 34 typedef struct BlockBackendAioNotifier { 35 void (*attached_aio_context)(AioContext *new_context, void *opaque); 36 void (*detach_aio_context)(void *opaque); 37 void *opaque; 38 QLIST_ENTRY(BlockBackendAioNotifier) list; 39 } BlockBackendAioNotifier; 40 41 struct BlockBackend { 42 char *name; 43 int refcnt; 44 BdrvChild *root; 45 AioContext *ctx; 46 DriveInfo *legacy_dinfo; /* null unless created by drive_new() */ 47 QTAILQ_ENTRY(BlockBackend) link; /* for block_backends */ 48 QTAILQ_ENTRY(BlockBackend) monitor_link; /* for monitor_block_backends */ 49 BlockBackendPublic public; 50 51 DeviceState *dev; /* attached device model, if any */ 52 const BlockDevOps *dev_ops; 53 void *dev_opaque; 54 55 /* the block size for which the guest device expects atomicity */ 56 int guest_block_size; 57 58 /* If the BDS tree is removed, some of its options are stored here (which 59 * can be used to restore those options in the new BDS on insert) */ 60 BlockBackendRootState root_state; 61 62 bool enable_write_cache; 63 64 /* I/O stats (display with "info blockstats"). */ 65 BlockAcctStats stats; 66 67 BlockdevOnError on_read_error, on_write_error; 68 bool iostatus_enabled; 69 BlockDeviceIoStatus iostatus; 70 71 uint64_t perm; 72 uint64_t shared_perm; 73 bool disable_perm; 74 75 bool allow_aio_context_change; 76 bool allow_write_beyond_eof; 77 78 NotifierList remove_bs_notifiers, insert_bs_notifiers; 79 QLIST_HEAD(, BlockBackendAioNotifier) aio_notifiers; 80 81 int quiesce_counter; 82 VMChangeStateEntry *vmsh; 83 bool force_allow_inactivate; 84 85 /* Number of in-flight aio requests. BlockDriverState also counts 86 * in-flight requests but aio requests can exist even when blk->root is 87 * NULL, so we cannot rely on its counter for that case. 88 * Accessed with atomic ops. 89 */ 90 unsigned int in_flight; 91 }; 92 93 typedef struct BlockBackendAIOCB { 94 BlockAIOCB common; 95 BlockBackend *blk; 96 int ret; 97 } BlockBackendAIOCB; 98 99 static const AIOCBInfo block_backend_aiocb_info = { 100 .get_aio_context = blk_aiocb_get_aio_context, 101 .aiocb_size = sizeof(BlockBackendAIOCB), 102 }; 103 104 static void drive_info_del(DriveInfo *dinfo); 105 static BlockBackend *bdrv_first_blk(BlockDriverState *bs); 106 107 /* All BlockBackends */ 108 static QTAILQ_HEAD(, BlockBackend) block_backends = 109 QTAILQ_HEAD_INITIALIZER(block_backends); 110 111 /* All BlockBackends referenced by the monitor and which are iterated through by 112 * blk_next() */ 113 static QTAILQ_HEAD(, BlockBackend) monitor_block_backends = 114 QTAILQ_HEAD_INITIALIZER(monitor_block_backends); 115 116 static void blk_root_inherit_options(int *child_flags, QDict *child_options, 117 int parent_flags, QDict *parent_options) 118 { 119 /* We're not supposed to call this function for root nodes */ 120 abort(); 121 } 122 static void blk_root_drained_begin(BdrvChild *child); 123 static bool blk_root_drained_poll(BdrvChild *child); 124 static void blk_root_drained_end(BdrvChild *child, int *drained_end_counter); 125 126 static void blk_root_change_media(BdrvChild *child, bool load); 127 static void blk_root_resize(BdrvChild *child); 128 129 static bool blk_root_can_set_aio_ctx(BdrvChild *child, AioContext *ctx, 130 GSList **ignore, Error **errp); 131 static void blk_root_set_aio_ctx(BdrvChild *child, AioContext *ctx, 132 GSList **ignore); 133 134 static char *blk_root_get_parent_desc(BdrvChild *child) 135 { 136 BlockBackend *blk = child->opaque; 137 char *dev_id; 138 139 if (blk->name) { 140 return g_strdup(blk->name); 141 } 142 143 dev_id = blk_get_attached_dev_id(blk); 144 if (*dev_id) { 145 return dev_id; 146 } else { 147 /* TODO Callback into the BB owner for something more detailed */ 148 g_free(dev_id); 149 return g_strdup("a block device"); 150 } 151 } 152 153 static const char *blk_root_get_name(BdrvChild *child) 154 { 155 return blk_name(child->opaque); 156 } 157 158 static void blk_vm_state_changed(void *opaque, int running, RunState state) 159 { 160 Error *local_err = NULL; 161 BlockBackend *blk = opaque; 162 163 if (state == RUN_STATE_INMIGRATE) { 164 return; 165 } 166 167 qemu_del_vm_change_state_handler(blk->vmsh); 168 blk->vmsh = NULL; 169 blk_set_perm(blk, blk->perm, blk->shared_perm, &local_err); 170 if (local_err) { 171 error_report_err(local_err); 172 } 173 } 174 175 /* 176 * Notifies the user of the BlockBackend that migration has completed. qdev 177 * devices can tighten their permissions in response (specifically revoke 178 * shared write permissions that we needed for storage migration). 179 * 180 * If an error is returned, the VM cannot be allowed to be resumed. 181 */ 182 static void blk_root_activate(BdrvChild *child, Error **errp) 183 { 184 BlockBackend *blk = child->opaque; 185 Error *local_err = NULL; 186 187 if (!blk->disable_perm) { 188 return; 189 } 190 191 blk->disable_perm = false; 192 193 blk_set_perm(blk, blk->perm, BLK_PERM_ALL, &local_err); 194 if (local_err) { 195 error_propagate(errp, local_err); 196 blk->disable_perm = true; 197 return; 198 } 199 200 if (runstate_check(RUN_STATE_INMIGRATE)) { 201 /* Activation can happen when migration process is still active, for 202 * example when nbd_server_add is called during non-shared storage 203 * migration. Defer the shared_perm update to migration completion. */ 204 if (!blk->vmsh) { 205 blk->vmsh = qemu_add_vm_change_state_handler(blk_vm_state_changed, 206 blk); 207 } 208 return; 209 } 210 211 blk_set_perm(blk, blk->perm, blk->shared_perm, &local_err); 212 if (local_err) { 213 error_propagate(errp, local_err); 214 blk->disable_perm = true; 215 return; 216 } 217 } 218 219 void blk_set_force_allow_inactivate(BlockBackend *blk) 220 { 221 blk->force_allow_inactivate = true; 222 } 223 224 static bool blk_can_inactivate(BlockBackend *blk) 225 { 226 /* If it is a guest device, inactivate is ok. */ 227 if (blk->dev || blk_name(blk)[0]) { 228 return true; 229 } 230 231 /* Inactivating means no more writes to the image can be done, 232 * even if those writes would be changes invisible to the 233 * guest. For block job BBs that satisfy this, we can just allow 234 * it. This is the case for mirror job source, which is required 235 * by libvirt non-shared block migration. */ 236 if (!(blk->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED))) { 237 return true; 238 } 239 240 return blk->force_allow_inactivate; 241 } 242 243 static int blk_root_inactivate(BdrvChild *child) 244 { 245 BlockBackend *blk = child->opaque; 246 247 if (blk->disable_perm) { 248 return 0; 249 } 250 251 if (!blk_can_inactivate(blk)) { 252 return -EPERM; 253 } 254 255 blk->disable_perm = true; 256 if (blk->root) { 257 bdrv_child_try_set_perm(blk->root, 0, BLK_PERM_ALL, &error_abort); 258 } 259 260 return 0; 261 } 262 263 static void blk_root_attach(BdrvChild *child) 264 { 265 BlockBackend *blk = child->opaque; 266 BlockBackendAioNotifier *notifier; 267 268 trace_blk_root_attach(child, blk, child->bs); 269 270 QLIST_FOREACH(notifier, &blk->aio_notifiers, list) { 271 bdrv_add_aio_context_notifier(child->bs, 272 notifier->attached_aio_context, 273 notifier->detach_aio_context, 274 notifier->opaque); 275 } 276 } 277 278 static void blk_root_detach(BdrvChild *child) 279 { 280 BlockBackend *blk = child->opaque; 281 BlockBackendAioNotifier *notifier; 282 283 trace_blk_root_detach(child, blk, child->bs); 284 285 QLIST_FOREACH(notifier, &blk->aio_notifiers, list) { 286 bdrv_remove_aio_context_notifier(child->bs, 287 notifier->attached_aio_context, 288 notifier->detach_aio_context, 289 notifier->opaque); 290 } 291 } 292 293 static const BdrvChildRole child_root = { 294 .inherit_options = blk_root_inherit_options, 295 296 .change_media = blk_root_change_media, 297 .resize = blk_root_resize, 298 .get_name = blk_root_get_name, 299 .get_parent_desc = blk_root_get_parent_desc, 300 301 .drained_begin = blk_root_drained_begin, 302 .drained_poll = blk_root_drained_poll, 303 .drained_end = blk_root_drained_end, 304 305 .activate = blk_root_activate, 306 .inactivate = blk_root_inactivate, 307 308 .attach = blk_root_attach, 309 .detach = blk_root_detach, 310 311 .can_set_aio_ctx = blk_root_can_set_aio_ctx, 312 .set_aio_ctx = blk_root_set_aio_ctx, 313 }; 314 315 /* 316 * Create a new BlockBackend with a reference count of one. 317 * 318 * @perm is a bitmasks of BLK_PERM_* constants which describes the permissions 319 * to request for a block driver node that is attached to this BlockBackend. 320 * @shared_perm is a bitmask which describes which permissions may be granted 321 * to other users of the attached node. 322 * Both sets of permissions can be changed later using blk_set_perm(). 323 * 324 * Return the new BlockBackend on success, null on failure. 325 */ 326 BlockBackend *blk_new(AioContext *ctx, uint64_t perm, uint64_t shared_perm) 327 { 328 BlockBackend *blk; 329 330 blk = g_new0(BlockBackend, 1); 331 blk->refcnt = 1; 332 blk->ctx = ctx; 333 blk->perm = perm; 334 blk->shared_perm = shared_perm; 335 blk_set_enable_write_cache(blk, true); 336 337 blk->on_read_error = BLOCKDEV_ON_ERROR_REPORT; 338 blk->on_write_error = BLOCKDEV_ON_ERROR_ENOSPC; 339 340 block_acct_init(&blk->stats); 341 342 notifier_list_init(&blk->remove_bs_notifiers); 343 notifier_list_init(&blk->insert_bs_notifiers); 344 QLIST_INIT(&blk->aio_notifiers); 345 346 QTAILQ_INSERT_TAIL(&block_backends, blk, link); 347 return blk; 348 } 349 350 /* 351 * Creates a new BlockBackend, opens a new BlockDriverState, and connects both. 352 * The new BlockBackend is in the main AioContext. 353 * 354 * Just as with bdrv_open(), after having called this function the reference to 355 * @options belongs to the block layer (even on failure). 356 * 357 * TODO: Remove @filename and @flags; it should be possible to specify a whole 358 * BDS tree just by specifying the @options QDict (or @reference, 359 * alternatively). At the time of adding this function, this is not possible, 360 * though, so callers of this function have to be able to specify @filename and 361 * @flags. 362 */ 363 BlockBackend *blk_new_open(const char *filename, const char *reference, 364 QDict *options, int flags, Error **errp) 365 { 366 BlockBackend *blk; 367 BlockDriverState *bs; 368 uint64_t perm = 0; 369 370 /* blk_new_open() is mainly used in .bdrv_create implementations and the 371 * tools where sharing isn't a concern because the BDS stays private, so we 372 * just request permission according to the flags. 373 * 374 * The exceptions are xen_disk and blockdev_init(); in these cases, the 375 * caller of blk_new_open() doesn't make use of the permissions, but they 376 * shouldn't hurt either. We can still share everything here because the 377 * guest devices will add their own blockers if they can't share. */ 378 if ((flags & BDRV_O_NO_IO) == 0) { 379 perm |= BLK_PERM_CONSISTENT_READ; 380 if (flags & BDRV_O_RDWR) { 381 perm |= BLK_PERM_WRITE; 382 } 383 } 384 if (flags & BDRV_O_RESIZE) { 385 perm |= BLK_PERM_RESIZE; 386 } 387 388 blk = blk_new(qemu_get_aio_context(), perm, BLK_PERM_ALL); 389 bs = bdrv_open(filename, reference, options, flags, errp); 390 if (!bs) { 391 blk_unref(blk); 392 return NULL; 393 } 394 395 blk->root = bdrv_root_attach_child(bs, "root", &child_root, blk->ctx, 396 perm, BLK_PERM_ALL, blk, errp); 397 if (!blk->root) { 398 blk_unref(blk); 399 return NULL; 400 } 401 402 return blk; 403 } 404 405 static void blk_delete(BlockBackend *blk) 406 { 407 assert(!blk->refcnt); 408 assert(!blk->name); 409 assert(!blk->dev); 410 if (blk->public.throttle_group_member.throttle_state) { 411 blk_io_limits_disable(blk); 412 } 413 if (blk->root) { 414 blk_remove_bs(blk); 415 } 416 if (blk->vmsh) { 417 qemu_del_vm_change_state_handler(blk->vmsh); 418 blk->vmsh = NULL; 419 } 420 assert(QLIST_EMPTY(&blk->remove_bs_notifiers.notifiers)); 421 assert(QLIST_EMPTY(&blk->insert_bs_notifiers.notifiers)); 422 assert(QLIST_EMPTY(&blk->aio_notifiers)); 423 QTAILQ_REMOVE(&block_backends, blk, link); 424 drive_info_del(blk->legacy_dinfo); 425 block_acct_cleanup(&blk->stats); 426 g_free(blk); 427 } 428 429 static void drive_info_del(DriveInfo *dinfo) 430 { 431 if (!dinfo) { 432 return; 433 } 434 qemu_opts_del(dinfo->opts); 435 g_free(dinfo); 436 } 437 438 int blk_get_refcnt(BlockBackend *blk) 439 { 440 return blk ? blk->refcnt : 0; 441 } 442 443 /* 444 * Increment @blk's reference count. 445 * @blk must not be null. 446 */ 447 void blk_ref(BlockBackend *blk) 448 { 449 assert(blk->refcnt > 0); 450 blk->refcnt++; 451 } 452 453 /* 454 * Decrement @blk's reference count. 455 * If this drops it to zero, destroy @blk. 456 * For convenience, do nothing if @blk is null. 457 */ 458 void blk_unref(BlockBackend *blk) 459 { 460 if (blk) { 461 assert(blk->refcnt > 0); 462 if (blk->refcnt > 1) { 463 blk->refcnt--; 464 } else { 465 blk_drain(blk); 466 /* blk_drain() cannot resurrect blk, nobody held a reference */ 467 assert(blk->refcnt == 1); 468 blk->refcnt = 0; 469 blk_delete(blk); 470 } 471 } 472 } 473 474 /* 475 * Behaves similarly to blk_next() but iterates over all BlockBackends, even the 476 * ones which are hidden (i.e. are not referenced by the monitor). 477 */ 478 BlockBackend *blk_all_next(BlockBackend *blk) 479 { 480 return blk ? QTAILQ_NEXT(blk, link) 481 : QTAILQ_FIRST(&block_backends); 482 } 483 484 void blk_remove_all_bs(void) 485 { 486 BlockBackend *blk = NULL; 487 488 while ((blk = blk_all_next(blk)) != NULL) { 489 AioContext *ctx = blk_get_aio_context(blk); 490 491 aio_context_acquire(ctx); 492 if (blk->root) { 493 blk_remove_bs(blk); 494 } 495 aio_context_release(ctx); 496 } 497 } 498 499 /* 500 * Return the monitor-owned BlockBackend after @blk. 501 * If @blk is null, return the first one. 502 * Else, return @blk's next sibling, which may be null. 503 * 504 * To iterate over all BlockBackends, do 505 * for (blk = blk_next(NULL); blk; blk = blk_next(blk)) { 506 * ... 507 * } 508 */ 509 BlockBackend *blk_next(BlockBackend *blk) 510 { 511 return blk ? QTAILQ_NEXT(blk, monitor_link) 512 : QTAILQ_FIRST(&monitor_block_backends); 513 } 514 515 /* Iterates over all top-level BlockDriverStates, i.e. BDSs that are owned by 516 * the monitor or attached to a BlockBackend */ 517 BlockDriverState *bdrv_next(BdrvNextIterator *it) 518 { 519 BlockDriverState *bs, *old_bs; 520 521 /* Must be called from the main loop */ 522 assert(qemu_get_current_aio_context() == qemu_get_aio_context()); 523 524 /* First, return all root nodes of BlockBackends. In order to avoid 525 * returning a BDS twice when multiple BBs refer to it, we only return it 526 * if the BB is the first one in the parent list of the BDS. */ 527 if (it->phase == BDRV_NEXT_BACKEND_ROOTS) { 528 BlockBackend *old_blk = it->blk; 529 530 old_bs = old_blk ? blk_bs(old_blk) : NULL; 531 532 do { 533 it->blk = blk_all_next(it->blk); 534 bs = it->blk ? blk_bs(it->blk) : NULL; 535 } while (it->blk && (bs == NULL || bdrv_first_blk(bs) != it->blk)); 536 537 if (it->blk) { 538 blk_ref(it->blk); 539 } 540 blk_unref(old_blk); 541 542 if (bs) { 543 bdrv_ref(bs); 544 bdrv_unref(old_bs); 545 return bs; 546 } 547 it->phase = BDRV_NEXT_MONITOR_OWNED; 548 } else { 549 old_bs = it->bs; 550 } 551 552 /* Then return the monitor-owned BDSes without a BB attached. Ignore all 553 * BDSes that are attached to a BlockBackend here; they have been handled 554 * by the above block already */ 555 do { 556 it->bs = bdrv_next_monitor_owned(it->bs); 557 bs = it->bs; 558 } while (bs && bdrv_has_blk(bs)); 559 560 if (bs) { 561 bdrv_ref(bs); 562 } 563 bdrv_unref(old_bs); 564 565 return bs; 566 } 567 568 static void bdrv_next_reset(BdrvNextIterator *it) 569 { 570 *it = (BdrvNextIterator) { 571 .phase = BDRV_NEXT_BACKEND_ROOTS, 572 }; 573 } 574 575 BlockDriverState *bdrv_first(BdrvNextIterator *it) 576 { 577 bdrv_next_reset(it); 578 return bdrv_next(it); 579 } 580 581 /* Must be called when aborting a bdrv_next() iteration before 582 * bdrv_next() returns NULL */ 583 void bdrv_next_cleanup(BdrvNextIterator *it) 584 { 585 /* Must be called from the main loop */ 586 assert(qemu_get_current_aio_context() == qemu_get_aio_context()); 587 588 if (it->phase == BDRV_NEXT_BACKEND_ROOTS) { 589 if (it->blk) { 590 bdrv_unref(blk_bs(it->blk)); 591 blk_unref(it->blk); 592 } 593 } else { 594 bdrv_unref(it->bs); 595 } 596 597 bdrv_next_reset(it); 598 } 599 600 /* 601 * Add a BlockBackend into the list of backends referenced by the monitor, with 602 * the given @name acting as the handle for the monitor. 603 * Strictly for use by blockdev.c. 604 * 605 * @name must not be null or empty. 606 * 607 * Returns true on success and false on failure. In the latter case, an Error 608 * object is returned through @errp. 609 */ 610 bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp) 611 { 612 assert(!blk->name); 613 assert(name && name[0]); 614 615 if (!id_wellformed(name)) { 616 error_setg(errp, "Invalid device name"); 617 return false; 618 } 619 if (blk_by_name(name)) { 620 error_setg(errp, "Device with id '%s' already exists", name); 621 return false; 622 } 623 if (bdrv_find_node(name)) { 624 error_setg(errp, 625 "Device name '%s' conflicts with an existing node name", 626 name); 627 return false; 628 } 629 630 blk->name = g_strdup(name); 631 QTAILQ_INSERT_TAIL(&monitor_block_backends, blk, monitor_link); 632 return true; 633 } 634 635 /* 636 * Remove a BlockBackend from the list of backends referenced by the monitor. 637 * Strictly for use by blockdev.c. 638 */ 639 void monitor_remove_blk(BlockBackend *blk) 640 { 641 if (!blk->name) { 642 return; 643 } 644 645 QTAILQ_REMOVE(&monitor_block_backends, blk, monitor_link); 646 g_free(blk->name); 647 blk->name = NULL; 648 } 649 650 /* 651 * Return @blk's name, a non-null string. 652 * Returns an empty string iff @blk is not referenced by the monitor. 653 */ 654 const char *blk_name(const BlockBackend *blk) 655 { 656 return blk->name ?: ""; 657 } 658 659 /* 660 * Return the BlockBackend with name @name if it exists, else null. 661 * @name must not be null. 662 */ 663 BlockBackend *blk_by_name(const char *name) 664 { 665 BlockBackend *blk = NULL; 666 667 assert(name); 668 while ((blk = blk_next(blk)) != NULL) { 669 if (!strcmp(name, blk->name)) { 670 return blk; 671 } 672 } 673 return NULL; 674 } 675 676 /* 677 * Return the BlockDriverState attached to @blk if any, else null. 678 */ 679 BlockDriverState *blk_bs(BlockBackend *blk) 680 { 681 return blk->root ? blk->root->bs : NULL; 682 } 683 684 static BlockBackend *bdrv_first_blk(BlockDriverState *bs) 685 { 686 BdrvChild *child; 687 QLIST_FOREACH(child, &bs->parents, next_parent) { 688 if (child->role == &child_root) { 689 return child->opaque; 690 } 691 } 692 693 return NULL; 694 } 695 696 /* 697 * Returns true if @bs has an associated BlockBackend. 698 */ 699 bool bdrv_has_blk(BlockDriverState *bs) 700 { 701 return bdrv_first_blk(bs) != NULL; 702 } 703 704 /* 705 * Returns true if @bs has only BlockBackends as parents. 706 */ 707 bool bdrv_is_root_node(BlockDriverState *bs) 708 { 709 BdrvChild *c; 710 711 QLIST_FOREACH(c, &bs->parents, next_parent) { 712 if (c->role != &child_root) { 713 return false; 714 } 715 } 716 717 return true; 718 } 719 720 /* 721 * Return @blk's DriveInfo if any, else null. 722 */ 723 DriveInfo *blk_legacy_dinfo(BlockBackend *blk) 724 { 725 return blk->legacy_dinfo; 726 } 727 728 /* 729 * Set @blk's DriveInfo to @dinfo, and return it. 730 * @blk must not have a DriveInfo set already. 731 * No other BlockBackend may have the same DriveInfo set. 732 */ 733 DriveInfo *blk_set_legacy_dinfo(BlockBackend *blk, DriveInfo *dinfo) 734 { 735 assert(!blk->legacy_dinfo); 736 return blk->legacy_dinfo = dinfo; 737 } 738 739 /* 740 * Return the BlockBackend with DriveInfo @dinfo. 741 * It must exist. 742 */ 743 BlockBackend *blk_by_legacy_dinfo(DriveInfo *dinfo) 744 { 745 BlockBackend *blk = NULL; 746 747 while ((blk = blk_next(blk)) != NULL) { 748 if (blk->legacy_dinfo == dinfo) { 749 return blk; 750 } 751 } 752 abort(); 753 } 754 755 /* 756 * Returns a pointer to the publicly accessible fields of @blk. 757 */ 758 BlockBackendPublic *blk_get_public(BlockBackend *blk) 759 { 760 return &blk->public; 761 } 762 763 /* 764 * Returns a BlockBackend given the associated @public fields. 765 */ 766 BlockBackend *blk_by_public(BlockBackendPublic *public) 767 { 768 return container_of(public, BlockBackend, public); 769 } 770 771 /* 772 * Disassociates the currently associated BlockDriverState from @blk. 773 */ 774 void blk_remove_bs(BlockBackend *blk) 775 { 776 ThrottleGroupMember *tgm = &blk->public.throttle_group_member; 777 BlockDriverState *bs; 778 779 notifier_list_notify(&blk->remove_bs_notifiers, blk); 780 if (tgm->throttle_state) { 781 bs = blk_bs(blk); 782 bdrv_drained_begin(bs); 783 throttle_group_detach_aio_context(tgm); 784 throttle_group_attach_aio_context(tgm, qemu_get_aio_context()); 785 bdrv_drained_end(bs); 786 } 787 788 blk_update_root_state(blk); 789 790 /* bdrv_root_unref_child() will cause blk->root to become stale and may 791 * switch to a completion coroutine later on. Let's drain all I/O here 792 * to avoid that and a potential QEMU crash. 793 */ 794 blk_drain(blk); 795 bdrv_root_unref_child(blk->root); 796 blk->root = NULL; 797 } 798 799 /* 800 * Associates a new BlockDriverState with @blk. 801 */ 802 int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp) 803 { 804 ThrottleGroupMember *tgm = &blk->public.throttle_group_member; 805 bdrv_ref(bs); 806 blk->root = bdrv_root_attach_child(bs, "root", &child_root, blk->ctx, 807 blk->perm, blk->shared_perm, blk, errp); 808 if (blk->root == NULL) { 809 return -EPERM; 810 } 811 812 notifier_list_notify(&blk->insert_bs_notifiers, blk); 813 if (tgm->throttle_state) { 814 throttle_group_detach_aio_context(tgm); 815 throttle_group_attach_aio_context(tgm, bdrv_get_aio_context(bs)); 816 } 817 818 return 0; 819 } 820 821 /* 822 * Sets the permission bitmasks that the user of the BlockBackend needs. 823 */ 824 int blk_set_perm(BlockBackend *blk, uint64_t perm, uint64_t shared_perm, 825 Error **errp) 826 { 827 int ret; 828 829 if (blk->root && !blk->disable_perm) { 830 ret = bdrv_child_try_set_perm(blk->root, perm, shared_perm, errp); 831 if (ret < 0) { 832 return ret; 833 } 834 } 835 836 blk->perm = perm; 837 blk->shared_perm = shared_perm; 838 839 return 0; 840 } 841 842 void blk_get_perm(BlockBackend *blk, uint64_t *perm, uint64_t *shared_perm) 843 { 844 *perm = blk->perm; 845 *shared_perm = blk->shared_perm; 846 } 847 848 /* 849 * Attach device model @dev to @blk. 850 * Return 0 on success, -EBUSY when a device model is attached already. 851 */ 852 int blk_attach_dev(BlockBackend *blk, DeviceState *dev) 853 { 854 if (blk->dev) { 855 return -EBUSY; 856 } 857 858 /* While migration is still incoming, we don't need to apply the 859 * permissions of guest device BlockBackends. We might still have a block 860 * job or NBD server writing to the image for storage migration. */ 861 if (runstate_check(RUN_STATE_INMIGRATE)) { 862 blk->disable_perm = true; 863 } 864 865 blk_ref(blk); 866 blk->dev = dev; 867 blk_iostatus_reset(blk); 868 869 return 0; 870 } 871 872 /* 873 * Detach device model @dev from @blk. 874 * @dev must be currently attached to @blk. 875 */ 876 void blk_detach_dev(BlockBackend *blk, DeviceState *dev) 877 { 878 assert(blk->dev == dev); 879 blk->dev = NULL; 880 blk->dev_ops = NULL; 881 blk->dev_opaque = NULL; 882 blk->guest_block_size = 512; 883 blk_set_perm(blk, 0, BLK_PERM_ALL, &error_abort); 884 blk_unref(blk); 885 } 886 887 /* 888 * Return the device model attached to @blk if any, else null. 889 */ 890 DeviceState *blk_get_attached_dev(BlockBackend *blk) 891 { 892 return blk->dev; 893 } 894 895 /* Return the qdev ID, or if no ID is assigned the QOM path, of the block 896 * device attached to the BlockBackend. */ 897 char *blk_get_attached_dev_id(BlockBackend *blk) 898 { 899 DeviceState *dev = blk->dev; 900 901 if (!dev) { 902 return g_strdup(""); 903 } else if (dev->id) { 904 return g_strdup(dev->id); 905 } 906 907 return object_get_canonical_path(OBJECT(dev)) ?: g_strdup(""); 908 } 909 910 /* 911 * Return the BlockBackend which has the device model @dev attached if it 912 * exists, else null. 913 * 914 * @dev must not be null. 915 */ 916 BlockBackend *blk_by_dev(void *dev) 917 { 918 BlockBackend *blk = NULL; 919 920 assert(dev != NULL); 921 while ((blk = blk_all_next(blk)) != NULL) { 922 if (blk->dev == dev) { 923 return blk; 924 } 925 } 926 return NULL; 927 } 928 929 /* 930 * Set @blk's device model callbacks to @ops. 931 * @opaque is the opaque argument to pass to the callbacks. 932 * This is for use by device models. 933 */ 934 void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops, 935 void *opaque) 936 { 937 blk->dev_ops = ops; 938 blk->dev_opaque = opaque; 939 940 /* Are we currently quiesced? Should we enforce this right now? */ 941 if (blk->quiesce_counter && ops->drained_begin) { 942 ops->drained_begin(opaque); 943 } 944 } 945 946 /* 947 * Notify @blk's attached device model of media change. 948 * 949 * If @load is true, notify of media load. This action can fail, meaning that 950 * the medium cannot be loaded. @errp is set then. 951 * 952 * If @load is false, notify of media eject. This can never fail. 953 * 954 * Also send DEVICE_TRAY_MOVED events as appropriate. 955 */ 956 void blk_dev_change_media_cb(BlockBackend *blk, bool load, Error **errp) 957 { 958 if (blk->dev_ops && blk->dev_ops->change_media_cb) { 959 bool tray_was_open, tray_is_open; 960 Error *local_err = NULL; 961 962 tray_was_open = blk_dev_is_tray_open(blk); 963 blk->dev_ops->change_media_cb(blk->dev_opaque, load, &local_err); 964 if (local_err) { 965 assert(load == true); 966 error_propagate(errp, local_err); 967 return; 968 } 969 tray_is_open = blk_dev_is_tray_open(blk); 970 971 if (tray_was_open != tray_is_open) { 972 char *id = blk_get_attached_dev_id(blk); 973 qapi_event_send_device_tray_moved(blk_name(blk), id, tray_is_open); 974 g_free(id); 975 } 976 } 977 } 978 979 static void blk_root_change_media(BdrvChild *child, bool load) 980 { 981 blk_dev_change_media_cb(child->opaque, load, NULL); 982 } 983 984 /* 985 * Does @blk's attached device model have removable media? 986 * %true if no device model is attached. 987 */ 988 bool blk_dev_has_removable_media(BlockBackend *blk) 989 { 990 return !blk->dev || (blk->dev_ops && blk->dev_ops->change_media_cb); 991 } 992 993 /* 994 * Does @blk's attached device model have a tray? 995 */ 996 bool blk_dev_has_tray(BlockBackend *blk) 997 { 998 return blk->dev_ops && blk->dev_ops->is_tray_open; 999 } 1000 1001 /* 1002 * Notify @blk's attached device model of a media eject request. 1003 * If @force is true, the medium is about to be yanked out forcefully. 1004 */ 1005 void blk_dev_eject_request(BlockBackend *blk, bool force) 1006 { 1007 if (blk->dev_ops && blk->dev_ops->eject_request_cb) { 1008 blk->dev_ops->eject_request_cb(blk->dev_opaque, force); 1009 } 1010 } 1011 1012 /* 1013 * Does @blk's attached device model have a tray, and is it open? 1014 */ 1015 bool blk_dev_is_tray_open(BlockBackend *blk) 1016 { 1017 if (blk_dev_has_tray(blk)) { 1018 return blk->dev_ops->is_tray_open(blk->dev_opaque); 1019 } 1020 return false; 1021 } 1022 1023 /* 1024 * Does @blk's attached device model have the medium locked? 1025 * %false if the device model has no such lock. 1026 */ 1027 bool blk_dev_is_medium_locked(BlockBackend *blk) 1028 { 1029 if (blk->dev_ops && blk->dev_ops->is_medium_locked) { 1030 return blk->dev_ops->is_medium_locked(blk->dev_opaque); 1031 } 1032 return false; 1033 } 1034 1035 /* 1036 * Notify @blk's attached device model of a backend size change. 1037 */ 1038 static void blk_root_resize(BdrvChild *child) 1039 { 1040 BlockBackend *blk = child->opaque; 1041 1042 if (blk->dev_ops && blk->dev_ops->resize_cb) { 1043 blk->dev_ops->resize_cb(blk->dev_opaque); 1044 } 1045 } 1046 1047 void blk_iostatus_enable(BlockBackend *blk) 1048 { 1049 blk->iostatus_enabled = true; 1050 blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK; 1051 } 1052 1053 /* The I/O status is only enabled if the drive explicitly 1054 * enables it _and_ the VM is configured to stop on errors */ 1055 bool blk_iostatus_is_enabled(const BlockBackend *blk) 1056 { 1057 return (blk->iostatus_enabled && 1058 (blk->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC || 1059 blk->on_write_error == BLOCKDEV_ON_ERROR_STOP || 1060 blk->on_read_error == BLOCKDEV_ON_ERROR_STOP)); 1061 } 1062 1063 BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk) 1064 { 1065 return blk->iostatus; 1066 } 1067 1068 void blk_iostatus_disable(BlockBackend *blk) 1069 { 1070 blk->iostatus_enabled = false; 1071 } 1072 1073 void blk_iostatus_reset(BlockBackend *blk) 1074 { 1075 if (blk_iostatus_is_enabled(blk)) { 1076 blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK; 1077 } 1078 } 1079 1080 void blk_iostatus_set_err(BlockBackend *blk, int error) 1081 { 1082 assert(blk_iostatus_is_enabled(blk)); 1083 if (blk->iostatus == BLOCK_DEVICE_IO_STATUS_OK) { 1084 blk->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE : 1085 BLOCK_DEVICE_IO_STATUS_FAILED; 1086 } 1087 } 1088 1089 void blk_set_allow_write_beyond_eof(BlockBackend *blk, bool allow) 1090 { 1091 blk->allow_write_beyond_eof = allow; 1092 } 1093 1094 void blk_set_allow_aio_context_change(BlockBackend *blk, bool allow) 1095 { 1096 blk->allow_aio_context_change = allow; 1097 } 1098 1099 static int blk_check_byte_request(BlockBackend *blk, int64_t offset, 1100 size_t size) 1101 { 1102 int64_t len; 1103 1104 if (size > INT_MAX) { 1105 return -EIO; 1106 } 1107 1108 if (!blk_is_available(blk)) { 1109 return -ENOMEDIUM; 1110 } 1111 1112 if (offset < 0) { 1113 return -EIO; 1114 } 1115 1116 if (!blk->allow_write_beyond_eof) { 1117 len = blk_getlength(blk); 1118 if (len < 0) { 1119 return len; 1120 } 1121 1122 if (offset > len || len - offset < size) { 1123 return -EIO; 1124 } 1125 } 1126 1127 return 0; 1128 } 1129 1130 int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset, 1131 unsigned int bytes, QEMUIOVector *qiov, 1132 BdrvRequestFlags flags) 1133 { 1134 int ret; 1135 BlockDriverState *bs = blk_bs(blk); 1136 1137 trace_blk_co_preadv(blk, bs, offset, bytes, flags); 1138 1139 ret = blk_check_byte_request(blk, offset, bytes); 1140 if (ret < 0) { 1141 return ret; 1142 } 1143 1144 bdrv_inc_in_flight(bs); 1145 1146 /* throttling disk I/O */ 1147 if (blk->public.throttle_group_member.throttle_state) { 1148 throttle_group_co_io_limits_intercept(&blk->public.throttle_group_member, 1149 bytes, false); 1150 } 1151 1152 ret = bdrv_co_preadv(blk->root, offset, bytes, qiov, flags); 1153 bdrv_dec_in_flight(bs); 1154 return ret; 1155 } 1156 1157 int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset, 1158 unsigned int bytes, QEMUIOVector *qiov, 1159 BdrvRequestFlags flags) 1160 { 1161 int ret; 1162 BlockDriverState *bs = blk_bs(blk); 1163 1164 trace_blk_co_pwritev(blk, bs, offset, bytes, flags); 1165 1166 ret = blk_check_byte_request(blk, offset, bytes); 1167 if (ret < 0) { 1168 return ret; 1169 } 1170 1171 bdrv_inc_in_flight(bs); 1172 /* throttling disk I/O */ 1173 if (blk->public.throttle_group_member.throttle_state) { 1174 throttle_group_co_io_limits_intercept(&blk->public.throttle_group_member, 1175 bytes, true); 1176 } 1177 1178 if (!blk->enable_write_cache) { 1179 flags |= BDRV_REQ_FUA; 1180 } 1181 1182 ret = bdrv_co_pwritev(blk->root, offset, bytes, qiov, flags); 1183 bdrv_dec_in_flight(bs); 1184 return ret; 1185 } 1186 1187 typedef struct BlkRwCo { 1188 BlockBackend *blk; 1189 int64_t offset; 1190 void *iobuf; 1191 int ret; 1192 BdrvRequestFlags flags; 1193 } BlkRwCo; 1194 1195 static void blk_read_entry(void *opaque) 1196 { 1197 BlkRwCo *rwco = opaque; 1198 QEMUIOVector *qiov = rwco->iobuf; 1199 1200 rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, qiov->size, 1201 qiov, rwco->flags); 1202 aio_wait_kick(); 1203 } 1204 1205 static void blk_write_entry(void *opaque) 1206 { 1207 BlkRwCo *rwco = opaque; 1208 QEMUIOVector *qiov = rwco->iobuf; 1209 1210 rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset, qiov->size, 1211 qiov, rwco->flags); 1212 aio_wait_kick(); 1213 } 1214 1215 static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf, 1216 int64_t bytes, CoroutineEntry co_entry, 1217 BdrvRequestFlags flags) 1218 { 1219 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes); 1220 BlkRwCo rwco = { 1221 .blk = blk, 1222 .offset = offset, 1223 .iobuf = &qiov, 1224 .flags = flags, 1225 .ret = NOT_DONE, 1226 }; 1227 1228 if (qemu_in_coroutine()) { 1229 /* Fast-path if already in coroutine context */ 1230 co_entry(&rwco); 1231 } else { 1232 Coroutine *co = qemu_coroutine_create(co_entry, &rwco); 1233 bdrv_coroutine_enter(blk_bs(blk), co); 1234 BDRV_POLL_WHILE(blk_bs(blk), rwco.ret == NOT_DONE); 1235 } 1236 1237 return rwco.ret; 1238 } 1239 1240 int blk_pread_unthrottled(BlockBackend *blk, int64_t offset, uint8_t *buf, 1241 int count) 1242 { 1243 int ret; 1244 1245 ret = blk_check_byte_request(blk, offset, count); 1246 if (ret < 0) { 1247 return ret; 1248 } 1249 1250 blk_root_drained_begin(blk->root); 1251 ret = blk_pread(blk, offset, buf, count); 1252 blk_root_drained_end(blk->root, NULL); 1253 return ret; 1254 } 1255 1256 int blk_pwrite_zeroes(BlockBackend *blk, int64_t offset, 1257 int bytes, BdrvRequestFlags flags) 1258 { 1259 return blk_prw(blk, offset, NULL, bytes, blk_write_entry, 1260 flags | BDRV_REQ_ZERO_WRITE); 1261 } 1262 1263 int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags) 1264 { 1265 return bdrv_make_zero(blk->root, flags); 1266 } 1267 1268 void blk_inc_in_flight(BlockBackend *blk) 1269 { 1270 atomic_inc(&blk->in_flight); 1271 } 1272 1273 void blk_dec_in_flight(BlockBackend *blk) 1274 { 1275 atomic_dec(&blk->in_flight); 1276 aio_wait_kick(); 1277 } 1278 1279 static void error_callback_bh(void *opaque) 1280 { 1281 struct BlockBackendAIOCB *acb = opaque; 1282 1283 blk_dec_in_flight(acb->blk); 1284 acb->common.cb(acb->common.opaque, acb->ret); 1285 qemu_aio_unref(acb); 1286 } 1287 1288 BlockAIOCB *blk_abort_aio_request(BlockBackend *blk, 1289 BlockCompletionFunc *cb, 1290 void *opaque, int ret) 1291 { 1292 struct BlockBackendAIOCB *acb; 1293 1294 blk_inc_in_flight(blk); 1295 acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque); 1296 acb->blk = blk; 1297 acb->ret = ret; 1298 1299 aio_bh_schedule_oneshot(blk_get_aio_context(blk), error_callback_bh, acb); 1300 return &acb->common; 1301 } 1302 1303 typedef struct BlkAioEmAIOCB { 1304 BlockAIOCB common; 1305 BlkRwCo rwco; 1306 int bytes; 1307 bool has_returned; 1308 } BlkAioEmAIOCB; 1309 1310 static const AIOCBInfo blk_aio_em_aiocb_info = { 1311 .aiocb_size = sizeof(BlkAioEmAIOCB), 1312 }; 1313 1314 static void blk_aio_complete(BlkAioEmAIOCB *acb) 1315 { 1316 if (acb->has_returned) { 1317 acb->common.cb(acb->common.opaque, acb->rwco.ret); 1318 blk_dec_in_flight(acb->rwco.blk); 1319 qemu_aio_unref(acb); 1320 } 1321 } 1322 1323 static void blk_aio_complete_bh(void *opaque) 1324 { 1325 BlkAioEmAIOCB *acb = opaque; 1326 assert(acb->has_returned); 1327 blk_aio_complete(acb); 1328 } 1329 1330 static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, int bytes, 1331 void *iobuf, CoroutineEntry co_entry, 1332 BdrvRequestFlags flags, 1333 BlockCompletionFunc *cb, void *opaque) 1334 { 1335 BlkAioEmAIOCB *acb; 1336 Coroutine *co; 1337 1338 blk_inc_in_flight(blk); 1339 acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque); 1340 acb->rwco = (BlkRwCo) { 1341 .blk = blk, 1342 .offset = offset, 1343 .iobuf = iobuf, 1344 .flags = flags, 1345 .ret = NOT_DONE, 1346 }; 1347 acb->bytes = bytes; 1348 acb->has_returned = false; 1349 1350 co = qemu_coroutine_create(co_entry, acb); 1351 bdrv_coroutine_enter(blk_bs(blk), co); 1352 1353 acb->has_returned = true; 1354 if (acb->rwco.ret != NOT_DONE) { 1355 aio_bh_schedule_oneshot(blk_get_aio_context(blk), 1356 blk_aio_complete_bh, acb); 1357 } 1358 1359 return &acb->common; 1360 } 1361 1362 static void blk_aio_read_entry(void *opaque) 1363 { 1364 BlkAioEmAIOCB *acb = opaque; 1365 BlkRwCo *rwco = &acb->rwco; 1366 QEMUIOVector *qiov = rwco->iobuf; 1367 1368 assert(qiov->size == acb->bytes); 1369 rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, acb->bytes, 1370 qiov, rwco->flags); 1371 blk_aio_complete(acb); 1372 } 1373 1374 static void blk_aio_write_entry(void *opaque) 1375 { 1376 BlkAioEmAIOCB *acb = opaque; 1377 BlkRwCo *rwco = &acb->rwco; 1378 QEMUIOVector *qiov = rwco->iobuf; 1379 1380 assert(!qiov || qiov->size == acb->bytes); 1381 rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset, acb->bytes, 1382 qiov, rwco->flags); 1383 blk_aio_complete(acb); 1384 } 1385 1386 BlockAIOCB *blk_aio_pwrite_zeroes(BlockBackend *blk, int64_t offset, 1387 int count, BdrvRequestFlags flags, 1388 BlockCompletionFunc *cb, void *opaque) 1389 { 1390 return blk_aio_prwv(blk, offset, count, NULL, blk_aio_write_entry, 1391 flags | BDRV_REQ_ZERO_WRITE, cb, opaque); 1392 } 1393 1394 int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count) 1395 { 1396 int ret = blk_prw(blk, offset, buf, count, blk_read_entry, 0); 1397 if (ret < 0) { 1398 return ret; 1399 } 1400 return count; 1401 } 1402 1403 int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count, 1404 BdrvRequestFlags flags) 1405 { 1406 int ret = blk_prw(blk, offset, (void *) buf, count, blk_write_entry, 1407 flags); 1408 if (ret < 0) { 1409 return ret; 1410 } 1411 return count; 1412 } 1413 1414 int64_t blk_getlength(BlockBackend *blk) 1415 { 1416 if (!blk_is_available(blk)) { 1417 return -ENOMEDIUM; 1418 } 1419 1420 return bdrv_getlength(blk_bs(blk)); 1421 } 1422 1423 void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr) 1424 { 1425 if (!blk_bs(blk)) { 1426 *nb_sectors_ptr = 0; 1427 } else { 1428 bdrv_get_geometry(blk_bs(blk), nb_sectors_ptr); 1429 } 1430 } 1431 1432 int64_t blk_nb_sectors(BlockBackend *blk) 1433 { 1434 if (!blk_is_available(blk)) { 1435 return -ENOMEDIUM; 1436 } 1437 1438 return bdrv_nb_sectors(blk_bs(blk)); 1439 } 1440 1441 BlockAIOCB *blk_aio_preadv(BlockBackend *blk, int64_t offset, 1442 QEMUIOVector *qiov, BdrvRequestFlags flags, 1443 BlockCompletionFunc *cb, void *opaque) 1444 { 1445 return blk_aio_prwv(blk, offset, qiov->size, qiov, 1446 blk_aio_read_entry, flags, cb, opaque); 1447 } 1448 1449 BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset, 1450 QEMUIOVector *qiov, BdrvRequestFlags flags, 1451 BlockCompletionFunc *cb, void *opaque) 1452 { 1453 return blk_aio_prwv(blk, offset, qiov->size, qiov, 1454 blk_aio_write_entry, flags, cb, opaque); 1455 } 1456 1457 static void blk_aio_flush_entry(void *opaque) 1458 { 1459 BlkAioEmAIOCB *acb = opaque; 1460 BlkRwCo *rwco = &acb->rwco; 1461 1462 rwco->ret = blk_co_flush(rwco->blk); 1463 blk_aio_complete(acb); 1464 } 1465 1466 BlockAIOCB *blk_aio_flush(BlockBackend *blk, 1467 BlockCompletionFunc *cb, void *opaque) 1468 { 1469 return blk_aio_prwv(blk, 0, 0, NULL, blk_aio_flush_entry, 0, cb, opaque); 1470 } 1471 1472 static void blk_aio_pdiscard_entry(void *opaque) 1473 { 1474 BlkAioEmAIOCB *acb = opaque; 1475 BlkRwCo *rwco = &acb->rwco; 1476 1477 rwco->ret = blk_co_pdiscard(rwco->blk, rwco->offset, acb->bytes); 1478 blk_aio_complete(acb); 1479 } 1480 1481 BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk, 1482 int64_t offset, int bytes, 1483 BlockCompletionFunc *cb, void *opaque) 1484 { 1485 return blk_aio_prwv(blk, offset, bytes, NULL, blk_aio_pdiscard_entry, 0, 1486 cb, opaque); 1487 } 1488 1489 void blk_aio_cancel(BlockAIOCB *acb) 1490 { 1491 bdrv_aio_cancel(acb); 1492 } 1493 1494 void blk_aio_cancel_async(BlockAIOCB *acb) 1495 { 1496 bdrv_aio_cancel_async(acb); 1497 } 1498 1499 int blk_co_ioctl(BlockBackend *blk, unsigned long int req, void *buf) 1500 { 1501 if (!blk_is_available(blk)) { 1502 return -ENOMEDIUM; 1503 } 1504 1505 return bdrv_co_ioctl(blk_bs(blk), req, buf); 1506 } 1507 1508 static void blk_ioctl_entry(void *opaque) 1509 { 1510 BlkRwCo *rwco = opaque; 1511 QEMUIOVector *qiov = rwco->iobuf; 1512 1513 rwco->ret = blk_co_ioctl(rwco->blk, rwco->offset, 1514 qiov->iov[0].iov_base); 1515 aio_wait_kick(); 1516 } 1517 1518 int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf) 1519 { 1520 return blk_prw(blk, req, buf, 0, blk_ioctl_entry, 0); 1521 } 1522 1523 static void blk_aio_ioctl_entry(void *opaque) 1524 { 1525 BlkAioEmAIOCB *acb = opaque; 1526 BlkRwCo *rwco = &acb->rwco; 1527 1528 rwco->ret = blk_co_ioctl(rwco->blk, rwco->offset, rwco->iobuf); 1529 1530 blk_aio_complete(acb); 1531 } 1532 1533 BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf, 1534 BlockCompletionFunc *cb, void *opaque) 1535 { 1536 return blk_aio_prwv(blk, req, 0, buf, blk_aio_ioctl_entry, 0, cb, opaque); 1537 } 1538 1539 int blk_co_pdiscard(BlockBackend *blk, int64_t offset, int bytes) 1540 { 1541 int ret = blk_check_byte_request(blk, offset, bytes); 1542 if (ret < 0) { 1543 return ret; 1544 } 1545 1546 return bdrv_co_pdiscard(blk->root, offset, bytes); 1547 } 1548 1549 int blk_co_flush(BlockBackend *blk) 1550 { 1551 if (!blk_is_available(blk)) { 1552 return -ENOMEDIUM; 1553 } 1554 1555 return bdrv_co_flush(blk_bs(blk)); 1556 } 1557 1558 static void blk_flush_entry(void *opaque) 1559 { 1560 BlkRwCo *rwco = opaque; 1561 rwco->ret = blk_co_flush(rwco->blk); 1562 aio_wait_kick(); 1563 } 1564 1565 int blk_flush(BlockBackend *blk) 1566 { 1567 return blk_prw(blk, 0, NULL, 0, blk_flush_entry, 0); 1568 } 1569 1570 void blk_drain(BlockBackend *blk) 1571 { 1572 BlockDriverState *bs = blk_bs(blk); 1573 1574 if (bs) { 1575 bdrv_drained_begin(bs); 1576 } 1577 1578 /* We may have -ENOMEDIUM completions in flight */ 1579 AIO_WAIT_WHILE(blk_get_aio_context(blk), 1580 atomic_mb_read(&blk->in_flight) > 0); 1581 1582 if (bs) { 1583 bdrv_drained_end(bs); 1584 } 1585 } 1586 1587 void blk_drain_all(void) 1588 { 1589 BlockBackend *blk = NULL; 1590 1591 bdrv_drain_all_begin(); 1592 1593 while ((blk = blk_all_next(blk)) != NULL) { 1594 AioContext *ctx = blk_get_aio_context(blk); 1595 1596 aio_context_acquire(ctx); 1597 1598 /* We may have -ENOMEDIUM completions in flight */ 1599 AIO_WAIT_WHILE(ctx, atomic_mb_read(&blk->in_flight) > 0); 1600 1601 aio_context_release(ctx); 1602 } 1603 1604 bdrv_drain_all_end(); 1605 } 1606 1607 void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error, 1608 BlockdevOnError on_write_error) 1609 { 1610 blk->on_read_error = on_read_error; 1611 blk->on_write_error = on_write_error; 1612 } 1613 1614 BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read) 1615 { 1616 return is_read ? blk->on_read_error : blk->on_write_error; 1617 } 1618 1619 BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read, 1620 int error) 1621 { 1622 BlockdevOnError on_err = blk_get_on_error(blk, is_read); 1623 1624 switch (on_err) { 1625 case BLOCKDEV_ON_ERROR_ENOSPC: 1626 return (error == ENOSPC) ? 1627 BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT; 1628 case BLOCKDEV_ON_ERROR_STOP: 1629 return BLOCK_ERROR_ACTION_STOP; 1630 case BLOCKDEV_ON_ERROR_REPORT: 1631 return BLOCK_ERROR_ACTION_REPORT; 1632 case BLOCKDEV_ON_ERROR_IGNORE: 1633 return BLOCK_ERROR_ACTION_IGNORE; 1634 case BLOCKDEV_ON_ERROR_AUTO: 1635 default: 1636 abort(); 1637 } 1638 } 1639 1640 static void send_qmp_error_event(BlockBackend *blk, 1641 BlockErrorAction action, 1642 bool is_read, int error) 1643 { 1644 IoOperationType optype; 1645 BlockDriverState *bs = blk_bs(blk); 1646 1647 optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE; 1648 qapi_event_send_block_io_error(blk_name(blk), !!bs, 1649 bs ? bdrv_get_node_name(bs) : NULL, optype, 1650 action, blk_iostatus_is_enabled(blk), 1651 error == ENOSPC, strerror(error)); 1652 } 1653 1654 /* This is done by device models because, while the block layer knows 1655 * about the error, it does not know whether an operation comes from 1656 * the device or the block layer (from a job, for example). 1657 */ 1658 void blk_error_action(BlockBackend *blk, BlockErrorAction action, 1659 bool is_read, int error) 1660 { 1661 assert(error >= 0); 1662 1663 if (action == BLOCK_ERROR_ACTION_STOP) { 1664 /* First set the iostatus, so that "info block" returns an iostatus 1665 * that matches the events raised so far (an additional error iostatus 1666 * is fine, but not a lost one). 1667 */ 1668 blk_iostatus_set_err(blk, error); 1669 1670 /* Then raise the request to stop the VM and the event. 1671 * qemu_system_vmstop_request_prepare has two effects. First, 1672 * it ensures that the STOP event always comes after the 1673 * BLOCK_IO_ERROR event. Second, it ensures that even if management 1674 * can observe the STOP event and do a "cont" before the STOP 1675 * event is issued, the VM will not stop. In this case, vm_start() 1676 * also ensures that the STOP/RESUME pair of events is emitted. 1677 */ 1678 qemu_system_vmstop_request_prepare(); 1679 send_qmp_error_event(blk, action, is_read, error); 1680 qemu_system_vmstop_request(RUN_STATE_IO_ERROR); 1681 } else { 1682 send_qmp_error_event(blk, action, is_read, error); 1683 } 1684 } 1685 1686 bool blk_is_read_only(BlockBackend *blk) 1687 { 1688 BlockDriverState *bs = blk_bs(blk); 1689 1690 if (bs) { 1691 return bdrv_is_read_only(bs); 1692 } else { 1693 return blk->root_state.read_only; 1694 } 1695 } 1696 1697 bool blk_is_sg(BlockBackend *blk) 1698 { 1699 BlockDriverState *bs = blk_bs(blk); 1700 1701 if (!bs) { 1702 return false; 1703 } 1704 1705 return bdrv_is_sg(bs); 1706 } 1707 1708 bool blk_enable_write_cache(BlockBackend *blk) 1709 { 1710 return blk->enable_write_cache; 1711 } 1712 1713 void blk_set_enable_write_cache(BlockBackend *blk, bool wce) 1714 { 1715 blk->enable_write_cache = wce; 1716 } 1717 1718 void blk_invalidate_cache(BlockBackend *blk, Error **errp) 1719 { 1720 BlockDriverState *bs = blk_bs(blk); 1721 1722 if (!bs) { 1723 error_setg(errp, "Device '%s' has no medium", blk->name); 1724 return; 1725 } 1726 1727 bdrv_invalidate_cache(bs, errp); 1728 } 1729 1730 bool blk_is_inserted(BlockBackend *blk) 1731 { 1732 BlockDriverState *bs = blk_bs(blk); 1733 1734 return bs && bdrv_is_inserted(bs); 1735 } 1736 1737 bool blk_is_available(BlockBackend *blk) 1738 { 1739 return blk_is_inserted(blk) && !blk_dev_is_tray_open(blk); 1740 } 1741 1742 void blk_lock_medium(BlockBackend *blk, bool locked) 1743 { 1744 BlockDriverState *bs = blk_bs(blk); 1745 1746 if (bs) { 1747 bdrv_lock_medium(bs, locked); 1748 } 1749 } 1750 1751 void blk_eject(BlockBackend *blk, bool eject_flag) 1752 { 1753 BlockDriverState *bs = blk_bs(blk); 1754 char *id; 1755 1756 if (bs) { 1757 bdrv_eject(bs, eject_flag); 1758 } 1759 1760 /* Whether or not we ejected on the backend, 1761 * the frontend experienced a tray event. */ 1762 id = blk_get_attached_dev_id(blk); 1763 qapi_event_send_device_tray_moved(blk_name(blk), id, 1764 eject_flag); 1765 g_free(id); 1766 } 1767 1768 int blk_get_flags(BlockBackend *blk) 1769 { 1770 BlockDriverState *bs = blk_bs(blk); 1771 1772 if (bs) { 1773 return bdrv_get_flags(bs); 1774 } else { 1775 return blk->root_state.open_flags; 1776 } 1777 } 1778 1779 /* Returns the minimum request alignment, in bytes; guaranteed nonzero */ 1780 uint32_t blk_get_request_alignment(BlockBackend *blk) 1781 { 1782 BlockDriverState *bs = blk_bs(blk); 1783 return bs ? bs->bl.request_alignment : BDRV_SECTOR_SIZE; 1784 } 1785 1786 /* Returns the maximum transfer length, in bytes; guaranteed nonzero */ 1787 uint32_t blk_get_max_transfer(BlockBackend *blk) 1788 { 1789 BlockDriverState *bs = blk_bs(blk); 1790 uint32_t max = 0; 1791 1792 if (bs) { 1793 max = bs->bl.max_transfer; 1794 } 1795 return MIN_NON_ZERO(max, INT_MAX); 1796 } 1797 1798 int blk_get_max_iov(BlockBackend *blk) 1799 { 1800 return blk->root->bs->bl.max_iov; 1801 } 1802 1803 void blk_set_guest_block_size(BlockBackend *blk, int align) 1804 { 1805 blk->guest_block_size = align; 1806 } 1807 1808 void *blk_try_blockalign(BlockBackend *blk, size_t size) 1809 { 1810 return qemu_try_blockalign(blk ? blk_bs(blk) : NULL, size); 1811 } 1812 1813 void *blk_blockalign(BlockBackend *blk, size_t size) 1814 { 1815 return qemu_blockalign(blk ? blk_bs(blk) : NULL, size); 1816 } 1817 1818 bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp) 1819 { 1820 BlockDriverState *bs = blk_bs(blk); 1821 1822 if (!bs) { 1823 return false; 1824 } 1825 1826 return bdrv_op_is_blocked(bs, op, errp); 1827 } 1828 1829 void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason) 1830 { 1831 BlockDriverState *bs = blk_bs(blk); 1832 1833 if (bs) { 1834 bdrv_op_unblock(bs, op, reason); 1835 } 1836 } 1837 1838 void blk_op_block_all(BlockBackend *blk, Error *reason) 1839 { 1840 BlockDriverState *bs = blk_bs(blk); 1841 1842 if (bs) { 1843 bdrv_op_block_all(bs, reason); 1844 } 1845 } 1846 1847 void blk_op_unblock_all(BlockBackend *blk, Error *reason) 1848 { 1849 BlockDriverState *bs = blk_bs(blk); 1850 1851 if (bs) { 1852 bdrv_op_unblock_all(bs, reason); 1853 } 1854 } 1855 1856 AioContext *blk_get_aio_context(BlockBackend *blk) 1857 { 1858 BlockDriverState *bs = blk_bs(blk); 1859 1860 if (bs) { 1861 AioContext *ctx = bdrv_get_aio_context(blk_bs(blk)); 1862 assert(ctx == blk->ctx); 1863 } 1864 1865 return blk->ctx; 1866 } 1867 1868 static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb) 1869 { 1870 BlockBackendAIOCB *blk_acb = DO_UPCAST(BlockBackendAIOCB, common, acb); 1871 return blk_get_aio_context(blk_acb->blk); 1872 } 1873 1874 static int blk_do_set_aio_context(BlockBackend *blk, AioContext *new_context, 1875 bool update_root_node, Error **errp) 1876 { 1877 BlockDriverState *bs = blk_bs(blk); 1878 ThrottleGroupMember *tgm = &blk->public.throttle_group_member; 1879 int ret; 1880 1881 if (bs) { 1882 if (update_root_node) { 1883 ret = bdrv_child_try_set_aio_context(bs, new_context, blk->root, 1884 errp); 1885 if (ret < 0) { 1886 return ret; 1887 } 1888 } 1889 if (tgm->throttle_state) { 1890 bdrv_drained_begin(bs); 1891 throttle_group_detach_aio_context(tgm); 1892 throttle_group_attach_aio_context(tgm, new_context); 1893 bdrv_drained_end(bs); 1894 } 1895 } 1896 1897 blk->ctx = new_context; 1898 return 0; 1899 } 1900 1901 int blk_set_aio_context(BlockBackend *blk, AioContext *new_context, 1902 Error **errp) 1903 { 1904 return blk_do_set_aio_context(blk, new_context, true, errp); 1905 } 1906 1907 static bool blk_root_can_set_aio_ctx(BdrvChild *child, AioContext *ctx, 1908 GSList **ignore, Error **errp) 1909 { 1910 BlockBackend *blk = child->opaque; 1911 1912 if (blk->allow_aio_context_change) { 1913 return true; 1914 } 1915 1916 /* Only manually created BlockBackends that are not attached to anything 1917 * can change their AioContext without updating their user. */ 1918 if (!blk->name || blk->dev) { 1919 /* TODO Add BB name/QOM path */ 1920 error_setg(errp, "Cannot change iothread of active block backend"); 1921 return false; 1922 } 1923 1924 return true; 1925 } 1926 1927 static void blk_root_set_aio_ctx(BdrvChild *child, AioContext *ctx, 1928 GSList **ignore) 1929 { 1930 BlockBackend *blk = child->opaque; 1931 blk_do_set_aio_context(blk, ctx, false, &error_abort); 1932 } 1933 1934 void blk_add_aio_context_notifier(BlockBackend *blk, 1935 void (*attached_aio_context)(AioContext *new_context, void *opaque), 1936 void (*detach_aio_context)(void *opaque), void *opaque) 1937 { 1938 BlockBackendAioNotifier *notifier; 1939 BlockDriverState *bs = blk_bs(blk); 1940 1941 notifier = g_new(BlockBackendAioNotifier, 1); 1942 notifier->attached_aio_context = attached_aio_context; 1943 notifier->detach_aio_context = detach_aio_context; 1944 notifier->opaque = opaque; 1945 QLIST_INSERT_HEAD(&blk->aio_notifiers, notifier, list); 1946 1947 if (bs) { 1948 bdrv_add_aio_context_notifier(bs, attached_aio_context, 1949 detach_aio_context, opaque); 1950 } 1951 } 1952 1953 void blk_remove_aio_context_notifier(BlockBackend *blk, 1954 void (*attached_aio_context)(AioContext *, 1955 void *), 1956 void (*detach_aio_context)(void *), 1957 void *opaque) 1958 { 1959 BlockBackendAioNotifier *notifier; 1960 BlockDriverState *bs = blk_bs(blk); 1961 1962 if (bs) { 1963 bdrv_remove_aio_context_notifier(bs, attached_aio_context, 1964 detach_aio_context, opaque); 1965 } 1966 1967 QLIST_FOREACH(notifier, &blk->aio_notifiers, list) { 1968 if (notifier->attached_aio_context == attached_aio_context && 1969 notifier->detach_aio_context == detach_aio_context && 1970 notifier->opaque == opaque) { 1971 QLIST_REMOVE(notifier, list); 1972 g_free(notifier); 1973 return; 1974 } 1975 } 1976 1977 abort(); 1978 } 1979 1980 void blk_add_remove_bs_notifier(BlockBackend *blk, Notifier *notify) 1981 { 1982 notifier_list_add(&blk->remove_bs_notifiers, notify); 1983 } 1984 1985 void blk_add_insert_bs_notifier(BlockBackend *blk, Notifier *notify) 1986 { 1987 notifier_list_add(&blk->insert_bs_notifiers, notify); 1988 } 1989 1990 void blk_io_plug(BlockBackend *blk) 1991 { 1992 BlockDriverState *bs = blk_bs(blk); 1993 1994 if (bs) { 1995 bdrv_io_plug(bs); 1996 } 1997 } 1998 1999 void blk_io_unplug(BlockBackend *blk) 2000 { 2001 BlockDriverState *bs = blk_bs(blk); 2002 2003 if (bs) { 2004 bdrv_io_unplug(bs); 2005 } 2006 } 2007 2008 BlockAcctStats *blk_get_stats(BlockBackend *blk) 2009 { 2010 return &blk->stats; 2011 } 2012 2013 void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk, 2014 BlockCompletionFunc *cb, void *opaque) 2015 { 2016 return qemu_aio_get(aiocb_info, blk_bs(blk), cb, opaque); 2017 } 2018 2019 int coroutine_fn blk_co_pwrite_zeroes(BlockBackend *blk, int64_t offset, 2020 int bytes, BdrvRequestFlags flags) 2021 { 2022 return blk_co_pwritev(blk, offset, bytes, NULL, 2023 flags | BDRV_REQ_ZERO_WRITE); 2024 } 2025 2026 int blk_pwrite_compressed(BlockBackend *blk, int64_t offset, const void *buf, 2027 int count) 2028 { 2029 return blk_prw(blk, offset, (void *) buf, count, blk_write_entry, 2030 BDRV_REQ_WRITE_COMPRESSED); 2031 } 2032 2033 int blk_truncate(BlockBackend *blk, int64_t offset, PreallocMode prealloc, 2034 Error **errp) 2035 { 2036 if (!blk_is_available(blk)) { 2037 error_setg(errp, "No medium inserted"); 2038 return -ENOMEDIUM; 2039 } 2040 2041 return bdrv_truncate(blk->root, offset, prealloc, errp); 2042 } 2043 2044 static void blk_pdiscard_entry(void *opaque) 2045 { 2046 BlkRwCo *rwco = opaque; 2047 QEMUIOVector *qiov = rwco->iobuf; 2048 2049 rwco->ret = blk_co_pdiscard(rwco->blk, rwco->offset, qiov->size); 2050 aio_wait_kick(); 2051 } 2052 2053 int blk_pdiscard(BlockBackend *blk, int64_t offset, int bytes) 2054 { 2055 return blk_prw(blk, offset, NULL, bytes, blk_pdiscard_entry, 0); 2056 } 2057 2058 int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf, 2059 int64_t pos, int size) 2060 { 2061 int ret; 2062 2063 if (!blk_is_available(blk)) { 2064 return -ENOMEDIUM; 2065 } 2066 2067 ret = bdrv_save_vmstate(blk_bs(blk), buf, pos, size); 2068 if (ret < 0) { 2069 return ret; 2070 } 2071 2072 if (ret == size && !blk->enable_write_cache) { 2073 ret = bdrv_flush(blk_bs(blk)); 2074 } 2075 2076 return ret < 0 ? ret : size; 2077 } 2078 2079 int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size) 2080 { 2081 if (!blk_is_available(blk)) { 2082 return -ENOMEDIUM; 2083 } 2084 2085 return bdrv_load_vmstate(blk_bs(blk), buf, pos, size); 2086 } 2087 2088 int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz) 2089 { 2090 if (!blk_is_available(blk)) { 2091 return -ENOMEDIUM; 2092 } 2093 2094 return bdrv_probe_blocksizes(blk_bs(blk), bsz); 2095 } 2096 2097 int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo) 2098 { 2099 if (!blk_is_available(blk)) { 2100 return -ENOMEDIUM; 2101 } 2102 2103 return bdrv_probe_geometry(blk_bs(blk), geo); 2104 } 2105 2106 /* 2107 * Updates the BlockBackendRootState object with data from the currently 2108 * attached BlockDriverState. 2109 */ 2110 void blk_update_root_state(BlockBackend *blk) 2111 { 2112 assert(blk->root); 2113 2114 blk->root_state.open_flags = blk->root->bs->open_flags; 2115 blk->root_state.read_only = blk->root->bs->read_only; 2116 blk->root_state.detect_zeroes = blk->root->bs->detect_zeroes; 2117 } 2118 2119 /* 2120 * Returns the detect-zeroes setting to be used for bdrv_open() of a 2121 * BlockDriverState which is supposed to inherit the root state. 2122 */ 2123 bool blk_get_detect_zeroes_from_root_state(BlockBackend *blk) 2124 { 2125 return blk->root_state.detect_zeroes; 2126 } 2127 2128 /* 2129 * Returns the flags to be used for bdrv_open() of a BlockDriverState which is 2130 * supposed to inherit the root state. 2131 */ 2132 int blk_get_open_flags_from_root_state(BlockBackend *blk) 2133 { 2134 int bs_flags; 2135 2136 bs_flags = blk->root_state.read_only ? 0 : BDRV_O_RDWR; 2137 bs_flags |= blk->root_state.open_flags & ~BDRV_O_RDWR; 2138 2139 return bs_flags; 2140 } 2141 2142 BlockBackendRootState *blk_get_root_state(BlockBackend *blk) 2143 { 2144 return &blk->root_state; 2145 } 2146 2147 int blk_commit_all(void) 2148 { 2149 BlockBackend *blk = NULL; 2150 2151 while ((blk = blk_all_next(blk)) != NULL) { 2152 AioContext *aio_context = blk_get_aio_context(blk); 2153 2154 aio_context_acquire(aio_context); 2155 if (blk_is_inserted(blk) && blk->root->bs->backing) { 2156 int ret = bdrv_commit(blk->root->bs); 2157 if (ret < 0) { 2158 aio_context_release(aio_context); 2159 return ret; 2160 } 2161 } 2162 aio_context_release(aio_context); 2163 } 2164 return 0; 2165 } 2166 2167 2168 /* throttling disk I/O limits */ 2169 void blk_set_io_limits(BlockBackend *blk, ThrottleConfig *cfg) 2170 { 2171 throttle_group_config(&blk->public.throttle_group_member, cfg); 2172 } 2173 2174 void blk_io_limits_disable(BlockBackend *blk) 2175 { 2176 BlockDriverState *bs = blk_bs(blk); 2177 ThrottleGroupMember *tgm = &blk->public.throttle_group_member; 2178 assert(tgm->throttle_state); 2179 if (bs) { 2180 bdrv_drained_begin(bs); 2181 } 2182 throttle_group_unregister_tgm(tgm); 2183 if (bs) { 2184 bdrv_drained_end(bs); 2185 } 2186 } 2187 2188 /* should be called before blk_set_io_limits if a limit is set */ 2189 void blk_io_limits_enable(BlockBackend *blk, const char *group) 2190 { 2191 assert(!blk->public.throttle_group_member.throttle_state); 2192 throttle_group_register_tgm(&blk->public.throttle_group_member, 2193 group, blk_get_aio_context(blk)); 2194 } 2195 2196 void blk_io_limits_update_group(BlockBackend *blk, const char *group) 2197 { 2198 /* this BB is not part of any group */ 2199 if (!blk->public.throttle_group_member.throttle_state) { 2200 return; 2201 } 2202 2203 /* this BB is a part of the same group than the one we want */ 2204 if (!g_strcmp0(throttle_group_get_name(&blk->public.throttle_group_member), 2205 group)) { 2206 return; 2207 } 2208 2209 /* need to change the group this bs belong to */ 2210 blk_io_limits_disable(blk); 2211 blk_io_limits_enable(blk, group); 2212 } 2213 2214 static void blk_root_drained_begin(BdrvChild *child) 2215 { 2216 BlockBackend *blk = child->opaque; 2217 2218 if (++blk->quiesce_counter == 1) { 2219 if (blk->dev_ops && blk->dev_ops->drained_begin) { 2220 blk->dev_ops->drained_begin(blk->dev_opaque); 2221 } 2222 } 2223 2224 /* Note that blk->root may not be accessible here yet if we are just 2225 * attaching to a BlockDriverState that is drained. Use child instead. */ 2226 2227 if (atomic_fetch_inc(&blk->public.throttle_group_member.io_limits_disabled) == 0) { 2228 throttle_group_restart_tgm(&blk->public.throttle_group_member); 2229 } 2230 } 2231 2232 static bool blk_root_drained_poll(BdrvChild *child) 2233 { 2234 BlockBackend *blk = child->opaque; 2235 assert(blk->quiesce_counter); 2236 return !!blk->in_flight; 2237 } 2238 2239 static void blk_root_drained_end(BdrvChild *child, int *drained_end_counter) 2240 { 2241 BlockBackend *blk = child->opaque; 2242 assert(blk->quiesce_counter); 2243 2244 assert(blk->public.throttle_group_member.io_limits_disabled); 2245 atomic_dec(&blk->public.throttle_group_member.io_limits_disabled); 2246 2247 if (--blk->quiesce_counter == 0) { 2248 if (blk->dev_ops && blk->dev_ops->drained_end) { 2249 blk->dev_ops->drained_end(blk->dev_opaque); 2250 } 2251 } 2252 } 2253 2254 void blk_register_buf(BlockBackend *blk, void *host, size_t size) 2255 { 2256 bdrv_register_buf(blk_bs(blk), host, size); 2257 } 2258 2259 void blk_unregister_buf(BlockBackend *blk, void *host) 2260 { 2261 bdrv_unregister_buf(blk_bs(blk), host); 2262 } 2263 2264 int coroutine_fn blk_co_copy_range(BlockBackend *blk_in, int64_t off_in, 2265 BlockBackend *blk_out, int64_t off_out, 2266 int bytes, BdrvRequestFlags read_flags, 2267 BdrvRequestFlags write_flags) 2268 { 2269 int r; 2270 r = blk_check_byte_request(blk_in, off_in, bytes); 2271 if (r) { 2272 return r; 2273 } 2274 r = blk_check_byte_request(blk_out, off_out, bytes); 2275 if (r) { 2276 return r; 2277 } 2278 return bdrv_co_copy_range(blk_in->root, off_in, 2279 blk_out->root, off_out, 2280 bytes, read_flags, write_flags); 2281 } 2282 2283 const BdrvChild *blk_root(BlockBackend *blk) 2284 { 2285 return blk->root; 2286 } 2287