1 /* 2 * QEMU Block backends 3 * 4 * Copyright (C) 2014-2016 Red Hat, Inc. 5 * 6 * Authors: 7 * Markus Armbruster <armbru@redhat.com>, 8 * 9 * This work is licensed under the terms of the GNU LGPL, version 2.1 10 * or later. See the COPYING.LIB file in the top-level directory. 11 */ 12 13 #include "qemu/osdep.h" 14 #include "sysemu/block-backend.h" 15 #include "block/block_int.h" 16 #include "block/blockjob.h" 17 #include "block/coroutines.h" 18 #include "block/throttle-groups.h" 19 #include "hw/qdev-core.h" 20 #include "sysemu/blockdev.h" 21 #include "sysemu/runstate.h" 22 #include "sysemu/replay.h" 23 #include "qapi/error.h" 24 #include "qapi/qapi-events-block.h" 25 #include "qemu/id.h" 26 #include "qemu/main-loop.h" 27 #include "qemu/option.h" 28 #include "trace.h" 29 #include "migration/misc.h" 30 31 /* Number of coroutines to reserve per attached device model */ 32 #define COROUTINE_POOL_RESERVATION 64 33 34 #define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */ 35 36 static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb); 37 38 typedef struct BlockBackendAioNotifier { 39 void (*attached_aio_context)(AioContext *new_context, void *opaque); 40 void (*detach_aio_context)(void *opaque); 41 void *opaque; 42 QLIST_ENTRY(BlockBackendAioNotifier) list; 43 } BlockBackendAioNotifier; 44 45 struct BlockBackend { 46 char *name; 47 int refcnt; 48 BdrvChild *root; 49 AioContext *ctx; 50 DriveInfo *legacy_dinfo; /* null unless created by drive_new() */ 51 QTAILQ_ENTRY(BlockBackend) link; /* for block_backends */ 52 QTAILQ_ENTRY(BlockBackend) monitor_link; /* for monitor_block_backends */ 53 BlockBackendPublic public; 54 55 DeviceState *dev; /* attached device model, if any */ 56 const BlockDevOps *dev_ops; 57 void *dev_opaque; 58 59 /* If the BDS tree is removed, some of its options are stored here (which 60 * can be used to restore those options in the new BDS on insert) */ 61 BlockBackendRootState root_state; 62 63 bool enable_write_cache; 64 65 /* I/O stats (display with "info blockstats"). */ 66 BlockAcctStats stats; 67 68 BlockdevOnError on_read_error, on_write_error; 69 bool iostatus_enabled; 70 BlockDeviceIoStatus iostatus; 71 72 uint64_t perm; 73 uint64_t shared_perm; 74 bool disable_perm; 75 76 bool allow_aio_context_change; 77 bool allow_write_beyond_eof; 78 79 /* Protected by BQL */ 80 NotifierList remove_bs_notifiers, insert_bs_notifiers; 81 QLIST_HEAD(, BlockBackendAioNotifier) aio_notifiers; 82 83 int quiesce_counter; 84 CoQueue queued_requests; 85 bool disable_request_queuing; 86 87 VMChangeStateEntry *vmsh; 88 bool force_allow_inactivate; 89 90 /* Number of in-flight aio requests. BlockDriverState also counts 91 * in-flight requests but aio requests can exist even when blk->root is 92 * NULL, so we cannot rely on its counter for that case. 93 * Accessed with atomic ops. 94 */ 95 unsigned int in_flight; 96 }; 97 98 typedef struct BlockBackendAIOCB { 99 BlockAIOCB common; 100 BlockBackend *blk; 101 int ret; 102 } BlockBackendAIOCB; 103 104 static const AIOCBInfo block_backend_aiocb_info = { 105 .get_aio_context = blk_aiocb_get_aio_context, 106 .aiocb_size = sizeof(BlockBackendAIOCB), 107 }; 108 109 static void drive_info_del(DriveInfo *dinfo); 110 static BlockBackend *bdrv_first_blk(BlockDriverState *bs); 111 112 /* All BlockBackends. Protected by BQL. */ 113 static QTAILQ_HEAD(, BlockBackend) block_backends = 114 QTAILQ_HEAD_INITIALIZER(block_backends); 115 116 /* 117 * All BlockBackends referenced by the monitor and which are iterated through by 118 * blk_next(). Protected by BQL. 119 */ 120 static QTAILQ_HEAD(, BlockBackend) monitor_block_backends = 121 QTAILQ_HEAD_INITIALIZER(monitor_block_backends); 122 123 static void blk_root_inherit_options(BdrvChildRole role, bool parent_is_format, 124 int *child_flags, QDict *child_options, 125 int parent_flags, QDict *parent_options) 126 { 127 /* We're not supposed to call this function for root nodes */ 128 abort(); 129 } 130 static void blk_root_drained_begin(BdrvChild *child); 131 static bool blk_root_drained_poll(BdrvChild *child); 132 static void blk_root_drained_end(BdrvChild *child, int *drained_end_counter); 133 134 static void blk_root_change_media(BdrvChild *child, bool load); 135 static void blk_root_resize(BdrvChild *child); 136 137 static bool blk_root_can_set_aio_ctx(BdrvChild *child, AioContext *ctx, 138 GSList **ignore, Error **errp); 139 static void blk_root_set_aio_ctx(BdrvChild *child, AioContext *ctx, 140 GSList **ignore); 141 142 static char *blk_root_get_parent_desc(BdrvChild *child) 143 { 144 BlockBackend *blk = child->opaque; 145 g_autofree char *dev_id = NULL; 146 147 if (blk->name) { 148 return g_strdup_printf("block device '%s'", blk->name); 149 } 150 151 dev_id = blk_get_attached_dev_id(blk); 152 if (*dev_id) { 153 return g_strdup_printf("block device '%s'", dev_id); 154 } else { 155 /* TODO Callback into the BB owner for something more detailed */ 156 return g_strdup("an unnamed block device"); 157 } 158 } 159 160 static const char *blk_root_get_name(BdrvChild *child) 161 { 162 return blk_name(child->opaque); 163 } 164 165 static void blk_vm_state_changed(void *opaque, bool running, RunState state) 166 { 167 Error *local_err = NULL; 168 BlockBackend *blk = opaque; 169 170 if (state == RUN_STATE_INMIGRATE) { 171 return; 172 } 173 174 qemu_del_vm_change_state_handler(blk->vmsh); 175 blk->vmsh = NULL; 176 blk_set_perm(blk, blk->perm, blk->shared_perm, &local_err); 177 if (local_err) { 178 error_report_err(local_err); 179 } 180 } 181 182 /* 183 * Notifies the user of the BlockBackend that migration has completed. qdev 184 * devices can tighten their permissions in response (specifically revoke 185 * shared write permissions that we needed for storage migration). 186 * 187 * If an error is returned, the VM cannot be allowed to be resumed. 188 */ 189 static void blk_root_activate(BdrvChild *child, Error **errp) 190 { 191 BlockBackend *blk = child->opaque; 192 Error *local_err = NULL; 193 uint64_t saved_shared_perm; 194 195 if (!blk->disable_perm) { 196 return; 197 } 198 199 blk->disable_perm = false; 200 201 /* 202 * blk->shared_perm contains the permissions we want to share once 203 * migration is really completely done. For now, we need to share 204 * all; but we also need to retain blk->shared_perm, which is 205 * overwritten by a successful blk_set_perm() call. Save it and 206 * restore it below. 207 */ 208 saved_shared_perm = blk->shared_perm; 209 210 blk_set_perm(blk, blk->perm, BLK_PERM_ALL, &local_err); 211 if (local_err) { 212 error_propagate(errp, local_err); 213 blk->disable_perm = true; 214 return; 215 } 216 blk->shared_perm = saved_shared_perm; 217 218 if (runstate_check(RUN_STATE_INMIGRATE)) { 219 /* Activation can happen when migration process is still active, for 220 * example when nbd_server_add is called during non-shared storage 221 * migration. Defer the shared_perm update to migration completion. */ 222 if (!blk->vmsh) { 223 blk->vmsh = qemu_add_vm_change_state_handler(blk_vm_state_changed, 224 blk); 225 } 226 return; 227 } 228 229 blk_set_perm(blk, blk->perm, blk->shared_perm, &local_err); 230 if (local_err) { 231 error_propagate(errp, local_err); 232 blk->disable_perm = true; 233 return; 234 } 235 } 236 237 void blk_set_force_allow_inactivate(BlockBackend *blk) 238 { 239 GLOBAL_STATE_CODE(); 240 blk->force_allow_inactivate = true; 241 } 242 243 static bool blk_can_inactivate(BlockBackend *blk) 244 { 245 /* If it is a guest device, inactivate is ok. */ 246 if (blk->dev || blk_name(blk)[0]) { 247 return true; 248 } 249 250 /* Inactivating means no more writes to the image can be done, 251 * even if those writes would be changes invisible to the 252 * guest. For block job BBs that satisfy this, we can just allow 253 * it. This is the case for mirror job source, which is required 254 * by libvirt non-shared block migration. */ 255 if (!(blk->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED))) { 256 return true; 257 } 258 259 return blk->force_allow_inactivate; 260 } 261 262 static int blk_root_inactivate(BdrvChild *child) 263 { 264 BlockBackend *blk = child->opaque; 265 266 if (blk->disable_perm) { 267 return 0; 268 } 269 270 if (!blk_can_inactivate(blk)) { 271 return -EPERM; 272 } 273 274 blk->disable_perm = true; 275 if (blk->root) { 276 bdrv_child_try_set_perm(blk->root, 0, BLK_PERM_ALL, &error_abort); 277 } 278 279 return 0; 280 } 281 282 static void blk_root_attach(BdrvChild *child) 283 { 284 BlockBackend *blk = child->opaque; 285 BlockBackendAioNotifier *notifier; 286 287 trace_blk_root_attach(child, blk, child->bs); 288 289 QLIST_FOREACH(notifier, &blk->aio_notifiers, list) { 290 bdrv_add_aio_context_notifier(child->bs, 291 notifier->attached_aio_context, 292 notifier->detach_aio_context, 293 notifier->opaque); 294 } 295 } 296 297 static void blk_root_detach(BdrvChild *child) 298 { 299 BlockBackend *blk = child->opaque; 300 BlockBackendAioNotifier *notifier; 301 302 trace_blk_root_detach(child, blk, child->bs); 303 304 QLIST_FOREACH(notifier, &blk->aio_notifiers, list) { 305 bdrv_remove_aio_context_notifier(child->bs, 306 notifier->attached_aio_context, 307 notifier->detach_aio_context, 308 notifier->opaque); 309 } 310 } 311 312 static AioContext *blk_root_get_parent_aio_context(BdrvChild *c) 313 { 314 BlockBackend *blk = c->opaque; 315 316 return blk_get_aio_context(blk); 317 } 318 319 static const BdrvChildClass child_root = { 320 .inherit_options = blk_root_inherit_options, 321 322 .change_media = blk_root_change_media, 323 .resize = blk_root_resize, 324 .get_name = blk_root_get_name, 325 .get_parent_desc = blk_root_get_parent_desc, 326 327 .drained_begin = blk_root_drained_begin, 328 .drained_poll = blk_root_drained_poll, 329 .drained_end = blk_root_drained_end, 330 331 .activate = blk_root_activate, 332 .inactivate = blk_root_inactivate, 333 334 .attach = blk_root_attach, 335 .detach = blk_root_detach, 336 337 .can_set_aio_ctx = blk_root_can_set_aio_ctx, 338 .set_aio_ctx = blk_root_set_aio_ctx, 339 340 .get_parent_aio_context = blk_root_get_parent_aio_context, 341 }; 342 343 /* 344 * Create a new BlockBackend with a reference count of one. 345 * 346 * @perm is a bitmasks of BLK_PERM_* constants which describes the permissions 347 * to request for a block driver node that is attached to this BlockBackend. 348 * @shared_perm is a bitmask which describes which permissions may be granted 349 * to other users of the attached node. 350 * Both sets of permissions can be changed later using blk_set_perm(). 351 * 352 * Return the new BlockBackend on success, null on failure. 353 */ 354 BlockBackend *blk_new(AioContext *ctx, uint64_t perm, uint64_t shared_perm) 355 { 356 BlockBackend *blk; 357 358 GLOBAL_STATE_CODE(); 359 360 blk = g_new0(BlockBackend, 1); 361 blk->refcnt = 1; 362 blk->ctx = ctx; 363 blk->perm = perm; 364 blk->shared_perm = shared_perm; 365 blk_set_enable_write_cache(blk, true); 366 367 blk->on_read_error = BLOCKDEV_ON_ERROR_REPORT; 368 blk->on_write_error = BLOCKDEV_ON_ERROR_ENOSPC; 369 370 block_acct_init(&blk->stats); 371 372 qemu_co_queue_init(&blk->queued_requests); 373 notifier_list_init(&blk->remove_bs_notifiers); 374 notifier_list_init(&blk->insert_bs_notifiers); 375 QLIST_INIT(&blk->aio_notifiers); 376 377 QTAILQ_INSERT_TAIL(&block_backends, blk, link); 378 return blk; 379 } 380 381 /* 382 * Create a new BlockBackend connected to an existing BlockDriverState. 383 * 384 * @perm is a bitmasks of BLK_PERM_* constants which describes the 385 * permissions to request for @bs that is attached to this 386 * BlockBackend. @shared_perm is a bitmask which describes which 387 * permissions may be granted to other users of the attached node. 388 * Both sets of permissions can be changed later using blk_set_perm(). 389 * 390 * Return the new BlockBackend on success, null on failure. 391 */ 392 BlockBackend *blk_new_with_bs(BlockDriverState *bs, uint64_t perm, 393 uint64_t shared_perm, Error **errp) 394 { 395 BlockBackend *blk = blk_new(bdrv_get_aio_context(bs), perm, shared_perm); 396 397 GLOBAL_STATE_CODE(); 398 399 if (blk_insert_bs(blk, bs, errp) < 0) { 400 blk_unref(blk); 401 return NULL; 402 } 403 return blk; 404 } 405 406 /* 407 * Creates a new BlockBackend, opens a new BlockDriverState, and connects both. 408 * The new BlockBackend is in the main AioContext. 409 * 410 * Just as with bdrv_open(), after having called this function the reference to 411 * @options belongs to the block layer (even on failure). 412 * 413 * TODO: Remove @filename and @flags; it should be possible to specify a whole 414 * BDS tree just by specifying the @options QDict (or @reference, 415 * alternatively). At the time of adding this function, this is not possible, 416 * though, so callers of this function have to be able to specify @filename and 417 * @flags. 418 */ 419 BlockBackend *blk_new_open(const char *filename, const char *reference, 420 QDict *options, int flags, Error **errp) 421 { 422 BlockBackend *blk; 423 BlockDriverState *bs; 424 uint64_t perm = 0; 425 uint64_t shared = BLK_PERM_ALL; 426 427 GLOBAL_STATE_CODE(); 428 429 /* 430 * blk_new_open() is mainly used in .bdrv_create implementations and the 431 * tools where sharing isn't a major concern because the BDS stays private 432 * and the file is generally not supposed to be used by a second process, 433 * so we just request permission according to the flags. 434 * 435 * The exceptions are xen_disk and blockdev_init(); in these cases, the 436 * caller of blk_new_open() doesn't make use of the permissions, but they 437 * shouldn't hurt either. We can still share everything here because the 438 * guest devices will add their own blockers if they can't share. 439 */ 440 if ((flags & BDRV_O_NO_IO) == 0) { 441 perm |= BLK_PERM_CONSISTENT_READ; 442 if (flags & BDRV_O_RDWR) { 443 perm |= BLK_PERM_WRITE; 444 } 445 } 446 if (flags & BDRV_O_RESIZE) { 447 perm |= BLK_PERM_RESIZE; 448 } 449 if (flags & BDRV_O_NO_SHARE) { 450 shared = BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED; 451 } 452 453 blk = blk_new(qemu_get_aio_context(), perm, shared); 454 bs = bdrv_open(filename, reference, options, flags, errp); 455 if (!bs) { 456 blk_unref(blk); 457 return NULL; 458 } 459 460 blk->root = bdrv_root_attach_child(bs, "root", &child_root, 461 BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, 462 perm, shared, blk, errp); 463 if (!blk->root) { 464 blk_unref(blk); 465 return NULL; 466 } 467 468 return blk; 469 } 470 471 static void blk_delete(BlockBackend *blk) 472 { 473 assert(!blk->refcnt); 474 assert(!blk->name); 475 assert(!blk->dev); 476 if (blk->public.throttle_group_member.throttle_state) { 477 blk_io_limits_disable(blk); 478 } 479 if (blk->root) { 480 blk_remove_bs(blk); 481 } 482 if (blk->vmsh) { 483 qemu_del_vm_change_state_handler(blk->vmsh); 484 blk->vmsh = NULL; 485 } 486 assert(QLIST_EMPTY(&blk->remove_bs_notifiers.notifiers)); 487 assert(QLIST_EMPTY(&blk->insert_bs_notifiers.notifiers)); 488 assert(QLIST_EMPTY(&blk->aio_notifiers)); 489 QTAILQ_REMOVE(&block_backends, blk, link); 490 drive_info_del(blk->legacy_dinfo); 491 block_acct_cleanup(&blk->stats); 492 g_free(blk); 493 } 494 495 static void drive_info_del(DriveInfo *dinfo) 496 { 497 if (!dinfo) { 498 return; 499 } 500 qemu_opts_del(dinfo->opts); 501 g_free(dinfo); 502 } 503 504 int blk_get_refcnt(BlockBackend *blk) 505 { 506 GLOBAL_STATE_CODE(); 507 return blk ? blk->refcnt : 0; 508 } 509 510 /* 511 * Increment @blk's reference count. 512 * @blk must not be null. 513 */ 514 void blk_ref(BlockBackend *blk) 515 { 516 assert(blk->refcnt > 0); 517 GLOBAL_STATE_CODE(); 518 blk->refcnt++; 519 } 520 521 /* 522 * Decrement @blk's reference count. 523 * If this drops it to zero, destroy @blk. 524 * For convenience, do nothing if @blk is null. 525 */ 526 void blk_unref(BlockBackend *blk) 527 { 528 GLOBAL_STATE_CODE(); 529 if (blk) { 530 assert(blk->refcnt > 0); 531 if (blk->refcnt > 1) { 532 blk->refcnt--; 533 } else { 534 blk_drain(blk); 535 /* blk_drain() cannot resurrect blk, nobody held a reference */ 536 assert(blk->refcnt == 1); 537 blk->refcnt = 0; 538 blk_delete(blk); 539 } 540 } 541 } 542 543 /* 544 * Behaves similarly to blk_next() but iterates over all BlockBackends, even the 545 * ones which are hidden (i.e. are not referenced by the monitor). 546 */ 547 BlockBackend *blk_all_next(BlockBackend *blk) 548 { 549 GLOBAL_STATE_CODE(); 550 return blk ? QTAILQ_NEXT(blk, link) 551 : QTAILQ_FIRST(&block_backends); 552 } 553 554 void blk_remove_all_bs(void) 555 { 556 BlockBackend *blk = NULL; 557 558 GLOBAL_STATE_CODE(); 559 560 while ((blk = blk_all_next(blk)) != NULL) { 561 AioContext *ctx = blk_get_aio_context(blk); 562 563 aio_context_acquire(ctx); 564 if (blk->root) { 565 blk_remove_bs(blk); 566 } 567 aio_context_release(ctx); 568 } 569 } 570 571 /* 572 * Return the monitor-owned BlockBackend after @blk. 573 * If @blk is null, return the first one. 574 * Else, return @blk's next sibling, which may be null. 575 * 576 * To iterate over all BlockBackends, do 577 * for (blk = blk_next(NULL); blk; blk = blk_next(blk)) { 578 * ... 579 * } 580 */ 581 BlockBackend *blk_next(BlockBackend *blk) 582 { 583 GLOBAL_STATE_CODE(); 584 return blk ? QTAILQ_NEXT(blk, monitor_link) 585 : QTAILQ_FIRST(&monitor_block_backends); 586 } 587 588 /* Iterates over all top-level BlockDriverStates, i.e. BDSs that are owned by 589 * the monitor or attached to a BlockBackend */ 590 BlockDriverState *bdrv_next(BdrvNextIterator *it) 591 { 592 BlockDriverState *bs, *old_bs; 593 594 /* Must be called from the main loop */ 595 assert(qemu_get_current_aio_context() == qemu_get_aio_context()); 596 597 /* First, return all root nodes of BlockBackends. In order to avoid 598 * returning a BDS twice when multiple BBs refer to it, we only return it 599 * if the BB is the first one in the parent list of the BDS. */ 600 if (it->phase == BDRV_NEXT_BACKEND_ROOTS) { 601 BlockBackend *old_blk = it->blk; 602 603 old_bs = old_blk ? blk_bs(old_blk) : NULL; 604 605 do { 606 it->blk = blk_all_next(it->blk); 607 bs = it->blk ? blk_bs(it->blk) : NULL; 608 } while (it->blk && (bs == NULL || bdrv_first_blk(bs) != it->blk)); 609 610 if (it->blk) { 611 blk_ref(it->blk); 612 } 613 blk_unref(old_blk); 614 615 if (bs) { 616 bdrv_ref(bs); 617 bdrv_unref(old_bs); 618 return bs; 619 } 620 it->phase = BDRV_NEXT_MONITOR_OWNED; 621 } else { 622 old_bs = it->bs; 623 } 624 625 /* Then return the monitor-owned BDSes without a BB attached. Ignore all 626 * BDSes that are attached to a BlockBackend here; they have been handled 627 * by the above block already */ 628 do { 629 it->bs = bdrv_next_monitor_owned(it->bs); 630 bs = it->bs; 631 } while (bs && bdrv_has_blk(bs)); 632 633 if (bs) { 634 bdrv_ref(bs); 635 } 636 bdrv_unref(old_bs); 637 638 return bs; 639 } 640 641 static void bdrv_next_reset(BdrvNextIterator *it) 642 { 643 *it = (BdrvNextIterator) { 644 .phase = BDRV_NEXT_BACKEND_ROOTS, 645 }; 646 } 647 648 BlockDriverState *bdrv_first(BdrvNextIterator *it) 649 { 650 GLOBAL_STATE_CODE(); 651 bdrv_next_reset(it); 652 return bdrv_next(it); 653 } 654 655 /* Must be called when aborting a bdrv_next() iteration before 656 * bdrv_next() returns NULL */ 657 void bdrv_next_cleanup(BdrvNextIterator *it) 658 { 659 /* Must be called from the main loop */ 660 assert(qemu_get_current_aio_context() == qemu_get_aio_context()); 661 662 if (it->phase == BDRV_NEXT_BACKEND_ROOTS) { 663 if (it->blk) { 664 bdrv_unref(blk_bs(it->blk)); 665 blk_unref(it->blk); 666 } 667 } else { 668 bdrv_unref(it->bs); 669 } 670 671 bdrv_next_reset(it); 672 } 673 674 /* 675 * Add a BlockBackend into the list of backends referenced by the monitor, with 676 * the given @name acting as the handle for the monitor. 677 * Strictly for use by blockdev.c. 678 * 679 * @name must not be null or empty. 680 * 681 * Returns true on success and false on failure. In the latter case, an Error 682 * object is returned through @errp. 683 */ 684 bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp) 685 { 686 assert(!blk->name); 687 assert(name && name[0]); 688 GLOBAL_STATE_CODE(); 689 690 if (!id_wellformed(name)) { 691 error_setg(errp, "Invalid device name"); 692 return false; 693 } 694 if (blk_by_name(name)) { 695 error_setg(errp, "Device with id '%s' already exists", name); 696 return false; 697 } 698 if (bdrv_find_node(name)) { 699 error_setg(errp, 700 "Device name '%s' conflicts with an existing node name", 701 name); 702 return false; 703 } 704 705 blk->name = g_strdup(name); 706 QTAILQ_INSERT_TAIL(&monitor_block_backends, blk, monitor_link); 707 return true; 708 } 709 710 /* 711 * Remove a BlockBackend from the list of backends referenced by the monitor. 712 * Strictly for use by blockdev.c. 713 */ 714 void monitor_remove_blk(BlockBackend *blk) 715 { 716 GLOBAL_STATE_CODE(); 717 718 if (!blk->name) { 719 return; 720 } 721 722 QTAILQ_REMOVE(&monitor_block_backends, blk, monitor_link); 723 g_free(blk->name); 724 blk->name = NULL; 725 } 726 727 /* 728 * Return @blk's name, a non-null string. 729 * Returns an empty string iff @blk is not referenced by the monitor. 730 */ 731 const char *blk_name(const BlockBackend *blk) 732 { 733 IO_CODE(); 734 return blk->name ?: ""; 735 } 736 737 /* 738 * Return the BlockBackend with name @name if it exists, else null. 739 * @name must not be null. 740 */ 741 BlockBackend *blk_by_name(const char *name) 742 { 743 BlockBackend *blk = NULL; 744 745 GLOBAL_STATE_CODE(); 746 assert(name); 747 while ((blk = blk_next(blk)) != NULL) { 748 if (!strcmp(name, blk->name)) { 749 return blk; 750 } 751 } 752 return NULL; 753 } 754 755 /* 756 * Return the BlockDriverState attached to @blk if any, else null. 757 */ 758 BlockDriverState *blk_bs(BlockBackend *blk) 759 { 760 IO_CODE(); 761 return blk->root ? blk->root->bs : NULL; 762 } 763 764 static BlockBackend *bdrv_first_blk(BlockDriverState *bs) 765 { 766 BdrvChild *child; 767 768 GLOBAL_STATE_CODE(); 769 770 QLIST_FOREACH(child, &bs->parents, next_parent) { 771 if (child->klass == &child_root) { 772 return child->opaque; 773 } 774 } 775 776 return NULL; 777 } 778 779 /* 780 * Returns true if @bs has an associated BlockBackend. 781 */ 782 bool bdrv_has_blk(BlockDriverState *bs) 783 { 784 GLOBAL_STATE_CODE(); 785 return bdrv_first_blk(bs) != NULL; 786 } 787 788 /* 789 * Returns true if @bs has only BlockBackends as parents. 790 */ 791 bool bdrv_is_root_node(BlockDriverState *bs) 792 { 793 BdrvChild *c; 794 795 GLOBAL_STATE_CODE(); 796 QLIST_FOREACH(c, &bs->parents, next_parent) { 797 if (c->klass != &child_root) { 798 return false; 799 } 800 } 801 802 return true; 803 } 804 805 /* 806 * Return @blk's DriveInfo if any, else null. 807 */ 808 DriveInfo *blk_legacy_dinfo(BlockBackend *blk) 809 { 810 GLOBAL_STATE_CODE(); 811 return blk->legacy_dinfo; 812 } 813 814 /* 815 * Set @blk's DriveInfo to @dinfo, and return it. 816 * @blk must not have a DriveInfo set already. 817 * No other BlockBackend may have the same DriveInfo set. 818 */ 819 DriveInfo *blk_set_legacy_dinfo(BlockBackend *blk, DriveInfo *dinfo) 820 { 821 assert(!blk->legacy_dinfo); 822 GLOBAL_STATE_CODE(); 823 return blk->legacy_dinfo = dinfo; 824 } 825 826 /* 827 * Return the BlockBackend with DriveInfo @dinfo. 828 * It must exist. 829 */ 830 BlockBackend *blk_by_legacy_dinfo(DriveInfo *dinfo) 831 { 832 BlockBackend *blk = NULL; 833 GLOBAL_STATE_CODE(); 834 835 while ((blk = blk_next(blk)) != NULL) { 836 if (blk->legacy_dinfo == dinfo) { 837 return blk; 838 } 839 } 840 abort(); 841 } 842 843 /* 844 * Returns a pointer to the publicly accessible fields of @blk. 845 */ 846 BlockBackendPublic *blk_get_public(BlockBackend *blk) 847 { 848 GLOBAL_STATE_CODE(); 849 return &blk->public; 850 } 851 852 /* 853 * Returns a BlockBackend given the associated @public fields. 854 */ 855 BlockBackend *blk_by_public(BlockBackendPublic *public) 856 { 857 GLOBAL_STATE_CODE(); 858 return container_of(public, BlockBackend, public); 859 } 860 861 /* 862 * Disassociates the currently associated BlockDriverState from @blk. 863 */ 864 void blk_remove_bs(BlockBackend *blk) 865 { 866 ThrottleGroupMember *tgm = &blk->public.throttle_group_member; 867 BdrvChild *root; 868 869 GLOBAL_STATE_CODE(); 870 871 notifier_list_notify(&blk->remove_bs_notifiers, blk); 872 if (tgm->throttle_state) { 873 BlockDriverState *bs = blk_bs(blk); 874 875 /* 876 * Take a ref in case blk_bs() changes across bdrv_drained_begin(), for 877 * example, if a temporary filter node is removed by a blockjob. 878 */ 879 bdrv_ref(bs); 880 bdrv_drained_begin(bs); 881 throttle_group_detach_aio_context(tgm); 882 throttle_group_attach_aio_context(tgm, qemu_get_aio_context()); 883 bdrv_drained_end(bs); 884 bdrv_unref(bs); 885 } 886 887 blk_update_root_state(blk); 888 889 /* bdrv_root_unref_child() will cause blk->root to become stale and may 890 * switch to a completion coroutine later on. Let's drain all I/O here 891 * to avoid that and a potential QEMU crash. 892 */ 893 blk_drain(blk); 894 root = blk->root; 895 blk->root = NULL; 896 bdrv_root_unref_child(root); 897 } 898 899 /* 900 * Associates a new BlockDriverState with @blk. 901 */ 902 int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp) 903 { 904 ThrottleGroupMember *tgm = &blk->public.throttle_group_member; 905 GLOBAL_STATE_CODE(); 906 bdrv_ref(bs); 907 blk->root = bdrv_root_attach_child(bs, "root", &child_root, 908 BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, 909 blk->perm, blk->shared_perm, 910 blk, errp); 911 if (blk->root == NULL) { 912 return -EPERM; 913 } 914 915 notifier_list_notify(&blk->insert_bs_notifiers, blk); 916 if (tgm->throttle_state) { 917 throttle_group_detach_aio_context(tgm); 918 throttle_group_attach_aio_context(tgm, bdrv_get_aio_context(bs)); 919 } 920 921 return 0; 922 } 923 924 /* 925 * Change BlockDriverState associated with @blk. 926 */ 927 int blk_replace_bs(BlockBackend *blk, BlockDriverState *new_bs, Error **errp) 928 { 929 GLOBAL_STATE_CODE(); 930 return bdrv_replace_child_bs(blk->root, new_bs, errp); 931 } 932 933 /* 934 * Sets the permission bitmasks that the user of the BlockBackend needs. 935 */ 936 int blk_set_perm(BlockBackend *blk, uint64_t perm, uint64_t shared_perm, 937 Error **errp) 938 { 939 int ret; 940 GLOBAL_STATE_CODE(); 941 942 if (blk->root && !blk->disable_perm) { 943 ret = bdrv_child_try_set_perm(blk->root, perm, shared_perm, errp); 944 if (ret < 0) { 945 return ret; 946 } 947 } 948 949 blk->perm = perm; 950 blk->shared_perm = shared_perm; 951 952 return 0; 953 } 954 955 void blk_get_perm(BlockBackend *blk, uint64_t *perm, uint64_t *shared_perm) 956 { 957 GLOBAL_STATE_CODE(); 958 *perm = blk->perm; 959 *shared_perm = blk->shared_perm; 960 } 961 962 /* 963 * Attach device model @dev to @blk. 964 * Return 0 on success, -EBUSY when a device model is attached already. 965 */ 966 int blk_attach_dev(BlockBackend *blk, DeviceState *dev) 967 { 968 GLOBAL_STATE_CODE(); 969 if (blk->dev) { 970 return -EBUSY; 971 } 972 973 /* While migration is still incoming, we don't need to apply the 974 * permissions of guest device BlockBackends. We might still have a block 975 * job or NBD server writing to the image for storage migration. */ 976 if (runstate_check(RUN_STATE_INMIGRATE)) { 977 blk->disable_perm = true; 978 } 979 980 blk_ref(blk); 981 blk->dev = dev; 982 blk_iostatus_reset(blk); 983 984 return 0; 985 } 986 987 /* 988 * Detach device model @dev from @blk. 989 * @dev must be currently attached to @blk. 990 */ 991 void blk_detach_dev(BlockBackend *blk, DeviceState *dev) 992 { 993 assert(blk->dev == dev); 994 GLOBAL_STATE_CODE(); 995 blk->dev = NULL; 996 blk->dev_ops = NULL; 997 blk->dev_opaque = NULL; 998 blk_set_perm(blk, 0, BLK_PERM_ALL, &error_abort); 999 blk_unref(blk); 1000 } 1001 1002 /* 1003 * Return the device model attached to @blk if any, else null. 1004 */ 1005 DeviceState *blk_get_attached_dev(BlockBackend *blk) 1006 { 1007 GLOBAL_STATE_CODE(); 1008 return blk->dev; 1009 } 1010 1011 /* Return the qdev ID, or if no ID is assigned the QOM path, of the block 1012 * device attached to the BlockBackend. */ 1013 char *blk_get_attached_dev_id(BlockBackend *blk) 1014 { 1015 DeviceState *dev = blk->dev; 1016 IO_CODE(); 1017 1018 if (!dev) { 1019 return g_strdup(""); 1020 } else if (dev->id) { 1021 return g_strdup(dev->id); 1022 } 1023 1024 return object_get_canonical_path(OBJECT(dev)) ?: g_strdup(""); 1025 } 1026 1027 /* 1028 * Return the BlockBackend which has the device model @dev attached if it 1029 * exists, else null. 1030 * 1031 * @dev must not be null. 1032 */ 1033 BlockBackend *blk_by_dev(void *dev) 1034 { 1035 BlockBackend *blk = NULL; 1036 1037 GLOBAL_STATE_CODE(); 1038 1039 assert(dev != NULL); 1040 while ((blk = blk_all_next(blk)) != NULL) { 1041 if (blk->dev == dev) { 1042 return blk; 1043 } 1044 } 1045 return NULL; 1046 } 1047 1048 /* 1049 * Set @blk's device model callbacks to @ops. 1050 * @opaque is the opaque argument to pass to the callbacks. 1051 * This is for use by device models. 1052 */ 1053 void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops, 1054 void *opaque) 1055 { 1056 GLOBAL_STATE_CODE(); 1057 blk->dev_ops = ops; 1058 blk->dev_opaque = opaque; 1059 1060 /* Are we currently quiesced? Should we enforce this right now? */ 1061 if (blk->quiesce_counter && ops && ops->drained_begin) { 1062 ops->drained_begin(opaque); 1063 } 1064 } 1065 1066 /* 1067 * Notify @blk's attached device model of media change. 1068 * 1069 * If @load is true, notify of media load. This action can fail, meaning that 1070 * the medium cannot be loaded. @errp is set then. 1071 * 1072 * If @load is false, notify of media eject. This can never fail. 1073 * 1074 * Also send DEVICE_TRAY_MOVED events as appropriate. 1075 */ 1076 void blk_dev_change_media_cb(BlockBackend *blk, bool load, Error **errp) 1077 { 1078 GLOBAL_STATE_CODE(); 1079 if (blk->dev_ops && blk->dev_ops->change_media_cb) { 1080 bool tray_was_open, tray_is_open; 1081 Error *local_err = NULL; 1082 1083 tray_was_open = blk_dev_is_tray_open(blk); 1084 blk->dev_ops->change_media_cb(blk->dev_opaque, load, &local_err); 1085 if (local_err) { 1086 assert(load == true); 1087 error_propagate(errp, local_err); 1088 return; 1089 } 1090 tray_is_open = blk_dev_is_tray_open(blk); 1091 1092 if (tray_was_open != tray_is_open) { 1093 char *id = blk_get_attached_dev_id(blk); 1094 qapi_event_send_device_tray_moved(blk_name(blk), id, tray_is_open); 1095 g_free(id); 1096 } 1097 } 1098 } 1099 1100 static void blk_root_change_media(BdrvChild *child, bool load) 1101 { 1102 blk_dev_change_media_cb(child->opaque, load, NULL); 1103 } 1104 1105 /* 1106 * Does @blk's attached device model have removable media? 1107 * %true if no device model is attached. 1108 */ 1109 bool blk_dev_has_removable_media(BlockBackend *blk) 1110 { 1111 GLOBAL_STATE_CODE(); 1112 return !blk->dev || (blk->dev_ops && blk->dev_ops->change_media_cb); 1113 } 1114 1115 /* 1116 * Does @blk's attached device model have a tray? 1117 */ 1118 bool blk_dev_has_tray(BlockBackend *blk) 1119 { 1120 IO_CODE(); 1121 return blk->dev_ops && blk->dev_ops->is_tray_open; 1122 } 1123 1124 /* 1125 * Notify @blk's attached device model of a media eject request. 1126 * If @force is true, the medium is about to be yanked out forcefully. 1127 */ 1128 void blk_dev_eject_request(BlockBackend *blk, bool force) 1129 { 1130 GLOBAL_STATE_CODE(); 1131 if (blk->dev_ops && blk->dev_ops->eject_request_cb) { 1132 blk->dev_ops->eject_request_cb(blk->dev_opaque, force); 1133 } 1134 } 1135 1136 /* 1137 * Does @blk's attached device model have a tray, and is it open? 1138 */ 1139 bool blk_dev_is_tray_open(BlockBackend *blk) 1140 { 1141 IO_CODE(); 1142 if (blk_dev_has_tray(blk)) { 1143 return blk->dev_ops->is_tray_open(blk->dev_opaque); 1144 } 1145 return false; 1146 } 1147 1148 /* 1149 * Does @blk's attached device model have the medium locked? 1150 * %false if the device model has no such lock. 1151 */ 1152 bool blk_dev_is_medium_locked(BlockBackend *blk) 1153 { 1154 GLOBAL_STATE_CODE(); 1155 if (blk->dev_ops && blk->dev_ops->is_medium_locked) { 1156 return blk->dev_ops->is_medium_locked(blk->dev_opaque); 1157 } 1158 return false; 1159 } 1160 1161 /* 1162 * Notify @blk's attached device model of a backend size change. 1163 */ 1164 static void blk_root_resize(BdrvChild *child) 1165 { 1166 BlockBackend *blk = child->opaque; 1167 1168 if (blk->dev_ops && blk->dev_ops->resize_cb) { 1169 blk->dev_ops->resize_cb(blk->dev_opaque); 1170 } 1171 } 1172 1173 void blk_iostatus_enable(BlockBackend *blk) 1174 { 1175 GLOBAL_STATE_CODE(); 1176 blk->iostatus_enabled = true; 1177 blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK; 1178 } 1179 1180 /* The I/O status is only enabled if the drive explicitly 1181 * enables it _and_ the VM is configured to stop on errors */ 1182 bool blk_iostatus_is_enabled(const BlockBackend *blk) 1183 { 1184 IO_CODE(); 1185 return (blk->iostatus_enabled && 1186 (blk->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC || 1187 blk->on_write_error == BLOCKDEV_ON_ERROR_STOP || 1188 blk->on_read_error == BLOCKDEV_ON_ERROR_STOP)); 1189 } 1190 1191 BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk) 1192 { 1193 GLOBAL_STATE_CODE(); 1194 return blk->iostatus; 1195 } 1196 1197 void blk_iostatus_disable(BlockBackend *blk) 1198 { 1199 GLOBAL_STATE_CODE(); 1200 blk->iostatus_enabled = false; 1201 } 1202 1203 void blk_iostatus_reset(BlockBackend *blk) 1204 { 1205 GLOBAL_STATE_CODE(); 1206 if (blk_iostatus_is_enabled(blk)) { 1207 blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK; 1208 } 1209 } 1210 1211 void blk_iostatus_set_err(BlockBackend *blk, int error) 1212 { 1213 IO_CODE(); 1214 assert(blk_iostatus_is_enabled(blk)); 1215 if (blk->iostatus == BLOCK_DEVICE_IO_STATUS_OK) { 1216 blk->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE : 1217 BLOCK_DEVICE_IO_STATUS_FAILED; 1218 } 1219 } 1220 1221 void blk_set_allow_write_beyond_eof(BlockBackend *blk, bool allow) 1222 { 1223 IO_CODE(); 1224 blk->allow_write_beyond_eof = allow; 1225 } 1226 1227 void blk_set_allow_aio_context_change(BlockBackend *blk, bool allow) 1228 { 1229 IO_CODE(); 1230 blk->allow_aio_context_change = allow; 1231 } 1232 1233 void blk_set_disable_request_queuing(BlockBackend *blk, bool disable) 1234 { 1235 IO_CODE(); 1236 blk->disable_request_queuing = disable; 1237 } 1238 1239 static int blk_check_byte_request(BlockBackend *blk, int64_t offset, 1240 int64_t bytes) 1241 { 1242 int64_t len; 1243 1244 if (bytes < 0) { 1245 return -EIO; 1246 } 1247 1248 if (!blk_is_available(blk)) { 1249 return -ENOMEDIUM; 1250 } 1251 1252 if (offset < 0) { 1253 return -EIO; 1254 } 1255 1256 if (!blk->allow_write_beyond_eof) { 1257 len = blk_getlength(blk); 1258 if (len < 0) { 1259 return len; 1260 } 1261 1262 if (offset > len || len - offset < bytes) { 1263 return -EIO; 1264 } 1265 } 1266 1267 return 0; 1268 } 1269 1270 /* To be called between exactly one pair of blk_inc/dec_in_flight() */ 1271 static void coroutine_fn blk_wait_while_drained(BlockBackend *blk) 1272 { 1273 assert(blk->in_flight > 0); 1274 1275 if (blk->quiesce_counter && !blk->disable_request_queuing) { 1276 blk_dec_in_flight(blk); 1277 qemu_co_queue_wait(&blk->queued_requests, NULL); 1278 blk_inc_in_flight(blk); 1279 } 1280 } 1281 1282 /* To be called between exactly one pair of blk_inc/dec_in_flight() */ 1283 static int coroutine_fn 1284 blk_co_do_preadv_part(BlockBackend *blk, int64_t offset, int64_t bytes, 1285 QEMUIOVector *qiov, size_t qiov_offset, 1286 BdrvRequestFlags flags) 1287 { 1288 int ret; 1289 BlockDriverState *bs; 1290 IO_CODE(); 1291 1292 blk_wait_while_drained(blk); 1293 1294 /* Call blk_bs() only after waiting, the graph may have changed */ 1295 bs = blk_bs(blk); 1296 trace_blk_co_preadv(blk, bs, offset, bytes, flags); 1297 1298 ret = blk_check_byte_request(blk, offset, bytes); 1299 if (ret < 0) { 1300 return ret; 1301 } 1302 1303 bdrv_inc_in_flight(bs); 1304 1305 /* throttling disk I/O */ 1306 if (blk->public.throttle_group_member.throttle_state) { 1307 throttle_group_co_io_limits_intercept(&blk->public.throttle_group_member, 1308 bytes, false); 1309 } 1310 1311 ret = bdrv_co_preadv_part(blk->root, offset, bytes, qiov, qiov_offset, 1312 flags); 1313 bdrv_dec_in_flight(bs); 1314 return ret; 1315 } 1316 1317 int coroutine_fn blk_co_pread(BlockBackend *blk, int64_t offset, int64_t bytes, 1318 void *buf, BdrvRequestFlags flags) 1319 { 1320 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes); 1321 IO_OR_GS_CODE(); 1322 1323 assert(bytes <= SIZE_MAX); 1324 1325 return blk_co_preadv(blk, offset, bytes, &qiov, flags); 1326 } 1327 1328 int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset, 1329 int64_t bytes, QEMUIOVector *qiov, 1330 BdrvRequestFlags flags) 1331 { 1332 int ret; 1333 IO_OR_GS_CODE(); 1334 1335 blk_inc_in_flight(blk); 1336 ret = blk_co_do_preadv_part(blk, offset, bytes, qiov, 0, flags); 1337 blk_dec_in_flight(blk); 1338 1339 return ret; 1340 } 1341 1342 int coroutine_fn blk_co_preadv_part(BlockBackend *blk, int64_t offset, 1343 int64_t bytes, QEMUIOVector *qiov, 1344 size_t qiov_offset, BdrvRequestFlags flags) 1345 { 1346 int ret; 1347 IO_OR_GS_CODE(); 1348 1349 blk_inc_in_flight(blk); 1350 ret = blk_co_do_preadv_part(blk, offset, bytes, qiov, qiov_offset, flags); 1351 blk_dec_in_flight(blk); 1352 1353 return ret; 1354 } 1355 1356 /* To be called between exactly one pair of blk_inc/dec_in_flight() */ 1357 static int coroutine_fn 1358 blk_co_do_pwritev_part(BlockBackend *blk, int64_t offset, int64_t bytes, 1359 QEMUIOVector *qiov, size_t qiov_offset, 1360 BdrvRequestFlags flags) 1361 { 1362 int ret; 1363 BlockDriverState *bs; 1364 IO_CODE(); 1365 1366 blk_wait_while_drained(blk); 1367 1368 /* Call blk_bs() only after waiting, the graph may have changed */ 1369 bs = blk_bs(blk); 1370 trace_blk_co_pwritev(blk, bs, offset, bytes, flags); 1371 1372 ret = blk_check_byte_request(blk, offset, bytes); 1373 if (ret < 0) { 1374 return ret; 1375 } 1376 1377 bdrv_inc_in_flight(bs); 1378 /* throttling disk I/O */ 1379 if (blk->public.throttle_group_member.throttle_state) { 1380 throttle_group_co_io_limits_intercept(&blk->public.throttle_group_member, 1381 bytes, true); 1382 } 1383 1384 if (!blk->enable_write_cache) { 1385 flags |= BDRV_REQ_FUA; 1386 } 1387 1388 ret = bdrv_co_pwritev_part(blk->root, offset, bytes, qiov, qiov_offset, 1389 flags); 1390 bdrv_dec_in_flight(bs); 1391 return ret; 1392 } 1393 1394 int coroutine_fn blk_co_pwritev_part(BlockBackend *blk, int64_t offset, 1395 int64_t bytes, 1396 QEMUIOVector *qiov, size_t qiov_offset, 1397 BdrvRequestFlags flags) 1398 { 1399 int ret; 1400 IO_OR_GS_CODE(); 1401 1402 blk_inc_in_flight(blk); 1403 ret = blk_co_do_pwritev_part(blk, offset, bytes, qiov, qiov_offset, flags); 1404 blk_dec_in_flight(blk); 1405 1406 return ret; 1407 } 1408 1409 int coroutine_fn blk_co_pwrite(BlockBackend *blk, int64_t offset, int64_t bytes, 1410 const void *buf, BdrvRequestFlags flags) 1411 { 1412 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes); 1413 IO_OR_GS_CODE(); 1414 1415 assert(bytes <= SIZE_MAX); 1416 1417 return blk_co_pwritev(blk, offset, bytes, &qiov, flags); 1418 } 1419 1420 int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset, 1421 int64_t bytes, QEMUIOVector *qiov, 1422 BdrvRequestFlags flags) 1423 { 1424 IO_OR_GS_CODE(); 1425 return blk_co_pwritev_part(blk, offset, bytes, qiov, 0, flags); 1426 } 1427 1428 typedef struct BlkRwCo { 1429 BlockBackend *blk; 1430 int64_t offset; 1431 void *iobuf; 1432 int ret; 1433 BdrvRequestFlags flags; 1434 } BlkRwCo; 1435 1436 int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags) 1437 { 1438 GLOBAL_STATE_CODE(); 1439 return bdrv_make_zero(blk->root, flags); 1440 } 1441 1442 void blk_inc_in_flight(BlockBackend *blk) 1443 { 1444 IO_CODE(); 1445 qatomic_inc(&blk->in_flight); 1446 } 1447 1448 void blk_dec_in_flight(BlockBackend *blk) 1449 { 1450 IO_CODE(); 1451 qatomic_dec(&blk->in_flight); 1452 aio_wait_kick(); 1453 } 1454 1455 static void error_callback_bh(void *opaque) 1456 { 1457 struct BlockBackendAIOCB *acb = opaque; 1458 1459 blk_dec_in_flight(acb->blk); 1460 acb->common.cb(acb->common.opaque, acb->ret); 1461 qemu_aio_unref(acb); 1462 } 1463 1464 BlockAIOCB *blk_abort_aio_request(BlockBackend *blk, 1465 BlockCompletionFunc *cb, 1466 void *opaque, int ret) 1467 { 1468 struct BlockBackendAIOCB *acb; 1469 IO_CODE(); 1470 1471 blk_inc_in_flight(blk); 1472 acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque); 1473 acb->blk = blk; 1474 acb->ret = ret; 1475 1476 replay_bh_schedule_oneshot_event(blk_get_aio_context(blk), 1477 error_callback_bh, acb); 1478 return &acb->common; 1479 } 1480 1481 typedef struct BlkAioEmAIOCB { 1482 BlockAIOCB common; 1483 BlkRwCo rwco; 1484 int64_t bytes; 1485 bool has_returned; 1486 } BlkAioEmAIOCB; 1487 1488 static AioContext *blk_aio_em_aiocb_get_aio_context(BlockAIOCB *acb_) 1489 { 1490 BlkAioEmAIOCB *acb = container_of(acb_, BlkAioEmAIOCB, common); 1491 1492 return blk_get_aio_context(acb->rwco.blk); 1493 } 1494 1495 static const AIOCBInfo blk_aio_em_aiocb_info = { 1496 .aiocb_size = sizeof(BlkAioEmAIOCB), 1497 .get_aio_context = blk_aio_em_aiocb_get_aio_context, 1498 }; 1499 1500 static void blk_aio_complete(BlkAioEmAIOCB *acb) 1501 { 1502 if (acb->has_returned) { 1503 acb->common.cb(acb->common.opaque, acb->rwco.ret); 1504 blk_dec_in_flight(acb->rwco.blk); 1505 qemu_aio_unref(acb); 1506 } 1507 } 1508 1509 static void blk_aio_complete_bh(void *opaque) 1510 { 1511 BlkAioEmAIOCB *acb = opaque; 1512 assert(acb->has_returned); 1513 blk_aio_complete(acb); 1514 } 1515 1516 static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, 1517 int64_t bytes, 1518 void *iobuf, CoroutineEntry co_entry, 1519 BdrvRequestFlags flags, 1520 BlockCompletionFunc *cb, void *opaque) 1521 { 1522 BlkAioEmAIOCB *acb; 1523 Coroutine *co; 1524 1525 blk_inc_in_flight(blk); 1526 acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque); 1527 acb->rwco = (BlkRwCo) { 1528 .blk = blk, 1529 .offset = offset, 1530 .iobuf = iobuf, 1531 .flags = flags, 1532 .ret = NOT_DONE, 1533 }; 1534 acb->bytes = bytes; 1535 acb->has_returned = false; 1536 1537 co = qemu_coroutine_create(co_entry, acb); 1538 bdrv_coroutine_enter(blk_bs(blk), co); 1539 1540 acb->has_returned = true; 1541 if (acb->rwco.ret != NOT_DONE) { 1542 replay_bh_schedule_oneshot_event(blk_get_aio_context(blk), 1543 blk_aio_complete_bh, acb); 1544 } 1545 1546 return &acb->common; 1547 } 1548 1549 static void coroutine_fn blk_aio_read_entry(void *opaque) 1550 { 1551 BlkAioEmAIOCB *acb = opaque; 1552 BlkRwCo *rwco = &acb->rwco; 1553 QEMUIOVector *qiov = rwco->iobuf; 1554 1555 assert(qiov->size == acb->bytes); 1556 rwco->ret = blk_co_do_preadv_part(rwco->blk, rwco->offset, acb->bytes, qiov, 1557 0, rwco->flags); 1558 blk_aio_complete(acb); 1559 } 1560 1561 static void coroutine_fn blk_aio_write_entry(void *opaque) 1562 { 1563 BlkAioEmAIOCB *acb = opaque; 1564 BlkRwCo *rwco = &acb->rwco; 1565 QEMUIOVector *qiov = rwco->iobuf; 1566 1567 assert(!qiov || qiov->size == acb->bytes); 1568 rwco->ret = blk_co_do_pwritev_part(rwco->blk, rwco->offset, acb->bytes, 1569 qiov, 0, rwco->flags); 1570 blk_aio_complete(acb); 1571 } 1572 1573 BlockAIOCB *blk_aio_pwrite_zeroes(BlockBackend *blk, int64_t offset, 1574 int64_t bytes, BdrvRequestFlags flags, 1575 BlockCompletionFunc *cb, void *opaque) 1576 { 1577 IO_CODE(); 1578 return blk_aio_prwv(blk, offset, bytes, NULL, blk_aio_write_entry, 1579 flags | BDRV_REQ_ZERO_WRITE, cb, opaque); 1580 } 1581 1582 int64_t blk_getlength(BlockBackend *blk) 1583 { 1584 IO_CODE(); 1585 if (!blk_is_available(blk)) { 1586 return -ENOMEDIUM; 1587 } 1588 1589 return bdrv_getlength(blk_bs(blk)); 1590 } 1591 1592 void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr) 1593 { 1594 IO_CODE(); 1595 if (!blk_bs(blk)) { 1596 *nb_sectors_ptr = 0; 1597 } else { 1598 bdrv_get_geometry(blk_bs(blk), nb_sectors_ptr); 1599 } 1600 } 1601 1602 int64_t blk_nb_sectors(BlockBackend *blk) 1603 { 1604 IO_CODE(); 1605 if (!blk_is_available(blk)) { 1606 return -ENOMEDIUM; 1607 } 1608 1609 return bdrv_nb_sectors(blk_bs(blk)); 1610 } 1611 1612 BlockAIOCB *blk_aio_preadv(BlockBackend *blk, int64_t offset, 1613 QEMUIOVector *qiov, BdrvRequestFlags flags, 1614 BlockCompletionFunc *cb, void *opaque) 1615 { 1616 IO_CODE(); 1617 assert((uint64_t)qiov->size <= INT64_MAX); 1618 return blk_aio_prwv(blk, offset, qiov->size, qiov, 1619 blk_aio_read_entry, flags, cb, opaque); 1620 } 1621 1622 BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset, 1623 QEMUIOVector *qiov, BdrvRequestFlags flags, 1624 BlockCompletionFunc *cb, void *opaque) 1625 { 1626 IO_CODE(); 1627 assert((uint64_t)qiov->size <= INT64_MAX); 1628 return blk_aio_prwv(blk, offset, qiov->size, qiov, 1629 blk_aio_write_entry, flags, cb, opaque); 1630 } 1631 1632 void blk_aio_cancel(BlockAIOCB *acb) 1633 { 1634 GLOBAL_STATE_CODE(); 1635 bdrv_aio_cancel(acb); 1636 } 1637 1638 void blk_aio_cancel_async(BlockAIOCB *acb) 1639 { 1640 IO_CODE(); 1641 bdrv_aio_cancel_async(acb); 1642 } 1643 1644 /* To be called between exactly one pair of blk_inc/dec_in_flight() */ 1645 static int coroutine_fn 1646 blk_co_do_ioctl(BlockBackend *blk, unsigned long int req, void *buf) 1647 { 1648 IO_CODE(); 1649 1650 blk_wait_while_drained(blk); 1651 1652 if (!blk_is_available(blk)) { 1653 return -ENOMEDIUM; 1654 } 1655 1656 return bdrv_co_ioctl(blk_bs(blk), req, buf); 1657 } 1658 1659 int coroutine_fn blk_co_ioctl(BlockBackend *blk, unsigned long int req, 1660 void *buf) 1661 { 1662 int ret; 1663 IO_OR_GS_CODE(); 1664 1665 blk_inc_in_flight(blk); 1666 ret = blk_co_do_ioctl(blk, req, buf); 1667 blk_dec_in_flight(blk); 1668 1669 return ret; 1670 } 1671 1672 static void coroutine_fn blk_aio_ioctl_entry(void *opaque) 1673 { 1674 BlkAioEmAIOCB *acb = opaque; 1675 BlkRwCo *rwco = &acb->rwco; 1676 1677 rwco->ret = blk_co_do_ioctl(rwco->blk, rwco->offset, rwco->iobuf); 1678 1679 blk_aio_complete(acb); 1680 } 1681 1682 BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf, 1683 BlockCompletionFunc *cb, void *opaque) 1684 { 1685 IO_CODE(); 1686 return blk_aio_prwv(blk, req, 0, buf, blk_aio_ioctl_entry, 0, cb, opaque); 1687 } 1688 1689 /* To be called between exactly one pair of blk_inc/dec_in_flight() */ 1690 static int coroutine_fn 1691 blk_co_do_pdiscard(BlockBackend *blk, int64_t offset, int64_t bytes) 1692 { 1693 int ret; 1694 IO_CODE(); 1695 1696 blk_wait_while_drained(blk); 1697 1698 ret = blk_check_byte_request(blk, offset, bytes); 1699 if (ret < 0) { 1700 return ret; 1701 } 1702 1703 return bdrv_co_pdiscard(blk->root, offset, bytes); 1704 } 1705 1706 static void coroutine_fn blk_aio_pdiscard_entry(void *opaque) 1707 { 1708 BlkAioEmAIOCB *acb = opaque; 1709 BlkRwCo *rwco = &acb->rwco; 1710 1711 rwco->ret = blk_co_do_pdiscard(rwco->blk, rwco->offset, acb->bytes); 1712 blk_aio_complete(acb); 1713 } 1714 1715 BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk, 1716 int64_t offset, int64_t bytes, 1717 BlockCompletionFunc *cb, void *opaque) 1718 { 1719 IO_CODE(); 1720 return blk_aio_prwv(blk, offset, bytes, NULL, blk_aio_pdiscard_entry, 0, 1721 cb, opaque); 1722 } 1723 1724 int coroutine_fn blk_co_pdiscard(BlockBackend *blk, int64_t offset, 1725 int64_t bytes) 1726 { 1727 int ret; 1728 IO_OR_GS_CODE(); 1729 1730 blk_inc_in_flight(blk); 1731 ret = blk_co_do_pdiscard(blk, offset, bytes); 1732 blk_dec_in_flight(blk); 1733 1734 return ret; 1735 } 1736 1737 /* To be called between exactly one pair of blk_inc/dec_in_flight() */ 1738 static int coroutine_fn blk_co_do_flush(BlockBackend *blk) 1739 { 1740 blk_wait_while_drained(blk); 1741 IO_CODE(); 1742 1743 if (!blk_is_available(blk)) { 1744 return -ENOMEDIUM; 1745 } 1746 1747 return bdrv_co_flush(blk_bs(blk)); 1748 } 1749 1750 static void coroutine_fn blk_aio_flush_entry(void *opaque) 1751 { 1752 BlkAioEmAIOCB *acb = opaque; 1753 BlkRwCo *rwco = &acb->rwco; 1754 1755 rwco->ret = blk_co_do_flush(rwco->blk); 1756 blk_aio_complete(acb); 1757 } 1758 1759 BlockAIOCB *blk_aio_flush(BlockBackend *blk, 1760 BlockCompletionFunc *cb, void *opaque) 1761 { 1762 IO_CODE(); 1763 return blk_aio_prwv(blk, 0, 0, NULL, blk_aio_flush_entry, 0, cb, opaque); 1764 } 1765 1766 int coroutine_fn blk_co_flush(BlockBackend *blk) 1767 { 1768 int ret; 1769 IO_OR_GS_CODE(); 1770 1771 blk_inc_in_flight(blk); 1772 ret = blk_co_do_flush(blk); 1773 blk_dec_in_flight(blk); 1774 1775 return ret; 1776 } 1777 1778 void blk_drain(BlockBackend *blk) 1779 { 1780 BlockDriverState *bs = blk_bs(blk); 1781 GLOBAL_STATE_CODE(); 1782 1783 if (bs) { 1784 bdrv_ref(bs); 1785 bdrv_drained_begin(bs); 1786 } 1787 1788 /* We may have -ENOMEDIUM completions in flight */ 1789 AIO_WAIT_WHILE(blk_get_aio_context(blk), 1790 qatomic_mb_read(&blk->in_flight) > 0); 1791 1792 if (bs) { 1793 bdrv_drained_end(bs); 1794 bdrv_unref(bs); 1795 } 1796 } 1797 1798 void blk_drain_all(void) 1799 { 1800 BlockBackend *blk = NULL; 1801 1802 GLOBAL_STATE_CODE(); 1803 1804 bdrv_drain_all_begin(); 1805 1806 while ((blk = blk_all_next(blk)) != NULL) { 1807 AioContext *ctx = blk_get_aio_context(blk); 1808 1809 aio_context_acquire(ctx); 1810 1811 /* We may have -ENOMEDIUM completions in flight */ 1812 AIO_WAIT_WHILE(ctx, qatomic_mb_read(&blk->in_flight) > 0); 1813 1814 aio_context_release(ctx); 1815 } 1816 1817 bdrv_drain_all_end(); 1818 } 1819 1820 void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error, 1821 BlockdevOnError on_write_error) 1822 { 1823 GLOBAL_STATE_CODE(); 1824 blk->on_read_error = on_read_error; 1825 blk->on_write_error = on_write_error; 1826 } 1827 1828 BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read) 1829 { 1830 IO_CODE(); 1831 return is_read ? blk->on_read_error : blk->on_write_error; 1832 } 1833 1834 BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read, 1835 int error) 1836 { 1837 BlockdevOnError on_err = blk_get_on_error(blk, is_read); 1838 IO_CODE(); 1839 1840 switch (on_err) { 1841 case BLOCKDEV_ON_ERROR_ENOSPC: 1842 return (error == ENOSPC) ? 1843 BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT; 1844 case BLOCKDEV_ON_ERROR_STOP: 1845 return BLOCK_ERROR_ACTION_STOP; 1846 case BLOCKDEV_ON_ERROR_REPORT: 1847 return BLOCK_ERROR_ACTION_REPORT; 1848 case BLOCKDEV_ON_ERROR_IGNORE: 1849 return BLOCK_ERROR_ACTION_IGNORE; 1850 case BLOCKDEV_ON_ERROR_AUTO: 1851 default: 1852 abort(); 1853 } 1854 } 1855 1856 static void send_qmp_error_event(BlockBackend *blk, 1857 BlockErrorAction action, 1858 bool is_read, int error) 1859 { 1860 IoOperationType optype; 1861 BlockDriverState *bs = blk_bs(blk); 1862 1863 optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE; 1864 qapi_event_send_block_io_error(blk_name(blk), !!bs, 1865 bs ? bdrv_get_node_name(bs) : NULL, optype, 1866 action, blk_iostatus_is_enabled(blk), 1867 error == ENOSPC, strerror(error)); 1868 } 1869 1870 /* This is done by device models because, while the block layer knows 1871 * about the error, it does not know whether an operation comes from 1872 * the device or the block layer (from a job, for example). 1873 */ 1874 void blk_error_action(BlockBackend *blk, BlockErrorAction action, 1875 bool is_read, int error) 1876 { 1877 assert(error >= 0); 1878 IO_CODE(); 1879 1880 if (action == BLOCK_ERROR_ACTION_STOP) { 1881 /* First set the iostatus, so that "info block" returns an iostatus 1882 * that matches the events raised so far (an additional error iostatus 1883 * is fine, but not a lost one). 1884 */ 1885 blk_iostatus_set_err(blk, error); 1886 1887 /* Then raise the request to stop the VM and the event. 1888 * qemu_system_vmstop_request_prepare has two effects. First, 1889 * it ensures that the STOP event always comes after the 1890 * BLOCK_IO_ERROR event. Second, it ensures that even if management 1891 * can observe the STOP event and do a "cont" before the STOP 1892 * event is issued, the VM will not stop. In this case, vm_start() 1893 * also ensures that the STOP/RESUME pair of events is emitted. 1894 */ 1895 qemu_system_vmstop_request_prepare(); 1896 send_qmp_error_event(blk, action, is_read, error); 1897 qemu_system_vmstop_request(RUN_STATE_IO_ERROR); 1898 } else { 1899 send_qmp_error_event(blk, action, is_read, error); 1900 } 1901 } 1902 1903 /* 1904 * Returns true if the BlockBackend can support taking write permissions 1905 * (because its root node is not read-only). 1906 */ 1907 bool blk_supports_write_perm(BlockBackend *blk) 1908 { 1909 BlockDriverState *bs = blk_bs(blk); 1910 GLOBAL_STATE_CODE(); 1911 1912 if (bs) { 1913 return !bdrv_is_read_only(bs); 1914 } else { 1915 return blk->root_state.open_flags & BDRV_O_RDWR; 1916 } 1917 } 1918 1919 /* 1920 * Returns true if the BlockBackend can be written to in its current 1921 * configuration (i.e. if write permission have been requested) 1922 */ 1923 bool blk_is_writable(BlockBackend *blk) 1924 { 1925 IO_CODE(); 1926 return blk->perm & BLK_PERM_WRITE; 1927 } 1928 1929 bool blk_is_sg(BlockBackend *blk) 1930 { 1931 BlockDriverState *bs = blk_bs(blk); 1932 GLOBAL_STATE_CODE(); 1933 1934 if (!bs) { 1935 return false; 1936 } 1937 1938 return bdrv_is_sg(bs); 1939 } 1940 1941 bool blk_enable_write_cache(BlockBackend *blk) 1942 { 1943 IO_CODE(); 1944 return blk->enable_write_cache; 1945 } 1946 1947 void blk_set_enable_write_cache(BlockBackend *blk, bool wce) 1948 { 1949 GLOBAL_STATE_CODE(); 1950 blk->enable_write_cache = wce; 1951 } 1952 1953 void blk_activate(BlockBackend *blk, Error **errp) 1954 { 1955 BlockDriverState *bs = blk_bs(blk); 1956 GLOBAL_STATE_CODE(); 1957 1958 if (!bs) { 1959 error_setg(errp, "Device '%s' has no medium", blk->name); 1960 return; 1961 } 1962 1963 bdrv_activate(bs, errp); 1964 } 1965 1966 bool blk_is_inserted(BlockBackend *blk) 1967 { 1968 BlockDriverState *bs = blk_bs(blk); 1969 IO_CODE(); 1970 1971 return bs && bdrv_is_inserted(bs); 1972 } 1973 1974 bool blk_is_available(BlockBackend *blk) 1975 { 1976 IO_CODE(); 1977 return blk_is_inserted(blk) && !blk_dev_is_tray_open(blk); 1978 } 1979 1980 void blk_lock_medium(BlockBackend *blk, bool locked) 1981 { 1982 BlockDriverState *bs = blk_bs(blk); 1983 IO_CODE(); 1984 1985 if (bs) { 1986 bdrv_lock_medium(bs, locked); 1987 } 1988 } 1989 1990 void blk_eject(BlockBackend *blk, bool eject_flag) 1991 { 1992 BlockDriverState *bs = blk_bs(blk); 1993 char *id; 1994 IO_CODE(); 1995 1996 if (bs) { 1997 bdrv_eject(bs, eject_flag); 1998 } 1999 2000 /* Whether or not we ejected on the backend, 2001 * the frontend experienced a tray event. */ 2002 id = blk_get_attached_dev_id(blk); 2003 qapi_event_send_device_tray_moved(blk_name(blk), id, 2004 eject_flag); 2005 g_free(id); 2006 } 2007 2008 int blk_get_flags(BlockBackend *blk) 2009 { 2010 BlockDriverState *bs = blk_bs(blk); 2011 GLOBAL_STATE_CODE(); 2012 2013 if (bs) { 2014 return bdrv_get_flags(bs); 2015 } else { 2016 return blk->root_state.open_flags; 2017 } 2018 } 2019 2020 /* Returns the minimum request alignment, in bytes; guaranteed nonzero */ 2021 uint32_t blk_get_request_alignment(BlockBackend *blk) 2022 { 2023 BlockDriverState *bs = blk_bs(blk); 2024 IO_CODE(); 2025 return bs ? bs->bl.request_alignment : BDRV_SECTOR_SIZE; 2026 } 2027 2028 /* Returns the maximum hardware transfer length, in bytes; guaranteed nonzero */ 2029 uint64_t blk_get_max_hw_transfer(BlockBackend *blk) 2030 { 2031 BlockDriverState *bs = blk_bs(blk); 2032 uint64_t max = INT_MAX; 2033 IO_CODE(); 2034 2035 if (bs) { 2036 max = MIN_NON_ZERO(max, bs->bl.max_hw_transfer); 2037 max = MIN_NON_ZERO(max, bs->bl.max_transfer); 2038 } 2039 return ROUND_DOWN(max, blk_get_request_alignment(blk)); 2040 } 2041 2042 /* Returns the maximum transfer length, in bytes; guaranteed nonzero */ 2043 uint32_t blk_get_max_transfer(BlockBackend *blk) 2044 { 2045 BlockDriverState *bs = blk_bs(blk); 2046 uint32_t max = INT_MAX; 2047 IO_CODE(); 2048 2049 if (bs) { 2050 max = MIN_NON_ZERO(max, bs->bl.max_transfer); 2051 } 2052 return ROUND_DOWN(max, blk_get_request_alignment(blk)); 2053 } 2054 2055 int blk_get_max_hw_iov(BlockBackend *blk) 2056 { 2057 IO_CODE(); 2058 return MIN_NON_ZERO(blk->root->bs->bl.max_hw_iov, 2059 blk->root->bs->bl.max_iov); 2060 } 2061 2062 int blk_get_max_iov(BlockBackend *blk) 2063 { 2064 IO_CODE(); 2065 return blk->root->bs->bl.max_iov; 2066 } 2067 2068 void *blk_try_blockalign(BlockBackend *blk, size_t size) 2069 { 2070 IO_CODE(); 2071 return qemu_try_blockalign(blk ? blk_bs(blk) : NULL, size); 2072 } 2073 2074 void *blk_blockalign(BlockBackend *blk, size_t size) 2075 { 2076 IO_CODE(); 2077 return qemu_blockalign(blk ? blk_bs(blk) : NULL, size); 2078 } 2079 2080 bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp) 2081 { 2082 BlockDriverState *bs = blk_bs(blk); 2083 GLOBAL_STATE_CODE(); 2084 2085 if (!bs) { 2086 return false; 2087 } 2088 2089 return bdrv_op_is_blocked(bs, op, errp); 2090 } 2091 2092 void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason) 2093 { 2094 BlockDriverState *bs = blk_bs(blk); 2095 GLOBAL_STATE_CODE(); 2096 2097 if (bs) { 2098 bdrv_op_unblock(bs, op, reason); 2099 } 2100 } 2101 2102 void blk_op_block_all(BlockBackend *blk, Error *reason) 2103 { 2104 BlockDriverState *bs = blk_bs(blk); 2105 GLOBAL_STATE_CODE(); 2106 2107 if (bs) { 2108 bdrv_op_block_all(bs, reason); 2109 } 2110 } 2111 2112 void blk_op_unblock_all(BlockBackend *blk, Error *reason) 2113 { 2114 BlockDriverState *bs = blk_bs(blk); 2115 GLOBAL_STATE_CODE(); 2116 2117 if (bs) { 2118 bdrv_op_unblock_all(bs, reason); 2119 } 2120 } 2121 2122 AioContext *blk_get_aio_context(BlockBackend *blk) 2123 { 2124 BlockDriverState *bs = blk_bs(blk); 2125 IO_CODE(); 2126 2127 if (bs) { 2128 AioContext *ctx = bdrv_get_aio_context(blk_bs(blk)); 2129 assert(ctx == blk->ctx); 2130 } 2131 2132 return blk->ctx; 2133 } 2134 2135 static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb) 2136 { 2137 BlockBackendAIOCB *blk_acb = DO_UPCAST(BlockBackendAIOCB, common, acb); 2138 return blk_get_aio_context(blk_acb->blk); 2139 } 2140 2141 static int blk_do_set_aio_context(BlockBackend *blk, AioContext *new_context, 2142 bool update_root_node, Error **errp) 2143 { 2144 BlockDriverState *bs = blk_bs(blk); 2145 ThrottleGroupMember *tgm = &blk->public.throttle_group_member; 2146 int ret; 2147 2148 if (bs) { 2149 bdrv_ref(bs); 2150 2151 if (update_root_node) { 2152 ret = bdrv_child_try_set_aio_context(bs, new_context, blk->root, 2153 errp); 2154 if (ret < 0) { 2155 bdrv_unref(bs); 2156 return ret; 2157 } 2158 } 2159 if (tgm->throttle_state) { 2160 bdrv_drained_begin(bs); 2161 throttle_group_detach_aio_context(tgm); 2162 throttle_group_attach_aio_context(tgm, new_context); 2163 bdrv_drained_end(bs); 2164 } 2165 2166 bdrv_unref(bs); 2167 } 2168 2169 blk->ctx = new_context; 2170 return 0; 2171 } 2172 2173 int blk_set_aio_context(BlockBackend *blk, AioContext *new_context, 2174 Error **errp) 2175 { 2176 GLOBAL_STATE_CODE(); 2177 return blk_do_set_aio_context(blk, new_context, true, errp); 2178 } 2179 2180 static bool blk_root_can_set_aio_ctx(BdrvChild *child, AioContext *ctx, 2181 GSList **ignore, Error **errp) 2182 { 2183 BlockBackend *blk = child->opaque; 2184 2185 if (blk->allow_aio_context_change) { 2186 return true; 2187 } 2188 2189 /* Only manually created BlockBackends that are not attached to anything 2190 * can change their AioContext without updating their user. */ 2191 if (!blk->name || blk->dev) { 2192 /* TODO Add BB name/QOM path */ 2193 error_setg(errp, "Cannot change iothread of active block backend"); 2194 return false; 2195 } 2196 2197 return true; 2198 } 2199 2200 static void blk_root_set_aio_ctx(BdrvChild *child, AioContext *ctx, 2201 GSList **ignore) 2202 { 2203 BlockBackend *blk = child->opaque; 2204 blk_do_set_aio_context(blk, ctx, false, &error_abort); 2205 } 2206 2207 void blk_add_aio_context_notifier(BlockBackend *blk, 2208 void (*attached_aio_context)(AioContext *new_context, void *opaque), 2209 void (*detach_aio_context)(void *opaque), void *opaque) 2210 { 2211 BlockBackendAioNotifier *notifier; 2212 BlockDriverState *bs = blk_bs(blk); 2213 GLOBAL_STATE_CODE(); 2214 2215 notifier = g_new(BlockBackendAioNotifier, 1); 2216 notifier->attached_aio_context = attached_aio_context; 2217 notifier->detach_aio_context = detach_aio_context; 2218 notifier->opaque = opaque; 2219 QLIST_INSERT_HEAD(&blk->aio_notifiers, notifier, list); 2220 2221 if (bs) { 2222 bdrv_add_aio_context_notifier(bs, attached_aio_context, 2223 detach_aio_context, opaque); 2224 } 2225 } 2226 2227 void blk_remove_aio_context_notifier(BlockBackend *blk, 2228 void (*attached_aio_context)(AioContext *, 2229 void *), 2230 void (*detach_aio_context)(void *), 2231 void *opaque) 2232 { 2233 BlockBackendAioNotifier *notifier; 2234 BlockDriverState *bs = blk_bs(blk); 2235 2236 GLOBAL_STATE_CODE(); 2237 2238 if (bs) { 2239 bdrv_remove_aio_context_notifier(bs, attached_aio_context, 2240 detach_aio_context, opaque); 2241 } 2242 2243 QLIST_FOREACH(notifier, &blk->aio_notifiers, list) { 2244 if (notifier->attached_aio_context == attached_aio_context && 2245 notifier->detach_aio_context == detach_aio_context && 2246 notifier->opaque == opaque) { 2247 QLIST_REMOVE(notifier, list); 2248 g_free(notifier); 2249 return; 2250 } 2251 } 2252 2253 abort(); 2254 } 2255 2256 void blk_add_remove_bs_notifier(BlockBackend *blk, Notifier *notify) 2257 { 2258 GLOBAL_STATE_CODE(); 2259 notifier_list_add(&blk->remove_bs_notifiers, notify); 2260 } 2261 2262 void blk_add_insert_bs_notifier(BlockBackend *blk, Notifier *notify) 2263 { 2264 GLOBAL_STATE_CODE(); 2265 notifier_list_add(&blk->insert_bs_notifiers, notify); 2266 } 2267 2268 void blk_io_plug(BlockBackend *blk) 2269 { 2270 BlockDriverState *bs = blk_bs(blk); 2271 IO_CODE(); 2272 2273 if (bs) { 2274 bdrv_io_plug(bs); 2275 } 2276 } 2277 2278 void blk_io_unplug(BlockBackend *blk) 2279 { 2280 BlockDriverState *bs = blk_bs(blk); 2281 IO_CODE(); 2282 2283 if (bs) { 2284 bdrv_io_unplug(bs); 2285 } 2286 } 2287 2288 BlockAcctStats *blk_get_stats(BlockBackend *blk) 2289 { 2290 IO_CODE(); 2291 return &blk->stats; 2292 } 2293 2294 void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk, 2295 BlockCompletionFunc *cb, void *opaque) 2296 { 2297 IO_CODE(); 2298 return qemu_aio_get(aiocb_info, blk_bs(blk), cb, opaque); 2299 } 2300 2301 int coroutine_fn blk_co_pwrite_zeroes(BlockBackend *blk, int64_t offset, 2302 int64_t bytes, BdrvRequestFlags flags) 2303 { 2304 IO_OR_GS_CODE(); 2305 return blk_co_pwritev(blk, offset, bytes, NULL, 2306 flags | BDRV_REQ_ZERO_WRITE); 2307 } 2308 2309 int coroutine_fn blk_co_pwrite_compressed(BlockBackend *blk, int64_t offset, 2310 int64_t bytes, const void *buf) 2311 { 2312 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes); 2313 IO_OR_GS_CODE(); 2314 return blk_co_pwritev_part(blk, offset, bytes, &qiov, 0, 2315 BDRV_REQ_WRITE_COMPRESSED); 2316 } 2317 2318 int coroutine_fn blk_co_truncate(BlockBackend *blk, int64_t offset, bool exact, 2319 PreallocMode prealloc, BdrvRequestFlags flags, 2320 Error **errp) 2321 { 2322 IO_OR_GS_CODE(); 2323 if (!blk_is_available(blk)) { 2324 error_setg(errp, "No medium inserted"); 2325 return -ENOMEDIUM; 2326 } 2327 2328 return bdrv_co_truncate(blk->root, offset, exact, prealloc, flags, errp); 2329 } 2330 2331 int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf, 2332 int64_t pos, int size) 2333 { 2334 int ret; 2335 GLOBAL_STATE_CODE(); 2336 2337 if (!blk_is_available(blk)) { 2338 return -ENOMEDIUM; 2339 } 2340 2341 ret = bdrv_save_vmstate(blk_bs(blk), buf, pos, size); 2342 if (ret < 0) { 2343 return ret; 2344 } 2345 2346 if (ret == size && !blk->enable_write_cache) { 2347 ret = bdrv_flush(blk_bs(blk)); 2348 } 2349 2350 return ret < 0 ? ret : size; 2351 } 2352 2353 int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size) 2354 { 2355 GLOBAL_STATE_CODE(); 2356 if (!blk_is_available(blk)) { 2357 return -ENOMEDIUM; 2358 } 2359 2360 return bdrv_load_vmstate(blk_bs(blk), buf, pos, size); 2361 } 2362 2363 int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz) 2364 { 2365 GLOBAL_STATE_CODE(); 2366 if (!blk_is_available(blk)) { 2367 return -ENOMEDIUM; 2368 } 2369 2370 return bdrv_probe_blocksizes(blk_bs(blk), bsz); 2371 } 2372 2373 int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo) 2374 { 2375 GLOBAL_STATE_CODE(); 2376 if (!blk_is_available(blk)) { 2377 return -ENOMEDIUM; 2378 } 2379 2380 return bdrv_probe_geometry(blk_bs(blk), geo); 2381 } 2382 2383 /* 2384 * Updates the BlockBackendRootState object with data from the currently 2385 * attached BlockDriverState. 2386 */ 2387 void blk_update_root_state(BlockBackend *blk) 2388 { 2389 GLOBAL_STATE_CODE(); 2390 assert(blk->root); 2391 2392 blk->root_state.open_flags = blk->root->bs->open_flags; 2393 blk->root_state.detect_zeroes = blk->root->bs->detect_zeroes; 2394 } 2395 2396 /* 2397 * Returns the detect-zeroes setting to be used for bdrv_open() of a 2398 * BlockDriverState which is supposed to inherit the root state. 2399 */ 2400 bool blk_get_detect_zeroes_from_root_state(BlockBackend *blk) 2401 { 2402 GLOBAL_STATE_CODE(); 2403 return blk->root_state.detect_zeroes; 2404 } 2405 2406 /* 2407 * Returns the flags to be used for bdrv_open() of a BlockDriverState which is 2408 * supposed to inherit the root state. 2409 */ 2410 int blk_get_open_flags_from_root_state(BlockBackend *blk) 2411 { 2412 GLOBAL_STATE_CODE(); 2413 return blk->root_state.open_flags; 2414 } 2415 2416 BlockBackendRootState *blk_get_root_state(BlockBackend *blk) 2417 { 2418 GLOBAL_STATE_CODE(); 2419 return &blk->root_state; 2420 } 2421 2422 int blk_commit_all(void) 2423 { 2424 BlockBackend *blk = NULL; 2425 GLOBAL_STATE_CODE(); 2426 2427 while ((blk = blk_all_next(blk)) != NULL) { 2428 AioContext *aio_context = blk_get_aio_context(blk); 2429 BlockDriverState *unfiltered_bs = bdrv_skip_filters(blk_bs(blk)); 2430 2431 aio_context_acquire(aio_context); 2432 if (blk_is_inserted(blk) && bdrv_cow_child(unfiltered_bs)) { 2433 int ret; 2434 2435 ret = bdrv_commit(unfiltered_bs); 2436 if (ret < 0) { 2437 aio_context_release(aio_context); 2438 return ret; 2439 } 2440 } 2441 aio_context_release(aio_context); 2442 } 2443 return 0; 2444 } 2445 2446 2447 /* throttling disk I/O limits */ 2448 void blk_set_io_limits(BlockBackend *blk, ThrottleConfig *cfg) 2449 { 2450 GLOBAL_STATE_CODE(); 2451 throttle_group_config(&blk->public.throttle_group_member, cfg); 2452 } 2453 2454 void blk_io_limits_disable(BlockBackend *blk) 2455 { 2456 BlockDriverState *bs = blk_bs(blk); 2457 ThrottleGroupMember *tgm = &blk->public.throttle_group_member; 2458 assert(tgm->throttle_state); 2459 GLOBAL_STATE_CODE(); 2460 if (bs) { 2461 bdrv_ref(bs); 2462 bdrv_drained_begin(bs); 2463 } 2464 throttle_group_unregister_tgm(tgm); 2465 if (bs) { 2466 bdrv_drained_end(bs); 2467 bdrv_unref(bs); 2468 } 2469 } 2470 2471 /* should be called before blk_set_io_limits if a limit is set */ 2472 void blk_io_limits_enable(BlockBackend *blk, const char *group) 2473 { 2474 assert(!blk->public.throttle_group_member.throttle_state); 2475 GLOBAL_STATE_CODE(); 2476 throttle_group_register_tgm(&blk->public.throttle_group_member, 2477 group, blk_get_aio_context(blk)); 2478 } 2479 2480 void blk_io_limits_update_group(BlockBackend *blk, const char *group) 2481 { 2482 GLOBAL_STATE_CODE(); 2483 /* this BB is not part of any group */ 2484 if (!blk->public.throttle_group_member.throttle_state) { 2485 return; 2486 } 2487 2488 /* this BB is a part of the same group than the one we want */ 2489 if (!g_strcmp0(throttle_group_get_name(&blk->public.throttle_group_member), 2490 group)) { 2491 return; 2492 } 2493 2494 /* need to change the group this bs belong to */ 2495 blk_io_limits_disable(blk); 2496 blk_io_limits_enable(blk, group); 2497 } 2498 2499 static void blk_root_drained_begin(BdrvChild *child) 2500 { 2501 BlockBackend *blk = child->opaque; 2502 ThrottleGroupMember *tgm = &blk->public.throttle_group_member; 2503 2504 if (++blk->quiesce_counter == 1) { 2505 if (blk->dev_ops && blk->dev_ops->drained_begin) { 2506 blk->dev_ops->drained_begin(blk->dev_opaque); 2507 } 2508 } 2509 2510 /* Note that blk->root may not be accessible here yet if we are just 2511 * attaching to a BlockDriverState that is drained. Use child instead. */ 2512 2513 if (qatomic_fetch_inc(&tgm->io_limits_disabled) == 0) { 2514 throttle_group_restart_tgm(tgm); 2515 } 2516 } 2517 2518 static bool blk_root_drained_poll(BdrvChild *child) 2519 { 2520 BlockBackend *blk = child->opaque; 2521 bool busy = false; 2522 assert(blk->quiesce_counter); 2523 2524 if (blk->dev_ops && blk->dev_ops->drained_poll) { 2525 busy = blk->dev_ops->drained_poll(blk->dev_opaque); 2526 } 2527 return busy || !!blk->in_flight; 2528 } 2529 2530 static void blk_root_drained_end(BdrvChild *child, int *drained_end_counter) 2531 { 2532 BlockBackend *blk = child->opaque; 2533 assert(blk->quiesce_counter); 2534 2535 assert(blk->public.throttle_group_member.io_limits_disabled); 2536 qatomic_dec(&blk->public.throttle_group_member.io_limits_disabled); 2537 2538 if (--blk->quiesce_counter == 0) { 2539 if (blk->dev_ops && blk->dev_ops->drained_end) { 2540 blk->dev_ops->drained_end(blk->dev_opaque); 2541 } 2542 while (qemu_co_enter_next(&blk->queued_requests, NULL)) { 2543 /* Resume all queued requests */ 2544 } 2545 } 2546 } 2547 2548 bool blk_register_buf(BlockBackend *blk, void *host, size_t size, Error **errp) 2549 { 2550 GLOBAL_STATE_CODE(); 2551 return bdrv_register_buf(blk_bs(blk), host, size, errp); 2552 } 2553 2554 void blk_unregister_buf(BlockBackend *blk, void *host, size_t size) 2555 { 2556 GLOBAL_STATE_CODE(); 2557 bdrv_unregister_buf(blk_bs(blk), host, size); 2558 } 2559 2560 int coroutine_fn blk_co_copy_range(BlockBackend *blk_in, int64_t off_in, 2561 BlockBackend *blk_out, int64_t off_out, 2562 int64_t bytes, BdrvRequestFlags read_flags, 2563 BdrvRequestFlags write_flags) 2564 { 2565 int r; 2566 IO_CODE(); 2567 2568 r = blk_check_byte_request(blk_in, off_in, bytes); 2569 if (r) { 2570 return r; 2571 } 2572 r = blk_check_byte_request(blk_out, off_out, bytes); 2573 if (r) { 2574 return r; 2575 } 2576 return bdrv_co_copy_range(blk_in->root, off_in, 2577 blk_out->root, off_out, 2578 bytes, read_flags, write_flags); 2579 } 2580 2581 const BdrvChild *blk_root(BlockBackend *blk) 2582 { 2583 GLOBAL_STATE_CODE(); 2584 return blk->root; 2585 } 2586 2587 int blk_make_empty(BlockBackend *blk, Error **errp) 2588 { 2589 GLOBAL_STATE_CODE(); 2590 if (!blk_is_available(blk)) { 2591 error_setg(errp, "No medium inserted"); 2592 return -ENOMEDIUM; 2593 } 2594 2595 return bdrv_make_empty(blk->root, errp); 2596 } 2597