block/block-backend.c: QEMU BlockBackend implementation (excerpt; each line shows its source line number and, where it falls inside a function, the enclosing function name)
4 * Copyright (C) 2014-2016 Red Hat, Inc.
10 * or later. See the COPYING.LIB file in the top-level directory.
14 #include "system/block-backend.h"
18 #include "block/throttle-groups.h"
19 #include "hw/qdev-core.h"
24 #include "qapi/qapi-events-block.h"
26 #include "qemu/main-loop.h"
89 /* Number of in-flight aio requests. BlockDriverState also counts
90 * in-flight requests but aio requests can exist even when blk->root is NULL, so we cannot rely on its counter for that case.
145 BlockBackend *blk = child->opaque; in blk_root_get_parent_desc()
148 if (blk->name) { in blk_root_get_parent_desc()
149 return g_strdup_printf("block device '%s'", blk->name); in blk_root_get_parent_desc()
163 return blk_name(child->opaque); in blk_root_get_name()
175 qemu_del_vm_change_state_handler(blk->vmsh); in blk_vm_state_changed()
176 blk->vmsh = NULL; in blk_vm_state_changed()
177 blk_set_perm(blk, blk->perm, blk->shared_perm, &local_err); in blk_vm_state_changed()
192 BlockBackend *blk = child->opaque; in blk_root_activate()
196 if (!blk->disable_perm) { in blk_root_activate()
200 blk->disable_perm = false; in blk_root_activate()
203 * blk->shared_perm contains the permissions we want to share once in blk_root_activate()
205 * all; but we also need to retain blk->shared_perm, which is in blk_root_activate()
209 saved_shared_perm = blk->shared_perm; in blk_root_activate()
211 blk_set_perm_locked(blk, blk->perm, BLK_PERM_ALL, &local_err); in blk_root_activate()
214 blk->disable_perm = true; in blk_root_activate()
217 blk->shared_perm = saved_shared_perm; in blk_root_activate()
221 * example when nbd_server_add is called during non-shared storage migration. in blk_root_activate()
223 if (!blk->vmsh) { in blk_root_activate()
224 blk->vmsh = qemu_add_vm_change_state_handler(blk_vm_state_changed, in blk_root_activate()
230 blk_set_perm_locked(blk, blk->perm, blk->shared_perm, &local_err); in blk_root_activate()
233 blk->disable_perm = true; in blk_root_activate()
241 blk->force_allow_inactivate = true; in blk_set_force_allow_inactivate()
247 if (blk->dev || blk_name(blk)[0]) { in blk_can_inactivate()
255 * by libvirt non-shared block migration. */ in blk_can_inactivate()
256 if (!(blk->perm & ~BLK_PERM_CONSISTENT_READ)) { in blk_can_inactivate()
260 return blk->force_allow_inactivate; in blk_can_inactivate()
265 BlockBackend *blk = child->opaque; in blk_root_inactivate()
267 if (blk->disable_perm) { in blk_root_inactivate()
272 return -EPERM; in blk_root_inactivate()
275 blk->disable_perm = true; in blk_root_inactivate()
276 if (blk->root) { in blk_root_inactivate()
277 bdrv_child_try_set_perm(blk->root, 0, BLK_PERM_ALL, &error_abort); in blk_root_inactivate()
285 BlockBackend *blk = child->opaque; in blk_root_attach()
288 trace_blk_root_attach(child, blk, child->bs); in blk_root_attach()
290 QLIST_FOREACH(notifier, &blk->aio_notifiers, list) { in blk_root_attach()
291 bdrv_add_aio_context_notifier(child->bs, in blk_root_attach()
292 notifier->attached_aio_context, in blk_root_attach()
293 notifier->detach_aio_context, in blk_root_attach()
294 notifier->opaque); in blk_root_attach()
300 BlockBackend *blk = child->opaque; in blk_root_detach()
303 trace_blk_root_detach(child, blk, child->bs); in blk_root_detach()
305 QLIST_FOREACH(notifier, &blk->aio_notifiers, list) { in blk_root_detach()
306 bdrv_remove_aio_context_notifier(child->bs, in blk_root_detach()
307 notifier->attached_aio_context, in blk_root_detach()
308 notifier->detach_aio_context, in blk_root_detach()
309 notifier->opaque); in blk_root_detach()
315 BlockBackend *blk = c->opaque; in blk_root_get_parent_aio_context()
362 blk->refcnt = 1; in blk_new()
363 blk->ctx = ctx; in blk_new()
364 blk->perm = perm; in blk_new()
365 blk->shared_perm = shared_perm; in blk_new()
368 blk->on_read_error = BLOCKDEV_ON_ERROR_REPORT; in blk_new()
369 blk->on_write_error = BLOCKDEV_ON_ERROR_ENOSPC; in blk_new()
371 block_acct_init(&blk->stats); in blk_new()
373 qemu_mutex_init(&blk->queued_requests_lock); in blk_new()
374 qemu_co_queue_init(&blk->queued_requests); in blk_new()
375 notifier_list_init(&blk->remove_bs_notifiers); in blk_new()
376 notifier_list_init(&blk->insert_bs_notifiers); in blk_new()
377 QLIST_INIT(&blk->aio_notifiers); in blk_new()
464 blk->perm = perm; in blk_new_open()
465 blk->shared_perm = shared; in blk_new_open()
470 if (!blk->root) { in blk_new_open()
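The lines above show blk_new() initializing a fresh BlockBackend (refcount 1, default error policies) and blk_new_open() additionally opening and attaching a root node. A minimal lifecycle sketch, assuming QEMU's internal headers and an already-opened BlockDriverState; the helper name is illustrative, not from this file:

```c
#include "system/block-backend.h"   /* same header this file includes */
#include "qapi/error.h"

/* Hypothetical helper: wrap an existing node in a new BlockBackend. */
static BlockBackend *attach_backend(BlockDriverState *bs, Error **errp)
{
    /* Ask for read/write access; let other parents keep reading. */
    BlockBackend *blk = blk_new(bdrv_get_aio_context(bs),
                                BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE,
                                BLK_PERM_CONSISTENT_READ);

    if (blk_insert_bs(blk, bs, errp) < 0) {  /* -EPERM on permission clash */
        blk_unref(blk);                      /* refcount 1 -> 0: blk_delete() */
        return NULL;
    }
    return blk;
}
```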
480 assert(!blk->refcnt); in blk_delete()
481 assert(!blk->name); in blk_delete()
482 assert(!blk->dev); in blk_delete()
483 if (blk->public.throttle_group_member.throttle_state) { in blk_delete()
486 if (blk->root) { in blk_delete()
489 if (blk->vmsh) { in blk_delete()
490 qemu_del_vm_change_state_handler(blk->vmsh); in blk_delete()
491 blk->vmsh = NULL; in blk_delete()
493 assert(QLIST_EMPTY(&blk->remove_bs_notifiers.notifiers)); in blk_delete()
494 assert(QLIST_EMPTY(&blk->insert_bs_notifiers.notifiers)); in blk_delete()
495 assert(QLIST_EMPTY(&blk->aio_notifiers)); in blk_delete()
496 assert(qemu_co_queue_empty(&blk->queued_requests)); in blk_delete()
497 qemu_mutex_destroy(&blk->queued_requests_lock); in blk_delete()
499 drive_info_del(blk->legacy_dinfo); in blk_delete()
500 block_acct_cleanup(&blk->stats); in blk_delete()
509 qemu_opts_del(dinfo->opts); in drive_info_del()
516 return blk ? blk->refcnt : 0; in blk_get_refcnt()
525 assert(blk->refcnt > 0); in blk_ref()
527 blk->refcnt++; in blk_ref()
539 assert(blk->refcnt > 0); in blk_unref()
540 if (blk->refcnt > 1) { in blk_unref()
541 blk->refcnt--; in blk_unref()
545 assert(blk->refcnt == 1); in blk_unref()
546 blk->refcnt = 0; in blk_unref()
570 if (blk->root) { in blk_remove_all_bs()
577 * Return the monitor-owned BlockBackend after @blk.
593 /* Iterates over all top-level BlockDriverStates, i.e. BDSs that are owned by
602 old_bs = it->bs; in bdrv_next()
607 if (it->phase == BDRV_NEXT_BACKEND_ROOTS) { in bdrv_next()
608 BlockBackend *old_blk = it->blk; in bdrv_next()
611 it->blk = blk_all_next(it->blk); in bdrv_next()
612 bs = it->blk ? blk_bs(it->blk) : NULL; in bdrv_next()
613 } while (it->blk && (bs == NULL || bdrv_first_blk(bs) != it->blk)); in bdrv_next()
615 if (it->blk) { in bdrv_next()
616 blk_ref(it->blk); in bdrv_next()
623 it->bs = bs; in bdrv_next()
626 it->phase = BDRV_NEXT_MONITOR_OWNED; in bdrv_next()
629 /* Then return the monitor-owned BDSes without a BB attached. Ignore all in bdrv_next()
633 it->bs = bdrv_next_monitor_owned(it->bs); in bdrv_next()
634 bs = it->bs; in bdrv_next()
666 bdrv_unref(it->bs); in bdrv_next_cleanup()
668 if (it->phase == BDRV_NEXT_BACKEND_ROOTS && it->blk) { in bdrv_next_cleanup()
669 blk_unref(it->blk); in bdrv_next_cleanup()
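bdrv_next() first walks the backend roots (referencing each BlockBackend), then the monitor-owned BDSes without a BB attached; bdrv_next_cleanup() drops the references held for an aborted walk. The usual iteration pattern, sketched on the assumption of the bdrv_first() entry point defined alongside these functions (should_stop() is a hypothetical predicate):

```c
static void visit_all_nodes(void)
{
    BdrvNextIterator it;
    BlockDriverState *bs;

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        /* bs, and its owning BlockBackend if any, is referenced for us */
        if (should_stop(bs)) {
            bdrv_next_cleanup(&it);   /* release the pending references */
            break;
        }
    }
}
```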
687 assert(!blk->name); in monitor_add_blk()
706 blk->name = g_strdup(name); in monitor_add_blk()
719 if (!blk->name) { in monitor_remove_blk()
724 g_free(blk->name); in monitor_remove_blk()
725 blk->name = NULL; in monitor_remove_blk()
729 * Return @blk's name, a non-null string.
735 return blk->name ?: ""; in blk_name()
749 if (!strcmp(name, blk->name)) { in blk_by_name()
762 return blk->root ? blk->root->bs : NULL; in blk_bs()
772 QLIST_FOREACH(child, &bs->parents, next_parent) { in bdrv_first_blk()
773 if (child->klass == &child_root) { in bdrv_first_blk()
774 return child->opaque; in bdrv_first_blk()
800 QLIST_FOREACH(c, &bs->parents, next_parent) { in bdrv_is_root_node()
801 if (c->klass != &child_root) { in bdrv_is_root_node()
815 return blk->legacy_dinfo; in blk_legacy_dinfo()
825 assert(!blk->legacy_dinfo); in blk_set_legacy_dinfo()
827 return blk->legacy_dinfo = dinfo; in blk_set_legacy_dinfo()
840 if (blk->legacy_dinfo == dinfo) { in blk_by_legacy_dinfo()
853 return &blk->public; in blk_get_public()
861 ThrottleGroupMember *tgm = &blk->public.throttle_group_member; in blk_remove_bs()
866 notifier_list_notify(&blk->remove_bs_notifiers, blk); in blk_remove_bs()
867 if (tgm->throttle_state) { in blk_remove_bs()
884 /* bdrv_root_unref_child() will cause blk->root to become stale and may in blk_remove_bs()
889 root = blk->root; in blk_remove_bs()
890 blk->root = NULL; in blk_remove_bs()
902 ThrottleGroupMember *tgm = &blk->public.throttle_group_member; in blk_insert_bs()
909 if ((bs->open_flags & BDRV_O_INACTIVE) && blk_can_inactivate(blk)) { in blk_insert_bs()
910 blk->disable_perm = true; in blk_insert_bs()
914 perm = blk->perm; in blk_insert_bs()
915 shared_perm = blk->shared_perm; in blk_insert_bs()
918 blk->root = bdrv_root_attach_child(bs, "root", &child_root, in blk_insert_bs()
922 if (blk->root == NULL) { in blk_insert_bs()
923 return -EPERM; in blk_insert_bs()
926 notifier_list_notify(&blk->insert_bs_notifiers, blk); in blk_insert_bs()
927 if (tgm->throttle_state) { in blk_insert_bs()
941 return bdrv_replace_child_bs(blk->root, new_bs, errp); in blk_replace_bs()
954 if (blk->root && !blk->disable_perm) { in blk_set_perm_locked()
955 ret = bdrv_child_try_set_perm(blk->root, perm, shared_perm, errp); in blk_set_perm_locked()
961 blk->perm = perm; in blk_set_perm_locked()
962 blk->shared_perm = shared_perm; in blk_set_perm_locked()
979 *perm = blk->perm; in blk_get_perm()
980 *shared_perm = blk->shared_perm; in blk_get_perm()
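blk_set_perm_locked() pushes a (perm, shared_perm) pair down to the root child via bdrv_child_try_set_perm() and caches it on the BlockBackend; blk_get_perm() reads the cached pair back. A hedged sketch of tightening permissions:

```c
/* Sketch: take exclusive write access on @blk. */
static int take_exclusive_write(BlockBackend *blk, Error **errp)
{
    uint64_t perm, shared;
    int ret;

    /* Request read+write; allow other parents only consistent reads. */
    ret = blk_set_perm(blk, BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE,
                       BLK_PERM_CONSISTENT_READ, errp);
    if (ret < 0) {
        return ret;   /* another parent holds a conflicting permission */
    }

    blk_get_perm(blk, &perm, &shared);   /* mirrors blk->perm/shared_perm */
    return 0;
}
```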
985 * Return 0 on success, -EBUSY when a device model is attached already.
990 if (blk->dev) { in blk_attach_dev()
991 return -EBUSY; in blk_attach_dev()
998 blk->disable_perm = true; in blk_attach_dev()
1002 blk->dev = dev; in blk_attach_dev()
1014 assert(blk->dev == dev); in blk_detach_dev()
1016 blk->dev = NULL; in blk_detach_dev()
1017 blk->dev_ops = NULL; in blk_detach_dev()
1018 blk->dev_opaque = NULL; in blk_detach_dev()
1029 return blk->dev; in blk_get_attached_dev()
1038 DeviceState *dev = blk->dev; in blk_get_attached_dev_id_or_path()
1043 } else if (want_id && dev->id) { in blk_get_attached_dev_id_or_path()
1044 return g_strdup(dev->id); in blk_get_attached_dev_id_or_path()
1078 if (blk->dev == dev) { in blk_by_dev()
1094 blk->dev_ops = ops; in blk_set_dev_ops()
1095 blk->dev_opaque = opaque; in blk_set_dev_ops()
1098 if (qatomic_read(&blk->quiesce_counter) && ops && ops->drained_begin) { in blk_set_dev_ops()
1099 ops->drained_begin(opaque); in blk_set_dev_ops()
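blk_set_dev_ops() registers the attached device model's callback table; note how ops->drained_begin is replayed immediately when the backend is already quiesced. A sketch of a minimal BlockDevOps using only callbacks referenced in this file (the device wiring is hypothetical):

```c
static void my_change_media_cb(void *opaque, bool load, Error **errp)
{
    /* medium inserted (load == true) or removed */
}

static bool my_is_tray_open(void *opaque)
{
    return false;   /* consulted by blk_dev_is_tray_open() */
}

static const BlockDevOps my_dev_ops = {
    .change_media_cb = my_change_media_cb,
    .is_tray_open    = my_is_tray_open,
};

/* After blk_attach_dev(blk, dev) returned 0: */
static void wire_up_device(BlockBackend *blk, DeviceState *dev)
{
    blk_set_dev_ops(blk, &my_dev_ops, dev);
}
```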
1116 if (blk->dev_ops && blk->dev_ops->change_media_cb) { in blk_dev_change_media_cb()
1121 blk->dev_ops->change_media_cb(blk->dev_opaque, load, &local_err); in blk_dev_change_media_cb()
1139 blk_dev_change_media_cb(child->opaque, load, NULL); in blk_root_change_media()
1143 * Does @blk's attached device model have removable media?
1149 return !blk->dev || (blk->dev_ops && blk->dev_ops->change_media_cb); in blk_dev_has_removable_media()
1158 return blk->dev_ops && blk->dev_ops->is_tray_open; in blk_dev_has_tray()
1168 if (blk->dev_ops && blk->dev_ops->eject_request_cb) { in blk_dev_eject_request()
1169 blk->dev_ops->eject_request_cb(blk->dev_opaque, force); in blk_dev_eject_request()
1180 return blk->dev_ops->is_tray_open(blk->dev_opaque); in blk_dev_is_tray_open()
1192 if (blk->dev_ops && blk->dev_ops->is_medium_locked) { in blk_dev_is_medium_locked()
1193 return blk->dev_ops->is_medium_locked(blk->dev_opaque); in blk_dev_is_medium_locked()
1203 BlockBackend *blk = child->opaque; in blk_root_resize()
1205 if (blk->dev_ops && blk->dev_ops->resize_cb) { in blk_root_resize()
1206 blk->dev_ops->resize_cb(blk->dev_opaque); in blk_root_resize()
1213 blk->iostatus_enabled = true; in blk_iostatus_enable()
1214 blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK; in blk_iostatus_enable()
1222 return (blk->iostatus_enabled && in blk_iostatus_is_enabled()
1223 (blk->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC || in blk_iostatus_is_enabled()
1224 blk->on_write_error == BLOCKDEV_ON_ERROR_STOP || in blk_iostatus_is_enabled()
1225 blk->on_read_error == BLOCKDEV_ON_ERROR_STOP)); in blk_iostatus_is_enabled()
1231 return blk->iostatus; in blk_iostatus()
1238 blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK; in blk_iostatus_reset()
1246 if (blk->iostatus == BLOCK_DEVICE_IO_STATUS_OK) { in blk_iostatus_set_err()
1247 blk->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE : in blk_iostatus_set_err()
1255 blk->allow_write_beyond_eof = allow; in blk_set_allow_write_beyond_eof()
1261 blk->allow_aio_context_change = allow; in blk_set_allow_aio_context_change()
1267 qatomic_set(&blk->disable_request_queuing, disable); in blk_set_disable_request_queuing()
1276 return -EIO; in blk_check_byte_request()
1280 return -ENOMEDIUM; in blk_check_byte_request()
1284 return -EIO; in blk_check_byte_request()
1287 if (!blk->allow_write_beyond_eof) { in blk_check_byte_request()
1293 if (offset > len || len - offset < bytes) { in blk_check_byte_request()
1294 return -EIO; in blk_check_byte_request()
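The final test in blk_check_byte_request() is written as `offset > len || len - offset < bytes` instead of the naive `offset + bytes > len` so that a huge `bytes` cannot overflow the addition. A standalone illustration of the same idiom:

```c
#include <stdint.h>
#include <stdbool.h>

/* Overflow-safe range check: does [offset, offset + bytes) fit in len?
 * Assumes offset and bytes were already rejected if negative, as the
 * earlier checks in blk_check_byte_request() do. */
static bool request_in_bounds(int64_t offset, int64_t bytes, int64_t len)
{
    /* offset + bytes could wrap; len - offset cannot once offset <= len */
    return offset <= len && len - offset >= bytes;
}
```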
1305 return qatomic_read(&blk->quiesce_counter); in blk_in_drain()
1311 assert(blk->in_flight > 0); in blk_wait_while_drained()
1313 if (qatomic_read(&blk->quiesce_counter) && in blk_wait_while_drained()
1314 !qatomic_read(&blk->disable_request_queuing)) { in blk_wait_while_drained()
1320 qemu_mutex_lock(&blk->queued_requests_lock); in blk_wait_while_drained()
1322 qemu_co_queue_wait(&blk->queued_requests, &blk->queued_requests_lock); in blk_wait_while_drained()
1324 qemu_mutex_unlock(&blk->queued_requests_lock); in blk_wait_while_drained()
1353 if (blk->public.throttle_group_member.throttle_state) { in blk_co_do_preadv_part()
1354 throttle_group_co_io_limits_intercept(&blk->public.throttle_group_member, in blk_co_do_preadv_part()
1358 ret = bdrv_co_preadv_part(blk->root, offset, bytes, qiov, qiov_offset, in blk_co_do_preadv_part()
1427 if (blk->public.throttle_group_member.throttle_state) { in blk_co_do_pwritev_part()
1428 throttle_group_co_io_limits_intercept(&blk->public.throttle_group_member, in blk_co_do_pwritev_part()
1432 if (!blk->enable_write_cache) { in blk_co_do_pwritev_part()
1436 ret = bdrv_co_pwritev_part(blk->root, offset, bytes, qiov, qiov_offset, in blk_co_do_pwritev_part()
1510 return bdrv_make_zero(blk->root, flags); in blk_make_zero()
1516 qatomic_inc(&blk->in_flight); in blk_inc_in_flight()
1522 qatomic_dec(&blk->in_flight); in blk_dec_in_flight()
1530 blk_dec_in_flight(acb->blk); in error_callback_bh()
1531 acb->common.cb(acb->common.opaque, acb->ret); in error_callback_bh()
1544 acb->blk = blk; in blk_abort_aio_request()
1545 acb->ret = ret; in blk_abort_aio_request()
1549 return &acb->common; in blk_abort_aio_request()
1565 if (acb->has_returned) { in blk_aio_complete()
1566 acb->common.cb(acb->common.opaque, acb->rwco.ret); in blk_aio_complete()
1567 blk_dec_in_flight(acb->rwco.blk); in blk_aio_complete()
1575 assert(acb->has_returned); in blk_aio_complete_bh()
1590 acb->rwco = (BlkRwCo) { in blk_aio_prwv()
1597 acb->bytes = bytes; in blk_aio_prwv()
1598 acb->has_returned = false; in blk_aio_prwv()
1603 acb->has_returned = true; in blk_aio_prwv()
1604 if (acb->rwco.ret != NOT_DONE) { in blk_aio_prwv()
1609 return &acb->common; in blk_aio_prwv()
1615 BlkRwCo *rwco = &acb->rwco; in blk_aio_read_entry()
1616 QEMUIOVector *qiov = rwco->iobuf; in blk_aio_read_entry()
1618 assert(qiov->size == acb->bytes); in blk_aio_read_entry()
1619 rwco->ret = blk_co_do_preadv_part(rwco->blk, rwco->offset, acb->bytes, qiov, in blk_aio_read_entry()
1620 0, rwco->flags); in blk_aio_read_entry()
1627 BlkRwCo *rwco = &acb->rwco; in blk_aio_write_entry()
1628 QEMUIOVector *qiov = rwco->iobuf; in blk_aio_write_entry()
1630 assert(!qiov || qiov->size == acb->bytes); in blk_aio_write_entry()
1631 rwco->ret = blk_co_do_pwritev_part(rwco->blk, rwco->offset, acb->bytes, in blk_aio_write_entry()
1632 qiov, 0, rwco->flags); in blk_aio_write_entry()
1651 return -ENOMEDIUM; in blk_co_getlength()
1665 return -ENOMEDIUM; in blk_co_nb_sectors()
1672 * This wrapper is written by hand because this function is in the hot I/O path, via blk_get_geometry.
1682 return -ENOMEDIUM; in blk_nb_sectors()
1697 * This wrapper is written by hand because this function is in the hot I/O path.
1711 assert((uint64_t)qiov->size <= INT64_MAX); in blk_aio_preadv()
1712 return blk_aio_prwv(blk, offset, qiov->size, qiov, in blk_aio_preadv()
1721 assert((uint64_t)qiov->size <= INT64_MAX); in blk_aio_pwritev()
1722 return blk_aio_prwv(blk, offset, qiov->size, qiov, in blk_aio_pwritev()
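blk_aio_preadv()/blk_aio_pwritev() are thin wrappers around blk_aio_prwv(): the request runs in a coroutine, and the completion callback fires directly when the coroutine finishes after blk_aio_prwv() has returned, or from a scheduled bottom half when it finished first. A hedged usage sketch; the request struct and names are illustrative, and the qiov lives in a heap-allocated struct because it must stay valid until completion:

```c
typedef struct ReadReq {
    QEMUIOVector qiov;
} ReadReq;

static void read_done(void *opaque, int ret)
{
    ReadReq *req = opaque;

    /* ret is 0 on success or a negative errno (e.g. -ENOMEDIUM, -EIO) */
    g_free(req);
}

static void start_read(BlockBackend *blk, void *buf, int64_t offset,
                       size_t size)
{
    ReadReq *req = g_new0(ReadReq, 1);

    qemu_iovec_init_buf(&req->qiov, buf, size);   /* single-element iovec */
    blk_aio_preadv(blk, offset, &req->qiov, 0, read_done, req);
}
```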
1748 return -ENOMEDIUM; in blk_co_do_ioctl()
1770 BlkRwCo *rwco = &acb->rwco; in blk_aio_ioctl_entry()
1772 rwco->ret = blk_co_do_ioctl(rwco->blk, rwco->offset, rwco->iobuf); in blk_aio_ioctl_entry()
1799 return bdrv_co_pdiscard(blk->root, offset, bytes); in blk_co_do_pdiscard()
1805 BlkRwCo *rwco = &acb->rwco; in blk_aio_pdiscard_entry()
1807 rwco->ret = blk_co_do_pdiscard(rwco->blk, rwco->offset, acb->bytes); in blk_aio_pdiscard_entry()
1841 return -ENOMEDIUM; in blk_co_do_flush()
1850 BlkRwCo *rwco = &acb->rwco; in blk_aio_flush_entry()
1852 rwco->ret = blk_co_do_flush(rwco->blk); in blk_aio_flush_entry()
1878 BlkRwCo *rwco = &acb->rwco; in blk_aio_zone_report_entry()
1880 rwco->ret = blk_co_zone_report(rwco->blk, rwco->offset, in blk_aio_zone_report_entry()
1881 (unsigned int*)(uintptr_t)acb->bytes, in blk_aio_zone_report_entry()
1882 rwco->iobuf); in blk_aio_zone_report_entry()
1897 acb->rwco = (BlkRwCo) { in blk_aio_zone_report()
1903 acb->bytes = (int64_t)(uintptr_t)nr_zones, in blk_aio_zone_report()
1904 acb->has_returned = false; in blk_aio_zone_report()
1909 acb->has_returned = true; in blk_aio_zone_report()
1910 if (acb->rwco.ret != NOT_DONE) { in blk_aio_zone_report()
1915 return &acb->common; in blk_aio_zone_report()
1921 BlkRwCo *rwco = &acb->rwco; in blk_aio_zone_mgmt_entry()
1923 rwco->ret = blk_co_zone_mgmt(rwco->blk, in blk_aio_zone_mgmt_entry()
1924 (BlockZoneOp)(uintptr_t)rwco->iobuf, in blk_aio_zone_mgmt_entry()
1925 rwco->offset, acb->bytes); in blk_aio_zone_mgmt_entry()
1938 acb->rwco = (BlkRwCo) { in blk_aio_zone_mgmt()
1944 acb->bytes = len; in blk_aio_zone_mgmt()
1945 acb->has_returned = false; in blk_aio_zone_mgmt()
1950 acb->has_returned = true; in blk_aio_zone_mgmt()
1951 if (acb->rwco.ret != NOT_DONE) { in blk_aio_zone_mgmt()
1956 return &acb->common; in blk_aio_zone_mgmt()
1962 BlkRwCo *rwco = &acb->rwco; in blk_aio_zone_append_entry()
1964 rwco->ret = blk_co_zone_append(rwco->blk, (int64_t *)(uintptr_t)acb->bytes, in blk_aio_zone_append_entry()
1965 rwco->iobuf, rwco->flags); in blk_aio_zone_append_entry()
1978 acb->rwco = (BlkRwCo) { in blk_aio_zone_append()
1984 acb->bytes = (int64_t)(uintptr_t)offset; in blk_aio_zone_append()
1985 acb->has_returned = false; in blk_aio_zone_append()
1989 acb->has_returned = true; in blk_aio_zone_append()
1990 if (acb->rwco.ret != NOT_DONE) { in blk_aio_zone_append()
1995 return &acb->common; in blk_aio_zone_append()
2016 return -ENOMEDIUM; in blk_co_zone_report()
2065 return -ENOMEDIUM; in blk_co_zone_append()
2083 /* We may have -ENOMEDIUM completions in flight */ in blk_drain()
2085 qatomic_read(&blk->in_flight) > 0); in blk_drain()
2102 /* We may have -ENOMEDIUM completions in flight */ in blk_drain_all()
2103 AIO_WAIT_WHILE_UNLOCKED(NULL, qatomic_read(&blk->in_flight) > 0); in blk_drain_all()
2113 blk->on_read_error = on_read_error; in blk_set_on_error()
2114 blk->on_write_error = on_write_error; in blk_set_on_error()
2120 return is_read ? blk->on_read_error : blk->on_write_error; in blk_get_on_error()
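blk_set_on_error() stores the read/write error policies that device models consult after a failed request; per blk_iostatus_is_enabled() above, only the stop and enospc policies feed the iostatus machinery. A sketch reinstating the defaults that blk_new() installs:

```c
static void set_default_error_policy(BlockBackend *blk)
{
    blk_set_on_error(blk, BLOCKDEV_ON_ERROR_REPORT,   /* read errors  */
                     BLOCKDEV_ON_ERROR_ENOSPC);       /* write errors */

    if (blk_get_on_error(blk, true) == BLOCKDEV_ON_ERROR_REPORT) {
        /* read failure: propagate the error straight to the guest */
    }
}
```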
2195 * (because its root node is not read-only).
2205 return blk->root_state.open_flags & BDRV_O_RDWR; in blk_supports_write_perm()
2216 return blk->perm & BLK_PERM_WRITE; in blk_is_writable()
2234 return blk->enable_write_cache; in blk_enable_write_cache()
2240 blk->enable_write_cache = wce; in blk_set_enable_write_cache()
2296 return blk->root_state.open_flags; in blk_get_flags()
2305 return bs ? bs->bl.request_alignment : BDRV_SECTOR_SIZE; in blk_get_request_alignment()
2316 max = MIN_NON_ZERO(max, bs->bl.max_hw_transfer); in blk_get_max_hw_transfer()
2317 max = MIN_NON_ZERO(max, bs->bl.max_transfer); in blk_get_max_hw_transfer()
2330 max = MIN_NON_ZERO(max, bs->bl.max_transfer); in blk_get_max_transfer()
2338 return MIN_NON_ZERO(blk->root->bs->bl.max_hw_iov, in blk_get_max_hw_iov()
2339 blk->root->bs->bl.max_iov); in blk_get_max_hw_iov()
2345 return blk->root->bs->bl.max_iov; in blk_get_max_iov()
2366 * in-flight counter will prevent its context from changing.
2376 return qatomic_read(&blk->ctx); in blk_get_aio_context()
2389 qatomic_set(&blk->ctx, new_context); in blk_set_aio_context()
2395 old_allow_change = blk->allow_aio_context_change; in blk_set_aio_context()
2396 blk->allow_aio_context_change = true; in blk_set_aio_context()
2400 blk->allow_aio_context_change = old_allow_change; in blk_set_aio_context()
2414 BlockBackend *blk = s->blk; in blk_root_set_aio_ctx_commit()
2415 AioContext *new_context = s->new_ctx; in blk_root_set_aio_ctx_commit()
2416 ThrottleGroupMember *tgm = &blk->public.throttle_group_member; in blk_root_set_aio_ctx_commit()
2418 qatomic_set(&blk->ctx, new_context); in blk_root_set_aio_ctx_commit()
2419 if (tgm->throttle_state) { in blk_root_set_aio_ctx_commit()
2434 BlockBackend *blk = child->opaque; in blk_root_change_aio_ctx()
2437 if (!blk->allow_aio_context_change) { in blk_root_change_aio_ctx()
2443 if (!blk->name || blk->dev) { in blk_root_change_aio_ctx()
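blk_set_aio_context() temporarily sets allow_aio_context_change around the node-level change, so blk_root_change_aio_ctx() will not veto the move even for named or device-attached backends. A sketch of moving a backend into an IOThread, assuming the IOThread API from the system headers:

```c
#include "system/iothread.h"   /* assumed header location */

static int move_to_iothread(BlockBackend *blk, IOThread *iothread,
                            Error **errp)
{
    /* No explicit blk_set_allow_aio_context_change() call is needed here;
     * blk_set_aio_context() permits the change itself. */
    return blk_set_aio_context(blk, iothread_get_aio_context(iothread), errp);
}
```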
2469 notifier->attached_aio_context = attached_aio_context; in blk_add_aio_context_notifier()
2470 notifier->detach_aio_context = detach_aio_context; in blk_add_aio_context_notifier()
2471 notifier->opaque = opaque; in blk_add_aio_context_notifier()
2472 QLIST_INSERT_HEAD(&blk->aio_notifiers, notifier, list); in blk_add_aio_context_notifier()
2496 QLIST_FOREACH(notifier, &blk->aio_notifiers, list) { in blk_remove_aio_context_notifier()
2497 if (notifier->attached_aio_context == attached_aio_context && in blk_remove_aio_context_notifier()
2498 notifier->detach_aio_context == detach_aio_context && in blk_remove_aio_context_notifier()
2499 notifier->opaque == opaque) { in blk_remove_aio_context_notifier()
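blk_add_aio_context_notifier() records a callback pair that blk_root_attach()/blk_root_detach() above replay onto whichever node is currently attached; removal must match all three registered values. A sketch with illustrative callback names:

```c
static void my_attached(AioContext *new_context, void *opaque)
{
    /* re-arm timers and bottom halves in new_context */
}

static void my_detach(void *opaque)
{
    /* quiesce work scheduled on the outgoing context */
}

static void track_context(BlockBackend *blk, void *state, bool add)
{
    if (add) {
        blk_add_aio_context_notifier(blk, my_attached, my_detach, state);
    } else {
        blk_remove_aio_context_notifier(blk, my_attached, my_detach, state);
    }
}
```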
2512 notifier_list_add(&blk->remove_bs_notifiers, notify); in blk_add_remove_bs_notifier()
2518 return &blk->stats; in blk_get_stats()
2553 return -ENOMEDIUM; in blk_co_truncate()
2556 return bdrv_co_truncate(blk->root, offset, exact, prealloc, flags, errp); in blk_co_truncate()
2566 return -ENOMEDIUM; in blk_save_vmstate()
2574 if (ret == size && !blk->enable_write_cache) { in blk_save_vmstate()
2585 return -ENOMEDIUM; in blk_load_vmstate()
2597 return -ENOMEDIUM; in blk_probe_blocksizes()
2607 return -ENOMEDIUM; in blk_probe_geometry()
2620 assert(blk->root); in blk_update_root_state()
2622 blk->root_state.open_flags = blk->root->bs->open_flags; in blk_update_root_state()
2623 blk->root_state.detect_zeroes = blk->root->bs->detect_zeroes; in blk_update_root_state()
2627 * Returns the detect-zeroes setting to be used for bdrv_open() of a temporary snapshot.
2633 return blk->root_state.detect_zeroes; in blk_get_detect_zeroes_from_root_state()
2643 return blk->root_state.open_flags; in blk_get_open_flags_from_root_state()
2649 return &blk->root_state; in blk_get_root_state()
2678 throttle_group_config(&blk->public.throttle_group_member, cfg); in blk_set_io_limits()
2684 ThrottleGroupMember *tgm = &blk->public.throttle_group_member; in blk_io_limits_disable()
2685 assert(tgm->throttle_state); in blk_io_limits_disable()
2701 assert(!blk->public.throttle_group_member.throttle_state); in blk_io_limits_enable()
2703 throttle_group_register_tgm(&blk->public.throttle_group_member, in blk_io_limits_enable()
2711 if (!blk->public.throttle_group_member.throttle_state) { in blk_io_limits_update_group()
2716 if (!g_strcmp0(throttle_group_get_name(&blk->public.throttle_group_member), in blk_io_limits_update_group()
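blk_io_limits_enable() registers the backend's ThrottleGroupMember with a named group; blk_set_io_limits() then applies a ThrottleConfig through throttle_group_config(). A hedged sketch capping aggregate throughput, assuming the helpers from qemu/throttle.h (the group name is illustrative):

```c
static void limit_backend(BlockBackend *blk)
{
    ThrottleConfig cfg;

    throttle_config_init(&cfg);
    cfg.buckets[THROTTLE_BPS_TOTAL].avg = 100 << 20;   /* ~100 MiB/s */

    blk_io_limits_enable(blk, "group0");   /* join (or create) the group */
    blk_set_io_limits(blk, &cfg);
}
```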
2728 BlockBackend *blk = child->opaque; in blk_root_drained_begin()
2729 ThrottleGroupMember *tgm = &blk->public.throttle_group_member; in blk_root_drained_begin()
2731 if (qatomic_fetch_inc(&blk->quiesce_counter) == 0) { in blk_root_drained_begin()
2732 if (blk->dev_ops && blk->dev_ops->drained_begin) { in blk_root_drained_begin()
2733 blk->dev_ops->drained_begin(blk->dev_opaque); in blk_root_drained_begin()
2737 /* Note that blk->root may not be accessible here yet if we are just attaching to a BlockDriverState that is drained. Use child instead. */ in blk_root_drained_begin()
2740 if (qatomic_fetch_inc(&tgm->io_limits_disabled) == 0) { in blk_root_drained_begin()
2747 BlockBackend *blk = child->opaque; in blk_root_drained_poll()
2749 assert(qatomic_read(&blk->quiesce_counter)); in blk_root_drained_poll()
2751 if (blk->dev_ops && blk->dev_ops->drained_poll) { in blk_root_drained_poll()
2752 busy = blk->dev_ops->drained_poll(blk->dev_opaque); in blk_root_drained_poll()
2754 return busy || !!blk->in_flight; in blk_root_drained_poll()
2759 BlockBackend *blk = child->opaque; in blk_root_drained_end()
2760 assert(qatomic_read(&blk->quiesce_counter)); in blk_root_drained_end()
2762 assert(blk->public.throttle_group_member.io_limits_disabled); in blk_root_drained_end()
2763 qatomic_dec(&blk->public.throttle_group_member.io_limits_disabled); in blk_root_drained_end()
2765 if (qatomic_fetch_dec(&blk->quiesce_counter) == 1) { in blk_root_drained_end()
2766 if (blk->dev_ops && blk->dev_ops->drained_end) { in blk_root_drained_end()
2767 blk->dev_ops->drained_end(blk->dev_opaque); in blk_root_drained_end()
2769 qemu_mutex_lock(&blk->queued_requests_lock); in blk_root_drained_end()
2770 while (qemu_co_enter_next(&blk->queued_requests, in blk_root_drained_end()
2771 &blk->queued_requests_lock)) { in blk_root_drained_end()
2774 qemu_mutex_unlock(&blk->queued_requests_lock); in blk_root_drained_end()
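blk_root_drained_begin()/end() bracket a quiesced section: while quiesce_counter is nonzero, new requests park in queued_requests (see blk_wait_while_drained() above) and the _end hook restarts them. Two caller-side patterns, sketched assuming a root node is attached (blk_bs() non-NULL):

```c
static void reconfigure_backend(BlockBackend *blk)
{
    /* One-shot: wait until this backend's in-flight count reaches zero;
     * dev_ops->drained_begin/drained_end fire around the drained section. */
    blk_drain(blk);

    /* Held-open section: keep the root node quiesced while changing it. */
    bdrv_drained_begin(blk_bs(blk));
    /* ... graph or state changes ... */
    bdrv_drained_end(blk_bs(blk));
}
```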
2819 return bdrv_co_copy_range(blk_in->root, off_in, in blk_co_copy_range()
2820 blk_out->root, off_out, in blk_co_copy_range()
2827 return blk->root; in blk_root()
2837 return -ENOMEDIUM; in blk_make_empty()
2840 return bdrv_make_empty(blk->root, errp); in blk_make_empty()