Lines matching refs: blk (QEMU, block/block-backend.c)

99     BlockBackend *blk;  member
122 blk_set_perm_locked(BlockBackend *blk, uint64_t perm, uint64_t shared_perm,
145 BlockBackend *blk = child->opaque; in blk_root_get_parent_desc() local
148 if (blk->name) { in blk_root_get_parent_desc()
149 return g_strdup_printf("block device '%s'", blk->name); in blk_root_get_parent_desc()
152 dev_id = blk_get_attached_dev_id(blk); in blk_root_get_parent_desc()
169 BlockBackend *blk = opaque; in blk_vm_state_changed() local
175 qemu_del_vm_change_state_handler(blk->vmsh); in blk_vm_state_changed()
176 blk->vmsh = NULL; in blk_vm_state_changed()
177 blk_set_perm(blk, blk->perm, blk->shared_perm, &local_err); in blk_vm_state_changed()
192 BlockBackend *blk = child->opaque; in blk_root_activate() local
196 if (!blk->disable_perm) { in blk_root_activate()
200 blk->disable_perm = false; in blk_root_activate()
209 saved_shared_perm = blk->shared_perm; in blk_root_activate()
211 blk_set_perm_locked(blk, blk->perm, BLK_PERM_ALL, &local_err); in blk_root_activate()
214 blk->disable_perm = true; in blk_root_activate()
217 blk->shared_perm = saved_shared_perm; in blk_root_activate()
223 if (!blk->vmsh) { in blk_root_activate()
224 blk->vmsh = qemu_add_vm_change_state_handler(blk_vm_state_changed, in blk_root_activate()
225 blk); in blk_root_activate()
230 blk_set_perm_locked(blk, blk->perm, blk->shared_perm, &local_err); in blk_root_activate()
233 blk->disable_perm = true; in blk_root_activate()
238 void blk_set_force_allow_inactivate(BlockBackend *blk) in blk_set_force_allow_inactivate() argument
241 blk->force_allow_inactivate = true; in blk_set_force_allow_inactivate()
244 static bool blk_can_inactivate(BlockBackend *blk) in blk_can_inactivate() argument
247 if (blk->dev || blk_name(blk)[0]) { in blk_can_inactivate()
256 if (!(blk->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED))) { in blk_can_inactivate()
260 return blk->force_allow_inactivate; in blk_can_inactivate()
265 BlockBackend *blk = child->opaque; in blk_root_inactivate() local
267 if (blk->disable_perm) { in blk_root_inactivate()
271 if (!blk_can_inactivate(blk)) { in blk_root_inactivate()
275 blk->disable_perm = true; in blk_root_inactivate()
276 if (blk->root) { in blk_root_inactivate()
277 bdrv_child_try_set_perm(blk->root, 0, BLK_PERM_ALL, &error_abort); in blk_root_inactivate()
285 BlockBackend *blk = child->opaque; in blk_root_attach() local
288 trace_blk_root_attach(child, blk, child->bs); in blk_root_attach()
290 QLIST_FOREACH(notifier, &blk->aio_notifiers, list) { in blk_root_attach()
300 BlockBackend *blk = child->opaque; in blk_root_detach() local
303 trace_blk_root_detach(child, blk, child->bs); in blk_root_detach()
305 QLIST_FOREACH(notifier, &blk->aio_notifiers, list) { in blk_root_detach()
315 BlockBackend *blk = c->opaque; in blk_root_get_parent_aio_context() local
318 return blk_get_aio_context(blk); in blk_root_get_parent_aio_context()
357 BlockBackend *blk; in blk_new() local
361 blk = g_new0(BlockBackend, 1); in blk_new()
362 blk->refcnt = 1; in blk_new()
363 blk->ctx = ctx; in blk_new()
364 blk->perm = perm; in blk_new()
365 blk->shared_perm = shared_perm; in blk_new()
366 blk_set_enable_write_cache(blk, true); in blk_new()
368 blk->on_read_error = BLOCKDEV_ON_ERROR_REPORT; in blk_new()
369 blk->on_write_error = BLOCKDEV_ON_ERROR_ENOSPC; in blk_new()
371 block_acct_init(&blk->stats); in blk_new()
373 qemu_mutex_init(&blk->queued_requests_lock); in blk_new()
374 qemu_co_queue_init(&blk->queued_requests); in blk_new()
375 notifier_list_init(&blk->remove_bs_notifiers); in blk_new()
376 notifier_list_init(&blk->insert_bs_notifiers); in blk_new()
377 QLIST_INIT(&blk->aio_notifiers); in blk_new()
379 QTAILQ_INSERT_TAIL(&block_backends, blk, link); in blk_new()
380 return blk; in blk_new()
397 BlockBackend *blk = blk_new(bdrv_get_aio_context(bs), perm, shared_perm); in blk_new_with_bs() local
401 if (blk_insert_bs(blk, bs, errp) < 0) { in blk_new_with_bs()
402 blk_unref(blk); in blk_new_with_bs()
405 return blk; in blk_new_with_bs()
426 BlockBackend *blk; in blk_new_open() local
463 blk = blk_new(bdrv_get_aio_context(bs), perm, shared); in blk_new_open()
464 blk->perm = perm; in blk_new_open()
465 blk->shared_perm = shared; in blk_new_open()
467 blk_insert_bs(blk, bs, errp); in blk_new_open()
470 if (!blk->root) { in blk_new_open()
471 blk_unref(blk); in blk_new_open()
475 return blk; in blk_new_open()
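
The group above covers the constructors: blk_new() initializes a fresh, detached backend, and blk_new_open() combines image open and attach. A minimal usage sketch, assuming QEMU's internal headers (later sketches assume the same ones); the filename and flags are illustrative, not values taken from the listing:

    /* Sketch: open an image and wrap it in a BlockBackend. */
    #include "qemu/osdep.h"
    #include "sysemu/block-backend.h"
    #include "qapi/error.h"

    static BlockBackend *open_backend_example(Error **errp)
    {
        /* NULL reference and NULL options: open by filename alone */
        BlockBackend *blk = blk_new_open("test.qcow2", NULL, NULL,
                                         BDRV_O_RDWR, errp);
        if (!blk) {
            return NULL;    /* errp was set by blk_new_open() */
        }
        return blk;         /* caller owns one reference; drop with blk_unref() */
    }
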
478 static void blk_delete(BlockBackend *blk) in blk_delete() argument
480 assert(!blk->refcnt); in blk_delete()
481 assert(!blk->name); in blk_delete()
482 assert(!blk->dev); in blk_delete()
483 if (blk->public.throttle_group_member.throttle_state) { in blk_delete()
484 blk_io_limits_disable(blk); in blk_delete()
486 if (blk->root) { in blk_delete()
487 blk_remove_bs(blk); in blk_delete()
489 if (blk->vmsh) { in blk_delete()
490 qemu_del_vm_change_state_handler(blk->vmsh); in blk_delete()
491 blk->vmsh = NULL; in blk_delete()
493 assert(QLIST_EMPTY(&blk->remove_bs_notifiers.notifiers)); in blk_delete()
494 assert(QLIST_EMPTY(&blk->insert_bs_notifiers.notifiers)); in blk_delete()
495 assert(QLIST_EMPTY(&blk->aio_notifiers)); in blk_delete()
496 assert(qemu_co_queue_empty(&blk->queued_requests)); in blk_delete()
497 qemu_mutex_destroy(&blk->queued_requests_lock); in blk_delete()
498 QTAILQ_REMOVE(&block_backends, blk, link); in blk_delete()
499 drive_info_del(blk->legacy_dinfo); in blk_delete()
500 block_acct_cleanup(&blk->stats); in blk_delete()
501 g_free(blk); in blk_delete()
513 int blk_get_refcnt(BlockBackend *blk) in blk_get_refcnt() argument
516 return blk ? blk->refcnt : 0; in blk_get_refcnt()
523 void blk_ref(BlockBackend *blk) in blk_ref() argument
525 assert(blk->refcnt > 0); in blk_ref()
527 blk->refcnt++; in blk_ref()
535 void blk_unref(BlockBackend *blk) in blk_unref() argument
538 if (blk) { in blk_unref()
539 assert(blk->refcnt > 0); in blk_unref()
540 if (blk->refcnt > 1) { in blk_unref()
541 blk->refcnt--; in blk_unref()
543 blk_drain(blk); in blk_unref()
545 assert(blk->refcnt == 1); in blk_unref()
546 blk->refcnt = 0; in blk_unref()
547 blk_delete(blk); in blk_unref()
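
blk_ref()/blk_unref() are plain reference counting; the final blk_unref() drains the backend and calls blk_delete(). A sketch of the ownership pattern (the function name is hypothetical):

    /* Sketch: pin the backend while using it. */
    static void use_backend_example(BlockBackend *blk)
    {
        blk_ref(blk);       /* take our own reference */
        /* ... submit and complete I/O ... */
        blk_unref(blk);     /* last ref triggers blk_drain() + blk_delete() */
    }
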
556 BlockBackend *blk_all_next(BlockBackend *blk) in blk_all_next() argument
559 return blk ? QTAILQ_NEXT(blk, link) in blk_all_next()
565 BlockBackend *blk = NULL; in blk_remove_all_bs() local
569 while ((blk = blk_all_next(blk)) != NULL) { in blk_remove_all_bs()
570 if (blk->root) { in blk_remove_all_bs()
571 blk_remove_bs(blk); in blk_remove_all_bs()
586 BlockBackend *blk_next(BlockBackend *blk) in blk_next() argument
589 return blk ? QTAILQ_NEXT(blk, monitor_link) in blk_next()
608 BlockBackend *old_blk = it->blk; in bdrv_next()
611 it->blk = blk_all_next(it->blk); in bdrv_next()
612 bs = it->blk ? blk_bs(it->blk) : NULL; in bdrv_next()
613 } while (it->blk && (bs == NULL || bdrv_first_blk(bs) != it->blk)); in bdrv_next()
615 if (it->blk) { in bdrv_next()
616 blk_ref(it->blk); in bdrv_next()
668 if (it->phase == BDRV_NEXT_BACKEND_ROOTS && it->blk) { in bdrv_next_cleanup()
669 blk_unref(it->blk); in bdrv_next_cleanup()
685 bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp) in monitor_add_blk() argument
687 assert(!blk->name); in monitor_add_blk()
706 blk->name = g_strdup(name); in monitor_add_blk()
707 QTAILQ_INSERT_TAIL(&monitor_block_backends, blk, monitor_link); in monitor_add_blk()
715 void monitor_remove_blk(BlockBackend *blk) in monitor_remove_blk() argument
719 if (!blk->name) { in monitor_remove_blk()
723 QTAILQ_REMOVE(&monitor_block_backends, blk, monitor_link); in monitor_remove_blk()
724 g_free(blk->name); in monitor_remove_blk()
725 blk->name = NULL; in monitor_remove_blk()
732 const char *blk_name(const BlockBackend *blk) in blk_name() argument
735 return blk->name ?: ""; in blk_name()
744 BlockBackend *blk = NULL; in blk_by_name() local
748 while ((blk = blk_next(blk)) != NULL) { in blk_by_name()
749 if (!strcmp(name, blk->name)) { in blk_by_name()
750 return blk; in blk_by_name()
759 BlockDriverState *blk_bs(BlockBackend *blk) in blk_bs() argument
762 return blk->root ? blk->root->bs : NULL; in blk_bs()
812 DriveInfo *blk_legacy_dinfo(BlockBackend *blk) in blk_legacy_dinfo() argument
815 return blk->legacy_dinfo; in blk_legacy_dinfo()
823 DriveInfo *blk_set_legacy_dinfo(BlockBackend *blk, DriveInfo *dinfo) in blk_set_legacy_dinfo() argument
825 assert(!blk->legacy_dinfo); in blk_set_legacy_dinfo()
827 return blk->legacy_dinfo = dinfo; in blk_set_legacy_dinfo()
836 BlockBackend *blk = NULL; in blk_by_legacy_dinfo() local
839 while ((blk = blk_next(blk)) != NULL) { in blk_by_legacy_dinfo()
840 if (blk->legacy_dinfo == dinfo) { in blk_by_legacy_dinfo()
841 return blk; in blk_by_legacy_dinfo()
850 BlockBackendPublic *blk_get_public(BlockBackend *blk) in blk_get_public() argument
853 return &blk->public; in blk_get_public()
859 void blk_remove_bs(BlockBackend *blk) in blk_remove_bs() argument
861 ThrottleGroupMember *tgm = &blk->public.throttle_group_member; in blk_remove_bs()
866 notifier_list_notify(&blk->remove_bs_notifiers, blk); in blk_remove_bs()
868 BlockDriverState *bs = blk_bs(blk); in blk_remove_bs()
882 blk_update_root_state(blk); in blk_remove_bs()
888 blk_drain(blk); in blk_remove_bs()
889 root = blk->root; in blk_remove_bs()
890 blk->root = NULL; in blk_remove_bs()
900 int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp) in blk_insert_bs() argument
902 ThrottleGroupMember *tgm = &blk->public.throttle_group_member; in blk_insert_bs()
907 blk->root = bdrv_root_attach_child(bs, "root", &child_root, in blk_insert_bs()
909 blk->perm, blk->shared_perm, in blk_insert_bs()
910 blk, errp); in blk_insert_bs()
912 if (blk->root == NULL) { in blk_insert_bs()
916 notifier_list_notify(&blk->insert_bs_notifiers, blk); in blk_insert_bs()
928 int blk_replace_bs(BlockBackend *blk, BlockDriverState *new_bs, Error **errp) in blk_replace_bs() argument
931 return bdrv_replace_child_bs(blk->root, new_bs, errp); in blk_replace_bs()
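
blk_insert_bs() attaches a BlockDriverState as the backend's root child under the stored perm/shared_perm pair; pairing it with blk_new() is essentially what blk_new_with_bs() above does. A sketch with illustrative permissions:

    /* Sketch: wrap an existing BlockDriverState in a new backend. */
    static BlockBackend *wrap_bs_example(BlockDriverState *bs, Error **errp)
    {
        BlockBackend *blk = blk_new(bdrv_get_aio_context(bs),
                                    BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE,
                                    BLK_PERM_ALL);

        if (blk_insert_bs(blk, bs, errp) < 0) {
            blk_unref(blk);
            return NULL;
        }
        return blk;
    }
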
938 blk_set_perm_locked(BlockBackend *blk, uint64_t perm, uint64_t shared_perm, in blk_set_perm_locked() argument
944 if (blk->root && !blk->disable_perm) { in blk_set_perm_locked()
945 ret = bdrv_child_try_set_perm(blk->root, perm, shared_perm, errp); in blk_set_perm_locked()
951 blk->perm = perm; in blk_set_perm_locked()
952 blk->shared_perm = shared_perm; in blk_set_perm_locked()
957 int blk_set_perm(BlockBackend *blk, uint64_t perm, uint64_t shared_perm, in blk_set_perm() argument
963 return blk_set_perm_locked(blk, perm, shared_perm, errp); in blk_set_perm()
966 void blk_get_perm(BlockBackend *blk, uint64_t *perm, uint64_t *shared_perm) in blk_get_perm() argument
969 *perm = blk->perm; in blk_get_perm()
970 *shared_perm = blk->shared_perm; in blk_get_perm()
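
blk_set_perm() forwards the new pair to the root child via bdrv_child_try_set_perm() and caches it only on success. A one-line sketch of tightening a backend to read-only while tolerating any sharing:

    /* Sketch: drop write permission. */
    static int make_read_only_example(BlockBackend *blk, Error **errp)
    {
        return blk_set_perm(blk, BLK_PERM_CONSISTENT_READ, BLK_PERM_ALL, errp);
    }
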
977 int blk_attach_dev(BlockBackend *blk, DeviceState *dev) in blk_attach_dev() argument
980 if (blk->dev) { in blk_attach_dev()
988 blk->disable_perm = true; in blk_attach_dev()
991 blk_ref(blk); in blk_attach_dev()
992 blk->dev = dev; in blk_attach_dev()
993 blk_iostatus_reset(blk); in blk_attach_dev()
1002 void blk_detach_dev(BlockBackend *blk, DeviceState *dev) in blk_detach_dev() argument
1004 assert(blk->dev == dev); in blk_detach_dev()
1006 blk->dev = NULL; in blk_detach_dev()
1007 blk->dev_ops = NULL; in blk_detach_dev()
1008 blk->dev_opaque = NULL; in blk_detach_dev()
1009 blk_set_perm(blk, 0, BLK_PERM_ALL, &error_abort); in blk_detach_dev()
1010 blk_unref(blk); in blk_detach_dev()
1016 DeviceState *blk_get_attached_dev(BlockBackend *blk) in blk_get_attached_dev() argument
1019 return blk->dev; in blk_get_attached_dev()
1022 static char *blk_get_attached_dev_id_or_path(BlockBackend *blk, bool want_id) in blk_get_attached_dev_id_or_path() argument
1024 DeviceState *dev = blk->dev; in blk_get_attached_dev_id_or_path()
1040 char *blk_get_attached_dev_id(BlockBackend *blk) in blk_get_attached_dev_id() argument
1042 return blk_get_attached_dev_id_or_path(blk, true); in blk_get_attached_dev_id()
1045 static char *blk_get_attached_dev_path(BlockBackend *blk) in blk_get_attached_dev_path() argument
1047 return blk_get_attached_dev_id_or_path(blk, false); in blk_get_attached_dev_path()
1058 BlockBackend *blk = NULL; in blk_by_dev() local
1063 while ((blk = blk_all_next(blk)) != NULL) { in blk_by_dev()
1064 if (blk->dev == dev) { in blk_by_dev()
1065 return blk; in blk_by_dev()
1076 void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops, in blk_set_dev_ops() argument
1080 blk->dev_ops = ops; in blk_set_dev_ops()
1081 blk->dev_opaque = opaque; in blk_set_dev_ops()
1084 if (qatomic_read(&blk->quiesce_counter) && ops && ops->drained_begin) { in blk_set_dev_ops()
1099 void blk_dev_change_media_cb(BlockBackend *blk, bool load, Error **errp) in blk_dev_change_media_cb() argument
1102 if (blk->dev_ops && blk->dev_ops->change_media_cb) { in blk_dev_change_media_cb()
1106 tray_was_open = blk_dev_is_tray_open(blk); in blk_dev_change_media_cb()
1107 blk->dev_ops->change_media_cb(blk->dev_opaque, load, &local_err); in blk_dev_change_media_cb()
1113 tray_is_open = blk_dev_is_tray_open(blk); in blk_dev_change_media_cb()
1116 char *id = blk_get_attached_dev_id(blk); in blk_dev_change_media_cb()
1117 qapi_event_send_device_tray_moved(blk_name(blk), id, tray_is_open); in blk_dev_change_media_cb()
1132 bool blk_dev_has_removable_media(BlockBackend *blk) in blk_dev_has_removable_media() argument
1135 return !blk->dev || (blk->dev_ops && blk->dev_ops->change_media_cb); in blk_dev_has_removable_media()
1141 bool blk_dev_has_tray(BlockBackend *blk) in blk_dev_has_tray() argument
1144 return blk->dev_ops && blk->dev_ops->is_tray_open; in blk_dev_has_tray()
1151 void blk_dev_eject_request(BlockBackend *blk, bool force) in blk_dev_eject_request() argument
1154 if (blk->dev_ops && blk->dev_ops->eject_request_cb) { in blk_dev_eject_request()
1155 blk->dev_ops->eject_request_cb(blk->dev_opaque, force); in blk_dev_eject_request()
1162 bool blk_dev_is_tray_open(BlockBackend *blk) in blk_dev_is_tray_open() argument
1165 if (blk_dev_has_tray(blk)) { in blk_dev_is_tray_open()
1166 return blk->dev_ops->is_tray_open(blk->dev_opaque); in blk_dev_is_tray_open()
1175 bool blk_dev_is_medium_locked(BlockBackend *blk) in blk_dev_is_medium_locked() argument
1178 if (blk->dev_ops && blk->dev_ops->is_medium_locked) { in blk_dev_is_medium_locked()
1179 return blk->dev_ops->is_medium_locked(blk->dev_opaque); in blk_dev_is_medium_locked()
1189 BlockBackend *blk = child->opaque; in blk_root_resize() local
1191 if (blk->dev_ops && blk->dev_ops->resize_cb) { in blk_root_resize()
1192 blk->dev_ops->resize_cb(blk->dev_opaque); in blk_root_resize()
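
The blk_dev_* helpers and blk_root_resize() dispatch through the BlockDevOps table a device model registers with blk_set_dev_ops(). A sketch of the registration; the callback names and device state are hypothetical, while the field names follow the calls visible above:

    /* Sketch: a device model registering media/tray callbacks. */
    static void my_change_media_cb(void *opaque, bool load, Error **errp)
    {
        /* react to medium insertion (load == true) or removal */
    }

    static bool my_is_tray_open(void *opaque)
    {
        return false;   /* placeholder: report the device's tray state */
    }

    static const BlockDevOps my_dev_ops = {
        .change_media_cb = my_change_media_cb,
        .is_tray_open    = my_is_tray_open,
    };

    /* typically called from the device's realize method */
    static void my_realize_example(BlockBackend *blk, void *dev_state)
    {
        blk_set_dev_ops(blk, &my_dev_ops, dev_state);
    }
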
1196 void blk_iostatus_enable(BlockBackend *blk) in blk_iostatus_enable() argument
1199 blk->iostatus_enabled = true; in blk_iostatus_enable()
1200 blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK; in blk_iostatus_enable()
1205 bool blk_iostatus_is_enabled(const BlockBackend *blk) in blk_iostatus_is_enabled() argument
1208 return (blk->iostatus_enabled && in blk_iostatus_is_enabled()
1209 (blk->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC || in blk_iostatus_is_enabled()
1210 blk->on_write_error == BLOCKDEV_ON_ERROR_STOP || in blk_iostatus_is_enabled()
1211 blk->on_read_error == BLOCKDEV_ON_ERROR_STOP)); in blk_iostatus_is_enabled()
1214 BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk) in blk_iostatus() argument
1217 return blk->iostatus; in blk_iostatus()
1220 void blk_iostatus_reset(BlockBackend *blk) in blk_iostatus_reset() argument
1223 if (blk_iostatus_is_enabled(blk)) { in blk_iostatus_reset()
1224 blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK; in blk_iostatus_reset()
1228 void blk_iostatus_set_err(BlockBackend *blk, int error) in blk_iostatus_set_err() argument
1231 assert(blk_iostatus_is_enabled(blk)); in blk_iostatus_set_err()
1232 if (blk->iostatus == BLOCK_DEVICE_IO_STATUS_OK) { in blk_iostatus_set_err()
1233 blk->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE : in blk_iostatus_set_err()
1238 void blk_set_allow_write_beyond_eof(BlockBackend *blk, bool allow) in blk_set_allow_write_beyond_eof() argument
1241 blk->allow_write_beyond_eof = allow; in blk_set_allow_write_beyond_eof()
1244 void blk_set_allow_aio_context_change(BlockBackend *blk, bool allow) in blk_set_allow_aio_context_change() argument
1247 blk->allow_aio_context_change = allow; in blk_set_allow_aio_context_change()
1250 void blk_set_disable_request_queuing(BlockBackend *blk, bool disable) in blk_set_disable_request_queuing() argument
1253 qatomic_set(&blk->disable_request_queuing, disable); in blk_set_disable_request_queuing()
1257 blk_check_byte_request(BlockBackend *blk, int64_t offset, int64_t bytes) in blk_check_byte_request() argument
1265 if (!blk_co_is_available(blk)) { in blk_check_byte_request()
1273 if (!blk->allow_write_beyond_eof) { in blk_check_byte_request()
1274 len = bdrv_co_getlength(blk_bs(blk)); in blk_check_byte_request()
1288 bool blk_in_drain(BlockBackend *blk) in blk_in_drain() argument
1291 return qatomic_read(&blk->quiesce_counter); in blk_in_drain()
1295 static void coroutine_fn blk_wait_while_drained(BlockBackend *blk) in blk_wait_while_drained() argument
1297 assert(blk->in_flight > 0); in blk_wait_while_drained()
1299 if (qatomic_read(&blk->quiesce_counter) && in blk_wait_while_drained()
1300 !qatomic_read(&blk->disable_request_queuing)) { in blk_wait_while_drained()
1306 qemu_mutex_lock(&blk->queued_requests_lock); in blk_wait_while_drained()
1307 blk_dec_in_flight(blk); in blk_wait_while_drained()
1308 qemu_co_queue_wait(&blk->queued_requests, &blk->queued_requests_lock); in blk_wait_while_drained()
1309 blk_inc_in_flight(blk); in blk_wait_while_drained()
1310 qemu_mutex_unlock(&blk->queued_requests_lock); in blk_wait_while_drained()
1316 blk_co_do_preadv_part(BlockBackend *blk, int64_t offset, int64_t bytes, in blk_co_do_preadv_part() argument
1324 blk_wait_while_drained(blk); in blk_co_do_preadv_part()
1328 bs = blk_bs(blk); in blk_co_do_preadv_part()
1329 trace_blk_co_preadv(blk, bs, offset, bytes, flags); in blk_co_do_preadv_part()
1331 ret = blk_check_byte_request(blk, offset, bytes); in blk_co_do_preadv_part()
1339 if (blk->public.throttle_group_member.throttle_state) { in blk_co_do_preadv_part()
1340 throttle_group_co_io_limits_intercept(&blk->public.throttle_group_member, in blk_co_do_preadv_part()
1344 ret = bdrv_co_preadv_part(blk->root, offset, bytes, qiov, qiov_offset, in blk_co_do_preadv_part()
1350 int coroutine_fn blk_co_pread(BlockBackend *blk, int64_t offset, int64_t bytes, in blk_co_pread() argument
1358 return blk_co_preadv(blk, offset, bytes, &qiov, flags); in blk_co_pread()
1361 int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset, in blk_co_preadv() argument
1368 blk_inc_in_flight(blk); in blk_co_preadv()
1369 ret = blk_co_do_preadv_part(blk, offset, bytes, qiov, 0, flags); in blk_co_preadv()
1370 blk_dec_in_flight(blk); in blk_co_preadv()
1375 int coroutine_fn blk_co_preadv_part(BlockBackend *blk, int64_t offset, in blk_co_preadv_part() argument
1382 blk_inc_in_flight(blk); in blk_co_preadv_part()
1383 ret = blk_co_do_preadv_part(blk, offset, bytes, qiov, qiov_offset, flags); in blk_co_preadv_part()
1384 blk_dec_in_flight(blk); in blk_co_preadv_part()
1391 blk_co_do_pwritev_part(BlockBackend *blk, int64_t offset, int64_t bytes, in blk_co_do_pwritev_part() argument
1399 blk_wait_while_drained(blk); in blk_co_do_pwritev_part()
1403 bs = blk_bs(blk); in blk_co_do_pwritev_part()
1404 trace_blk_co_pwritev(blk, bs, offset, bytes, flags); in blk_co_do_pwritev_part()
1406 ret = blk_check_byte_request(blk, offset, bytes); in blk_co_do_pwritev_part()
1413 if (blk->public.throttle_group_member.throttle_state) { in blk_co_do_pwritev_part()
1414 throttle_group_co_io_limits_intercept(&blk->public.throttle_group_member, in blk_co_do_pwritev_part()
1418 if (!blk->enable_write_cache) { in blk_co_do_pwritev_part()
1422 ret = bdrv_co_pwritev_part(blk->root, offset, bytes, qiov, qiov_offset, in blk_co_do_pwritev_part()
1428 int coroutine_fn blk_co_pwritev_part(BlockBackend *blk, int64_t offset, in blk_co_pwritev_part() argument
1436 blk_inc_in_flight(blk); in blk_co_pwritev_part()
1437 ret = blk_co_do_pwritev_part(blk, offset, bytes, qiov, qiov_offset, flags); in blk_co_pwritev_part()
1438 blk_dec_in_flight(blk); in blk_co_pwritev_part()
1443 int coroutine_fn blk_co_pwrite(BlockBackend *blk, int64_t offset, int64_t bytes, in blk_co_pwrite() argument
1451 return blk_co_pwritev(blk, offset, bytes, &qiov, flags); in blk_co_pwrite()
1454 int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset, in blk_co_pwritev() argument
1459 return blk_co_pwritev_part(blk, offset, bytes, qiov, 0, flags); in blk_co_pwritev()
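
blk_co_pread()/blk_co_pwrite() wrap a linear buffer in a single-entry QEMUIOVector and funnel into the *_part paths above, which bump in_flight, wait out drains, apply throttling, and run the byte-range checks. A coroutine-context sketch:

    /* Sketch: read-modify-write of the first 512 bytes (coroutine_fn only). */
    static int coroutine_fn rmw_first_sector_example(BlockBackend *blk)
    {
        uint8_t buf[512];
        int ret = blk_co_pread(blk, 0, sizeof(buf), buf, 0);

        if (ret < 0) {
            return ret;
        }
        buf[0] ^= 0xff;     /* illustrative modification */
        return blk_co_pwrite(blk, 0, sizeof(buf), buf, 0);
    }
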
1462 int coroutine_fn blk_co_block_status_above(BlockBackend *blk, in blk_co_block_status_above() argument
1470 return bdrv_co_block_status_above(blk_bs(blk), base, offset, bytes, pnum, in blk_co_block_status_above()
1474 int coroutine_fn blk_co_is_allocated_above(BlockBackend *blk, in blk_co_is_allocated_above() argument
1481 return bdrv_co_is_allocated_above(blk_bs(blk), base, include_base, offset, in blk_co_is_allocated_above()
1486 BlockBackend *blk; member
1493 int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags) in blk_make_zero() argument
1496 return bdrv_make_zero(blk->root, flags); in blk_make_zero()
1499 void blk_inc_in_flight(BlockBackend *blk) in blk_inc_in_flight() argument
1502 qatomic_inc(&blk->in_flight); in blk_inc_in_flight()
1505 void blk_dec_in_flight(BlockBackend *blk) in blk_dec_in_flight() argument
1508 qatomic_dec(&blk->in_flight); in blk_dec_in_flight()
1516 blk_dec_in_flight(acb->blk); in error_callback_bh()
1521 BlockAIOCB *blk_abort_aio_request(BlockBackend *blk, in blk_abort_aio_request() argument
1528 blk_inc_in_flight(blk); in blk_abort_aio_request()
1529 acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque); in blk_abort_aio_request()
1530 acb->blk = blk; in blk_abort_aio_request()
1553 blk_dec_in_flight(acb->rwco.blk); in blk_aio_complete()
1565 static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, in blk_aio_prwv() argument
1574 blk_inc_in_flight(blk); in blk_aio_prwv()
1575 acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque); in blk_aio_prwv()
1577 .blk = blk, in blk_aio_prwv()
1605 rwco->ret = blk_co_do_preadv_part(rwco->blk, rwco->offset, acb->bytes, qiov, in blk_aio_read_entry()
1617 rwco->ret = blk_co_do_pwritev_part(rwco->blk, rwco->offset, acb->bytes, in blk_aio_write_entry()
1622 BlockAIOCB *blk_aio_pwrite_zeroes(BlockBackend *blk, int64_t offset, in blk_aio_pwrite_zeroes() argument
1627 return blk_aio_prwv(blk, offset, bytes, NULL, blk_aio_write_entry, in blk_aio_pwrite_zeroes()
1631 int64_t coroutine_fn blk_co_getlength(BlockBackend *blk) in blk_co_getlength() argument
1636 if (!blk_co_is_available(blk)) { in blk_co_getlength()
1640 return bdrv_co_getlength(blk_bs(blk)); in blk_co_getlength()
1643 int64_t coroutine_fn blk_co_nb_sectors(BlockBackend *blk) in blk_co_nb_sectors() argument
1645 BlockDriverState *bs = blk_bs(blk); in blk_co_nb_sectors()
1661 int64_t coroutine_mixed_fn blk_nb_sectors(BlockBackend *blk) in blk_nb_sectors() argument
1663 BlockDriverState *bs = blk_bs(blk); in blk_nb_sectors()
1675 void coroutine_fn blk_co_get_geometry(BlockBackend *blk, in blk_co_get_geometry() argument
1678 int64_t ret = blk_co_nb_sectors(blk); in blk_co_get_geometry()
1685 void coroutine_mixed_fn blk_get_geometry(BlockBackend *blk, in blk_get_geometry() argument
1688 int64_t ret = blk_nb_sectors(blk); in blk_get_geometry()
1692 BlockAIOCB *blk_aio_preadv(BlockBackend *blk, int64_t offset, in blk_aio_preadv() argument
1698 return blk_aio_prwv(blk, offset, qiov->size, qiov, in blk_aio_preadv()
1702 BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset, in blk_aio_pwritev() argument
1708 return blk_aio_prwv(blk, offset, qiov->size, qiov, in blk_aio_pwritev()
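
The blk_aio_* entry points go through blk_aio_prwv(), which takes an in_flight reference, allocates the AIOCB, and completes through the supplied callback. A sketch of an asynchronous read; the callback/opaque wiring is illustrative:

    /* Sketch: submit an async read of qiov->size bytes at offset 0. */
    static void read_done_example(void *opaque, int ret)
    {
        QEMUIOVector *qiov = opaque;
        /* ret is 0 on success, negative errno on failure */
        qemu_iovec_destroy(qiov);
        g_free(qiov);
    }

    static void submit_read_example(BlockBackend *blk, void *buf, size_t len)
    {
        QEMUIOVector *qiov = g_new0(QEMUIOVector, 1);

        qemu_iovec_init(qiov, 1);
        qemu_iovec_add(qiov, buf, len);
        blk_aio_preadv(blk, 0, qiov, 0, read_done_example, qiov);
    }
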
1726 blk_co_do_ioctl(BlockBackend *blk, unsigned long int req, void *buf) in blk_co_do_ioctl() argument
1730 blk_wait_while_drained(blk); in blk_co_do_ioctl()
1733 if (!blk_co_is_available(blk)) { in blk_co_do_ioctl()
1737 return bdrv_co_ioctl(blk_bs(blk), req, buf); in blk_co_do_ioctl()
1740 int coroutine_fn blk_co_ioctl(BlockBackend *blk, unsigned long int req, in blk_co_ioctl() argument
1746 blk_inc_in_flight(blk); in blk_co_ioctl()
1747 ret = blk_co_do_ioctl(blk, req, buf); in blk_co_ioctl()
1748 blk_dec_in_flight(blk); in blk_co_ioctl()
1758 rwco->ret = blk_co_do_ioctl(rwco->blk, rwco->offset, rwco->iobuf); in blk_aio_ioctl_entry()
1763 BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf, in blk_aio_ioctl() argument
1767 return blk_aio_prwv(blk, req, 0, buf, blk_aio_ioctl_entry, 0, cb, opaque); in blk_aio_ioctl()
1772 blk_co_do_pdiscard(BlockBackend *blk, int64_t offset, int64_t bytes) in blk_co_do_pdiscard() argument
1777 blk_wait_while_drained(blk); in blk_co_do_pdiscard()
1780 ret = blk_check_byte_request(blk, offset, bytes); in blk_co_do_pdiscard()
1785 return bdrv_co_pdiscard(blk->root, offset, bytes); in blk_co_do_pdiscard()
1793 rwco->ret = blk_co_do_pdiscard(rwco->blk, rwco->offset, acb->bytes); in blk_aio_pdiscard_entry()
1797 BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk, in blk_aio_pdiscard() argument
1802 return blk_aio_prwv(blk, offset, bytes, NULL, blk_aio_pdiscard_entry, 0, in blk_aio_pdiscard()
1806 int coroutine_fn blk_co_pdiscard(BlockBackend *blk, int64_t offset, in blk_co_pdiscard() argument
1812 blk_inc_in_flight(blk); in blk_co_pdiscard()
1813 ret = blk_co_do_pdiscard(blk, offset, bytes); in blk_co_pdiscard()
1814 blk_dec_in_flight(blk); in blk_co_pdiscard()
1820 static int coroutine_fn blk_co_do_flush(BlockBackend *blk) in blk_co_do_flush() argument
1823 blk_wait_while_drained(blk); in blk_co_do_flush()
1826 if (!blk_co_is_available(blk)) { in blk_co_do_flush()
1830 return bdrv_co_flush(blk_bs(blk)); in blk_co_do_flush()
1838 rwco->ret = blk_co_do_flush(rwco->blk); in blk_aio_flush_entry()
1842 BlockAIOCB *blk_aio_flush(BlockBackend *blk, in blk_aio_flush() argument
1846 return blk_aio_prwv(blk, 0, 0, NULL, blk_aio_flush_entry, 0, cb, opaque); in blk_aio_flush()
1849 int coroutine_fn blk_co_flush(BlockBackend *blk) in blk_co_flush() argument
1854 blk_inc_in_flight(blk); in blk_co_flush()
1855 ret = blk_co_do_flush(blk); in blk_co_flush()
1856 blk_dec_in_flight(blk); in blk_co_flush()
1866 rwco->ret = blk_co_zone_report(rwco->blk, rwco->offset, in blk_aio_zone_report_entry()
1872 BlockAIOCB *blk_aio_zone_report(BlockBackend *blk, int64_t offset, in blk_aio_zone_report() argument
1881 blk_inc_in_flight(blk); in blk_aio_zone_report()
1882 acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque); in blk_aio_zone_report()
1884 .blk = blk, in blk_aio_zone_report()
1909 rwco->ret = blk_co_zone_mgmt(rwco->blk, in blk_aio_zone_mgmt_entry()
1915 BlockAIOCB *blk_aio_zone_mgmt(BlockBackend *blk, BlockZoneOp op, in blk_aio_zone_mgmt() argument
1922 blk_inc_in_flight(blk); in blk_aio_zone_mgmt()
1923 acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque); in blk_aio_zone_mgmt()
1925 .blk = blk, in blk_aio_zone_mgmt()
1950 rwco->ret = blk_co_zone_append(rwco->blk, (int64_t *)(uintptr_t)acb->bytes, in blk_aio_zone_append_entry()
1955 BlockAIOCB *blk_aio_zone_append(BlockBackend *blk, int64_t *offset, in blk_aio_zone_append() argument
1962 blk_inc_in_flight(blk); in blk_aio_zone_append()
1963 acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque); in blk_aio_zone_append()
1965 .blk = blk, in blk_aio_zone_append()
1990 int coroutine_fn blk_co_zone_report(BlockBackend *blk, int64_t offset, in blk_co_zone_report() argument
1997 blk_inc_in_flight(blk); /* increase before waiting */ in blk_co_zone_report()
1998 blk_wait_while_drained(blk); in blk_co_zone_report()
2000 if (!blk_is_available(blk)) { in blk_co_zone_report()
2001 blk_dec_in_flight(blk); in blk_co_zone_report()
2004 ret = bdrv_co_zone_report(blk_bs(blk), offset, nr_zones, zones); in blk_co_zone_report()
2005 blk_dec_in_flight(blk); in blk_co_zone_report()
2016 int coroutine_fn blk_co_zone_mgmt(BlockBackend *blk, BlockZoneOp op, in blk_co_zone_mgmt() argument
2022 blk_inc_in_flight(blk); in blk_co_zone_mgmt()
2023 blk_wait_while_drained(blk); in blk_co_zone_mgmt()
2026 ret = blk_check_byte_request(blk, offset, len); in blk_co_zone_mgmt()
2028 blk_dec_in_flight(blk); in blk_co_zone_mgmt()
2032 ret = bdrv_co_zone_mgmt(blk_bs(blk), op, offset, len); in blk_co_zone_mgmt()
2033 blk_dec_in_flight(blk); in blk_co_zone_mgmt()
2040 int coroutine_fn blk_co_zone_append(BlockBackend *blk, int64_t *offset, in blk_co_zone_append() argument
2046 blk_inc_in_flight(blk); in blk_co_zone_append()
2047 blk_wait_while_drained(blk); in blk_co_zone_append()
2049 if (!blk_is_available(blk)) { in blk_co_zone_append()
2050 blk_dec_in_flight(blk); in blk_co_zone_append()
2054 ret = bdrv_co_zone_append(blk_bs(blk), offset, qiov, flags); in blk_co_zone_append()
2055 blk_dec_in_flight(blk); in blk_co_zone_append()
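
The zoned helpers take their own in_flight reference before waiting for drains, then forward to the bdrv_co_zone_* counterparts. A hedged sketch of a zone report, with the signature inferred from the calls above; only meaningful on zoned block devices:

    /* Sketch: report up to 16 zones from offset 0 (coroutine context). */
    static int coroutine_fn report_zones_example(BlockBackend *blk)
    {
        unsigned int nr_zones = 16;
        g_autofree BlockZoneDescriptor *zones =
            g_new0(BlockZoneDescriptor, nr_zones);
        int ret = blk_co_zone_report(blk, 0, &nr_zones, zones);

        if (ret == 0) {
            /* nr_zones now holds the number of descriptors filled in */
        }
        return ret;
    }
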
2059 void blk_drain(BlockBackend *blk) in blk_drain() argument
2061 BlockDriverState *bs = blk_bs(blk); in blk_drain()
2070 AIO_WAIT_WHILE(blk_get_aio_context(blk), in blk_drain()
2071 qatomic_read(&blk->in_flight) > 0); in blk_drain()
2081 BlockBackend *blk = NULL; in blk_drain_all() local
2087 while ((blk = blk_all_next(blk)) != NULL) { in blk_drain_all()
2089 AIO_WAIT_WHILE_UNLOCKED(NULL, qatomic_read(&blk->in_flight) > 0); in blk_drain_all()
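
blk_drain() quiesces one backend and spins AIO_WAIT_WHILE() until in_flight reaches zero; blk_drain_all() does the same across every backend. The usual pattern before reconfiguring, as a sketch:

    /* Sketch: quiesce before changing backend state. */
    static void reconfigure_example(BlockBackend *blk)
    {
        blk_drain(blk);     /* no requests in flight once this returns */
        /* ... e.g. blk_remove_bs() / blk_insert_bs() safely here ... */
    }
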
2095 void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error, in blk_set_on_error() argument
2099 blk->on_read_error = on_read_error; in blk_set_on_error()
2100 blk->on_write_error = on_write_error; in blk_set_on_error()
2103 BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read) in blk_get_on_error() argument
2106 return is_read ? blk->on_read_error : blk->on_write_error; in blk_get_on_error()
2109 BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read, in blk_get_error_action() argument
2112 BlockdevOnError on_err = blk_get_on_error(blk, is_read); in blk_get_error_action()
2131 static void send_qmp_error_event(BlockBackend *blk, in send_qmp_error_event() argument
2136 BlockDriverState *bs = blk_bs(blk); in send_qmp_error_event()
2139 qapi_event_send_block_io_error(blk_name(blk), in send_qmp_error_event()
2140 blk_get_attached_dev_path(blk), in send_qmp_error_event()
2142 action, blk_iostatus_is_enabled(blk), in send_qmp_error_event()
2150 void blk_error_action(BlockBackend *blk, BlockErrorAction action, in blk_error_action() argument
2161 blk_iostatus_set_err(blk, error); in blk_error_action()
2172 send_qmp_error_event(blk, action, is_read, error); in blk_error_action()
2175 send_qmp_error_event(blk, action, is_read, error); in blk_error_action()
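
Device models turn an I/O error into a policy decision with blk_get_error_action() and apply it with blk_error_action(), which updates the iostatus and emits the QMP event via send_qmp_error_event(). The common pairing, as a sketch:

    /* Sketch: device-side error handling. */
    static void handle_io_error_example(BlockBackend *blk, bool is_read,
                                        int error)
    {
        BlockErrorAction action = blk_get_error_action(blk, is_read, error);

        blk_error_action(blk, action, is_read, error);
        if (action == BLOCK_ERROR_ACTION_REPORT) {
            /* complete the guest request with an error */
        }
    }
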
2183 bool blk_supports_write_perm(BlockBackend *blk) in blk_supports_write_perm() argument
2185 BlockDriverState *bs = blk_bs(blk); in blk_supports_write_perm()
2191 return blk->root_state.open_flags & BDRV_O_RDWR; in blk_supports_write_perm()
2199 bool blk_is_writable(BlockBackend *blk) in blk_is_writable() argument
2202 return blk->perm & BLK_PERM_WRITE; in blk_is_writable()
2205 bool blk_is_sg(BlockBackend *blk) in blk_is_sg() argument
2207 BlockDriverState *bs = blk_bs(blk); in blk_is_sg()
2217 bool blk_enable_write_cache(BlockBackend *blk) in blk_enable_write_cache() argument
2220 return blk->enable_write_cache; in blk_enable_write_cache()
2223 void blk_set_enable_write_cache(BlockBackend *blk, bool wce) in blk_set_enable_write_cache() argument
2226 blk->enable_write_cache = wce; in blk_set_enable_write_cache()
2229 bool coroutine_fn blk_co_is_inserted(BlockBackend *blk) in blk_co_is_inserted() argument
2231 BlockDriverState *bs = blk_bs(blk); in blk_co_is_inserted()
2238 bool coroutine_fn blk_co_is_available(BlockBackend *blk) in blk_co_is_available() argument
2241 return blk_co_is_inserted(blk) && !blk_dev_is_tray_open(blk); in blk_co_is_available()
2244 void coroutine_fn blk_co_lock_medium(BlockBackend *blk, bool locked) in blk_co_lock_medium() argument
2246 BlockDriverState *bs = blk_bs(blk); in blk_co_lock_medium()
2255 void coroutine_fn blk_co_eject(BlockBackend *blk, bool eject_flag) in blk_co_eject() argument
2257 BlockDriverState *bs = blk_bs(blk); in blk_co_eject()
2268 id = blk_get_attached_dev_id(blk); in blk_co_eject()
2269 qapi_event_send_device_tray_moved(blk_name(blk), id, in blk_co_eject()
2274 int blk_get_flags(BlockBackend *blk) in blk_get_flags() argument
2276 BlockDriverState *bs = blk_bs(blk); in blk_get_flags()
2282 return blk->root_state.open_flags; in blk_get_flags()
2287 uint32_t blk_get_request_alignment(BlockBackend *blk) in blk_get_request_alignment() argument
2289 BlockDriverState *bs = blk_bs(blk); in blk_get_request_alignment()
2295 uint64_t blk_get_max_hw_transfer(BlockBackend *blk) in blk_get_max_hw_transfer() argument
2297 BlockDriverState *bs = blk_bs(blk); in blk_get_max_hw_transfer()
2305 return ROUND_DOWN(max, blk_get_request_alignment(blk)); in blk_get_max_hw_transfer()
2309 uint32_t blk_get_max_transfer(BlockBackend *blk) in blk_get_max_transfer() argument
2311 BlockDriverState *bs = blk_bs(blk); in blk_get_max_transfer()
2318 return ROUND_DOWN(max, blk_get_request_alignment(blk)); in blk_get_max_transfer()
2321 int blk_get_max_hw_iov(BlockBackend *blk) in blk_get_max_hw_iov() argument
2324 return MIN_NON_ZERO(blk->root->bs->bl.max_hw_iov, in blk_get_max_hw_iov()
2325 blk->root->bs->bl.max_iov); in blk_get_max_hw_iov()
2328 int blk_get_max_iov(BlockBackend *blk) in blk_get_max_iov() argument
2331 return blk->root->bs->bl.max_iov; in blk_get_max_iov()
2334 void *blk_try_blockalign(BlockBackend *blk, size_t size) in blk_try_blockalign() argument
2337 return qemu_try_blockalign(blk ? blk_bs(blk) : NULL, size); in blk_try_blockalign()
2340 void *blk_blockalign(BlockBackend *blk, size_t size) in blk_blockalign() argument
2343 return qemu_blockalign(blk ? blk_bs(blk) : NULL, size); in blk_blockalign()
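
blk_get_request_alignment() exposes the root node's request alignment, and blk_blockalign() allocates a buffer suitably aligned for it (with a default when no node is attached). A sketch of allocating an aligned I/O buffer:

    /* Sketch: alignment-respecting buffer allocation. */
    static void *alloc_io_buf_example(BlockBackend *blk, size_t len)
    {
        uint32_t align = blk_get_request_alignment(blk);
        size_t padded = QEMU_ALIGN_UP(len, align);

        return blk_blockalign(blk, padded);     /* free with qemu_vfree() */
    }
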
2346 bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp) in blk_op_is_blocked() argument
2348 BlockDriverState *bs = blk_bs(blk); in blk_op_is_blocked()
2366 AioContext *blk_get_aio_context(BlockBackend *blk) in blk_get_aio_context() argument
2370 if (!blk) { in blk_get_aio_context()
2374 return qatomic_read(&blk->ctx); in blk_get_aio_context()
2377 int blk_set_aio_context(BlockBackend *blk, AioContext *new_context, in blk_set_aio_context() argument
2381 BlockDriverState *bs = blk_bs(blk); in blk_set_aio_context()
2387 qatomic_set(&blk->ctx, new_context); in blk_set_aio_context()
2393 old_allow_change = blk->allow_aio_context_change; in blk_set_aio_context()
2394 blk->allow_aio_context_change = true; in blk_set_aio_context()
2398 blk->allow_aio_context_change = old_allow_change; in blk_set_aio_context()
2406 BlockBackend *blk; member
2412 BlockBackend *blk = s->blk; in blk_root_set_aio_ctx_commit() local
2414 ThrottleGroupMember *tgm = &blk->public.throttle_group_member; in blk_root_set_aio_ctx_commit()
2416 qatomic_set(&blk->ctx, new_context); in blk_root_set_aio_ctx_commit()
2432 BlockBackend *blk = child->opaque; in blk_root_change_aio_ctx() local
2435 if (!blk->allow_aio_context_change) { in blk_root_change_aio_ctx()
2441 if (!blk->name || blk->dev) { in blk_root_change_aio_ctx()
2451 .blk = blk, in blk_root_change_aio_ctx()
2458 void blk_add_aio_context_notifier(BlockBackend *blk, in blk_add_aio_context_notifier() argument
2463 BlockDriverState *bs = blk_bs(blk); in blk_add_aio_context_notifier()
2470 QLIST_INSERT_HEAD(&blk->aio_notifiers, notifier, list); in blk_add_aio_context_notifier()
2478 void blk_remove_aio_context_notifier(BlockBackend *blk, in blk_remove_aio_context_notifier() argument
2485 BlockDriverState *bs = blk_bs(blk); in blk_remove_aio_context_notifier()
2494 QLIST_FOREACH(notifier, &blk->aio_notifiers, list) { in blk_remove_aio_context_notifier()
2507 void blk_add_remove_bs_notifier(BlockBackend *blk, Notifier *notify) in blk_add_remove_bs_notifier() argument
2510 notifier_list_add(&blk->remove_bs_notifiers, notify); in blk_add_remove_bs_notifier()
2513 BlockAcctStats *blk_get_stats(BlockBackend *blk) in blk_get_stats() argument
2516 return &blk->stats; in blk_get_stats()
2519 void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk, in blk_aio_get() argument
2523 return qemu_aio_get(aiocb_info, blk_bs(blk), cb, opaque); in blk_aio_get()
2526 int coroutine_fn blk_co_pwrite_zeroes(BlockBackend *blk, int64_t offset, in blk_co_pwrite_zeroes() argument
2530 return blk_co_pwritev(blk, offset, bytes, NULL, in blk_co_pwrite_zeroes()
2534 int coroutine_fn blk_co_pwrite_compressed(BlockBackend *blk, int64_t offset, in blk_co_pwrite_compressed() argument
2539 return blk_co_pwritev_part(blk, offset, bytes, &qiov, 0, in blk_co_pwrite_compressed()
2543 int coroutine_fn blk_co_truncate(BlockBackend *blk, int64_t offset, bool exact, in blk_co_truncate() argument
2549 if (!blk_co_is_available(blk)) { in blk_co_truncate()
2554 return bdrv_co_truncate(blk->root, offset, exact, prealloc, flags, errp); in blk_co_truncate()
2557 int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf, in blk_save_vmstate() argument
2563 if (!blk_is_available(blk)) { in blk_save_vmstate()
2567 ret = bdrv_save_vmstate(blk_bs(blk), buf, pos, size); in blk_save_vmstate()
2572 if (ret == size && !blk->enable_write_cache) { in blk_save_vmstate()
2573 ret = bdrv_flush(blk_bs(blk)); in blk_save_vmstate()
2579 int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size) in blk_load_vmstate() argument
2582 if (!blk_is_available(blk)) { in blk_load_vmstate()
2586 return bdrv_load_vmstate(blk_bs(blk), buf, pos, size); in blk_load_vmstate()
2589 int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz) in blk_probe_blocksizes() argument
2594 if (!blk_is_available(blk)) { in blk_probe_blocksizes()
2598 return bdrv_probe_blocksizes(blk_bs(blk), bsz); in blk_probe_blocksizes()
2601 int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo) in blk_probe_geometry() argument
2604 if (!blk_is_available(blk)) { in blk_probe_geometry()
2608 return bdrv_probe_geometry(blk_bs(blk), geo); in blk_probe_geometry()
2615 void blk_update_root_state(BlockBackend *blk) in blk_update_root_state() argument
2618 assert(blk->root); in blk_update_root_state()
2620 blk->root_state.open_flags = blk->root->bs->open_flags; in blk_update_root_state()
2621 blk->root_state.detect_zeroes = blk->root->bs->detect_zeroes; in blk_update_root_state()
2628 bool blk_get_detect_zeroes_from_root_state(BlockBackend *blk) in blk_get_detect_zeroes_from_root_state() argument
2631 return blk->root_state.detect_zeroes; in blk_get_detect_zeroes_from_root_state()
2638 int blk_get_open_flags_from_root_state(BlockBackend *blk) in blk_get_open_flags_from_root_state() argument
2641 return blk->root_state.open_flags; in blk_get_open_flags_from_root_state()
2644 BlockBackendRootState *blk_get_root_state(BlockBackend *blk) in blk_get_root_state() argument
2647 return &blk->root_state; in blk_get_root_state()
2652 BlockBackend *blk = NULL; in blk_commit_all() local
2656 while ((blk = blk_all_next(blk)) != NULL) { in blk_commit_all()
2657 BlockDriverState *unfiltered_bs = bdrv_skip_filters(blk_bs(blk)); in blk_commit_all()
2659 if (blk_is_inserted(blk) && bdrv_cow_child(unfiltered_bs)) { in blk_commit_all()
2673 void blk_set_io_limits(BlockBackend *blk, ThrottleConfig *cfg) in blk_set_io_limits() argument
2676 throttle_group_config(&blk->public.throttle_group_member, cfg); in blk_set_io_limits()
2679 void blk_io_limits_disable(BlockBackend *blk) in blk_io_limits_disable() argument
2681 BlockDriverState *bs = blk_bs(blk); in blk_io_limits_disable()
2682 ThrottleGroupMember *tgm = &blk->public.throttle_group_member; in blk_io_limits_disable()
2697 void blk_io_limits_enable(BlockBackend *blk, const char *group) in blk_io_limits_enable() argument
2699 assert(!blk->public.throttle_group_member.throttle_state); in blk_io_limits_enable()
2701 throttle_group_register_tgm(&blk->public.throttle_group_member, in blk_io_limits_enable()
2702 group, blk_get_aio_context(blk)); in blk_io_limits_enable()
2705 void blk_io_limits_update_group(BlockBackend *blk, const char *group) in blk_io_limits_update_group() argument
2709 if (!blk->public.throttle_group_member.throttle_state) { in blk_io_limits_update_group()
2714 if (!g_strcmp0(throttle_group_get_name(&blk->public.throttle_group_member), in blk_io_limits_update_group()
2720 blk_io_limits_disable(blk); in blk_io_limits_update_group()
2721 blk_io_limits_enable(blk, group); in blk_io_limits_update_group()
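
Throttling hangs off the embedded ThrottleGroupMember: blk_io_limits_enable() registers the backend with a named group, blk_set_io_limits() applies a ThrottleConfig, and blk_io_limits_update_group() moves it by disable/re-enable. A sketch with an illustrative group name and limit:

    /* Sketch: cap a backend at ~100 total IOPS. */
    static void enable_throttling_example(BlockBackend *blk)
    {
        ThrottleConfig cfg;

        blk_io_limits_enable(blk, "example-group");
        throttle_config_init(&cfg);
        cfg.buckets[THROTTLE_OPS_TOTAL].avg = 100;
        blk_set_io_limits(blk, &cfg);
    }
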
2726 BlockBackend *blk = child->opaque; in blk_root_drained_begin() local
2727 ThrottleGroupMember *tgm = &blk->public.throttle_group_member; in blk_root_drained_begin()
2729 if (qatomic_fetch_inc(&blk->quiesce_counter) == 0) { in blk_root_drained_begin()
2730 if (blk->dev_ops && blk->dev_ops->drained_begin) { in blk_root_drained_begin()
2731 blk->dev_ops->drained_begin(blk->dev_opaque); in blk_root_drained_begin()
2745 BlockBackend *blk = child->opaque; in blk_root_drained_poll() local
2747 assert(qatomic_read(&blk->quiesce_counter)); in blk_root_drained_poll()
2749 if (blk->dev_ops && blk->dev_ops->drained_poll) { in blk_root_drained_poll()
2750 busy = blk->dev_ops->drained_poll(blk->dev_opaque); in blk_root_drained_poll()
2752 return busy || !!blk->in_flight; in blk_root_drained_poll()
2757 BlockBackend *blk = child->opaque; in blk_root_drained_end() local
2758 assert(qatomic_read(&blk->quiesce_counter)); in blk_root_drained_end()
2760 assert(blk->public.throttle_group_member.io_limits_disabled); in blk_root_drained_end()
2761 qatomic_dec(&blk->public.throttle_group_member.io_limits_disabled); in blk_root_drained_end()
2763 if (qatomic_fetch_dec(&blk->quiesce_counter) == 1) { in blk_root_drained_end()
2764 if (blk->dev_ops && blk->dev_ops->drained_end) { in blk_root_drained_end()
2765 blk->dev_ops->drained_end(blk->dev_opaque); in blk_root_drained_end()
2767 qemu_mutex_lock(&blk->queued_requests_lock); in blk_root_drained_end()
2768 while (qemu_co_enter_next(&blk->queued_requests, in blk_root_drained_end()
2769 &blk->queued_requests_lock)) { in blk_root_drained_end()
2772 qemu_mutex_unlock(&blk->queued_requests_lock); in blk_root_drained_end()
2776 bool blk_register_buf(BlockBackend *blk, void *host, size_t size, Error **errp) in blk_register_buf() argument
2778 BlockDriverState *bs = blk_bs(blk); in blk_register_buf()
2788 void blk_unregister_buf(BlockBackend *blk, void *host, size_t size) in blk_unregister_buf() argument
2790 BlockDriverState *bs = blk_bs(blk); in blk_unregister_buf()
2822 const BdrvChild *blk_root(BlockBackend *blk) in blk_root() argument
2825 return blk->root; in blk_root()
2828 int blk_make_empty(BlockBackend *blk, Error **errp) in blk_make_empty() argument
2833 if (!blk_is_available(blk)) { in blk_make_empty()
2838 return bdrv_make_empty(blk->root, errp); in blk_make_empty()