Lines Matching refs:bs

46 bdrv_parent_cb_resize(BlockDriverState *bs);
48 static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
52 bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore) in bdrv_parent_drained_begin() argument
58 QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) { in bdrv_parent_drained_begin()
79 bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore) in bdrv_parent_drained_end() argument
85 QLIST_FOREACH(c, &bs->parents, next_parent) { in bdrv_parent_drained_end()
104 bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore, in bdrv_parent_drained_poll() argument
112 QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) { in bdrv_parent_drained_poll()
152 BlockDriverState *bs; member
160 s->bs->bl = s->old_bl; in bdrv_refresh_limits_abort()
169 void bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp) in bdrv_refresh_limits() argument
172 BlockDriver *drv = bs->drv; in bdrv_refresh_limits()
181 .bs = bs, in bdrv_refresh_limits()
182 .old_bl = bs->bl, in bdrv_refresh_limits()
187 memset(&bs->bl, 0, sizeof(bs->bl)); in bdrv_refresh_limits()
194 bs->bl.request_alignment = (drv->bdrv_co_preadv || in bdrv_refresh_limits()
200 QLIST_FOREACH(c, &bs->children, next) { in bdrv_refresh_limits()
203 bdrv_merge_limits(&bs->bl, &c->bs->bl); in bdrv_refresh_limits()
208 bs->bl.has_variable_length |= c->bs->bl.has_variable_length; in bdrv_refresh_limits()
213 bs->bl.min_mem_alignment = 512; in bdrv_refresh_limits()
214 bs->bl.opt_mem_alignment = qemu_real_host_page_size(); in bdrv_refresh_limits()
217 bs->bl.max_iov = IOV_MAX; in bdrv_refresh_limits()
222 drv->bdrv_refresh_limits(bs, errp); in bdrv_refresh_limits()
228 if (bs->bl.request_alignment > BDRV_MAX_ALIGNMENT) { in bdrv_refresh_limits()
238 void bdrv_enable_copy_on_read(BlockDriverState *bs) in bdrv_enable_copy_on_read() argument
241 qatomic_inc(&bs->copy_on_read); in bdrv_enable_copy_on_read()
244 void bdrv_disable_copy_on_read(BlockDriverState *bs) in bdrv_disable_copy_on_read() argument
246 int old = qatomic_fetch_dec(&bs->copy_on_read); in bdrv_disable_copy_on_read()
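A balanced enable/disable pair is how the copy-on-read counter above (lines 238–246) is meant to be used; the read path later in the listing checks it (line 1808). A minimal sketch follows; the wrapper function, include choice, and comments are illustrative assumptions, only the two bdrv_*_copy_on_read() calls come from the listing.

#include "qemu/osdep.h"
#include "block/block.h"   /* assumed include for the bdrv_*_copy_on_read() declarations */

/* Illustrative only: keep copy-on-read enabled for a window of reads on @bs. */
static void example_copy_on_read_window(BlockDriverState *bs)
{
    bdrv_enable_copy_on_read(bs);   /* line 241: increments bs->copy_on_read */
    /* ... reads issued now may populate bs via copy-on-read ... */
    bdrv_disable_copy_on_read(bs);  /* line 246: decrements the counter again */
}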
253 BlockDriverState *bs; member
261 bool bdrv_drain_poll(BlockDriverState *bs, BdrvChild *ignore_parent, in bdrv_drain_poll() argument
266 if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) { in bdrv_drain_poll()
270 if (qatomic_read(&bs->in_flight)) { in bdrv_drain_poll()
277 static bool bdrv_drain_poll_top_level(BlockDriverState *bs, in bdrv_drain_poll_top_level() argument
283 return bdrv_drain_poll(bs, ignore_parent, false); in bdrv_drain_poll_top_level()
286 static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent,
288 static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent);
294 BlockDriverState *bs = data->bs; in bdrv_co_drain_bh_cb() local
296 if (bs) { in bdrv_co_drain_bh_cb()
297 bdrv_dec_in_flight(bs); in bdrv_co_drain_bh_cb()
299 bdrv_do_drained_begin(bs, data->parent, data->poll); in bdrv_co_drain_bh_cb()
302 bdrv_do_drained_end(bs, data->parent); in bdrv_co_drain_bh_cb()
313 static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs, in bdrv_co_yield_to_drain() argument
327 .bs = bs, in bdrv_co_yield_to_drain()
334 if (bs) { in bdrv_co_yield_to_drain()
335 bdrv_inc_in_flight(bs); in bdrv_co_yield_to_drain()
347 static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent, in bdrv_do_drained_begin() argument
353 bdrv_co_yield_to_drain(bs, true, parent, poll); in bdrv_do_drained_begin()
360 if (qatomic_fetch_inc(&bs->quiesce_counter) == 0) { in bdrv_do_drained_begin()
362 bdrv_parent_drained_begin(bs, parent); in bdrv_do_drained_begin()
363 if (bs->drv && bs->drv->bdrv_drain_begin) { in bdrv_do_drained_begin()
364 bs->drv->bdrv_drain_begin(bs); in bdrv_do_drained_begin()
378 BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, parent)); in bdrv_do_drained_begin()
382 void bdrv_do_drained_begin_quiesce(BlockDriverState *bs, BdrvChild *parent) in bdrv_do_drained_begin_quiesce() argument
384 bdrv_do_drained_begin(bs, parent, false); in bdrv_do_drained_begin_quiesce()
388 bdrv_drained_begin(BlockDriverState *bs) in bdrv_drained_begin() argument
391 bdrv_do_drained_begin(bs, NULL, true); in bdrv_drained_begin()
398 static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent) in bdrv_do_drained_end() argument
405 bdrv_co_yield_to_drain(bs, false, parent, false); in bdrv_do_drained_end()
411 assert(bs->quiesce_counter > 0); in bdrv_do_drained_end()
415 old_quiesce_counter = qatomic_fetch_dec(&bs->quiesce_counter); in bdrv_do_drained_end()
418 if (bs->drv && bs->drv->bdrv_drain_end) { in bdrv_do_drained_end()
419 bs->drv->bdrv_drain_end(bs); in bdrv_do_drained_end()
421 bdrv_parent_drained_end(bs, parent); in bdrv_do_drained_end()
425 void bdrv_drained_end(BlockDriverState *bs) in bdrv_drained_end() argument
428 bdrv_do_drained_end(bs, NULL); in bdrv_drained_end()
431 void bdrv_drain(BlockDriverState *bs) in bdrv_drain() argument
434 bdrv_drained_begin(bs); in bdrv_drain()
435 bdrv_drained_end(bs); in bdrv_drain()
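The three entries just above show that bdrv_drain() is simply bdrv_drained_begin() followed by bdrv_drained_end(). A minimal sketch of how a caller uses that bracket follows; the wrapper function, include choice, and body comment are illustrative assumptions, only the two bdrv_drained_*() calls correspond to entries in the listing (lines 388 and 425).

#include "qemu/osdep.h"
#include "block/block.h"   /* assumed include for the bdrv_drained_*() declarations */

/* Illustrative only: quiesce @bs, do work that must not race with I/O, resume. */
static void example_with_bs_drained(BlockDriverState *bs)
{
    bdrv_drained_begin(bs);   /* polls until bdrv_drain_poll() reports no activity (line 378) */
    /* ... operate on bs while its quiesce_counter is raised ... */
    bdrv_drained_end(bs);     /* drops quiesce_counter; driver and parents resume (lines 415-421) */
}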
438 static void bdrv_drain_assert_idle(BlockDriverState *bs) in bdrv_drain_assert_idle() argument
444 assert(qatomic_read(&bs->in_flight) == 0); in bdrv_drain_assert_idle()
445 QLIST_FOREACH_SAFE(child, &bs->children, next, next) { in bdrv_drain_assert_idle()
446 bdrv_drain_assert_idle(child->bs); in bdrv_drain_assert_idle()
454 BlockDriverState *bs = NULL; in bdrv_drain_all_poll() local
464 while ((bs = bdrv_next_all_states(bs))) { in bdrv_drain_all_poll()
465 result |= bdrv_drain_poll(bs, NULL, true); in bdrv_drain_all_poll()
485 BlockDriverState *bs = NULL; in bdrv_drain_all_begin_nopoll() local
505 while ((bs = bdrv_next_all_states(bs))) { in bdrv_drain_all_begin_nopoll()
506 bdrv_do_drained_begin(bs, NULL, false); in bdrv_drain_all_begin_nopoll()
512 BlockDriverState *bs = NULL; in bdrv_drain_all_begin() local
533 while ((bs = bdrv_next_all_states(bs))) { in bdrv_drain_all_begin()
534 bdrv_drain_assert_idle(bs); in bdrv_drain_all_begin()
538 void bdrv_drain_all_end_quiesce(BlockDriverState *bs) in bdrv_drain_all_end_quiesce() argument
542 g_assert(bs->quiesce_counter > 0); in bdrv_drain_all_end_quiesce()
543 g_assert(!bs->refcnt); in bdrv_drain_all_end_quiesce()
545 while (bs->quiesce_counter) { in bdrv_drain_all_end_quiesce()
546 bdrv_do_drained_end(bs, NULL); in bdrv_drain_all_end_quiesce()
552 BlockDriverState *bs = NULL; in bdrv_drain_all_end() local
564 while ((bs = bdrv_next_all_states(bs))) { in bdrv_drain_all_end()
565 bdrv_do_drained_end(bs, NULL); in bdrv_drain_all_end()
588 qatomic_dec(&req->bs->serialising_in_flight); in tracked_request_end()
591 qemu_mutex_lock(&req->bs->reqs_lock); in tracked_request_end()
593 qemu_mutex_unlock(&req->bs->reqs_lock); in tracked_request_end()
607 BlockDriverState *bs, in tracked_request_begin() argument
615 .bs = bs, in tracked_request_begin()
627 qemu_mutex_lock(&bs->reqs_lock); in tracked_request_begin()
628 QLIST_INSERT_HEAD(&bs->tracked_requests, req, list); in tracked_request_begin()
629 qemu_mutex_unlock(&bs->reqs_lock); in tracked_request_begin()
654 QLIST_FOREACH(req, &self->bs->tracked_requests, list) { in bdrv_find_conflicting_request()
690 qemu_co_queue_wait(&req->wait_queue, &self->bs->reqs_lock); in bdrv_wait_serialising_requests_locked()
706 qatomic_inc(&req->bs->serialising_in_flight); in tracked_request_set_serialising()
718 BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs) in bdrv_co_get_self_request() argument
724 QLIST_FOREACH(req, &bs->tracked_requests, list) { in bdrv_co_get_self_request()
737 bdrv_round_to_subclusters(BlockDriverState *bs, int64_t offset, int64_t bytes, in bdrv_round_to_subclusters() argument
742 if (bdrv_co_get_info(bs, &bdi) < 0 || bdi.subcluster_size == 0) { in bdrv_round_to_subclusters()
752 static int coroutine_fn GRAPH_RDLOCK bdrv_get_cluster_size(BlockDriverState *bs) in bdrv_get_cluster_size() argument
757 ret = bdrv_co_get_info(bs, &bdi); in bdrv_get_cluster_size()
759 return bs->bl.request_alignment; in bdrv_get_cluster_size()
765 void bdrv_inc_in_flight(BlockDriverState *bs) in bdrv_inc_in_flight() argument
768 qatomic_inc(&bs->in_flight); in bdrv_inc_in_flight()
771 void bdrv_wakeup(BlockDriverState *bs) in bdrv_wakeup() argument
777 void bdrv_dec_in_flight(BlockDriverState *bs) in bdrv_dec_in_flight() argument
780 qatomic_dec(&bs->in_flight); in bdrv_dec_in_flight()
781 bdrv_wakeup(bs); in bdrv_dec_in_flight()
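bdrv_inc_in_flight(), bdrv_wakeup() and bdrv_dec_in_flight() above (lines 765–781) maintain the counter that drain polls on, and nearly every request path later in the listing brackets its work with them (bdrv_co_preadv_part, bdrv_co_flush, bdrv_co_ioctl, the zone helpers, and so on). A minimal sketch of that bracket, with the request itself left as a placeholder and the include choice an assumption:

#include "qemu/osdep.h"
#include "block/block_int.h"   /* assumed include for bdrv_inc/dec_in_flight() */

/* Illustrative only: account one request on @bs so that drain waits for it. */
static void example_in_flight_bracket(BlockDriverState *bs)
{
    bdrv_inc_in_flight(bs);   /* line 768: raise bs->in_flight */
    /* ... submit the request / call into bs->drv ... */
    bdrv_dec_in_flight(bs);   /* lines 780-781: drop the counter and call bdrv_wakeup(bs) */
}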
787 BlockDriverState *bs = self->bs; in bdrv_wait_serialising_requests() local
789 if (!qatomic_read(&bs->serialising_in_flight)) { in bdrv_wait_serialising_requests()
793 qemu_mutex_lock(&bs->reqs_lock); in bdrv_wait_serialising_requests()
795 qemu_mutex_unlock(&bs->reqs_lock); in bdrv_wait_serialising_requests()
803 qemu_mutex_lock(&req->bs->reqs_lock); in bdrv_make_request_serialising()
808 qemu_mutex_unlock(&req->bs->reqs_lock); in bdrv_make_request_serialising()
904 BlockDriverState *bs = child->bs; in bdrv_make_zero() local
907 target_size = bdrv_getlength(bs); in bdrv_make_zero()
917 ret = bdrv_block_status(bs, offset, bytes, &bytes, NULL, NULL); in bdrv_make_zero()
952 ret = bdrv_co_flush(child->bs); in bdrv_co_pwrite_sync()
974 bdrv_driver_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes, in bdrv_driver_preadv() argument
977 BlockDriver *drv = bs->drv; in bdrv_driver_preadv()
985 assert(!(flags & ~bs->supported_read_flags)); in bdrv_driver_preadv()
992 return drv->bdrv_co_preadv_part(bs, offset, bytes, qiov, qiov_offset, in bdrv_driver_preadv()
1002 ret = drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags); in bdrv_driver_preadv()
1012 acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags, in bdrv_driver_preadv()
1032 ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov); in bdrv_driver_preadv()
1043 bdrv_driver_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes, in bdrv_driver_pwritev() argument
1047 BlockDriver *drv = bs->drv; in bdrv_driver_pwritev()
1062 (~bs->supported_write_flags & BDRV_REQ_FUA)) { in bdrv_driver_pwritev()
1067 flags &= bs->supported_write_flags; in bdrv_driver_pwritev()
1070 ret = drv->bdrv_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset, in bdrv_driver_pwritev()
1081 ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov, flags); in bdrv_driver_pwritev()
1091 acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov, flags, in bdrv_driver_pwritev()
1110 ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov, flags); in bdrv_driver_pwritev()
1114 ret = bdrv_co_flush(bs); in bdrv_driver_pwritev()
1125 bdrv_driver_pwritev_compressed(BlockDriverState *bs, int64_t offset, in bdrv_driver_pwritev_compressed() argument
1129 BlockDriver *drv = bs->drv; in bdrv_driver_pwritev_compressed()
1145 return drv->bdrv_co_pwritev_compressed_part(bs, offset, bytes, in bdrv_driver_pwritev_compressed()
1150 return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov); in bdrv_driver_pwritev_compressed()
1154 ret = drv->bdrv_co_pwritev_compressed(bs, offset, bytes, &local_qiov); in bdrv_driver_pwritev_compressed()
1164 BlockDriverState *bs = child->bs; in bdrv_co_do_copy_on_readv() local
1173 BlockDriver *drv = bs->drv; in bdrv_co_do_copy_on_readv()
1178 int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, in bdrv_co_do_copy_on_readv()
1193 skip_write = (bs->open_flags & BDRV_O_INACTIVE); in bdrv_co_do_copy_on_readv()
1209 bdrv_round_to_subclusters(bs, offset, bytes, &align_offset, &align_bytes); in bdrv_co_do_copy_on_readv()
1212 trace_bdrv_co_do_copy_on_readv(bs, offset, bytes, in bdrv_co_do_copy_on_readv()
1222 ret = bdrv_co_is_allocated(bs, align_offset, in bdrv_co_do_copy_on_readv()
1252 bounce_buffer = qemu_try_blockalign(bs, bounce_buffer_len); in bdrv_co_do_copy_on_readv()
1260 ret = bdrv_driver_preadv(bs, align_offset, pnum, in bdrv_co_do_copy_on_readv()
1266 bdrv_co_debug_event(bs, BLKDBG_COR_WRITE); in bdrv_co_do_copy_on_readv()
1272 ret = bdrv_co_do_pwrite_zeroes(bs, align_offset, pnum, in bdrv_co_do_copy_on_readv()
1278 ret = bdrv_driver_pwritev(bs, align_offset, pnum, in bdrv_co_do_copy_on_readv()
1299 ret = bdrv_driver_preadv(bs, offset + progress, in bdrv_co_do_copy_on_readv()
1329 BlockDriverState *bs = child->bs; in bdrv_aligned_preadv() local
1339 assert((bs->open_flags & BDRV_O_NO_IO) == 0); in bdrv_aligned_preadv()
1340 max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX), in bdrv_aligned_preadv()
1359 bdrv_make_request_serialising(req, bdrv_get_cluster_size(bs)); in bdrv_aligned_preadv()
1370 ret = bdrv_co_is_allocated(bs, offset, bytes, &pnum); in bdrv_aligned_preadv()
1385 total_bytes = bdrv_co_getlength(bs); in bdrv_aligned_preadv()
1391 assert(!(flags & ~(bs->supported_read_flags | BDRV_REQ_REGISTERED_BUF))); in bdrv_aligned_preadv()
1395 ret = bdrv_driver_preadv(bs, offset, bytes, qiov, qiov_offset, flags); in bdrv_aligned_preadv()
1406 ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining, in bdrv_aligned_preadv()
1471 static bool bdrv_init_padding(BlockDriverState *bs, in bdrv_init_padding() argument
1476 int64_t align = bs->bl.request_alignment; in bdrv_init_padding()
1499 pad->buf = qemu_blockalign(bs, pad->buf_len); in bdrv_init_padding()
1515 BlockDriverState *bs = child->bs; in bdrv_padding_rmw_read() local
1516 uint64_t align = bs->bl.request_alignment; in bdrv_padding_rmw_read()
1527 bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD); in bdrv_padding_rmw_read()
1530 bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL); in bdrv_padding_rmw_read()
1538 bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD); in bdrv_padding_rmw_read()
1541 bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL); in bdrv_padding_rmw_read()
1552 bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL); in bdrv_padding_rmw_read()
1560 bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL); in bdrv_padding_rmw_read()
1606 static int bdrv_create_padded_qiov(BlockDriverState *bs, in bdrv_create_padded_qiov() argument
1670 pad->collapse_bounce_buf = qemu_blockalign(bs, pad->collapse_len); in bdrv_create_padded_qiov()
1704 static int bdrv_pad_request(BlockDriverState *bs, in bdrv_pad_request() argument
1722 if (!bdrv_init_padding(bs, *offset, *bytes, write, pad)) { in bdrv_pad_request()
1740 ret = bdrv_create_padded_qiov(bs, pad, sliced_iov, sliced_niov, in bdrv_pad_request()
1776 BlockDriverState *bs = child->bs; in bdrv_co_preadv_part() local
1782 trace_bdrv_co_preadv_part(bs, offset, bytes, flags); in bdrv_co_preadv_part()
1784 if (!bdrv_co_is_inserted(bs)) { in bdrv_co_preadv_part()
1793 if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) { in bdrv_co_preadv_part()
1805 bdrv_inc_in_flight(bs); in bdrv_co_preadv_part()
1808 if (qatomic_read(&bs->copy_on_read)) { in bdrv_co_preadv_part()
1812 ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, false, in bdrv_co_preadv_part()
1818 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ); in bdrv_co_preadv_part()
1820 bs->bl.request_alignment, in bdrv_co_preadv_part()
1826 bdrv_dec_in_flight(bs); in bdrv_co_preadv_part()
1832 bdrv_co_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes, in bdrv_co_do_pwrite_zeroes() argument
1835 BlockDriver *drv = bs->drv; in bdrv_co_do_pwrite_zeroes()
1843 int64_t max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, in bdrv_co_do_pwrite_zeroes()
1845 int alignment = MAX(bs->bl.pwrite_zeroes_alignment, in bdrv_co_do_pwrite_zeroes()
1846 bs->bl.request_alignment); in bdrv_co_do_pwrite_zeroes()
1847 int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER); in bdrv_co_do_pwrite_zeroes()
1856 if ((flags & ~bs->supported_zero_flags) & BDRV_REQ_NO_FALLBACK) { in bdrv_co_do_pwrite_zeroes()
1866 if (!(bs->open_flags & BDRV_O_UNMAP)) { in bdrv_co_do_pwrite_zeroes()
1871 bdrv_bsc_invalidate_range(bs, offset, bytes); in bdrv_co_do_pwrite_zeroes()
1873 assert(alignment % bs->bl.request_alignment == 0); in bdrv_co_do_pwrite_zeroes()
1877 assert(max_write_zeroes >= bs->bl.request_alignment); in bdrv_co_do_pwrite_zeroes()
1906 ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num, in bdrv_co_do_pwrite_zeroes()
1907 flags & bs->supported_zero_flags); in bdrv_co_do_pwrite_zeroes()
1909 !(bs->supported_zero_flags & BDRV_REQ_FUA)) { in bdrv_co_do_pwrite_zeroes()
1913 assert(!bs->supported_zero_flags); in bdrv_co_do_pwrite_zeroes()
1921 !(bs->supported_write_flags & BDRV_REQ_FUA)) { in bdrv_co_do_pwrite_zeroes()
1929 buf = qemu_try_blockalign0(bs, num); in bdrv_co_do_pwrite_zeroes()
1937 ret = bdrv_driver_pwritev(bs, offset, num, &qiov, 0, write_flags); in bdrv_co_do_pwrite_zeroes()
1954 ret = bdrv_co_flush(bs); in bdrv_co_do_pwrite_zeroes()
1964 BlockDriverState *bs = child->bs; in bdrv_co_write_req_prepare() local
1968 if (bdrv_is_read_only(bs)) { in bdrv_co_write_req_prepare()
1972 assert(!(bs->open_flags & BDRV_O_INACTIVE)); in bdrv_co_write_req_prepare()
1973 assert((bs->open_flags & BDRV_O_NO_IO) == 0); in bdrv_co_write_req_prepare()
1978 QEMU_LOCK_GUARD(&bs->reqs_lock); in bdrv_co_write_req_prepare()
1980 tracked_request_set_serialising(req, bdrv_get_cluster_size(bs)); in bdrv_co_write_req_prepare()
1993 assert(offset + bytes <= bs->total_sectors * BDRV_SECTOR_SIZE || in bdrv_co_write_req_prepare()
2004 bdrv_write_threshold_check_write(bs, offset, bytes); in bdrv_co_write_req_prepare()
2019 BlockDriverState *bs = child->bs; in bdrv_co_write_req_finish() local
2023 qatomic_inc(&bs->write_gen); in bdrv_co_write_req_finish()
2034 end_sector > bs->total_sectors) && in bdrv_co_write_req_finish()
2036 bs->total_sectors = end_sector; in bdrv_co_write_req_finish()
2037 bdrv_parent_cb_resize(bs); in bdrv_co_write_req_finish()
2038 bdrv_dirty_bitmap_truncate(bs, end_sector << BDRV_SECTOR_BITS); in bdrv_co_write_req_finish()
2043 stat64_max(&bs->wr_highest_offset, offset + bytes); in bdrv_co_write_req_finish()
2046 bdrv_set_dirty(bs, offset, bytes); in bdrv_co_write_req_finish()
2064 BlockDriverState *bs = child->bs; in bdrv_aligned_pwritev() local
2065 BlockDriver *drv = bs->drv; in bdrv_aligned_pwritev()
2077 if (bdrv_has_readonly_bitmaps(bs)) { in bdrv_aligned_pwritev()
2084 max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX), in bdrv_aligned_pwritev()
2089 if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF && in bdrv_aligned_pwritev()
2093 if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) { in bdrv_aligned_pwritev()
2104 bdrv_co_debug_event(bs, BLKDBG_PWRITEV_ZERO); in bdrv_aligned_pwritev()
2105 ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags); in bdrv_aligned_pwritev()
2107 ret = bdrv_driver_pwritev_compressed(bs, offset, bytes, in bdrv_aligned_pwritev()
2110 bdrv_co_debug_event(bs, BLKDBG_PWRITEV); in bdrv_aligned_pwritev()
2111 ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, qiov_offset, flags); in bdrv_aligned_pwritev()
2113 bdrv_co_debug_event(bs, BLKDBG_PWRITEV); in bdrv_aligned_pwritev()
2120 !(bs->supported_write_flags & BDRV_REQ_FUA)) { in bdrv_aligned_pwritev()
2126 ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining, in bdrv_aligned_pwritev()
2136 bdrv_co_debug_event(bs, BLKDBG_PWRITEV_DONE); in bdrv_aligned_pwritev()
2150 BlockDriverState *bs = child->bs; in bdrv_co_do_zero_pwritev() local
2152 uint64_t align = bs->bl.request_alignment; in bdrv_co_do_zero_pwritev()
2160 padding = bdrv_init_padding(bs, offset, bytes, true, &pad); in bdrv_co_do_zero_pwritev()
2228 BlockDriverState *bs = child->bs; in bdrv_co_pwritev_part() local
2230 uint64_t align = bs->bl.request_alignment; in bdrv_co_pwritev_part()
2236 trace_bdrv_co_pwritev_part(child->bs, offset, bytes, flags); in bdrv_co_pwritev_part()
2238 if (!bdrv_co_is_inserted(bs)) { in bdrv_co_pwritev_part()
2258 if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) { in bdrv_co_pwritev_part()
2276 ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, true, in bdrv_co_pwritev_part()
2283 bdrv_inc_in_flight(bs); in bdrv_co_pwritev_part()
2284 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE); in bdrv_co_pwritev_part()
2311 bdrv_dec_in_flight(bs); in bdrv_co_pwritev_part()
2320 trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags); in bdrv_co_pwrite_zeroes()
2333 BlockDriverState *bs = NULL; in bdrv_flush_all() local
2348 for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) { in bdrv_flush_all()
2349 int ret = bdrv_flush(bs); in bdrv_flush_all()
2386 bdrv_co_do_block_status(BlockDriverState *bs, bool want_zero, in bdrv_co_do_block_status() argument
2402 total_size = bdrv_co_getlength(bs); in bdrv_co_do_block_status()
2423 assert(bs->drv); in bdrv_co_do_block_status()
2424 has_filtered_child = bdrv_filter_child(bs); in bdrv_co_do_block_status()
2425 if (!bs->drv->bdrv_co_block_status && !has_filtered_child) { in bdrv_co_do_block_status()
2431 if (bs->drv->protocol_name) { in bdrv_co_do_block_status()
2434 local_file = bs; in bdrv_co_do_block_status()
2439 bdrv_inc_in_flight(bs); in bdrv_co_do_block_status()
2442 align = bs->bl.request_alignment; in bdrv_co_do_block_status()
2446 if (bs->drv->bdrv_co_block_status) { in bdrv_co_do_block_status()
2468 if (QLIST_EMPTY(&bs->children) && in bdrv_co_do_block_status()
2469 bdrv_bsc_is_data(bs, aligned_offset, pnum)) in bdrv_co_do_block_status()
2472 local_file = bs; in bdrv_co_do_block_status()
2475 ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset, in bdrv_co_do_block_status()
2492 QLIST_EMPTY(&bs->children)) in bdrv_co_do_block_status()
2505 assert(local_file == bs); in bdrv_co_do_block_status()
2507 bdrv_bsc_fill(bs, aligned_offset, *pnum); in bdrv_co_do_block_status()
2513 local_file = bdrv_filter_bs(bs); in bdrv_co_do_block_status()
2554 } else if (bs->drv->supports_backing) { in bdrv_co_do_block_status()
2555 BlockDriverState *cow_bs = bdrv_cow_bs(bs); in bdrv_co_do_block_status()
2569 local_file && local_file != bs && in bdrv_co_do_block_status()
2608 bdrv_dec_in_flight(bs); in bdrv_co_do_block_status()
2623 bdrv_co_common_block_status_above(BlockDriverState *bs, in bdrv_co_common_block_status_above() argument
2648 if (!include_base && bs == base) { in bdrv_co_common_block_status_above()
2653 ret = bdrv_co_do_block_status(bs, want_zero, offset, bytes, pnum, in bdrv_co_common_block_status_above()
2656 if (ret < 0 || *pnum == 0 || ret & BDRV_BLOCK_ALLOCATED || bs == base) { in bdrv_co_common_block_status_above()
2667 for (p = bdrv_filter_or_cow_bs(bs); include_base || p != base; in bdrv_co_common_block_status_above()
2726 int coroutine_fn bdrv_co_block_status_above(BlockDriverState *bs, in bdrv_co_block_status_above() argument
2733 return bdrv_co_common_block_status_above(bs, base, false, true, offset, in bdrv_co_block_status_above()
2737 int coroutine_fn bdrv_co_block_status(BlockDriverState *bs, int64_t offset, in bdrv_co_block_status() argument
2742 return bdrv_co_block_status_above(bs, bdrv_filter_or_cow_bs(bs), in bdrv_co_block_status()
2753 int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset, in bdrv_co_is_zero_fast() argument
2764 ret = bdrv_co_common_block_status_above(bs, NULL, false, false, offset, in bdrv_co_is_zero_fast()
2774 int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t offset, in bdrv_co_is_allocated() argument
2781 ret = bdrv_co_common_block_status_above(bs, bs, true, false, offset, in bdrv_co_is_allocated()
2807 int coroutine_fn bdrv_co_is_allocated_above(BlockDriverState *bs, in bdrv_co_is_allocated_above() argument
2816 ret = bdrv_co_common_block_status_above(bs, base, include_base, false, in bdrv_co_is_allocated_above()
2830 bdrv_co_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) in bdrv_co_readv_vmstate() argument
2832 BlockDriver *drv = bs->drv; in bdrv_co_readv_vmstate()
2833 BlockDriverState *child_bs = bdrv_primary_bs(bs); in bdrv_co_readv_vmstate()
2847 bdrv_inc_in_flight(bs); in bdrv_co_readv_vmstate()
2850 ret = drv->bdrv_co_load_vmstate(bs, qiov, pos); in bdrv_co_readv_vmstate()
2857 bdrv_dec_in_flight(bs); in bdrv_co_readv_vmstate()
2863 bdrv_co_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) in bdrv_co_writev_vmstate() argument
2865 BlockDriver *drv = bs->drv; in bdrv_co_writev_vmstate()
2866 BlockDriverState *child_bs = bdrv_primary_bs(bs); in bdrv_co_writev_vmstate()
2880 bdrv_inc_in_flight(bs); in bdrv_co_writev_vmstate()
2883 ret = drv->bdrv_co_save_vmstate(bs, qiov, pos); in bdrv_co_writev_vmstate()
2890 bdrv_dec_in_flight(bs); in bdrv_co_writev_vmstate()
2895 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf, in bdrv_save_vmstate() argument
2899 int ret = bdrv_writev_vmstate(bs, &qiov, pos); in bdrv_save_vmstate()
2905 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf, in bdrv_load_vmstate() argument
2909 int ret = bdrv_readv_vmstate(bs, &qiov, pos); in bdrv_load_vmstate()
2947 int coroutine_fn bdrv_co_flush(BlockDriverState *bs) in bdrv_co_flush() argument
2949 BdrvChild *primary_child = bdrv_primary_child(bs); in bdrv_co_flush()
2956 bdrv_inc_in_flight(bs); in bdrv_co_flush()
2958 if (!bdrv_co_is_inserted(bs) || bdrv_is_read_only(bs) || in bdrv_co_flush()
2959 bdrv_is_sg(bs)) { in bdrv_co_flush()
2963 qemu_mutex_lock(&bs->reqs_lock); in bdrv_co_flush()
2964 current_gen = qatomic_read(&bs->write_gen); in bdrv_co_flush()
2967 while (bs->active_flush_req) { in bdrv_co_flush()
2968 qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock); in bdrv_co_flush()
2972 bs->active_flush_req = true; in bdrv_co_flush()
2973 qemu_mutex_unlock(&bs->reqs_lock); in bdrv_co_flush()
2976 if (bs->drv->bdrv_co_flush) { in bdrv_co_flush()
2977 ret = bs->drv->bdrv_co_flush(bs); in bdrv_co_flush()
2983 if (bs->drv->bdrv_co_flush_to_os) { in bdrv_co_flush()
2984 ret = bs->drv->bdrv_co_flush_to_os(bs); in bdrv_co_flush()
2991 if (bs->open_flags & BDRV_O_NO_FLUSH) { in bdrv_co_flush()
2996 if (bs->flushed_gen == current_gen) { in bdrv_co_flush()
3001 if (!bs->drv) { in bdrv_co_flush()
3007 if (bs->drv->bdrv_co_flush_to_disk) { in bdrv_co_flush()
3008 ret = bs->drv->bdrv_co_flush_to_disk(bs); in bdrv_co_flush()
3009 } else if (bs->drv->bdrv_aio_flush) { in bdrv_co_flush()
3015 acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co); in bdrv_co_flush()
3046 QLIST_FOREACH(child, &bs->children, next) { in bdrv_co_flush()
3048 int this_child_ret = bdrv_co_flush(child->bs); in bdrv_co_flush()
3058 bs->flushed_gen = current_gen; in bdrv_co_flush()
3061 qemu_mutex_lock(&bs->reqs_lock); in bdrv_co_flush()
3062 bs->active_flush_req = false; in bdrv_co_flush()
3064 qemu_co_queue_next(&bs->flush_queue); in bdrv_co_flush()
3065 qemu_mutex_unlock(&bs->reqs_lock); in bdrv_co_flush()
3068 bdrv_dec_in_flight(bs); in bdrv_co_flush()
3079 BlockDriverState *bs = child->bs; in bdrv_co_pdiscard() local
3083 if (!bs || !bs->drv || !bdrv_co_is_inserted(bs)) { in bdrv_co_pdiscard()
3087 if (bdrv_has_readonly_bitmaps(bs)) { in bdrv_co_pdiscard()
3097 if (!(bs->open_flags & BDRV_O_UNMAP)) { in bdrv_co_pdiscard()
3101 if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) { in bdrv_co_pdiscard()
3106 bdrv_bsc_invalidate_range(bs, offset, bytes); in bdrv_co_pdiscard()
3113 align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment); in bdrv_co_pdiscard()
3114 assert(align % bs->bl.request_alignment == 0); in bdrv_co_pdiscard()
3118 bdrv_inc_in_flight(bs); in bdrv_co_pdiscard()
3119 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD); in bdrv_co_pdiscard()
3126 max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT64_MAX), in bdrv_co_pdiscard()
3128 assert(max_pdiscard >= bs->bl.request_alignment); in bdrv_co_pdiscard()
3136 if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) { in bdrv_co_pdiscard()
3137 num %= bs->bl.request_alignment; in bdrv_co_pdiscard()
3145 } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) && in bdrv_co_pdiscard()
3146 tail > bs->bl.request_alignment) { in bdrv_co_pdiscard()
3147 tail %= bs->bl.request_alignment; in bdrv_co_pdiscard()
3156 if (!bs->drv) { in bdrv_co_pdiscard()
3160 if (bs->drv->bdrv_co_pdiscard) { in bdrv_co_pdiscard()
3161 ret = bs->drv->bdrv_co_pdiscard(bs, offset, num); in bdrv_co_pdiscard()
3168 acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num, in bdrv_co_pdiscard()
3189 bdrv_dec_in_flight(bs); in bdrv_co_pdiscard()
3193 int coroutine_fn bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf) in bdrv_co_ioctl() argument
3195 BlockDriver *drv = bs->drv; in bdrv_co_ioctl()
3203 bdrv_inc_in_flight(bs); in bdrv_co_ioctl()
3210 co.ret = drv->bdrv_co_ioctl(bs, req, buf); in bdrv_co_ioctl()
3212 acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co); in bdrv_co_ioctl()
3220 bdrv_dec_in_flight(bs); in bdrv_co_ioctl()
3224 int coroutine_fn bdrv_co_zone_report(BlockDriverState *bs, int64_t offset, in bdrv_co_zone_report() argument
3228 BlockDriver *drv = bs->drv; in bdrv_co_zone_report()
3234 bdrv_inc_in_flight(bs); in bdrv_co_zone_report()
3235 if (!drv || !drv->bdrv_co_zone_report || bs->bl.zoned == BLK_Z_NONE) { in bdrv_co_zone_report()
3239 co.ret = drv->bdrv_co_zone_report(bs, offset, nr_zones, zones); in bdrv_co_zone_report()
3241 bdrv_dec_in_flight(bs); in bdrv_co_zone_report()
3245 int coroutine_fn bdrv_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op, in bdrv_co_zone_mgmt() argument
3248 BlockDriver *drv = bs->drv; in bdrv_co_zone_mgmt()
3254 bdrv_inc_in_flight(bs); in bdrv_co_zone_mgmt()
3255 if (!drv || !drv->bdrv_co_zone_mgmt || bs->bl.zoned == BLK_Z_NONE) { in bdrv_co_zone_mgmt()
3259 co.ret = drv->bdrv_co_zone_mgmt(bs, op, offset, len); in bdrv_co_zone_mgmt()
3261 bdrv_dec_in_flight(bs); in bdrv_co_zone_mgmt()
3265 int coroutine_fn bdrv_co_zone_append(BlockDriverState *bs, int64_t *offset, in bdrv_co_zone_append() argument
3270 BlockDriver *drv = bs->drv; in bdrv_co_zone_append()
3281 bdrv_inc_in_flight(bs); in bdrv_co_zone_append()
3282 if (!drv || !drv->bdrv_co_zone_append || bs->bl.zoned == BLK_Z_NONE) { in bdrv_co_zone_append()
3286 co.ret = drv->bdrv_co_zone_append(bs, offset, qiov, flags); in bdrv_co_zone_append()
3288 bdrv_dec_in_flight(bs); in bdrv_co_zone_append()
3292 void *qemu_blockalign(BlockDriverState *bs, size_t size) in qemu_blockalign() argument
3295 return qemu_memalign(bdrv_opt_mem_align(bs), size); in qemu_blockalign()
3298 void *qemu_blockalign0(BlockDriverState *bs, size_t size) in qemu_blockalign0() argument
3301 return memset(qemu_blockalign(bs, size), 0, size); in qemu_blockalign0()
3304 void *qemu_try_blockalign(BlockDriverState *bs, size_t size) in qemu_try_blockalign() argument
3306 size_t align = bdrv_opt_mem_align(bs); in qemu_try_blockalign()
3318 void *qemu_try_blockalign0(BlockDriverState *bs, size_t size) in qemu_try_blockalign0() argument
3320 void *mem = qemu_try_blockalign(bs, size); in qemu_try_blockalign0()
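The qemu_blockalign() family above (lines 3292–3320) allocates buffers aligned to bdrv_opt_mem_align(bs); the try_ variants return NULL on failure instead of aborting. A short sketch of a bounce-buffer allocation in that style follows; qemu_vfree() as the matching free is an assumption, it does not appear in the listing above, and the wrapper function is illustrative.

#include "qemu/osdep.h"
#include "block/block.h"   /* assumed include for the qemu_*blockalign() helpers */

/* Illustrative only: allocate, use, and release an aligned bounce buffer for @bs. */
static int example_aligned_bounce_buffer(BlockDriverState *bs, size_t len)
{
    void *bounce = qemu_try_blockalign(bs, len);  /* aligned to bdrv_opt_mem_align(bs), line 3306 */
    if (!bounce) {
        return -ENOMEM;                           /* try_ variant reports failure via NULL */
    }
    /* ... use the buffer for an aligned read or write ... */
    qemu_vfree(bounce);                           /* assumed counterpart free, not in the listing */
    return 0;
}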
3332 bdrv_register_buf_rollback(BlockDriverState *bs, void *host, size_t size, in bdrv_register_buf_rollback() argument
3340 QLIST_FOREACH(child, &bs->children, next) { in bdrv_register_buf_rollback()
3345 bdrv_unregister_buf(child->bs, host, size); in bdrv_register_buf_rollback()
3348 if (bs->drv && bs->drv->bdrv_unregister_buf) { in bdrv_register_buf_rollback()
3349 bs->drv->bdrv_unregister_buf(bs, host, size); in bdrv_register_buf_rollback()
3353 bool bdrv_register_buf(BlockDriverState *bs, void *host, size_t size, in bdrv_register_buf() argument
3361 if (bs->drv && bs->drv->bdrv_register_buf) { in bdrv_register_buf()
3362 if (!bs->drv->bdrv_register_buf(bs, host, size, errp)) { in bdrv_register_buf()
3366 QLIST_FOREACH(child, &bs->children, next) { in bdrv_register_buf()
3367 if (!bdrv_register_buf(child->bs, host, size, errp)) { in bdrv_register_buf()
3368 bdrv_register_buf_rollback(bs, host, size, child); in bdrv_register_buf()
3375 void bdrv_unregister_buf(BlockDriverState *bs, void *host, size_t size) in bdrv_unregister_buf() argument
3382 if (bs->drv && bs->drv->bdrv_unregister_buf) { in bdrv_unregister_buf()
3383 bs->drv->bdrv_unregister_buf(bs, host, size); in bdrv_unregister_buf()
3385 QLIST_FOREACH(child, &bs->children, next) { in bdrv_unregister_buf()
3386 bdrv_unregister_buf(child->bs, host, size); in bdrv_unregister_buf()
3406 if (!dst || !dst->bs || !bdrv_co_is_inserted(dst->bs)) { in bdrv_co_copy_range_internal()
3417 if (!src || !src->bs || !bdrv_co_is_inserted(src->bs)) { in bdrv_co_copy_range_internal()
3425 if (!src->bs->drv->bdrv_co_copy_range_from in bdrv_co_copy_range_internal()
3426 || !dst->bs->drv->bdrv_co_copy_range_to in bdrv_co_copy_range_internal()
3427 || src->bs->encrypted || dst->bs->encrypted) { in bdrv_co_copy_range_internal()
3432 bdrv_inc_in_flight(src->bs); in bdrv_co_copy_range_internal()
3433 tracked_request_begin(&req, src->bs, src_offset, bytes, in bdrv_co_copy_range_internal()
3440 ret = src->bs->drv->bdrv_co_copy_range_from(src->bs, in bdrv_co_copy_range_internal()
3447 bdrv_dec_in_flight(src->bs); in bdrv_co_copy_range_internal()
3449 bdrv_inc_in_flight(dst->bs); in bdrv_co_copy_range_internal()
3450 tracked_request_begin(&req, dst->bs, dst_offset, bytes, in bdrv_co_copy_range_internal()
3455 ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs, in bdrv_co_copy_range_internal()
3463 bdrv_dec_in_flight(dst->bs); in bdrv_co_copy_range_internal()
3519 bdrv_parent_cb_resize(BlockDriverState *bs) in bdrv_parent_cb_resize() argument
3525 QLIST_FOREACH(c, &bs->parents, next_parent) { in bdrv_parent_cb_resize()
3543 BlockDriverState *bs = child->bs; in bdrv_co_truncate() local
3545 BlockDriver *drv = bs->drv; in bdrv_co_truncate()
3567 old_size = bdrv_co_getlength(bs); in bdrv_co_truncate()
3573 if (bdrv_is_read_only(bs)) { in bdrv_co_truncate()
3584 bdrv_inc_in_flight(bs); in bdrv_co_truncate()
3585 tracked_request_begin(&req, bs, offset - new_bytes, new_bytes, in bdrv_co_truncate()
3602 filtered = bdrv_filter_child(bs); in bdrv_co_truncate()
3603 backing = bdrv_cow_child(bs); in bdrv_co_truncate()
3618 backing_len = bdrv_co_getlength(backing->bs); in bdrv_co_truncate()
3631 if (flags & ~bs->supported_truncate_flags) { in bdrv_co_truncate()
3636 ret = drv->bdrv_co_truncate(bs, offset, exact, prealloc, flags, errp); in bdrv_co_truncate()
3648 ret = bdrv_co_refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS); in bdrv_co_truncate()
3652 offset = bs->total_sectors * BDRV_SECTOR_SIZE; in bdrv_co_truncate()
3663 bdrv_dec_in_flight(bs); in bdrv_co_truncate()
3668 void bdrv_cancel_in_flight(BlockDriverState *bs) in bdrv_cancel_in_flight() argument
3673 if (!bs || !bs->drv) { in bdrv_cancel_in_flight()
3677 if (bs->drv->bdrv_cancel_in_flight) { in bdrv_cancel_in_flight()
3678 bs->drv->bdrv_cancel_in_flight(bs); in bdrv_cancel_in_flight()
3686 BlockDriverState *bs = child->bs; in bdrv_co_preadv_snapshot() local
3687 BlockDriver *drv = bs->drv; in bdrv_co_preadv_snapshot()
3700 bdrv_inc_in_flight(bs); in bdrv_co_preadv_snapshot()
3701 ret = drv->bdrv_co_preadv_snapshot(bs, offset, bytes, qiov, qiov_offset); in bdrv_co_preadv_snapshot()
3702 bdrv_dec_in_flight(bs); in bdrv_co_preadv_snapshot()
3708 bdrv_co_snapshot_block_status(BlockDriverState *bs, in bdrv_co_snapshot_block_status() argument
3713 BlockDriver *drv = bs->drv; in bdrv_co_snapshot_block_status()
3726 bdrv_inc_in_flight(bs); in bdrv_co_snapshot_block_status()
3727 ret = drv->bdrv_co_snapshot_block_status(bs, want_zero, offset, bytes, in bdrv_co_snapshot_block_status()
3729 bdrv_dec_in_flight(bs); in bdrv_co_snapshot_block_status()
3735 bdrv_co_pdiscard_snapshot(BlockDriverState *bs, int64_t offset, int64_t bytes) in bdrv_co_pdiscard_snapshot() argument
3737 BlockDriver *drv = bs->drv; in bdrv_co_pdiscard_snapshot()
3750 bdrv_inc_in_flight(bs); in bdrv_co_pdiscard_snapshot()
3751 ret = drv->bdrv_co_pdiscard_snapshot(bs, offset, bytes); in bdrv_co_pdiscard_snapshot()
3752 bdrv_dec_in_flight(bs); in bdrv_co_pdiscard_snapshot()