Lines matching +full:role +full:-switch +full:-default +full:-mode (QEMU block/mirror.c)

10  * See the COPYING.LIB file in the top-level directory.
21 #include "block/dirty-bitmap.h"
22 #include "system/block-backend.h"
32 /* The mirroring buffer is a list of granularity-sized chunks.
52 /* Used to block operations on the drive-mirror-replace target */
68 * and the job is running in active mode.
138 qatomic_set(&s->actively_synced, false); in mirror_error_action()
140 return block_job_error_action(&s->common, s->on_source_error, in mirror_error_action()
143 return block_job_error_action(&s->common, s->on_target_error, in mirror_error_action()
153 uint64_t self_start_chunk = offset / s->granularity; in mirror_wait_on_conflicts()
154 uint64_t self_end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity); in mirror_wait_on_conflicts()
155 uint64_t self_nb_chunks = self_end_chunk - self_start_chunk; in mirror_wait_on_conflicts()
157 while (find_next_bit(s->in_flight_bitmap, self_end_chunk, in mirror_wait_on_conflicts()
159 s->ret >= 0) in mirror_wait_on_conflicts()
163 QTAILQ_FOREACH(op, &s->ops_in_flight, next) { in mirror_wait_on_conflicts()
164 uint64_t op_start_chunk = op->offset / s->granularity; in mirror_wait_on_conflicts()
165 uint64_t op_nb_chunks = DIV_ROUND_UP(op->offset + op->bytes, in mirror_wait_on_conflicts()
166 s->granularity) - in mirror_wait_on_conflicts()
182 if (op->waiting_for_op) { in mirror_wait_on_conflicts()
186 self->waiting_for_op = op; in mirror_wait_on_conflicts()
189 qemu_co_queue_wait(&op->waiting_requests, NULL); in mirror_wait_on_conflicts()
192 self->waiting_for_op = NULL; in mirror_wait_on_conflicts()
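
The conflict test above works on granularity-sized chunks rather than raw byte ranges: each operation's byte range is widened to the chunks it touches, and two operations conflict iff those chunk intervals intersect. A minimal stand-alone sketch of that arithmetic (hypothetical helper, not QEMU code):

#include <stdbool.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

static bool chunks_overlap(uint64_t off_a, uint64_t len_a,
                           uint64_t off_b, uint64_t len_b,
                           uint64_t granularity)
{
    uint64_t a_start = off_a / granularity;                    /* first chunk */
    uint64_t a_end = DIV_ROUND_UP(off_a + len_a, granularity); /* one past last */
    uint64_t b_start = off_b / granularity;
    uint64_t b_end = DIV_ROUND_UP(off_b + len_b, granularity);

    /* Half-open chunk intervals [start, end) intersect iff each interval
     * starts before the other one ends. */
    return a_start < b_end && b_start < a_end;
}

For example, with a 64 KiB granularity, writes at byte 100 and byte 65000 fall into the same chunk and therefore conflict even though the byte ranges themselves are disjoint.
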
203 MirrorBlockJob *s = op->s; in mirror_iteration_done()
208 trace_mirror_iteration_done(s, op->offset, op->bytes, ret); in mirror_iteration_done()
210 s->in_flight--; in mirror_iteration_done()
211 s->bytes_in_flight -= op->bytes; in mirror_iteration_done()
212 iov = op->qiov.iov; in mirror_iteration_done()
213 for (i = 0; i < op->qiov.niov; i++) { in mirror_iteration_done()
215 QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next); in mirror_iteration_done()
216 s->buf_free_count++; in mirror_iteration_done()
219 chunk_num = op->offset / s->granularity; in mirror_iteration_done()
220 nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity); in mirror_iteration_done()
222 bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks); in mirror_iteration_done()
223 QTAILQ_REMOVE(&s->ops_in_flight, op, next); in mirror_iteration_done()
225 if (s->cow_bitmap) { in mirror_iteration_done()
226 bitmap_set(s->cow_bitmap, chunk_num, nb_chunks); in mirror_iteration_done()
228 if (!s->initial_zeroing_ongoing) { in mirror_iteration_done()
229 job_progress_update(&s->common.job, op->bytes); in mirror_iteration_done()
232 qemu_iovec_destroy(&op->qiov); in mirror_iteration_done()
234 qemu_co_queue_restart_all(&op->waiting_requests); in mirror_iteration_done()
240 MirrorBlockJob *s = op->s; in mirror_write_complete()
245 bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes); in mirror_write_complete()
246 action = mirror_error_action(s, false, -ret); in mirror_write_complete()
247 if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) { in mirror_write_complete()
248 s->ret = ret; in mirror_write_complete()
257 MirrorBlockJob *s = op->s; in mirror_read_complete()
262 bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes); in mirror_read_complete()
263 action = mirror_error_action(s, true, -ret); in mirror_read_complete()
264 if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) { in mirror_read_complete()
265 s->ret = ret; in mirror_read_complete()
272 ret = blk_co_pwritev(s->target, op->offset, op->qiov.size, &op->qiov, 0); in mirror_read_complete()
276 /* Clip bytes relative to offset to not exceed end-of-file */
281 return MIN(bytes, s->bdev_length - offset); in mirror_clip_bytes()
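
In isolation the clipping rule is just a MIN against the bytes remaining in the device; a tiny self-contained equivalent (hypothetical names, not QEMU code):

#include <stdint.h>

/* Never let a request extend past the end of the device. */
static int64_t clip_bytes(int64_t offset, int64_t bytes, int64_t bdev_length)
{
    int64_t remaining = bdev_length - offset;
    return bytes < remaining ? bytes : remaining;
}

So clip_bytes(900, 200, 1000) returns 100: the request is shortened to stop exactly at end-of-file.
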
293 int max_bytes = s->granularity * s->max_iov; in mirror_cow_align()
295 need_cow = !test_bit(*offset / s->granularity, s->cow_bitmap); in mirror_cow_align()
296 need_cow |= !test_bit((*offset + *bytes - 1) / s->granularity, in mirror_cow_align()
297 s->cow_bitmap); in mirror_cow_align()
299 bdrv_round_to_subclusters(blk_bs(s->target), *offset, *bytes, in mirror_cow_align()
306 align_bytes = QEMU_ALIGN_DOWN(align_bytes, s->target_cluster_size); in mirror_cow_align()
313 ret = align_offset + align_bytes - (*offset + *bytes); in mirror_cow_align()
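
Stripped of the COW-bitmap tests and the max_iov/max_bytes clamping, the core of mirror_cow_align() is plain interval rounding; a compilable sketch under those simplifications (hypothetical helper, and unlike the real code it only widens the request, so the returned delta is never negative):

#include <stdint.h>

#define ALIGN_DOWN(n, m)  ((n) / (m) * (m))
#define ALIGN_UP(n, m)    ALIGN_DOWN((n) + (m) - 1, (m))

static int64_t align_to_clusters(int64_t *offset, int64_t *bytes,
                                 int64_t cluster_size)
{
    int64_t align_offset = ALIGN_DOWN(*offset, cluster_size);
    int64_t align_bytes = ALIGN_UP(*offset + *bytes, cluster_size) - align_offset;

    /* Same bookkeeping as the last line above: how far the tail moved
     * relative to the caller's original end. */
    int64_t ret = align_offset + align_bytes - (*offset + *bytes);

    *offset = align_offset;
    *bytes = align_bytes;
    return ret;
}
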
325 QTAILQ_FOREACH(op, &s->ops_in_flight, next) { in mirror_wait_for_free_in_flight_slot()
333 * use up in-flight slots. in mirror_wait_for_free_in_flight_slot()
335 if (!op->is_pseudo_op && op->is_in_flight && !op->is_active_write) { in mirror_wait_for_free_in_flight_slot()
336 qemu_co_queue_wait(&op->waiting_requests, NULL); in mirror_wait_for_free_in_flight_slot()
345 * *op->bytes_handled is set to the number of bytes copied after and
347 * to alignment. This will be op->bytes if no alignment is necessary,
348 * or (new_end - op->offset) if the tail is rounded up or down due to
354 MirrorBlockJob *s = op->s; in mirror_co_read()
356 int ret = -1; in mirror_co_read()
359 max_bytes = s->granularity * s->max_iov; in mirror_co_read()
362 op->bytes = MIN(s->buf_size, MIN(max_bytes, op->bytes)); in mirror_co_read()
363 assert(op->bytes); in mirror_co_read()
364 assert(op->bytes < BDRV_REQUEST_MAX_BYTES); in mirror_co_read()
365 *op->bytes_handled = op->bytes; in mirror_co_read()
367 if (s->cow_bitmap) { in mirror_co_read()
368 *op->bytes_handled += mirror_cow_align(s, &op->offset, &op->bytes); in mirror_co_read()
371 assert(*op->bytes_handled <= UINT_MAX); in mirror_co_read()
372 assert(op->bytes <= s->buf_size); in mirror_co_read()
373 /* The offset is granularity-aligned because: in mirror_co_read()
376 assert(QEMU_IS_ALIGNED(op->offset, s->granularity)); in mirror_co_read()
377 /* The range is sector-aligned, since bdrv_getlength() rounds up. */ in mirror_co_read()
378 assert(QEMU_IS_ALIGNED(op->bytes, BDRV_SECTOR_SIZE)); in mirror_co_read()
379 nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity); in mirror_co_read()
381 while (s->buf_free_count < nb_chunks) { in mirror_co_read()
382 trace_mirror_yield_in_flight(s, op->offset, s->in_flight); in mirror_co_read()
386 /* Now make a QEMUIOVector taking enough granularity-sized chunks in mirror_co_read()
387 * from s->buf_free. in mirror_co_read()
389 qemu_iovec_init(&op->qiov, nb_chunks); in mirror_co_read()
390 while (nb_chunks-- > 0) { in mirror_co_read()
391 MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free); in mirror_co_read()
392 size_t remaining = op->bytes - op->qiov.size; in mirror_co_read()
394 QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next); in mirror_co_read()
395 s->buf_free_count--; in mirror_co_read()
396 qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining)); in mirror_co_read()
400 s->in_flight++; in mirror_co_read()
401 s->bytes_in_flight += op->bytes; in mirror_co_read()
402 op->is_in_flight = true; in mirror_co_read()
403 trace_mirror_one_iteration(s, op->offset, op->bytes); in mirror_co_read()
406 ret = bdrv_co_preadv(s->mirror_top_bs->backing, op->offset, op->bytes, in mirror_co_read()
407 &op->qiov, 0); in mirror_co_read()
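
The loop above assembles the read buffer from granularity-sized chunks popped off s->buf_free; only the final chunk may be short. A simplified model using a plain singly-linked free list and POSIX struct iovec (hypothetical types; QEMU uses QSIMPLEQ and QEMUIOVector):

#include <stddef.h>
#include <sys/uio.h>    /* struct iovec */

struct chunk { struct chunk *next; };

static int fill_iov(struct chunk **free_list, struct iovec *iov, int max_iov,
                    size_t bytes, size_t granularity)
{
    size_t filled = 0;
    int niov = 0;

    while (filled < bytes && niov < max_iov && *free_list != NULL) {
        struct chunk *c = *free_list;
        size_t len = bytes - filled;

        *free_list = c->next;       /* pop a chunk off the free list */
        if (len > granularity) {
            len = granularity;      /* every chunk but the last is full */
        }
        iov[niov].iov_base = c;
        iov[niov].iov_len = len;
        niov++;
        filled += len;
    }
    return niov;                    /* number of iovec entries used */
}
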
418 op->s->in_flight++; in mirror_co_zero()
419 op->s->bytes_in_flight += op->bytes; in mirror_co_zero()
420 *op->bytes_handled = op->bytes; in mirror_co_zero()
421 op->is_in_flight = true; in mirror_co_zero()
423 if (op->s->zero_bitmap) { in mirror_co_zero()
424 unsigned long end = DIV_ROUND_UP(op->offset + op->bytes, in mirror_co_zero()
425 op->s->granularity); in mirror_co_zero()
426 assert(QEMU_IS_ALIGNED(op->offset, op->s->granularity)); in mirror_co_zero()
427 assert(QEMU_IS_ALIGNED(op->bytes, op->s->granularity) || in mirror_co_zero()
428 op->offset + op->bytes == op->s->bdev_length); in mirror_co_zero()
429 if (find_next_zero_bit(op->s->zero_bitmap, end, in mirror_co_zero()
430 op->offset / op->s->granularity) == end) { in mirror_co_zero()
432 *op->io_skipped = true; in mirror_co_zero()
436 ret = blk_co_pwrite_zeroes(op->s->target, op->offset, op->bytes, in mirror_co_zero()
437 op->s->unmap ? BDRV_REQ_MAY_UNMAP : 0); in mirror_co_zero()
439 if (ret >= 0 && op->s->zero_bitmap) { in mirror_co_zero()
440 bitmap_set(op->s->zero_bitmap, op->offset / op->s->granularity, in mirror_co_zero()
441 DIV_ROUND_UP(op->bytes, op->s->granularity)); in mirror_co_zero()
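
The zero bitmap gives mirror_co_zero() a fast path: if every chunk the request covers is already known to read as zero on the target, the write is skipped entirely (and *op->io_skipped is set); after a successful write, all covered chunks are marked zero. A simplified model with one byte per chunk instead of a real bitmap (hypothetical code):

#include <stdbool.h>
#include <stdint.h>

static bool can_skip_zeroing(const uint8_t *zero_map,
                             uint64_t first_chunk, uint64_t end_chunk)
{
    for (uint64_t i = first_chunk; i < end_chunk; i++) {
        if (!zero_map[i]) {
            return false;   /* some chunk may hold data: must write zeroes */
        }
    }
    return true;            /* whole range already reads as zero */
}

This is what the find_next_zero_bit() test above expresses: no zero bit is missing between offset / granularity and end, so the request is a no-op.
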
451 op->s->in_flight++; in mirror_co_discard()
452 op->s->bytes_in_flight += op->bytes; in mirror_co_discard()
453 *op->bytes_handled = op->bytes; in mirror_co_discard()
454 op->is_in_flight = true; in mirror_co_discard()
456 ret = blk_co_pdiscard(op->s->target, op->offset, op->bytes); in mirror_co_discard()
466 int64_t bytes_handled = -1; in mirror_perform()
468 assert(QEMU_IS_ALIGNED(offset, s->granularity)); in mirror_perform()
469 assert(QEMU_IS_ALIGNED(bytes, s->granularity) || in mirror_perform()
470 offset + bytes == s->bdev_length); in mirror_perform()
479 qemu_co_queue_init(&op->waiting_requests); in mirror_perform()
481 switch (mirror_method) { in mirror_perform()
483 if (s->zero_bitmap) { in mirror_perform()
484 bitmap_clear(s->zero_bitmap, offset / s->granularity, in mirror_perform()
485 DIV_ROUND_UP(bytes, s->granularity)); in mirror_perform()
490 /* s->zero_bitmap handled in mirror_co_zero */ in mirror_perform()
494 if (s->zero_bitmap) { in mirror_perform()
495 bitmap_clear(s->zero_bitmap, offset / s->granularity, in mirror_perform()
496 DIV_ROUND_UP(bytes, s->granularity)); in mirror_perform()
500 default: in mirror_perform()
503 op->co = co; in mirror_perform()
505 QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next); in mirror_perform()
514 * and mirror_co_discard(), bytes_handled == op->bytes, which in mirror_perform()
527 bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target)); in mirror_iteration()
528 int max_io_bytes = MAX(s->buf_size / MAX_IN_FLIGHT, MAX_IO_BYTES); in mirror_iteration()
531 source = s->mirror_top_bs->backing->bs; in mirror_iteration()
534 bdrv_dirty_bitmap_lock(s->dirty_bitmap); in mirror_iteration()
535 offset = bdrv_dirty_iter_next(s->dbi); in mirror_iteration()
537 bdrv_set_dirty_iter(s->dbi, 0); in mirror_iteration()
538 offset = bdrv_dirty_iter_next(s->dbi); in mirror_iteration()
539 trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap)); in mirror_iteration()
542 bdrv_dirty_bitmap_unlock(s->dirty_bitmap); in mirror_iteration()
547 * not overlap with concurrent in-flight requests. Still, we would like to in mirror_iteration()
553 job_pause_point(&s->common.job); in mirror_iteration()
557 bdrv_dirty_bitmap_lock(s->dirty_bitmap); in mirror_iteration()
558 while (nb_chunks * s->granularity < s->buf_size) { in mirror_iteration()
560 int64_t next_offset = offset + nb_chunks * s->granularity; in mirror_iteration()
561 int64_t next_chunk = next_offset / s->granularity; in mirror_iteration()
562 if (next_offset >= s->bdev_length || in mirror_iteration()
563 !bdrv_dirty_bitmap_get_locked(s->dirty_bitmap, next_offset)) { in mirror_iteration()
566 if (test_bit(next_chunk, s->in_flight_bitmap)) { in mirror_iteration()
570 next_dirty = bdrv_dirty_iter_next(s->dbi); in mirror_iteration()
573 bdrv_set_dirty_iter(s->dbi, next_offset); in mirror_iteration()
574 next_dirty = bdrv_dirty_iter_next(s->dbi); in mirror_iteration()
581 * calling bdrv_block_status_above could yield - if some blocks are in mirror_iteration()
584 bdrv_reset_dirty_bitmap_locked(s->dirty_bitmap, offset, in mirror_iteration()
585 nb_chunks * s->granularity); in mirror_iteration()
586 bdrv_dirty_bitmap_unlock(s->dirty_bitmap); in mirror_iteration()
588 /* Before claiming an area in the in-flight bitmap, we have to in mirror_iteration()
597 .bytes = nb_chunks * s->granularity, in mirror_iteration()
600 qemu_co_queue_init(&pseudo_op->waiting_requests); in mirror_iteration()
601 QTAILQ_INSERT_TAIL(&s->ops_in_flight, pseudo_op, next); in mirror_iteration()
603 bitmap_set(s->in_flight_bitmap, offset / s->granularity, nb_chunks); in mirror_iteration()
604 while (nb_chunks > 0 && offset < s->bdev_length) { in mirror_iteration()
605 int ret = -1; in mirror_iteration()
611 assert(!(offset % s->granularity)); in mirror_iteration()
614 nb_chunks * s->granularity, in mirror_iteration()
618 io_bytes = MIN(nb_chunks * s->granularity, max_io_bytes); in mirror_iteration()
623 io_bytes -= io_bytes % s->granularity; in mirror_iteration()
624 if (io_bytes < s->granularity) { in mirror_iteration()
625 io_bytes = s->granularity; in mirror_iteration()
630 bdrv_round_to_subclusters(blk_bs(s->target), offset, io_bytes, in mirror_iteration()
641 while (s->in_flight >= MAX_IN_FLIGHT) { in mirror_iteration()
642 trace_mirror_yield_in_flight(s, offset, s->in_flight); in mirror_iteration()
646 if (s->ret < 0) { in mirror_iteration()
662 nb_chunks -= DIV_ROUND_UP(io_bytes, s->granularity); in mirror_iteration()
663 block_job_ratelimit_processed_bytes(&s->common, io_bytes_acct); in mirror_iteration()
667 QTAILQ_REMOVE(&s->ops_in_flight, pseudo_op, next); in mirror_iteration()
668 qemu_co_queue_restart_all(&pseudo_op->waiting_requests); in mirror_iteration()
674 int granularity = s->granularity; in mirror_free_init()
675 size_t buf_size = s->buf_size; in mirror_free_init()
676 uint8_t *buf = s->buf; in mirror_free_init()
678 assert(s->buf_free_count == 0); in mirror_free_init()
679 QSIMPLEQ_INIT(&s->buf_free); in mirror_free_init()
682 QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next); in mirror_free_init()
683 s->buf_free_count++; in mirror_free_init()
684 buf_size -= granularity; in mirror_free_init()
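
mirror_free_init() carves the single big allocation s->buf into granularity-sized chunks and queues each one on s->buf_free. A self-contained model with an intrusive singly-linked list (QEMU uses a QSIMPLEQ; the names here are invented):

#include <stddef.h>
#include <stdint.h>

struct chunk {
    struct chunk *next;     /* overlays the first bytes of the chunk itself */
};

static struct chunk *free_list_init(uint8_t *buf, size_t buf_size,
                                    size_t granularity)
{
    struct chunk *head = NULL;

    while (buf_size >= granularity) {
        struct chunk *c = (struct chunk *)buf;

        c->next = head;     /* push the chunk onto the free list */
        head = c;
        buf += granularity;
        buf_size -= granularity;
    }
    return head;
}
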
695 while (s->in_flight > 0) { in mirror_wait_for_all_io()
702 * for .prepare, returns 0 on success and -errno on failure.
708 BlockJob *bjob = &s->common; in mirror_exit_common()
714 bool abort = job->ret < 0; in mirror_exit_common()
719 if (s->prepared) { in mirror_exit_common()
722 s->prepared = true; in mirror_exit_common()
726 mirror_top_bs = s->mirror_top_bs; in mirror_exit_common()
727 bs_opaque = mirror_top_bs->opaque; in mirror_exit_common()
728 src = mirror_top_bs->backing->bs; in mirror_exit_common()
729 target_bs = blk_bs(s->target); in mirror_exit_common()
735 bdrv_release_dirty_bitmap(s->dirty_bitmap); in mirror_exit_common()
747 * inserting target_bs at s->to_replace, where we might not be able to get in mirror_exit_common()
750 blk_unref(s->target); in mirror_exit_common()
751 s->target = NULL; in mirror_exit_common()
759 bs_opaque->stop = true; in mirror_exit_common()
762 bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing, in mirror_exit_common()
766 if (!abort && s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) { in mirror_exit_common()
773 backing = s->sync_mode == MIRROR_SYNC_MODE_NONE ? src : s->base; in mirror_exit_common()
779 ret = -EPERM; in mirror_exit_common()
783 } else if (!abort && s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) { in mirror_exit_common()
795 if (s->should_complete && !abort) { in mirror_exit_common()
796 BlockDriverState *to_replace = s->to_replace ?: src; in mirror_exit_common()
805 assert(s->in_drain); in mirror_exit_common()
819 to_replace->node_name, target_bs->node_name); in mirror_exit_common()
825 ret = -EPERM; in mirror_exit_common()
828 if (s->to_replace) { in mirror_exit_common()
829 bdrv_op_unblock_all(s->to_replace, s->replace_blocker); in mirror_exit_common()
830 error_free(s->replace_blocker); in mirror_exit_common()
831 bdrv_unref(s->to_replace); in mirror_exit_common()
833 g_free(s->replaces); in mirror_exit_common()
842 bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort); in mirror_exit_common()
845 if (abort && s->base_ro && !bdrv_is_read_only(target_bs)) { in mirror_exit_common()
852 bs_opaque->job = NULL; in mirror_exit_common()
856 s->in_drain = false; in mirror_exit_common()
878 if (now - s->last_pause_ns > BLOCK_JOB_SLICE_TIME) { in mirror_throttle()
879 s->last_pause_ns = now; in mirror_throttle()
880 job_sleep_ns(&s->common.job, 0); in mirror_throttle()
882 job_pause_point(&s->common.job); in mirror_throttle()
890 BlockDriverState *target_bs = blk_bs(s->target); in mirror_dirty_init()
891 int ret = -EIO; in mirror_dirty_init()
894 target_bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP && in mirror_dirty_init()
896 int64_t bitmap_length = DIV_ROUND_UP(s->bdev_length, s->granularity); in mirror_dirty_init()
898 /* Determine if the image is already zero, regardless of sync mode. */ in mirror_dirty_init()
899 s->zero_bitmap = bitmap_new(bitmap_length); in mirror_dirty_init()
901 bs = s->mirror_top_bs->backing->bs; in mirror_dirty_init()
902 if (s->target_is_zero) { in mirror_dirty_init()
909 /* Determine if a pre-zeroing pass is necessary. */ in mirror_dirty_init()
912 } else if (s->sync_mode == MIRROR_SYNC_MODE_TOP) { in mirror_dirty_init()
914 * In TOP mode, there is no benefit to a pre-zeroing pass, but in mirror_dirty_init()
919 bitmap_set(s->zero_bitmap, 0, bitmap_length); in mirror_dirty_init()
923 * Here, we are in FULL mode; our goal is to avoid writing in mirror_dirty_init()
927 * way to pre-zero the image (the dirty bitmap will be in mirror_dirty_init()
928 * populated later by the non-zero portions, the same as for in mirror_dirty_init()
929 * TOP mode). If pre-zeroing is not fast, or we need to visit in mirror_dirty_init()
931 * non-allocated regions of the source, then just mark the in mirror_dirty_init()
933 * point in time. Otherwise, it can be faster to pre-zero the in mirror_dirty_init()
934 * image now, even if we re-write the allocated portions of in mirror_dirty_init()
935 * the disk later, and the pre-zero pass will populate the in mirror_dirty_init()
939 bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, s->bdev_length); in mirror_dirty_init()
943 s->initial_zeroing_ongoing = true; in mirror_dirty_init()
944 for (offset = 0; offset < s->bdev_length; ) { in mirror_dirty_init()
945 int bytes = MIN(s->bdev_length - offset, in mirror_dirty_init()
946 QEMU_ALIGN_DOWN(INT_MAX, s->granularity)); in mirror_dirty_init()
951 if (job_is_cancelled(&s->common.job)) { in mirror_dirty_init()
952 s->initial_zeroing_ongoing = false; in mirror_dirty_init()
956 if (s->in_flight >= MAX_IN_FLIGHT) { in mirror_dirty_init()
957 trace_mirror_yield(s, UINT64_MAX, s->buf_free_count, in mirror_dirty_init()
958 s->in_flight); in mirror_dirty_init()
968 s->initial_zeroing_ongoing = false; in mirror_dirty_init()
970 /* In FULL mode, and the image already reads as zero. */ in mirror_dirty_init()
971 bitmap_set(s->zero_bitmap, 0, bitmap_length); in mirror_dirty_init()
975 for (offset = 0; offset < s->bdev_length; ) { in mirror_dirty_init()
977 int bytes = MIN(s->bdev_length - offset, in mirror_dirty_init()
978 QEMU_ALIGN_DOWN(INT_MAX, s->granularity)); in mirror_dirty_init()
982 if (job_is_cancelled(&s->common.job)) { in mirror_dirty_init()
987 ret = bdrv_co_is_allocated_above(bs, s->base_overlay, true, offset, in mirror_dirty_init()
996 bdrv_set_dirty_bitmap(s->dirty_bitmap, offset, count); in mirror_dirty_init()
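
Condensing the long comment above: in FULL mode the job picks between three strategies before copying starts. A hypothetical decision helper (names invented; details such as hole punching and cancellation handling are omitted):

#include <stdbool.h>

enum prezero_choice {
    TARGET_ALREADY_ZERO,    /* just set the zero bitmap, write nothing */
    MARK_ALL_DIRTY,         /* skip the pre-zero pass, copy the whole image */
    PREZERO_THEN_COPY,      /* fast pre-zero pass, then copy the data over it */
};

static enum prezero_choice choose_prezero(bool target_reads_zero,
                                          bool prezero_is_fast,
                                          bool must_visit_whole_source)
{
    if (target_reads_zero) {
        return TARGET_ALREADY_ZERO;
    }
    if (!prezero_is_fast || must_visit_whole_source) {
        return MARK_ALL_DIRTY;
    }
    return PREZERO_THEN_COPY;
}
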
1008 int ret = blk_co_flush(s->target); in mirror_flush()
1010 if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) { in mirror_flush()
1011 s->ret = ret; in mirror_flush()
1021 MirrorBDSOpaque *mirror_top_opaque = s->mirror_top_bs->opaque; in mirror_run()
1022 BlockDriverState *target_bs = blk_bs(s->target); in mirror_run()
1033 bs = bdrv_filter_bs(s->mirror_top_bs); in mirror_run()
1036 if (job_is_cancelled(&s->common.job)) { in mirror_run()
1041 s->bdev_length = bdrv_co_getlength(bs); in mirror_run()
1044 if (s->bdev_length < 0) { in mirror_run()
1045 ret = s->bdev_length; in mirror_run()
1049 target_length = blk_co_getlength(s->target); in mirror_run()
1057 if (s->base == blk_bs(s->target)) { in mirror_run()
1058 if (s->bdev_length > target_length) { in mirror_run()
1059 ret = blk_co_truncate(s->target, s->bdev_length, false, in mirror_run()
1065 } else if (s->bdev_length != target_length) { in mirror_run()
1067 ret = -EINVAL; in mirror_run()
1071 if (s->bdev_length == 0) { in mirror_run()
1073 job_transition_to_ready(&s->common.job); in mirror_run()
1074 qatomic_set(&s->actively_synced, true); in mirror_run()
1075 while (!job_cancel_requested(&s->common.job) && !s->should_complete) { in mirror_run()
1076 job_yield(&s->common.job); in mirror_run()
1081 length = DIV_ROUND_UP(s->bdev_length, s->granularity); in mirror_run()
1082 s->in_flight_bitmap = bitmap_new(length); in mirror_run()
1092 s->target_cluster_size = bdi.cluster_size; in mirror_run()
1094 s->target_cluster_size = BDRV_SECTOR_SIZE; in mirror_run()
1097 s->granularity < s->target_cluster_size) { in mirror_run()
1098 s->buf_size = MAX(s->buf_size, s->target_cluster_size); in mirror_run()
1099 s->cow_bitmap = bitmap_new(length); in mirror_run()
1101 s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov); in mirror_run()
1104 s->buf = qemu_try_blockalign(bs, s->buf_size); in mirror_run()
1105 if (s->buf == NULL) { in mirror_run()
1106 ret = -ENOMEM; in mirror_run()
1112 s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); in mirror_run()
1113 if (s->sync_mode != MIRROR_SYNC_MODE_NONE) { in mirror_run()
1115 if (ret < 0 || job_is_cancelled(&s->common.job)) { in mirror_run()
1124 mirror_top_opaque->job = s; in mirror_run()
1126 assert(!s->dbi); in mirror_run()
1127 s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap); in mirror_run()
1132 if (s->ret < 0) { in mirror_run()
1133 ret = s->ret; in mirror_run()
1137 job_pause_point(&s->common.job); in mirror_run()
1139 if (job_is_cancelled(&s->common.job)) { in mirror_run()
1144 cnt = bdrv_get_dirty_count(s->dirty_bitmap); in mirror_run()
1145 /* cnt is the number of dirty bytes remaining and s->bytes_in_flight is in mirror_run()
1148 job_progress_set_remaining(&s->common.job, in mirror_run()
1149 s->bytes_in_flight + cnt + in mirror_run()
1150 s->active_write_bytes_in_flight); in mirror_run()
1156 delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns; in mirror_run()
1158 iostatus = s->common.iostatus; in mirror_run()
1162 if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 || in mirror_run()
1163 (cnt == 0 && s->in_flight > 0)) { in mirror_run()
1164 trace_mirror_yield(s, cnt, s->buf_free_count, s->in_flight); in mirror_run()
1173 if (s->in_flight == 0 && cnt == 0) { in mirror_run()
1175 if (!job_is_ready(&s->common.job)) { in mirror_run()
1177 /* Go check s->ret. */ in mirror_run()
1182 * report completion. This way, block-job-cancel will leave in mirror_run()
1185 job_transition_to_ready(&s->common.job); in mirror_run()
1187 if (qatomic_read(&s->copy_mode) != MIRROR_COPY_MODE_BACKGROUND) { in mirror_run()
1188 qatomic_set(&s->actively_synced, true); in mirror_run()
1191 should_complete = s->should_complete || in mirror_run()
1192 job_cancel_requested(&s->common.job); in mirror_run()
1193 cnt = bdrv_get_dirty_count(s->dirty_bitmap); in mirror_run()
1204 * whether to switch to target check one last time if I/O has in mirror_run()
1209 s->in_drain = true; in mirror_run()
1213 assert(s->in_active_write_counter == 0); in mirror_run()
1215 cnt = bdrv_get_dirty_count(s->dirty_bitmap); in mirror_run()
1218 s->in_drain = false; in mirror_run()
1225 assert(QLIST_EMPTY(&bs->tracked_requests)); in mirror_run()
1230 if (job_is_ready(&s->common.job) && !should_complete) { in mirror_run()
1231 if (s->in_flight == 0 && cnt == 0) { in mirror_run()
1232 trace_mirror_before_sleep(s, cnt, job_is_ready(&s->common.job), in mirror_run()
1234 job_sleep_ns(&s->common.job, BLOCK_JOB_SLICE_TIME); in mirror_run()
1237 block_job_ratelimit_sleep(&s->common); in mirror_run()
1239 s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); in mirror_run()
1243 if (s->in_flight > 0) { in mirror_run()
1248 assert(ret < 0 || job_is_cancelled(&s->common.job)); in mirror_run()
1253 assert(s->in_flight == 0); in mirror_run()
1254 qemu_vfree(s->buf); in mirror_run()
1255 g_free(s->cow_bitmap); in mirror_run()
1256 g_free(s->zero_bitmap); in mirror_run()
1257 g_free(s->in_flight_bitmap); in mirror_run()
1258 bdrv_dirty_iter_free(s->dbi); in mirror_run()
1261 s->in_drain = true; in mirror_run()
1274 job->id); in mirror_complete()
1279 if (s->replaces) { in mirror_complete()
1280 s->to_replace = bdrv_find_node(s->replaces); in mirror_complete()
1281 if (!s->to_replace) { in mirror_complete()
1282 error_setg(errp, "Node name '%s' not found", s->replaces); in mirror_complete()
1287 error_setg(&s->replace_blocker, in mirror_complete()
1288 "block device is in use by block-job-complete"); in mirror_complete()
1289 bdrv_op_block_all(s->to_replace, s->replace_blocker); in mirror_complete()
1290 bdrv_ref(s->to_replace); in mirror_complete()
1293 s->should_complete = true; in mirror_complete()
1295 /* If the job is paused, it will be re-entered when it is resumed */ in mirror_complete()
1297 if (!job->paused) { in mirror_complete()
1320 if (!s->common.job.paused && !job_is_cancelled_locked(&job->job) in mirror_drained_poll()
1321 && !s->in_drain) { in mirror_drained_poll()
1326 return !!s->in_flight; in mirror_drained_poll()
1332 BlockDriverState *target = blk_bs(s->target); in mirror_cancel()
1336 * force-cancellation. in mirror_cancel()
1356 BlockJobChangeOptionsMirror *change_opts = &opts->u.mirror; in mirror_change()
1366 if (qatomic_read(&s->copy_mode) == change_opts->copy_mode) { in mirror_change()
1370 if (change_opts->copy_mode != MIRROR_COPY_MODE_WRITE_BLOCKING) { in mirror_change()
1371 error_setg(errp, "Change to copy mode '%s' is not implemented", in mirror_change()
1372 MirrorCopyMode_str(change_opts->copy_mode)); in mirror_change()
1376 current = qatomic_cmpxchg(&s->copy_mode, MIRROR_COPY_MODE_BACKGROUND, in mirror_change()
1377 change_opts->copy_mode); in mirror_change()
1379 error_setg(errp, "Expected current copy mode '%s', got '%s'", in mirror_change()
1389 info->u.mirror = (BlockJobInfoMirror) { in mirror_query()
1390 .actively_synced = qatomic_read(&s->actively_synced), in mirror_query()
1438 if (!QEMU_IS_ALIGNED(offset, job->granularity) && in do_sync_target_write()
1439 bdrv_dirty_bitmap_get(job->dirty_bitmap, offset)) in do_sync_target_write()
1458 qiov_offset = QEMU_ALIGN_UP(offset, job->granularity) - offset; in do_sync_target_write()
1464 bytes -= qiov_offset; in do_sync_target_write()
1467 if (!QEMU_IS_ALIGNED(offset + bytes, job->granularity) && in do_sync_target_write()
1468 bdrv_dirty_bitmap_get(job->dirty_bitmap, offset + bytes - 1)) in do_sync_target_write()
1470 uint64_t tail = (offset + bytes) % job->granularity; in do_sync_target_write()
1476 bytes -= tail; in do_sync_target_write()
1484 dirty_bitmap_offset = QEMU_ALIGN_UP(offset, job->granularity); in do_sync_target_write()
1485 dirty_bitmap_end = QEMU_ALIGN_DOWN(offset + bytes, job->granularity); in do_sync_target_write()
1487 bdrv_reset_dirty_bitmap(job->dirty_bitmap, dirty_bitmap_offset, in do_sync_target_write()
1488 dirty_bitmap_end - dirty_bitmap_offset); in do_sync_target_write()
1490 zero_bitmap_offset = offset / job->granularity; in do_sync_target_write()
1491 zero_bitmap_end = DIV_ROUND_UP(offset + bytes, job->granularity); in do_sync_target_write()
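
The alignment logic above clips an unaligned head or tail only when the neighbouring bytes are already dirty, and it clears dirty bits only for chunks the write covers completely. The aligned-middle computation in isolation (hypothetical helper, not QEMU code):

#include <stdint.h>

#define ALIGN_DOWN(n, m)  ((n) / (m) * (m))
#define ALIGN_UP(n, m)    ALIGN_DOWN((n) + (m) - 1, (m))

static void aligned_middle(uint64_t offset, uint64_t bytes,
                           uint64_t granularity,
                           uint64_t *mid_start, uint64_t *mid_end)
{
    *mid_start = ALIGN_UP(offset, granularity);
    *mid_end = ALIGN_DOWN(offset + bytes, granularity);
    if (*mid_end < *mid_start) {
        *mid_end = *mid_start;  /* write smaller than a chunk: nothing aligned */
    }
}

With a 64 KiB granularity, a write of 200000 bytes at offset 5000 fully covers only the chunks in [65536, 196608), so only those two chunks can have their dirty bits cleared; the unaligned head and tail stay dirty for the background copy.
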
1493 job_progress_increase_remaining(&job->common.job, bytes); in do_sync_target_write()
1494 job->active_write_bytes_in_flight += bytes; in do_sync_target_write()
1496 switch (method) { in do_sync_target_write()
1498 if (job->zero_bitmap) { in do_sync_target_write()
1499 bitmap_clear(job->zero_bitmap, zero_bitmap_offset, in do_sync_target_write()
1500 zero_bitmap_end - zero_bitmap_offset); in do_sync_target_write()
1502 ret = blk_co_pwritev_part(job->target, offset, bytes, in do_sync_target_write()
1507 if (job->zero_bitmap) { in do_sync_target_write()
1508 if (find_next_zero_bit(job->zero_bitmap, zero_bitmap_end, in do_sync_target_write()
1515 ret = blk_co_pwrite_zeroes(job->target, offset, bytes, flags); in do_sync_target_write()
1516 if (job->zero_bitmap && ret >= 0) { in do_sync_target_write()
1517 bitmap_set(job->zero_bitmap, dirty_bitmap_offset / job->granularity, in do_sync_target_write()
1518 (dirty_bitmap_end - dirty_bitmap_offset) / in do_sync_target_write()
1519 job->granularity); in do_sync_target_write()
1524 if (job->zero_bitmap) { in do_sync_target_write()
1525 bitmap_clear(job->zero_bitmap, zero_bitmap_offset, in do_sync_target_write()
1526 zero_bitmap_end - zero_bitmap_offset); in do_sync_target_write()
1529 ret = blk_co_pdiscard(job->target, offset, bytes); in do_sync_target_write()
1532 default: in do_sync_target_write()
1536 job->active_write_bytes_in_flight -= bytes; in do_sync_target_write()
1538 job_progress_update(&job->common.job, bytes); in do_sync_target_write()
1546 * the region for in-flight op. in do_sync_target_write()
1548 dirty_bitmap_offset = QEMU_ALIGN_DOWN(offset, job->granularity); in do_sync_target_write()
1549 dirty_bitmap_end = QEMU_ALIGN_UP(offset + bytes, job->granularity); in do_sync_target_write()
1550 bdrv_set_dirty_bitmap(job->dirty_bitmap, dirty_bitmap_offset, in do_sync_target_write()
1551 dirty_bitmap_end - dirty_bitmap_offset); in do_sync_target_write()
1552 qatomic_set(&job->actively_synced, false); in do_sync_target_write()
1554 action = mirror_error_action(job, false, -ret); in do_sync_target_write()
1556 if (!job->ret) { in do_sync_target_write()
1557 job->ret = ret; in do_sync_target_write()
1568 uint64_t start_chunk = offset / s->granularity; in active_write_prepare()
1569 uint64_t end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity); in active_write_prepare()
1580 qemu_co_queue_init(&op->waiting_requests); in active_write_prepare()
1581 QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next); in active_write_prepare()
1583 s->in_active_write_counter++; in active_write_prepare()
1587 * running requests that are copying off now-to-be stale data in the area, in active_write_prepare()
1600 bitmap_set(s->in_flight_bitmap, start_chunk, end_chunk - start_chunk); in active_write_prepare()
1607 uint64_t start_chunk = op->offset / op->s->granularity; in active_write_settle()
1608 uint64_t end_chunk = DIV_ROUND_UP(op->offset + op->bytes, in active_write_settle()
1609 op->s->granularity); in active_write_settle()
1611 if (!--op->s->in_active_write_counter && in active_write_settle()
1612 qatomic_read(&op->s->actively_synced)) { in active_write_settle()
1613 BdrvChild *source = op->s->mirror_top_bs->backing; in active_write_settle()
1615 if (QLIST_FIRST(&source->bs->parents) == source && in active_write_settle()
1622 assert(!bdrv_get_dirty_count(op->s->dirty_bitmap)); in active_write_settle()
1625 bitmap_clear(op->s->in_flight_bitmap, start_chunk, end_chunk - start_chunk); in active_write_settle()
1626 QTAILQ_REMOVE(&op->s->ops_in_flight, op, next); in active_write_settle()
1627 qemu_co_queue_restart_all(&op->waiting_requests); in active_write_settle()
1635 return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags); in bdrv_mirror_top_preadv()
1640 return s->job && s->job->ret >= 0 && in should_copy_to_target()
1641 !job_is_cancelled(&s->job->common.job) && in should_copy_to_target()
1642 qatomic_read(&s->job->copy_mode) == MIRROR_COPY_MODE_WRITE_BLOCKING; in should_copy_to_target()
1651 MirrorBDSOpaque *s = bs->opaque; in bdrv_mirror_top_do_write()
1655 op = active_write_prepare(s->job, offset, bytes); in bdrv_mirror_top_do_write()
1658 switch (method) { in bdrv_mirror_top_do_write()
1660 ret = bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags); in bdrv_mirror_top_do_write()
1664 ret = bdrv_co_pwrite_zeroes(bs->backing, offset, bytes, flags); in bdrv_mirror_top_do_write()
1668 ret = bdrv_co_pdiscard(bs->backing, offset, bytes); in bdrv_mirror_top_do_write()
1671 default: in bdrv_mirror_top_do_write()
1675 if (!copy_to_target && s->job && s->job->dirty_bitmap) { in bdrv_mirror_top_do_write()
1676 qatomic_set(&s->job->actively_synced, false); in bdrv_mirror_top_do_write()
1677 bdrv_set_dirty_bitmap(s->job->dirty_bitmap, offset, bytes); in bdrv_mirror_top_do_write()
1685 do_sync_target_write(s->job, method, offset, bytes, qiov, flags); in bdrv_mirror_top_do_write()
1702 bool copy_to_target = should_copy_to_target(bs->opaque); in bdrv_mirror_top_pwritev()
1710 iov_to_buf_full(qiov->iov, qiov->niov, 0, bounce_buf, bytes); in bdrv_mirror_top_pwritev()
1732 if (bs->backing == NULL) { in bdrv_mirror_top_flush()
1736 return bdrv_co_flush(bs->backing->bs); in bdrv_mirror_top_flush()
1743 bool copy_to_target = should_copy_to_target(bs->opaque); in bdrv_mirror_top_pwrite_zeroes()
1751 bool copy_to_target = should_copy_to_target(bs->opaque); in bdrv_mirror_top_pdiscard()
1758 if (bs->backing == NULL) { in bdrv_mirror_top_refresh_filename()
1763 pstrcpy(bs->exact_filename, sizeof(bs->exact_filename), in bdrv_mirror_top_refresh_filename()
1764 bs->backing->bs->filename); in bdrv_mirror_top_refresh_filename()
1768 BdrvChildRole role, in bdrv_mirror_top_child_perm() argument
1773 MirrorBDSOpaque *s = bs->opaque; in bdrv_mirror_top_child_perm()
1775 if (s->stop) { in bdrv_mirror_top_child_perm()
1785 bdrv_default_perms(bs, c, role, reopen_queue, in bdrv_mirror_top_child_perm()
1788 if (s->is_commit) { in bdrv_mirror_top_child_perm()
1793 * We also have to force-share the WRITE permission, or in bdrv_mirror_top_child_perm()
1860 error_setg(errp, "Invalid parameter 'buf-size'"); in mirror_start_job()
1887 mirror_top_bs->implicit = true; in mirror_start_job()
1891 mirror_top_bs->never_freeze = true; in mirror_start_job()
1893 mirror_top_bs->total_sectors = bs->total_sectors; in mirror_start_job()
1894 mirror_top_bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED; in mirror_start_job()
1895 mirror_top_bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED | in mirror_start_job()
1898 mirror_top_bs->opaque = bs_opaque; in mirror_start_job()
1900 bs_opaque->is_commit = target_is_backing; in mirror_start_job()
1924 s->mirror_top_bs = mirror_top_bs; in mirror_start_job()
1925 s->base_ro = base_ro; in mirror_start_job()
1943 error_setg_errno(errp, -bs_size, in mirror_start_job()
1950 error_setg_errno(errp, -target_size, in mirror_start_job()
1975 s->target = blk_new(s->common.job.aio_context, in mirror_start_job()
1977 ret = blk_insert_bs(s->target, target, errp); in mirror_start_job()
1983 * of non-shared block migration. To allow migration completion, we in mirror_start_job()
1988 blk_set_force_allow_inactivate(s->target); in mirror_start_job()
1990 blk_set_allow_aio_context_change(s->target, true); in mirror_start_job()
1991 blk_set_disable_request_queuing(s->target, true); in mirror_start_job()
1994 s->replaces = g_strdup(replaces); in mirror_start_job()
1995 s->on_source_error = on_source_error; in mirror_start_job()
1996 s->on_target_error = on_target_error; in mirror_start_job()
1997 s->sync_mode = sync_mode; in mirror_start_job()
1998 s->backing_mode = backing_mode; in mirror_start_job()
1999 s->target_is_zero = target_is_zero; in mirror_start_job()
2000 qatomic_set(&s->copy_mode, copy_mode); in mirror_start_job()
2001 s->base = base; in mirror_start_job()
2002 s->base_overlay = bdrv_find_overlay(bs, base); in mirror_start_job()
2003 s->granularity = granularity; in mirror_start_job()
2004 s->buf_size = ROUND_UP(buf_size, granularity); in mirror_start_job()
2005 s->unmap = unmap; in mirror_start_job()
2007 s->should_complete = true; in mirror_start_job()
2011 s->dirty_bitmap = bdrv_create_dirty_bitmap(s->mirror_top_bs, granularity, in mirror_start_job()
2013 if (!s->dirty_bitmap) { in mirror_start_job()
2019 * mode. in mirror_start_job()
2021 bdrv_disable_dirty_bitmap(s->dirty_bitmap); in mirror_start_job()
2024 ret = block_job_add_bdrv(&s->common, "source", bs, 0, in mirror_start_job()
2034 block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL, in mirror_start_job()
2054 * ourselves at s->base (if writes are blocked for a node, they are in mirror_start_job()
2056 * second filter driver above s->base (== target). in mirror_start_job()
2071 ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0, in mirror_start_job()
2086 QTAILQ_INIT(&s->ops_in_flight); in mirror_start_job()
2089 job_start(&s->common.job); in mirror_start_job()
2091 return &s->common; in mirror_start_job()
2099 g_free(s->replaces); in mirror_start_job()
2100 blk_unref(s->target); in mirror_start_job()
2101 bs_opaque->job = NULL; in mirror_start_job()
2102 if (s->dirty_bitmap) { in mirror_start_job()
2103 bdrv_release_dirty_bitmap(s->dirty_bitmap); in mirror_start_job()
2105 job_early_fail(&s->common.job); in mirror_start_job()
2108 bs_opaque->stop = true; in mirror_start_job()
2111 assert(mirror_top_bs->backing->bs == bs); in mirror_start_job()
2112 bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing, in mirror_start_job()
2127 MirrorSyncMode mode, BlockMirrorBackingMode backing_mode, in mirror_start() argument
2138 if ((mode == MIRROR_SYNC_MODE_INCREMENTAL) || in mirror_start()
2139 (mode == MIRROR_SYNC_MODE_BITMAP)) { in mirror_start()
2140 error_setg(errp, "Sync mode '%s' not supported", in mirror_start()
2141 MirrorSyncMode_str(mode)); in mirror_start()
2146 base = mode == MIRROR_SYNC_MODE_TOP ? bdrv_backing_chain_next(bs) : NULL; in mirror_start()
2150 speed, granularity, buf_size, mode, backing_mode, in mirror_start()