/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/coroutine.h"
#include "qemu/range.h"
#include "trace.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "block/dirty-bitmap.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"
#include "qemu/memalign.h"

#define MAX_IN_FLIGHT 16
#define MAX_IO_BYTES (1 << 20) /* 1 MiB */
#define DEFAULT_MIRROR_BUF_SIZE (MAX_IN_FLIGHT * MAX_IO_BYTES)

/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;

typedef struct MirrorOp MirrorOp;

typedef struct MirrorBlockJob {
    BlockJob common;
    BlockBackend *target;
    BlockDriverState *mirror_top_bs;
    BlockDriverState *base;
    BlockDriverState *base_overlay;

    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockMirrorBackingMode backing_mode;
    /* Whether the target image requires explicit zero-initialization */
    bool zero_target;
    MirrorCopyMode copy_mode;
    BlockdevOnError on_source_error, on_target_error;
    /* Set when the target is synced (dirty bitmap is clean, nothing
     * in flight) and the job is running in active mode */
    bool actively_synced;
    bool should_complete;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    BdrvDirtyBitmapIter *dbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    uint64_t last_pause_ns;
    unsigned long *in_flight_bitmap;
    unsigned in_flight;
    int64_t bytes_in_flight;
    QTAILQ_HEAD(, MirrorOp) ops_in_flight;
    int ret;
    bool unmap;
    int target_cluster_size;
    int max_iov;
    bool initial_zeroing_ongoing;
    int in_active_write_counter;
    int64_t active_write_bytes_in_flight;
    bool prepared;
    bool in_drain;
} MirrorBlockJob;

typedef struct MirrorBDSOpaque {
    MirrorBlockJob *job;
    bool stop;
    bool is_commit;
} MirrorBDSOpaque;

struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t offset;
    uint64_t bytes;

    /* The pointee is set by mirror_co_read(), mirror_co_zero(), and
     * mirror_co_discard() before yielding for the first time */
    int64_t *bytes_handled;

    bool is_pseudo_op;
    bool is_active_write;
    bool is_in_flight;
    CoQueue waiting_requests;
    Coroutine *co;
    MirrorOp *waiting_for_op;

    QTAILQ_ENTRY(MirrorOp) next;
};

typedef enum MirrorMethod {
    MIRROR_METHOD_COPY,
    MIRROR_METHOD_ZERO,
    MIRROR_METHOD_DISCARD,
} MirrorMethod;

static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->actively_synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&s->common, s->on_target_error,
                                      false, error);
    }
}

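/* Wait until no in-flight operation conflicts with the granularity chunks
 * covering [offset, offset + bytes).  @self may be NULL when the waiter is
 * not itself a MirrorOp (as for the background job in mirror_iteration()). */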
static void coroutine_fn mirror_wait_on_conflicts(MirrorOp *self,
                                                  MirrorBlockJob *s,
                                                  uint64_t offset,
                                                  uint64_t bytes)
{
    uint64_t self_start_chunk = offset / s->granularity;
    uint64_t self_end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity);
    uint64_t self_nb_chunks = self_end_chunk - self_start_chunk;

    while (find_next_bit(s->in_flight_bitmap, self_end_chunk,
                         self_start_chunk) < self_end_chunk &&
           s->ret >= 0)
    {
        MirrorOp *op;

        QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
            uint64_t op_start_chunk = op->offset / s->granularity;
            uint64_t op_nb_chunks = DIV_ROUND_UP(op->offset + op->bytes,
                                                 s->granularity) -
                                    op_start_chunk;

            if (op == self) {
                continue;
            }

            if (ranges_overlap(self_start_chunk, self_nb_chunks,
                               op_start_chunk, op_nb_chunks))
            {
                if (self) {
                    /*
                     * If the operation is already (indirectly) waiting for us,
                     * or will wait for us as soon as it wakes up, then just go
                     * on (instead of producing a deadlock in the former case).
                     */
                    if (op->waiting_for_op) {
                        continue;
                    }

                    self->waiting_for_op = op;
                }

                qemu_co_queue_wait(&op->waiting_requests, NULL);

                if (self) {
                    self->waiting_for_op = NULL;
                }

                break;
            }
        }
    }
}

static void coroutine_fn mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks;

    trace_mirror_iteration_done(s, op->offset, op->bytes, ret);

    s->in_flight--;
    s->bytes_in_flight -= op->bytes;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    chunk_num = op->offset / s->granularity;
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    QTAILQ_REMOVE(&s->ops_in_flight, op, next);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        if (!s->initial_zeroing_ongoing) {
            job_progress_update(&s->common.job, op->bytes);
        }
    }
    qemu_iovec_destroy(&op->qiov);

    qemu_co_queue_restart_all(&op->waiting_requests);
    g_free(op);
}

static void coroutine_fn mirror_write_complete(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;

    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }

    mirror_iteration_done(op, ret);
}

static void coroutine_fn mirror_read_complete(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;

    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
        return;
    }

    ret = blk_co_pwritev(s->target, op->offset, op->qiov.size, &op->qiov, 0);
    mirror_write_complete(op, ret);
}

/* Clip bytes relative to offset to not exceed end-of-file */
static inline int64_t mirror_clip_bytes(MirrorBlockJob *s,
                                        int64_t offset,
                                        int64_t bytes)
{
    return MIN(bytes, s->bdev_length - offset);
}

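/*
 * Illustration of the alignment below, with hypothetical example numbers:
 * with a 64 KiB job granularity and a 128 KiB target cluster size, a dirty
 * 64 KiB chunk whose surrounding target cluster has not been copied yet
 * (its cow_bitmap bits are clear) is widened to the full 128 KiB cluster,
 * so that the target never ends up with partially populated clusters.
 */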
/* Round offset and/or bytes to target cluster if COW is needed, and
 * return the offset of the adjusted tail against original. */
static int coroutine_fn mirror_cow_align(MirrorBlockJob *s, int64_t *offset,
                                         uint64_t *bytes)
{
    bool need_cow;
    int ret = 0;
    int64_t align_offset = *offset;
    int64_t align_bytes = *bytes;
    int max_bytes = s->granularity * s->max_iov;

    need_cow = !test_bit(*offset / s->granularity, s->cow_bitmap);
    need_cow |= !test_bit((*offset + *bytes - 1) / s->granularity,
                          s->cow_bitmap);
    if (need_cow) {
        bdrv_round_to_subclusters(blk_bs(s->target), *offset, *bytes,
                                  &align_offset, &align_bytes);
    }

    if (align_bytes > max_bytes) {
        align_bytes = max_bytes;
        if (need_cow) {
            align_bytes = QEMU_ALIGN_DOWN(align_bytes, s->target_cluster_size);
        }
    }
    /* Clipping may result in align_bytes unaligned to chunk boundary, but
     * that doesn't matter because it's already the end of source image. */
    align_bytes = mirror_clip_bytes(s, align_offset, align_bytes);

    ret = align_offset + align_bytes - (*offset + *bytes);
    *offset = align_offset;
    *bytes = align_bytes;
    assert(ret >= 0);
    return ret;
}

static inline void coroutine_fn
mirror_wait_for_free_in_flight_slot(MirrorBlockJob *s)
{
    MirrorOp *op;

    QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
        /*
         * Do not wait on pseudo ops, because it may in turn wait on
         * some other operation to start, which may in fact be the
         * caller of this function. Since there is only one pseudo op
         * at any given time, we will always find some real operation
         * to wait on.
         * Also, do not wait on active operations, because they do not
         * use up in-flight slots.
         */
        if (!op->is_pseudo_op && op->is_in_flight && !op->is_active_write) {
            qemu_co_queue_wait(&op->waiting_requests, NULL);
            return;
        }
    }
    abort();
}

/* Perform a mirror copy operation.
 *
 * *op->bytes_handled is set to the number of bytes copied after and
 * including offset, excluding any bytes copied prior to offset due
 * to alignment.  This will be op->bytes if no alignment is necessary,
 * or (new_end - op->offset) if the tail is rounded up or down due to
 * alignment or buffer limit.
 */
static void coroutine_fn mirror_co_read(void *opaque)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    int nb_chunks;
    uint64_t ret;
    uint64_t max_bytes;

    max_bytes = s->granularity * s->max_iov;

    /* We can only handle as much as buf_size at a time. */
    op->bytes = MIN(s->buf_size, MIN(max_bytes, op->bytes));
    assert(op->bytes);
    assert(op->bytes < BDRV_REQUEST_MAX_BYTES);
    *op->bytes_handled = op->bytes;

    if (s->cow_bitmap) {
        *op->bytes_handled += mirror_cow_align(s, &op->offset, &op->bytes);
    }
    /* Cannot exceed BDRV_REQUEST_MAX_BYTES + INT_MAX */
    assert(*op->bytes_handled <= UINT_MAX);
    assert(op->bytes <= s->buf_size);
    /* The offset is granularity-aligned because:
     * 1) Caller passes in aligned values;
     * 2) mirror_cow_align is used only when target cluster is larger. */
    assert(QEMU_IS_ALIGNED(op->offset, s->granularity));
    /* The range is sector-aligned, since bdrv_getlength() rounds up. */
    assert(QEMU_IS_ALIGNED(op->bytes, BDRV_SECTOR_SIZE));
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    while (s->buf_free_count < nb_chunks) {
        trace_mirror_yield_in_flight(s, op->offset, s->in_flight);
        mirror_wait_for_free_in_flight_slot(s);
    }

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = op->bytes - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
    }

    /* Copy the dirty cluster.  */
    s->in_flight++;
    s->bytes_in_flight += op->bytes;
    op->is_in_flight = true;
    trace_mirror_one_iteration(s, op->offset, op->bytes);

    WITH_GRAPH_RDLOCK_GUARD() {
        ret = bdrv_co_preadv(s->mirror_top_bs->backing, op->offset, op->bytes,
                             &op->qiov, 0);
    }
    mirror_read_complete(op, ret);
}

static void coroutine_fn mirror_co_zero(void *opaque)
{
    MirrorOp *op = opaque;
    int ret;

    op->s->in_flight++;
    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;
    op->is_in_flight = true;

    ret = blk_co_pwrite_zeroes(op->s->target, op->offset, op->bytes,
                               op->s->unmap ? BDRV_REQ_MAY_UNMAP : 0);
    mirror_write_complete(op, ret);
}

static void coroutine_fn mirror_co_discard(void *opaque)
{
    MirrorOp *op = opaque;
    int ret;

    op->s->in_flight++;
    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;
    op->is_in_flight = true;

    ret = blk_co_pdiscard(op->s->target, op->offset, op->bytes);
    mirror_write_complete(op, ret);
}

static unsigned mirror_perform(MirrorBlockJob *s, int64_t offset,
                               unsigned bytes, MirrorMethod mirror_method)
{
    MirrorOp *op;
    Coroutine *co;
    int64_t bytes_handled = -1;

    op = g_new(MirrorOp, 1);
    *op = (MirrorOp){
        .s              = s,
        .offset         = offset,
        .bytes          = bytes,
        .bytes_handled  = &bytes_handled,
    };
    qemu_co_queue_init(&op->waiting_requests);

    switch (mirror_method) {
    case MIRROR_METHOD_COPY:
        co = qemu_coroutine_create(mirror_co_read, op);
        break;
    case MIRROR_METHOD_ZERO:
        co = qemu_coroutine_create(mirror_co_zero, op);
        break;
    case MIRROR_METHOD_DISCARD:
        co = qemu_coroutine_create(mirror_co_discard, op);
        break;
    default:
        abort();
    }
    op->co = co;

    QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);
    qemu_coroutine_enter(co);
    /* At this point, ownership of op has been moved to the coroutine
     * and the object may already be freed */

    /* Assert that this value has been set */
    assert(bytes_handled >= 0);

    /* Same assertion as in mirror_co_read() (and for mirror_co_zero()
     * and mirror_co_discard(), bytes_handled == op->bytes, which
     * is the @bytes parameter given to this function) */
    assert(bytes_handled <= UINT_MAX);
    return bytes_handled;
}

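/* One pass of the background copy loop: pick the next dirty chunk from the
 * dirty bitmap iterator, extend it over consecutive dirty chunks up to
 * buf_size, and submit copy, zero or discard operations for it depending on
 * the block status of the source. */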
static void coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->mirror_top_bs->backing->bs;
    MirrorOp *pseudo_op;
    int64_t offset;
    /* At least the first dirty chunk is mirrored in one iteration. */
    int nb_chunks = 1;
    bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
    int max_io_bytes = MAX(s->buf_size / MAX_IN_FLIGHT, MAX_IO_BYTES);

    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    offset = bdrv_dirty_iter_next(s->dbi);
    if (offset < 0) {
        bdrv_set_dirty_iter(s->dbi, 0);
        offset = bdrv_dirty_iter_next(s->dbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(offset >= 0);
    }
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    /*
     * Wait for concurrent requests to @offset. The next loop will limit the
     * copied area based on in_flight_bitmap so we only copy an area that does
     * not overlap with concurrent in-flight requests. Still, we would like to
     * copy something, so wait until there are at least no more requests to the
     * very beginning of the area.
     */
    mirror_wait_on_conflicts(NULL, s, offset, 1);

    job_pause_point(&s->common.job);

    /* Find the number of consecutive dirty chunks following the first dirty
     * one, and wait for in flight requests in them. */
    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    while (nb_chunks * s->granularity < s->buf_size) {
        int64_t next_dirty;
        int64_t next_offset = offset + nb_chunks * s->granularity;
        int64_t next_chunk = next_offset / s->granularity;
        if (next_offset >= s->bdev_length ||
            !bdrv_dirty_bitmap_get_locked(s->dirty_bitmap, next_offset)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
            break;
        }

        next_dirty = bdrv_dirty_iter_next(s->dbi);
        if (next_dirty > next_offset || next_dirty < 0) {
            /* The bitmap iterator's cache is stale, refresh it */
            bdrv_set_dirty_iter(s->dbi, next_offset);
            next_dirty = bdrv_dirty_iter_next(s->dbi);
        }
        assert(next_dirty == next_offset);
        nb_chunks++;
    }

    /* Clear dirty bits before querying the block status, because
     * calling bdrv_block_status_above could yield - if some blocks are
     * marked dirty in this window, we need to know.
     */
    bdrv_reset_dirty_bitmap_locked(s->dirty_bitmap, offset,
                                   nb_chunks * s->granularity);
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    /* Before claiming an area in the in-flight bitmap, we have to
     * create a MirrorOp for it so that conflicting requests can wait
     * for it.  mirror_perform() will create the real MirrorOps later,
     * for now we just create a pseudo operation that will wake up all
     * conflicting requests once all real operations have been
     * launched. */
    pseudo_op = g_new(MirrorOp, 1);
    *pseudo_op = (MirrorOp){
        .offset         = offset,
        .bytes          = nb_chunks * s->granularity,
        .is_pseudo_op   = true,
    };
    qemu_co_queue_init(&pseudo_op->waiting_requests);
    QTAILQ_INSERT_TAIL(&s->ops_in_flight, pseudo_op, next);

    bitmap_set(s->in_flight_bitmap, offset / s->granularity, nb_chunks);
    while (nb_chunks > 0 && offset < s->bdev_length) {
        int ret;
        int64_t io_bytes;
        int64_t io_bytes_acct;
        MirrorMethod mirror_method = MIRROR_METHOD_COPY;

        assert(!(offset % s->granularity));
        WITH_GRAPH_RDLOCK_GUARD() {
            ret = bdrv_block_status_above(source, NULL, offset,
                                          nb_chunks * s->granularity,
                                          &io_bytes, NULL, NULL);
        }
        if (ret < 0) {
            io_bytes = MIN(nb_chunks * s->granularity, max_io_bytes);
        } else if (ret & BDRV_BLOCK_DATA) {
            io_bytes = MIN(io_bytes, max_io_bytes);
        }

        io_bytes -= io_bytes % s->granularity;
        if (io_bytes < s->granularity) {
            io_bytes = s->granularity;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_offset;
            int64_t target_bytes;
            WITH_GRAPH_RDLOCK_GUARD() {
                bdrv_round_to_subclusters(blk_bs(s->target), offset, io_bytes,
                                          &target_offset, &target_bytes);
            }
            if (target_offset == offset &&
                target_bytes == io_bytes) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                    MIRROR_METHOD_ZERO :
                                    MIRROR_METHOD_DISCARD;
            }
        }

        while (s->in_flight >= MAX_IN_FLIGHT) {
            trace_mirror_yield_in_flight(s, offset, s->in_flight);
            mirror_wait_for_free_in_flight_slot(s);
        }

        if (s->ret < 0) {
            ret = 0;
            goto fail;
        }

        io_bytes = mirror_clip_bytes(s, offset, io_bytes);
        io_bytes = mirror_perform(s, offset, io_bytes, mirror_method);
        if (mirror_method != MIRROR_METHOD_COPY && write_zeroes_ok) {
            io_bytes_acct = 0;
        } else {
            io_bytes_acct = io_bytes;
        }
        assert(io_bytes);
        offset += io_bytes;
        nb_chunks -= DIV_ROUND_UP(io_bytes, s->granularity);
        block_job_ratelimit_processed_bytes(&s->common, io_bytes_acct);
    }

fail:
    QTAILQ_REMOVE(&s->ops_in_flight, pseudo_op, next);
    qemu_co_queue_restart_all(&pseudo_op->waiting_requests);
    g_free(pseudo_op);
}

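/* Carve the transfer buffer s->buf into granularity-sized chunks and put
 * them all on the free list.  E.g. with the default 16 MiB buffer
 * (DEFAULT_MIRROR_BUF_SIZE) and a typical 64 KiB granularity this yields
 * 256 free chunks. */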
static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}

/* This is also used for the .pause callback. There is no matching
 * mirror_resume() because mirror_run() will begin iterating again
 * when the job is resumed.
 */
static void coroutine_fn mirror_wait_for_all_io(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        mirror_wait_for_free_in_flight_slot(s);
    }
}

/**
 * mirror_exit_common: handle both abort() and prepare() cases.
 * for .prepare, returns 0 on success and -errno on failure.
 * for .abort cases, denoted by abort = true, MUST return 0.
 */
static int mirror_exit_common(Job *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockJob *bjob = &s->common;
    MirrorBDSOpaque *bs_opaque;
    AioContext *replace_aio_context = NULL;
    BlockDriverState *src;
    BlockDriverState *target_bs;
    BlockDriverState *mirror_top_bs;
    Error *local_err = NULL;
    bool abort = job->ret < 0;
    int ret = 0;

    GLOBAL_STATE_CODE();

    if (s->prepared) {
        return 0;
    }
    s->prepared = true;

    aio_context_acquire(qemu_get_aio_context());

    mirror_top_bs = s->mirror_top_bs;
    bs_opaque = mirror_top_bs->opaque;
    src = mirror_top_bs->backing->bs;
    target_bs = blk_bs(s->target);

    if (bdrv_chain_contains(src, target_bs)) {
        bdrv_unfreeze_backing_chain(mirror_top_bs, target_bs);
    }

    bdrv_release_dirty_bitmap(s->dirty_bitmap);

    /* Make sure that the source BDS doesn't go away during bdrv_replace_node,
     * before we can call bdrv_drained_end */
    bdrv_ref(src);
    bdrv_ref(mirror_top_bs);
    bdrv_ref(target_bs);

    /*
     * Remove target parent that still uses BLK_PERM_WRITE/RESIZE before
     * inserting target_bs at s->to_replace, where we might not be able to get
     * these permissions.
     */
    blk_unref(s->target);
    s->target = NULL;

    /* We don't access the source any more. Dropping any WRITE/RESIZE is
     * required before it could become a backing file of target_bs. Not having
     * these permissions any more means that we can't allow any new requests on
     * mirror_top_bs from now on, so keep it drained. */
    bdrv_drained_begin(mirror_top_bs);
    bs_opaque->stop = true;

    bdrv_graph_rdlock_main_loop();
    bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
                             &error_abort);
    bdrv_graph_rdunlock_main_loop();

    if (!abort && s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
        BlockDriverState *backing = s->is_none_mode ? src : s->base;
        BlockDriverState *unfiltered_target = bdrv_skip_filters(target_bs);

        if (bdrv_cow_bs(unfiltered_target) != backing) {
            bdrv_set_backing_hd(unfiltered_target, backing, &local_err);
            if (local_err) {
                error_report_err(local_err);
                local_err = NULL;
                ret = -EPERM;
            }
        }
    } else if (!abort && s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) {
        assert(!bdrv_backing_chain_next(target_bs));
        ret = bdrv_open_backing_file(bdrv_skip_filters(target_bs), NULL,
                                     "backing", &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            local_err = NULL;
        }
    }

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && !abort) {
        BlockDriverState *to_replace = s->to_replace ?: src;
        bool ro = bdrv_is_read_only(to_replace);

        if (ro != bdrv_is_read_only(target_bs)) {
            bdrv_reopen_set_read_only(target_bs, ro, NULL);
        }

        /* The mirror job has no requests in flight any more, but we need to
         * drain potential other users of the BDS before changing the graph. */
        assert(s->in_drain);
        bdrv_drained_begin(target_bs);
        /*
         * Cannot use check_to_replace_node() here, because that would
         * check for an op blocker on @to_replace, and we have our own
         * there.
         *
         * TODO Pull out the writer lock from bdrv_replace_node() to here
         */
        bdrv_graph_rdlock_main_loop();
        if (bdrv_recurse_can_replace(src, to_replace)) {
            bdrv_replace_node(to_replace, target_bs, &local_err);
        } else {
            error_setg(&local_err, "Can no longer replace '%s' by '%s', "
                       "because it can no longer be guaranteed that doing so "
                       "would not lead to an abrupt change of visible data",
                       to_replace->node_name, target_bs->node_name);
        }
        bdrv_graph_rdunlock_main_loop();
        bdrv_drained_end(target_bs);
        if (local_err) {
            error_report_err(local_err);
            ret = -EPERM;
        }
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_unref(target_bs);

    /*
     * Remove the mirror filter driver from the graph. Before this, get rid of
     * the blockers on the intermediate nodes so that the resulting state is
     * valid.
     */
    block_job_remove_all_bdrv(bjob);
    bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort);

    bs_opaque->job = NULL;

    bdrv_drained_end(src);
    bdrv_drained_end(mirror_top_bs);
    s->in_drain = false;
    bdrv_unref(mirror_top_bs);
    bdrv_unref(src);

    aio_context_release(qemu_get_aio_context());

    return ret;
}

static int mirror_prepare(Job *job)
{
    return mirror_exit_common(job);
}

static void mirror_abort(Job *job)
{
    int ret = mirror_exit_common(job);
    assert(ret == 0);
}

static void coroutine_fn mirror_throttle(MirrorBlockJob *s)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    if (now - s->last_pause_ns > BLOCK_JOB_SLICE_TIME) {
        s->last_pause_ns = now;
        job_sleep_ns(&s->common.job, 0);
    } else {
        job_pause_point(&s->common.job);
    }
}

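/* Prepare the dirty bitmap for a full or top sync: optionally zero the
 * target first (when it needs explicit zero-initialization), then mark as
 * dirty every region that is allocated in the source chain down to
 * base_overlay. */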
static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
{
    int64_t offset;
    BlockDriverState *bs = s->mirror_top_bs->backing->bs;
    BlockDriverState *target_bs = blk_bs(s->target);
    int ret;
    int64_t count;

    if (s->zero_target) {
        if (!bdrv_can_write_zeroes_with_unmap(target_bs)) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, s->bdev_length);
            return 0;
        }

        s->initial_zeroing_ongoing = true;
        for (offset = 0; offset < s->bdev_length; ) {
            int bytes = MIN(s->bdev_length - offset,
                            QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

            mirror_throttle(s);

            if (job_is_cancelled(&s->common.job)) {
                s->initial_zeroing_ongoing = false;
                return 0;
            }

            if (s->in_flight >= MAX_IN_FLIGHT) {
                trace_mirror_yield(s, UINT64_MAX, s->buf_free_count,
                                   s->in_flight);
                mirror_wait_for_free_in_flight_slot(s);
                continue;
            }

            mirror_perform(s, offset, bytes, MIRROR_METHOD_ZERO);
            offset += bytes;
        }

        mirror_wait_for_all_io(s);
        s->initial_zeroing_ongoing = false;
    }

    /* First part, loop on the sectors and initialize the dirty bitmap. */
    for (offset = 0; offset < s->bdev_length; ) {
        /* Just to make sure we are not exceeding int limit. */
        int bytes = MIN(s->bdev_length - offset,
                        QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

        mirror_throttle(s);

        if (job_is_cancelled(&s->common.job)) {
            return 0;
        }

        WITH_GRAPH_RDLOCK_GUARD() {
            ret = bdrv_is_allocated_above(bs, s->base_overlay, true, offset,
                                          bytes, &count);
        }
        if (ret < 0) {
            return ret;
        }

        assert(count);
        if (ret > 0) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, offset, count);
        }
        offset += count;
    }
    return 0;
}

/* Called when going out of the streaming phase to flush the bulk of the
 * data to the medium, or just before completing.
 */
static int coroutine_fn mirror_flush(MirrorBlockJob *s)
{
    int ret = blk_co_flush(s->target);
    if (ret < 0) {
        if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) {
            s->ret = ret;
        }
    }
    return ret;
}

static int coroutine_fn mirror_run(Job *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockDriverState *bs = s->mirror_top_bs->backing->bs;
    MirrorBDSOpaque *mirror_top_opaque = s->mirror_top_bs->opaque;
    BlockDriverState *target_bs = blk_bs(s->target);
    bool need_drain = true;
    BlockDeviceIoStatus iostatus;
    int64_t length;
    int64_t target_length;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for an empty string */
    int ret = 0;

    if (job_is_cancelled(&s->common.job)) {
        goto immediate_exit;
    }

    bdrv_graph_co_rdlock();
    s->bdev_length = bdrv_co_getlength(bs);
    bdrv_graph_co_rdunlock();

    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    }

    target_length = blk_co_getlength(s->target);
    if (target_length < 0) {
        ret = target_length;
        goto immediate_exit;
    }

    /* Active commit must resize the base image if its size differs from the
     * active layer. */
    if (s->base == blk_bs(s->target)) {
        if (s->bdev_length > target_length) {
            ret = blk_co_truncate(s->target, s->bdev_length, false,
                                  PREALLOC_MODE_OFF, 0, NULL);
            if (ret < 0) {
                goto immediate_exit;
            }
        }
    } else if (s->bdev_length != target_length) {
        error_setg(errp, "Source and target image have different sizes");
        ret = -EINVAL;
        goto immediate_exit;
    }

    if (s->bdev_length == 0) {
        /* Transition to the READY state and wait for complete. */
        job_transition_to_ready(&s->common.job);
        s->actively_synced = true;
        while (!job_cancel_requested(&s->common.job) && !s->should_complete) {
            job_yield(&s->common.job);
        }
        goto immediate_exit;
    }

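    /* length is the job length in granularity-sized chunks; the in-flight
     * bitmap below tracks, one bit per chunk, which chunks currently have
     * a copy operation in flight. */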
    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW.  Instead, we copy sectors around the
     * dirty data if needed.  We need a bitmap to do that.
     */
    bdrv_get_backing_filename(target_bs, backing_filename,
                              sizeof(backing_filename));
    bdrv_graph_co_rdlock();
    if (!bdrv_co_get_info(target_bs, &bdi) && bdi.cluster_size) {
        s->target_cluster_size = bdi.cluster_size;
    } else {
        s->target_cluster_size = BDRV_SECTOR_SIZE;
    }
    bdrv_graph_co_rdunlock();
    if (backing_filename[0] && !bdrv_backing_chain_next(target_bs) &&
        s->granularity < s->target_cluster_size) {
        s->buf_size = MAX(s->buf_size, s->target_cluster_size);
        s->cow_bitmap = bitmap_new(length);
    }
    s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);

    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    mirror_free_init(s);

    s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (!s->is_none_mode) {
        ret = mirror_dirty_init(s);
        if (ret < 0 || job_is_cancelled(&s->common.job)) {
            goto immediate_exit;
        }
    }

    /*
     * Only now the job is fully initialised and mirror_top_bs should start
     * accessing it.
     */
    mirror_top_opaque->job = s;

    assert(!s->dbi);
    s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap);
    for (;;) {
        int64_t cnt, delta;
        bool should_complete;

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        job_pause_point(&s->common.job);

        if (job_is_cancelled(&s->common.job)) {
            ret = 0;
            goto immediate_exit;
        }

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* cnt is the number of dirty bytes remaining and s->bytes_in_flight is
         * the number of bytes currently being processed; together those are
         * the current remaining operation length */
        job_progress_set_remaining(&s->common.job,
                                   s->bytes_in_flight + cnt +
                                   s->active_write_bytes_in_flight);

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every BLOCK_JOB_SLICE_TIME nanoseconds, or when there is
         * an error, or when the source is clean, whichever comes first. */
        delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns;
        WITH_JOB_LOCK_GUARD() {
            iostatus = s->common.iostatus;
        }
        if (delta < BLOCK_JOB_SLICE_TIME &&
            iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, cnt, s->buf_free_count, s->in_flight);
                mirror_wait_for_free_in_flight_slot(s);
                continue;
            } else if (cnt != 0) {
                mirror_iteration(s);
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            if (!job_is_ready(&s->common.job)) {
                if (mirror_flush(s) < 0) {
                    /* Go check s->ret. */
                    continue;
                }
                /* We're out of the streaming phase. From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion. This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
1075 */ 1076 job_transition_to_ready(&s->common.job); 1077 if (s->copy_mode != MIRROR_COPY_MODE_BACKGROUND) { 1078 s->actively_synced = true; 1079 } 1080 } 1081 1082 should_complete = s->should_complete || 1083 job_cancel_requested(&s->common.job); 1084 cnt = bdrv_get_dirty_count(s->dirty_bitmap); 1085 } 1086 1087 if (cnt == 0 && should_complete) { 1088 /* The dirty bitmap is not updated while operations are pending. 1089 * If we're about to exit, wait for pending operations before 1090 * calling bdrv_get_dirty_count(bs), or we may exit while the 1091 * source has dirty data to copy! 1092 * 1093 * Note that I/O can be submitted by the guest while 1094 * mirror_populate runs, so pause it now. Before deciding 1095 * whether to switch to target check one last time if I/O has 1096 * come in the meanwhile, and if not flush the data to disk. 1097 */ 1098 trace_mirror_before_drain(s, cnt); 1099 1100 s->in_drain = true; 1101 bdrv_drained_begin(bs); 1102 1103 /* Must be zero because we are drained */ 1104 assert(s->in_active_write_counter == 0); 1105 1106 cnt = bdrv_get_dirty_count(s->dirty_bitmap); 1107 if (cnt > 0 || mirror_flush(s) < 0) { 1108 bdrv_drained_end(bs); 1109 s->in_drain = false; 1110 continue; 1111 } 1112 1113 /* The two disks are in sync. Exit and report successful 1114 * completion. 1115 */ 1116 assert(QLIST_EMPTY(&bs->tracked_requests)); 1117 need_drain = false; 1118 break; 1119 } 1120 1121 if (job_is_ready(&s->common.job) && !should_complete) { 1122 if (s->in_flight == 0 && cnt == 0) { 1123 trace_mirror_before_sleep(s, cnt, job_is_ready(&s->common.job), 1124 BLOCK_JOB_SLICE_TIME); 1125 job_sleep_ns(&s->common.job, BLOCK_JOB_SLICE_TIME); 1126 } 1127 } else { 1128 block_job_ratelimit_sleep(&s->common); 1129 } 1130 s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); 1131 } 1132 1133 immediate_exit: 1134 if (s->in_flight > 0) { 1135 /* We get here only if something went wrong. Either the job failed, 1136 * or it was cancelled prematurely so that we do not guarantee that 1137 * the target is a copy of the source. 1138 */ 1139 assert(ret < 0 || job_is_cancelled(&s->common.job)); 1140 assert(need_drain); 1141 mirror_wait_for_all_io(s); 1142 } 1143 1144 assert(s->in_flight == 0); 1145 qemu_vfree(s->buf); 1146 g_free(s->cow_bitmap); 1147 g_free(s->in_flight_bitmap); 1148 bdrv_dirty_iter_free(s->dbi); 1149 1150 if (need_drain) { 1151 s->in_drain = true; 1152 bdrv_drained_begin(bs); 1153 } 1154 1155 return ret; 1156 } 1157 1158 static void mirror_complete(Job *job, Error **errp) 1159 { 1160 MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job); 1161 1162 if (!job_is_ready(job)) { 1163 error_setg(errp, "The active block job '%s' cannot be completed", 1164 job->id); 1165 return; 1166 } 1167 1168 /* block all operations on to_replace bs */ 1169 if (s->replaces) { 1170 AioContext *replace_aio_context; 1171 1172 s->to_replace = bdrv_find_node(s->replaces); 1173 if (!s->to_replace) { 1174 error_setg(errp, "Node name '%s' not found", s->replaces); 1175 return; 1176 } 1177 1178 replace_aio_context = bdrv_get_aio_context(s->to_replace); 1179 aio_context_acquire(replace_aio_context); 1180 1181 /* TODO Translate this into child freeze system. 
static void mirror_complete(Job *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);

    if (!job_is_ready(job)) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    /* block all operations on to_replace bs */
    if (s->replaces) {
        AioContext *replace_aio_context;

        s->to_replace = bdrv_find_node(s->replaces);
        if (!s->to_replace) {
            error_setg(errp, "Node name '%s' not found", s->replaces);
            return;
        }

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        /* TODO Translate this into child freeze system. */
        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);

        aio_context_release(replace_aio_context);
    }

    s->should_complete = true;

    /* If the job is paused, it will be re-entered when it is resumed */
    WITH_JOB_LOCK_GUARD() {
        if (!job->paused) {
            job_enter_cond_locked(job, NULL);
        }
    }
}

static void coroutine_fn mirror_pause(Job *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);

    mirror_wait_for_all_io(s);
}

static bool mirror_drained_poll(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    /* If the job isn't paused nor cancelled, we can't be sure that it won't
     * issue more requests. We make an exception if we've reached this point
     * from one of our own drain sections, to avoid a deadlock waiting for
     * ourselves.
     */
    WITH_JOB_LOCK_GUARD() {
        if (!s->common.job.paused && !job_is_cancelled_locked(&job->job)
            && !s->in_drain) {
            return true;
        }
    }

    return !!s->in_flight;
}

static bool mirror_cancel(Job *job, bool force)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockDriverState *target = blk_bs(s->target);

    /*
     * Before the job is READY, we treat any cancellation like a
     * force-cancellation.
     */
    force = force || !job_is_ready(job);

    if (force) {
        bdrv_cancel_in_flight(target);
    }
    return force;
}

static bool commit_active_cancel(Job *job, bool force)
{
    /* Same as above in mirror_cancel() */
    return force || !job_is_ready(job);
}

static const BlockJobDriver mirror_job_driver = {
    .job_driver = {
        .instance_size          = sizeof(MirrorBlockJob),
        .job_type               = JOB_TYPE_MIRROR,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .run                    = mirror_run,
        .prepare                = mirror_prepare,
        .abort                  = mirror_abort,
        .pause                  = mirror_pause,
        .complete               = mirror_complete,
        .cancel                 = mirror_cancel,
    },
    .drained_poll           = mirror_drained_poll,
};

static const BlockJobDriver commit_active_job_driver = {
    .job_driver = {
        .instance_size          = sizeof(MirrorBlockJob),
        .job_type               = JOB_TYPE_COMMIT,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .run                    = mirror_run,
        .prepare                = mirror_prepare,
        .abort                  = mirror_abort,
        .pause                  = mirror_pause,
        .complete               = mirror_complete,
        .cancel                 = commit_active_cancel,
    },
    .drained_poll           = mirror_drained_poll,
};

static void coroutine_fn
do_sync_target_write(MirrorBlockJob *job, MirrorMethod method,
                     uint64_t offset, uint64_t bytes,
                     QEMUIOVector *qiov, int flags)
{
    int ret;
    size_t qiov_offset = 0;
    int64_t bitmap_offset, bitmap_end;

    if (!QEMU_IS_ALIGNED(offset, job->granularity) &&
        bdrv_dirty_bitmap_get(job->dirty_bitmap, offset))
    {
        /*
         * Dirty unaligned padding: ignore it.
         *
         * Reasoning:
         * 1. If we copy it, we can't reset corresponding bit in
         *    dirty_bitmap as there may be some "dirty" bytes still not
         *    copied.
         * 2. It's already dirty, so skipping it we don't diverge mirror
         *    progress.
         *
         * Note that, because of this, a guest write may make no contribution
         * to mirror convergence; that is not a problem, as we also have the
         * background mirroring process. If, under some bad circumstances
         * (high guest I/O load), the background process starves, we will not
         * converge anyway, even if every write contributed, as the guest is
         * not guaranteed to rewrite the whole disk.
         */
        qiov_offset = QEMU_ALIGN_UP(offset, job->granularity) - offset;
        if (bytes <= qiov_offset) {
            /* nothing to do after shrink */
            return;
        }
        offset += qiov_offset;
        bytes -= qiov_offset;
    }

    if (!QEMU_IS_ALIGNED(offset + bytes, job->granularity) &&
        bdrv_dirty_bitmap_get(job->dirty_bitmap, offset + bytes - 1))
    {
        uint64_t tail = (offset + bytes) % job->granularity;

        if (bytes <= tail) {
            /* nothing to do after shrink */
            return;
        }
        bytes -= tail;
    }

    /*
     * Tails are either clean or shrunk, so for bitmap resetting
     * we safely align the range down.
     */
    bitmap_offset = QEMU_ALIGN_UP(offset, job->granularity);
    bitmap_end = QEMU_ALIGN_DOWN(offset + bytes, job->granularity);
    if (bitmap_offset < bitmap_end) {
        bdrv_reset_dirty_bitmap(job->dirty_bitmap, bitmap_offset,
                                bitmap_end - bitmap_offset);
    }

    job_progress_increase_remaining(&job->common.job, bytes);
    job->active_write_bytes_in_flight += bytes;

    switch (method) {
    case MIRROR_METHOD_COPY:
        ret = blk_co_pwritev_part(job->target, offset, bytes,
                                  qiov, qiov_offset, flags);
        break;

    case MIRROR_METHOD_ZERO:
        assert(!qiov);
        ret = blk_co_pwrite_zeroes(job->target, offset, bytes, flags);
        break;

    case MIRROR_METHOD_DISCARD:
        assert(!qiov);
        ret = blk_co_pdiscard(job->target, offset, bytes);
        break;

    default:
        abort();
    }

    job->active_write_bytes_in_flight -= bytes;
    if (ret >= 0) {
        job_progress_update(&job->common.job, bytes);
    } else {
        BlockErrorAction action;

        /*
         * We failed, so we should mark dirty the whole area, aligned up.
         * Note that we don't care about shrunk tails if any: they were dirty
         * at function start, and they must be still dirty, as we've locked
         * the region for in-flight op.
         */
        bitmap_offset = QEMU_ALIGN_DOWN(offset, job->granularity);
        bitmap_end = QEMU_ALIGN_UP(offset + bytes, job->granularity);
        bdrv_set_dirty_bitmap(job->dirty_bitmap, bitmap_offset,
                              bitmap_end - bitmap_offset);
        job->actively_synced = false;

        action = mirror_error_action(job, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT) {
            if (!job->ret) {
                job->ret = ret;
            }
        }
    }
}

static MirrorOp *coroutine_fn active_write_prepare(MirrorBlockJob *s,
                                                   uint64_t offset,
                                                   uint64_t bytes)
{
    MirrorOp *op;
    uint64_t start_chunk = offset / s->granularity;
    uint64_t end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity);

    op = g_new(MirrorOp, 1);
    *op = (MirrorOp){
        .s                  = s,
        .offset             = offset,
        .bytes              = bytes,
        .is_active_write    = true,
        .is_in_flight       = true,
        .co                 = qemu_coroutine_self(),
    };
    qemu_co_queue_init(&op->waiting_requests);
    QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);

    s->in_active_write_counter++;

    /*
     * Wait for concurrent requests affecting the area.  If there are already
     * running requests that are copying off now-to-be stale data in the area,
     * we must wait for them to finish before we begin writing fresh data to
     * the target so that the write operations appear in the correct order.
     * Note that background requests (see mirror_iteration()) in contrast only
     * wait for conflicting requests at the start of the dirty area, and then
     * (based on the in_flight_bitmap) truncate the area to copy so it will
     * not conflict with any requests beyond that.  For active writes, however,
     * we cannot truncate that area.  The request from our parent must be
     * blocked until the area is copied in full.  Therefore, we must wait for
     * the whole area to become free of concurrent requests.
     */
    mirror_wait_on_conflicts(op, s, offset, bytes);

    bitmap_set(s->in_flight_bitmap, start_chunk, end_chunk - start_chunk);

    return op;
}

static void coroutine_fn GRAPH_RDLOCK active_write_settle(MirrorOp *op)
{
    uint64_t start_chunk = op->offset / op->s->granularity;
    uint64_t end_chunk = DIV_ROUND_UP(op->offset + op->bytes,
                                      op->s->granularity);

    if (!--op->s->in_active_write_counter && op->s->actively_synced) {
        BdrvChild *source = op->s->mirror_top_bs->backing;

        if (QLIST_FIRST(&source->bs->parents) == source &&
            QLIST_NEXT(source, next_parent) == NULL)
        {
            /* Assert that we are back in sync once all active write
             * operations are settled.
             * Note that we can only assert this if the mirror node
             * is the source node's only parent. */
            assert(!bdrv_get_dirty_count(op->s->dirty_bitmap));
        }
    }
    bitmap_clear(op->s->in_flight_bitmap, start_chunk, end_chunk - start_chunk);
    QTAILQ_REMOVE(&op->s->ops_in_flight, op, next);
    qemu_co_queue_restart_all(&op->waiting_requests);
    g_free(op);
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_mirror_top_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
                       QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_mirror_top_do_write(BlockDriverState *bs, MirrorMethod method,
                         uint64_t offset, uint64_t bytes, QEMUIOVector *qiov,
                         int flags)
{
    MirrorOp *op = NULL;
    MirrorBDSOpaque *s = bs->opaque;
    int ret = 0;
    bool copy_to_target = false;

    if (s->job) {
        copy_to_target = s->job->ret >= 0 &&
                         !job_is_cancelled(&s->job->common.job) &&
                         s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING;
    }

    if (copy_to_target) {
        op = active_write_prepare(s->job, offset, bytes);
    }

    switch (method) {
    case MIRROR_METHOD_COPY:
        ret = bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags);
        break;

    case MIRROR_METHOD_ZERO:
        ret = bdrv_co_pwrite_zeroes(bs->backing, offset, bytes, flags);
        break;

    case MIRROR_METHOD_DISCARD:
        ret = bdrv_co_pdiscard(bs->backing, offset, bytes);
        break;

    default:
        abort();
    }

    if (ret < 0) {
        goto out;
    }

    if (copy_to_target) {
        do_sync_target_write(s->job, method, offset, bytes, qiov, flags);
    }

out:
    if (copy_to_target) {
        active_write_settle(op);
    }
    return ret;
}

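/* Write path of the mirror_top filter.  In write-blocking mode the guest
 * data is copied into a bounce buffer first, since the guest may modify its
 * buffer while the synchronous copy to the target is still in flight. */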
static int coroutine_fn GRAPH_RDLOCK
bdrv_mirror_top_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
                        QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    MirrorBDSOpaque *s = bs->opaque;
    QEMUIOVector bounce_qiov;
    void *bounce_buf;
    int ret = 0;
    bool copy_to_target = false;

    if (s->job) {
        copy_to_target = s->job->ret >= 0 &&
                         !job_is_cancelled(&s->job->common.job) &&
                         s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING;
    }

    if (copy_to_target) {
        /* The guest might concurrently modify the data to write; but
         * the data on source and destination must match, so we have
         * to use a bounce buffer if we are going to write to the
         * target now. */
        bounce_buf = qemu_blockalign(bs, bytes);
        iov_to_buf_full(qiov->iov, qiov->niov, 0, bounce_buf, bytes);

        qemu_iovec_init(&bounce_qiov, 1);
        qemu_iovec_add(&bounce_qiov, bounce_buf, bytes);
        qiov = &bounce_qiov;

        flags &= ~BDRV_REQ_REGISTERED_BUF;
    }

    ret = bdrv_mirror_top_do_write(bs, MIRROR_METHOD_COPY, offset, bytes, qiov,
                                   flags);

    if (copy_to_target) {
        qemu_iovec_destroy(&bounce_qiov);
        qemu_vfree(bounce_buf);
    }

    return ret;
}

static int coroutine_fn GRAPH_RDLOCK bdrv_mirror_top_flush(BlockDriverState *bs)
{
    if (bs->backing == NULL) {
        /* we can be here after failed bdrv_append in mirror_start_job */
        return 0;
    }
    return bdrv_co_flush(bs->backing->bs);
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
                              int64_t bytes, BdrvRequestFlags flags)
{
    return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_ZERO, offset, bytes, NULL,
                                    flags);
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_mirror_top_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes)
{
    return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_DISCARD, offset, bytes,
                                    NULL, 0);
}

static void bdrv_mirror_top_refresh_filename(BlockDriverState *bs)
{
    if (bs->backing == NULL) {
        /* we can be here after failed bdrv_attach_child in
         * bdrv_set_backing_hd */
        return;
    }
    pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
            bs->backing->bs->filename);
}

static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c,
                                       BdrvChildRole role,
                                       BlockReopenQueue *reopen_queue,
                                       uint64_t perm, uint64_t shared,
                                       uint64_t *nperm, uint64_t *nshared)
{
    MirrorBDSOpaque *s = bs->opaque;

    if (s->stop) {
        /*
         * If the job is to be stopped, we do not need to forward
         * anything to the real image.
         */
        *nperm = 0;
        *nshared = BLK_PERM_ALL;
        return;
    }

    bdrv_default_perms(bs, c, role, reopen_queue,
                       perm, shared, nperm, nshared);

    if (s->is_commit) {
        /*
         * For commit jobs, we cannot take CONSISTENT_READ, because
         * that permission is unshared for everything above the base
         * node (except for filters on the base node).
         * We also have to force-share the WRITE permission, or
         * otherwise we would block ourselves at the base node (if
         * writes are blocked for a node, they are also blocked for
         * its backing file).
         * (We could also share RESIZE, because it may be needed for
         * the target if its size is less than the top node's; but
         * bdrv_default_perms_for_cow() automatically shares RESIZE
         * for backing nodes if WRITE is shared, so there is no need
         * to do it here.)
         */
        *nperm &= ~BLK_PERM_CONSISTENT_READ;
        *nshared |= BLK_PERM_WRITE;
    }
}

/* Dummy node that provides consistent read to its users without requiring it
 * from its backing file and that allows writes on the backing file chain. */
static BlockDriver bdrv_mirror_top = {
    .format_name                = "mirror_top",
    .bdrv_co_preadv             = bdrv_mirror_top_preadv,
    .bdrv_co_pwritev            = bdrv_mirror_top_pwritev,
    .bdrv_co_pwrite_zeroes      = bdrv_mirror_top_pwrite_zeroes,
    .bdrv_co_pdiscard           = bdrv_mirror_top_pdiscard,
    .bdrv_co_flush              = bdrv_mirror_top_flush,
    .bdrv_refresh_filename      = bdrv_mirror_top_refresh_filename,
    .bdrv_child_perm            = bdrv_mirror_top_child_perm,

    .is_filter                  = true,
    .filtered_child_is_backing  = true,
};

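/* Common setup for mirror and active commit jobs: insert the mirror_top
 * filter above @bs, create the block job, its dirty bitmap and the target
 * BlockBackend, and attach all involved nodes with suitable permissions. */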
static BlockJob *mirror_start_job(
                             const char *job_id, BlockDriverState *bs,
                             int creation_flags, BlockDriverState *target,
                             const char *replaces, int64_t speed,
                             uint32_t granularity, int64_t buf_size,
                             BlockMirrorBackingMode backing_mode,
                             bool zero_target,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             bool unmap,
                             BlockCompletionFunc *cb,
                             void *opaque,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base,
                             bool auto_complete, const char *filter_node_name,
                             bool is_mirror, MirrorCopyMode copy_mode,
                             Error **errp)
{
    MirrorBlockJob *s;
    MirrorBDSOpaque *bs_opaque;
    BlockDriverState *mirror_top_bs;
    bool target_is_backing;
    uint64_t target_perms, target_shared_perms;
    int ret;

    GLOBAL_STATE_CODE();

    if (granularity == 0) {
        granularity = bdrv_get_default_bitmap_granularity(target);
    }

    assert(is_power_of_2(granularity));

    if (buf_size < 0) {
        error_setg(errp, "Invalid parameter 'buf-size'");
        return NULL;
    }

    if (buf_size == 0) {
        buf_size = DEFAULT_MIRROR_BUF_SIZE;
    }

    if (bdrv_skip_filters(bs) == bdrv_skip_filters(target)) {
        error_setg(errp, "Can't mirror node into itself");
        return NULL;
    }

    target_is_backing = bdrv_chain_contains(bs, target);

    /* In the case of active commit, add dummy driver to provide consistent
     * reads on the top, while disabling it in the intermediate nodes, and make
     * the backing chain writable. */
    mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name,
                                         BDRV_O_RDWR, errp);
    if (mirror_top_bs == NULL) {
        return NULL;
    }
    if (!filter_node_name) {
        mirror_top_bs->implicit = true;
    }

    /* So that we can always drop this node */
    mirror_top_bs->never_freeze = true;

    mirror_top_bs->total_sectors = bs->total_sectors;
    mirror_top_bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED;
    mirror_top_bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED |
                                          BDRV_REQ_NO_FALLBACK;
    bs_opaque = g_new0(MirrorBDSOpaque, 1);
    mirror_top_bs->opaque = bs_opaque;

    bs_opaque->is_commit = target_is_backing;

    bdrv_drained_begin(bs);
    ret = bdrv_append(mirror_top_bs, bs, errp);
    bdrv_drained_end(bs);

    if (ret < 0) {
        bdrv_unref(mirror_top_bs);
        return NULL;
    }

    /* Make sure that the source is not resized while the job is running */
    s = block_job_create(job_id, driver, NULL, mirror_top_bs,
                         BLK_PERM_CONSISTENT_READ,
                         BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
                         BLK_PERM_WRITE, speed,
                         creation_flags, cb, opaque, errp);
    if (!s) {
        goto fail;
    }

    /* The block job now has a reference to this node */
    bdrv_unref(mirror_top_bs);

    s->mirror_top_bs = mirror_top_bs;

    /* No resize for the target either; while the mirror is still running, a
     * consistent read isn't necessarily possible. We could possibly allow
     * writes and graph modifications, though it would likely defeat the
     * purpose of a mirror, so leave them blocked for now.
     *
     * In the case of active commit, things look a bit different, though,
     * because the target is an already populated backing file in active use.
     * We can allow anything except resize there. */

    target_perms = BLK_PERM_WRITE;
    target_shared_perms = BLK_PERM_WRITE_UNCHANGED;

    if (target_is_backing) {
        int64_t bs_size, target_size;
        bs_size = bdrv_getlength(bs);
        if (bs_size < 0) {
            error_setg_errno(errp, -bs_size,
                             "Could not inquire top image size");
            goto fail;
        }

        target_size = bdrv_getlength(target);
        if (target_size < 0) {
            error_setg_errno(errp, -target_size,
                             "Could not inquire base image size");
            goto fail;
        }

        if (target_size < bs_size) {
            target_perms |= BLK_PERM_RESIZE;
        }

        target_shared_perms |= BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE;
    } else if (bdrv_chain_contains(bs, bdrv_skip_filters(target))) {
        /*
         * We may want to allow this in the future, but it would
         * require taking some extra care.
         */
        error_setg(errp, "Cannot mirror to a filter on top of a node in the "
                   "source's backing chain");
        goto fail;
    }

    s->target = blk_new(s->common.job.aio_context,
                        target_perms, target_shared_perms);
    ret = blk_insert_bs(s->target, target, errp);
    if (ret < 0) {
        goto fail;
    }
    if (is_mirror) {
        /* XXX: Mirror target could be a NBD server of target QEMU in the case
         * of non-shared block migration. To allow migration completion, we
         * have to allow "inactivate" of the target BB.  When that happens, we
         * know the job is drained, and the vcpus are stopped, so no write
         * operation will be performed. Block layer already has assertions to
         * ensure that. */
        blk_set_force_allow_inactivate(s->target);
    }
    blk_set_allow_aio_context_change(s->target, true);
    blk_set_disable_request_queuing(s->target, true);

    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->is_none_mode = is_none_mode;
    s->backing_mode = backing_mode;
    s->zero_target = zero_target;
    s->copy_mode = copy_mode;
    s->base = base;
    s->base_overlay = bdrv_find_overlay(bs, base);
    s->granularity = granularity;
    s->buf_size = ROUND_UP(buf_size, granularity);
    s->unmap = unmap;
    if (auto_complete) {
        s->should_complete = true;
    }

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
    if (!s->dirty_bitmap) {
        goto fail;
    }
    if (s->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING) {
        bdrv_disable_dirty_bitmap(s->dirty_bitmap);
    }

    ret = block_job_add_bdrv(&s->common, "source", bs, 0,
                             BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE |
                             BLK_PERM_CONSISTENT_READ,
                             errp);
    if (ret < 0) {
        goto fail;
    }

    /* Required permissions are already taken with blk_new() */
    block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL,
                       &error_abort);

    /* In commit_active_start() all intermediate nodes disappear, so
     * any jobs in them must be blocked */
    if (target_is_backing) {
        BlockDriverState *iter, *filtered_target;
        uint64_t iter_shared_perms;

        /*
         * The topmost node with
         * bdrv_skip_filters(filtered_target) == bdrv_skip_filters(target)
         */
        filtered_target = bdrv_cow_bs(bdrv_find_overlay(bs, target));

        assert(bdrv_skip_filters(filtered_target) ==
               bdrv_skip_filters(target));

        /*
         * XXX BLK_PERM_WRITE needs to be allowed so we don't block
         * ourselves at s->base (if writes are blocked for a node, they are
         * also blocked for its backing file). The other options would be a
         * second filter driver above s->base (== target).
         */
        iter_shared_perms = BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE;

        for (iter = bdrv_filter_or_cow_bs(bs); iter != target;
             iter = bdrv_filter_or_cow_bs(iter))
        {
            if (iter == filtered_target) {
                /*
                 * From here on, all nodes are filters on the base.
                 * This allows us to share BLK_PERM_CONSISTENT_READ.
                 */
                iter_shared_perms |= BLK_PERM_CONSISTENT_READ;
            }

            ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                                     iter_shared_perms, errp);
            if (ret < 0) {
                goto fail;
            }
        }

        if (bdrv_freeze_backing_chain(mirror_top_bs, target, errp) < 0) {
            goto fail;
        }
    }

    QTAILQ_INIT(&s->ops_in_flight);

    trace_mirror_start(bs, s, opaque);
    job_start(&s->common.job);

    return &s->common;

fail:
    if (s) {
        /* Make sure this BDS does not go away until we have completed the
         * graph changes below */
        bdrv_ref(mirror_top_bs);

        g_free(s->replaces);
        blk_unref(s->target);
        bs_opaque->job = NULL;
        if (s->dirty_bitmap) {
            bdrv_release_dirty_bitmap(s->dirty_bitmap);
        }
        job_early_fail(&s->common.job);
    }

    bs_opaque->stop = true;
    bdrv_graph_rdlock_main_loop();
    bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
                             &error_abort);
    bdrv_graph_rdunlock_main_loop();
    bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort);

    bdrv_unref(mirror_top_bs);

    return NULL;
}

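/* Entry point for the mirror job proper (used by QMP drive-mirror and
 * blockdev-mirror); commit_active_start() below is the corresponding entry
 * point for active commit. */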
void mirror_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, const char *replaces,
                  int creation_flags, int64_t speed,
                  uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
                  bool zero_target,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap, const char *filter_node_name,
                  MirrorCopyMode copy_mode, Error **errp)
{
    bool is_none_mode;
    BlockDriverState *base;

    GLOBAL_STATE_CODE();

    if ((mode == MIRROR_SYNC_MODE_INCREMENTAL) ||
        (mode == MIRROR_SYNC_MODE_BITMAP)) {
        error_setg(errp, "Sync mode '%s' not supported",
                   MirrorSyncMode_str(mode));
        return;
    }
    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    base = mode == MIRROR_SYNC_MODE_TOP ? bdrv_backing_chain_next(bs) : NULL;
    mirror_start_job(job_id, bs, creation_flags, target, replaces,
                     speed, granularity, buf_size, backing_mode, zero_target,
                     on_source_error, on_target_error, unmap, NULL, NULL,
                     &mirror_job_driver, is_none_mode, base, false,
                     filter_node_name, true, copy_mode, errp);
}

BlockJob *commit_active_start(const char *job_id, BlockDriverState *bs,
                              BlockDriverState *base, int creation_flags,
                              int64_t speed, BlockdevOnError on_error,
                              const char *filter_node_name,
                              BlockCompletionFunc *cb, void *opaque,
                              bool auto_complete, Error **errp)
{
    bool base_read_only;
    BlockJob *job;

    GLOBAL_STATE_CODE();

    base_read_only = bdrv_is_read_only(base);

    if (base_read_only) {
        if (bdrv_reopen_set_read_only(base, false, errp) < 0) {
            return NULL;
        }
    }

    job = mirror_start_job(
                     job_id, bs, creation_flags, base, NULL, speed, 0, 0,
                     MIRROR_LEAVE_BACKING_CHAIN, false,
                     on_error, on_error, true, cb, opaque,
                     &commit_active_job_driver, false, base, auto_complete,
                     filter_node_name, false, MIRROR_COPY_MODE_BACKGROUND,
                     errp);
    if (!job) {
        goto error_restore_flags;
    }

    return job;

error_restore_flags:
    /* ignore error and errp for bdrv_reopen, because we want to propagate
     * the original error */
    if (base_read_only) {
        bdrv_reopen_set_read_only(base, true, NULL);
    }
    return NULL;
}