/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "trace.h"
#include "block/blockjob.h"
#include "block/block_int.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"

#define SLICE_TIME    100000000ULL /* ns */
#define MAX_IN_FLIGHT 16

/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;

typedef struct MirrorBlockJob {
    BlockJob common;
    RateLimit limit;
    BlockDriverState *target;
    BlockDriverState *base;
    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockdevOnError on_source_error, on_target_error;
    bool synced;
    bool should_complete;
    int64_t sector_num;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    HBitmapIter hbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    unsigned long *in_flight_bitmap;
    int in_flight;
    int sectors_in_flight;
    int ret;
    bool unmap;
} MirrorBlockJob;

typedef struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t sector_num;
    int nb_sectors;
} MirrorOp;

static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->common.bs,
                                      s->on_source_error, true, error);
    } else {
        return block_job_error_action(&s->common, s->target,
                                      s->on_target_error, false, error);
    }
}

static void mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks, sectors_per_chunk;

    trace_mirror_iteration_done(s, op->sector_num, op->nb_sectors, ret);

    s->in_flight--;
    s->sectors_in_flight -= op->nb_sectors;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    chunk_num = op->sector_num / sectors_per_chunk;
    nb_chunks = op->nb_sectors / sectors_per_chunk;
    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        s->common.offset += (uint64_t)op->nb_sectors * BDRV_SECTOR_SIZE;
    }

    qemu_iovec_destroy(&op->qiov);
    g_slice_free(MirrorOp, op);

    /* Enter coroutine when it is not sleeping.  The coroutine sleeps to
     * rate-limit itself.  The coroutine will eventually resume since there is
     * a sleep timeout so don't wake it early.
     */
    if (s->common.busy) {
        qemu_coroutine_enter(s->common.co, NULL);
    }
}
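
/* Worked example for the chunk arithmetic above (illustrative numbers, not
 * mandated by this file): with a 64 KiB granularity,
 * sectors_per_chunk = 65536 >> BDRV_SECTOR_BITS = 65536 >> 9 = 128.  An op
 * that copied sectors [256, 512) therefore has chunk_num = 256 / 128 = 2 and
 * nb_chunks = 256 / 128 = 2, so the bitmap_clear() call above clears
 * in-flight bits 2 and 3.
 */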

static void mirror_write_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }
    mirror_iteration_done(op, ret);
}

static void mirror_read_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
        return;
    }
    bdrv_aio_writev(s->target, op->sector_num, &op->qiov, op->nb_sectors,
                    mirror_write_complete, op);
}
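
/* Summary of one copy operation's callback chain, as implemented above and
 * in mirror_iteration() below:
 *
 *   mirror_iteration()
 *     -> bdrv_aio_readv(source)          read dirty sectors into op->qiov
 *        -> mirror_read_complete()
 *           -> bdrv_aio_writev(target)   write the same sectors to the target
 *              -> mirror_write_complete()
 *                 -> mirror_iteration_done()  recycle buffers, clear
 *                                             in-flight bits
 *
 * (The write-zeroes and discard paths skip the read step.)  A failure at
 * either stage re-dirties the sectors in s->dirty_bitmap so that a later
 * iteration retries them, unless the error action is to report, in which
 * case s->ret makes mirror_run() exit.
 */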

static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->common.bs;
    int nb_sectors, sectors_per_chunk, nb_chunks;
    int64_t end, sector_num, next_chunk, next_sector, hbitmap_next_sector;
    uint64_t delay_ns = 0;
    MirrorOp *op;
    int pnum;
    int64_t ret;

    s->sector_num = hbitmap_iter_next(&s->hbi);
    if (s->sector_num < 0) {
        bdrv_dirty_iter_init(s->dirty_bitmap, &s->hbi);
        s->sector_num = hbitmap_iter_next(&s->hbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(s->sector_num >= 0);
    }

    hbitmap_next_sector = s->sector_num;
    sector_num = s->sector_num;
    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    end = s->bdev_length / BDRV_SECTOR_SIZE;

    /* Extend the QEMUIOVector to include all adjacent blocks that will
     * be copied in this operation.
     *
     * We have to do this if we have no backing file yet in the destination,
     * and the cluster size is very large.  Then we need to do COW ourselves.
     * The first time a cluster is copied, copy it entirely.  Note that,
     * because both the granularity and the cluster size are powers of two,
     * the number of sectors to copy cannot exceed one cluster.
     *
     * We also want to extend the QEMUIOVector to include more adjacent
     * dirty blocks if possible, to limit the number of I/O operations and
     * run efficiently even with a small granularity.
     */
    nb_chunks = 0;
    nb_sectors = 0;
    next_sector = sector_num;
    next_chunk = sector_num / sectors_per_chunk;

    /* Wait for I/O to this cluster (from a previous iteration) to be done. */
    while (test_bit(next_chunk, s->in_flight_bitmap)) {
        trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
        qemu_coroutine_yield();
    }

    do {
        int added_sectors, added_chunks;

        if (!bdrv_get_dirty(source, s->dirty_bitmap, next_sector) ||
            test_bit(next_chunk, s->in_flight_bitmap)) {
            assert(nb_sectors > 0);
            break;
        }

        added_sectors = sectors_per_chunk;
        if (s->cow_bitmap && !test_bit(next_chunk, s->cow_bitmap)) {
            bdrv_round_to_clusters(s->target,
                                   next_sector, added_sectors,
                                   &next_sector, &added_sectors);

            /* On the first iteration, the rounding may make us copy
             * sectors before the first dirty one.
             */
            if (next_sector < sector_num) {
                assert(nb_sectors == 0);
                sector_num = next_sector;
                next_chunk = next_sector / sectors_per_chunk;
            }
        }

        added_sectors = MIN(added_sectors, end - (sector_num + nb_sectors));
        added_chunks = (added_sectors + sectors_per_chunk - 1) / sectors_per_chunk;

        /* When doing COW, it may happen that there is not enough space for
         * a full cluster.  Wait if that is the case.
         */
        while (nb_chunks == 0 && s->buf_free_count < added_chunks) {
            trace_mirror_yield_buf_busy(s, nb_chunks, s->in_flight);
            qemu_coroutine_yield();
        }
        if (s->buf_free_count < nb_chunks + added_chunks) {
            trace_mirror_break_buf_busy(s, nb_chunks, s->in_flight);
            break;
        }

        /* We have enough free space to copy these sectors.  */
        bitmap_set(s->in_flight_bitmap, next_chunk, added_chunks);

        nb_sectors += added_sectors;
        nb_chunks += added_chunks;
        next_sector += added_sectors;
        next_chunk += added_chunks;
        if (!s->synced && s->common.speed) {
            delay_ns = ratelimit_calculate_delay(&s->limit, added_sectors);
        }
    } while (delay_ns == 0 && next_sector < end);

    /* Allocate a MirrorOp that is used as an AIO callback.  */
    op = g_slice_new(MirrorOp);
    op->s = s;
    op->sector_num = sector_num;
    op->nb_sectors = nb_sectors;

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    next_sector = sector_num;
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = (nb_sectors * BDRV_SECTOR_SIZE) - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));

        /* Advance the HBitmapIter in parallel, so that we do not examine
         * the same sector twice.
         */
        if (next_sector > hbitmap_next_sector
            && bdrv_get_dirty(source, s->dirty_bitmap, next_sector)) {
            hbitmap_next_sector = hbitmap_iter_next(&s->hbi);
        }

        next_sector += sectors_per_chunk;
    }

    bdrv_reset_dirty_bitmap(s->dirty_bitmap, sector_num, nb_sectors);

    /* Copy the dirty cluster.  */
    s->in_flight++;
    s->sectors_in_flight += nb_sectors;
    trace_mirror_one_iteration(s, sector_num, nb_sectors);

    ret = bdrv_get_block_status_above(source, NULL, sector_num,
                                      nb_sectors, &pnum);
    if (ret < 0 || pnum < nb_sectors ||
            (ret & BDRV_BLOCK_DATA && !(ret & BDRV_BLOCK_ZERO))) {
        bdrv_aio_readv(source, sector_num, &op->qiov, nb_sectors,
                       mirror_read_complete, op);
    } else if (ret & BDRV_BLOCK_ZERO) {
        bdrv_aio_write_zeroes(s->target, sector_num, op->nb_sectors,
                              s->unmap ? BDRV_REQ_MAY_UNMAP : 0,
                              mirror_write_complete, op);
    } else {
        assert(!(ret & BDRV_BLOCK_DATA));
        bdrv_aio_discard(s->target, sector_num, op->nb_sectors,
                         mirror_write_complete, op);
    }
    return delay_ns;
}
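
/* Example of the COW widening performed in mirror_iteration() (hypothetical
 * sizes): with a 64 KiB granularity and a target whose cluster size is
 * 2 MiB, the first copy touching a cluster is rounded by
 * bdrv_round_to_clusters() to the whole 2 MiB cluster, i.e. 32 chunks of
 * 128 sectors each.  The cow_bitmap then marks the cluster as copied, so
 * later writes to it proceed at plain granularity.  Because both sizes are
 * powers of two, the widened request never crosses a cluster boundary.
 */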

static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}

static void mirror_drain(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        qemu_coroutine_yield();
    }
}
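
/* Sizing note for mirror_free_init() (illustrative numbers, not defaults
 * defined in this file): a 16 MiB buffer split at a 64 KiB granularity
 * yields 16 MiB / 64 KiB = 256 MirrorBuffer chunks on the free list;
 * mirror_iteration() yields once the list runs dry, and
 * mirror_iteration_done() refills it as operations complete.
 */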

typedef struct {
    int ret;
} MirrorExitData;

static void mirror_exit(BlockJob *job, void *opaque)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    MirrorExitData *data = opaque;
    AioContext *replace_aio_context = NULL;

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && data->ret == 0) {
        BlockDriverState *to_replace = s->common.bs;
        if (s->to_replace) {
            to_replace = s->to_replace;
        }
        if (bdrv_get_flags(s->target) != bdrv_get_flags(to_replace)) {
            bdrv_reopen(s->target, bdrv_get_flags(to_replace), NULL);
        }
        bdrv_swap(s->target, to_replace);
        if (s->common.driver->job_type == BLOCK_JOB_TYPE_COMMIT) {
            /* drop the bs loop chain formed by the swap: break the loop then
             * trigger the unref from the top one */
            BlockDriverState *p = s->base->backing_hd;
            bdrv_set_backing_hd(s->base, NULL);
            bdrv_unref(p);
        }
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_unref(s->target);
    block_job_completed(&s->common, data->ret);
    g_free(data);
}

static void coroutine_fn mirror_run(void *opaque)
{
    MirrorBlockJob *s = opaque;
    MirrorExitData *data;
    BlockDriverState *bs = s->common.bs;
    int64_t sector_num, end, sectors_per_chunk, length;
    uint64_t last_pause_ns;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for an empty string */
    int ret = 0;
    int n;

    if (block_job_is_cancelled(&s->common)) {
        goto immediate_exit;
    }

    s->bdev_length = bdrv_getlength(bs);
    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    } else if (s->bdev_length == 0) {
        /* Report BLOCK_JOB_READY and wait for complete. */
        block_job_event_ready(&s->common);
        s->synced = true;
        while (!block_job_is_cancelled(&s->common) && !s->should_complete) {
            block_job_yield(&s->common);
        }
        s->common.cancelled = false;
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW.  Instead, we copy sectors around the
     * dirty data if needed.  We need a bitmap to do that.
     */
    bdrv_get_backing_filename(s->target, backing_filename,
                              sizeof(backing_filename));
    if (backing_filename[0] && !s->target->backing_hd) {
        ret = bdrv_get_info(s->target, &bdi);
        if (ret < 0) {
            goto immediate_exit;
        }
        if (s->granularity < bdi.cluster_size) {
            s->buf_size = MAX(s->buf_size, bdi.cluster_size);
            s->cow_bitmap = bitmap_new(length);
        }
    }

    end = s->bdev_length / BDRV_SECTOR_SIZE;
    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    mirror_free_init(s);

    if (!s->is_none_mode) {
        /* First part, loop on the sectors and initialize the dirty bitmap.  */
        BlockDriverState *base = s->base;
        for (sector_num = 0; sector_num < end; ) {
            int64_t next = (sector_num | (sectors_per_chunk - 1)) + 1;
            ret = bdrv_is_allocated_above(bs, base,
                                          sector_num, next - sector_num, &n);

            if (ret < 0) {
                goto immediate_exit;
            }

            assert(n > 0);
            if (ret == 1) {
                bdrv_set_dirty_bitmap(s->dirty_bitmap, sector_num, n);
                sector_num = next;
            } else {
                sector_num += n;
            }
        }
    }

    bdrv_dirty_iter_init(s->dirty_bitmap, &s->hbi);
    last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt;
        bool should_complete;

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* s->common.offset contains the number of bytes already processed so
         * far, cnt is the number of dirty sectors remaining and
         * s->sectors_in_flight is the number of sectors currently being
         * processed; together those are the current total operation length */
        s->common.len = s->common.offset +
                        (cnt + s->sectors_in_flight) * BDRV_SECTOR_SIZE;
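
        /* Illustrative numbers for the computation above: with
         * offset = 1 GiB already copied, cnt = 2048 dirty sectors and 512
         * sectors in flight, len is reported as
         * 1 GiB + (2048 + 512) * 512 = 1 GiB + 1.25 MiB, so offset/len
         * stays a meaningful progress ratio even while the dirty count
         * grows and shrinks.
         */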

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every SLICE_TIME nanoseconds, or when there is an error,
         * or when the source is clean, whichever comes first.
         */
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - last_pause_ns < SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight == MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, s->in_flight, s->buf_free_count, cnt);
                qemu_coroutine_yield();
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            ret = bdrv_flush(s->target);
            if (ret < 0) {
                if (mirror_error_action(s, false, -ret) ==
                    BLOCK_ERROR_ACTION_REPORT) {
                    goto immediate_exit;
                }
            } else {
                /* We're out of the streaming phase.  From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion.  This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                if (!s->synced) {
                    block_job_event_ready(&s->common);
                    s->synced = true;
                }

                should_complete = s->should_complete ||
                    block_job_is_cancelled(&s->common);
                cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            }
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_iteration runs.
             */
            trace_mirror_before_drain(s, cnt);
            bdrv_drain(bs);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        ret = 0;
        trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
        if (!s->synced) {
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
            if (block_job_is_cancelled(&s->common)) {
                break;
            }
        } else if (!should_complete) {
            delay_ns = (s->in_flight == 0 && cnt == 0 ? SLICE_TIME : 0);
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
        } else if (cnt == 0) {
            /* The two disks are in sync.  Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.cancelled = false;
            break;
        }
        last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong.  Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || (!s->synced && block_job_is_cancelled(&s->common)));
        mirror_drain(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_release_dirty_bitmap(bs, s->dirty_bitmap);
    bdrv_iostatus_disable(s->target);

    data = g_malloc(sizeof(*data));
    data->ret = ret;
    block_job_defer_to_main_loop(&s->common, mirror_exit, data);
}
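
/* Rate-limit arithmetic used by mirror_set_speed() below (illustrative
 * numbers): a speed of 10 MiB/s becomes 10485760 / BDRV_SECTOR_SIZE =
 * 20480 sectors per second, which ratelimit_calculate_delay() then spreads
 * over SLICE_TIME (100 ms) slices inside mirror_iteration().
 */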

static void mirror_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    if (speed < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }
    ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
}

static void mirror_iostatus_reset(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    bdrv_iostatus_reset(s->target);
}

static void mirror_complete(BlockJob *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    Error *local_err = NULL;
    int ret;

    ret = bdrv_open_backing_file(s->target, NULL, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        return;
    }
    if (!s->synced) {
        error_setg(errp, QERR_BLOCK_JOB_NOT_READY,
                   bdrv_get_device_name(job->bs));
        return;
    }

    /* check the target bs is not blocked and block all operations on it */
    if (s->replaces) {
        AioContext *replace_aio_context;

        s->to_replace = check_to_replace_node(s->replaces, &local_err);
        if (!s->to_replace) {
            error_propagate(errp, local_err);
            return;
        }

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);

        aio_context_release(replace_aio_context);
    }

    s->should_complete = true;
    block_job_enter(&s->common);
}

static const BlockJobDriver mirror_job_driver = {
    .instance_size  = sizeof(MirrorBlockJob),
    .job_type       = BLOCK_JOB_TYPE_MIRROR,
    .set_speed      = mirror_set_speed,
    .iostatus_reset = mirror_iostatus_reset,
    .complete       = mirror_complete,
};

static const BlockJobDriver commit_active_job_driver = {
    .instance_size  = sizeof(MirrorBlockJob),
    .job_type       = BLOCK_JOB_TYPE_COMMIT,
    .set_speed      = mirror_set_speed,
    .iostatus_reset = mirror_iostatus_reset,
    .complete       = mirror_complete,
};

static void mirror_start_job(BlockDriverState *bs, BlockDriverState *target,
                             const char *replaces,
                             int64_t speed, uint32_t granularity,
                             int64_t buf_size,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             bool unmap,
                             BlockCompletionFunc *cb,
                             void *opaque, Error **errp,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base)
{
    MirrorBlockJob *s;

    if (granularity == 0) {
        granularity = bdrv_get_default_bitmap_granularity(target);
    }

    assert((granularity & (granularity - 1)) == 0);

    if ((on_source_error == BLOCKDEV_ON_ERROR_STOP ||
         on_source_error == BLOCKDEV_ON_ERROR_ENOSPC) &&
        !bdrv_iostatus_is_enabled(bs)) {
        error_setg(errp, QERR_INVALID_PARAMETER, "on-source-error");
        return;
    }

    s = block_job_create(driver, bs, speed, cb, opaque, errp);
    if (!s) {
        return;
    }

    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->target = target;
    s->is_none_mode = is_none_mode;
    s->base = base;
    s->granularity = granularity;
    s->buf_size = MAX(buf_size, granularity);
    s->unmap = unmap;

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
    if (!s->dirty_bitmap) {
        g_free(s->replaces);
        block_job_release(bs);
        return;
    }
    bdrv_set_enable_write_cache(s->target, true);
    bdrv_set_on_error(s->target, on_target_error, on_target_error);
    bdrv_iostatus_enable(s->target);
    s->common.co = qemu_coroutine_create(mirror_run);
    trace_mirror_start(bs, s, s->common.co, opaque);
    qemu_coroutine_enter(s->common.co, s);
}

void mirror_start(BlockDriverState *bs, BlockDriverState *target,
                  const char *replaces,
                  int64_t speed, uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap,
                  BlockCompletionFunc *cb,
                  void *opaque, Error **errp)
{
    bool is_none_mode;
    BlockDriverState *base;

    if (mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        error_setg(errp, "Sync mode 'incremental' not supported");
        return;
    }
    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    base = mode == MIRROR_SYNC_MODE_TOP ? bs->backing_hd : NULL;
    mirror_start_job(bs, target, replaces,
                     speed, granularity, buf_size,
                     on_source_error, on_target_error, unmap, cb, opaque, errp,
                     &mirror_job_driver, is_none_mode, base);
}
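
/* How mirror_start() maps sync modes onto mirror_start_job() arguments
 * (restating the code above):
 *
 *   MIRROR_SYNC_MODE_NONE -> is_none_mode = true,  base = NULL
 *                            (mirror only writes made after the job starts)
 *   MIRROR_SYNC_MODE_TOP  -> is_none_mode = false, base = bs->backing_hd
 *                            (copy sectors allocated in the top image)
 *   MIRROR_SYNC_MODE_FULL -> is_none_mode = false, base = NULL
 *                            (copy the whole visible disk)
 *
 * MIRROR_SYNC_MODE_INCREMENTAL is rejected before the job is created.
 */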

void commit_active_start(BlockDriverState *bs, BlockDriverState *base,
                         int64_t speed,
                         BlockdevOnError on_error,
                         BlockCompletionFunc *cb,
                         void *opaque, Error **errp)
{
    int64_t length, base_length;
    int orig_base_flags;
    int ret;
    Error *local_err = NULL;

    orig_base_flags = bdrv_get_flags(base);

    if (bdrv_reopen(base, bs->open_flags, errp)) {
        return;
    }

    length = bdrv_getlength(bs);
    if (length < 0) {
        error_setg_errno(errp, -length,
                         "Unable to determine length of %s", bs->filename);
        goto error_restore_flags;
    }

    base_length = bdrv_getlength(base);
    if (base_length < 0) {
        error_setg_errno(errp, -base_length,
                         "Unable to determine length of %s", base->filename);
        goto error_restore_flags;
    }

    if (length > base_length) {
        ret = bdrv_truncate(base, length);
        if (ret < 0) {
            error_setg_errno(errp, -ret,
                             "Top image %s is larger than base image %s, and "
                             "resize of base image failed",
                             bs->filename, base->filename);
            goto error_restore_flags;
        }
    }

    bdrv_ref(base);
    mirror_start_job(bs, base, NULL, speed, 0, 0,
                     on_error, on_error, false, cb, opaque, &local_err,
                     &commit_active_job_driver, false, base);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error_restore_flags;
    }

    return;

error_restore_flags:
    /* ignore error and errp for bdrv_reopen, because we want to propagate
     * the original error */
    bdrv_reopen(base, orig_base_flags, NULL);
    return;
}