/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "trace.h"
#include "block/blockjob.h"
#include "block/block_int.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"

#define SLICE_TIME    100000000ULL /* ns */
#define MAX_IN_FLIGHT 16

/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;

typedef struct MirrorBlockJob {
    BlockJob common;
    RateLimit limit;
    BlockDriverState *target;
    BlockDriverState *base;
    bool is_none_mode;
    BlockdevOnError on_source_error, on_target_error;
    bool synced;
    bool should_complete;
    int64_t sector_num;
    int64_t granularity;
    size_t buf_size;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    HBitmapIter hbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    unsigned long *in_flight_bitmap;
    int in_flight;
    int ret;
} MirrorBlockJob;

typedef struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t sector_num;
    int nb_sectors;
} MirrorOp;

static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->common.bs,
                                      s->on_source_error, true, error);
    } else {
        return block_job_error_action(&s->common, s->target,
                                      s->on_target_error, false, error);
    }
}

static void mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks, sectors_per_chunk;

    trace_mirror_iteration_done(s, op->sector_num, op->nb_sectors, ret);

    s->in_flight--;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    chunk_num = op->sector_num / sectors_per_chunk;
    nb_chunks = op->nb_sectors / sectors_per_chunk;
    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    if (s->cow_bitmap && ret >= 0) {
        bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
    }

    qemu_iovec_destroy(&op->qiov);
    g_slice_free(MirrorOp, op);

    /* Enter coroutine when it is not sleeping.  The coroutine sleeps to
     * rate-limit itself.  The coroutine will eventually resume since there is
     * a sleep timeout so don't wake it early.
     */
    if (s->common.busy) {
        qemu_coroutine_enter(s->common.co, NULL);
    }
}
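
/* AIO completion callback for the target write started from
 * mirror_read_complete().  On failure, re-mark the affected sectors dirty
 * in the source so a later iteration retries the copy.
 */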
static void mirror_write_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    if (ret < 0) {
        BlockDriverState *source = s->common.bs;
        BlockErrorAction action;

        bdrv_set_dirty(source, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }
    mirror_iteration_done(op, ret);
}

static void mirror_read_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    if (ret < 0) {
        BlockDriverState *source = s->common.bs;
        BlockErrorAction action;

        bdrv_set_dirty(source, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
        return;
    }
    bdrv_aio_writev(s->target, op->sector_num, &op->qiov, op->nb_sectors,
                    mirror_write_complete, op);
}
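
/* Submit one copy operation: take the next dirty sector from the dirty
 * bitmap iterator, widen the range to cover whole target clusters (for COW)
 * and adjacent dirty chunks, then start an asynchronous read into buffers
 * taken from s->buf_free.  Returns the delay, in nanoseconds, that the
 * caller should sleep for rate limiting (0 means no delay).
 *
 * With 512-byte sectors (BDRV_SECTOR_BITS == 9) and the default 64 KiB
 * granularity, one chunk is 65536 >> 9 = 128 sectors.
 */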
static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->common.bs;
    int nb_sectors, sectors_per_chunk, nb_chunks;
    int64_t end, sector_num, next_chunk, next_sector, hbitmap_next_sector;
    uint64_t delay_ns;
    MirrorOp *op;

    s->sector_num = hbitmap_iter_next(&s->hbi);
    if (s->sector_num < 0) {
        bdrv_dirty_iter_init(source, s->dirty_bitmap, &s->hbi);
        s->sector_num = hbitmap_iter_next(&s->hbi);
        trace_mirror_restart_iter(s,
                                  bdrv_get_dirty_count(source, s->dirty_bitmap));
        assert(s->sector_num >= 0);
    }

    hbitmap_next_sector = s->sector_num;
    sector_num = s->sector_num;
    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    end = s->common.len >> BDRV_SECTOR_BITS;

    /* Extend the QEMUIOVector to include all adjacent blocks that will
     * be copied in this operation.
     *
     * We have to do this if we have no backing file yet in the destination,
     * and the cluster size is very large.  Then we need to do COW ourselves.
     * The first time a cluster is copied, copy it entirely.  Note that,
     * because both the granularity and the cluster size are powers of two,
     * the number of sectors to copy cannot exceed one cluster.
     *
     * We also want to extend the QEMUIOVector to include more adjacent
     * dirty blocks if possible, to limit the number of I/O operations and
     * run efficiently even with a small granularity.
     */
    nb_chunks = 0;
    nb_sectors = 0;
    next_sector = sector_num;
    next_chunk = sector_num / sectors_per_chunk;

    /* Wait for I/O to this cluster (from a previous iteration) to be done. */
    while (test_bit(next_chunk, s->in_flight_bitmap)) {
        trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
        qemu_coroutine_yield();
    }

    do {
        int added_sectors, added_chunks;

        if (!bdrv_get_dirty(source, s->dirty_bitmap, next_sector) ||
            test_bit(next_chunk, s->in_flight_bitmap)) {
            assert(nb_sectors > 0);
            break;
        }

        added_sectors = sectors_per_chunk;
        if (s->cow_bitmap && !test_bit(next_chunk, s->cow_bitmap)) {
            bdrv_round_to_clusters(s->target,
                                   next_sector, added_sectors,
                                   &next_sector, &added_sectors);

            /* On the first iteration, the rounding may make us copy
             * sectors before the first dirty one.
             */
            if (next_sector < sector_num) {
                assert(nb_sectors == 0);
                sector_num = next_sector;
                next_chunk = next_sector / sectors_per_chunk;
            }
        }

        added_sectors = MIN(added_sectors, end - (sector_num + nb_sectors));
        added_chunks = (added_sectors + sectors_per_chunk - 1) / sectors_per_chunk;

        /* When doing COW, it may happen that there is not enough space for
         * a full cluster.  Wait if that is the case.
         */
        while (nb_chunks == 0 && s->buf_free_count < added_chunks) {
            trace_mirror_yield_buf_busy(s, nb_chunks, s->in_flight);
            qemu_coroutine_yield();
        }
        if (s->buf_free_count < nb_chunks + added_chunks) {
            trace_mirror_break_buf_busy(s, nb_chunks, s->in_flight);
            break;
        }

        /* We have enough free space to copy these sectors. */
        bitmap_set(s->in_flight_bitmap, next_chunk, added_chunks);

        nb_sectors += added_sectors;
        nb_chunks += added_chunks;
        next_sector += added_sectors;
        next_chunk += added_chunks;
        if (!s->synced && s->common.speed) {
            delay_ns = ratelimit_calculate_delay(&s->limit, added_sectors);
        } else {
            delay_ns = 0;
        }
    } while (delay_ns == 0 && next_sector < end);

    /* Allocate a MirrorOp that is used as an AIO callback. */
    op = g_slice_new(MirrorOp);
    op->s = s;
    op->sector_num = sector_num;
    op->nb_sectors = nb_sectors;

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    next_sector = sector_num;
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, s->granularity);

        /* Advance the HBitmapIter in parallel, so that we do not examine
         * the same sector twice.
         */
        if (next_sector > hbitmap_next_sector
            && bdrv_get_dirty(source, s->dirty_bitmap, next_sector)) {
            hbitmap_next_sector = hbitmap_iter_next(&s->hbi);
        }

        next_sector += sectors_per_chunk;
    }

    bdrv_reset_dirty(source, sector_num, nb_sectors);

    /* Copy the dirty cluster. */
    s->in_flight++;
    trace_mirror_one_iteration(s, sector_num, nb_sectors);
    bdrv_aio_readv(source, sector_num, &op->qiov, nb_sectors,
                   mirror_read_complete, op);
    return delay_ns;
}
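
/* Carve s->buf into granularity-sized chunks and put them all on the
 * buf_free list.  (The loop assumes buf_size is a multiple of the
 * granularity; otherwise the unsigned subtraction would wrap.)
 */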
static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}

static void mirror_drain(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        qemu_coroutine_yield();
    }
}
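
/* The body of the mirror job coroutine.  It initializes the dirty bitmap
 * (unless sync mode is "none"), then loops: submit copy operations while
 * there is dirty data, flush and report readiness once source and target
 * converge, and finally complete when the job is cancelled or
 * block-job-complete was requested.
 */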
static void coroutine_fn mirror_run(void *opaque)
{
    MirrorBlockJob *s = opaque;
    BlockDriverState *bs = s->common.bs;
    int64_t sector_num, end, sectors_per_chunk, length;
    uint64_t last_pause_ns;
    BlockDriverInfo bdi;
    char backing_filename[1024];
    int ret = 0;
    int n;

    if (block_job_is_cancelled(&s->common)) {
        goto immediate_exit;
    }

    s->common.len = bdrv_getlength(bs);
    if (s->common.len <= 0) {
        ret = s->common.len;
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->common.len, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW.  Instead, we copy sectors around the
     * dirty data if needed.  We need a bitmap to do that.
     */
    bdrv_get_backing_filename(s->target, backing_filename,
                              sizeof(backing_filename));
    if (backing_filename[0] && !s->target->backing_hd) {
        ret = bdrv_get_info(s->target, &bdi);
        if (ret < 0) {
            goto immediate_exit;
        }
        if (s->granularity < bdi.cluster_size) {
            s->buf_size = MAX(s->buf_size, bdi.cluster_size);
            s->cow_bitmap = bitmap_new(length);
        }
    }

    end = s->common.len >> BDRV_SECTOR_BITS;
    s->buf = qemu_blockalign(bs, s->buf_size);
    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    mirror_free_init(s);

    if (!s->is_none_mode) {
        /* First part, loop on the sectors and initialize the dirty bitmap.  */
        BlockDriverState *base = s->base;
        for (sector_num = 0; sector_num < end; ) {
            int64_t next = (sector_num | (sectors_per_chunk - 1)) + 1;
            ret = bdrv_is_allocated_above(bs, base,
                                          sector_num, next - sector_num, &n);

            if (ret < 0) {
                goto immediate_exit;
            }

            assert(n > 0);
            if (ret == 1) {
                bdrv_set_dirty(bs, sector_num, n);
                sector_num = next;
            } else {
                sector_num += n;
            }
        }
    }

    bdrv_dirty_iter_init(bs, s->dirty_bitmap, &s->hbi);
    last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt;
        bool should_complete;

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        cnt = bdrv_get_dirty_count(bs, s->dirty_bitmap);

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that qemu_aio_flush() returns.
         * We do so every SLICE_TIME nanoseconds, or when there is an error,
         * or when the source is clean, whichever comes first.
         */
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - last_pause_ns < SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight == MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, s->in_flight, s->buf_free_count, cnt);
                qemu_coroutine_yield();
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
                if (delay_ns == 0) {
                    continue;
                }
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            ret = bdrv_flush(s->target);
            if (ret < 0) {
                if (mirror_error_action(s, false, -ret) ==
                    BLOCK_ERROR_ACTION_REPORT) {
                    goto immediate_exit;
                }
            } else {
                /* We're out of the streaming phase.  From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion.  This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                s->common.offset = end * BDRV_SECTOR_SIZE;
                if (!s->synced) {
                    block_job_event_ready(&s->common);
                    s->synced = true;
                }

                should_complete = s->should_complete ||
                    block_job_is_cancelled(&s->common);
                cnt = bdrv_get_dirty_count(bs, s->dirty_bitmap);
            }
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs.
             */
            trace_mirror_before_drain(s, cnt);
            bdrv_drain_all();
            cnt = bdrv_get_dirty_count(bs, s->dirty_bitmap);
        }

        ret = 0;
        trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
        if (!s->synced) {
            /* Publish progress */
            s->common.offset = (end - cnt) * BDRV_SECTOR_SIZE;
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
            if (block_job_is_cancelled(&s->common)) {
                break;
            }
        } else if (!should_complete) {
            delay_ns = (s->in_flight == 0 && cnt == 0 ? SLICE_TIME : 0);
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
        } else if (cnt == 0) {
            /* The two disks are in sync.  Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.cancelled = false;
            break;
        }
        last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong.  Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || (!s->synced && block_job_is_cancelled(&s->common)));
        mirror_drain(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_release_dirty_bitmap(bs, s->dirty_bitmap);
    bdrv_iostatus_disable(s->target);
    if (s->should_complete && ret == 0) {
        if (bdrv_get_flags(s->target) != bdrv_get_flags(s->common.bs)) {
            bdrv_reopen(s->target, bdrv_get_flags(s->common.bs), NULL);
        }
        bdrv_swap(s->target, s->common.bs);
        if (s->common.driver->job_type == BLOCK_JOB_TYPE_COMMIT) {
            /* drop the bs loop chain formed by the swap: break the loop then
             * trigger the unref from the top one */
            BlockDriverState *p = s->base->backing_hd;
            bdrv_set_backing_hd(s->base, NULL);
            bdrv_unref(p);
        }
    }
    bdrv_unref(s->target);
    block_job_completed(&s->common, ret);
}
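
/* Set the job's rate limit.  The user-visible speed is in bytes per second;
 * ratelimit_calculate_delay() is fed sector counts, so convert accordingly.
 */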
static void mirror_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    if (speed < 0) {
        error_set(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }
    ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
}

static void mirror_iostatus_reset(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    bdrv_iostatus_reset(s->target);
}

static void mirror_complete(BlockJob *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    Error *local_err = NULL;
    int ret;

    ret = bdrv_open_backing_file(s->target, NULL, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        return;
    }
    if (!s->synced) {
        error_set(errp, QERR_BLOCK_JOB_NOT_READY, job->bs->device_name);
        return;
    }

    s->should_complete = true;
    block_job_resume(job);
}

static const BlockJobDriver mirror_job_driver = {
    .instance_size  = sizeof(MirrorBlockJob),
    .job_type       = BLOCK_JOB_TYPE_MIRROR,
    .set_speed      = mirror_set_speed,
    .iostatus_reset = mirror_iostatus_reset,
    .complete       = mirror_complete,
};

static const BlockJobDriver commit_active_job_driver = {
    .instance_size  = sizeof(MirrorBlockJob),
    .job_type       = BLOCK_JOB_TYPE_COMMIT,
    .set_speed      = mirror_set_speed,
    .iostatus_reset = mirror_iostatus_reset,
    .complete       = mirror_complete,
};
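
/* Common setup shared by drive-mirror and active commit: pick a default
 * granularity if none was given, validate the error policy, then create
 * the job, its dirty bitmap, and the coroutine that runs mirror_run().
 */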
static void mirror_start_job(BlockDriverState *bs, BlockDriverState *target,
                             int64_t speed, int64_t granularity,
                             int64_t buf_size,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             BlockDriverCompletionFunc *cb,
                             void *opaque, Error **errp,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base)
{
    MirrorBlockJob *s;

    if (granularity == 0) {
        /* Choose the default granularity based on the target file's cluster
         * size, clamped between 4k and 64k.
         */
        BlockDriverInfo bdi;
        if (bdrv_get_info(target, &bdi) >= 0 && bdi.cluster_size != 0) {
            granularity = MAX(4096, bdi.cluster_size);
            granularity = MIN(65536, granularity);
        } else {
            granularity = 65536;
        }
    }

    assert((granularity & (granularity - 1)) == 0);

    if ((on_source_error == BLOCKDEV_ON_ERROR_STOP ||
         on_source_error == BLOCKDEV_ON_ERROR_ENOSPC) &&
        !bdrv_iostatus_is_enabled(bs)) {
        error_set(errp, QERR_INVALID_PARAMETER, "on-source-error");
        return;
    }

    s = block_job_create(driver, bs, speed, cb, opaque, errp);
    if (!s) {
        return;
    }

    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->target = target;
    s->is_none_mode = is_none_mode;
    s->base = base;
    s->granularity = granularity;
    s->buf_size = MAX(buf_size, granularity);

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, errp);
    if (!s->dirty_bitmap) {
        return;
    }
    bdrv_set_enable_write_cache(s->target, true);
    bdrv_set_on_error(s->target, on_target_error, on_target_error);
    bdrv_iostatus_enable(s->target);
    s->common.co = qemu_coroutine_create(mirror_run);
    trace_mirror_start(bs, s, s->common.co, opaque);
    qemu_coroutine_enter(s->common.co, s);
}

void mirror_start(BlockDriverState *bs, BlockDriverState *target,
                  int64_t speed, int64_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  BlockDriverCompletionFunc *cb,
                  void *opaque, Error **errp)
{
    bool is_none_mode;
    BlockDriverState *base;

    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    base = mode == MIRROR_SYNC_MODE_TOP ? bs->backing_hd : NULL;
    mirror_start_job(bs, target, speed, granularity, buf_size,
                     on_source_error, on_target_error, cb, opaque, errp,
                     &mirror_job_driver, is_none_mode, base);
}

void commit_active_start(BlockDriverState *bs, BlockDriverState *base,
                         int64_t speed,
                         BlockdevOnError on_error,
                         BlockDriverCompletionFunc *cb,
                         void *opaque, Error **errp)
{
    int64_t length, base_length;
    int orig_base_flags;
    int ret;
    Error *local_err = NULL;

    orig_base_flags = bdrv_get_flags(base);

    if (bdrv_reopen(base, bs->open_flags, errp)) {
        return;
    }

    length = bdrv_getlength(bs);
    if (length < 0) {
        error_setg_errno(errp, -length,
                         "Unable to determine length of %s", bs->filename);
        goto error_restore_flags;
    }

    base_length = bdrv_getlength(base);
    if (base_length < 0) {
        error_setg_errno(errp, -base_length,
                         "Unable to determine length of %s", base->filename);
        goto error_restore_flags;
    }

    if (length > base_length) {
        ret = bdrv_truncate(base, length);
        if (ret < 0) {
            error_setg_errno(errp, -ret,
                             "Top image %s is larger than base image %s, and "
                             "resize of base image failed",
                             bs->filename, base->filename);
            goto error_restore_flags;
        }
    }

    bdrv_ref(base);
    mirror_start_job(bs, base, speed, 0, 0,
                     on_error, on_error, cb, opaque, &local_err,
                     &commit_active_job_driver, false, base);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error_restore_flags;
    }

    return;

error_restore_flags:
    /* ignore error and errp for bdrv_reopen, because we want to propagate
     * the original error */
    bdrv_reopen(base, orig_base_flags, NULL);
    return;
}