/*
 * QEMU backup
 *
 * Copyright (C) 2013 Proxmox Server Solutions
 *
 * Authors:
 *  Dietmar Maurer (dietmar@proxmox.com)
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "block/block.h"
#include "block/block_int.h"
#include "block/blockjob_int.h"
#include "block/block_backup.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/cutils.h"
#include "sysemu/block-backend.h"
#include "qemu/bitmap.h"
#include "qemu/error-report.h"

#define BACKUP_CLUSTER_SIZE_DEFAULT (1 << 16)

typedef struct BackupBlockJob {
    BlockJob common;
    BlockBackend *target;
    /* bitmap for sync=incremental */
    BdrvDirtyBitmap *sync_bitmap;
    MirrorSyncMode sync_mode;
    BlockdevOnError on_source_error;
    BlockdevOnError on_target_error;
    CoRwlock flush_rwlock;
    uint64_t len;
    uint64_t bytes_read;
    int64_t cluster_size;
    bool compress;
    NotifierWithReturn before_write;
    QLIST_HEAD(, CowRequest) inflight_reqs;

    HBitmap *copy_bitmap;
    bool use_copy_range;
    int64_t copy_range_size;
} BackupBlockJob;

static const BlockJobDriver backup_job_driver;

/* See if in-flight requests overlap and wait for them to complete */
static void coroutine_fn wait_for_overlapping_requests(BackupBlockJob *job,
                                                       int64_t start,
                                                       int64_t end)
{
    CowRequest *req;
    bool retry;

    do {
        retry = false;
        QLIST_FOREACH(req, &job->inflight_reqs, list) {
            if (end > req->start_byte && start < req->end_byte) {
                qemu_co_queue_wait(&req->wait_queue, NULL);
                retry = true;
                break;
            }
        }
    } while (retry);
}

/* Keep track of an in-flight request */
static void cow_request_begin(CowRequest *req, BackupBlockJob *job,
                              int64_t start, int64_t end)
{
    req->start_byte = start;
    req->end_byte = end;
    qemu_co_queue_init(&req->wait_queue);
    QLIST_INSERT_HEAD(&job->inflight_reqs, req, list);
}

/* Forget about a completed request */
static void cow_request_end(CowRequest *req)
{
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}

/* Copy range to target with a bounce buffer and return the bytes copied.
 * If an error occurred, return a negative error number. */
static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job,
                                                      int64_t start,
                                                      int64_t end,
                                                      bool is_write_notifier,
                                                      bool *error_is_read,
                                                      void **bounce_buffer)
{
    int ret;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockBackend *blk = job->common.blk;
    int nbytes;

    hbitmap_reset(job->copy_bitmap, start / job->cluster_size, 1);
    nbytes = MIN(job->cluster_size, job->len - start);
    if (!*bounce_buffer) {
        *bounce_buffer = blk_blockalign(blk, job->cluster_size);
    }
    iov.iov_base = *bounce_buffer;
    iov.iov_len = nbytes;
    qemu_iovec_init_external(&qiov, &iov, 1);

    ret = blk_co_preadv(blk, start, qiov.size, &qiov,
                        is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0);
    if (ret < 0) {
        trace_backup_do_cow_read_fail(job, start, ret);
        if (error_is_read) {
            *error_is_read = true;
        }
        goto fail;
    }

    if (qemu_iovec_is_zero(&qiov)) {
        ret = blk_co_pwrite_zeroes(job->target, start,
                                   qiov.size, BDRV_REQ_MAY_UNMAP);
    } else {
        ret = blk_co_pwritev(job->target, start,
                             qiov.size, &qiov,
                             job->compress ?
                             BDRV_REQ_WRITE_COMPRESSED : 0);
    }
    if (ret < 0) {
        trace_backup_do_cow_write_fail(job, start, ret);
        if (error_is_read) {
            *error_is_read = false;
        }
        goto fail;
    }

    return nbytes;
fail:
    hbitmap_set(job->copy_bitmap, start / job->cluster_size, 1);
    return ret;
}

/* Copy range to target and return the bytes copied.
 * If an error occurred, return a negative error number. */
static int coroutine_fn backup_cow_with_offload(BackupBlockJob *job,
                                                int64_t start,
                                                int64_t end,
                                                bool is_write_notifier)
{
    int ret;
    int nr_clusters;
    BlockBackend *blk = job->common.blk;
    int nbytes;

    assert(QEMU_IS_ALIGNED(job->copy_range_size, job->cluster_size));
    nbytes = MIN(job->copy_range_size, end - start);
    nr_clusters = DIV_ROUND_UP(nbytes, job->cluster_size);
    hbitmap_reset(job->copy_bitmap, start / job->cluster_size,
                  nr_clusters);
    ret = blk_co_copy_range(blk, start, job->target, start, nbytes,
                            is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0);
    if (ret < 0) {
        trace_backup_do_cow_copy_range_fail(job, start, ret);
        hbitmap_set(job->copy_bitmap, start / job->cluster_size,
                    nr_clusters);
        return ret;
    }

    return nbytes;
}

static int coroutine_fn backup_do_cow(BackupBlockJob *job,
                                      int64_t offset, uint64_t bytes,
                                      bool *error_is_read,
                                      bool is_write_notifier)
{
    CowRequest cow_request;
    int ret = 0;
    int64_t start, end; /* bytes */
    void *bounce_buffer = NULL;

    qemu_co_rwlock_rdlock(&job->flush_rwlock);

    start = QEMU_ALIGN_DOWN(offset, job->cluster_size);
    end = QEMU_ALIGN_UP(bytes + offset, job->cluster_size);

    trace_backup_do_cow_enter(job, start, offset, bytes);

    wait_for_overlapping_requests(job, start, end);
    cow_request_begin(&cow_request, job, start, end);

    while (start < end) {
        if (!hbitmap_get(job->copy_bitmap, start / job->cluster_size)) {
            trace_backup_do_cow_skip(job, start);
            start += job->cluster_size;
            continue; /* already copied */
        }

        trace_backup_do_cow_process(job, start);

        if (job->use_copy_range) {
            ret = backup_cow_with_offload(job, start, end, is_write_notifier);
            if (ret < 0) {
                job->use_copy_range = false;
            }
        }
        if (!job->use_copy_range) {
            ret = backup_cow_with_bounce_buffer(job, start, end, is_write_notifier,
                                                error_is_read, &bounce_buffer);
        }
        if (ret < 0) {
            break;
        }

        /* Publish progress, guest I/O counts as progress too.  Note that the
         * offset field is an opaque progress value, it is not a disk offset.
         */
        start += ret;
        job->bytes_read += ret;
        job_progress_update(&job->common.job, ret);
        ret = 0;
    }

    if (bounce_buffer) {
        qemu_vfree(bounce_buffer);
    }

    cow_request_end(&cow_request);

    trace_backup_do_cow_return(job, offset, bytes, ret);

    qemu_co_rwlock_unlock(&job->flush_rwlock);

    return ret;
}

static int coroutine_fn backup_before_write_notify(
        NotifierWithReturn *notifier,
        void *opaque)
{
    BackupBlockJob *job = container_of(notifier, BackupBlockJob, before_write);
    BdrvTrackedRequest *req = opaque;

    assert(req->bs == blk_bs(job->common.blk));
    assert(QEMU_IS_ALIGNED(req->offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(req->bytes, BDRV_SECTOR_SIZE));

    return backup_do_cow(job, req->offset, req->bytes, NULL, true);
}

static void backup_cleanup_sync_bitmap(BackupBlockJob *job, int ret)
{
    BdrvDirtyBitmap *bm;
    BlockDriverState *bs = blk_bs(job->common.blk);

    if (ret < 0) {
        /* Merge the successor back into the parent, delete nothing. */
        bm = bdrv_reclaim_dirty_bitmap(bs, job->sync_bitmap, NULL);
        assert(bm);
    } else {
        /* Everything is fine, delete this bitmap and install the backup. */
        bm = bdrv_dirty_bitmap_abdicate(bs, job->sync_bitmap, NULL);
        assert(bm);
    }
}

static void backup_commit(Job *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
    if (s->sync_bitmap) {
        backup_cleanup_sync_bitmap(s, 0);
    }
}

static void backup_abort(Job *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
    if (s->sync_bitmap) {
        backup_cleanup_sync_bitmap(s, -1);
    }
}

static void backup_clean(Job *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
    assert(s->target);
    blk_unref(s->target);
    s->target = NULL;
}

static void backup_attached_aio_context(BlockJob *job, AioContext *aio_context)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);

    blk_set_aio_context(s->target, aio_context);
}

void backup_do_checkpoint(BlockJob *job, Error **errp)
{
    BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);
    int64_t len;

    assert(block_job_driver(job) == &backup_job_driver);

    if (backup_job->sync_mode != MIRROR_SYNC_MODE_NONE) {
        error_setg(errp, "The backup job only supports block checkpoint in"
                   " sync=none mode");
        return;
    }

    len = DIV_ROUND_UP(backup_job->len, backup_job->cluster_size);
    hbitmap_set(backup_job->copy_bitmap, 0, len);
}

void backup_wait_for_overlapping_requests(BlockJob *job, int64_t offset,
                                          uint64_t bytes)
{
    BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);
    int64_t start, end;

    assert(block_job_driver(job) == &backup_job_driver);

    start = QEMU_ALIGN_DOWN(offset, backup_job->cluster_size);
    end = QEMU_ALIGN_UP(offset + bytes, backup_job->cluster_size);
    wait_for_overlapping_requests(backup_job, start, end);
}

void backup_cow_request_begin(CowRequest *req, BlockJob *job,
                              int64_t offset, uint64_t bytes)
{
    BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);
    int64_t start, end;

    assert(block_job_driver(job) == &backup_job_driver);

    start = QEMU_ALIGN_DOWN(offset, backup_job->cluster_size);
    end = QEMU_ALIGN_UP(offset + bytes, backup_job->cluster_size);
    cow_request_begin(req, backup_job, start, end);
}

void backup_cow_request_end(CowRequest *req)
{
    cow_request_end(req);
}

static void backup_drain(BlockJob *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);

    /* Need to keep a reference in case blk_drain triggers execution
     * of backup_complete...
     */
    if (s->target) {
        BlockBackend *target = s->target;
        blk_ref(target);
        blk_drain(target);
        blk_unref(target);
    }
}

static BlockErrorAction backup_error_action(BackupBlockJob *job,
                                            bool read, int error)
{
    if (read) {
        return block_job_error_action(&job->common, job->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&job->common, job->on_target_error,
                                      false, error);
    }
}

typedef struct {
    int ret;
} BackupCompleteData;

static void backup_complete(Job *job, void *opaque)
{
    BackupCompleteData *data = opaque;

    job_completed(job, data->ret, NULL);
    g_free(data);
}

static bool coroutine_fn yield_and_check(BackupBlockJob *job)
{
    uint64_t delay_ns;

    if (job_is_cancelled(&job->common.job)) {
        return true;
    }

    /* We need to yield even for delay_ns = 0 so that bdrv_drain_all() can
     * return. Without a yield, the VM would not reboot. */
    delay_ns = block_job_ratelimit_get_delay(&job->common, job->bytes_read);
    job->bytes_read = 0;
    job_sleep_ns(&job->common.job, delay_ns);

    if (job_is_cancelled(&job->common.job)) {
        return true;
    }

    return false;
}

static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
{
    int ret;
    bool error_is_read;
    int64_t cluster;
    HBitmapIter hbi;

    hbitmap_iter_init(&hbi, job->copy_bitmap, 0);
    while ((cluster = hbitmap_iter_next(&hbi, true)) != -1) {
        do {
            if (yield_and_check(job)) {
                return 0;
            }
            ret = backup_do_cow(job, cluster * job->cluster_size,
                                job->cluster_size, &error_is_read, false);
            if (ret < 0 && backup_error_action(job, error_is_read, -ret) ==
                           BLOCK_ERROR_ACTION_REPORT)
            {
                return ret;
            }
        } while (ret < 0);
    }

    return 0;
}

/* init copy_bitmap from sync_bitmap */
static void backup_incremental_init_copy_bitmap(BackupBlockJob *job)
{
    BdrvDirtyBitmapIter *dbi;
    int64_t offset;
    int64_t end = DIV_ROUND_UP(bdrv_dirty_bitmap_size(job->sync_bitmap),
                               job->cluster_size);

    dbi = bdrv_dirty_iter_new(job->sync_bitmap);
    while ((offset = bdrv_dirty_iter_next(dbi)) != -1) {
        int64_t cluster = offset / job->cluster_size;
        int64_t next_cluster;

        offset += bdrv_dirty_bitmap_granularity(job->sync_bitmap);
        if (offset >= bdrv_dirty_bitmap_size(job->sync_bitmap)) {
            hbitmap_set(job->copy_bitmap, cluster, end - cluster);
            break;
        }

        offset = bdrv_dirty_bitmap_next_zero(job->sync_bitmap, offset);
        if (offset == -1) {
            hbitmap_set(job->copy_bitmap, cluster, end - cluster);
            break;
        }

        next_cluster = DIV_ROUND_UP(offset, job->cluster_size);
        hbitmap_set(job->copy_bitmap, cluster, next_cluster - cluster);
        if (next_cluster >= end) {
            break;
        }

        bdrv_set_dirty_iter(dbi, next_cluster * job->cluster_size);
    }

    /* TODO job_progress_set_remaining() would make more sense */
    job_progress_update(&job->common.job,
                        job->len - hbitmap_count(job->copy_bitmap) *
                                   job->cluster_size);

    bdrv_dirty_iter_free(dbi);
}

static void coroutine_fn backup_run(void *opaque)
{
    BackupBlockJob *job = opaque;
    BackupCompleteData *data;
    BlockDriverState *bs = blk_bs(job->common.blk);
    int64_t offset, nb_clusters;
    int ret = 0;

    QLIST_INIT(&job->inflight_reqs);
    qemu_co_rwlock_init(&job->flush_rwlock);

    nb_clusters = DIV_ROUND_UP(job->len, job->cluster_size);
    job_progress_set_remaining(&job->common.job, job->len);

    job->copy_bitmap = hbitmap_alloc(nb_clusters, 0);
    if (job->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        backup_incremental_init_copy_bitmap(job);
    } else {
        hbitmap_set(job->copy_bitmap, 0, nb_clusters);
    }

    job->before_write.notify = backup_before_write_notify;
    bdrv_add_before_write_notifier(bs, &job->before_write);

    if (job->sync_mode == MIRROR_SYNC_MODE_NONE) {
        /* All bits are set in copy_bitmap to allow any cluster to be copied.
         * This does not actually require them to be copied. */
        while (!job_is_cancelled(&job->common.job)) {
            /* Yield until the job is cancelled.  We just let our before_write
             * notify callback service CoW requests. */
            job_yield(&job->common.job);
        }
    } else if (job->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        ret = backup_run_incremental(job);
    } else {
        /* Both FULL and TOP sync modes require copying. */
        for (offset = 0; offset < job->len;
             offset += job->cluster_size) {
            bool error_is_read;
            int alloced = 0;

            if (yield_and_check(job)) {
                break;
            }

            if (job->sync_mode == MIRROR_SYNC_MODE_TOP) {
                int i;
                int64_t n;

                /* Check to see if these blocks are already in the
                 * backing file. */

                for (i = 0; i < job->cluster_size;) {
                    /* bdrv_is_allocated() only returns true/false based
                     * on the first set of sectors it comes across that
                     * are all in the same state.
                     * For that reason we must verify each sector in the
                     * backup cluster length.  We end up copying more than
                     * needed but at some point that is always the case. */
                    alloced =
                        bdrv_is_allocated(bs, offset + i,
                                          job->cluster_size - i, &n);
                    i += n;

                    if (alloced || n == 0) {
                        break;
                    }
                }

                /* If the above loop never found any sectors that are in
                 * the topmost image, skip this backup. */
                if (alloced == 0) {
                    continue;
                }
            }
            /* In FULL sync mode we copy the whole drive. */
            if (alloced < 0) {
                ret = alloced;
            } else {
                ret = backup_do_cow(job, offset, job->cluster_size,
                                    &error_is_read, false);
            }
            if (ret < 0) {
                /* Depending on error action, fail now or retry cluster */
                BlockErrorAction action =
                    backup_error_action(job, error_is_read, -ret);
                if (action == BLOCK_ERROR_ACTION_REPORT) {
                    break;
                } else {
                    offset -= job->cluster_size;
                    continue;
                }
            }
        }
    }

    notifier_with_return_remove(&job->before_write);

    /* wait until pending backup_do_cow() calls have completed */
    qemu_co_rwlock_wrlock(&job->flush_rwlock);
    qemu_co_rwlock_unlock(&job->flush_rwlock);
    hbitmap_free(job->copy_bitmap);

    data = g_malloc(sizeof(*data));
    data->ret = ret;
    job_defer_to_main_loop(&job->common.job, backup_complete, data);
}

static const BlockJobDriver backup_job_driver = {
    .job_driver = {
        .instance_size          = sizeof(BackupBlockJob),
        .job_type               = JOB_TYPE_BACKUP,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .drain                  = block_job_drain,
        .start                  = backup_run,
        .commit                 = backup_commit,
        .abort                  = backup_abort,
        .clean                  = backup_clean,
    },
    .attached_aio_context   = backup_attached_aio_context,
    .drain                  = backup_drain,
};

BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, int64_t speed,
                  MirrorSyncMode sync_mode, BdrvDirtyBitmap *sync_bitmap,
                  bool compress,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  int creation_flags,
                  BlockCompletionFunc *cb, void *opaque,
                  JobTxn *txn, Error **errp)
{
    int64_t len;
    BlockDriverInfo bdi;
    BackupBlockJob *job = NULL;
    int ret;

    assert(bs);
    assert(target);

    if (bs == target) {
        error_setg(errp, "Source and target cannot be the same");
        return NULL;
    }

    if (!bdrv_is_inserted(bs)) {
        error_setg(errp, "Device is not inserted: %s",
                   bdrv_get_device_name(bs));
        return NULL;
    }

    if (!bdrv_is_inserted(target)) {
        error_setg(errp, "Device is not inserted: %s",
                   bdrv_get_device_name(target));
        return NULL;
    }

    if (compress && target->drv->bdrv_co_pwritev_compressed == NULL) {
        error_setg(errp, "Compression is not supported for this drive %s",
                   bdrv_get_device_name(target));
        return NULL;
    }

    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_BACKUP_SOURCE, errp)) {
        return NULL;
    }

    if (bdrv_op_is_blocked(target, BLOCK_OP_TYPE_BACKUP_TARGET, errp)) {
        return NULL;
    }

    if (sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        if (!sync_bitmap) {
            error_setg(errp, "must provide a valid bitmap name for "
                             "\"incremental\" sync mode");
            return NULL;
        }

        /* Create a new bitmap, and freeze/disable this one. */
        if (bdrv_dirty_bitmap_create_successor(bs, sync_bitmap, errp) < 0) {
            return NULL;
        }
    } else if (sync_bitmap) {
        error_setg(errp,
                   "a sync_bitmap was provided to backup_run, "
                   "but received an incompatible sync_mode (%s)",
                   MirrorSyncMode_str(sync_mode));
        return NULL;
    }

    len = bdrv_getlength(bs);
    if (len < 0) {
        error_setg_errno(errp, -len, "unable to get length for '%s'",
                         bdrv_get_device_name(bs));
        goto error;
    }

    /* job->len is fixed, so we can't allow resize */
    job = block_job_create(job_id, &backup_job_driver, txn, bs,
                           BLK_PERM_CONSISTENT_READ,
                           BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE |
                           BLK_PERM_WRITE_UNCHANGED | BLK_PERM_GRAPH_MOD,
                           speed, creation_flags, cb, opaque, errp);
    if (!job) {
        goto error;
    }

    /* The target must match the source in size, so no resize here either */
    job->target = blk_new(BLK_PERM_WRITE,
                          BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE |
                          BLK_PERM_WRITE_UNCHANGED | BLK_PERM_GRAPH_MOD);
    ret = blk_insert_bs(job->target, target, errp);
    if (ret < 0) {
        goto error;
    }

    job->on_source_error = on_source_error;
    job->on_target_error = on_target_error;
    job->sync_mode = sync_mode;
    job->sync_bitmap = sync_mode == MIRROR_SYNC_MODE_INCREMENTAL ?
                       sync_bitmap : NULL;
    job->compress = compress;

    /* If there is no backing file on the target, we cannot rely on COW if our
     * backup cluster size is smaller than the target cluster size. Even for
     * targets with a backing file, try to avoid COW if possible. */
    ret = bdrv_get_info(target, &bdi);
    if (ret == -ENOTSUP && !target->backing) {
        /* Cluster size is not defined */
        warn_report("The target block device doesn't provide "
                    "information about the block size and it doesn't have a "
                    "backing file. The default block size of %u bytes is "
                    "used. If the actual block size of the target exceeds "
                    "this default, the backup may be unusable",
                    BACKUP_CLUSTER_SIZE_DEFAULT);
        job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
    } else if (ret < 0 && !target->backing) {
        error_setg_errno(errp, -ret,
            "Couldn't determine the cluster size of the target image, "
            "which has no backing file");
        error_append_hint(errp,
            "Aborting, since this may create an unusable destination image\n");
        goto error;
    } else if (ret < 0 && target->backing) {
        /* Not fatal; just trudge on ahead. */
        job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
    } else {
        job->cluster_size = MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
    }
    job->use_copy_range = true;
    job->copy_range_size = MIN_NON_ZERO(blk_get_max_transfer(job->common.blk),
                                        blk_get_max_transfer(job->target));
    job->copy_range_size = MAX(job->cluster_size,
                               QEMU_ALIGN_UP(job->copy_range_size,
                                             job->cluster_size));

    /* Required permissions are already taken with target's blk_new() */
    block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL,
                       &error_abort);
    job->len = len;

    return &job->common;

 error:
    if (sync_bitmap) {
        bdrv_reclaim_dirty_bitmap(bs, sync_bitmap, NULL);
    }
    if (job) {
        backup_clean(&job->common.job);
        job_early_fail(&job->common.job);
    }

    return NULL;
}