/*
 * QEMU backup
 *
 * Copyright (C) 2013 Proxmox Server Solutions
 *
 * Authors:
 *  Dietmar Maurer (dietmar@proxmox.com)
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "block/block.h"
#include "block/block_int.h"
#include "block/blockjob_int.h"
#include "block/block_backup.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/cutils.h"
#include "sysemu/block-backend.h"
#include "qemu/bitmap.h"
#include "qemu/error-report.h"

#define BACKUP_CLUSTER_SIZE_DEFAULT (1 << 16)

typedef struct BackupBlockJob {
    BlockJob common;
    BlockBackend *target;
    /* bitmap for sync=incremental */
    BdrvDirtyBitmap *sync_bitmap;
    MirrorSyncMode sync_mode;
    BlockdevOnError on_source_error;
    BlockdevOnError on_target_error;
    CoRwlock flush_rwlock;
    uint64_t len;
    uint64_t bytes_read;
    int64_t cluster_size;
    bool compress;
    NotifierWithReturn before_write;
    QLIST_HEAD(, CowRequest) inflight_reqs;

    HBitmap *copy_bitmap;
    bool use_copy_range;
    int64_t copy_range_size;

    bool serialize_target_writes;
} BackupBlockJob;

static const BlockJobDriver backup_job_driver;

/* See if in-flight requests overlap and wait for them to complete */
static void coroutine_fn wait_for_overlapping_requests(BackupBlockJob *job,
                                                       int64_t start,
                                                       int64_t end)
{
    CowRequest *req;
    bool retry;

    do {
        retry = false;
        QLIST_FOREACH(req, &job->inflight_reqs, list) {
            if (end > req->start_byte && start < req->end_byte) {
                qemu_co_queue_wait(&req->wait_queue, NULL);
                retry = true;
                break;
            }
        }
    } while (retry);
}

/* Keep track of an in-flight request */
static void cow_request_begin(CowRequest *req, BackupBlockJob *job,
                              int64_t start, int64_t end)
{
    req->start_byte = start;
    req->end_byte = end;
    qemu_co_queue_init(&req->wait_queue);
    QLIST_INSERT_HEAD(&job->inflight_reqs, req, list);
}

/* Forget about a completed request */
static void cow_request_end(CowRequest *req)
{
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}

/* Copy range to target with a bounce buffer and return the bytes copied. If
 * an error occurred, return a negative error number. */
static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job,
                                                      int64_t start,
                                                      int64_t end,
                                                      bool is_write_notifier,
                                                      bool *error_is_read,
                                                      void **bounce_buffer)
{
    int ret;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockBackend *blk = job->common.blk;
    int nbytes;
    int read_flags = is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0;
    int write_flags = job->serialize_target_writes ? BDRV_REQ_SERIALISING : 0;
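
    /* Clear this cluster's bit up front; the failure path below sets it
     * again so that a later pass can retry the copy. */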
    hbitmap_reset(job->copy_bitmap, start / job->cluster_size, 1);
    nbytes = MIN(job->cluster_size, job->len - start);
    if (!*bounce_buffer) {
        *bounce_buffer = blk_blockalign(blk, job->cluster_size);
    }
    iov.iov_base = *bounce_buffer;
    iov.iov_len = nbytes;
    qemu_iovec_init_external(&qiov, &iov, 1);

    ret = blk_co_preadv(blk, start, qiov.size, &qiov, read_flags);
    if (ret < 0) {
        trace_backup_do_cow_read_fail(job, start, ret);
        if (error_is_read) {
            *error_is_read = true;
        }
        goto fail;
    }

    if (qemu_iovec_is_zero(&qiov)) {
        ret = blk_co_pwrite_zeroes(job->target, start,
                                   qiov.size, write_flags | BDRV_REQ_MAY_UNMAP);
    } else {
        ret = blk_co_pwritev(job->target, start,
                             qiov.size, &qiov, write_flags |
                             (job->compress ? BDRV_REQ_WRITE_COMPRESSED : 0));
    }
    if (ret < 0) {
        trace_backup_do_cow_write_fail(job, start, ret);
        if (error_is_read) {
            *error_is_read = false;
        }
        goto fail;
    }

    return nbytes;
fail:
    hbitmap_set(job->copy_bitmap, start / job->cluster_size, 1);
    return ret;
}

/* Copy range to target and return the bytes copied. If an error occurred,
 * return a negative error number. */
static int coroutine_fn backup_cow_with_offload(BackupBlockJob *job,
                                                int64_t start,
                                                int64_t end,
                                                bool is_write_notifier)
{
    int ret;
    int nr_clusters;
    BlockBackend *blk = job->common.blk;
    int nbytes;
    int read_flags = is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0;
    int write_flags = job->serialize_target_writes ? BDRV_REQ_SERIALISING : 0;

    assert(QEMU_IS_ALIGNED(job->copy_range_size, job->cluster_size));
    nbytes = MIN(job->copy_range_size, end - start);
    nr_clusters = DIV_ROUND_UP(nbytes, job->cluster_size);
    hbitmap_reset(job->copy_bitmap, start / job->cluster_size,
                  nr_clusters);
    ret = blk_co_copy_range(blk, start, job->target, start, nbytes,
                            read_flags, write_flags);
    if (ret < 0) {
        trace_backup_do_cow_copy_range_fail(job, start, ret);
        hbitmap_set(job->copy_bitmap, start / job->cluster_size,
                    nr_clusters);
        return ret;
    }

    return nbytes;
}

static int coroutine_fn backup_do_cow(BackupBlockJob *job,
                                      int64_t offset, uint64_t bytes,
                                      bool *error_is_read,
                                      bool is_write_notifier)
{
    CowRequest cow_request;
    int ret = 0;
    int64_t start, end; /* bytes */
    void *bounce_buffer = NULL;

    qemu_co_rwlock_rdlock(&job->flush_rwlock);

    start = QEMU_ALIGN_DOWN(offset, job->cluster_size);
    end = QEMU_ALIGN_UP(bytes + offset, job->cluster_size);

    trace_backup_do_cow_enter(job, start, offset, bytes);

    wait_for_overlapping_requests(job, start, end);
    cow_request_begin(&cow_request, job, start, end);

    while (start < end) {
        if (!hbitmap_get(job->copy_bitmap, start / job->cluster_size)) {
            trace_backup_do_cow_skip(job, start);
            start += job->cluster_size;
            continue; /* already copied */
        }

        trace_backup_do_cow_process(job, start);

        if (job->use_copy_range) {
            ret = backup_cow_with_offload(job, start, end, is_write_notifier);
            if (ret < 0) {
                job->use_copy_range = false;
            }
        }
        if (!job->use_copy_range) {
            ret = backup_cow_with_bounce_buffer(job, start, end,
                                                is_write_notifier,
                                                error_is_read, &bounce_buffer);
        }
        if (ret < 0) {
            break;
        }

        /* Publish progress; guest I/O counts as progress too.  Note that the
         * offset field is an opaque progress value, it is not a disk offset.
         */
        start += ret;
        job->bytes_read += ret;
        job_progress_update(&job->common.job, ret);
        ret = 0;
    }

    if (bounce_buffer) {
        qemu_vfree(bounce_buffer);
    }

    cow_request_end(&cow_request);

    trace_backup_do_cow_return(job, offset, bytes, ret);

    qemu_co_rwlock_unlock(&job->flush_rwlock);

    return ret;
}

static int coroutine_fn backup_before_write_notify(
        NotifierWithReturn *notifier,
        void *opaque)
{
    BackupBlockJob *job = container_of(notifier, BackupBlockJob, before_write);
    BdrvTrackedRequest *req = opaque;

    assert(req->bs == blk_bs(job->common.blk));
    assert(QEMU_IS_ALIGNED(req->offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(req->bytes, BDRV_SECTOR_SIZE));

    return backup_do_cow(job, req->offset, req->bytes, NULL, true);
}

static void backup_cleanup_sync_bitmap(BackupBlockJob *job, int ret)
{
    BdrvDirtyBitmap *bm;
    BlockDriverState *bs = blk_bs(job->common.blk);

    if (ret < 0) {
        /* Merge the successor back into the parent, delete nothing. */
        bm = bdrv_reclaim_dirty_bitmap(bs, job->sync_bitmap, NULL);
        assert(bm);
    } else {
        /* Everything is fine, delete this bitmap and install the backup. */
        bm = bdrv_dirty_bitmap_abdicate(bs, job->sync_bitmap, NULL);
        assert(bm);
    }
}

static void backup_commit(Job *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
    if (s->sync_bitmap) {
        backup_cleanup_sync_bitmap(s, 0);
    }
}

static void backup_abort(Job *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
    if (s->sync_bitmap) {
        backup_cleanup_sync_bitmap(s, -1);
    }
}

static void backup_clean(Job *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
    assert(s->target);
    blk_unref(s->target);
    s->target = NULL;
}

static void backup_attached_aio_context(BlockJob *job, AioContext *aio_context)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);

    blk_set_aio_context(s->target, aio_context);
}

void backup_do_checkpoint(BlockJob *job, Error **errp)
{
    BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);
    int64_t len;

    assert(block_job_driver(job) == &backup_job_driver);

    if (backup_job->sync_mode != MIRROR_SYNC_MODE_NONE) {
        error_setg(errp, "The backup job only supports block checkpoint in"
                   " sync=none mode");
        return;
    }

    len = DIV_ROUND_UP(backup_job->len, backup_job->cluster_size);
    hbitmap_set(backup_job->copy_bitmap, 0, len);
}

void backup_wait_for_overlapping_requests(BlockJob *job, int64_t offset,
                                          uint64_t bytes)
{
    BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);
    int64_t start, end;

    assert(block_job_driver(job) == &backup_job_driver);

    start = QEMU_ALIGN_DOWN(offset, backup_job->cluster_size);
    end = QEMU_ALIGN_UP(offset + bytes, backup_job->cluster_size);
    wait_for_overlapping_requests(backup_job, start, end);
}

void backup_cow_request_begin(CowRequest *req, BlockJob *job,
                              int64_t offset, uint64_t bytes)
{
    BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);
    int64_t start, end;

    assert(block_job_driver(job) == &backup_job_driver);

    start = QEMU_ALIGN_DOWN(offset, backup_job->cluster_size);
    end = QEMU_ALIGN_UP(offset + bytes, backup_job->cluster_size);
    cow_request_begin(req, backup_job, start, end);
}

void backup_cow_request_end(CowRequest *req)
{
    cow_request_end(req);
}

static void backup_drain(BlockJob *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);

    /* Need to keep a reference in case blk_drain triggers execution
     * of backup_complete...
     */
    if (s->target) {
        BlockBackend *target = s->target;
        blk_ref(target);
        blk_drain(target);
        blk_unref(target);
    }
}

static BlockErrorAction backup_error_action(BackupBlockJob *job,
                                            bool read, int error)
{
    if (read) {
        return block_job_error_action(&job->common, job->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&job->common, job->on_target_error,
                                      false, error);
    }
}

typedef struct {
    int ret;
} BackupCompleteData;

static void backup_complete(Job *job, void *opaque)
{
    BackupCompleteData *data = opaque;

    job_completed(job, data->ret, NULL);
    g_free(data);
}

static bool coroutine_fn yield_and_check(BackupBlockJob *job)
{
    uint64_t delay_ns;

    if (job_is_cancelled(&job->common.job)) {
        return true;
    }

    /* We need to yield even for delay_ns = 0 so that bdrv_drain_all() can
     * return. Without a yield, the VM would not reboot. */
    delay_ns = block_job_ratelimit_get_delay(&job->common, job->bytes_read);
    job->bytes_read = 0;
    job_sleep_ns(&job->common.job, delay_ns);

    if (job_is_cancelled(&job->common.job)) {
        return true;
    }

    return false;
}

static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
{
    int ret;
    bool error_is_read;
    int64_t cluster;
    HBitmapIter hbi;

    hbitmap_iter_init(&hbi, job->copy_bitmap, 0);
    while ((cluster = hbitmap_iter_next(&hbi, true)) != -1) {
        do {
            if (yield_and_check(job)) {
                return 0;
            }
            ret = backup_do_cow(job, cluster * job->cluster_size,
                                job->cluster_size, &error_is_read, false);
            if (ret < 0 && backup_error_action(job, error_is_read, -ret) ==
                           BLOCK_ERROR_ACTION_REPORT)
            {
                return ret;
            }
        } while (ret < 0);
    }

    return 0;
}

/* init copy_bitmap from sync_bitmap */
static void backup_incremental_init_copy_bitmap(BackupBlockJob *job)
{
    BdrvDirtyBitmapIter *dbi;
    int64_t offset;
    int64_t end = DIV_ROUND_UP(bdrv_dirty_bitmap_size(job->sync_bitmap),
                               job->cluster_size);

    dbi = bdrv_dirty_iter_new(job->sync_bitmap);
    while ((offset = bdrv_dirty_iter_next(dbi)) != -1) {
        int64_t cluster = offset / job->cluster_size;
        int64_t next_cluster;

        offset += bdrv_dirty_bitmap_granularity(job->sync_bitmap);
        if (offset >= bdrv_dirty_bitmap_size(job->sync_bitmap)) {
            hbitmap_set(job->copy_bitmap, cluster, end - cluster);
            break;
        }

        offset = bdrv_dirty_bitmap_next_zero(job->sync_bitmap, offset);
        if (offset == -1) {
            hbitmap_set(job->copy_bitmap, cluster, end - cluster);
            break;
        }

        next_cluster = DIV_ROUND_UP(offset, job->cluster_size);
        hbitmap_set(job->copy_bitmap, cluster, next_cluster - cluster);
        if (next_cluster >= end) {
            break;
        }

        bdrv_set_dirty_iter(dbi, next_cluster * job->cluster_size);
    }

    /* TODO job_progress_set_remaining() would make more sense */
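    /* copy_bitmap now holds exactly the clusters that still need copying,
     * so report everything that is already clean as completed progress. */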
    job_progress_update(&job->common.job,
        job->len - hbitmap_count(job->copy_bitmap) * job->cluster_size);

    bdrv_dirty_iter_free(dbi);
}

static void coroutine_fn backup_run(void *opaque)
{
    BackupBlockJob *job = opaque;
    BackupCompleteData *data;
    BlockDriverState *bs = blk_bs(job->common.blk);
    int64_t offset, nb_clusters;
    int ret = 0;

    QLIST_INIT(&job->inflight_reqs);
    qemu_co_rwlock_init(&job->flush_rwlock);

    nb_clusters = DIV_ROUND_UP(job->len, job->cluster_size);
    job_progress_set_remaining(&job->common.job, job->len);

    job->copy_bitmap = hbitmap_alloc(nb_clusters, 0);
    if (job->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        backup_incremental_init_copy_bitmap(job);
    } else {
        hbitmap_set(job->copy_bitmap, 0, nb_clusters);
    }

    job->before_write.notify = backup_before_write_notify;
    bdrv_add_before_write_notifier(bs, &job->before_write);

    if (job->sync_mode == MIRROR_SYNC_MODE_NONE) {
        /* All bits are set in copy_bitmap to allow any cluster to be copied.
         * This does not actually require them to be copied. */
        while (!job_is_cancelled(&job->common.job)) {
            /* Yield until the job is cancelled. We just let our before_write
             * notify callback service CoW requests. */
            job_yield(&job->common.job);
        }
    } else if (job->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        ret = backup_run_incremental(job);
    } else {
        /* Both FULL and TOP sync modes require copying. */
        for (offset = 0; offset < job->len;
             offset += job->cluster_size) {
            bool error_is_read;
            int alloced = 0;

            if (yield_and_check(job)) {
                break;
            }

            if (job->sync_mode == MIRROR_SYNC_MODE_TOP) {
                int i;
                int64_t n;

                /* Check to see if these blocks are already in the
                 * backing file. */

                for (i = 0; i < job->cluster_size;) {
                    /* bdrv_is_allocated() only returns true/false based
                     * on the first set of sectors it comes across that
                     * are all in the same state.
                     * For that reason we must verify each sector in the
                     * backup cluster length.  We end up copying more than
                     * needed but at some point that is always the case. */
                    alloced =
                        bdrv_is_allocated(bs, offset + i,
                                          job->cluster_size - i, &n);
                    i += n;

                    if (alloced || n == 0) {
                        break;
                    }
                }

                /* If the above loop never found any sectors that are in
                 * the topmost image, skip this backup. */
                if (alloced == 0) {
                    continue;
                }
            }
            /* In FULL sync mode we copy the whole drive. */
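            /* alloced can only be negative here if bdrv_is_allocated()
             * failed during the TOP-mode scan above; report that error
             * instead of attempting the copy. */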
            if (alloced < 0) {
                ret = alloced;
            } else {
                ret = backup_do_cow(job, offset, job->cluster_size,
                                    &error_is_read, false);
            }
            if (ret < 0) {
                /* Depending on error action, fail now or retry cluster */
                BlockErrorAction action =
                    backup_error_action(job, error_is_read, -ret);
                if (action == BLOCK_ERROR_ACTION_REPORT) {
                    break;
                } else {
                    offset -= job->cluster_size;
                    continue;
                }
            }
        }
    }

    notifier_with_return_remove(&job->before_write);

    /* wait until pending backup_do_cow() calls have completed */
    qemu_co_rwlock_wrlock(&job->flush_rwlock);
    qemu_co_rwlock_unlock(&job->flush_rwlock);
    hbitmap_free(job->copy_bitmap);

    data = g_malloc(sizeof(*data));
    data->ret = ret;
    job_defer_to_main_loop(&job->common.job, backup_complete, data);
}

static const BlockJobDriver backup_job_driver = {
    .job_driver = {
        .instance_size          = sizeof(BackupBlockJob),
        .job_type               = JOB_TYPE_BACKUP,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .drain                  = block_job_drain,
        .start                  = backup_run,
        .commit                 = backup_commit,
        .abort                  = backup_abort,
        .clean                  = backup_clean,
    },
    .attached_aio_context   = backup_attached_aio_context,
    .drain                  = backup_drain,
};

BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, int64_t speed,
                  MirrorSyncMode sync_mode, BdrvDirtyBitmap *sync_bitmap,
                  bool compress,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  int creation_flags,
                  BlockCompletionFunc *cb, void *opaque,
                  JobTxn *txn, Error **errp)
{
    int64_t len;
    BlockDriverInfo bdi;
    BackupBlockJob *job = NULL;
    int ret;

    assert(bs);
    assert(target);

    if (bs == target) {
        error_setg(errp, "Source and target cannot be the same");
        return NULL;
    }

    if (!bdrv_is_inserted(bs)) {
        error_setg(errp, "Device is not inserted: %s",
                   bdrv_get_device_name(bs));
        return NULL;
    }

    if (!bdrv_is_inserted(target)) {
        error_setg(errp, "Device is not inserted: %s",
                   bdrv_get_device_name(target));
        return NULL;
    }

    if (compress && target->drv->bdrv_co_pwritev_compressed == NULL) {
        error_setg(errp, "Compression is not supported for this drive %s",
                   bdrv_get_device_name(target));
        return NULL;
    }

    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_BACKUP_SOURCE, errp)) {
        return NULL;
    }

    if (bdrv_op_is_blocked(target, BLOCK_OP_TYPE_BACKUP_TARGET, errp)) {
        return NULL;
    }

    if (sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        if (!sync_bitmap) {
            error_setg(errp, "must provide a valid bitmap name for "
                       "\"incremental\" sync mode");
            return NULL;
        }

        /* Create a new bitmap, and freeze/disable this one. */
        if (bdrv_dirty_bitmap_create_successor(bs, sync_bitmap, errp) < 0) {
            return NULL;
        }
    } else if (sync_bitmap) {
        error_setg(errp,
                   "a sync_bitmap was provided to backup_run, "
                   "but received an incompatible sync_mode (%s)",
                   MirrorSyncMode_str(sync_mode));
        return NULL;
    }

    len = bdrv_getlength(bs);
    if (len < 0) {
        error_setg_errno(errp, -len, "unable to get length for '%s'",
                         bdrv_get_device_name(bs));
        goto error;
    }

    /* job->len is fixed, so we can't allow resize */
    job = block_job_create(job_id, &backup_job_driver, txn, bs,
                           BLK_PERM_CONSISTENT_READ,
                           BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE |
                           BLK_PERM_WRITE_UNCHANGED | BLK_PERM_GRAPH_MOD,
                           speed, creation_flags, cb, opaque, errp);
    if (!job) {
        goto error;
    }

    /* The target must match the source in size, so no resize here either */
    job->target = blk_new(BLK_PERM_WRITE,
                          BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE |
                          BLK_PERM_WRITE_UNCHANGED | BLK_PERM_GRAPH_MOD);
    ret = blk_insert_bs(job->target, target, errp);
    if (ret < 0) {
        goto error;
    }

    job->on_source_error = on_source_error;
    job->on_target_error = on_target_error;
    job->sync_mode = sync_mode;
    job->sync_bitmap = sync_mode == MIRROR_SYNC_MODE_INCREMENTAL ?
                       sync_bitmap : NULL;
    job->compress = compress;

    /* Detect image-fleecing (and similar) schemes */
    job->serialize_target_writes = bdrv_chain_contains(target, bs);

    /* If there is no backing file on the target, we cannot rely on COW if our
     * backup cluster size is smaller than the target cluster size. Even for
     * targets with a backing file, try to avoid COW if possible. */
    ret = bdrv_get_info(target, &bdi);
    if (ret == -ENOTSUP && !target->backing) {
        /* Cluster size is not defined */
        warn_report("The target block device doesn't provide "
                    "information about the block size and it doesn't have a "
                    "backing file. The default block size of %u bytes is "
                    "used. If the actual block size of the target exceeds "
                    "this default, the backup may be unusable",
                    BACKUP_CLUSTER_SIZE_DEFAULT);
        job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
    } else if (ret < 0 && !target->backing) {
        error_setg_errno(errp, -ret,
            "Couldn't determine the cluster size of the target image, "
            "which has no backing file");
        error_append_hint(errp,
            "Aborting, since this may create an unusable destination image\n");
        goto error;
    } else if (ret < 0 && target->backing) {
        /* Not fatal; just trudge on ahead. */
        job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
    } else {
        job->cluster_size = MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
    }
    job->use_copy_range = true;
    job->copy_range_size = MIN_NON_ZERO(blk_get_max_transfer(job->common.blk),
                                        blk_get_max_transfer(job->target));
    job->copy_range_size = MAX(job->cluster_size,
                               QEMU_ALIGN_UP(job->copy_range_size,
                                             job->cluster_size));

    /* Required permissions are already taken with target's blk_new() */
    block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL,
                       &error_abort);
    job->len = len;

    return &job->common;

error:
    if (sync_bitmap) {
        bdrv_reclaim_dirty_bitmap(bs, sync_bitmap, NULL);
    }
    if (job) {
        backup_clean(&job->common.job);
        job_early_fail(&job->common.job);
    }

    return NULL;
}
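
/*
 * Usage sketch (illustrative only): backup_job_create() is normally
 * reached via the QMP 'drive-backup' or 'blockdev-backup' commands
 * rather than being called directly.  A minimal full backup request
 * might look like the following; the device name and target path are
 * hypothetical placeholders:
 *
 *   { "execute": "drive-backup",
 *     "arguments": { "device": "drive0",
 *                    "sync": "full",
 *                    "target": "/tmp/backup.img" } }
 */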