/*
 * QEMU backup
 *
 * Copyright (C) 2013 Proxmox Server Solutions
 *
 * Authors:
 *  Dietmar Maurer (dietmar@proxmox.com)
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "block/block.h"
#include "block/block_int.h"
#include "block/blockjob_int.h"
#include "block/block_backup.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/cutils.h"
#include "sysemu/block-backend.h"
#include "qemu/bitmap.h"

#define BACKUP_CLUSTER_SIZE_DEFAULT (1 << 16)
#define SLICE_TIME 100000000ULL /* ns */

typedef struct BackupBlockJob {
    BlockJob common;
    BlockBackend *target;
    /* bitmap for sync=incremental */
    BdrvDirtyBitmap *sync_bitmap;
    MirrorSyncMode sync_mode;
    RateLimit limit;
    BlockdevOnError on_source_error;
    BlockdevOnError on_target_error;
    CoRwlock flush_rwlock;
    uint64_t sectors_read;
    unsigned long *done_bitmap;
    int64_t cluster_size;
    bool compress;
    NotifierWithReturn before_write;
    QLIST_HEAD(, CowRequest) inflight_reqs;
} BackupBlockJob;

/* Size of a cluster in sectors, instead of bytes. */
static inline int64_t cluster_size_sectors(BackupBlockJob *job)
{
    return job->cluster_size / BDRV_SECTOR_SIZE;
}

/* See if in-flight requests overlap and wait for them to complete */
static void coroutine_fn wait_for_overlapping_requests(BackupBlockJob *job,
                                                       int64_t start,
                                                       int64_t end)
{
    CowRequest *req;
    bool retry;

    do {
        retry = false;
        QLIST_FOREACH(req, &job->inflight_reqs, list) {
            if (end > req->start && start < req->end) {
                qemu_co_queue_wait(&req->wait_queue);
                retry = true;
                break;
            }
        }
    } while (retry);
}

/* Keep track of an in-flight request */
static void cow_request_begin(CowRequest *req, BackupBlockJob *job,
                              int64_t start, int64_t end)
{
    req->start = start;
    req->end = end;
    qemu_co_queue_init(&req->wait_queue);
    QLIST_INSERT_HEAD(&job->inflight_reqs, req, list);
}

/* Forget about a completed request */
static void cow_request_end(CowRequest *req)
{
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}

static int coroutine_fn backup_do_cow(BackupBlockJob *job,
                                      int64_t sector_num, int nb_sectors,
                                      bool *error_is_read,
                                      bool is_write_notifier)
{
    BlockBackend *blk = job->common.blk;
    CowRequest cow_request;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    void *bounce_buffer = NULL;
    int ret = 0;
    int64_t sectors_per_cluster = cluster_size_sectors(job);
    int64_t start, end;
    int n;

    qemu_co_rwlock_rdlock(&job->flush_rwlock);

    start = sector_num / sectors_per_cluster;
    end = DIV_ROUND_UP(sector_num + nb_sectors, sectors_per_cluster);

    trace_backup_do_cow_enter(job, start, sector_num, nb_sectors);

    wait_for_overlapping_requests(job, start, end);
    cow_request_begin(&cow_request, job, start, end);

    for (; start < end; start++) {
        if (test_bit(start, job->done_bitmap)) {
            trace_backup_do_cow_skip(job, start);
            continue; /* already copied */
        }

        trace_backup_do_cow_process(job, start);

        n = MIN(sectors_per_cluster,
                job->common.len / BDRV_SECTOR_SIZE -
                start * sectors_per_cluster);

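        /* The bounce buffer is allocated lazily, on the first cluster that
         * actually needs copying; it is reused for every following cluster
         * and freed once at "out". */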
        if (!bounce_buffer) {
            bounce_buffer = blk_blockalign(blk, job->cluster_size);
        }
        iov.iov_base = bounce_buffer;
        iov.iov_len = n * BDRV_SECTOR_SIZE;
        qemu_iovec_init_external(&bounce_qiov, &iov, 1);

        ret = blk_co_preadv(blk, start * job->cluster_size,
                            bounce_qiov.size, &bounce_qiov,
                            is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0);
        if (ret < 0) {
            trace_backup_do_cow_read_fail(job, start, ret);
            if (error_is_read) {
                *error_is_read = true;
            }
            goto out;
        }

        if (buffer_is_zero(iov.iov_base, iov.iov_len)) {
            ret = blk_co_pwrite_zeroes(job->target, start * job->cluster_size,
                                       bounce_qiov.size, BDRV_REQ_MAY_UNMAP);
        } else {
            ret = blk_co_pwritev(job->target, start * job->cluster_size,
                                 bounce_qiov.size, &bounce_qiov,
                                 job->compress ? BDRV_REQ_WRITE_COMPRESSED : 0);
        }
        if (ret < 0) {
            trace_backup_do_cow_write_fail(job, start, ret);
            if (error_is_read) {
                *error_is_read = false;
            }
            goto out;
        }

        set_bit(start, job->done_bitmap);

        /* Publish progress, guest I/O counts as progress too.  Note that the
         * offset field is an opaque progress value, it is not a disk offset.
         */
        job->sectors_read += n;
        job->common.offset += n * BDRV_SECTOR_SIZE;
    }

out:
    if (bounce_buffer) {
        qemu_vfree(bounce_buffer);
    }

    cow_request_end(&cow_request);

    trace_backup_do_cow_return(job, sector_num, nb_sectors, ret);

    qemu_co_rwlock_unlock(&job->flush_rwlock);

    return ret;
}

static int coroutine_fn backup_before_write_notify(
        NotifierWithReturn *notifier,
        void *opaque)
{
    BackupBlockJob *job = container_of(notifier, BackupBlockJob, before_write);
    BdrvTrackedRequest *req = opaque;
    int64_t sector_num = req->offset >> BDRV_SECTOR_BITS;
    int nb_sectors = req->bytes >> BDRV_SECTOR_BITS;

    assert(req->bs == blk_bs(job->common.blk));
    assert((req->offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((req->bytes & (BDRV_SECTOR_SIZE - 1)) == 0);

    return backup_do_cow(job, sector_num, nb_sectors, NULL, true);
}

static void backup_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);

    if (speed < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }
    ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
}

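/* Resolve the bitmap successor installed by backup_job_create() for
 * sync=incremental: called from backup_commit() with ret == 0 and from
 * backup_abort() with ret < 0. */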
static void backup_cleanup_sync_bitmap(BackupBlockJob *job, int ret)
{
    BdrvDirtyBitmap *bm;
    BlockDriverState *bs = blk_bs(job->common.blk);

    if (ret < 0 || block_job_is_cancelled(&job->common)) {
        /* Merge the successor back into the parent, delete nothing. */
        bm = bdrv_reclaim_dirty_bitmap(bs, job->sync_bitmap, NULL);
        assert(bm);
    } else {
        /* Everything is fine, delete this bitmap and install the backup. */
        bm = bdrv_dirty_bitmap_abdicate(bs, job->sync_bitmap, NULL);
        assert(bm);
    }
}

static void backup_commit(BlockJob *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
    if (s->sync_bitmap) {
        backup_cleanup_sync_bitmap(s, 0);
    }
}

static void backup_abort(BlockJob *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
    if (s->sync_bitmap) {
        backup_cleanup_sync_bitmap(s, -1);
    }
}

static void backup_clean(BlockJob *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
    assert(s->target);
    blk_unref(s->target);
    s->target = NULL;
}

static void backup_attached_aio_context(BlockJob *job, AioContext *aio_context)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);

    blk_set_aio_context(s->target, aio_context);
}

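/* Clearing the done_bitmap makes every cluster eligible for copy-on-write
 * again, which establishes a new checkpoint for a sync=none job that is
 * driven purely by the before-write notifier. */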
void backup_do_checkpoint(BlockJob *job, Error **errp)
{
    BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);
    int64_t len;

    assert(job->driver->job_type == BLOCK_JOB_TYPE_BACKUP);

    if (backup_job->sync_mode != MIRROR_SYNC_MODE_NONE) {
        error_setg(errp, "The backup job only supports block checkpoint in"
                   " sync=none mode");
        return;
    }

    len = DIV_ROUND_UP(backup_job->common.len, backup_job->cluster_size);
    bitmap_zero(backup_job->done_bitmap, len);
}

void backup_wait_for_overlapping_requests(BlockJob *job, int64_t sector_num,
                                          int nb_sectors)
{
    BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);
    int64_t sectors_per_cluster = cluster_size_sectors(backup_job);
    int64_t start, end;

    assert(job->driver->job_type == BLOCK_JOB_TYPE_BACKUP);

    start = sector_num / sectors_per_cluster;
    end = DIV_ROUND_UP(sector_num + nb_sectors, sectors_per_cluster);
    wait_for_overlapping_requests(backup_job, start, end);
}

void backup_cow_request_begin(CowRequest *req, BlockJob *job,
                              int64_t sector_num,
                              int nb_sectors)
{
    BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);
    int64_t sectors_per_cluster = cluster_size_sectors(backup_job);
    int64_t start, end;

    assert(job->driver->job_type == BLOCK_JOB_TYPE_BACKUP);

    start = sector_num / sectors_per_cluster;
    end = DIV_ROUND_UP(sector_num + nb_sectors, sectors_per_cluster);
    cow_request_begin(req, backup_job, start, end);
}

void backup_cow_request_end(CowRequest *req)
{
    cow_request_end(req);
}

static void backup_drain(BlockJob *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);

    /* Need to keep a reference in case blk_drain triggers execution
     * of backup_complete...
     */
    if (s->target) {
        BlockBackend *target = s->target;
        blk_ref(target);
        blk_drain(target);
        blk_unref(target);
    }
}

static BlockErrorAction backup_error_action(BackupBlockJob *job,
                                            bool read, int error)
{
    if (read) {
        return block_job_error_action(&job->common, job->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&job->common, job->on_target_error,
                                      false, error);
    }
}

typedef struct {
    int ret;
} BackupCompleteData;

static void backup_complete(BlockJob *job, void *opaque)
{
    BackupCompleteData *data = opaque;

    block_job_completed(job, data->ret);
    g_free(data);
}

static bool coroutine_fn yield_and_check(BackupBlockJob *job)
{
    if (block_job_is_cancelled(&job->common)) {
        return true;
    }

    /* We need to yield so that bdrv_drain_all() returns.
     * (Without it, the VM does not reboot.)
     */
    if (job->common.speed) {
        uint64_t delay_ns = ratelimit_calculate_delay(&job->limit,
                                                      job->sectors_read);
        job->sectors_read = 0;
        block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, delay_ns);
    } else {
        block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, 0);
    }

    if (block_job_is_cancelled(&job->common)) {
        return true;
    }

    return false;
}

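/* Copy only the clusters marked dirty in the sync bitmap: walk the dirty
 * iterator, convert dirty sectors to cluster-sized chunks, and retry each
 * chunk until it succeeds or the configured error action says to report. */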
static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
{
    bool error_is_read;
    int ret = 0;
    int clusters_per_iter;
    uint32_t granularity;
    int64_t sector;
    int64_t cluster;
    int64_t end;
    int64_t last_cluster = -1;
    int64_t sectors_per_cluster = cluster_size_sectors(job);
    BdrvDirtyBitmapIter *dbi;

    granularity = bdrv_dirty_bitmap_granularity(job->sync_bitmap);
    clusters_per_iter = MAX((granularity / job->cluster_size), 1);
    dbi = bdrv_dirty_iter_new(job->sync_bitmap, 0);

    /* Find the next dirty sector(s) */
    while ((sector = bdrv_dirty_iter_next(dbi)) != -1) {
        cluster = sector / sectors_per_cluster;

        /* Fake progress updates for any clusters we skipped */
        if (cluster != last_cluster + 1) {
            job->common.offset += ((cluster - last_cluster - 1) *
                                   job->cluster_size);
        }

        for (end = cluster + clusters_per_iter; cluster < end; cluster++) {
            do {
                if (yield_and_check(job)) {
                    goto out;
                }
                ret = backup_do_cow(job, cluster * sectors_per_cluster,
                                    sectors_per_cluster, &error_is_read,
                                    false);
                if ((ret < 0) &&
                    backup_error_action(job, error_is_read, -ret) ==
                    BLOCK_ERROR_ACTION_REPORT) {
                    goto out;
                }
            } while (ret < 0);
        }

        /* If the bitmap granularity is smaller than the backup granularity,
         * we need to advance the iterator pointer to the next cluster. */
        if (granularity < job->cluster_size) {
            bdrv_set_dirty_iter(dbi, cluster * sectors_per_cluster);
        }

        last_cluster = cluster - 1;
    }

    /* Play some final catchup with the progress meter */
    end = DIV_ROUND_UP(job->common.len, job->cluster_size);
    if (last_cluster + 1 < end) {
        job->common.offset += ((end - last_cluster - 1) * job->cluster_size);
    }

out:
    bdrv_dirty_iter_free(dbi);
    return ret;
}

static void coroutine_fn backup_run(void *opaque)
{
    BackupBlockJob *job = opaque;
    BackupCompleteData *data;
    BlockDriverState *bs = blk_bs(job->common.blk);
    int64_t start, end;
    int64_t sectors_per_cluster = cluster_size_sectors(job);
    int ret = 0;

    QLIST_INIT(&job->inflight_reqs);
    qemu_co_rwlock_init(&job->flush_rwlock);

    start = 0;
    end = DIV_ROUND_UP(job->common.len, job->cluster_size);

    job->done_bitmap = bitmap_new(end);

    job->before_write.notify = backup_before_write_notify;
    bdrv_add_before_write_notifier(bs, &job->before_write);

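    /* From here on, every guest write to bs is intercepted by
     * backup_before_write_notify(), which copies the affected clusters to
     * the target before the write is allowed through. */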
    if (job->sync_mode == MIRROR_SYNC_MODE_NONE) {
        while (!block_job_is_cancelled(&job->common)) {
            /* Yield until the job is cancelled.  We just let our before_write
             * notify callback service CoW requests. */
            block_job_yield(&job->common);
        }
    } else if (job->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        ret = backup_run_incremental(job);
    } else {
        /* Both FULL and TOP sync modes require copying. */
        for (; start < end; start++) {
            bool error_is_read;
            if (yield_and_check(job)) {
                break;
            }

            if (job->sync_mode == MIRROR_SYNC_MODE_TOP) {
                int i, n;
                int alloced = 0;

                /* Check to see if these blocks are already in the
                 * backing file. */

                for (i = 0; i < sectors_per_cluster;) {
                    /* bdrv_is_allocated() only returns true/false based
                     * on the first set of sectors it comes across that
                     * are all in the same state.
                     * For that reason we must verify each sector in the
                     * backup cluster length.  We end up copying more than
                     * needed but at some point that is always the case. */
                    alloced =
                        bdrv_is_allocated(bs,
                                          start * sectors_per_cluster + i,
                                          sectors_per_cluster - i, &n);
                    i += n;

                    if (alloced == 1 || n == 0) {
                        break;
                    }
                }

                /* If the above loop never found any sectors that are in
                 * the topmost image, skip this backup. */
                if (alloced == 0) {
                    continue;
                }
            }
            /* In FULL sync mode we copy the whole drive. */
            ret = backup_do_cow(job, start * sectors_per_cluster,
                                sectors_per_cluster, &error_is_read, false);
            if (ret < 0) {
                /* Depending on error action, fail now or retry cluster */
                BlockErrorAction action =
                    backup_error_action(job, error_is_read, -ret);
                if (action == BLOCK_ERROR_ACTION_REPORT) {
                    break;
                } else {
                    start--;
                    continue;
                }
            }
        }
    }

    notifier_with_return_remove(&job->before_write);

    /* wait until pending backup_do_cow() calls have completed */
    qemu_co_rwlock_wrlock(&job->flush_rwlock);
    qemu_co_rwlock_unlock(&job->flush_rwlock);
    g_free(job->done_bitmap);

    data = g_malloc(sizeof(*data));
    data->ret = ret;
    block_job_defer_to_main_loop(&job->common, backup_complete, data);
}

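/* Driver callbacks for the backup block job: backup_run() is the coroutine
 * entry point, commit/abort resolve the sync bitmap's successor, and clean
 * drops the reference to the target BlockBackend. */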
static const BlockJobDriver backup_job_driver = {
    .instance_size          = sizeof(BackupBlockJob),
    .job_type               = BLOCK_JOB_TYPE_BACKUP,
    .start                  = backup_run,
    .set_speed              = backup_set_speed,
    .commit                 = backup_commit,
    .abort                  = backup_abort,
    .clean                  = backup_clean,
    .attached_aio_context   = backup_attached_aio_context,
    .drain                  = backup_drain,
};

BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, int64_t speed,
                  MirrorSyncMode sync_mode, BdrvDirtyBitmap *sync_bitmap,
                  bool compress,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  int creation_flags,
                  BlockCompletionFunc *cb, void *opaque,
                  BlockJobTxn *txn, Error **errp)
{
    int64_t len;
    BlockDriverInfo bdi;
    BackupBlockJob *job = NULL;
    int ret;

    assert(bs);
    assert(target);

    if (bs == target) {
        error_setg(errp, "Source and target cannot be the same");
        return NULL;
    }

    if (!bdrv_is_inserted(bs)) {
        error_setg(errp, "Device is not inserted: %s",
                   bdrv_get_device_name(bs));
        return NULL;
    }

    if (!bdrv_is_inserted(target)) {
        error_setg(errp, "Device is not inserted: %s",
                   bdrv_get_device_name(target));
        return NULL;
    }

    if (compress && target->drv->bdrv_co_pwritev_compressed == NULL) {
        error_setg(errp, "Compression is not supported for this drive %s",
                   bdrv_get_device_name(target));
        return NULL;
    }

    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_BACKUP_SOURCE, errp)) {
        return NULL;
    }

    if (bdrv_op_is_blocked(target, BLOCK_OP_TYPE_BACKUP_TARGET, errp)) {
        return NULL;
    }

    if (sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        if (!sync_bitmap) {
            error_setg(errp, "must provide a valid bitmap name for "
                             "\"incremental\" sync mode");
            return NULL;
        }

        /* Create a new bitmap, and freeze/disable this one. */
        if (bdrv_dirty_bitmap_create_successor(bs, sync_bitmap, errp) < 0) {
            return NULL;
        }
    } else if (sync_bitmap) {
        error_setg(errp,
                   "a sync_bitmap was provided to backup_run, "
                   "but received an incompatible sync_mode (%s)",
                   MirrorSyncMode_lookup[sync_mode]);
        return NULL;
    }

    len = bdrv_getlength(bs);
    if (len < 0) {
        error_setg_errno(errp, -len, "unable to get length for '%s'",
                         bdrv_get_device_name(bs));
        goto error;
    }

    job = block_job_create(job_id, &backup_job_driver, bs, speed,
                           creation_flags, cb, opaque, errp);
    if (!job) {
        goto error;
    }

    job->target = blk_new();
    blk_insert_bs(job->target, target);

    job->on_source_error = on_source_error;
    job->on_target_error = on_target_error;
    job->sync_mode = sync_mode;
    job->sync_bitmap = sync_mode == MIRROR_SYNC_MODE_INCREMENTAL ?
                       sync_bitmap : NULL;
    job->compress = compress;

    /* If there is no backing file on the target, we cannot rely on COW if our
     * backup cluster size is smaller than the target cluster size. Even for
     * targets with a backing file, try to avoid COW if possible. */
    ret = bdrv_get_info(target, &bdi);
    if (ret < 0 && !target->backing) {
        error_setg_errno(errp, -ret,
            "Couldn't determine the cluster size of the target image, "
            "which has no backing file");
        error_append_hint(errp,
            "Aborting, since this may create an unusable destination image\n");
        goto error;
    } else if (ret < 0 && target->backing) {
        /* Not fatal; just trudge on ahead. */
        job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
    } else {
        job->cluster_size = MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
    }

    block_job_add_bdrv(&job->common, target);
    job->common.len = len;
    block_job_txn_add_job(txn, &job->common);

    return &job->common;

 error:
    if (sync_bitmap) {
        bdrv_reclaim_dirty_bitmap(bs, sync_bitmap, NULL);
    }
    if (job) {
        backup_clean(&job->common);
        block_job_unref(&job->common);
    }

    return NULL;
}