/*
 * QEMU backup
 *
 * Copyright (C) 2013 Proxmox Server Solutions
 *
 * Authors:
 *  Dietmar Maurer (dietmar@proxmox.com)
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "block/block.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "sysemu/block-backend.h"
#include "qemu/bitmap.h"

#define BACKUP_CLUSTER_SIZE_DEFAULT (1 << 16)
#define SLICE_TIME 100000000ULL /* ns */

typedef struct CowRequest {
    int64_t start;
    int64_t end;
    QLIST_ENTRY(CowRequest) list;
    CoQueue wait_queue; /* coroutines blocked on this request */
} CowRequest;

typedef struct BackupBlockJob {
    BlockJob common;
    BlockDriverState *target;
    /* bitmap for sync=incremental */
    BdrvDirtyBitmap *sync_bitmap;
    MirrorSyncMode sync_mode;
    RateLimit limit;
    BlockdevOnError on_source_error;
    BlockdevOnError on_target_error;
    CoRwlock flush_rwlock;
    uint64_t sectors_read;
    unsigned long *done_bitmap;
    int64_t cluster_size;
    QLIST_HEAD(, CowRequest) inflight_reqs;
} BackupBlockJob;

/* Size of a cluster in sectors, instead of bytes. */
static inline int64_t cluster_size_sectors(BackupBlockJob *job)
{
    return job->cluster_size / BDRV_SECTOR_SIZE;
}

/* See if in-flight requests overlap and wait for them to complete */
static void coroutine_fn wait_for_overlapping_requests(BackupBlockJob *job,
                                                       int64_t start,
                                                       int64_t end)
{
    CowRequest *req;
    bool retry;

    do {
        retry = false;
        QLIST_FOREACH(req, &job->inflight_reqs, list) {
            if (end > req->start && start < req->end) {
                qemu_co_queue_wait(&req->wait_queue);
                retry = true;
                break;
            }
        }
    } while (retry);
}

/* Keep track of an in-flight request */
static void cow_request_begin(CowRequest *req, BackupBlockJob *job,
                              int64_t start, int64_t end)
{
    req->start = start;
    req->end = end;
    qemu_co_queue_init(&req->wait_queue);
    QLIST_INSERT_HEAD(&job->inflight_reqs, req, list);
}

/* Forget about a completed request */
static void cow_request_end(CowRequest *req)
{
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}

static int coroutine_fn backup_do_cow(BlockDriverState *bs,
                                      int64_t sector_num, int nb_sectors,
                                      bool *error_is_read,
                                      bool is_write_notifier)
{
    BackupBlockJob *job = (BackupBlockJob *)bs->job;
    CowRequest cow_request;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    void *bounce_buffer = NULL;
    int ret = 0;
    int64_t sectors_per_cluster = cluster_size_sectors(job);
    int64_t start, end;
    int n;

    qemu_co_rwlock_rdlock(&job->flush_rwlock);

    start = sector_num / sectors_per_cluster;
    end = DIV_ROUND_UP(sector_num + nb_sectors, sectors_per_cluster);

    trace_backup_do_cow_enter(job, start, sector_num, nb_sectors);

    wait_for_overlapping_requests(job, start, end);
    cow_request_begin(&cow_request, job, start, end);

    for (; start < end; start++) {
        if (test_bit(start, job->done_bitmap)) {
            trace_backup_do_cow_skip(job, start);
            continue; /* already copied */
        }

        trace_backup_do_cow_process(job, start);

        n = MIN(sectors_per_cluster,
                job->common.len / BDRV_SECTOR_SIZE -
                start * sectors_per_cluster);

        if (!bounce_buffer) {
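            /* Allocate the bounce buffer lazily, on the first cluster that
             * actually needs copying; the same cluster-sized buffer is then
             * reused for every remaining cluster of this request. */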
            bounce_buffer = qemu_blockalign(bs, job->cluster_size);
        }
        iov.iov_base = bounce_buffer;
        iov.iov_len = n * BDRV_SECTOR_SIZE;
        qemu_iovec_init_external(&bounce_qiov, &iov, 1);

        if (is_write_notifier) {
            ret = bdrv_co_readv_no_serialising(bs,
                                               start * sectors_per_cluster,
                                               n, &bounce_qiov);
        } else {
            ret = bdrv_co_readv(bs, start * sectors_per_cluster, n,
                                &bounce_qiov);
        }
        if (ret < 0) {
            trace_backup_do_cow_read_fail(job, start, ret);
            if (error_is_read) {
                *error_is_read = true;
            }
            goto out;
        }

        if (buffer_is_zero(iov.iov_base, iov.iov_len)) {
            ret = bdrv_co_write_zeroes(job->target,
                                       start * sectors_per_cluster,
                                       n, BDRV_REQ_MAY_UNMAP);
        } else {
            ret = bdrv_co_writev(job->target,
                                 start * sectors_per_cluster, n,
                                 &bounce_qiov);
        }
        if (ret < 0) {
            trace_backup_do_cow_write_fail(job, start, ret);
            if (error_is_read) {
                *error_is_read = false;
            }
            goto out;
        }

        set_bit(start, job->done_bitmap);

        /* Publish progress; guest I/O counts as progress too.  Note that the
         * offset field is an opaque progress value and not a disk offset.
         */
        job->sectors_read += n;
        job->common.offset += n * BDRV_SECTOR_SIZE;
    }

out:
    if (bounce_buffer) {
        qemu_vfree(bounce_buffer);
    }

    cow_request_end(&cow_request);

    trace_backup_do_cow_return(job, sector_num, nb_sectors, ret);

    qemu_co_rwlock_unlock(&job->flush_rwlock);

    return ret;
}

static int coroutine_fn backup_before_write_notify(
        NotifierWithReturn *notifier,
        void *opaque)
{
    BdrvTrackedRequest *req = opaque;
    int64_t sector_num = req->offset >> BDRV_SECTOR_BITS;
    int nb_sectors = req->bytes >> BDRV_SECTOR_BITS;

    assert((req->offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((req->bytes & (BDRV_SECTOR_SIZE - 1)) == 0);

    return backup_do_cow(req->bs, sector_num, nb_sectors, NULL, true);
}

static void backup_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);

    if (speed < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }
    ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
}

static void backup_iostatus_reset(BlockJob *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);

    if (s->target->blk) {
        blk_iostatus_reset(s->target->blk);
    }
}

static void backup_cleanup_sync_bitmap(BackupBlockJob *job, int ret)
{
    BdrvDirtyBitmap *bm;
    BlockDriverState *bs = job->common.bs;

    if (ret < 0 || block_job_is_cancelled(&job->common)) {
        /* Merge the successor back into the parent, delete nothing. */
        bm = bdrv_reclaim_dirty_bitmap(bs, job->sync_bitmap, NULL);
        assert(bm);
    } else {
        /* Everything is fine; delete this bitmap and install the backup. */
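        /* Abdication releases this (frozen) bitmap and promotes its
         * successor, which has been recording the writes made while the
         * backup ran, so a later incremental backup picks up from here. */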
        bm = bdrv_dirty_bitmap_abdicate(bs, job->sync_bitmap, NULL);
        assert(bm);
    }
}

static void backup_commit(BlockJob *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
    if (s->sync_bitmap) {
        backup_cleanup_sync_bitmap(s, 0);
    }
}

static void backup_abort(BlockJob *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
    if (s->sync_bitmap) {
        backup_cleanup_sync_bitmap(s, -1);
    }
}

static const BlockJobDriver backup_job_driver = {
    .instance_size  = sizeof(BackupBlockJob),
    .job_type       = BLOCK_JOB_TYPE_BACKUP,
    .set_speed      = backup_set_speed,
    .iostatus_reset = backup_iostatus_reset,
    .commit         = backup_commit,
    .abort          = backup_abort,
};

static BlockErrorAction backup_error_action(BackupBlockJob *job,
                                            bool read, int error)
{
    if (read) {
        return block_job_error_action(&job->common, job->common.bs,
                                      job->on_source_error, true, error);
    } else {
        return block_job_error_action(&job->common, job->target,
                                      job->on_target_error, false, error);
    }
}

typedef struct {
    int ret;
} BackupCompleteData;

static void backup_complete(BlockJob *job, void *opaque)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
    BackupCompleteData *data = opaque;

    bdrv_unref(s->target);

    block_job_completed(job, data->ret);
    g_free(data);
}

static bool coroutine_fn yield_and_check(BackupBlockJob *job)
{
    if (block_job_is_cancelled(&job->common)) {
        return true;
    }

    /* We need to yield so that bdrv_drain_all() returns.
     * (Without the yield, the VM does not reboot.)
     */
    if (job->common.speed) {
        uint64_t delay_ns = ratelimit_calculate_delay(&job->limit,
                                                      job->sectors_read);
        job->sectors_read = 0;
        block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, delay_ns);
    } else {
        block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, 0);
    }

    if (block_job_is_cancelled(&job->common)) {
        return true;
    }

    return false;
}

static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
{
    bool error_is_read;
    int ret = 0;
    int clusters_per_iter;
    uint32_t granularity;
    int64_t sector;
    int64_t cluster;
    int64_t end;
    int64_t last_cluster = -1;
    int64_t sectors_per_cluster = cluster_size_sectors(job);
    BlockDriverState *bs = job->common.bs;
    HBitmapIter hbi;

    granularity = bdrv_dirty_bitmap_granularity(job->sync_bitmap);
    clusters_per_iter = MAX((granularity / job->cluster_size), 1);
    bdrv_dirty_iter_init(job->sync_bitmap, &hbi);

    /* Find the next dirty sector(s) */
    while ((sector = hbitmap_iter_next(&hbi)) != -1) {
        cluster = sector / sectors_per_cluster;

        /* Fake progress updates for any clusters we skipped */
        if (cluster != last_cluster + 1) {
            job->common.offset += ((cluster - last_cluster - 1) *
                                   job->cluster_size);
        }

        for (end = cluster + clusters_per_iter; cluster < end; cluster++) {
            do {
                if (yield_and_check(job)) {
                    return ret;
                }
                ret = backup_do_cow(bs, cluster * sectors_per_cluster,
                                    sectors_per_cluster, &error_is_read,
                                    false);
                if ((ret < 0) &&
                    backup_error_action(job, error_is_read, -ret) ==
                    BLOCK_ERROR_ACTION_REPORT) {
                    return ret;
                }
            } while (ret < 0);
        }

        /* If the bitmap granularity is smaller than the backup granularity,
         * we need to advance the iterator pointer to the next cluster. */
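        /* A cluster-sized copy can span several bitmap granules at once;
         * skipping the iterator forward avoids re-visiting (and re-copying)
         * granules that this pass already covered. */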
        if (granularity < job->cluster_size) {
            bdrv_set_dirty_iter(&hbi, cluster * sectors_per_cluster);
        }

        last_cluster = cluster - 1;
    }

    /* Play some final catchup with the progress meter */
    end = DIV_ROUND_UP(job->common.len, job->cluster_size);
    if (last_cluster + 1 < end) {
        job->common.offset += ((end - last_cluster - 1) * job->cluster_size);
    }

    return ret;
}

static void coroutine_fn backup_run(void *opaque)
{
    BackupBlockJob *job = opaque;
    BackupCompleteData *data;
    BlockDriverState *bs = job->common.bs;
    BlockDriverState *target = job->target;
    BlockdevOnError on_target_error = job->on_target_error;
    NotifierWithReturn before_write = {
        .notify = backup_before_write_notify,
    };
    int64_t start, end;
    int64_t sectors_per_cluster = cluster_size_sectors(job);
    int ret = 0;

    QLIST_INIT(&job->inflight_reqs);
    qemu_co_rwlock_init(&job->flush_rwlock);

    start = 0;
    end = DIV_ROUND_UP(job->common.len, job->cluster_size);

    job->done_bitmap = bitmap_new(end);

    bdrv_set_enable_write_cache(target, true);
    if (target->blk) {
        blk_set_on_error(target->blk, on_target_error, on_target_error);
        blk_iostatus_enable(target->blk);
    }

    bdrv_add_before_write_notifier(bs, &before_write);

    if (job->sync_mode == MIRROR_SYNC_MODE_NONE) {
        while (!block_job_is_cancelled(&job->common)) {
            /* Yield until the job is cancelled.  We just let our before_write
             * notify callback service CoW requests. */
            job->common.busy = false;
            qemu_coroutine_yield();
            job->common.busy = true;
        }
    } else if (job->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        ret = backup_run_incremental(job);
    } else {
        /* Both FULL and TOP sync modes require copying. */
        for (; start < end; start++) {
            bool error_is_read;
            if (yield_and_check(job)) {
                break;
            }

            if (job->sync_mode == MIRROR_SYNC_MODE_TOP) {
                int i, n;
                int alloced = 0;

                /* Check to see if these blocks are already in the
                 * backing file. */

                for (i = 0; i < sectors_per_cluster;) {
                    /* bdrv_is_allocated() only returns true/false based
                     * on the first set of sectors it comes across that
                     * are all in the same state.
                     * For that reason we must verify each sector in the
                     * backup cluster length.  We may end up copying more
                     * than needed, but at some granularity that is always
                     * the case. */
                    alloced =
                        bdrv_is_allocated(bs,
                                          start * sectors_per_cluster + i,
                                          sectors_per_cluster - i, &n);
                    i += n;

                    if (alloced == 1 || n == 0) {
                        break;
                    }
                }

                /* If the above loop never found any sectors that are in
                 * the topmost image, skip this backup. */
                if (alloced == 0) {
                    continue;
                }
            }
            /* In FULL mode we copy the whole drive; a TOP-mode cluster
             * reaches this point only if it is allocated. */
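            /* Both the sequential sweep here and the before_write notifier
             * funnel into backup_do_cow(); the job's done_bitmap ensures
             * each cluster is copied at most once, whichever path reaches
             * it first. */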
            ret = backup_do_cow(bs, start * sectors_per_cluster,
                                sectors_per_cluster, &error_is_read, false);
            if (ret < 0) {
                /* Depending on error action, fail now or retry cluster */
                BlockErrorAction action =
                    backup_error_action(job, error_is_read, -ret);
                if (action == BLOCK_ERROR_ACTION_REPORT) {
                    break;
                } else {
                    start--;
                    continue;
                }
            }
        }
    }

    notifier_with_return_remove(&before_write);

    /* wait until pending backup_do_cow() calls have completed */
    qemu_co_rwlock_wrlock(&job->flush_rwlock);
    qemu_co_rwlock_unlock(&job->flush_rwlock);
    g_free(job->done_bitmap);

    if (target->blk) {
        blk_iostatus_disable(target->blk);
    }
    bdrv_op_unblock_all(target, job->common.blocker);

    data = g_malloc(sizeof(*data));
    data->ret = ret;
    block_job_defer_to_main_loop(&job->common, backup_complete, data);
}

void backup_start(BlockDriverState *bs, BlockDriverState *target,
                  int64_t speed, MirrorSyncMode sync_mode,
                  BdrvDirtyBitmap *sync_bitmap,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  BlockCompletionFunc *cb, void *opaque,
                  BlockJobTxn *txn, Error **errp)
{
    int64_t len;
    BlockDriverInfo bdi;
    int ret;

    assert(bs);
    assert(target);
    assert(cb);

    if (bs == target) {
        error_setg(errp, "Source and target cannot be the same");
        return;
    }

    if ((on_source_error == BLOCKDEV_ON_ERROR_STOP ||
         on_source_error == BLOCKDEV_ON_ERROR_ENOSPC) &&
        (!bs->blk || !blk_iostatus_is_enabled(bs->blk))) {
        error_setg(errp, QERR_INVALID_PARAMETER, "on-source-error");
        return;
    }

    if (!bdrv_is_inserted(bs)) {
        error_setg(errp, "Device is not inserted: %s",
                   bdrv_get_device_name(bs));
        return;
    }

    if (!bdrv_is_inserted(target)) {
        error_setg(errp, "Device is not inserted: %s",
                   bdrv_get_device_name(target));
        return;
    }

    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_BACKUP_SOURCE, errp)) {
        return;
    }

    if (bdrv_op_is_blocked(target, BLOCK_OP_TYPE_BACKUP_TARGET, errp)) {
        return;
    }

    if (sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        if (!sync_bitmap) {
            error_setg(errp, "must provide a valid bitmap name for "
                             "\"incremental\" sync mode");
            return;
        }

        /* Create a new bitmap, and freeze/disable this one. */
        if (bdrv_dirty_bitmap_create_successor(bs, sync_bitmap, errp) < 0) {
            return;
        }
    } else if (sync_bitmap) {
        error_setg(errp,
                   "a sync_bitmap was provided to backup_start, "
                   "but an incompatible sync_mode (%s) was received",
                   MirrorSyncMode_lookup[sync_mode]);
        return;
    }

    len = bdrv_getlength(bs);
    if (len < 0) {
        error_setg_errno(errp, -len, "unable to get length for '%s'",
                         bdrv_get_device_name(bs));
        goto error;
    }

    BackupBlockJob *job = block_job_create(&backup_job_driver, bs, speed,
                                           cb, opaque, errp);
    if (!job) {
        goto error;
    }

    job->on_source_error = on_source_error;
    job->on_target_error = on_target_error;
    job->target = target;
    job->sync_mode = sync_mode;
    job->sync_bitmap = sync_mode == MIRROR_SYNC_MODE_INCREMENTAL ?
                       sync_bitmap : NULL;

    /* If there is no backing file on the target, we cannot rely on COW if our
     * backup cluster size is smaller than the target cluster size.  Even for
     * targets with a backing file, try to avoid COW if possible. */
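    /* Three outcomes below: an unknown cluster size on a target with no
     * backing file is fatal; an unknown cluster size with a backing file
     * falls back to the default; otherwise use the larger of the default
     * and the target's own cluster size. */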
    ret = bdrv_get_info(job->target, &bdi);
    if (ret < 0 && !target->backing) {
        error_setg_errno(errp, -ret,
            "Couldn't determine the cluster size of the target image, "
            "which has no backing file");
        error_append_hint(errp,
            "Aborting, since this may create an unusable destination image\n");
        goto error;
    } else if (ret < 0 && target->backing) {
        /* Not fatal; just trudge on ahead. */
        job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
    } else {
        job->cluster_size = MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
    }

    bdrv_op_block_all(target, job->common.blocker);
    job->common.len = len;
    job->common.co = qemu_coroutine_create(backup_run);
    block_job_txn_add_job(txn, &job->common);
    qemu_coroutine_enter(job->common.co, job);
    return;

error:
    if (sync_bitmap) {
        bdrv_reclaim_dirty_bitmap(bs, sync_bitmap, NULL);
    }
}
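
/*
 * A minimal sketch of a caller, for illustration only.  The real entry
 * points live in blockdev.c and perform considerably more setup and
 * validation; "source_bs", "target_bs" and "my_backup_cb" below are
 * hypothetical names:
 *
 *     Error *local_err = NULL;
 *
 *     backup_start(source_bs, target_bs,
 *                  0,                         // speed: 0 = unthrottled
 *                  MIRROR_SYNC_MODE_FULL,
 *                  NULL,                      // sync_bitmap: FULL needs none
 *                  BLOCKDEV_ON_ERROR_REPORT,
 *                  BLOCKDEV_ON_ERROR_REPORT,
 *                  my_backup_cb, source_bs,
 *                  NULL,                      // txn: no transaction
 *                  &local_err);
 *     if (local_err) {
 *         error_propagate(errp, local_err);
 *     }
 */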