/*
 * QEMU backup
 *
 * Copyright (C) 2013 Proxmox Server Solutions
 *
 * Authors:
 *  Dietmar Maurer (dietmar@proxmox.com)
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "block/block.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "sysemu/block-backend.h"

#define BACKUP_CLUSTER_SIZE_DEFAULT (1 << 16)
#define SLICE_TIME 100000000ULL /* ns */

typedef struct CowRequest {
    int64_t start;
    int64_t end;
    QLIST_ENTRY(CowRequest) list;
    CoQueue wait_queue; /* coroutines blocked on this request */
} CowRequest;

typedef struct BackupBlockJob {
    BlockJob common;
    BlockDriverState *target;
    /* bitmap for sync=incremental */
    BdrvDirtyBitmap *sync_bitmap;
    MirrorSyncMode sync_mode;
    RateLimit limit;
    BlockdevOnError on_source_error;
    BlockdevOnError on_target_error;
    CoRwlock flush_rwlock;
    uint64_t sectors_read;
    HBitmap *bitmap;
    int64_t cluster_size;
    QLIST_HEAD(, CowRequest) inflight_reqs;
} BackupBlockJob;

/* Size of a cluster in sectors, instead of bytes. */
static inline int64_t cluster_size_sectors(BackupBlockJob *job)
{
    return job->cluster_size / BDRV_SECTOR_SIZE;
}

/* See if in-flight requests overlap and wait for them to complete */
static void coroutine_fn wait_for_overlapping_requests(BackupBlockJob *job,
                                                       int64_t start,
                                                       int64_t end)
{
    CowRequest *req;
    bool retry;

    do {
        retry = false;
        QLIST_FOREACH(req, &job->inflight_reqs, list) {
            if (end > req->start && start < req->end) {
                qemu_co_queue_wait(&req->wait_queue);
                retry = true;
                break;
            }
        }
    } while (retry);
}

/* Keep track of an in-flight request */
static void cow_request_begin(CowRequest *req, BackupBlockJob *job,
                              int64_t start, int64_t end)
{
    req->start = start;
    req->end = end;
    qemu_co_queue_init(&req->wait_queue);
    QLIST_INSERT_HEAD(&job->inflight_reqs, req, list);
}

/* Forget about a completed request */
static void cow_request_end(CowRequest *req)
{
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}
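/*
 * Note: the three helpers above implement a simple range lock over cluster
 * indices.  A coroutine that wants to copy clusters [start, end) first waits
 * for any overlapping in-flight request (two half-open ranges overlap iff
 * end > req->start && start < req->end), then publishes its own CowRequest so
 * later requests queue behind it, and finally wakes all waiters from
 * cow_request_end().  Woken coroutines re-scan the whole list, which is why
 * wait_for_overlapping_requests() loops until no overlap remains.
 */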
static int coroutine_fn backup_do_cow(BlockDriverState *bs,
                                      int64_t sector_num, int nb_sectors,
                                      bool *error_is_read,
                                      bool is_write_notifier)
{
    BackupBlockJob *job = (BackupBlockJob *)bs->job;
    CowRequest cow_request;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    void *bounce_buffer = NULL;
    int ret = 0;
    int64_t sectors_per_cluster = cluster_size_sectors(job);
    int64_t start, end;
    int n;

    qemu_co_rwlock_rdlock(&job->flush_rwlock);

    start = sector_num / sectors_per_cluster;
    end = DIV_ROUND_UP(sector_num + nb_sectors, sectors_per_cluster);

    trace_backup_do_cow_enter(job, start, sector_num, nb_sectors);

    wait_for_overlapping_requests(job, start, end);
    cow_request_begin(&cow_request, job, start, end);

    for (; start < end; start++) {
        if (hbitmap_get(job->bitmap, start)) {
            trace_backup_do_cow_skip(job, start);
            continue; /* already copied */
        }

        trace_backup_do_cow_process(job, start);

        n = MIN(sectors_per_cluster,
                job->common.len / BDRV_SECTOR_SIZE -
                start * sectors_per_cluster);

        if (!bounce_buffer) {
            bounce_buffer = qemu_blockalign(bs, job->cluster_size);
        }
        iov.iov_base = bounce_buffer;
        iov.iov_len = n * BDRV_SECTOR_SIZE;
        qemu_iovec_init_external(&bounce_qiov, &iov, 1);

        if (is_write_notifier) {
            ret = bdrv_co_readv_no_serialising(bs,
                                               start * sectors_per_cluster,
                                               n, &bounce_qiov);
        } else {
            ret = bdrv_co_readv(bs, start * sectors_per_cluster, n,
                                &bounce_qiov);
        }
        if (ret < 0) {
            trace_backup_do_cow_read_fail(job, start, ret);
            if (error_is_read) {
                *error_is_read = true;
            }
            goto out;
        }

        if (buffer_is_zero(iov.iov_base, iov.iov_len)) {
            ret = bdrv_co_write_zeroes(job->target,
                                       start * sectors_per_cluster,
                                       n, BDRV_REQ_MAY_UNMAP);
        } else {
            ret = bdrv_co_writev(job->target,
                                 start * sectors_per_cluster, n,
                                 &bounce_qiov);
        }
        if (ret < 0) {
            trace_backup_do_cow_write_fail(job, start, ret);
            if (error_is_read) {
                *error_is_read = false;
            }
            goto out;
        }

        hbitmap_set(job->bitmap, start, 1);

        /* Publish progress; guest I/O counts as progress too.  Note that the
         * offset field is an opaque progress value; it is not a disk offset.
         */
        job->sectors_read += n;
        job->common.offset += n * BDRV_SECTOR_SIZE;
    }

out:
    if (bounce_buffer) {
        qemu_vfree(bounce_buffer);
    }

    cow_request_end(&cow_request);

    trace_backup_do_cow_return(job, sector_num, nb_sectors, ret);

    qemu_co_rwlock_unlock(&job->flush_rwlock);

    return ret;
}

static int coroutine_fn backup_before_write_notify(
        NotifierWithReturn *notifier,
        void *opaque)
{
    BdrvTrackedRequest *req = opaque;
    int64_t sector_num = req->offset >> BDRV_SECTOR_BITS;
    int nb_sectors = req->bytes >> BDRV_SECTOR_BITS;

    assert((req->offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((req->bytes & (BDRV_SECTOR_SIZE - 1)) == 0);

    return backup_do_cow(req->bs, sector_num, nb_sectors, NULL, true);
}

static void backup_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);

    if (speed < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }
    ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
}

static void backup_iostatus_reset(BlockJob *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);

    if (s->target->blk) {
        blk_iostatus_reset(s->target->blk);
    }
}

static void backup_cleanup_sync_bitmap(BackupBlockJob *job, int ret)
{
    BdrvDirtyBitmap *bm;
    BlockDriverState *bs = job->common.bs;

    if (ret < 0 || block_job_is_cancelled(&job->common)) {
        /* Merge the successor back into the parent, delete nothing. */
        bm = bdrv_reclaim_dirty_bitmap(bs, job->sync_bitmap, NULL);
        assert(bm);
    } else {
        /* Everything is fine, delete this bitmap and install the backup. */
        bm = bdrv_dirty_bitmap_abdicate(bs, job->sync_bitmap, NULL);
        assert(bm);
    }
}
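/*
 * Transaction hooks for sync=incremental.  While the job runs, sync_bitmap is
 * frozen and a successor bitmap collects new writes.  On commit the successor
 * replaces the original, so the next incremental backup copies only what
 * changed during this one; on abort (error or cancel) the successor is merged
 * back into the parent, so no dirty information is lost.
 */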
static void backup_commit(BlockJob *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
    if (s->sync_bitmap) {
        backup_cleanup_sync_bitmap(s, 0);
    }
}

static void backup_abort(BlockJob *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
    if (s->sync_bitmap) {
        backup_cleanup_sync_bitmap(s, -1);
    }
}

static const BlockJobDriver backup_job_driver = {
    .instance_size  = sizeof(BackupBlockJob),
    .job_type       = BLOCK_JOB_TYPE_BACKUP,
    .set_speed      = backup_set_speed,
    .iostatus_reset = backup_iostatus_reset,
    .commit         = backup_commit,
    .abort          = backup_abort,
};

static BlockErrorAction backup_error_action(BackupBlockJob *job,
                                            bool read, int error)
{
    if (read) {
        return block_job_error_action(&job->common, job->common.bs,
                                      job->on_source_error, true, error);
    } else {
        return block_job_error_action(&job->common, job->target,
                                      job->on_target_error, false, error);
    }
}

typedef struct {
    int ret;
} BackupCompleteData;

static void backup_complete(BlockJob *job, void *opaque)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
    BackupCompleteData *data = opaque;

    bdrv_unref(s->target);

    block_job_completed(job, data->ret);
    g_free(data);
}

static bool coroutine_fn yield_and_check(BackupBlockJob *job)
{
    if (block_job_is_cancelled(&job->common)) {
        return true;
    }

    /* We need to yield so that bdrv_drain_all() returns.
     * (Without this, the VM does not reboot.)
     */
    if (job->common.speed) {
        uint64_t delay_ns = ratelimit_calculate_delay(&job->limit,
                                                      job->sectors_read);
        job->sectors_read = 0;
        block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, delay_ns);
    } else {
        block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, 0);
    }

    if (block_job_is_cancelled(&job->common)) {
        return true;
    }

    return false;
}
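/*
 * Note on units: the dirty bitmap iterator below yields sector numbers, while
 * job->bitmap and backup_do_cow() operate on whole clusters.  As a worked
 * example with the 64 KiB default cluster size and a 64 KiB bitmap
 * granularity, clusters_per_iter = MAX(65536 / 65536, 1) = 1, so each dirty
 * bit maps to exactly one cluster; a coarser bitmap granularity makes every
 * iterator hit expand to several consecutive clusters.
 */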
static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
{
    bool error_is_read;
    int ret = 0;
    int clusters_per_iter;
    uint32_t granularity;
    int64_t sector;
    int64_t cluster;
    int64_t end;
    int64_t last_cluster = -1;
    int64_t sectors_per_cluster = cluster_size_sectors(job);
    BlockDriverState *bs = job->common.bs;
    HBitmapIter hbi;

    granularity = bdrv_dirty_bitmap_granularity(job->sync_bitmap);
    clusters_per_iter = MAX((granularity / job->cluster_size), 1);
    bdrv_dirty_iter_init(job->sync_bitmap, &hbi);

    /* Find the next dirty sector(s) */
    while ((sector = hbitmap_iter_next(&hbi)) != -1) {
        cluster = sector / sectors_per_cluster;

        /* Fake progress updates for any clusters we skipped */
        if (cluster != last_cluster + 1) {
            job->common.offset += ((cluster - last_cluster - 1) *
                                   job->cluster_size);
        }

        for (end = cluster + clusters_per_iter; cluster < end; cluster++) {
            do {
                if (yield_and_check(job)) {
                    return ret;
                }
                ret = backup_do_cow(bs, cluster * sectors_per_cluster,
                                    sectors_per_cluster, &error_is_read,
                                    false);
                if ((ret < 0) &&
                    backup_error_action(job, error_is_read, -ret) ==
                    BLOCK_ERROR_ACTION_REPORT) {
                    return ret;
                }
            } while (ret < 0);
        }

        /* If the bitmap granularity is smaller than the backup granularity,
         * we need to advance the iterator pointer to the next cluster. */
        if (granularity < job->cluster_size) {
            bdrv_set_dirty_iter(&hbi, cluster * sectors_per_cluster);
        }

        last_cluster = cluster - 1;
    }

    /* Play some final catchup with the progress meter */
    end = DIV_ROUND_UP(job->common.len, job->cluster_size);
    if (last_cluster + 1 < end) {
        job->common.offset += ((end - last_cluster - 1) * job->cluster_size);
    }

    return ret;
}
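/*
 * Main job coroutine.  Whatever the sync mode, a before-write notifier is
 * installed on the source so that guest writes trigger backup_do_cow() for
 * the affected clusters before new data reaches the disk; this is what gives
 * the target its point-in-time view.  sync=none relies on the notifier alone,
 * sync=incremental walks the dirty bitmap, and sync=top/full scans every
 * cluster.  Taking flush_rwlock for writing at the end drains any CoW
 * requests still running under the read lock.
 */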
static void coroutine_fn backup_run(void *opaque)
{
    BackupBlockJob *job = opaque;
    BackupCompleteData *data;
    BlockDriverState *bs = job->common.bs;
    BlockDriverState *target = job->target;
    BlockdevOnError on_target_error = job->on_target_error;
    NotifierWithReturn before_write = {
        .notify = backup_before_write_notify,
    };
    int64_t start, end;
    int64_t sectors_per_cluster = cluster_size_sectors(job);
    int ret = 0;

    QLIST_INIT(&job->inflight_reqs);
    qemu_co_rwlock_init(&job->flush_rwlock);

    start = 0;
    end = DIV_ROUND_UP(job->common.len, job->cluster_size);

    job->bitmap = hbitmap_alloc(end, 0);

    bdrv_set_enable_write_cache(target, true);
    if (target->blk) {
        blk_set_on_error(target->blk, on_target_error, on_target_error);
        blk_iostatus_enable(target->blk);
    }

    bdrv_add_before_write_notifier(bs, &before_write);

    if (job->sync_mode == MIRROR_SYNC_MODE_NONE) {
        while (!block_job_is_cancelled(&job->common)) {
            /* Yield until the job is cancelled.  We just let our before_write
             * notify callback service CoW requests. */
            job->common.busy = false;
            qemu_coroutine_yield();
            job->common.busy = true;
        }
    } else if (job->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        ret = backup_run_incremental(job);
    } else {
        /* Both FULL and TOP sync modes require copying. */
        for (; start < end; start++) {
            bool error_is_read;
            if (yield_and_check(job)) {
                break;
            }

            if (job->sync_mode == MIRROR_SYNC_MODE_TOP) {
                int i, n;
                int alloced = 0;

                /* Check to see if these blocks are already in the
                 * backing file. */

                for (i = 0; i < sectors_per_cluster;) {
                    /* bdrv_is_allocated() only returns true/false based
                     * on the first set of sectors it comes across that
                     * are all in the same state.
                     * For that reason we must verify each sector in the
                     * backup cluster length.  We end up copying more than
                     * needed but at some point that is always the case. */
                    alloced =
                        bdrv_is_allocated(bs,
                                          start * sectors_per_cluster + i,
                                          sectors_per_cluster - i, &n);
                    i += n;

                    if (alloced == 1 || n == 0) {
                        break;
                    }
                }

                /* If the above loop never found any sectors that are in
                 * the topmost image, skip this backup. */
                if (alloced == 0) {
                    continue;
                }
            }
            /* In FULL sync mode we copy the whole drive. */
            ret = backup_do_cow(bs, start * sectors_per_cluster,
                                sectors_per_cluster, &error_is_read, false);
            if (ret < 0) {
                /* Depending on error action, fail now or retry cluster */
                BlockErrorAction action =
                    backup_error_action(job, error_is_read, -ret);
                if (action == BLOCK_ERROR_ACTION_REPORT) {
                    break;
                } else {
                    start--;
                    continue;
                }
            }
        }
    }

    notifier_with_return_remove(&before_write);

    /* Wait until pending backup_do_cow() calls have completed. */
    qemu_co_rwlock_wrlock(&job->flush_rwlock);
    qemu_co_rwlock_unlock(&job->flush_rwlock);
    hbitmap_free(job->bitmap);

    if (target->blk) {
        blk_iostatus_disable(target->blk);
    }
    bdrv_op_unblock_all(target, job->common.blocker);

    data = g_malloc(sizeof(*data));
    data->ret = ret;
    block_job_defer_to_main_loop(&job->common, backup_complete, data);
}
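/*
 * Entry point, invoked from the drive-backup/blockdev-backup commands.  The
 * body below is mostly validation: source and target must be distinct,
 * inserted, and not blocked for backup use; sync=incremental additionally
 * requires a bitmap, which is frozen here so the job sees a stable set of
 * dirty bits while new guest writes accumulate in its successor.
 */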
void backup_start(BlockDriverState *bs, BlockDriverState *target,
                  int64_t speed, MirrorSyncMode sync_mode,
                  BdrvDirtyBitmap *sync_bitmap,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  BlockCompletionFunc *cb, void *opaque,
                  BlockJobTxn *txn, Error **errp)
{
    int64_t len;
    BlockDriverInfo bdi;
    int ret;

    assert(bs);
    assert(target);
    assert(cb);

    if (bs == target) {
        error_setg(errp, "Source and target cannot be the same");
        return;
    }

    if ((on_source_error == BLOCKDEV_ON_ERROR_STOP ||
         on_source_error == BLOCKDEV_ON_ERROR_ENOSPC) &&
        (!bs->blk || !blk_iostatus_is_enabled(bs->blk))) {
        error_setg(errp, QERR_INVALID_PARAMETER, "on-source-error");
        return;
    }

    if (!bdrv_is_inserted(bs)) {
        error_setg(errp, "Device is not inserted: %s",
                   bdrv_get_device_name(bs));
        return;
    }

    if (!bdrv_is_inserted(target)) {
        error_setg(errp, "Device is not inserted: %s",
                   bdrv_get_device_name(target));
        return;
    }

    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_BACKUP_SOURCE, errp)) {
        return;
    }

    if (bdrv_op_is_blocked(target, BLOCK_OP_TYPE_BACKUP_TARGET, errp)) {
        return;
    }

    if (sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        if (!sync_bitmap) {
            error_setg(errp, "must provide a valid bitmap name for "
                             "\"incremental\" sync mode");
            return;
        }

        /* Create a new bitmap, and freeze/disable this one. */
        if (bdrv_dirty_bitmap_create_successor(bs, sync_bitmap, errp) < 0) {
            return;
        }
    } else if (sync_bitmap) {
        error_setg(errp,
                   "a sync_bitmap was provided to backup_run, "
                   "but received an incompatible sync_mode (%s)",
                   MirrorSyncMode_lookup[sync_mode]);
        return;
    }

    len = bdrv_getlength(bs);
    if (len < 0) {
        error_setg_errno(errp, -len, "unable to get length for '%s'",
                         bdrv_get_device_name(bs));
        goto error;
    }

    BackupBlockJob *job = block_job_create(&backup_job_driver, bs, speed,
                                           cb, opaque, errp);
    if (!job) {
        goto error;
    }

    job->on_source_error = on_source_error;
    job->on_target_error = on_target_error;
    job->target = target;
    job->sync_mode = sync_mode;
    job->sync_bitmap = sync_mode == MIRROR_SYNC_MODE_INCREMENTAL ?
                       sync_bitmap : NULL;

    /* If there is no backing file on the target, we cannot rely on COW if our
     * backup cluster size is smaller than the target cluster size.  Even for
     * targets with a backing file, try to avoid COW if possible. */
    ret = bdrv_get_info(job->target, &bdi);
    if (ret < 0 && !target->backing) {
        error_setg_errno(errp, -ret,
            "Couldn't determine the cluster size of the target image, "
            "which has no backing file");
        error_append_hint(errp,
            "Aborting, since this may create an unusable destination image\n");
        goto error;
    } else if (ret < 0 && target->backing) {
        /* Not fatal; just trudge on ahead. */
        job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
    } else {
        job->cluster_size = MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
    }

    bdrv_op_block_all(target, job->common.blocker);
    job->common.len = len;
    job->common.co = qemu_coroutine_create(backup_run);
    block_job_txn_add_job(txn, &job->common);
    qemu_coroutine_enter(job->common.co, job);
    return;

 error:
    if (sync_bitmap) {
        bdrv_reclaim_dirty_bitmap(bs, sync_bitmap, NULL);
    }
}