/*
 * QEMU backup
 *
 * Copyright (C) 2013 Proxmox Server Solutions
 *
 * Authors:
 *  Dietmar Maurer (dietmar@proxmox.com)
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <stdio.h>
#include <errno.h>
#include <unistd.h>

#include "trace.h"
#include "block/block.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "qemu/ratelimit.h"

#define BACKUP_CLUSTER_BITS 16
#define BACKUP_CLUSTER_SIZE (1 << BACKUP_CLUSTER_BITS)
#define BACKUP_SECTORS_PER_CLUSTER (BACKUP_CLUSTER_SIZE / BDRV_SECTOR_SIZE)

#define SLICE_TIME 100000000ULL /* ns */

typedef struct CowRequest {
    int64_t start;
    int64_t end;
    QLIST_ENTRY(CowRequest) list;
    CoQueue wait_queue; /* coroutines blocked on this request */
} CowRequest;

typedef struct BackupBlockJob {
    BlockJob common;
    BlockDriverState *target;
    /* bitmap for sync=dirty-bitmap */
    BdrvDirtyBitmap *sync_bitmap;
    MirrorSyncMode sync_mode;
    RateLimit limit;
    BlockdevOnError on_source_error;
    BlockdevOnError on_target_error;
    CoRwlock flush_rwlock;
    uint64_t sectors_read;
    HBitmap *bitmap;
    QLIST_HEAD(, CowRequest) inflight_reqs;
} BackupBlockJob;

/* See if in-flight requests overlap and wait for them to complete */
static void coroutine_fn wait_for_overlapping_requests(BackupBlockJob *job,
                                                       int64_t start,
                                                       int64_t end)
{
    CowRequest *req;
    bool retry;

    do {
        retry = false;
        QLIST_FOREACH(req, &job->inflight_reqs, list) {
            if (end > req->start && start < req->end) {
                qemu_co_queue_wait(&req->wait_queue);
                retry = true;
                break;
            }
        }
    } while (retry);
}

/* Keep track of an in-flight request */
static void cow_request_begin(CowRequest *req, BackupBlockJob *job,
                              int64_t start, int64_t end)
{
    req->start = start;
    req->end = end;
    qemu_co_queue_init(&req->wait_queue);
    QLIST_INSERT_HEAD(&job->inflight_reqs, req, list);
}

/* Forget about a completed request */
static void cow_request_end(CowRequest *req)
{
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}
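
/* Copy-before-write helper, called both from the before_write notifier
 * (to preserve clusters the guest is about to overwrite) and from the
 * copy loops in backup_run().  It works at BACKUP_CLUSTER_SIZE
 * granularity; job->bitmap records clusters that were already copied, so
 * each cluster is read and written at most once.  Taking flush_rwlock
 * for reading lets backup_run() wait for all in-flight copies by briefly
 * acquiring the write lock before the job completes.
 */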
static int coroutine_fn backup_do_cow(BlockDriverState *bs,
                                      int64_t sector_num, int nb_sectors,
                                      bool *error_is_read)
{
    BackupBlockJob *job = (BackupBlockJob *)bs->job;
    CowRequest cow_request;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    void *bounce_buffer = NULL;
    int ret = 0;
    int64_t start, end;
    int n;

    qemu_co_rwlock_rdlock(&job->flush_rwlock);

    start = sector_num / BACKUP_SECTORS_PER_CLUSTER;
    end = DIV_ROUND_UP(sector_num + nb_sectors, BACKUP_SECTORS_PER_CLUSTER);

    trace_backup_do_cow_enter(job, start, sector_num, nb_sectors);

    wait_for_overlapping_requests(job, start, end);
    cow_request_begin(&cow_request, job, start, end);

    for (; start < end; start++) {
        if (hbitmap_get(job->bitmap, start)) {
            trace_backup_do_cow_skip(job, start);
            continue; /* already copied */
        }

        trace_backup_do_cow_process(job, start);

        n = MIN(BACKUP_SECTORS_PER_CLUSTER,
                job->common.len / BDRV_SECTOR_SIZE -
                start * BACKUP_SECTORS_PER_CLUSTER);

        if (!bounce_buffer) {
            bounce_buffer = qemu_blockalign(bs, BACKUP_CLUSTER_SIZE);
        }
        iov.iov_base = bounce_buffer;
        iov.iov_len = n * BDRV_SECTOR_SIZE;
        qemu_iovec_init_external(&bounce_qiov, &iov, 1);

        ret = bdrv_co_readv(bs, start * BACKUP_SECTORS_PER_CLUSTER, n,
                            &bounce_qiov);
        if (ret < 0) {
            trace_backup_do_cow_read_fail(job, start, ret);
            if (error_is_read) {
                *error_is_read = true;
            }
            goto out;
        }

        if (buffer_is_zero(iov.iov_base, iov.iov_len)) {
            ret = bdrv_co_write_zeroes(job->target,
                                       start * BACKUP_SECTORS_PER_CLUSTER,
                                       n, BDRV_REQ_MAY_UNMAP);
        } else {
            ret = bdrv_co_writev(job->target,
                                 start * BACKUP_SECTORS_PER_CLUSTER, n,
                                 &bounce_qiov);
        }
        if (ret < 0) {
            trace_backup_do_cow_write_fail(job, start, ret);
            if (error_is_read) {
                *error_is_read = false;
            }
            goto out;
        }

        hbitmap_set(job->bitmap, start, 1);

        /* Publish progress; guest I/O counts as progress too.  Note that
         * the offset field is an opaque progress value, not a disk offset.
         */
        job->sectors_read += n;
        job->common.offset += n * BDRV_SECTOR_SIZE;
    }

out:
    if (bounce_buffer) {
        qemu_vfree(bounce_buffer);
    }

    cow_request_end(&cow_request);

    trace_backup_do_cow_return(job, sector_num, nb_sectors, ret);

    qemu_co_rwlock_unlock(&job->flush_rwlock);

    return ret;
}

static int coroutine_fn backup_before_write_notify(
        NotifierWithReturn *notifier,
        void *opaque)
{
    BdrvTrackedRequest *req = opaque;
    int64_t sector_num = req->offset >> BDRV_SECTOR_BITS;
    int nb_sectors = req->bytes >> BDRV_SECTOR_BITS;

    assert((req->offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((req->bytes & (BDRV_SECTOR_SIZE - 1)) == 0);

    return backup_do_cow(req->bs, sector_num, nb_sectors, NULL);
}

static void backup_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);

    if (speed < 0) {
        error_set(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }
    ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
}

static void backup_iostatus_reset(BlockJob *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);

    bdrv_iostatus_reset(s->target);
}

static const BlockJobDriver backup_job_driver = {
    .instance_size  = sizeof(BackupBlockJob),
    .job_type       = BLOCK_JOB_TYPE_BACKUP,
    .set_speed      = backup_set_speed,
    .iostatus_reset = backup_iostatus_reset,
};

static BlockErrorAction backup_error_action(BackupBlockJob *job,
                                            bool read, int error)
{
    if (read) {
        return block_job_error_action(&job->common, job->common.bs,
                                      job->on_source_error, true, error);
    } else {
        return block_job_error_action(&job->common, job->target,
                                      job->on_target_error, false, error);
    }
}

typedef struct {
    int ret;
} BackupCompleteData;

static void backup_complete(BlockJob *job, void *opaque)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
    BackupCompleteData *data = opaque;

    bdrv_unref(s->target);

    block_job_completed(job, data->ret);
    g_free(data);
}
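
/* Throttling and cancellation check for the sequential copy loops below.
 * Returns true if the job has been cancelled.  Even with no speed limit
 * configured we still sleep for 0 ns: the coroutine has to yield
 * periodically so that cancellation and bdrv_drain_all() can make
 * progress.
 */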
static bool coroutine_fn yield_and_check(BackupBlockJob *job)
{
    if (block_job_is_cancelled(&job->common)) {
        return true;
    }

    /* We need to yield so that bdrv_drain_all() returns; without the
     * yield, the VM does not reboot.
     */
    if (job->common.speed) {
        uint64_t delay_ns = ratelimit_calculate_delay(&job->limit,
                                                      job->sectors_read);
        job->sectors_read = 0;
        block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, delay_ns);
    } else {
        block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, 0);
    }

    if (block_job_is_cancelled(&job->common)) {
        return true;
    }

    return false;
}

static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
{
    bool error_is_read;
    int ret = 0;
    int clusters_per_iter;
    uint32_t granularity;
    int64_t sector;
    int64_t cluster;
    int64_t end;
    int64_t last_cluster = -1;
    BlockDriverState *bs = job->common.bs;
    HBitmapIter hbi;

    granularity = bdrv_dirty_bitmap_granularity(job->sync_bitmap);
    clusters_per_iter = MAX((granularity / BACKUP_CLUSTER_SIZE), 1);
    bdrv_dirty_iter_init(job->sync_bitmap, &hbi);

    /* Find the next dirty sector(s) */
    while ((sector = hbitmap_iter_next(&hbi)) != -1) {
        cluster = sector / BACKUP_SECTORS_PER_CLUSTER;

        /* Fake progress updates for any clusters we skipped */
        if (cluster != last_cluster + 1) {
            job->common.offset += ((cluster - last_cluster - 1) *
                                   BACKUP_CLUSTER_SIZE);
        }

        for (end = cluster + clusters_per_iter; cluster < end; cluster++) {
            do {
                if (yield_and_check(job)) {
                    return ret;
                }
                ret = backup_do_cow(bs, cluster * BACKUP_SECTORS_PER_CLUSTER,
                                    BACKUP_SECTORS_PER_CLUSTER, &error_is_read);
                if ((ret < 0) &&
                    backup_error_action(job, error_is_read, -ret) ==
                    BLOCK_ERROR_ACTION_REPORT) {
                    return ret;
                }
            } while (ret < 0);
        }

        /* If the bitmap granularity is smaller than the backup granularity,
         * we need to advance the iterator pointer to the next cluster. */
        if (granularity < BACKUP_CLUSTER_SIZE) {
            bdrv_set_dirty_iter(&hbi, cluster * BACKUP_SECTORS_PER_CLUSTER);
        }

        last_cluster = cluster - 1;
    }

    /* Play some final catchup with the progress meter */
    end = DIV_ROUND_UP(job->common.len, BACKUP_CLUSTER_SIZE);
    if (last_cluster + 1 < end) {
        job->common.offset += ((end - last_cluster - 1) * BACKUP_CLUSTER_SIZE);
    }

    return ret;
}
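
/* Main job coroutine.  MIRROR_SYNC_MODE_NONE merely parks here and lets
 * the before_write notifier copy out clusters as the guest dirties them;
 * MIRROR_SYNC_MODE_DIRTY_BITMAP walks the user-provided dirty bitmap;
 * FULL and TOP scan the whole device linearly, with TOP skipping
 * clusters that are unallocated in the topmost image.
 */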
static void coroutine_fn backup_run(void *opaque)
{
    BackupBlockJob *job = opaque;
    BackupCompleteData *data;
    BlockDriverState *bs = job->common.bs;
    BlockDriverState *target = job->target;
    BlockdevOnError on_target_error = job->on_target_error;
    NotifierWithReturn before_write = {
        .notify = backup_before_write_notify,
    };
    int64_t start, end;
    int ret = 0;

    QLIST_INIT(&job->inflight_reqs);
    qemu_co_rwlock_init(&job->flush_rwlock);

    start = 0;
    end = DIV_ROUND_UP(job->common.len, BACKUP_CLUSTER_SIZE);

    job->bitmap = hbitmap_alloc(end, 0);

    bdrv_set_enable_write_cache(target, true);
    bdrv_set_on_error(target, on_target_error, on_target_error);
    bdrv_iostatus_enable(target);

    bdrv_add_before_write_notifier(bs, &before_write);

    if (job->sync_mode == MIRROR_SYNC_MODE_NONE) {
        while (!block_job_is_cancelled(&job->common)) {
            /* Yield until the job is cancelled.  We just let our
             * before_write notify callback service CoW requests.
             */
            job->common.busy = false;
            qemu_coroutine_yield();
            job->common.busy = true;
        }
    } else if (job->sync_mode == MIRROR_SYNC_MODE_DIRTY_BITMAP) {
        ret = backup_run_incremental(job);
    } else {
        /* Both FULL and TOP sync modes require copying. */
        for (; start < end; start++) {
            bool error_is_read;
            if (yield_and_check(job)) {
                break;
            }

            if (job->sync_mode == MIRROR_SYNC_MODE_TOP) {
                int i, n;
                int alloced = 0;

                /* Check to see if these blocks are already in the
                 * backing file. */

                for (i = 0; i < BACKUP_SECTORS_PER_CLUSTER;) {
                    /* bdrv_is_allocated() only returns true/false based
                     * on the first set of sectors it comes across that
                     * are all in the same state.
                     * For that reason we must verify each sector in the
                     * backup cluster length.  We end up copying more than
                     * needed, but at some point that is always the case. */
                    alloced =
                        bdrv_is_allocated(bs,
                                          start * BACKUP_SECTORS_PER_CLUSTER + i,
                                          BACKUP_SECTORS_PER_CLUSTER - i, &n);
                    i += n;

                    if (alloced == 1 || n == 0) {
                        break;
                    }
                }

                /* If the above loop never found any sectors that are in
                 * the topmost image, skip this cluster. */
                if (alloced == 0) {
                    continue;
                }
            }
            /* In FULL sync mode we copy the whole drive. */
            ret = backup_do_cow(bs, start * BACKUP_SECTORS_PER_CLUSTER,
                                BACKUP_SECTORS_PER_CLUSTER, &error_is_read);
            if (ret < 0) {
                /* Depending on error action, fail now or retry cluster */
                BlockErrorAction action =
                    backup_error_action(job, error_is_read, -ret);
                if (action == BLOCK_ERROR_ACTION_REPORT) {
                    break;
                } else {
                    start--;
                    continue;
                }
            }
        }
    }

    notifier_with_return_remove(&before_write);

    /* Wait until pending backup_do_cow() calls have completed */
    qemu_co_rwlock_wrlock(&job->flush_rwlock);
    qemu_co_rwlock_unlock(&job->flush_rwlock);

    if (job->sync_bitmap) {
        BdrvDirtyBitmap *bm;
        if (ret < 0) {
            /* Merge the successor back into the parent, delete nothing. */
            bm = bdrv_reclaim_dirty_bitmap(bs, job->sync_bitmap, NULL);
            assert(bm);
        } else {
            /* Everything is fine, delete this bitmap and install the
             * backup. */
            bm = bdrv_dirty_bitmap_abdicate(bs, job->sync_bitmap, NULL);
            assert(bm);
        }
    }
    hbitmap_free(job->bitmap);

    bdrv_iostatus_disable(target);
    bdrv_op_unblock_all(target, job->common.blocker);

    data = g_malloc(sizeof(*data));
    data->ret = ret;
    block_job_defer_to_main_loop(&job->common, backup_complete, data);
}
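
/* Entry point for the backup job, called from the monitor code (e.g.
 * qmp_drive_backup).  Validates source and target, creates the block job
 * and starts backup_run() in a coroutine.  For sync=dirty-bitmap the
 * caller's bitmap is frozen by installing a successor; backup_run()
 * later merges the successor back on failure, or abdicates in its
 * favor on success.
 */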
void backup_start(BlockDriverState *bs, BlockDriverState *target,
                  int64_t speed, MirrorSyncMode sync_mode,
                  BdrvDirtyBitmap *sync_bitmap,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  BlockCompletionFunc *cb, void *opaque,
                  Error **errp)
{
    int64_t len;

    assert(bs);
    assert(target);
    assert(cb);

    if (bs == target) {
        error_setg(errp, "Source and target cannot be the same");
        return;
    }

    if ((on_source_error == BLOCKDEV_ON_ERROR_STOP ||
         on_source_error == BLOCKDEV_ON_ERROR_ENOSPC) &&
        !bdrv_iostatus_is_enabled(bs)) {
        error_set(errp, QERR_INVALID_PARAMETER, "on-source-error");
        return;
    }

    if (!bdrv_is_inserted(bs)) {
        error_setg(errp, "Device is not inserted: %s",
                   bdrv_get_device_name(bs));
        return;
    }

    if (!bdrv_is_inserted(target)) {
        error_setg(errp, "Device is not inserted: %s",
                   bdrv_get_device_name(target));
        return;
    }

    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_BACKUP_SOURCE, errp)) {
        return;
    }

    if (bdrv_op_is_blocked(target, BLOCK_OP_TYPE_BACKUP_TARGET, errp)) {
        return;
    }

    if (sync_mode == MIRROR_SYNC_MODE_DIRTY_BITMAP) {
        if (!sync_bitmap) {
            error_setg(errp, "must provide a valid bitmap name for "
                             "\"dirty-bitmap\" sync mode");
            return;
        }

        /* Create a new bitmap, and freeze/disable this one. */
        if (bdrv_dirty_bitmap_create_successor(bs, sync_bitmap, errp) < 0) {
            return;
        }
    } else if (sync_bitmap) {
        error_setg(errp,
                   "a sync_bitmap was provided to backup_run, "
                   "but received an incompatible sync_mode (%s)",
                   MirrorSyncMode_lookup[sync_mode]);
        return;
    }

    len = bdrv_getlength(bs);
    if (len < 0) {
        error_setg_errno(errp, -len, "unable to get length for '%s'",
                         bdrv_get_device_name(bs));
        goto error;
    }

    BackupBlockJob *job = block_job_create(&backup_job_driver, bs, speed,
                                           cb, opaque, errp);
    if (!job) {
        goto error;
    }

    bdrv_op_block_all(target, job->common.blocker);

    job->on_source_error = on_source_error;
    job->on_target_error = on_target_error;
    job->target = target;
    job->sync_mode = sync_mode;
    job->sync_bitmap = sync_mode == MIRROR_SYNC_MODE_DIRTY_BITMAP ?
                       sync_bitmap : NULL;
    job->common.len = len;
    job->common.co = qemu_coroutine_create(backup_run);
    qemu_coroutine_enter(job->common.co, job);
    return;

 error:
    if (sync_bitmap) {
        bdrv_reclaim_dirty_bitmap(bs, sync_bitmap, NULL);
    }
}