/*
 * block_copy API
 *
 * Copyright (C) 2013 Proxmox Server Solutions
 * Copyright (c) 2019 Virtuozzo International GmbH.
 *
 * Authors:
 *  Dietmar Maurer (dietmar@proxmox.com)
 *  Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "qapi/error.h"
#include "block/block-copy.h"
#include "block/block_int-io.h"
#include "block/dirty-bitmap.h"
#include "block/reqlist.h"
#include "sysemu/block-backend.h"
#include "qemu/units.h"
#include "qemu/co-shared-resource.h"
#include "qemu/coroutine.h"
#include "qemu/ratelimit.h"
#include "block/aio_task.h"
#include "qemu/error-report.h"
#include "qemu/memalign.h"

#define BLOCK_COPY_MAX_COPY_RANGE (16 * MiB)
#define BLOCK_COPY_MAX_BUFFER (1 * MiB)
#define BLOCK_COPY_MAX_MEM (128 * MiB)
#define BLOCK_COPY_MAX_WORKERS 64
#define BLOCK_COPY_SLICE_TIME 100000000ULL /* ns */
#define BLOCK_COPY_CLUSTER_SIZE_DEFAULT (1 << 16)

typedef enum {
    COPY_READ_WRITE_CLUSTER,
    COPY_READ_WRITE,
    COPY_WRITE_ZEROES,
    COPY_RANGE_SMALL,
    COPY_RANGE_FULL
} BlockCopyMethod;

static coroutine_fn int block_copy_task_entry(AioTask *task);

typedef struct BlockCopyCallState {
    /* Fields initialized in block_copy_async() and never changed. */
    BlockCopyState *s;
    int64_t offset;
    int64_t bytes;
    int max_workers;
    int64_t max_chunk;
    bool ignore_ratelimit;
    BlockCopyAsyncCallbackFunc cb;
    void *cb_opaque;
    /* Coroutine where async block-copy is running */
    Coroutine *co;

    /* Fields whose state changes throughout the execution */
    bool finished; /* atomic */
    QemuCoSleep sleep; /* TODO: protect API with a lock */
    bool cancelled; /* atomic */
    /* To reference all call states from BlockCopyState */
    QLIST_ENTRY(BlockCopyCallState) list;

    /*
     * Fields that report information about return values and errors.
     * Protected by lock in BlockCopyState.
     */
    bool error_is_read;
    /*
     * @ret is set concurrently by tasks under mutex. Only set once by the
     * first failed task (and untouched if no task failed).
     * After finishing (call_state->finished is true), it is not modified
     * anymore and may be safely read without the mutex.
     */
    int ret;
} BlockCopyCallState;

typedef struct BlockCopyTask {
    AioTask task;

    /*
     * Fields initialized in block_copy_task_create()
     * and never changed.
     */
    BlockCopyState *s;
    BlockCopyCallState *call_state;
    /*
     * @method can also be set again in the while loop of
     * block_copy_dirty_clusters(), but it is never accessed concurrently
     * because the only other function that reads it is
     * block_copy_task_entry() and it is invoked afterwards in the same
     * iteration.
     */
    BlockCopyMethod method;

    /*
     * Generally, req is protected by lock in BlockCopyState. Still, req.offset
     * is only set on task creation, so it may be read concurrently after
     * creation. req.bytes is changed at most once, and only needs protection
     * against a parallel read while the value is being updated in
     * block_copy_task_shrink().
     */
    BlockReq req;
} BlockCopyTask;
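
/*
 * Illustrative note (derived from the constants above; the exact numbers are
 * an assumption for the common case): with the default 64 KiB cluster size
 * (BLOCK_COPY_CLUSTER_SIZE_DEFAULT) and no tighter max_transfer limit from
 * source or target, block_copy_chunk_size() below yields per-task chunks of:
 *
 *     COPY_READ_WRITE_CLUSTER  ->  64 KiB  (one cluster)
 *     COPY_READ_WRITE          ->   1 MiB  (BLOCK_COPY_MAX_BUFFER)
 *     COPY_RANGE_SMALL         ->   1 MiB  (BLOCK_COPY_MAX_BUFFER)
 *     COPY_RANGE_FULL          ->  16 MiB  (BLOCK_COPY_MAX_COPY_RANGE)
 *
 * The result is further clamped by the caller's max_chunk in
 * block_copy_task_create().
 */
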
static int64_t task_end(BlockCopyTask *task)
{
    return task->req.offset + task->req.bytes;
}

typedef struct BlockCopyState {
    /*
     * BdrvChild objects are not owned or managed by block-copy. They are
     * provided by the block-copy user, and the user is responsible for
     * appropriate permissions on these children.
     */
    BdrvChild *source;
    BdrvChild *target;

    /*
     * Fields initialized in block_copy_state_new()
     * and never changed.
     */
    int64_t cluster_size;
    int64_t max_transfer;
    uint64_t len;
    BdrvRequestFlags write_flags;

    /*
     * Fields whose state changes throughout the execution.
     * Protected by lock.
     */
    CoMutex lock;
    int64_t in_flight_bytes;
    BlockCopyMethod method;
    BlockReqList reqs;
    QLIST_HEAD(, BlockCopyCallState) calls;
    /*
     * skip_unallocated:
     *
     * Used by sync=top jobs, which first scan the source node for unallocated
     * areas and clear them in the copy_bitmap. During this process, the bitmap
     * is thus not fully initialized: it may still have bits set for areas that
     * are unallocated and should actually not be copied.
     *
     * This is indicated by skip_unallocated.
     *
     * In this case, block_copy() will query the source's allocation status,
     * skip unallocated regions, clear them in the copy_bitmap, and invoke
     * block_copy_reset_unallocated() every time it does.
     */
    bool skip_unallocated; /* atomic */
    /* State fields that use a thread-safe API */
    BdrvDirtyBitmap *copy_bitmap;
    ProgressMeter *progress;
    SharedResource *mem;
    RateLimit rate_limit;
} BlockCopyState;

/* Called with lock held */
static int64_t block_copy_chunk_size(BlockCopyState *s)
{
    switch (s->method) {
    case COPY_READ_WRITE_CLUSTER:
        return s->cluster_size;
    case COPY_READ_WRITE:
    case COPY_RANGE_SMALL:
        return MIN(MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER),
                   s->max_transfer);
    case COPY_RANGE_FULL:
        return MIN(MAX(s->cluster_size, BLOCK_COPY_MAX_COPY_RANGE),
                   s->max_transfer);
    default:
        /* Cannot have COPY_WRITE_ZEROES here. */
        abort();
    }
}

/*
 * Search for the first dirty area in the offset/bytes range and create a task
 * at the beginning of it.
 */
static coroutine_fn BlockCopyTask *
block_copy_task_create(BlockCopyState *s, BlockCopyCallState *call_state,
                       int64_t offset, int64_t bytes)
{
    BlockCopyTask *task;
    int64_t max_chunk;

    QEMU_LOCK_GUARD(&s->lock);
    max_chunk = MIN_NON_ZERO(block_copy_chunk_size(s), call_state->max_chunk);
    if (!bdrv_dirty_bitmap_next_dirty_area(s->copy_bitmap,
                                           offset, offset + bytes,
                                           max_chunk, &offset, &bytes))
    {
        return NULL;
    }

    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    bytes = QEMU_ALIGN_UP(bytes, s->cluster_size);

    /* region is dirty, so no existent tasks possible in it */
    assert(!reqlist_find_conflict(&s->reqs, offset, bytes));

    bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
    s->in_flight_bytes += bytes;

    task = g_new(BlockCopyTask, 1);
    *task = (BlockCopyTask) {
        .task.func = block_copy_task_entry,
        .s = s,
        .call_state = call_state,
        .method = s->method,
    };
    reqlist_init_req(&s->reqs, &task->req, offset, bytes);

    return task;
}
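
/*
 * Worked example for the clamping above (numbers are an illustrative
 * assumption): with cluster_size = 64 KiB and an effective max_chunk of
 * 1 MiB, a request covering [0, 10 MiB) in which only [3 MiB, 5 MiB) is
 * dirty produces a task for [3 MiB, 4 MiB): the search skips the clean
 * prefix and limits the task to max_chunk, and the remaining dirty clusters
 * are picked up by later iterations in block_copy_dirty_clusters().
 */
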
/*
 * block_copy_task_shrink
 *
 * Drop the tail of the task, to be handled later. Set the dirty bits back and
 * wake up all tasks waiting for us (maybe some of them do not intersect with
 * the shrunk task).
 */
static void coroutine_fn block_copy_task_shrink(BlockCopyTask *task,
                                                int64_t new_bytes)
{
    QEMU_LOCK_GUARD(&task->s->lock);
    if (new_bytes == task->req.bytes) {
        return;
    }

    assert(new_bytes > 0 && new_bytes < task->req.bytes);

    task->s->in_flight_bytes -= task->req.bytes - new_bytes;
    bdrv_set_dirty_bitmap(task->s->copy_bitmap,
                          task->req.offset + new_bytes,
                          task->req.bytes - new_bytes);

    reqlist_shrink_req(&task->req, new_bytes);
}

static void coroutine_fn block_copy_task_end(BlockCopyTask *task, int ret)
{
    QEMU_LOCK_GUARD(&task->s->lock);
    task->s->in_flight_bytes -= task->req.bytes;
    if (ret < 0) {
        bdrv_set_dirty_bitmap(task->s->copy_bitmap, task->req.offset,
                              task->req.bytes);
    }
    if (task->s->progress) {
        progress_set_remaining(task->s->progress,
                               bdrv_get_dirty_count(task->s->copy_bitmap) +
                               task->s->in_flight_bytes);
    }
    reqlist_remove_req(&task->req);
}

void block_copy_state_free(BlockCopyState *s)
{
    if (!s) {
        return;
    }

    ratelimit_destroy(&s->rate_limit);
    bdrv_release_dirty_bitmap(s->copy_bitmap);
    shres_destroy(s->mem);
    g_free(s);
}

static uint32_t block_copy_max_transfer(BdrvChild *source, BdrvChild *target)
{
    return MIN_NON_ZERO(INT_MAX,
                        MIN_NON_ZERO(source->bs->bl.max_transfer,
                                     target->bs->bl.max_transfer));
}

void block_copy_set_copy_opts(BlockCopyState *s, bool use_copy_range,
                              bool compress)
{
    /* Keep BDRV_REQ_SERIALISING set (or not set) in block_copy_state_new() */
    s->write_flags = (s->write_flags & BDRV_REQ_SERIALISING) |
        (compress ? BDRV_REQ_WRITE_COMPRESSED : 0);

    if (s->max_transfer < s->cluster_size) {
        /*
         * copy_range does not respect max_transfer. We don't want to bother
         * with requests smaller than the block-copy cluster size, so fall
         * back to buffered copying (read and write respect max_transfer on
         * their own).
         */
        s->method = COPY_READ_WRITE_CLUSTER;
    } else if (compress) {
        /* Compression supports only cluster-size writes and no copy-range. */
        s->method = COPY_READ_WRITE_CLUSTER;
    } else {
        /*
         * If copy range is enabled, start with COPY_RANGE_SMALL, until the
         * first successful copy_range (look at block_copy_do_copy).
         */
        s->method = use_copy_range ? COPY_RANGE_SMALL : COPY_READ_WRITE;
    }
}

static int64_t block_copy_calculate_cluster_size(BlockDriverState *target,
                                                 Error **errp)
{
    int ret;
    BlockDriverInfo bdi;
    bool target_does_cow;

    GLOBAL_STATE_CODE();
    GRAPH_RDLOCK_GUARD_MAINLOOP();

    target_does_cow = bdrv_backing_chain_next(target);

    /*
     * If there is no backing file on the target, we cannot rely on COW if our
     * backup cluster size is smaller than the target cluster size. Even for
     * targets with a backing file, try to avoid COW if possible.
     */
    ret = bdrv_get_info(target, &bdi);
    if (ret == -ENOTSUP && !target_does_cow) {
        /* Cluster size is not defined */
        warn_report("The target block device doesn't provide "
                    "information about the block size and it doesn't have a "
                    "backing file. The default block size of %u bytes is "
                    "used. If the actual block size of the target exceeds "
                    "this default, the backup may be unusable",
                    BLOCK_COPY_CLUSTER_SIZE_DEFAULT);
        return BLOCK_COPY_CLUSTER_SIZE_DEFAULT;
    } else if (ret < 0 && !target_does_cow) {
        error_setg_errno(errp, -ret,
            "Couldn't determine the cluster size of the target image, "
            "which has no backing file");
        error_append_hint(errp,
            "Aborting, since this may create an unusable destination image\n");
        return ret;
    } else if (ret < 0 && target_does_cow) {
        /* Not fatal; just trudge on ahead. */
        return BLOCK_COPY_CLUSTER_SIZE_DEFAULT;
    }

    return MAX(BLOCK_COPY_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
}
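
/*
 * Worked example (an illustrative assumption, not a specific configuration):
 * a target whose driver reports a 128 KiB cluster size via bdrv_get_info()
 * results in MAX(BLOCK_COPY_CLUSTER_SIZE_DEFAULT, 128 KiB) = 128 KiB; a
 * target that reports no cluster size at all (-ENOTSUP) and has no backing
 * file falls back to the 64 KiB default, with the warning above.
 */
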
BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
                                     const BdrvDirtyBitmap *bitmap,
                                     Error **errp)
{
    ERRP_GUARD();
    BlockCopyState *s;
    int64_t cluster_size;
    BdrvDirtyBitmap *copy_bitmap;
    bool is_fleecing;

    GLOBAL_STATE_CODE();

    cluster_size = block_copy_calculate_cluster_size(target->bs, errp);
    if (cluster_size < 0) {
        return NULL;
    }

    copy_bitmap = bdrv_create_dirty_bitmap(source->bs, cluster_size, NULL,
                                           errp);
    if (!copy_bitmap) {
        return NULL;
    }
    bdrv_disable_dirty_bitmap(copy_bitmap);
    if (bitmap) {
        if (!bdrv_merge_dirty_bitmap(copy_bitmap, bitmap, NULL, errp)) {
            error_prepend(errp, "Failed to merge bitmap '%s' to internal "
                          "copy-bitmap: ", bdrv_dirty_bitmap_name(bitmap));
            bdrv_release_dirty_bitmap(copy_bitmap);
            return NULL;
        }
    } else {
        bdrv_set_dirty_bitmap(copy_bitmap, 0,
                              bdrv_dirty_bitmap_size(copy_bitmap));
    }

    /*
     * If the source is in the backing chain of the target, assume that the
     * target is going to be used for "image fleecing", i.e. it should
     * represent a kind of snapshot of the source at the backup-start point in
     * time, and the target is going to be read by somebody (for example, used
     * as an NBD export) during the backup job.
     *
     * In this case, we need to add the BDRV_REQ_SERIALISING write flag to
     * avoid intersection of backup writes and third-party reads from the
     * target; otherwise, when reading from the target, we might occasionally
     * read data that has already been updated by the guest.
     *
     * For more information see commit f8d59dfb40bb and test
     * tests/qemu-iotests/222
     */
    bdrv_graph_rdlock_main_loop();
    is_fleecing = bdrv_chain_contains(target->bs, source->bs);
    bdrv_graph_rdunlock_main_loop();

    s = g_new(BlockCopyState, 1);
    *s = (BlockCopyState) {
        .source = source,
        .target = target,
        .copy_bitmap = copy_bitmap,
        .cluster_size = cluster_size,
        .len = bdrv_dirty_bitmap_size(copy_bitmap),
        .write_flags = (is_fleecing ? BDRV_REQ_SERIALISING : 0),
        .mem = shres_create(BLOCK_COPY_MAX_MEM),
        .max_transfer = QEMU_ALIGN_DOWN(
                                    block_copy_max_transfer(source, target),
                                    cluster_size),
    };

    block_copy_set_copy_opts(s, false, false);

    ratelimit_init(&s->rate_limit);
    qemu_co_mutex_init(&s->lock);
    QLIST_INIT(&s->reqs);
    QLIST_INIT(&s->calls);

    return s;
}

/* Only set before running the job, no need for locking. */
void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm)
{
    s->progress = pm;
}
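
/*
 * Usage sketch (illustrative only, not part of the implementation): how a
 * caller such as a backup job might drive the synchronous API. The names
 * "src_child", "tgt_child", "job", "timeout_ns" and "errp" are assumptions
 * standing in for whatever the caller already has; block_copy() must run in
 * coroutine context.
 *
 *     BlockCopyState *bcs;
 *     int ret;
 *
 *     bcs = block_copy_state_new(src_child, tgt_child, NULL, errp);
 *     if (!bcs) {
 *         return -EINVAL;
 *     }
 *     block_copy_set_progress_meter(bcs, &job->progress);
 *
 *     // copy the first megabyte, honouring the rate limit; timeout_ns is
 *     // handed to qemu_co_timeout()
 *     ret = block_copy(bcs, 0, 1 * MiB, false, timeout_ns, NULL, NULL);
 *
 *     block_copy_state_free(bcs);
 */
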
/*
 * Takes ownership of @task
 *
 * If pool is NULL, directly run the task; otherwise schedule it into the pool.
 *
 * Returns: task.func return code if pool is NULL
 *          otherwise -ECANCELED if pool status is bad
 *          otherwise 0 (successfully scheduled)
 */
static coroutine_fn int block_copy_task_run(AioTaskPool *pool,
                                            BlockCopyTask *task)
{
    if (!pool) {
        int ret = task->task.func(&task->task);

        g_free(task);
        return ret;
    }

    aio_task_pool_wait_slot(pool);
    if (aio_task_pool_status(pool) < 0) {
        co_put_to_shres(task->s->mem, task->req.bytes);
        block_copy_task_end(task, -ECANCELED);
        g_free(task);
        return -ECANCELED;
    }

    aio_task_pool_start_task(pool, &task->task);

    return 0;
}

/*
 * block_copy_do_copy
 *
 * Do a copy of a cluster-aligned chunk. The requested region is allowed to
 * exceed s->len only to cover the last cluster when s->len is not aligned to
 * clusters.
 *
 * No sync here: neither bitmap nor intersecting requests handling, only copy.
 *
 * @method is an in-out argument, so that copy_range can be either extended to
 * a full-size buffer or disabled if the copy_range attempt fails. The output
 * value of @method should be used for subsequent tasks.
 * Returns 0 on success.
 */
static int coroutine_fn GRAPH_RDLOCK
block_copy_do_copy(BlockCopyState *s, int64_t offset, int64_t bytes,
                   BlockCopyMethod *method, bool *error_is_read)
{
    int ret;
    int64_t nbytes = MIN(offset + bytes, s->len) - offset;
    void *bounce_buffer = NULL;

    assert(offset >= 0 && bytes > 0 && INT64_MAX - offset >= bytes);
    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));
    assert(offset < s->len);
    assert(offset + bytes <= s->len ||
           offset + bytes == QEMU_ALIGN_UP(s->len, s->cluster_size));
    assert(nbytes < INT_MAX);

    switch (*method) {
    case COPY_WRITE_ZEROES:
        ret = bdrv_co_pwrite_zeroes(s->target, offset, nbytes, s->write_flags &
                                    ~BDRV_REQ_WRITE_COMPRESSED);
        if (ret < 0) {
            trace_block_copy_write_zeroes_fail(s, offset, ret);
            *error_is_read = false;
        }
        return ret;

    case COPY_RANGE_SMALL:
    case COPY_RANGE_FULL:
        ret = bdrv_co_copy_range(s->source, offset, s->target, offset, nbytes,
                                 0, s->write_flags);
        if (ret >= 0) {
            /* Successful copy-range, increase chunk size. */
            *method = COPY_RANGE_FULL;
            return 0;
        }

        trace_block_copy_copy_range_fail(s, offset, ret);
        *method = COPY_READ_WRITE;
        /* Fall through to read+write with allocated buffer */

    case COPY_READ_WRITE_CLUSTER:
    case COPY_READ_WRITE:
        /*
         * In case of a failed copy_range request above, we may proceed with a
         * buffered request larger than BLOCK_COPY_MAX_BUFFER.
         * Still, further requests will be properly limited, so don't care too
         * much. Moreover, the most likely case (copy_range is unsupported for
         * the configuration, so the very first copy_range request fails)
         * is handled by setting the large copy_size only after the first
         * successful copy_range.
         */

        bounce_buffer = qemu_blockalign(s->source->bs, nbytes);

        ret = bdrv_co_pread(s->source, offset, nbytes, bounce_buffer, 0);
        if (ret < 0) {
            trace_block_copy_read_fail(s, offset, ret);
            *error_is_read = true;
            goto out;
        }

        ret = bdrv_co_pwrite(s->target, offset, nbytes, bounce_buffer,
                             s->write_flags);
        if (ret < 0) {
            trace_block_copy_write_fail(s, offset, ret);
            *error_is_read = false;
            goto out;
        }

    out:
        qemu_vfree(bounce_buffer);
        break;

    default:
        abort();
    }

    return ret;
}

static coroutine_fn int block_copy_task_entry(AioTask *task)
{
    BlockCopyTask *t = container_of(task, BlockCopyTask, task);
    BlockCopyState *s = t->s;
    bool error_is_read = false;
    BlockCopyMethod method = t->method;
    int ret;

    WITH_GRAPH_RDLOCK_GUARD() {
        ret = block_copy_do_copy(s, t->req.offset, t->req.bytes, &method,
                                 &error_is_read);
    }

    WITH_QEMU_LOCK_GUARD(&s->lock) {
        if (s->method == t->method) {
            s->method = method;
        }

        if (ret < 0) {
            if (!t->call_state->ret) {
                t->call_state->ret = ret;
                t->call_state->error_is_read = error_is_read;
            }
        } else if (s->progress) {
            progress_work_done(s->progress, t->req.bytes);
        }
    }
    co_put_to_shres(s->mem, t->req.bytes);
    block_copy_task_end(t, ret);

    return ret;
}

static coroutine_fn GRAPH_RDLOCK
int block_copy_block_status(BlockCopyState *s, int64_t offset, int64_t bytes,
                            int64_t *pnum)
{
    int64_t num;
    BlockDriverState *base;
    int ret;

    if (qatomic_read(&s->skip_unallocated)) {
        base = bdrv_backing_chain_next(s->source->bs);
    } else {
        base = NULL;
    }

    ret = bdrv_co_block_status_above(s->source->bs, base, offset, bytes, &num,
                                     NULL, NULL);
    if (ret < 0 || num < s->cluster_size) {
        /*
         * On error, or if we failed to obtain a large enough chunk, just fall
         * back to copying one cluster.
         */
        num = s->cluster_size;
        ret = BDRV_BLOCK_ALLOCATED | BDRV_BLOCK_DATA;
    } else if (offset + num == s->len) {
        num = QEMU_ALIGN_UP(num, s->cluster_size);
    } else {
        num = QEMU_ALIGN_DOWN(num, s->cluster_size);
    }

    *pnum = num;
    return ret;
}
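
/*
 * Example of the clamping above (numbers are an illustrative assumption):
 * with cluster_size = 64 KiB and s->len = 1 GiB, a status query in the middle
 * of the image that reports 100 KiB of contiguous data is rounded down to
 * 64 KiB so that the task stays cluster-aligned; only at the end of the image
 * (offset + num == s->len) is the result rounded up, to cover a possibly
 * unaligned final cluster.
 */
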
/*
 * Check if the cluster starting at offset is allocated or not.
 * Return via pnum the number of contiguous clusters sharing this allocation.
 */
static int coroutine_fn GRAPH_RDLOCK
block_copy_is_cluster_allocated(BlockCopyState *s, int64_t offset,
                                int64_t *pnum)
{
    BlockDriverState *bs = s->source->bs;
    int64_t count, total_count = 0;
    int64_t bytes = s->len - offset;
    int ret;

    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));

    while (true) {
        /* protected in backup_run() */
        ret = bdrv_co_is_allocated(bs, offset, bytes, &count);
        if (ret < 0) {
            return ret;
        }

        total_count += count;

        if (ret || count == 0) {
            /*
             * ret: partial segment(s) are considered allocated.
             * otherwise: unallocated tail is treated as an entire segment.
             */
            *pnum = DIV_ROUND_UP(total_count, s->cluster_size);
            return ret;
        }

        /* Unallocated segment(s) with uncertain following segment(s) */
        if (total_count >= s->cluster_size) {
            *pnum = total_count / s->cluster_size;
            return 0;
        }

        offset += count;
        bytes -= count;
    }
}

void block_copy_reset(BlockCopyState *s, int64_t offset, int64_t bytes)
{
    QEMU_LOCK_GUARD(&s->lock);

    bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
    if (s->progress) {
        progress_set_remaining(s->progress,
                               bdrv_get_dirty_count(s->copy_bitmap) +
                               s->in_flight_bytes);
    }
}

/*
 * Reset bits in copy_bitmap starting at offset if they represent unallocated
 * data in the image. May reset subsequent contiguous bits.
 * @return 0 when the cluster at @offset was unallocated,
 *         1 otherwise, and -ret on error.
 */
int64_t coroutine_fn block_copy_reset_unallocated(BlockCopyState *s,
                                                  int64_t offset,
                                                  int64_t *count)
{
    int ret;
    int64_t clusters, bytes;

    ret = block_copy_is_cluster_allocated(s, offset, &clusters);
    if (ret < 0) {
        return ret;
    }

    bytes = clusters * s->cluster_size;

    if (!ret) {
        block_copy_reset(s, offset, bytes);
    }

    *count = bytes;
    return ret;
}
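
/*
 * Sketch of how a sync=top user might drive this (illustrative only; "s",
 * "len", "offset" and "count" are assumed caller-side variables; see also
 * the skip_unallocated comment in BlockCopyState):
 *
 *     block_copy_set_skip_unallocated(s, true);
 *     for (offset = 0; offset < len; offset += count) {
 *         // clears copy_bitmap bits for unallocated clusters so they are
 *         // never copied; *count reports how far the answer extends
 *         ret = block_copy_reset_unallocated(s, offset, &count);
 *         if (ret < 0) {
 *             break;
 *         }
 *     }
 */
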
/*
 * block_copy_dirty_clusters
 *
 * Copy dirty clusters in the @offset/@bytes range.
 * Returns 1 if dirty clusters were found and successfully copied, 0 if no
 * dirty clusters were found, and -errno on failure.
 */
static int coroutine_fn GRAPH_RDLOCK
block_copy_dirty_clusters(BlockCopyCallState *call_state)
{
    BlockCopyState *s = call_state->s;
    int64_t offset = call_state->offset;
    int64_t bytes = call_state->bytes;

    int ret = 0;
    bool found_dirty = false;
    int64_t end = offset + bytes;
    AioTaskPool *aio = NULL;

    /*
     * The block_copy() user is responsible for keeping source and target in
     * the same aio context.
     */
    assert(bdrv_get_aio_context(s->source->bs) ==
           bdrv_get_aio_context(s->target->bs));

    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));

    while (bytes && aio_task_pool_status(aio) == 0 &&
           !qatomic_read(&call_state->cancelled)) {
        BlockCopyTask *task;
        int64_t status_bytes;

        task = block_copy_task_create(s, call_state, offset, bytes);
        if (!task) {
            /* No more dirty bits in the bitmap */
            trace_block_copy_skip_range(s, offset, bytes);
            break;
        }
        if (task->req.offset > offset) {
            trace_block_copy_skip_range(s, offset, task->req.offset - offset);
        }

        found_dirty = true;

        ret = block_copy_block_status(s, task->req.offset, task->req.bytes,
                                      &status_bytes);
        assert(ret >= 0); /* never fail */
        if (status_bytes < task->req.bytes) {
            block_copy_task_shrink(task, status_bytes);
        }
        if (qatomic_read(&s->skip_unallocated) &&
            !(ret & BDRV_BLOCK_ALLOCATED)) {
            block_copy_task_end(task, 0);
            trace_block_copy_skip_range(s, task->req.offset, task->req.bytes);
            offset = task_end(task);
            bytes = end - offset;
            g_free(task);
            continue;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            task->method = COPY_WRITE_ZEROES;
        }

        if (!call_state->ignore_ratelimit) {
            uint64_t ns = ratelimit_calculate_delay(&s->rate_limit, 0);
            if (ns > 0) {
                block_copy_task_end(task, -EAGAIN);
                g_free(task);
                qemu_co_sleep_ns_wakeable(&call_state->sleep,
                                          QEMU_CLOCK_REALTIME, ns);
                continue;
            }
        }

        ratelimit_calculate_delay(&s->rate_limit, task->req.bytes);

        trace_block_copy_process(s, task->req.offset);

        co_get_from_shres(s->mem, task->req.bytes);

        offset = task_end(task);
        bytes = end - offset;

        if (!aio && bytes) {
            aio = aio_task_pool_new(call_state->max_workers);
        }

        ret = block_copy_task_run(aio, task);
        if (ret < 0) {
            goto out;
        }
    }

out:
    if (aio) {
        aio_task_pool_wait_all(aio);

        /*
         * We are not really interested in -ECANCELED returned from
         * block_copy_task_run. If it fails, it means some task already failed
         * for a real reason, so let's return the first failure.
         * Still, assert that we don't rewrite failure by success.
         *
         * Note: ret may be positive here because of the block-status result.
         */
        assert(ret >= 0 || aio_task_pool_status(aio) < 0);
        ret = aio_task_pool_status(aio);

        aio_task_pool_free(aio);
    }

    return ret < 0 ? ret : found_dirty;
}

void block_copy_kick(BlockCopyCallState *call_state)
{
    qemu_co_sleep_wake(&call_state->sleep);
}

/*
 * block_copy_common
 *
 * Copy the requested region according to the dirty bitmap.
 * Collaborate with parallel block_copy requests: if they succeed, that helps
 * us. If they fail, we will retry not-copied regions. So, if we return an
 * error, it means that some I/O operation failed in the context of _this_
 * block_copy call, not some parallel operation.
 */
static int coroutine_fn GRAPH_RDLOCK
block_copy_common(BlockCopyCallState *call_state)
{
    int ret;
    BlockCopyState *s = call_state->s;

    qemu_co_mutex_lock(&s->lock);
    QLIST_INSERT_HEAD(&s->calls, call_state, list);
    qemu_co_mutex_unlock(&s->lock);

    do {
        ret = block_copy_dirty_clusters(call_state);

        if (ret == 0 && !qatomic_read(&call_state->cancelled)) {
            WITH_QEMU_LOCK_GUARD(&s->lock) {
                /*
                 * Check that there is no task we still need to wait for
                 * to complete.
                 */
                ret = reqlist_wait_one(&s->reqs, call_state->offset,
                                       call_state->bytes, &s->lock);
                if (ret == 0) {
                    /*
                     * No pending tasks, but check the bitmap again in this
                     * same critical section, since a task might have failed
                     * between this and the critical section in
                     * block_copy_dirty_clusters().
                     *
                     * A reqlist_wait_one() return value of 0 also means that
                     * it didn't release the lock. So, we are still in the
                     * same critical section, not interrupted by any
                     * concurrent access to state.
                     */
                    ret = bdrv_dirty_bitmap_next_dirty(s->copy_bitmap,
                                                       call_state->offset,
                                                       call_state->bytes) >= 0;
                }
            }
        }

        /*
         * We retry in two cases:
         * 1. Some progress was made.
         *    Something was copied, which means that there were yield points
         *    and some new dirty bits may have appeared (due to failed
         *    parallel block-copy requests).
         * 2. We have waited for some intersecting block-copy request.
         *    It may have failed and produced new dirty bits.
         */
    } while (ret > 0 && !qatomic_read(&call_state->cancelled));

    qatomic_store_release(&call_state->finished, true);

    if (call_state->cb) {
        call_state->cb(call_state->cb_opaque);
    }

    qemu_co_mutex_lock(&s->lock);
    QLIST_REMOVE(call_state, list);
    qemu_co_mutex_unlock(&s->lock);

    return ret;
}

static void coroutine_fn block_copy_async_co_entry(void *opaque)
{
    GRAPH_RDLOCK_GUARD();
    block_copy_common(opaque);
}

int coroutine_fn block_copy(BlockCopyState *s, int64_t start, int64_t bytes,
                            bool ignore_ratelimit, uint64_t timeout_ns,
                            BlockCopyAsyncCallbackFunc cb,
                            void *cb_opaque)
{
    int ret;
    BlockCopyCallState *call_state = g_new(BlockCopyCallState, 1);

    *call_state = (BlockCopyCallState) {
        .s = s,
        .offset = start,
        .bytes = bytes,
        .ignore_ratelimit = ignore_ratelimit,
        .max_workers = BLOCK_COPY_MAX_WORKERS,
        .cb = cb,
        .cb_opaque = cb_opaque,
    };

    ret = qemu_co_timeout(block_copy_async_co_entry, call_state, timeout_ns,
                          g_free);
    if (ret < 0) {
        assert(ret == -ETIMEDOUT);
        block_copy_call_cancel(call_state);
        /* call_state will be freed by running coroutine. */
        return ret;
    }

    ret = call_state->ret;
    g_free(call_state);

    return ret;
}

BlockCopyCallState *block_copy_async(BlockCopyState *s,
                                     int64_t offset, int64_t bytes,
                                     int max_workers, int64_t max_chunk,
                                     BlockCopyAsyncCallbackFunc cb,
                                     void *cb_opaque)
{
    BlockCopyCallState *call_state = g_new(BlockCopyCallState, 1);

    *call_state = (BlockCopyCallState) {
        .s = s,
        .offset = offset,
        .bytes = bytes,
        .max_workers = max_workers,
        .max_chunk = max_chunk,
        .cb = cb,
        .cb_opaque = cb_opaque,

        .co = qemu_coroutine_create(block_copy_async_co_entry, call_state),
    };

    qemu_coroutine_enter(call_state->co);

    return call_state;
}

void block_copy_call_free(BlockCopyCallState *call_state)
{
    if (!call_state) {
        return;
    }

    assert(qatomic_read(&call_state->finished));
    g_free(call_state);
}

bool block_copy_call_finished(BlockCopyCallState *call_state)
{
    return qatomic_read(&call_state->finished);
}

bool block_copy_call_succeeded(BlockCopyCallState *call_state)
{
    return qatomic_load_acquire(&call_state->finished) &&
           !qatomic_read(&call_state->cancelled) &&
           call_state->ret == 0;
}

bool block_copy_call_failed(BlockCopyCallState *call_state)
{
    return qatomic_load_acquire(&call_state->finished) &&
           !qatomic_read(&call_state->cancelled) &&
           call_state->ret < 0;
}

bool block_copy_call_cancelled(BlockCopyCallState *call_state)
{
    return qatomic_read(&call_state->cancelled);
}

int block_copy_call_status(BlockCopyCallState *call_state, bool *error_is_read)
{
    assert(qatomic_load_acquire(&call_state->finished));
    if (error_is_read) {
        *error_is_read = call_state->error_is_read;
    }
    return call_state->ret;
}
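
/*
 * Async usage sketch (illustrative only; "bcs", "len", "job_cb" and "job" are
 * assumed caller-side names, not part of this API):
 *
 *     BlockCopyCallState *cs;
 *
 *     cs = block_copy_async(bcs, 0, len, BLOCK_COPY_MAX_WORKERS, 0,
 *                           job_cb, job);
 *     ...
 *     if (block_copy_call_finished(cs)) {
 *         bool error_is_read;
 *         int ret = block_copy_call_status(cs, &error_is_read);
 *
 *         block_copy_call_free(cs);
 *     }
 */
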
/*
 * Note that cancelling and finishing are racy.
 * User can cancel a block-copy that is already finished.
 */
void block_copy_call_cancel(BlockCopyCallState *call_state)
{
    qatomic_set(&call_state->cancelled, true);
    block_copy_kick(call_state);
}

BdrvDirtyBitmap *block_copy_dirty_bitmap(BlockCopyState *s)
{
    return s->copy_bitmap;
}

int64_t block_copy_cluster_size(BlockCopyState *s)
{
    return s->cluster_size;
}

void block_copy_set_skip_unallocated(BlockCopyState *s, bool skip)
{
    qatomic_set(&s->skip_unallocated, skip);
}

void block_copy_set_speed(BlockCopyState *s, uint64_t speed)
{
    ratelimit_set_speed(&s->rate_limit, speed, BLOCK_COPY_SLICE_TIME);

    /*
     * Note: it's good to kick all call states from here, but it should be
     * done only from a coroutine, to not crash if the s->calls list changed
     * while entering one call. So for now, the only user of this function
     * kicks its only one call_state by hand.
     */
}
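
/*
 * Illustrative sketch of the pattern described in the note above (caller
 * side; "s" and "call_state" are assumed names, and the kick-by-hand step is
 * what the current user does rather than an API of this file):
 *
 *     block_copy_set_speed(s, new_speed);
 *     if (call_state) {
 *         // wake a throttled copy so the new speed takes effect immediately
 *         block_copy_kick(call_state);
 *     }
 */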