/*
 * block_copy API
 *
 * Copyright (C) 2013 Proxmox Server Solutions
 * Copyright (c) 2019 Virtuozzo International GmbH.
 *
 * Authors:
 *  Dietmar Maurer (dietmar@proxmox.com)
 *  Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "qapi/error.h"
#include "block/block-copy.h"
#include "sysemu/block-backend.h"
#include "qemu/units.h"
#include "qemu/coroutine.h"
#include "block/aio_task.h"

#define BLOCK_COPY_MAX_COPY_RANGE (16 * MiB)
#define BLOCK_COPY_MAX_BUFFER (1 * MiB)
#define BLOCK_COPY_MAX_MEM (128 * MiB)
#define BLOCK_COPY_MAX_WORKERS 64
#define BLOCK_COPY_SLICE_TIME 100000000ULL /* ns */

static coroutine_fn int block_copy_task_entry(AioTask *task);

typedef struct BlockCopyCallState {
    /* IN parameters. Initialized in block_copy_async() and never changed. */
    BlockCopyState *s;
    int64_t offset;
    int64_t bytes;
    int max_workers;
    int64_t max_chunk;
    bool ignore_ratelimit;
    BlockCopyAsyncCallbackFunc cb;
    void *cb_opaque;

    /* Coroutine where async block-copy is running */
    Coroutine *co;

    /* To reference all call states from BlockCopyState */
    QLIST_ENTRY(BlockCopyCallState) list;

    /* State */
    int ret;
    bool finished;
    QemuCoSleep sleep;
    bool cancelled;

    /* OUT parameters */
    bool error_is_read;
} BlockCopyCallState;

typedef struct BlockCopyTask {
    AioTask task;

    BlockCopyState *s;
    BlockCopyCallState *call_state;
    int64_t offset;
    int64_t bytes;
    bool zeroes;
    bool copy_range;
    QLIST_ENTRY(BlockCopyTask) list;
    CoQueue wait_queue; /* coroutines blocked on this task */
} BlockCopyTask;

static int64_t task_end(BlockCopyTask *task)
{
    return task->offset + task->bytes;
}

typedef struct BlockCopyState {
    /*
     * BdrvChild objects are not owned or managed by block-copy. They are
     * provided by the block-copy user, who is responsible for appropriate
     * permissions on these children.
     */
    BdrvChild *source;
    BdrvChild *target;
    BdrvDirtyBitmap *copy_bitmap;
    int64_t in_flight_bytes;
    int64_t cluster_size;
    bool use_copy_range;
    int64_t copy_size;
    uint64_t len;
    QLIST_HEAD(, BlockCopyTask) tasks; /* All tasks from all block-copy calls */
    QLIST_HEAD(, BlockCopyCallState) calls;

    BdrvRequestFlags write_flags;

    /*
     * skip_unallocated:
     *
     * Used by sync=top jobs, which first scan the source node for unallocated
     * areas and clear them in the copy_bitmap. During this process, the bitmap
     * is thus not fully initialized: It may still have bits set for areas that
     * are unallocated and should actually not be copied.
     *
     * This is indicated by skip_unallocated.
     *
     * In this case, block_copy() will query the source's allocation status,
     * skip unallocated regions, clear them in the copy_bitmap, and invoke
     * block_copy_reset_unallocated() every time it does.
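     *
     * A rough sketch of that pre-scan from the caller's side, kept here only
     * as an illustration. @s and @src_len stand for the caller's
     * BlockCopyState and source length; they are assumptions of this example,
     * not taken from a real user.
     *
     *     int64_t off = 0, count;
     *
     *     block_copy_set_skip_unallocated(s, true);
     *     while (off < src_len) {
     *         if (block_copy_reset_unallocated(s, off, &count) < 0) {
     *             break; // error; the bitmap stays partially initialized
     *         }
     *         off += count;
     *     }
     *     block_copy_set_skip_unallocated(s, false);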
     */
    bool skip_unallocated;

    ProgressMeter *progress;

    SharedResource *mem;

    uint64_t speed;
    RateLimit rate_limit;
} BlockCopyState;

static BlockCopyTask *find_conflicting_task(BlockCopyState *s,
                                            int64_t offset, int64_t bytes)
{
    BlockCopyTask *t;

    QLIST_FOREACH(t, &s->tasks, list) {
        if (offset + bytes > t->offset && offset < t->offset + t->bytes) {
            return t;
        }
    }

    return NULL;
}

/*
 * If there are no intersecting tasks return false. Otherwise, wait for the
 * first intersecting task found to finish and return true.
 */
static bool coroutine_fn block_copy_wait_one(BlockCopyState *s, int64_t offset,
                                             int64_t bytes)
{
    BlockCopyTask *task = find_conflicting_task(s, offset, bytes);

    if (!task) {
        return false;
    }

    qemu_co_queue_wait(&task->wait_queue, NULL);

    return true;
}

/*
 * Search for the first dirty area in the offset/bytes range and create a task
 * at the beginning of it.
 */
static BlockCopyTask *block_copy_task_create(BlockCopyState *s,
                                             BlockCopyCallState *call_state,
                                             int64_t offset, int64_t bytes)
{
    BlockCopyTask *task;
    int64_t max_chunk = MIN_NON_ZERO(s->copy_size, call_state->max_chunk);

    if (!bdrv_dirty_bitmap_next_dirty_area(s->copy_bitmap,
                                           offset, offset + bytes,
                                           max_chunk, &offset, &bytes))
    {
        return NULL;
    }

    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    bytes = QEMU_ALIGN_UP(bytes, s->cluster_size);

    /* region is dirty, so no existing tasks are possible in it */
    assert(!find_conflicting_task(s, offset, bytes));

    bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
    s->in_flight_bytes += bytes;

    task = g_new(BlockCopyTask, 1);
    *task = (BlockCopyTask) {
        .task.func = block_copy_task_entry,
        .s = s,
        .call_state = call_state,
        .offset = offset,
        .bytes = bytes,
        .copy_range = s->use_copy_range,
    };
    qemu_co_queue_init(&task->wait_queue);
    QLIST_INSERT_HEAD(&s->tasks, task, list);

    return task;
}

/*
 * block_copy_task_shrink
 *
 * Drop the tail of the task to be handled later. Set the dirty bits back and
 * wake up all tasks waiting for us (some of them may no longer intersect the
 * shrunk task).
 */
static void coroutine_fn block_copy_task_shrink(BlockCopyTask *task,
                                                int64_t new_bytes)
{
    if (new_bytes == task->bytes) {
        return;
    }

    assert(new_bytes > 0 && new_bytes < task->bytes);

    task->s->in_flight_bytes -= task->bytes - new_bytes;
    bdrv_set_dirty_bitmap(task->s->copy_bitmap,
                          task->offset + new_bytes, task->bytes - new_bytes);

    task->bytes = new_bytes;
    qemu_co_queue_restart_all(&task->wait_queue);
}

static void coroutine_fn block_copy_task_end(BlockCopyTask *task, int ret)
{
    task->s->in_flight_bytes -= task->bytes;
    if (ret < 0) {
        bdrv_set_dirty_bitmap(task->s->copy_bitmap, task->offset, task->bytes);
    }
    QLIST_REMOVE(task, list);
    qemu_co_queue_restart_all(&task->wait_queue);
}

void block_copy_state_free(BlockCopyState *s)
{
    if (!s) {
        return;
    }

    ratelimit_destroy(&s->rate_limit);
    bdrv_release_dirty_bitmap(s->copy_bitmap);
    shres_destroy(s->mem);
    g_free(s);
}

static uint32_t block_copy_max_transfer(BdrvChild *source, BdrvChild *target)
{
    return MIN_NON_ZERO(INT_MAX,
                        MIN_NON_ZERO(source->bs->bl.max_transfer,
                                     target->bs->bl.max_transfer));
}

BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
                                     int64_t cluster_size, bool use_copy_range,
                                     BdrvRequestFlags write_flags, Error **errp)
{
    BlockCopyState *s;
    BdrvDirtyBitmap *copy_bitmap;

    copy_bitmap = bdrv_create_dirty_bitmap(source->bs, cluster_size, NULL,
                                           errp);
    if (!copy_bitmap) {
        return NULL;
    }
    bdrv_disable_dirty_bitmap(copy_bitmap);

    s = g_new(BlockCopyState, 1);
    *s = (BlockCopyState) {
        .source = source,
        .target = target,
        .copy_bitmap = copy_bitmap,
        .cluster_size = cluster_size,
        .len = bdrv_dirty_bitmap_size(copy_bitmap),
        .write_flags = write_flags,
        .mem = shres_create(BLOCK_COPY_MAX_MEM),
    };

    if (block_copy_max_transfer(source, target) < cluster_size) {
        /*
         * copy_range does not respect max_transfer. We don't want to bother
         * with requests smaller than the block-copy cluster size, so fall back
         * to buffered copying (read and write respect max_transfer on their
         * own).
         */
        s->use_copy_range = false;
        s->copy_size = cluster_size;
    } else if (write_flags & BDRV_REQ_WRITE_COMPRESSED) {
        /* Compression supports only cluster-size writes and no copy-range. */
        s->use_copy_range = false;
        s->copy_size = cluster_size;
    } else {
        /*
         * We enable copy-range, but keep a small copy_size until the first
         * successful copy_range (see block_copy_do_copy).
         */
        s->use_copy_range = use_copy_range;
        s->copy_size = MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER);
    }

    ratelimit_init(&s->rate_limit);
    QLIST_INIT(&s->tasks);
    QLIST_INIT(&s->calls);

    return s;
}

void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm)
{
    s->progress = pm;
}
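
/*
 * A minimal usage sketch of the setup and synchronous copy path above, kept
 * here as a comment only. The child pointers, cluster size and @length are
 * placeholder assumptions of a hypothetical caller, not taken from a real
 * user; @length is assumed to be cluster-aligned.
 *
 *     BlockCopyState *s;
 *     Error *local_err = NULL;
 *     int ret;
 *
 *     s = block_copy_state_new(source_child, target_child, 64 * KiB,
 *                              true, 0, &local_err);
 *     if (!s) {
 *         error_report_err(local_err);
 *         return;
 *     }
 *
 *     // block_copy() copies only dirty clusters, so mark the wanted region.
 *     bdrv_set_dirty_bitmap(block_copy_dirty_bitmap(s), 0, length);
 *
 *     ret = block_copy(s, 0, length, false); // must run in coroutine context
 *
 *     block_copy_state_free(s);
 */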

/*
 * Takes ownership of @task
 *
 * If pool is NULL directly run the task, otherwise schedule it into the pool.
 *
 * Returns: task.func return code if pool is NULL
 *          otherwise -ECANCELED if pool status is bad
 *          otherwise 0 (successfully scheduled)
 */
static coroutine_fn int block_copy_task_run(AioTaskPool *pool,
                                            BlockCopyTask *task)
{
    if (!pool) {
        int ret = task->task.func(&task->task);

        g_free(task);
        return ret;
    }

    aio_task_pool_wait_slot(pool);
    if (aio_task_pool_status(pool) < 0) {
        co_put_to_shres(task->s->mem, task->bytes);
        block_copy_task_end(task, -ECANCELED);
        g_free(task);
        return -ECANCELED;
    }

    aio_task_pool_start_task(pool, &task->task);

    return 0;
}

/*
 * block_copy_do_copy
 *
 * Copy a cluster-aligned chunk. The requested region is allowed to exceed
 * s->len only to cover the last cluster when s->len is not aligned to
 * clusters.
 *
 * No synchronization here: neither bitmap updates nor handling of intersecting
 * requests, only the copy itself.
 *
 * @copy_range is an in-out argument: if *copy_range is false, copy_range is
 * not attempted. If *copy_range is true, copy_range is attempted; if the
 * attempt fails, the function falls back to the usual read+write and
 * *copy_range is set to false. *copy_range and @zeroes must not be true
 * simultaneously.
 *
 * Returns 0 on success.
 */
static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
                                           int64_t offset, int64_t bytes,
                                           bool zeroes, bool *copy_range,
                                           bool *error_is_read)
{
    int ret;
    int64_t nbytes = MIN(offset + bytes, s->len) - offset;
    void *bounce_buffer = NULL;

    assert(offset >= 0 && bytes > 0 && INT64_MAX - offset >= bytes);
    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));
    assert(offset < s->len);
    assert(offset + bytes <= s->len ||
           offset + bytes == QEMU_ALIGN_UP(s->len, s->cluster_size));
    assert(nbytes < INT_MAX);
    assert(!(*copy_range && zeroes));

    if (zeroes) {
        ret = bdrv_co_pwrite_zeroes(s->target, offset, nbytes, s->write_flags &
                                    ~BDRV_REQ_WRITE_COMPRESSED);
        if (ret < 0) {
            trace_block_copy_write_zeroes_fail(s, offset, ret);
            *error_is_read = false;
        }
        return ret;
    }

    if (*copy_range) {
        ret = bdrv_co_copy_range(s->source, offset, s->target, offset, nbytes,
                                 0, s->write_flags);
        if (ret < 0) {
            trace_block_copy_copy_range_fail(s, offset, ret);
            *copy_range = false;
            /* Fall back to read+write with an allocated buffer */
        } else {
            return 0;
        }
    }

    /*
     * If the copy_range request above failed, we may proceed with a buffered
     * request larger than BLOCK_COPY_MAX_BUFFER. Still, further requests will
     * be properly limited, so this is not a big concern. Moreover, the most
     * likely case (copy_range is unsupported for the configuration, so the
     * very first copy_range request fails) is handled by setting a large
     * copy_size only after the first successful copy_range.
     */

    bounce_buffer = qemu_blockalign(s->source->bs, nbytes);

    ret = bdrv_co_pread(s->source, offset, nbytes, bounce_buffer, 0);
    if (ret < 0) {
        trace_block_copy_read_fail(s, offset, ret);
        *error_is_read = true;
        goto out;
    }

    ret = bdrv_co_pwrite(s->target, offset, nbytes, bounce_buffer,
                         s->write_flags);
    if (ret < 0) {
        trace_block_copy_write_fail(s, offset, ret);
        *error_is_read = false;
        goto out;
    }

out:
    qemu_vfree(bounce_buffer);

    return ret;
}

static void block_copy_handle_copy_range_result(BlockCopyState *s,
                                                bool is_success)
{
    if (!s->use_copy_range) {
        /* already disabled */
        return;
    }

    if (is_success) {
        /*
         * Successful copy-range. Now increase copy_size. copy_range
         * does not respect max_transfer (it's a TODO), so we factor
         * that in here.
         */
        s->copy_size =
                MIN(MAX(s->cluster_size, BLOCK_COPY_MAX_COPY_RANGE),
                    QEMU_ALIGN_DOWN(block_copy_max_transfer(s->source,
                                                            s->target),
                                    s->cluster_size));
    } else {
        /* Copy-range failed, disable it. */
        s->use_copy_range = false;
        s->copy_size = MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER);
    }
}

static coroutine_fn int block_copy_task_entry(AioTask *task)
{
    BlockCopyTask *t = container_of(task, BlockCopyTask, task);
    bool error_is_read = false;
    bool copy_range = t->copy_range;
    int ret;

    ret = block_copy_do_copy(t->s, t->offset, t->bytes, t->zeroes,
                             &copy_range, &error_is_read);
    if (t->copy_range) {
        block_copy_handle_copy_range_result(t->s, copy_range);
    }
    if (ret < 0) {
        if (!t->call_state->ret) {
            t->call_state->ret = ret;
            t->call_state->error_is_read = error_is_read;
        }
    } else {
        progress_work_done(t->s->progress, t->bytes);
    }
    co_put_to_shres(t->s->mem, t->bytes);
    block_copy_task_end(t, ret);

    return ret;
}

static int block_copy_block_status(BlockCopyState *s, int64_t offset,
                                   int64_t bytes, int64_t *pnum)
{
    int64_t num;
    BlockDriverState *base;
    int ret;

    if (s->skip_unallocated) {
        base = bdrv_backing_chain_next(s->source->bs);
    } else {
        base = NULL;
    }

    ret = bdrv_block_status_above(s->source->bs, base, offset, bytes, &num,
                                  NULL, NULL);
    if (ret < 0 || num < s->cluster_size) {
        /*
         * On error, or if we failed to obtain a large enough chunk, just fall
         * back to copying one cluster.
         */
        num = s->cluster_size;
        ret = BDRV_BLOCK_ALLOCATED | BDRV_BLOCK_DATA;
    } else if (offset + num == s->len) {
        num = QEMU_ALIGN_UP(num, s->cluster_size);
    } else {
        num = QEMU_ALIGN_DOWN(num, s->cluster_size);
    }

    *pnum = num;
    return ret;
}

/*
 * Check if the cluster starting at @offset is allocated or not.
 * Return via @pnum the number of contiguous clusters sharing this allocation.
 */
static int block_copy_is_cluster_allocated(BlockCopyState *s, int64_t offset,
                                           int64_t *pnum)
{
    BlockDriverState *bs = s->source->bs;
    int64_t count, total_count = 0;
    int64_t bytes = s->len - offset;
    int ret;

    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));

    while (true) {
        ret = bdrv_is_allocated(bs, offset, bytes, &count);
        if (ret < 0) {
            return ret;
        }

        total_count += count;

        if (ret || count == 0) {
            /*
             * ret: partial segment(s) are considered allocated.
             * otherwise: unallocated tail is treated as an entire segment.
             */
            *pnum = DIV_ROUND_UP(total_count, s->cluster_size);
            return ret;
        }

        /* Unallocated segment(s) with uncertain following segment(s) */
        if (total_count >= s->cluster_size) {
            *pnum = total_count / s->cluster_size;
            return 0;
        }

        offset += count;
        bytes -= count;
    }
}

/*
 * Reset bits in copy_bitmap starting at offset if they represent unallocated
 * data in the image. May reset subsequent contiguous bits.
 * @return 0 when the cluster at @offset was unallocated,
 *         1 otherwise, and a negative errno on error.
 */
int64_t block_copy_reset_unallocated(BlockCopyState *s,
                                     int64_t offset, int64_t *count)
{
    int ret;
    int64_t clusters, bytes;

    ret = block_copy_is_cluster_allocated(s, offset, &clusters);
    if (ret < 0) {
        return ret;
    }

    bytes = clusters * s->cluster_size;

    if (!ret) {
        bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
        progress_set_remaining(s->progress,
                               bdrv_get_dirty_count(s->copy_bitmap) +
                               s->in_flight_bytes);
    }

    *count = bytes;
    return ret;
}

/*
 * block_copy_dirty_clusters
 *
 * Copy dirty clusters in the @offset/@bytes range.
 * Returns 1 if dirty clusters were found and successfully copied, 0 if no
 * dirty clusters were found, and -errno on failure.
 */
static int coroutine_fn
block_copy_dirty_clusters(BlockCopyCallState *call_state)
{
    BlockCopyState *s = call_state->s;
    int64_t offset = call_state->offset;
    int64_t bytes = call_state->bytes;

    int ret = 0;
    bool found_dirty = false;
    int64_t end = offset + bytes;
    AioTaskPool *aio = NULL;

    /*
     * The block_copy() user is responsible for keeping the source and target
     * in the same AioContext.
     */
    assert(bdrv_get_aio_context(s->source->bs) ==
           bdrv_get_aio_context(s->target->bs));

    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));

    while (bytes && aio_task_pool_status(aio) == 0 && !call_state->cancelled) {
        BlockCopyTask *task;
        int64_t status_bytes;

        task = block_copy_task_create(s, call_state, offset, bytes);
        if (!task) {
            /* No more dirty bits in the bitmap */
            trace_block_copy_skip_range(s, offset, bytes);
            break;
        }
        if (task->offset > offset) {
            trace_block_copy_skip_range(s, offset, task->offset - offset);
        }

        found_dirty = true;

        ret = block_copy_block_status(s, task->offset, task->bytes,
                                      &status_bytes);
        assert(ret >= 0); /* never fails */
        if (status_bytes < task->bytes) {
            block_copy_task_shrink(task, status_bytes);
        }
        if (s->skip_unallocated && !(ret & BDRV_BLOCK_ALLOCATED)) {
            block_copy_task_end(task, 0);
            progress_set_remaining(s->progress,
                                   bdrv_get_dirty_count(s->copy_bitmap) +
                                   s->in_flight_bytes);
            trace_block_copy_skip_range(s, task->offset, task->bytes);
            offset = task_end(task);
            bytes = end - offset;
            g_free(task);
            continue;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            task->zeroes = true;
            task->copy_range = false;
        }

        if (s->speed) {
            if (!call_state->ignore_ratelimit) {
                uint64_t ns = ratelimit_calculate_delay(&s->rate_limit, 0);
                if (ns > 0) {
                    block_copy_task_end(task, -EAGAIN);
                    g_free(task);
                    qemu_co_sleep_ns_wakeable(&call_state->sleep,
                                              QEMU_CLOCK_REALTIME, ns);
                    continue;
                }
            }

            ratelimit_calculate_delay(&s->rate_limit, task->bytes);
        }

        trace_block_copy_process(s, task->offset);

        co_get_from_shres(s->mem, task->bytes);

        offset = task_end(task);
        bytes = end - offset;

        if (!aio && bytes) {
            aio = aio_task_pool_new(call_state->max_workers);
        }

        ret = block_copy_task_run(aio, task);
        if (ret < 0) {
            goto out;
        }
    }

out:
    if (aio) {
        aio_task_pool_wait_all(aio);

        /*
         * We are not really interested in -ECANCELED returned from
         * block_copy_task_run. If it fails, it means some task already failed
         * for a real reason; let's return the first failure.
         * Still, assert that we don't overwrite a failure with success.
         *
         * Note: ret may be positive here because of the block-status result.
         */
        assert(ret >= 0 || aio_task_pool_status(aio) < 0);
        ret = aio_task_pool_status(aio);

        aio_task_pool_free(aio);
    }

    return ret < 0 ? ret : found_dirty;
}

void block_copy_kick(BlockCopyCallState *call_state)
{
    qemu_co_sleep_wake(&call_state->sleep);
}

/*
 * block_copy_common
 *
 * Copy the requested region according to the dirty bitmap.
 * Collaborate with parallel block_copy requests: if they succeed, it will help
 * us. If they fail, we will retry not-copied regions. So, if we return an
 * error, it means that some I/O operation failed in the context of _this_
 * block_copy call, not of some parallel operation.
 */
static int coroutine_fn block_copy_common(BlockCopyCallState *call_state)
{
    int ret;

    QLIST_INSERT_HEAD(&call_state->s->calls, call_state, list);

    do {
        ret = block_copy_dirty_clusters(call_state);

        if (ret == 0 && !call_state->cancelled) {
            ret = block_copy_wait_one(call_state->s, call_state->offset,
                                      call_state->bytes);
        }

        /*
         * We retry in two cases:
         * 1. Some progress was made.
         *    Something was copied, which means that there were yield points
         *    and some new dirty bits may have appeared (due to failed parallel
         *    block-copy requests).
         * 2. We have waited for some intersecting block-copy request.
         *    It may have failed and produced new dirty bits.
         */
    } while (ret > 0 && !call_state->cancelled);

    call_state->finished = true;

    if (call_state->cb) {
        call_state->cb(call_state->cb_opaque);
    }

    QLIST_REMOVE(call_state, list);

    return ret;
}

int coroutine_fn block_copy(BlockCopyState *s, int64_t start, int64_t bytes,
                            bool ignore_ratelimit)
{
    BlockCopyCallState call_state = {
        .s = s,
        .offset = start,
        .bytes = bytes,
        .ignore_ratelimit = ignore_ratelimit,
        .max_workers = BLOCK_COPY_MAX_WORKERS,
    };

    return block_copy_common(&call_state);
}

static void coroutine_fn block_copy_async_co_entry(void *opaque)
{
    block_copy_common(opaque);
}

BlockCopyCallState *block_copy_async(BlockCopyState *s,
                                     int64_t offset, int64_t bytes,
                                     int max_workers, int64_t max_chunk,
                                     BlockCopyAsyncCallbackFunc cb,
                                     void *cb_opaque)
{
    BlockCopyCallState *call_state = g_new(BlockCopyCallState, 1);

    *call_state = (BlockCopyCallState) {
        .s = s,
        .offset = offset,
        .bytes = bytes,
        .max_workers = max_workers,
        .max_chunk = max_chunk,
        .cb = cb,
        .cb_opaque = cb_opaque,

        .co = qemu_coroutine_create(block_copy_async_co_entry, call_state),
    };

    qemu_coroutine_enter(call_state->co);

    return call_state;
}

void block_copy_call_free(BlockCopyCallState *call_state)
{
    if (!call_state) {
        return;
    }

    assert(call_state->finished);
    g_free(call_state);
}

bool block_copy_call_finished(BlockCopyCallState *call_state)
{
    return call_state->finished;
}

bool block_copy_call_succeeded(BlockCopyCallState *call_state)
{
    return call_state->finished && !call_state->cancelled &&
           call_state->ret == 0;
}

bool block_copy_call_failed(BlockCopyCallState *call_state)
{
    return call_state->finished && !call_state->cancelled &&
           call_state->ret < 0;
}

bool block_copy_call_cancelled(BlockCopyCallState *call_state)
{
    return call_state->cancelled;
}

int block_copy_call_status(BlockCopyCallState *call_state, bool *error_is_read)
{
    assert(call_state->finished);
    if (error_is_read) {
        *error_is_read = call_state->error_is_read;
    }
    return call_state->ret;
}

void block_copy_call_cancel(BlockCopyCallState *call_state)
{
    call_state->cancelled = true;
    block_copy_kick(call_state);
}

BdrvDirtyBitmap *block_copy_dirty_bitmap(BlockCopyState *s)
{
    return s->copy_bitmap;
}

void block_copy_set_skip_unallocated(BlockCopyState *s, bool skip)
{
    s->skip_unallocated = skip;
}

void block_copy_set_speed(BlockCopyState *s, uint64_t speed)
{
    s->speed = speed;
    if (speed > 0) {
        ratelimit_set_speed(&s->rate_limit, speed, BLOCK_COPY_SLICE_TIME);
    }

    /*
     * Note: it would be good to kick all call states from here, but that may
     * only be done from a coroutine, to avoid crashing if the s->calls list
     * changes while entering one call. So for now, the only user of this
     * function kicks its single call_state by hand.
     */
}
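
/*
 * A hedged usage sketch of the asynchronous API (comment only): start a
 * background copy, learn about completion through the callback, then inspect
 * and free the call state. The callback body, @length and the opaque pointer
 * are illustrative assumptions; @offset/@length must be cluster-aligned.
 *
 *     static void copy_done_cb(void *opaque)
 *     {
 *         // Called once the copy finishes (successfully, with an error, or
 *         // after cancellation).
 *     }
 *
 *     BlockCopyCallState *call;
 *
 *     call = block_copy_async(s, 0, length, BLOCK_COPY_MAX_WORKERS, 0,
 *                             copy_done_cb, NULL);
 *
 *     // Later, e.g. from the caller's completion path:
 *     if (block_copy_call_succeeded(call)) {
 *         // done
 *     } else if (block_copy_call_failed(call)) {
 *         bool error_is_read;
 *         int ret = block_copy_call_status(call, &error_is_read);
 *         // handle ret / error_is_read
 *     }
 *     block_copy_call_free(call);
 *
 *     // To stop an in-flight call instead, use block_copy_call_cancel(call)
 *     // and free it once block_copy_call_finished(call) is true.
 */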