/*
 * block_copy API
 *
 * Copyright (C) 2013 Proxmox Server Solutions
 * Copyright (c) 2019 Virtuozzo International GmbH.
 *
 * Authors:
 *  Dietmar Maurer (dietmar@proxmox.com)
 *  Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "qapi/error.h"
#include "block/block-copy.h"
#include "sysemu/block-backend.h"
#include "qemu/units.h"
#include "qemu/coroutine.h"
#include "block/aio_task.h"

#define BLOCK_COPY_MAX_COPY_RANGE (16 * MiB)
#define BLOCK_COPY_MAX_BUFFER (1 * MiB)
#define BLOCK_COPY_MAX_MEM (128 * MiB)
#define BLOCK_COPY_MAX_WORKERS 64
#define BLOCK_COPY_SLICE_TIME 100000000ULL /* ns */

static coroutine_fn int block_copy_task_entry(AioTask *task);

typedef struct BlockCopyCallState {
    /* IN parameters. Initialized in block_copy_async() and never changed. */
    BlockCopyState *s;
    int64_t offset;
    int64_t bytes;
    int max_workers;
    int64_t max_chunk;
    bool ignore_ratelimit;
    BlockCopyAsyncCallbackFunc cb;
    void *cb_opaque;

    /* Coroutine where async block-copy is running */
    Coroutine *co;

    /* To reference all call states from BlockCopyState */
    QLIST_ENTRY(BlockCopyCallState) list;

    /* State */
    int ret;
    bool finished;
    QemuCoSleepState *sleep_state;
    bool cancelled;

    /* OUT parameters */
    bool error_is_read;
} BlockCopyCallState;

typedef struct BlockCopyTask {
    AioTask task;

    BlockCopyState *s;
    BlockCopyCallState *call_state;
    int64_t offset;
    int64_t bytes;
    bool zeroes;
    QLIST_ENTRY(BlockCopyTask) list;
    CoQueue wait_queue; /* coroutines blocked on this task */
} BlockCopyTask;

static int64_t task_end(BlockCopyTask *task)
{
    return task->offset + task->bytes;
}

typedef struct BlockCopyState {
    /*
     * BdrvChild objects are not owned or managed by block-copy. They are
     * provided by the block-copy user, who is responsible for appropriate
     * permissions on these children.
     */
    BdrvChild *source;
    BdrvChild *target;
    BdrvDirtyBitmap *copy_bitmap;
    int64_t in_flight_bytes;
    int64_t cluster_size;
    bool use_copy_range;
    int64_t copy_size;
    uint64_t len;
    QLIST_HEAD(, BlockCopyTask) tasks; /* All tasks from all block-copy calls */
    QLIST_HEAD(, BlockCopyCallState) calls;

    BdrvRequestFlags write_flags;

    /*
     * skip_unallocated:
     *
     * Used by sync=top jobs, which first scan the source node for unallocated
     * areas and clear them in the copy_bitmap. During this process, the bitmap
     * is thus not fully initialized: It may still have bits set for areas that
     * are unallocated and should actually not be copied.
     *
     * This is indicated by skip_unallocated.
     *
     * In this case, block_copy() will query the source's allocation status,
     * skip unallocated regions, clear them in the copy_bitmap, and invoke
     * block_copy_reset_unallocated() every time it does.
     */
    bool skip_unallocated;

    ProgressMeter *progress;

    SharedResource *mem;

    uint64_t speed;
    RateLimit rate_limit;
} BlockCopyState;
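
/*
 * Find an existing task that overlaps the half-open byte range
 * [offset, offset + bytes), if any.
 */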
static BlockCopyTask *find_conflicting_task(BlockCopyState *s,
                                            int64_t offset, int64_t bytes)
{
    BlockCopyTask *t;

    QLIST_FOREACH(t, &s->tasks, list) {
        if (offset + bytes > t->offset && offset < t->offset + t->bytes) {
            return t;
        }
    }

    return NULL;
}

/*
 * If there are no intersecting tasks return false. Otherwise, wait for the
 * first found intersecting task to finish and return true.
 */
static bool coroutine_fn block_copy_wait_one(BlockCopyState *s, int64_t offset,
                                             int64_t bytes)
{
    BlockCopyTask *task = find_conflicting_task(s, offset, bytes);

    if (!task) {
        return false;
    }

    qemu_co_queue_wait(&task->wait_queue, NULL);

    return true;
}

/*
 * Search for the first dirty area in the offset/bytes range and create a task
 * at the beginning of it.
 */
static BlockCopyTask *block_copy_task_create(BlockCopyState *s,
                                             BlockCopyCallState *call_state,
                                             int64_t offset, int64_t bytes)
{
    BlockCopyTask *task;
    int64_t max_chunk = MIN_NON_ZERO(s->copy_size, call_state->max_chunk);

    if (!bdrv_dirty_bitmap_next_dirty_area(s->copy_bitmap,
                                           offset, offset + bytes,
                                           max_chunk, &offset, &bytes))
    {
        return NULL;
    }

    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    bytes = QEMU_ALIGN_UP(bytes, s->cluster_size);

    /* The region is dirty, so no existing tasks are possible in it. */
    assert(!find_conflicting_task(s, offset, bytes));

    bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
    s->in_flight_bytes += bytes;

    task = g_new(BlockCopyTask, 1);
    *task = (BlockCopyTask) {
        .task.func = block_copy_task_entry,
        .s = s,
        .call_state = call_state,
        .offset = offset,
        .bytes = bytes,
    };
    qemu_co_queue_init(&task->wait_queue);
    QLIST_INSERT_HEAD(&s->tasks, task, list);

    return task;
}
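
/*
 * Task lifecycle summary: block_copy_task_create() above clears the chosen
 * area in copy_bitmap and accounts it in in_flight_bytes;
 * block_copy_task_shrink() below gives an unprocessed tail back to the bitmap;
 * block_copy_task_end() re-dirties the area on failure and wakes all
 * coroutines waiting on the task.
 */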

/*
 * block_copy_task_shrink
 *
 * Drop the tail of the task to be handled later. Set dirty bits back and
 * wake up all tasks waiting for us (maybe some of them are no longer
 * intersecting with the shrunk task).
 */
static void coroutine_fn block_copy_task_shrink(BlockCopyTask *task,
                                                int64_t new_bytes)
{
    if (new_bytes == task->bytes) {
        return;
    }

    assert(new_bytes > 0 && new_bytes < task->bytes);

    task->s->in_flight_bytes -= task->bytes - new_bytes;
    bdrv_set_dirty_bitmap(task->s->copy_bitmap,
                          task->offset + new_bytes, task->bytes - new_bytes);

    task->bytes = new_bytes;
    qemu_co_queue_restart_all(&task->wait_queue);
}

static void coroutine_fn block_copy_task_end(BlockCopyTask *task, int ret)
{
    task->s->in_flight_bytes -= task->bytes;
    if (ret < 0) {
        bdrv_set_dirty_bitmap(task->s->copy_bitmap, task->offset, task->bytes);
    }
    QLIST_REMOVE(task, list);
    qemu_co_queue_restart_all(&task->wait_queue);
}

void block_copy_state_free(BlockCopyState *s)
{
    if (!s) {
        return;
    }

    bdrv_release_dirty_bitmap(s->copy_bitmap);
    shres_destroy(s->mem);
    g_free(s);
}

static uint32_t block_copy_max_transfer(BdrvChild *source, BdrvChild *target)
{
    return MIN_NON_ZERO(INT_MAX,
                        MIN_NON_ZERO(source->bs->bl.max_transfer,
                                     target->bs->bl.max_transfer));
}

BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
                                     int64_t cluster_size, bool use_copy_range,
                                     BdrvRequestFlags write_flags, Error **errp)
{
    BlockCopyState *s;
    BdrvDirtyBitmap *copy_bitmap;

    copy_bitmap = bdrv_create_dirty_bitmap(source->bs, cluster_size, NULL,
                                           errp);
    if (!copy_bitmap) {
        return NULL;
    }
    bdrv_disable_dirty_bitmap(copy_bitmap);

    s = g_new(BlockCopyState, 1);
    *s = (BlockCopyState) {
        .source = source,
        .target = target,
        .copy_bitmap = copy_bitmap,
        .cluster_size = cluster_size,
        .len = bdrv_dirty_bitmap_size(copy_bitmap),
        .write_flags = write_flags,
        .mem = shres_create(BLOCK_COPY_MAX_MEM),
    };

    if (block_copy_max_transfer(source, target) < cluster_size) {
        /*
         * copy_range does not respect max_transfer. We don't want to bother
         * with requests smaller than the block-copy cluster size, so fall back
         * to buffered copying (read and write respect max_transfer on their
         * own).
         */
        s->use_copy_range = false;
        s->copy_size = cluster_size;
    } else if (write_flags & BDRV_REQ_WRITE_COMPRESSED) {
        /* Compression supports only cluster-size writes and no copy-range. */
        s->use_copy_range = false;
        s->copy_size = cluster_size;
    } else {
        /*
         * We enable copy-range, but keep a small copy_size until the first
         * successful copy_range (see block_copy_do_copy).
         */
        s->use_copy_range = use_copy_range;
        s->copy_size = MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER);
    }

    QLIST_INIT(&s->tasks);
    QLIST_INIT(&s->calls);

    return s;
}

void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm)
{
    s->progress = pm;
}
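
/*
 * Typical setup by a block-copy user, as a minimal sketch (error handling and
 * the surrounding job code are omitted; 'source', 'target', 'cluster_size',
 * 'write_flags', 'job', 'len' and 'errp' are assumed to be provided by the
 * caller, with 'len' a cluster-aligned number of bytes to copy):
 *
 *     BlockCopyState *bcs;
 *     int ret;
 *
 *     bcs = block_copy_state_new(source, target, cluster_size, true,
 *                                write_flags, errp);
 *     if (!bcs) {
 *         return;
 *     }
 *     block_copy_set_progress_meter(bcs, &job->progress);
 *
 *     // later, from a coroutine:
 *     ret = block_copy(bcs, 0, len, false);
 *
 *     block_copy_state_free(bcs);
 */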

/*
 * Takes ownership of @task
 *
 * If pool is NULL, directly run the task; otherwise schedule it into the pool.
 *
 * Returns: task.func return code if pool is NULL
 *          otherwise -ECANCELED if pool status is bad
 *          otherwise 0 (successfully scheduled)
 */
static coroutine_fn int block_copy_task_run(AioTaskPool *pool,
                                            BlockCopyTask *task)
{
    if (!pool) {
        int ret = task->task.func(&task->task);

        g_free(task);
        return ret;
    }

    aio_task_pool_wait_slot(pool);
    if (aio_task_pool_status(pool) < 0) {
        co_put_to_shres(task->s->mem, task->bytes);
        block_copy_task_end(task, -ECANCELED);
        g_free(task);
        return -ECANCELED;
    }

    aio_task_pool_start_task(pool, &task->task);

    return 0;
}

/*
 * block_copy_do_copy
 *
 * Copy a cluster-aligned chunk. The requested region is allowed to exceed
 * s->len only to cover the last cluster when s->len is not aligned to
 * clusters.
 *
 * No synchronization here: neither the bitmap nor intersecting requests are
 * handled, only the copy itself.
 *
 * Returns 0 on success.
 */
static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
                                           int64_t offset, int64_t bytes,
                                           bool zeroes, bool *error_is_read)
{
    int ret;
    int64_t nbytes = MIN(offset + bytes, s->len) - offset;
    void *bounce_buffer = NULL;

    assert(offset >= 0 && bytes > 0 && INT64_MAX - offset >= bytes);
    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));
    assert(offset < s->len);
    assert(offset + bytes <= s->len ||
           offset + bytes == QEMU_ALIGN_UP(s->len, s->cluster_size));
    assert(nbytes < INT_MAX);

    if (zeroes) {
        ret = bdrv_co_pwrite_zeroes(s->target, offset, nbytes, s->write_flags &
                                    ~BDRV_REQ_WRITE_COMPRESSED);
        if (ret < 0) {
            trace_block_copy_write_zeroes_fail(s, offset, ret);
            *error_is_read = false;
        }
        return ret;
    }

    if (s->use_copy_range) {
        ret = bdrv_co_copy_range(s->source, offset, s->target, offset, nbytes,
                                 0, s->write_flags);
        if (ret < 0) {
            trace_block_copy_copy_range_fail(s, offset, ret);
            s->use_copy_range = false;
            s->copy_size = MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER);
            /* Fall back to read+write with an allocated buffer */
        } else {
            if (s->use_copy_range) {
                /*
                 * Successful copy-range. Now increase copy_size. copy_range
                 * does not respect max_transfer (it's a TODO), so we factor
                 * that in here.
                 *
                 * Note: we double-check s->use_copy_range for the case when a
                 * parallel block-copy request unsets it during the previous
                 * bdrv_co_copy_range call.
                 */
                s->copy_size =
                        MIN(MAX(s->cluster_size, BLOCK_COPY_MAX_COPY_RANGE),
                            QEMU_ALIGN_DOWN(block_copy_max_transfer(s->source,
                                                                    s->target),
                                            s->cluster_size));
            }
            goto out;
        }
    }

    /*
     * In case of a failed copy_range request above, we may proceed with a
     * buffered request larger than BLOCK_COPY_MAX_BUFFER. Still, further
     * requests will be properly limited, so don't care too much. Moreover, the
     * most likely case (copy_range is unsupported for the configuration, so
     * the very first copy_range request fails) is handled by setting a large
     * copy_size only after the first successful copy_range.
     */

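    /*
     * Buffered fallback path: read into a bounce buffer of nbytes (possibly
     * shortened to s->len at end-of-device above) and write it out, then free
     * the buffer via the shared 'out' label.
     */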
    bounce_buffer = qemu_blockalign(s->source->bs, nbytes);

    ret = bdrv_co_pread(s->source, offset, nbytes, bounce_buffer, 0);
    if (ret < 0) {
        trace_block_copy_read_fail(s, offset, ret);
        *error_is_read = true;
        goto out;
    }

    ret = bdrv_co_pwrite(s->target, offset, nbytes, bounce_buffer,
                         s->write_flags);
    if (ret < 0) {
        trace_block_copy_write_fail(s, offset, ret);
        *error_is_read = false;
        goto out;
    }

out:
    qemu_vfree(bounce_buffer);

    return ret;
}

static coroutine_fn int block_copy_task_entry(AioTask *task)
{
    BlockCopyTask *t = container_of(task, BlockCopyTask, task);
    bool error_is_read = false;
    int ret;

    ret = block_copy_do_copy(t->s, t->offset, t->bytes, t->zeroes,
                             &error_is_read);
    if (ret < 0 && !t->call_state->ret) {
        t->call_state->ret = ret;
        t->call_state->error_is_read = error_is_read;
    } else {
        progress_work_done(t->s->progress, t->bytes);
    }
    co_put_to_shres(t->s->mem, t->bytes);
    block_copy_task_end(t, ret);

    return ret;
}

static int block_copy_block_status(BlockCopyState *s, int64_t offset,
                                   int64_t bytes, int64_t *pnum)
{
    int64_t num;
    BlockDriverState *base;
    int ret;

    if (s->skip_unallocated) {
        base = bdrv_backing_chain_next(s->source->bs);
    } else {
        base = NULL;
    }

    ret = bdrv_block_status_above(s->source->bs, base, offset, bytes, &num,
                                  NULL, NULL);
    if (ret < 0 || num < s->cluster_size) {
        /*
         * On error, or if we failed to obtain a large enough chunk, just fall
         * back to copying one cluster.
         */
        num = s->cluster_size;
        ret = BDRV_BLOCK_ALLOCATED | BDRV_BLOCK_DATA;
    } else if (offset + num == s->len) {
        num = QEMU_ALIGN_UP(num, s->cluster_size);
    } else {
        num = QEMU_ALIGN_DOWN(num, s->cluster_size);
    }

    *pnum = num;
    return ret;
}

/*
 * Check if the cluster starting at offset is allocated or not.
 * Return via pnum the number of contiguous clusters sharing this allocation.
 */
static int block_copy_is_cluster_allocated(BlockCopyState *s, int64_t offset,
                                           int64_t *pnum)
{
    BlockDriverState *bs = s->source->bs;
    int64_t count, total_count = 0;
    int64_t bytes = s->len - offset;
    int ret;

    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));

    while (true) {
        ret = bdrv_is_allocated(bs, offset, bytes, &count);
        if (ret < 0) {
            return ret;
        }

        total_count += count;

        if (ret || count == 0) {
            /*
             * ret: partial segment(s) are considered allocated.
             * otherwise: unallocated tail is treated as an entire segment.
             */
            *pnum = DIV_ROUND_UP(total_count, s->cluster_size);
            return ret;
        }

        /* Unallocated segment(s) with uncertain following segment(s) */
        if (total_count >= s->cluster_size) {
            *pnum = total_count / s->cluster_size;
            return 0;
        }

        offset += count;
        bytes -= count;
    }
}
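
/*
 * Illustration of block_copy_is_cluster_allocated() above, with hypothetical
 * numbers and a 64 KiB cluster_size: if bdrv_is_allocated() first reports
 * 32 KiB unallocated and then 96 KiB allocated, total_count is 128 KiB and the
 * result is "allocated" with *pnum = 2, i.e. partially allocated clusters
 * count as allocated.
 */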

/*
 * Reset bits in copy_bitmap starting at offset if they represent unallocated
 * data in the image. May reset subsequent contiguous bits.
 * @return 0 when the cluster at @offset was unallocated,
 *         1 otherwise, and -ret on error.
 */
int64_t block_copy_reset_unallocated(BlockCopyState *s,
                                     int64_t offset, int64_t *count)
{
    int ret;
    int64_t clusters, bytes;

    ret = block_copy_is_cluster_allocated(s, offset, &clusters);
    if (ret < 0) {
        return ret;
    }

    bytes = clusters * s->cluster_size;

    if (!ret) {
        bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
        progress_set_remaining(s->progress,
                               bdrv_get_dirty_count(s->copy_bitmap) +
                               s->in_flight_bytes);
    }

    *count = bytes;
    return ret;
}

/*
 * block_copy_dirty_clusters
 *
 * Copy dirty clusters in the @offset/@bytes range.
 * Returns 1 if dirty clusters were found and successfully copied, 0 if no
 * dirty clusters were found, and -errno on failure.
 */
static int coroutine_fn
block_copy_dirty_clusters(BlockCopyCallState *call_state)
{
    BlockCopyState *s = call_state->s;
    int64_t offset = call_state->offset;
    int64_t bytes = call_state->bytes;

    int ret = 0;
    bool found_dirty = false;
    int64_t end = offset + bytes;
    AioTaskPool *aio = NULL;

    /*
     * The block_copy() user is responsible for keeping source and target in
     * the same aio context.
     */
    assert(bdrv_get_aio_context(s->source->bs) ==
           bdrv_get_aio_context(s->target->bs));

    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));

    while (bytes && aio_task_pool_status(aio) == 0 && !call_state->cancelled) {
        BlockCopyTask *task;
        int64_t status_bytes;

        task = block_copy_task_create(s, call_state, offset, bytes);
        if (!task) {
            /* No more dirty bits in the bitmap */
            trace_block_copy_skip_range(s, offset, bytes);
            break;
        }
        if (task->offset > offset) {
            trace_block_copy_skip_range(s, offset, task->offset - offset);
        }

        found_dirty = true;

        ret = block_copy_block_status(s, task->offset, task->bytes,
                                      &status_bytes);
        assert(ret >= 0); /* never fails */
        if (status_bytes < task->bytes) {
            block_copy_task_shrink(task, status_bytes);
        }
        if (s->skip_unallocated && !(ret & BDRV_BLOCK_ALLOCATED)) {
            block_copy_task_end(task, 0);
            progress_set_remaining(s->progress,
                                   bdrv_get_dirty_count(s->copy_bitmap) +
                                   s->in_flight_bytes);
            trace_block_copy_skip_range(s, task->offset, task->bytes);
            offset = task_end(task);
            bytes = end - offset;
            g_free(task);
            continue;
        }
        task->zeroes = ret & BDRV_BLOCK_ZERO;

        if (s->speed) {
            if (!call_state->ignore_ratelimit) {
                uint64_t ns = ratelimit_calculate_delay(&s->rate_limit, 0);
                if (ns > 0) {
                    block_copy_task_end(task, -EAGAIN);
                    g_free(task);
                    qemu_co_sleep_ns_wakeable(QEMU_CLOCK_REALTIME, ns,
                                              &call_state->sleep_state);
                    continue;
                }
            }

            ratelimit_calculate_delay(&s->rate_limit, task->bytes);
        }

        trace_block_copy_process(s, task->offset);

        co_get_from_shres(s->mem, task->bytes);

        offset = task_end(task);
        bytes = end - offset;

        if (!aio && bytes) {
            aio = aio_task_pool_new(call_state->max_workers);
        }

        ret = block_copy_task_run(aio, task);
        if (ret < 0) {
            goto out;
        }
    }

out:
    if (aio) {
        aio_task_pool_wait_all(aio);

        /*
         * We are not really interested in -ECANCELED returned from
         * block_copy_task_run. If it failed, it means some task already failed
         * for a real reason; let's return the first failure.
         * Still, assert that we don't rewrite a failure with success.
         *
         * Note: ret may be positive here because of the block-status result.
         */
        assert(ret >= 0 || aio_task_pool_status(aio) < 0);
        ret = aio_task_pool_status(aio);

        aio_task_pool_free(aio);
    }

    return ret < 0 ? ret : found_dirty;
}
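
/*
 * Wake up a call that is sleeping in the rate-limit delay in
 * block_copy_dirty_clusters() (see qemu_co_sleep_ns_wakeable() above), e.g.
 * when the call is cancelled.
 */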
void block_copy_kick(BlockCopyCallState *call_state)
{
    if (call_state->sleep_state) {
        qemu_co_sleep_wake(call_state->sleep_state);
    }
}

/*
 * block_copy_common
 *
 * Copy the requested region according to the dirty bitmap.
 * Collaborate with parallel block_copy requests: if they succeed, it will help
 * us. If they fail, we will retry not-copied regions. So, if we return an
 * error, it means that some I/O operation failed in the context of _this_
 * block_copy call, not of some parallel operation.
 */
static int coroutine_fn block_copy_common(BlockCopyCallState *call_state)
{
    int ret;

    QLIST_INSERT_HEAD(&call_state->s->calls, call_state, list);

    do {
        ret = block_copy_dirty_clusters(call_state);

        if (ret == 0 && !call_state->cancelled) {
            ret = block_copy_wait_one(call_state->s, call_state->offset,
                                      call_state->bytes);
        }

        /*
         * We retry in two cases:
         * 1. Some progress was done
         *    Something was copied, which means that there were yield points
         *    and some new dirty bits may have appeared (due to failed parallel
         *    block-copy requests).
         * 2. We have waited for some intersecting block-copy request
         *    It may have failed and produced new dirty bits.
         */
    } while (ret > 0 && !call_state->cancelled);

    call_state->finished = true;

    if (call_state->cb) {
        call_state->cb(call_state->cb_opaque);
    }

    QLIST_REMOVE(call_state, list);

    return ret;
}

int coroutine_fn block_copy(BlockCopyState *s, int64_t start, int64_t bytes,
                            bool ignore_ratelimit)
{
    BlockCopyCallState call_state = {
        .s = s,
        .offset = start,
        .bytes = bytes,
        .ignore_ratelimit = ignore_ratelimit,
        .max_workers = BLOCK_COPY_MAX_WORKERS,
    };

    return block_copy_common(&call_state);
}

static void coroutine_fn block_copy_async_co_entry(void *opaque)
{
    block_copy_common(opaque);
}

BlockCopyCallState *block_copy_async(BlockCopyState *s,
                                     int64_t offset, int64_t bytes,
                                     int max_workers, int64_t max_chunk,
                                     BlockCopyAsyncCallbackFunc cb,
                                     void *cb_opaque)
{
    BlockCopyCallState *call_state = g_new(BlockCopyCallState, 1);

    *call_state = (BlockCopyCallState) {
        .s = s,
        .offset = offset,
        .bytes = bytes,
        .max_workers = max_workers,
        .max_chunk = max_chunk,
        .cb = cb,
        .cb_opaque = cb_opaque,

        .co = qemu_coroutine_create(block_copy_async_co_entry, call_state),
    };

    qemu_coroutine_enter(call_state->co);

    return call_state;
}

void block_copy_call_free(BlockCopyCallState *call_state)
{
    if (!call_state) {
        return;
    }

    assert(call_state->finished);
    g_free(call_state);
}

bool block_copy_call_finished(BlockCopyCallState *call_state)
{
    return call_state->finished;
}

bool block_copy_call_succeeded(BlockCopyCallState *call_state)
{
    return call_state->finished && !call_state->cancelled &&
           call_state->ret == 0;
}

bool block_copy_call_failed(BlockCopyCallState *call_state)
{
    return call_state->finished && !call_state->cancelled &&
           call_state->ret < 0;
}

bool block_copy_call_cancelled(BlockCopyCallState *call_state)
{
    return call_state->cancelled;
}

int block_copy_call_status(BlockCopyCallState *call_state, bool *error_is_read)
{
    assert(call_state->finished);
    if (error_is_read) {
        *error_is_read = call_state->error_is_read;
    }
    return call_state->ret;
}

void block_copy_call_cancel(BlockCopyCallState *call_state)
{
    call_state->cancelled = true;
    block_copy_kick(call_state);
}

BdrvDirtyBitmap *block_copy_dirty_bitmap(BlockCopyState *s)
{
    return s->copy_bitmap;
}

void block_copy_set_skip_unallocated(BlockCopyState *s, bool skip)
{
    s->skip_unallocated = skip;
}

void block_copy_set_speed(BlockCopyState *s, uint64_t speed)
{
    s->speed = speed;
    if (speed > 0) {
        ratelimit_set_speed(&s->rate_limit, speed, BLOCK_COPY_SLICE_TIME);
    }

    /*
     * Note: it's good to kick all call states from here, but it should be done
     * only from a coroutine, to avoid crashing if the s->calls list changes
     * while entering one call. So for now, the only user of this function
     * kicks its single call_state by hand.
     */
}
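
/*
 * Example of the asynchronous API, as a minimal sketch ('my_done_cb', 'opaque'
 * and the surrounding variables are hypothetical; the callback should only
 * notify the waiter, since block_copy_common() still touches the call state
 * after invoking it):
 *
 *     BlockCopyCallState *call_state;
 *     bool error_is_read;
 *     int ret;
 *
 *     call_state = block_copy_async(bcs, offset, bytes,
 *                                   BLOCK_COPY_MAX_WORKERS, 0,
 *                                   my_done_cb, opaque);
 *
 *     // later, after block_copy_call_finished(call_state) is true:
 *     ret = block_copy_call_status(call_state, &error_is_read);
 *     block_copy_call_free(call_state);
 */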