xref: /openbmc/qemu/block/block-copy.c (revision 5df022cf)
1 /*
2  * block_copy API
3  *
4  * Copyright (C) 2013 Proxmox Server Solutions
5  * Copyright (c) 2019 Virtuozzo International GmbH.
6  *
7  * Authors:
8  *  Dietmar Maurer (dietmar@proxmox.com)
9  *  Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
10  *
11  * This work is licensed under the terms of the GNU GPL, version 2 or later.
12  * See the COPYING file in the top-level directory.
13  */
14 
15 #include "qemu/osdep.h"
16 
17 #include "trace.h"
18 #include "qapi/error.h"
19 #include "block/block-copy.h"
20 #include "sysemu/block-backend.h"
21 #include "qemu/units.h"
22 #include "qemu/coroutine.h"
23 #include "block/aio_task.h"
24 #include "qemu/error-report.h"
25 #include "qemu/memalign.h"
26 
27 #define BLOCK_COPY_MAX_COPY_RANGE (16 * MiB)
28 #define BLOCK_COPY_MAX_BUFFER (1 * MiB)
29 #define BLOCK_COPY_MAX_MEM (128 * MiB)
30 #define BLOCK_COPY_MAX_WORKERS 64
31 #define BLOCK_COPY_SLICE_TIME 100000000ULL /* ns */
32 #define BLOCK_COPY_CLUSTER_SIZE_DEFAULT (1 << 16)
33 
34 typedef enum {
35     COPY_READ_WRITE_CLUSTER,
36     COPY_READ_WRITE,
37     COPY_WRITE_ZEROES,
38     COPY_RANGE_SMALL,
39     COPY_RANGE_FULL
40 } BlockCopyMethod;
41 
42 static coroutine_fn int block_copy_task_entry(AioTask *task);
43 
44 typedef struct BlockCopyCallState {
45     /* Fields initialized in block_copy_async() and never changed. */
46     BlockCopyState *s;
47     int64_t offset;
48     int64_t bytes;
49     int max_workers;
50     int64_t max_chunk;
51     bool ignore_ratelimit;
52     BlockCopyAsyncCallbackFunc cb;
53     void *cb_opaque;
54     /* Coroutine where async block-copy is running */
55     Coroutine *co;
56 
57     /* Fields whose state changes throughout the execution */
58     bool finished; /* atomic */
59     QemuCoSleep sleep; /* TODO: protect API with a lock */
60     bool cancelled; /* atomic */
61     /* To reference all call states from BlockCopyState */
62     QLIST_ENTRY(BlockCopyCallState) list;
63 
64     /*
65      * Fields that report information about return values and errors.
66      * Protected by lock in BlockCopyState.
67      */
68     bool error_is_read;
69     /*
70      * @ret is set concurrently by tasks under the mutex. It is only set once, by
71      * the first failed task (and left untouched if no task failed).
72      * After finishing (call_state->finished is true), it is not modified
73      * anymore and may be safely read without mutex.
74      */
75     int ret;
76 } BlockCopyCallState;
77 
78 typedef struct BlockCopyTask {
79     AioTask task;
80 
81     /*
82      * Fields initialized in block_copy_task_create()
83      * and never changed.
84      */
85     BlockCopyState *s;
86     BlockCopyCallState *call_state;
87     int64_t offset;
88     /*
89      * @method can also be set again in the while loop of
90      * block_copy_dirty_clusters(), but it is never accessed concurrently
91      * because the only other function that reads it is
92      * block_copy_task_entry() and it is invoked afterwards in the same
93      * iteration.
94      */
95     BlockCopyMethod method;
96 
97     /*
98      * Fields whose state changes throughout the execution
99      * Protected by lock in BlockCopyState.
100      */
101     CoQueue wait_queue; /* coroutines blocked on this task */
102     /*
103      * Only protects against a parallel read racing with the update of @bytes
104      * in block_copy_task_shrink().
105      */
106     int64_t bytes;
107     QLIST_ENTRY(BlockCopyTask) list;
108 } BlockCopyTask;
109 
110 static int64_t task_end(BlockCopyTask *task)
111 {
112     return task->offset + task->bytes;
113 }
114 
115 typedef struct BlockCopyState {
116     /*
117      * BdrvChild objects are not owned or managed by block-copy. They are
118      * provided by the block-copy user, who is responsible for setting up
119      * appropriate permissions on these children.
120      */
121     BdrvChild *source;
122     BdrvChild *target;
123 
124     /*
125      * Fields initialized in block_copy_state_new()
126      * and never changed.
127      */
128     int64_t cluster_size;
129     int64_t max_transfer;
130     uint64_t len;
131     BdrvRequestFlags write_flags;
132 
133     /*
134      * Fields whose state changes throughout the execution
135      * Protected by lock.
136      */
137     CoMutex lock;
138     int64_t in_flight_bytes;
139     BlockCopyMethod method;
140     QLIST_HEAD(, BlockCopyTask) tasks; /* All tasks from all block-copy calls */
141     QLIST_HEAD(, BlockCopyCallState) calls;
142     /*
143      * skip_unallocated:
144      *
145      * Used by sync=top jobs, which first scan the source node for unallocated
146      * areas and clear them in the copy_bitmap.  During this process, the bitmap
147      * is thus not fully initialized: It may still have bits set for areas that
148      * are unallocated and should actually not be copied.
149      *
150      * This is indicated by skip_unallocated.
151      *
152      * In this case, block_copy() will query the source’s allocation status,
153      * skip unallocated regions, clear them in the copy_bitmap, and invoke
154      * block_copy_reset_unallocated() every time it does.
155      */
156     bool skip_unallocated; /* atomic */
157     /* State fields that use a thread-safe API */
158     BdrvDirtyBitmap *copy_bitmap;
159     ProgressMeter *progress;
160     SharedResource *mem;
161     RateLimit rate_limit;
162 } BlockCopyState;
163 
164 /* Called with lock held */
165 static BlockCopyTask *find_conflicting_task(BlockCopyState *s,
166                                             int64_t offset, int64_t bytes)
167 {
168     BlockCopyTask *t;
169 
170     QLIST_FOREACH(t, &s->tasks, list) {
171         if (offset + bytes > t->offset && offset < t->offset + t->bytes) {
172             return t;
173         }
174     }
175 
176     return NULL;
177 }
178 
179 /*
180  * If there are no intersecting tasks, return false. Otherwise, wait for the
181  * first intersecting task found to finish and return true.
182  *
183  * Called with lock held. May temporarily release the lock.
184  * A return value of false guarantees that the lock was NOT released.
185  */
186 static bool coroutine_fn block_copy_wait_one(BlockCopyState *s, int64_t offset,
187                                              int64_t bytes)
188 {
189     BlockCopyTask *task = find_conflicting_task(s, offset, bytes);
190 
191     if (!task) {
192         return false;
193     }
194 
195     qemu_co_queue_wait(&task->wait_queue, &s->lock);
196 
197     return true;
198 }
199 
200 /* Called with lock held */
201 static int64_t block_copy_chunk_size(BlockCopyState *s)
202 {
203     switch (s->method) {
204     case COPY_READ_WRITE_CLUSTER:
205         return s->cluster_size;
206     case COPY_READ_WRITE:
207     case COPY_RANGE_SMALL:
208         return MIN(MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER),
209                    s->max_transfer);
210     case COPY_RANGE_FULL:
211         return MIN(MAX(s->cluster_size, BLOCK_COPY_MAX_COPY_RANGE),
212                    s->max_transfer);
213     default:
214         /* Cannot have COPY_WRITE_ZEROES here.  */
215         abort();
216     }
217 }
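
/*
 * Illustrative arithmetic for the selection above, with assumed values (not
 * taken from any particular device): cluster_size = 64 KiB and
 * max_transfer = 1 MiB.
 *
 *   COPY_READ_WRITE / COPY_RANGE_SMALL:
 *       MIN(MAX(64 KiB, BLOCK_COPY_MAX_BUFFER = 1 MiB), 1 MiB)      = 1 MiB
 *   COPY_RANGE_FULL:
 *       MIN(MAX(64 KiB, BLOCK_COPY_MAX_COPY_RANGE = 16 MiB), 1 MiB) = 1 MiB
 *
 * i.e. with such a small max_transfer both branches are capped at 1 MiB; only
 * a max_transfer of 16 MiB or more lets COPY_RANGE_FULL actually use the
 * larger copy-range chunk.
 */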
218 
219 /*
220  * Search for the first dirty area in the offset/bytes range and create a task
221  * at the beginning of it.
222  */
223 static coroutine_fn BlockCopyTask *
224 block_copy_task_create(BlockCopyState *s, BlockCopyCallState *call_state,
225                        int64_t offset, int64_t bytes)
226 {
227     BlockCopyTask *task;
228     int64_t max_chunk;
229 
230     QEMU_LOCK_GUARD(&s->lock);
231     max_chunk = MIN_NON_ZERO(block_copy_chunk_size(s), call_state->max_chunk);
232     if (!bdrv_dirty_bitmap_next_dirty_area(s->copy_bitmap,
233                                            offset, offset + bytes,
234                                            max_chunk, &offset, &bytes))
235     {
236         return NULL;
237     }
238 
239     assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
240     bytes = QEMU_ALIGN_UP(bytes, s->cluster_size);
241 
242     /* region is dirty, so no existing tasks are possible in it */
243     assert(!find_conflicting_task(s, offset, bytes));
244 
245     bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
246     s->in_flight_bytes += bytes;
247 
248     task = g_new(BlockCopyTask, 1);
249     *task = (BlockCopyTask) {
250         .task.func = block_copy_task_entry,
251         .s = s,
252         .call_state = call_state,
253         .offset = offset,
254         .bytes = bytes,
255         .method = s->method,
256     };
257     qemu_co_queue_init(&task->wait_queue);
258     QLIST_INSERT_HEAD(&s->tasks, task, list);
259 
260     return task;
261 }
262 
263 /*
264  * block_copy_task_shrink
265  *
266  * Drop the tail of the task to be handled later. Set the dirty bits back and
267  * wake up all tasks waiting for us (some of them may no longer intersect with
268  * the shrunk task).
269  */
270 static void coroutine_fn block_copy_task_shrink(BlockCopyTask *task,
271                                                 int64_t new_bytes)
272 {
273     QEMU_LOCK_GUARD(&task->s->lock);
274     if (new_bytes == task->bytes) {
275         return;
276     }
277 
278     assert(new_bytes > 0 && new_bytes < task->bytes);
279 
280     task->s->in_flight_bytes -= task->bytes - new_bytes;
281     bdrv_set_dirty_bitmap(task->s->copy_bitmap,
282                           task->offset + new_bytes, task->bytes - new_bytes);
283 
284     task->bytes = new_bytes;
285     qemu_co_queue_restart_all(&task->wait_queue);
286 }
287 
288 static void coroutine_fn block_copy_task_end(BlockCopyTask *task, int ret)
289 {
290     QEMU_LOCK_GUARD(&task->s->lock);
291     task->s->in_flight_bytes -= task->bytes;
292     if (ret < 0) {
293         bdrv_set_dirty_bitmap(task->s->copy_bitmap, task->offset, task->bytes);
294     }
295     QLIST_REMOVE(task, list);
296     if (task->s->progress) {
297         progress_set_remaining(task->s->progress,
298                                bdrv_get_dirty_count(task->s->copy_bitmap) +
299                                task->s->in_flight_bytes);
300     }
301     qemu_co_queue_restart_all(&task->wait_queue);
302 }
303 
304 void block_copy_state_free(BlockCopyState *s)
305 {
306     if (!s) {
307         return;
308     }
309 
310     ratelimit_destroy(&s->rate_limit);
311     bdrv_release_dirty_bitmap(s->copy_bitmap);
312     shres_destroy(s->mem);
313     g_free(s);
314 }
315 
316 static uint32_t block_copy_max_transfer(BdrvChild *source, BdrvChild *target)
317 {
318     return MIN_NON_ZERO(INT_MAX,
319                         MIN_NON_ZERO(source->bs->bl.max_transfer,
320                                      target->bs->bl.max_transfer));
321 }
322 
323 void block_copy_set_copy_opts(BlockCopyState *s, bool use_copy_range,
324                               bool compress)
325 {
326     /* Keep BDRV_REQ_SERIALISING as set (or not set) by block_copy_state_new() */
327     s->write_flags = (s->write_flags & BDRV_REQ_SERIALISING) |
328         (compress ? BDRV_REQ_WRITE_COMPRESSED : 0);
329 
330     if (s->max_transfer < s->cluster_size) {
331         /*
332          * copy_range does not respect max_transfer. We don't want to bother
333          * with requests smaller than the block-copy cluster size, so fall back
334          * to buffered copying (read and write respect max_transfer on their
335          * own).
336          */
337         s->method = COPY_READ_WRITE_CLUSTER;
338     } else if (compress) {
339         /* Compression supports only cluster-size writes and no copy-range. */
340         s->method = COPY_READ_WRITE_CLUSTER;
341     } else {
342         /*
343          * If copy_range is enabled, start with COPY_RANGE_SMALL, until the first
344          * successful copy_range (see block_copy_do_copy).
345          */
346         s->method = use_copy_range ? COPY_RANGE_SMALL : COPY_READ_WRITE;
347     }
348 }
349 
350 static int64_t block_copy_calculate_cluster_size(BlockDriverState *target,
351                                                  Error **errp)
352 {
353     int ret;
354     BlockDriverInfo bdi;
355     bool target_does_cow = bdrv_backing_chain_next(target);
356 
357     /*
358      * If there is no backing file on the target, we cannot rely on COW if our
359      * backup cluster size is smaller than the target cluster size. Even for
360      * targets with a backing file, try to avoid COW if possible.
361      */
362     ret = bdrv_get_info(target, &bdi);
363     if (ret == -ENOTSUP && !target_does_cow) {
364         /* Cluster size is not defined */
365         warn_report("The target block device doesn't provide "
366                     "information about the block size and it doesn't have a "
367                     "backing file. The default block size of %u bytes is "
368                     "used. If the actual block size of the target exceeds "
369                     "this default, the backup may be unusable",
370                     BLOCK_COPY_CLUSTER_SIZE_DEFAULT);
371         return BLOCK_COPY_CLUSTER_SIZE_DEFAULT;
372     } else if (ret < 0 && !target_does_cow) {
373         error_setg_errno(errp, -ret,
374             "Couldn't determine the cluster size of the target image, "
375             "which has no backing file");
376         error_append_hint(errp,
377             "Aborting, since this may create an unusable destination image\n");
378         return ret;
379     } else if (ret < 0 && target_does_cow) {
380         /* Not fatal; just trudge on ahead. */
381         return BLOCK_COPY_CLUSTER_SIZE_DEFAULT;
382     }
383 
384     return MAX(BLOCK_COPY_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
385 }
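
/*
 * Worked example for the MAX() above (illustrative values): a qcow2 target
 * with the default 64 KiB clusters reports bdi.cluster_size = 64 KiB, so the
 * result is MAX(64 KiB, 64 KiB) = 64 KiB; a target reporting 2 MiB clusters
 * would give MAX(64 KiB, 2 MiB) = 2 MiB, i.e. the copy granularity never
 * drops below BLOCK_COPY_CLUSTER_SIZE_DEFAULT.
 */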
386 
387 BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
388                                      Error **errp)
389 {
390     BlockCopyState *s;
391     int64_t cluster_size;
392     BdrvDirtyBitmap *copy_bitmap;
393     bool is_fleecing;
394 
395     cluster_size = block_copy_calculate_cluster_size(target->bs, errp);
396     if (cluster_size < 0) {
397         return NULL;
398     }
399 
400     copy_bitmap = bdrv_create_dirty_bitmap(source->bs, cluster_size, NULL,
401                                            errp);
402     if (!copy_bitmap) {
403         return NULL;
404     }
405     bdrv_disable_dirty_bitmap(copy_bitmap);
406 
407     /*
408      * If the source is in the backing chain of the target, assume the target is
409      * going to be used for "image fleecing", i.e. it should represent a kind of
410      * snapshot of the source at the backup-start point in time, and that the
411      * target is going to be read by somebody (for example, used as an NBD
412      * export) during the backup job.
413      *
414      * In this case, we need to add the BDRV_REQ_SERIALISING write flag: without
415      * it, backup writes and third-party reads from the target could overlap, and
416      * a read from the target might return data already updated by the guest.
417      *
418      * For more information see commit f8d59dfb40bb and test
419      * tests/qemu-iotests/222
420      */
421     is_fleecing = bdrv_chain_contains(target->bs, source->bs);
422 
423     s = g_new(BlockCopyState, 1);
424     *s = (BlockCopyState) {
425         .source = source,
426         .target = target,
427         .copy_bitmap = copy_bitmap,
428         .cluster_size = cluster_size,
429         .len = bdrv_dirty_bitmap_size(copy_bitmap),
430         .write_flags = (is_fleecing ? BDRV_REQ_SERIALISING : 0),
431         .mem = shres_create(BLOCK_COPY_MAX_MEM),
432         .max_transfer = QEMU_ALIGN_DOWN(
433                                     block_copy_max_transfer(source, target),
434                                     cluster_size),
435     };
436 
437     block_copy_set_copy_opts(s, false, false);
438 
439     ratelimit_init(&s->rate_limit);
440     qemu_co_mutex_init(&s->lock);
441     QLIST_INIT(&s->tasks);
442     QLIST_INIT(&s->calls);
443 
444     return s;
445 }
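
/*
 * A minimal setup sketch (illustrative only; "source_child", "target_child",
 * "progress" and the surrounding job code are placeholders, not part of this
 * file):
 *
 *     BlockCopyState *bcs = block_copy_state_new(source_child, target_child,
 *                                                errp);
 *     if (!bcs) {
 *         return;                       // errp was set by block_copy_state_new()
 *     }
 *     block_copy_set_copy_opts(bcs, true, false);    // opt in to copy_range
 *     block_copy_set_progress_meter(bcs, progress);  // before starting the job
 *     ...
 *     block_copy_state_free(bcs);       // after all block-copy calls finished
 */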
446 
447 /* Only set before running the job, no need for locking. */
448 void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm)
449 {
450     s->progress = pm;
451 }
452 
453 /*
454  * Takes ownership of @task
455  *
456  * If pool is NULL, run the task directly; otherwise schedule it into the pool.
457  *
458  * Returns: task.func return code if pool is NULL
459  *          otherwise -ECANCELED if pool status is bad
460  *          otherwise 0 (successfully scheduled)
461  */
462 static coroutine_fn int block_copy_task_run(AioTaskPool *pool,
463                                             BlockCopyTask *task)
464 {
465     if (!pool) {
466         int ret = task->task.func(&task->task);
467 
468         g_free(task);
469         return ret;
470     }
471 
472     aio_task_pool_wait_slot(pool);
473     if (aio_task_pool_status(pool) < 0) {
474         co_put_to_shres(task->s->mem, task->bytes);
475         block_copy_task_end(task, -ECANCELED);
476         g_free(task);
477         return -ECANCELED;
478     }
479 
480     aio_task_pool_start_task(pool, &task->task);
481 
482     return 0;
483 }
484 
485 /*
486  * block_copy_do_copy
487  *
488  * Copy a cluster-aligned chunk. The requested region is allowed to exceed
489  * s->len only to cover the last cluster when s->len is not aligned to clusters.
490  *
491  * No sync here: no bitmap or intersecting-request handling, only the copy.
492  *
493  * @method is an in-out argument, so that copy_range can be either extended to
494  * a full-size buffer or disabled if the copy_range attempt fails.  The output
495  * value of @method should be used for subsequent tasks.
496  * Returns 0 on success.
497  */
498 static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
499                                            int64_t offset, int64_t bytes,
500                                            BlockCopyMethod *method,
501                                            bool *error_is_read)
502 {
503     int ret;
504     int64_t nbytes = MIN(offset + bytes, s->len) - offset;
505     void *bounce_buffer = NULL;
506 
507     assert(offset >= 0 && bytes > 0 && INT64_MAX - offset >= bytes);
508     assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
509     assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));
510     assert(offset < s->len);
511     assert(offset + bytes <= s->len ||
512            offset + bytes == QEMU_ALIGN_UP(s->len, s->cluster_size));
513     assert(nbytes < INT_MAX);
514 
515     switch (*method) {
516     case COPY_WRITE_ZEROES:
517         ret = bdrv_co_pwrite_zeroes(s->target, offset, nbytes, s->write_flags &
518                                     ~BDRV_REQ_WRITE_COMPRESSED);
519         if (ret < 0) {
520             trace_block_copy_write_zeroes_fail(s, offset, ret);
521             *error_is_read = false;
522         }
523         return ret;
524 
525     case COPY_RANGE_SMALL:
526     case COPY_RANGE_FULL:
527         ret = bdrv_co_copy_range(s->source, offset, s->target, offset, nbytes,
528                                  0, s->write_flags);
529         if (ret >= 0) {
530             /* Successful copy-range, increase chunk size.  */
531             *method = COPY_RANGE_FULL;
532             return 0;
533         }
534 
535         trace_block_copy_copy_range_fail(s, offset, ret);
536         *method = COPY_READ_WRITE;
537         /* Fall through to read+write with allocated buffer */
538 
539     case COPY_READ_WRITE_CLUSTER:
540     case COPY_READ_WRITE:
541          * If the copy_range request above failed, we may proceed with a
542          * buffered request larger than BLOCK_COPY_MAX_BUFFER. Still, further
543          * requests will be properly limited, so don't worry too much.
544          * Moreover, the most likely case (copy_range is unsupported for the
545          * configuration, so the very first copy_range request fails) is
546          * handled by switching to a large copy size only after the first
547          * successful copy_range.
548          * copy_range.
549          */
550 
551         bounce_buffer = qemu_blockalign(s->source->bs, nbytes);
552 
553         ret = bdrv_co_pread(s->source, offset, nbytes, bounce_buffer, 0);
554         if (ret < 0) {
555             trace_block_copy_read_fail(s, offset, ret);
556             *error_is_read = true;
557             goto out;
558         }
559 
560         ret = bdrv_co_pwrite(s->target, offset, nbytes, bounce_buffer,
561                              s->write_flags);
562         if (ret < 0) {
563             trace_block_copy_write_fail(s, offset, ret);
564             *error_is_read = false;
565             goto out;
566         }
567 
568     out:
569         qemu_vfree(bounce_buffer);
570         break;
571 
572     default:
573         abort();
574     }
575 
576     return ret;
577 }
578 
579 static coroutine_fn int block_copy_task_entry(AioTask *task)
580 {
581     BlockCopyTask *t = container_of(task, BlockCopyTask, task);
582     BlockCopyState *s = t->s;
583     bool error_is_read = false;
584     BlockCopyMethod method = t->method;
585     int ret;
586 
587     ret = block_copy_do_copy(s, t->offset, t->bytes, &method, &error_is_read);
588 
589     WITH_QEMU_LOCK_GUARD(&s->lock) {
590         if (s->method == t->method) {
591             s->method = method;
592         }
593 
594         if (ret < 0) {
595             if (!t->call_state->ret) {
596                 t->call_state->ret = ret;
597                 t->call_state->error_is_read = error_is_read;
598             }
599         } else if (s->progress) {
600             progress_work_done(s->progress, t->bytes);
601         }
602     }
603     co_put_to_shres(s->mem, t->bytes);
604     block_copy_task_end(t, ret);
605 
606     return ret;
607 }
608 
609 static int block_copy_block_status(BlockCopyState *s, int64_t offset,
610                                    int64_t bytes, int64_t *pnum)
611 {
612     int64_t num;
613     BlockDriverState *base;
614     int ret;
615 
616     if (qatomic_read(&s->skip_unallocated)) {
617         base = bdrv_backing_chain_next(s->source->bs);
618     } else {
619         base = NULL;
620     }
621 
622     ret = bdrv_block_status_above(s->source->bs, base, offset, bytes, &num,
623                                   NULL, NULL);
624     if (ret < 0 || num < s->cluster_size) {
625         /*
626          * On error, or if we failed to obtain a large enough chunk, just fall
627          * back to copying one cluster.
628          */
629         num = s->cluster_size;
630         ret = BDRV_BLOCK_ALLOCATED | BDRV_BLOCK_DATA;
631     } else if (offset + num == s->len) {
632         num = QEMU_ALIGN_UP(num, s->cluster_size);
633     } else {
634         num = QEMU_ALIGN_DOWN(num, s->cluster_size);
635     }
636 
637     *pnum = num;
638     return ret;
639 }
640 
641 /*
642  * Check if the cluster starting at @offset is allocated or not.
643  * Return via @pnum the number of contiguous clusters sharing this allocation.
644  */
645 static int block_copy_is_cluster_allocated(BlockCopyState *s, int64_t offset,
646                                            int64_t *pnum)
647 {
648     BlockDriverState *bs = s->source->bs;
649     int64_t count, total_count = 0;
650     int64_t bytes = s->len - offset;
651     int ret;
652 
653     assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
654 
655     while (true) {
656         ret = bdrv_is_allocated(bs, offset, bytes, &count);
657         if (ret < 0) {
658             return ret;
659         }
660 
661         total_count += count;
662 
663         if (ret || count == 0) {
664             /*
665              * If ret is set: partial segment(s) are considered allocated.
666              * Otherwise: the unallocated tail is treated as an entire segment.
667              */
668             *pnum = DIV_ROUND_UP(total_count, s->cluster_size);
669             return ret;
670         }
671 
672         /* Unallocated segment(s) with uncertain following segment(s) */
673         if (total_count >= s->cluster_size) {
674             *pnum = total_count / s->cluster_size;
675             return 0;
676         }
677 
678         offset += count;
679         bytes -= count;
680     }
681 }
682 
683 /*
684  * Reset bits in copy_bitmap starting at offset if they represent unallocated
685  * data in the image. May reset subsequent contiguous bits.
686  * @return 0 when the cluster at @offset was unallocated,
687  *         1 otherwise, and -ret on error.
688  *         1 otherwise, and a negative errno value on error.
689 int64_t block_copy_reset_unallocated(BlockCopyState *s,
690                                      int64_t offset, int64_t *count)
691 {
692     int ret;
693     int64_t clusters, bytes;
694 
695     ret = block_copy_is_cluster_allocated(s, offset, &clusters);
696     if (ret < 0) {
697         return ret;
698     }
699 
700     bytes = clusters * s->cluster_size;
701 
702     if (!ret) {
703         qemu_co_mutex_lock(&s->lock);
704         bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
705         if (s->progress) {
706             progress_set_remaining(s->progress,
707                                    bdrv_get_dirty_count(s->copy_bitmap) +
708                                    s->in_flight_bytes);
709         }
710         qemu_co_mutex_unlock(&s->lock);
711     }
712 
713     *count = bytes;
714     return ret;
715 }
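
/*
 * Sketch of how a sync=top user might drive the function above, as described
 * in the BlockCopyState comment (hypothetical loop, not code from this file):
 *
 *     int64_t offset = 0, count;
 *     int64_t len = bdrv_dirty_bitmap_size(block_copy_dirty_bitmap(bcs));
 *
 *     block_copy_set_skip_unallocated(bcs, true);
 *     while (offset < len) {
 *         int64_t ret = block_copy_reset_unallocated(bcs, offset, &count);
 *         if (ret < 0) {
 *             break;                    // query failed, give up the pre-scan
 *         }
 *         offset += count;              // 0: bits cleared, 1: still to copy
 *     }
 */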
716 
717 /*
718  * block_copy_dirty_clusters
719  *
720  * Copy dirty clusters in @offset/@bytes range.
721  * Returns 1 if dirty clusters found and successfully copied, 0 if no dirty
722  * clusters found and -errno on failure.
723  */
724 static int coroutine_fn
725 block_copy_dirty_clusters(BlockCopyCallState *call_state)
726 {
727     BlockCopyState *s = call_state->s;
728     int64_t offset = call_state->offset;
729     int64_t bytes = call_state->bytes;
730 
731     int ret = 0;
732     bool found_dirty = false;
733     int64_t end = offset + bytes;
734     AioTaskPool *aio = NULL;
735 
736     /*
737      * The block_copy() user is responsible for keeping the source and target in
738      * the same AioContext.
739      */
740     assert(bdrv_get_aio_context(s->source->bs) ==
741            bdrv_get_aio_context(s->target->bs));
742 
743     assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
744     assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));
745 
746     while (bytes && aio_task_pool_status(aio) == 0 &&
747            !qatomic_read(&call_state->cancelled)) {
748         BlockCopyTask *task;
749         int64_t status_bytes;
750 
751         task = block_copy_task_create(s, call_state, offset, bytes);
752         if (!task) {
753             /* No more dirty bits in the bitmap */
754             trace_block_copy_skip_range(s, offset, bytes);
755             break;
756         }
757         if (task->offset > offset) {
758             trace_block_copy_skip_range(s, offset, task->offset - offset);
759         }
760 
761         found_dirty = true;
762 
763         ret = block_copy_block_status(s, task->offset, task->bytes,
764                                       &status_bytes);
765         assert(ret >= 0); /* never fail */
766         if (status_bytes < task->bytes) {
767             block_copy_task_shrink(task, status_bytes);
768         }
769         if (qatomic_read(&s->skip_unallocated) &&
770             !(ret & BDRV_BLOCK_ALLOCATED)) {
771             block_copy_task_end(task, 0);
772             trace_block_copy_skip_range(s, task->offset, task->bytes);
773             offset = task_end(task);
774             bytes = end - offset;
775             g_free(task);
776             continue;
777         }
778         if (ret & BDRV_BLOCK_ZERO) {
779             task->method = COPY_WRITE_ZEROES;
780         }
781 
782         if (!call_state->ignore_ratelimit) {
783             uint64_t ns = ratelimit_calculate_delay(&s->rate_limit, 0);
784             if (ns > 0) {
785                 block_copy_task_end(task, -EAGAIN);
786                 g_free(task);
787                 qemu_co_sleep_ns_wakeable(&call_state->sleep,
788                                           QEMU_CLOCK_REALTIME, ns);
789                 continue;
790             }
791         }
792 
793         ratelimit_calculate_delay(&s->rate_limit, task->bytes);
794 
795         trace_block_copy_process(s, task->offset);
796 
797         co_get_from_shres(s->mem, task->bytes);
798 
799         offset = task_end(task);
800         bytes = end - offset;
801 
802         if (!aio && bytes) {
803             aio = aio_task_pool_new(call_state->max_workers);
804         }
805 
806         ret = block_copy_task_run(aio, task);
807         if (ret < 0) {
808             goto out;
809         }
810     }
811 
812 out:
813     if (aio) {
814         aio_task_pool_wait_all(aio);
815 
816         /*
817          * We are not really interested in -ECANCELED returned from
818          * block_copy_task_run. If it fails, some task has already failed for a
819          * real reason; let's return the first failure.
820          * Still, assert that we don't overwrite a failure with success.
821          *
822          * Note: ret may be positive here because of the block-status result.
823          */
824         assert(ret >= 0 || aio_task_pool_status(aio) < 0);
825         ret = aio_task_pool_status(aio);
826 
827         aio_task_pool_free(aio);
828     }
829 
830     return ret < 0 ? ret : found_dirty;
831 }
832 
833 void block_copy_kick(BlockCopyCallState *call_state)
834 {
835     qemu_co_sleep_wake(&call_state->sleep);
836 }
837 
838 /*
839  * block_copy_common
840  *
841  * Copy requested region, accordingly to dirty bitmap.
842  * Collaborate with parallel block_copy requests: if they succeed it will help
843  * us. If they fail, we will retry not-copied regions. So, if we return error,
844  * it means that some I/O operation failed in context of _this_ block_copy call,
845  * not some parallel operation.
846  */
847 static int coroutine_fn block_copy_common(BlockCopyCallState *call_state)
848 {
849     int ret;
850     BlockCopyState *s = call_state->s;
851 
852     qemu_co_mutex_lock(&s->lock);
853     QLIST_INSERT_HEAD(&s->calls, call_state, list);
854     qemu_co_mutex_unlock(&s->lock);
855 
856     do {
857         ret = block_copy_dirty_clusters(call_state);
858 
859         if (ret == 0 && !qatomic_read(&call_state->cancelled)) {
860             WITH_QEMU_LOCK_GUARD(&s->lock) {
861                 /*
862                  * Check that there are no tasks we still need to
863                  * wait for to complete.
864                  */
865                 ret = block_copy_wait_one(s, call_state->offset,
866                                           call_state->bytes);
867                 if (ret == 0) {
868                     /*
869                      * No pending tasks, but check again the bitmap in this
870                      * same critical section, since a task might have failed
871                      * between this and the critical section in
872                      * block_copy_dirty_clusters().
873                      *
874                      * A return value of 0 from block_copy_wait_one also means
875                      * that it didn't release the lock. So we are still in the
876                      * same critical section, not interrupted by any concurrent
877                      * access to the state.
878                      */
879                     ret = bdrv_dirty_bitmap_next_dirty(s->copy_bitmap,
880                                                        call_state->offset,
881                                                        call_state->bytes) >= 0;
882                 }
883             }
884         }
885 
886         /*
887          * We retry in two cases:
888          * 1. Some progress was made
889          *    Something was copied, which means that there were yield points
890          *    and some new dirty bits may have appeared (due to failed parallel
891          *    block-copy requests).
892          * 2. We have waited for some intersecting block-copy request
893          *    It may have failed and produced new dirty bits.
894          */
895     } while (ret > 0 && !qatomic_read(&call_state->cancelled));
896 
897     qatomic_store_release(&call_state->finished, true);
898 
899     if (call_state->cb) {
900         call_state->cb(call_state->cb_opaque);
901     }
902 
903     qemu_co_mutex_lock(&s->lock);
904     QLIST_REMOVE(call_state, list);
905     qemu_co_mutex_unlock(&s->lock);
906 
907     return ret;
908 }
909 
910 int coroutine_fn block_copy(BlockCopyState *s, int64_t start, int64_t bytes,
911                             bool ignore_ratelimit)
912 {
913     BlockCopyCallState call_state = {
914         .s = s,
915         .offset = start,
916         .bytes = bytes,
917         .ignore_ratelimit = ignore_ratelimit,
918         .max_workers = BLOCK_COPY_MAX_WORKERS,
919     };
920 
921     return block_copy_common(&call_state);
922 }
923 
924 static void coroutine_fn block_copy_async_co_entry(void *opaque)
925 {
926     block_copy_common(opaque);
927 }
928 
929 BlockCopyCallState *block_copy_async(BlockCopyState *s,
930                                      int64_t offset, int64_t bytes,
931                                      int max_workers, int64_t max_chunk,
932                                      BlockCopyAsyncCallbackFunc cb,
933                                      void *cb_opaque)
934 {
935     BlockCopyCallState *call_state = g_new(BlockCopyCallState, 1);
936 
937     *call_state = (BlockCopyCallState) {
938         .s = s,
939         .offset = offset,
940         .bytes = bytes,
941         .max_workers = max_workers,
942         .max_chunk = max_chunk,
943         .cb = cb,
944         .cb_opaque = cb_opaque,
945 
946         .co = qemu_coroutine_create(block_copy_async_co_entry, call_state),
947     };
948 
949     qemu_coroutine_enter(call_state->co);
950 
951     return call_state;
952 }
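
/*
 * Rough usage sketch for the async variant (illustrative; "copy_done" and the
 * chosen offset/bytes are hypothetical, but must be cluster-aligned):
 *
 *     static void copy_done(void *opaque)
 *     {
 *         // called once the background coroutine finishes or is cancelled
 *     }
 *
 *     BlockCopyCallState *cs;
 *
 *     cs = block_copy_async(bcs, 0, 16 * block_copy_cluster_size(bcs),
 *                           BLOCK_COPY_MAX_WORKERS, 0, copy_done, NULL);
 *     ...
 *     if (block_copy_call_finished(cs)) {
 *         bool error_is_read;
 *         int ret = block_copy_call_status(cs, &error_is_read);
 *         block_copy_call_free(cs);     // only valid on a finished call
 *     }
 *
 * A max_chunk of 0 means no extra per-call chunk limit beyond
 * block_copy_chunk_size().
 */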
953 
954 void block_copy_call_free(BlockCopyCallState *call_state)
955 {
956     if (!call_state) {
957         return;
958     }
959 
960     assert(qatomic_read(&call_state->finished));
961     g_free(call_state);
962 }
963 
964 bool block_copy_call_finished(BlockCopyCallState *call_state)
965 {
966     return qatomic_read(&call_state->finished);
967 }
968 
969 bool block_copy_call_succeeded(BlockCopyCallState *call_state)
970 {
971     return qatomic_load_acquire(&call_state->finished) &&
972            !qatomic_read(&call_state->cancelled) &&
973            call_state->ret == 0;
974 }
975 
976 bool block_copy_call_failed(BlockCopyCallState *call_state)
977 {
978     return qatomic_load_acquire(&call_state->finished) &&
979            !qatomic_read(&call_state->cancelled) &&
980            call_state->ret < 0;
981 }
982 
983 bool block_copy_call_cancelled(BlockCopyCallState *call_state)
984 {
985     return qatomic_read(&call_state->cancelled);
986 }
987 
988 int block_copy_call_status(BlockCopyCallState *call_state, bool *error_is_read)
989 {
990     assert(qatomic_load_acquire(&call_state->finished));
991     if (error_is_read) {
992         *error_is_read = call_state->error_is_read;
993     }
994     return call_state->ret;
995 }
996 
997 /*
998  * Note that cancelling and finishing are racy.
999  * The user can cancel a block-copy call that has already finished.
1000  */
1001 void block_copy_call_cancel(BlockCopyCallState *call_state)
1002 {
1003     qatomic_set(&call_state->cancelled, true);
1004     block_copy_kick(call_state);
1005 }
1006 
1007 BdrvDirtyBitmap *block_copy_dirty_bitmap(BlockCopyState *s)
1008 {
1009     return s->copy_bitmap;
1010 }
1011 
1012 int64_t block_copy_cluster_size(BlockCopyState *s)
1013 {
1014     return s->cluster_size;
1015 }
1016 
1017 void block_copy_set_skip_unallocated(BlockCopyState *s, bool skip)
1018 {
1019     qatomic_set(&s->skip_unallocated, skip);
1020 }
1021 
1022 void block_copy_set_speed(BlockCopyState *s, uint64_t speed)
1023 {
1024     ratelimit_set_speed(&s->rate_limit, speed, BLOCK_COPY_SLICE_TIME);
1025 
1026     /*
1027      * Note: it's good to kick all call states from here, but it should be done
1028      * only from a coroutine, to avoid crashing if the s->calls list changes
1029      * while entering one call. So for now, the only user of this function kicks
1030      * its single call_state by hand.
1031      */
1032 }
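
/*
 * Illustrative sequence for the pattern described above (hypothetical caller,
 * e.g. a job's set-speed handler; "active_call_state" is a placeholder):
 *
 *     block_copy_set_speed(bcs, speed);        // update the rate limit
 *     block_copy_kick(active_call_state);      // wake a throttled call, if any
 *
 * The kick is left to the caller because safely iterating s->calls would
 * require coroutine context, while the single existing user already knows its
 * one call state.
 */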
1033