/* xref: /openbmc/qemu/block/mirror.c (revision 62dd4eda) */
/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "trace.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"

#define SLICE_TIME    100000000ULL /* ns */
#define MAX_IN_FLIGHT 16
#define MAX_IO_BYTES (1 << 20) /* 1 MiB */
#define DEFAULT_MIRROR_BUF_SIZE (MAX_IN_FLIGHT * MAX_IO_BYTES)
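
/* With the defaults above, the mirror buffer is MAX_IN_FLIGHT * MAX_IO_BYTES
 * = 16 * 1 MiB = 16 MiB; mirror_free_init() below splits it into
 * granularity-sized chunks. */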

/* The mirroring buffer is carved into granularity-sized chunks; free chunks
 * are kept on the buf_free list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;

typedef struct MirrorBlockJob {
    BlockJob common;
    RateLimit limit;
    BlockBackend *target;
    BlockDriverState *mirror_top_bs;
    BlockDriverState *source;
    BlockDriverState *base;

    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockMirrorBackingMode backing_mode;
    BlockdevOnError on_source_error, on_target_error;
    bool synced;
    bool should_complete;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    BdrvDirtyBitmapIter *dbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    uint64_t last_pause_ns;
    unsigned long *in_flight_bitmap;
    int in_flight;
    int64_t bytes_in_flight;
    int ret;
    bool unmap;
    bool waiting_for_io;
    int target_cluster_size;
    int max_iov;
    bool initial_zeroing_ongoing;
} MirrorBlockJob;

typedef struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t offset;
    uint64_t bytes;
} MirrorOp;
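
/* Each in-flight request is tracked by one MirrorOp: mirror_do_read() and
 * mirror_do_zero_or_discard() allocate it and account for it in s->in_flight
 * and s->bytes_in_flight; mirror_iteration_done() undoes the accounting and
 * returns the qiov's chunks to s->buf_free. */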

static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&s->common, s->on_target_error,
                                      false, error);
    }
}

static void mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks;

    trace_mirror_iteration_done(s, op->offset, op->bytes, ret);

    s->in_flight--;
    s->bytes_in_flight -= op->bytes;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    chunk_num = op->offset / s->granularity;
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);
    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        if (!s->initial_zeroing_ongoing) {
            s->common.offset += op->bytes;
        }
    }
    qemu_iovec_destroy(&op->qiov);
    g_free(op);

    if (s->waiting_for_io) {
        qemu_coroutine_enter(s->common.co);
    }
}

static void mirror_write_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;

    aio_context_acquire(blk_get_aio_context(s->common.blk));
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset >> BDRV_SECTOR_BITS,
                              op->bytes >> BDRV_SECTOR_BITS);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }
    mirror_iteration_done(op, ret);
    aio_context_release(blk_get_aio_context(s->common.blk));
}

static void mirror_read_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;

    aio_context_acquire(blk_get_aio_context(s->common.blk));
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset >> BDRV_SECTOR_BITS,
                              op->bytes >> BDRV_SECTOR_BITS);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
    } else {
        blk_aio_pwritev(s->target, op->offset, &op->qiov,
                        0, mirror_write_complete, op);
    }
    aio_context_release(blk_get_aio_context(s->common.blk));
}

/* Clip bytes relative to offset to not exceed end-of-file */
static inline int64_t mirror_clip_bytes(MirrorBlockJob *s,
                                        int64_t offset,
                                        int64_t bytes)
{
    return MIN(bytes, s->bdev_length - offset);
}

/* Round offset and/or bytes to target cluster if COW is needed, and
 * return how far the adjusted tail extends past the original one. */
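/* Worked example (hypothetical numbers, assuming max_iov and buf_size do not
 * clip the request): with a 64 KiB granularity and a 2 MiB target cluster,
 * a 64 KiB request at offset 1 MiB into a not-yet-copied cluster is widened
 * to cover [0 MiB, 2 MiB), and the return value is the tail growth,
 * 2 MiB - (1 MiB + 64 KiB) = 960 KiB. */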
static int mirror_cow_align(MirrorBlockJob *s, int64_t *offset,
                            uint64_t *bytes)
{
    bool need_cow;
    int ret = 0;
    int64_t align_offset = *offset;
    unsigned int align_bytes = *bytes;
    int max_bytes = s->granularity * s->max_iov;

    assert(*bytes < INT_MAX);
    need_cow = !test_bit(*offset / s->granularity, s->cow_bitmap);
    need_cow |= !test_bit((*offset + *bytes - 1) / s->granularity,
                          s->cow_bitmap);
    if (need_cow) {
        bdrv_round_to_clusters(blk_bs(s->target), *offset, *bytes,
                               &align_offset, &align_bytes);
    }

    if (align_bytes > max_bytes) {
        align_bytes = max_bytes;
        if (need_cow) {
            align_bytes = QEMU_ALIGN_DOWN(align_bytes, s->target_cluster_size);
        }
    }
    /* Clipping may result in align_bytes unaligned to chunk boundary, but
     * that doesn't matter because it's already the end of the source image. */
    align_bytes = mirror_clip_bytes(s, align_offset, align_bytes);

    ret = align_offset + align_bytes - (*offset + *bytes);
    *offset = align_offset;
    *bytes = align_bytes;
    assert(ret >= 0);
    return ret;
}

static inline void mirror_wait_for_io(MirrorBlockJob *s)
{
    assert(!s->waiting_for_io);
    s->waiting_for_io = true;
    qemu_coroutine_yield();
    s->waiting_for_io = false;
}

/* Submit async read while handling COW.
 * Returns: The number of bytes copied after and including offset,
 *          excluding any bytes copied prior to offset due to alignment.
 *          This will be @bytes if no alignment is necessary, or
 *          (new_end - offset) if the tail is rounded up or down due to
 *          alignment or buffer limit.
 */
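/* Continuing the example at mirror_cow_align() (hypothetical numbers, with
 * the default limits): a call with offset = 1 MiB and bytes = 64 KiB that is
 * COW-expanded to [0, 2 MiB) returns new_end - offset = 1 MiB, i.e. the
 * 64 KiB requested plus the 960 KiB of tail alignment. */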
static uint64_t mirror_do_read(MirrorBlockJob *s, int64_t offset,
                               uint64_t bytes)
{
    BlockBackend *source = s->common.blk;
    int nb_chunks;
    uint64_t ret;
    MirrorOp *op;
    uint64_t max_bytes;

    max_bytes = s->granularity * s->max_iov;

    /* We can only handle as much as buf_size at a time. */
    bytes = MIN(s->buf_size, MIN(max_bytes, bytes));
    assert(bytes);
    assert(bytes < BDRV_REQUEST_MAX_BYTES);
    ret = bytes;

    if (s->cow_bitmap) {
        ret += mirror_cow_align(s, &offset, &bytes);
    }
    assert(bytes <= s->buf_size);
    /* The offset is granularity-aligned because:
     * 1) Caller passes in aligned values;
     * 2) mirror_cow_align is used only when target cluster is larger. */
    assert(QEMU_IS_ALIGNED(offset, s->granularity));
    /* The range is sector-aligned, since bdrv_getlength() rounds up. */
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    nb_chunks = DIV_ROUND_UP(bytes, s->granularity);

    while (s->buf_free_count < nb_chunks) {
        trace_mirror_yield_in_flight(s, offset, s->in_flight);
        mirror_wait_for_io(s);
    }

    /* Allocate a MirrorOp that is used as an AIO callback.  */
    op = g_new(MirrorOp, 1);
    op->s = s;
    op->offset = offset;
    op->bytes = bytes;

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = bytes - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
    }

    /* Copy the dirty cluster.  */
    s->in_flight++;
    s->bytes_in_flight += bytes;
    trace_mirror_one_iteration(s, offset, bytes);

    blk_aio_preadv(source, offset, &op->qiov, 0, mirror_read_complete, op);
    return ret;
}

static void mirror_do_zero_or_discard(MirrorBlockJob *s,
                                      int64_t offset,
                                      uint64_t bytes,
                                      bool is_discard)
{
    MirrorOp *op;

    /* Allocate a MirrorOp that is used as an AIO callback. The qiov is zeroed
     * so the freeing in mirror_iteration_done is a no-op. */
    op = g_new0(MirrorOp, 1);
    op->s = s;
    op->offset = offset;
    op->bytes = bytes;

    s->in_flight++;
    s->bytes_in_flight += bytes;
    if (is_discard) {
        blk_aio_pdiscard(s->target, offset,
                         op->bytes, mirror_write_complete, op);
    } else {
        blk_aio_pwrite_zeroes(s->target, offset,
                              op->bytes, s->unmap ? BDRV_REQ_MAY_UNMAP : 0,
                              mirror_write_complete, op);
    }
}

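/* One pass of the copy loop: find the next dirty chunk via the bitmap
 * iterator (restarting from the beginning once the iterator is exhausted),
 * extend it over consecutive dirty chunks up to buf_size, clear those dirty
 * bits, then consult the block status to decide per extent whether to copy
 * data or to write zeroes/discard on the target.  Returns the rate-limit
 * delay in ns. */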
static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->source;
    int64_t offset, first_chunk;
    uint64_t delay_ns = 0;
    /* At least the first dirty chunk is mirrored in one iteration. */
    int nb_chunks = 1;
    int sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
    int max_io_bytes = MAX(s->buf_size / MAX_IN_FLIGHT, MAX_IO_BYTES);

    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    offset = bdrv_dirty_iter_next(s->dbi) * BDRV_SECTOR_SIZE;
    if (offset < 0) {
        bdrv_set_dirty_iter(s->dbi, 0);
        offset = bdrv_dirty_iter_next(s->dbi) * BDRV_SECTOR_SIZE;
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap) *
                                  BDRV_SECTOR_SIZE);
        assert(offset >= 0);
    }
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    first_chunk = offset / s->granularity;
    while (test_bit(first_chunk, s->in_flight_bitmap)) {
        trace_mirror_yield_in_flight(s, offset, s->in_flight);
        mirror_wait_for_io(s);
    }

    block_job_pause_point(&s->common);

    /* Find the number of consecutive dirty chunks following the first dirty
     * one, and wait for in-flight requests in them. */
    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    while (nb_chunks * s->granularity < s->buf_size) {
        int64_t next_dirty;
        int64_t next_offset = offset + nb_chunks * s->granularity;
        int64_t next_chunk = next_offset / s->granularity;
        if (next_offset >= s->bdev_length ||
            !bdrv_get_dirty_locked(source, s->dirty_bitmap,
                                   next_offset >> BDRV_SECTOR_BITS)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
            break;
        }

        next_dirty = bdrv_dirty_iter_next(s->dbi) * BDRV_SECTOR_SIZE;
        if (next_dirty > next_offset || next_dirty < 0) {
            /* The bitmap iterator's cache is stale, refresh it */
            bdrv_set_dirty_iter(s->dbi, next_offset >> BDRV_SECTOR_BITS);
            next_dirty = bdrv_dirty_iter_next(s->dbi) * BDRV_SECTOR_SIZE;
        }
        assert(next_dirty == next_offset);
        nb_chunks++;
    }

    /* Clear dirty bits before querying the block status, because
     * calling bdrv_get_block_status_above could yield - if some blocks are
     * marked dirty in this window, we need to know.
     */
    bdrv_reset_dirty_bitmap_locked(s->dirty_bitmap, offset >> BDRV_SECTOR_BITS,
                                   nb_chunks * sectors_per_chunk);
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    bitmap_set(s->in_flight_bitmap, offset / s->granularity, nb_chunks);
    while (nb_chunks > 0 && offset < s->bdev_length) {
        int64_t ret;
        int io_sectors;
        unsigned int io_bytes;
        int64_t io_bytes_acct;
        BlockDriverState *file;
        enum MirrorMethod {
            MIRROR_METHOD_COPY,
            MIRROR_METHOD_ZERO,
            MIRROR_METHOD_DISCARD
        } mirror_method = MIRROR_METHOD_COPY;

        assert(!(offset % s->granularity));
        ret = bdrv_get_block_status_above(source, NULL,
                                          offset >> BDRV_SECTOR_BITS,
                                          nb_chunks * sectors_per_chunk,
                                          &io_sectors, &file);
        io_bytes = io_sectors * BDRV_SECTOR_SIZE;
        if (ret < 0) {
            io_bytes = MIN(nb_chunks * s->granularity, max_io_bytes);
        } else if (ret & BDRV_BLOCK_DATA) {
            io_bytes = MIN(io_bytes, max_io_bytes);
        }

        io_bytes -= io_bytes % s->granularity;
        if (io_bytes < s->granularity) {
            io_bytes = s->granularity;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_offset;
            unsigned int target_bytes;
            bdrv_round_to_clusters(blk_bs(s->target), offset, io_bytes,
                                   &target_offset, &target_bytes);
            if (target_offset == offset &&
                target_bytes == io_bytes) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                    MIRROR_METHOD_ZERO :
                                    MIRROR_METHOD_DISCARD;
            }
        }

        while (s->in_flight >= MAX_IN_FLIGHT) {
            trace_mirror_yield_in_flight(s, offset, s->in_flight);
            mirror_wait_for_io(s);
        }

        if (s->ret < 0) {
            return 0;
        }

        io_bytes = mirror_clip_bytes(s, offset, io_bytes);
        switch (mirror_method) {
        case MIRROR_METHOD_COPY:
            io_bytes = io_bytes_acct = mirror_do_read(s, offset, io_bytes);
            break;
        case MIRROR_METHOD_ZERO:
        case MIRROR_METHOD_DISCARD:
            mirror_do_zero_or_discard(s, offset, io_bytes,
                                      mirror_method == MIRROR_METHOD_DISCARD);
            if (write_zeroes_ok) {
                io_bytes_acct = 0;
            } else {
                io_bytes_acct = io_bytes;
            }
            break;
        default:
            abort();
        }
        assert(io_bytes);
        offset += io_bytes;
        nb_chunks -= DIV_ROUND_UP(io_bytes, s->granularity);
        if (s->common.speed) {
            delay_ns = ratelimit_calculate_delay(&s->limit, io_bytes_acct);
        }
    }
    return delay_ns;
}

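/* Carve s->buf into granularity-sized chunks and put them all on the
 * buf_free list; called once the buffer has been allocated. */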
static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}

/* This is also used for the .pause callback. There is no matching
 * mirror_resume() because mirror_run() will begin iterating again
 * when the job is resumed.
 */
static void mirror_wait_for_all_io(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        mirror_wait_for_io(s);
    }
}

typedef struct {
    int ret;
} MirrorExitData;

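/* Runs in the main loop (via block_job_defer_to_main_loop) after mirror_run()
 * finishes: switches the graph over to the target if the job completed
 * successfully, drops the permissions and references the job held, and
 * removes the mirror_top filter node from the chain. */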
static void mirror_exit(BlockJob *job, void *opaque)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    MirrorExitData *data = opaque;
    AioContext *replace_aio_context = NULL;
    BlockDriverState *src = s->source;
    BlockDriverState *target_bs = blk_bs(s->target);
    BlockDriverState *mirror_top_bs = s->mirror_top_bs;
    Error *local_err = NULL;

    bdrv_release_dirty_bitmap(src, s->dirty_bitmap);

    /* Make sure that the source BDS doesn't go away until we have called
     * block_job_completed(). */
    bdrv_ref(src);
    bdrv_ref(mirror_top_bs);
    bdrv_ref(target_bs);

    /* Remove target parent that still uses BLK_PERM_WRITE/RESIZE before
     * inserting target_bs at s->to_replace, where we might not be able to get
     * these permissions.
     *
     * Note that blk_unref() alone doesn't necessarily drop permissions because
     * we might be running nested inside mirror_drain(), which takes an extra
     * reference, so use an explicit blk_set_perm() first. */
    blk_set_perm(s->target, 0, BLK_PERM_ALL, &error_abort);
    blk_unref(s->target);
    s->target = NULL;

    /* We don't access the source any more. Dropping any WRITE/RESIZE is
     * required before it could become a backing file of target_bs. */
    bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                            &error_abort);
    if (s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
        BlockDriverState *backing = s->is_none_mode ? src : s->base;
        if (backing_bs(target_bs) != backing) {
            bdrv_set_backing_hd(target_bs, backing, &local_err);
            if (local_err) {
                error_report_err(local_err);
                data->ret = -EPERM;
            }
        }
    }

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && data->ret == 0) {
        BlockDriverState *to_replace = src;
        if (s->to_replace) {
            to_replace = s->to_replace;
        }

        if (bdrv_get_flags(target_bs) != bdrv_get_flags(to_replace)) {
            bdrv_reopen(target_bs, bdrv_get_flags(to_replace), NULL);
        }

        /* The mirror job has no requests in flight any more, but we need to
         * drain potential other users of the BDS before changing the graph. */
        bdrv_drained_begin(target_bs);
        bdrv_replace_node(to_replace, target_bs, &local_err);
        bdrv_drained_end(target_bs);
        if (local_err) {
            error_report_err(local_err);
            data->ret = -EPERM;
        }
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_unref(target_bs);

    /* Remove the mirror filter driver from the graph. Before this, get rid of
     * the blockers on the intermediate nodes so that the resulting state is
     * valid. Also give up permissions on mirror_top_bs->backing, which might
     * block the removal. */
    block_job_remove_all_bdrv(job);
    bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                            &error_abort);
    bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);

    /* We just changed the BDS the job BB refers to (with either or both of the
     * bdrv_replace_node() calls), so switch the BB back so the cleanup does
     * the right thing. We don't need any permissions any more now. */
    blk_remove_bs(job->blk);
    blk_set_perm(job->blk, 0, BLK_PERM_ALL, &error_abort);
    blk_insert_bs(job->blk, mirror_top_bs, &error_abort);

    block_job_completed(&s->common, data->ret);

    g_free(data);
    bdrv_drained_end(src);
    bdrv_unref(mirror_top_bs);
    bdrv_unref(src);
}

static void mirror_throttle(MirrorBlockJob *s)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    if (now - s->last_pause_ns > SLICE_TIME) {
        s->last_pause_ns = now;
        block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, 0);
    } else {
        block_job_pause_point(&s->common);
    }
}

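/* Initial synchronization for the full and top sync modes: zero out the
 * target first if it does not guarantee zeroed contents (or, if efficient
 * zeroing isn't possible either, simply dirty the whole bitmap), then mark
 * every allocated extent of the source dirty so the main loop copies it. */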
static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
{
    int64_t sector_num, end;
    BlockDriverState *base = s->base;
    BlockDriverState *bs = s->source;
    BlockDriverState *target_bs = blk_bs(s->target);
    int ret, n;
    int64_t count;

    end = s->bdev_length / BDRV_SECTOR_SIZE;

    if (base == NULL && !bdrv_has_zero_init(target_bs)) {
        if (!bdrv_can_write_zeroes_with_unmap(target_bs)) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, end);
            return 0;
        }

        s->initial_zeroing_ongoing = true;
        for (sector_num = 0; sector_num < end; ) {
            int nb_sectors = MIN(end - sector_num,
                QEMU_ALIGN_DOWN(INT_MAX, s->granularity) >> BDRV_SECTOR_BITS);

            mirror_throttle(s);

            if (block_job_is_cancelled(&s->common)) {
                s->initial_zeroing_ongoing = false;
                return 0;
            }

            if (s->in_flight >= MAX_IN_FLIGHT) {
                trace_mirror_yield(s, UINT64_MAX, s->buf_free_count,
                                   s->in_flight);
                mirror_wait_for_io(s);
                continue;
            }

            mirror_do_zero_or_discard(s, sector_num * BDRV_SECTOR_SIZE,
                                      nb_sectors * BDRV_SECTOR_SIZE, false);
            sector_num += nb_sectors;
        }

        mirror_wait_for_all_io(s);
        s->initial_zeroing_ongoing = false;
    }

    /* Loop on the sectors and initialize the dirty bitmap.  */
    for (sector_num = 0; sector_num < end; ) {
        /* Just to make sure we are not exceeding int limit. */
        int nb_sectors = MIN(INT_MAX >> BDRV_SECTOR_BITS,
                             end - sector_num);

        mirror_throttle(s);

        if (block_job_is_cancelled(&s->common)) {
            return 0;
        }

        ret = bdrv_is_allocated_above(bs, base, sector_num * BDRV_SECTOR_SIZE,
                                      nb_sectors * BDRV_SECTOR_SIZE, &count);
        if (ret < 0) {
            return ret;
        }

        /* TODO: Relax this once bdrv_is_allocated_above and dirty
         * bitmaps no longer require sector alignment. */
        assert(QEMU_IS_ALIGNED(count, BDRV_SECTOR_SIZE));
        n = count >> BDRV_SECTOR_BITS;
        assert(n > 0);
        if (ret == 1) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, sector_num, n);
        }
        sector_num += n;
    }
    return 0;
}

/* Called when going out of the streaming phase to flush the bulk of the
 * data to the medium, or just before completing.
 */
static int mirror_flush(MirrorBlockJob *s)
{
    int ret = blk_flush(s->target);
    if (ret < 0) {
        if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) {
            s->ret = ret;
        }
    }
    return ret;
}

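/* The body of the mirror job coroutine: set up buffers and the dirty bitmap
 * iterator, run mirror_iteration() until the source is clean, report
 * BLOCK_JOB_READY, then keep copying new writes until the job is cancelled
 * or completed via block-job-complete. */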
static void coroutine_fn mirror_run(void *opaque)
{
    MirrorBlockJob *s = opaque;
    MirrorExitData *data;
    BlockDriverState *bs = s->source;
    BlockDriverState *target_bs = blk_bs(s->target);
    bool need_drain = true;
    int64_t length;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for an empty string */
    int ret = 0;

    if (block_job_is_cancelled(&s->common)) {
        goto immediate_exit;
    }

    s->bdev_length = bdrv_getlength(bs);
    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    }

    /* Active commit must resize the base image if its size differs from the
     * active layer. */
    if (s->base == blk_bs(s->target)) {
        int64_t base_length;

        base_length = blk_getlength(s->target);
        if (base_length < 0) {
            ret = base_length;
            goto immediate_exit;
        }

        if (s->bdev_length > base_length) {
            ret = blk_truncate(s->target, s->bdev_length, PREALLOC_MODE_OFF,
                               NULL);
            if (ret < 0) {
                goto immediate_exit;
            }
        }
    }

    if (s->bdev_length == 0) {
        /* Report BLOCK_JOB_READY and wait for complete. */
        block_job_event_ready(&s->common);
        s->synced = true;
        while (!block_job_is_cancelled(&s->common) && !s->should_complete) {
            block_job_yield(&s->common);
        }
        s->common.cancelled = false;
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW.  Instead, we copy sectors around the
     * dirty data if needed.  We need a bitmap to do that.
     */
    bdrv_get_backing_filename(target_bs, backing_filename,
                              sizeof(backing_filename));
    if (!bdrv_get_info(target_bs, &bdi) && bdi.cluster_size) {
        s->target_cluster_size = bdi.cluster_size;
    } else {
        s->target_cluster_size = BDRV_SECTOR_SIZE;
    }
    if (backing_filename[0] && !target_bs->backing &&
        s->granularity < s->target_cluster_size) {
        s->buf_size = MAX(s->buf_size, s->target_cluster_size);
        s->cow_bitmap = bitmap_new(length);
    }
    s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);

    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    mirror_free_init(s);

    s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (!s->is_none_mode) {
        ret = mirror_dirty_init(s);
        if (ret < 0 || block_job_is_cancelled(&s->common)) {
            goto immediate_exit;
        }
    }

    assert(!s->dbi);
    s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap, 0);
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt, delta;
        bool should_complete;

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        block_job_pause_point(&s->common);

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* s->common.offset contains the number of bytes already processed so
         * far, cnt is the number of dirty sectors remaining and
         * s->bytes_in_flight is the number of bytes currently being
         * processed; together those are the current total operation length */
        s->common.len = s->common.offset + s->bytes_in_flight +
            cnt * BDRV_SECTOR_SIZE;

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every SLICE_TIME nanoseconds, or when there is an error,
         * or when the source is clean, whichever comes first.
         */
        delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns;
        if (delta < SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, cnt * BDRV_SECTOR_SIZE,
                                   s->buf_free_count, s->in_flight);
                mirror_wait_for_io(s);
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            if (!s->synced) {
                if (mirror_flush(s) < 0) {
                    /* Go check s->ret.  */
                    continue;
                }
                /* We're out of the streaming phase.  From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion.  This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                block_job_event_ready(&s->common);
                s->synced = true;
            }

            should_complete = s->should_complete ||
                block_job_is_cancelled(&s->common);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs, so pause it now.  Before deciding
             * whether to switch to target check one last time if I/O has
             * come in the meanwhile, and if not flush the data to disk.
             */
            trace_mirror_before_drain(s, cnt * BDRV_SECTOR_SIZE);

            bdrv_drained_begin(bs);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            if (cnt > 0 || mirror_flush(s) < 0) {
                bdrv_drained_end(bs);
                continue;
            }

            /* The two disks are in sync.  Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.cancelled = false;
            need_drain = false;
            break;
        }

        ret = 0;
        trace_mirror_before_sleep(s, cnt * BDRV_SECTOR_SIZE,
                                  s->synced, delay_ns);
        if (!s->synced) {
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
            if (block_job_is_cancelled(&s->common)) {
                break;
            }
        } else if (!should_complete) {
            delay_ns = (s->in_flight == 0 && cnt == 0 ? SLICE_TIME : 0);
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
        }
        s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong.  Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || (!s->synced && block_job_is_cancelled(&s->common)));
        assert(need_drain);
        mirror_wait_for_all_io(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_dirty_iter_free(s->dbi);

    data = g_malloc(sizeof(*data));
    data->ret = ret;

    if (need_drain) {
        bdrv_drained_begin(bs);
    }
    block_job_defer_to_main_loop(&s->common, mirror_exit, data);
}

static void mirror_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    if (speed < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }
    ratelimit_set_speed(&s->limit, speed, SLICE_TIME);
}

static void mirror_complete(BlockJob *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    BlockDriverState *target;

    target = blk_bs(s->target);

    if (!s->synced) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    if (s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) {
        int ret;

        assert(!target->backing);
        ret = bdrv_open_backing_file(target, NULL, "backing", errp);
        if (ret < 0) {
            return;
        }
    }

    /* block all operations on to_replace bs */
    if (s->replaces) {
        AioContext *replace_aio_context;

        s->to_replace = bdrv_find_node(s->replaces);
        if (!s->to_replace) {
            error_setg(errp, "Node name '%s' not found", s->replaces);
            return;
        }

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        /* TODO Translate this into the permission system. The current
         * definition of GRAPH_MOD would require requesting it for the
         * parents; they might not even be BlockDriverStates, however, so a
         * BdrvChild can't address them. May need redefinition of GRAPH_MOD. */
        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);

        aio_context_release(replace_aio_context);
    }

    s->should_complete = true;
    block_job_enter(&s->common);
}

static void mirror_pause(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    mirror_wait_for_all_io(s);
}

static void mirror_attached_aio_context(BlockJob *job, AioContext *new_context)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    blk_set_aio_context(s->target, new_context);
}

static void mirror_drain(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    /* Need to keep a reference in case blk_drain triggers execution
     * of mirror_complete...
     */
    if (s->target) {
        BlockBackend *target = s->target;
        blk_ref(target);
        blk_drain(target);
        blk_unref(target);
    }
}

static const BlockJobDriver mirror_job_driver = {
    .instance_size          = sizeof(MirrorBlockJob),
    .job_type               = BLOCK_JOB_TYPE_MIRROR,
    .set_speed              = mirror_set_speed,
    .start                  = mirror_run,
    .complete               = mirror_complete,
    .pause                  = mirror_pause,
    .attached_aio_context   = mirror_attached_aio_context,
    .drain                  = mirror_drain,
};

static const BlockJobDriver commit_active_job_driver = {
    .instance_size          = sizeof(MirrorBlockJob),
    .job_type               = BLOCK_JOB_TYPE_COMMIT,
    .set_speed              = mirror_set_speed,
    .start                  = mirror_run,
    .complete               = mirror_complete,
    .pause                  = mirror_pause,
    .attached_aio_context   = mirror_attached_aio_context,
    .drain                  = mirror_drain,
};

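/* The bdrv_mirror_top callbacks below simply forward every request to
 * bs->backing, i.e. to the real source node underneath the filter. */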
static int coroutine_fn bdrv_mirror_top_preadv(BlockDriverState *bs,
    uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
}

static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs,
    uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    return bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags);
}

static int coroutine_fn bdrv_mirror_top_flush(BlockDriverState *bs)
{
    return bdrv_co_flush(bs->backing->bs);
}

static int coroutine_fn bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int bytes, BdrvRequestFlags flags)
{
    return bdrv_co_pwrite_zeroes(bs->backing, offset, bytes, flags);
}

static int coroutine_fn bdrv_mirror_top_pdiscard(BlockDriverState *bs,
    int64_t offset, int bytes)
{
    return bdrv_co_pdiscard(bs->backing->bs, offset, bytes);
}

static void bdrv_mirror_top_refresh_filename(BlockDriverState *bs, QDict *opts)
{
    bdrv_refresh_filename(bs->backing->bs);
    pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
            bs->backing->bs->filename);
}

static void bdrv_mirror_top_close(BlockDriverState *bs)
{
}

static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c,
                                       const BdrvChildRole *role,
                                       BlockReopenQueue *reopen_queue,
                                       uint64_t perm, uint64_t shared,
                                       uint64_t *nperm, uint64_t *nshared)
{
    /* Must be able to forward guest writes to the real image */
    *nperm = 0;
    if (perm & BLK_PERM_WRITE) {
        *nperm |= BLK_PERM_WRITE;
    }

    *nshared = BLK_PERM_ALL;
}

/* Dummy node that provides consistent read to its users without requiring it
 * from its backing file and that allows writes on the backing file chain. */
static BlockDriver bdrv_mirror_top = {
    .format_name                = "mirror_top",
    .bdrv_co_preadv             = bdrv_mirror_top_preadv,
    .bdrv_co_pwritev            = bdrv_mirror_top_pwritev,
    .bdrv_co_pwrite_zeroes      = bdrv_mirror_top_pwrite_zeroes,
    .bdrv_co_pdiscard           = bdrv_mirror_top_pdiscard,
    .bdrv_co_flush              = bdrv_mirror_top_flush,
    .bdrv_co_get_block_status   = bdrv_co_get_block_status_from_backing,
    .bdrv_refresh_filename      = bdrv_mirror_top_refresh_filename,
    .bdrv_close                 = bdrv_mirror_top_close,
    .bdrv_child_perm            = bdrv_mirror_top_child_perm,
};

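/* Common setup for mirror and active commit: insert mirror_top_bs above bs,
 * create the block job with the permissions it needs on the source and the
 * target, and start the mirror_run() coroutine. */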
static void mirror_start_job(const char *job_id, BlockDriverState *bs,
                             int creation_flags, BlockDriverState *target,
                             const char *replaces, int64_t speed,
                             uint32_t granularity, int64_t buf_size,
                             BlockMirrorBackingMode backing_mode,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             bool unmap,
                             BlockCompletionFunc *cb,
                             void *opaque,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base,
                             bool auto_complete, const char *filter_node_name,
                             bool is_mirror,
                             Error **errp)
{
    MirrorBlockJob *s;
    BlockDriverState *mirror_top_bs;
    bool target_graph_mod;
    bool target_is_backing;
    Error *local_err = NULL;
    int ret;

    if (granularity == 0) {
        granularity = bdrv_get_default_bitmap_granularity(target);
    }

    assert((granularity & (granularity - 1)) == 0);
    /* Granularity must be large enough for sector-based dirty bitmap */
    assert(granularity >= BDRV_SECTOR_SIZE);

    if (buf_size < 0) {
        error_setg(errp, "Invalid parameter 'buf-size'");
        return;
    }

    if (buf_size == 0) {
        buf_size = DEFAULT_MIRROR_BUF_SIZE;
    }

    /* In the case of active commit, add dummy driver to provide consistent
     * reads on the top, while disabling it in the intermediate nodes, and make
     * the backing chain writable. */
    mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name,
                                         BDRV_O_RDWR, errp);
    if (mirror_top_bs == NULL) {
        return;
    }
    if (!filter_node_name) {
        mirror_top_bs->implicit = true;
    }
    mirror_top_bs->total_sectors = bs->total_sectors;
    bdrv_set_aio_context(mirror_top_bs, bdrv_get_aio_context(bs));

    /* bdrv_append takes ownership of the mirror_top_bs reference, need to keep
     * it alive until block_job_create() succeeds even if bs has no parent. */
    bdrv_ref(mirror_top_bs);
    bdrv_drained_begin(bs);
    bdrv_append(mirror_top_bs, bs, &local_err);
    bdrv_drained_end(bs);

    if (local_err) {
        bdrv_unref(mirror_top_bs);
        error_propagate(errp, local_err);
        return;
    }

    /* Make sure that the source is not resized while the job is running */
    s = block_job_create(job_id, driver, mirror_top_bs,
                         BLK_PERM_CONSISTENT_READ,
                         BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
                         BLK_PERM_WRITE | BLK_PERM_GRAPH_MOD, speed,
                         creation_flags, cb, opaque, errp);
    if (!s) {
        goto fail;
    }
    /* The block job now has a reference to this node */
    bdrv_unref(mirror_top_bs);

    s->source = bs;
    s->mirror_top_bs = mirror_top_bs;

    /* No resize for the target either; while the mirror is still running, a
     * consistent read isn't necessarily possible. We could possibly allow
     * writes and graph modifications, though it would likely defeat the
     * purpose of a mirror, so leave them blocked for now.
     *
     * In the case of active commit, things look a bit different, though,
     * because the target is an already populated backing file in active use.
     * We can allow anything except resize there. */
    target_is_backing = bdrv_chain_contains(bs, target);
    target_graph_mod = (backing_mode != MIRROR_LEAVE_BACKING_CHAIN);
    s->target = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE |
                        (target_graph_mod ? BLK_PERM_GRAPH_MOD : 0),
                        BLK_PERM_WRITE_UNCHANGED |
                        (target_is_backing ? BLK_PERM_CONSISTENT_READ |
                                             BLK_PERM_WRITE |
                                             BLK_PERM_GRAPH_MOD : 0));
    ret = blk_insert_bs(s->target, target, errp);
    if (ret < 0) {
        goto fail;
    }
    if (is_mirror) {
        /* XXX: Mirror target could be an NBD server of the target QEMU in the
         * case of non-shared block migration. To allow migration completion,
         * we have to allow "inactivate" of the target BB.  When that happens,
         * we know the job is drained, and the vcpus are stopped, so no write
         * operation will be performed. Block layer already has assertions to
         * ensure that. */
        blk_set_force_allow_inactivate(s->target);
    }

    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->is_none_mode = is_none_mode;
    s->backing_mode = backing_mode;
    s->base = base;
    s->granularity = granularity;
    s->buf_size = ROUND_UP(buf_size, granularity);
    s->unmap = unmap;
    if (auto_complete) {
        s->should_complete = true;
    }

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
    if (!s->dirty_bitmap) {
        goto fail;
    }

    /* Required permissions are already taken with blk_new() */
    block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL,
                       &error_abort);

    /* In commit_active_start() all intermediate nodes disappear, so
     * any jobs in them must be blocked */
    if (target_is_backing) {
        BlockDriverState *iter;
        for (iter = backing_bs(bs); iter != target; iter = backing_bs(iter)) {
            /* XXX BLK_PERM_WRITE needs to be allowed so we don't block
             * ourselves at s->base (if writes are blocked for a node, they are
             * also blocked for its backing file). The other option would be a
             * second filter driver above s->base (== target). */
            ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                                     BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE,
                                     errp);
            if (ret < 0) {
                goto fail;
            }
        }
    }

    trace_mirror_start(bs, s, opaque);
    block_job_start(&s->common);
    return;

fail:
    if (s) {
        /* Make sure this BDS does not go away until we have completed the
         * graph changes below */
        bdrv_ref(mirror_top_bs);

        g_free(s->replaces);
        blk_unref(s->target);
        block_job_early_fail(&s->common);
    }

    bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                            &error_abort);
    bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);

    bdrv_unref(mirror_top_bs);
}

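/* Public entry point for the mirror job. A typical caller is
 * qmp_drive_mirror() in blockdev.c, driven by e.g. this (illustrative)
 * QMP command:
 *
 *   { "execute": "drive-mirror",
 *     "arguments": { "device": "drive0", "target": "/tmp/copy.qcow2",
 *                    "sync": "full" } }
 */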
void mirror_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, const char *replaces,
                  int64_t speed, uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap, const char *filter_node_name, Error **errp)
{
    bool is_none_mode;
    BlockDriverState *base;

    if (mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        error_setg(errp, "Sync mode 'incremental' not supported");
        return;
    }
    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL;
    mirror_start_job(job_id, bs, BLOCK_JOB_DEFAULT, target, replaces,
                     speed, granularity, buf_size, backing_mode,
                     on_source_error, on_target_error, unmap, NULL, NULL,
                     &mirror_job_driver, is_none_mode, base, false,
                     filter_node_name, true, errp);
}

void commit_active_start(const char *job_id, BlockDriverState *bs,
                         BlockDriverState *base, int creation_flags,
                         int64_t speed, BlockdevOnError on_error,
                         const char *filter_node_name,
                         BlockCompletionFunc *cb, void *opaque,
                         bool auto_complete, Error **errp)
{
    int orig_base_flags;
    Error *local_err = NULL;

    orig_base_flags = bdrv_get_flags(base);

    if (bdrv_reopen(base, bs->open_flags, errp)) {
        return;
    }

    mirror_start_job(job_id, bs, creation_flags, base, NULL, speed, 0, 0,
                     MIRROR_LEAVE_BACKING_CHAIN,
                     on_error, on_error, true, cb, opaque,
                     &commit_active_job_driver, false, base, auto_complete,
                     filter_node_name, false, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error_restore_flags;
    }

    return;

error_restore_flags:
    /* ignore error and errp for bdrv_reopen, because we want to propagate
     * the original error */
    bdrv_reopen(base, orig_base_flags, NULL);
    return;
}
1347