xref: /openbmc/qemu/block/mirror.c (revision e0091133)
/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/coroutine.h"
#include "qemu/range.h"
#include "trace.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"
#include "qemu/memalign.h"

#define MAX_IN_FLIGHT 16
#define MAX_IO_BYTES (1 << 20) /* 1 MiB */
#define DEFAULT_MIRROR_BUF_SIZE (MAX_IN_FLIGHT * MAX_IO_BYTES)

/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;

typedef struct MirrorOp MirrorOp;

typedef struct MirrorBlockJob {
    BlockJob common;
    BlockBackend *target;
    BlockDriverState *mirror_top_bs;
    BlockDriverState *base;
    BlockDriverState *base_overlay;

    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockMirrorBackingMode backing_mode;
    /* Whether the target image requires explicit zero-initialization */
    bool zero_target;
    MirrorCopyMode copy_mode;
    BlockdevOnError on_source_error, on_target_error;
    /* Set when the target is synced (dirty bitmap is clean, nothing
     * in flight) and the job is running in active mode */
    bool actively_synced;
    bool should_complete;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    BdrvDirtyBitmapIter *dbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    uint64_t last_pause_ns;
    unsigned long *in_flight_bitmap;
    unsigned in_flight;
    int64_t bytes_in_flight;
    QTAILQ_HEAD(, MirrorOp) ops_in_flight;
    int ret;
    bool unmap;
    int target_cluster_size;
    int max_iov;
    bool initial_zeroing_ongoing;
    int in_active_write_counter;
    bool prepared;
    bool in_drain;
} MirrorBlockJob;

typedef struct MirrorBDSOpaque {
    MirrorBlockJob *job;
    bool stop;
    bool is_commit;
} MirrorBDSOpaque;

struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t offset;
    uint64_t bytes;

    /* The pointee is set by mirror_co_read(), mirror_co_zero(), and
     * mirror_co_discard() before yielding for the first time */
    int64_t *bytes_handled;

    bool is_pseudo_op;
    bool is_active_write;
    bool is_in_flight;
    CoQueue waiting_requests;
    Coroutine *co;
    MirrorOp *waiting_for_op;

    QTAILQ_ENTRY(MirrorOp) next;
};

typedef enum MirrorMethod {
    MIRROR_METHOD_COPY,
    MIRROR_METHOD_ZERO,
    MIRROR_METHOD_DISCARD,
} MirrorMethod;

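/* Map an I/O error to the action chosen by the job's on-source-error or
 * on-target-error policy.  Any error means we can no longer claim that the
 * target is actively in sync. */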
static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->actively_synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&s->common, s->on_target_error,
                                      false, error);
    }
}

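/* Wait until no in-flight operation overlaps [offset, offset + bytes).
 * @self may be NULL when the caller has no MirrorOp of its own; when it is
 * set, conflicting operations that are themselves waiting are skipped to
 * avoid a deadlock. */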
static void coroutine_fn mirror_wait_on_conflicts(MirrorOp *self,
                                                  MirrorBlockJob *s,
                                                  uint64_t offset,
                                                  uint64_t bytes)
{
    uint64_t self_start_chunk = offset / s->granularity;
    uint64_t self_end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity);
    uint64_t self_nb_chunks = self_end_chunk - self_start_chunk;

    while (find_next_bit(s->in_flight_bitmap, self_end_chunk,
                         self_start_chunk) < self_end_chunk &&
           s->ret >= 0)
    {
        MirrorOp *op;

        QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
            uint64_t op_start_chunk = op->offset / s->granularity;
            uint64_t op_nb_chunks = DIV_ROUND_UP(op->offset + op->bytes,
                                                 s->granularity) -
                                    op_start_chunk;

            if (op == self) {
                continue;
            }

            if (ranges_overlap(self_start_chunk, self_nb_chunks,
                               op_start_chunk, op_nb_chunks))
            {
                if (self) {
                    /*
                     * If the operation is already (indirectly) waiting for us,
                     * or will wait for us as soon as it wakes up, then just go
                     * on (instead of producing a deadlock in the former case).
                     */
                    if (op->waiting_for_op) {
                        continue;
                    }

                    self->waiting_for_op = op;
                }

                qemu_co_queue_wait(&op->waiting_requests, NULL);

                if (self) {
                    self->waiting_for_op = NULL;
                }

                break;
            }
        }
    }
}

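/* Finish a MirrorOp: return its buffer chunks to the free list, clear its
 * chunks in the in-flight bitmap, account for progress on success, and wake
 * up any requests waiting on the operation. */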
static void coroutine_fn mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks;

    trace_mirror_iteration_done(s, op->offset, op->bytes, ret);

    s->in_flight--;
    s->bytes_in_flight -= op->bytes;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    chunk_num = op->offset / s->granularity;
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    QTAILQ_REMOVE(&s->ops_in_flight, op, next);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        if (!s->initial_zeroing_ongoing) {
            job_progress_update(&s->common.job, op->bytes);
        }
    }
    qemu_iovec_destroy(&op->qiov);

    qemu_co_queue_restart_all(&op->waiting_requests);
    g_free(op);
}

static void coroutine_fn mirror_write_complete(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;

    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }

    mirror_iteration_done(op, ret);
}

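/* Completion path for the read half of a copy operation: on success, write
 * the data out to the target; on error, apply the error policy and finish
 * the operation. */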
static void coroutine_fn mirror_read_complete(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;

    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
        return;
    }

    ret = blk_co_pwritev(s->target, op->offset, op->qiov.size, &op->qiov, 0);
    mirror_write_complete(op, ret);
}

/* Clip bytes relative to offset to not exceed end-of-file */
static inline int64_t mirror_clip_bytes(MirrorBlockJob *s,
                                        int64_t offset,
                                        int64_t bytes)
{
    return MIN(bytes, s->bdev_length - offset);
}

/* Round the offset and/or bytes to the target cluster if COW is needed, and
 * return the offset of the adjusted tail against the original. */
static int mirror_cow_align(MirrorBlockJob *s, int64_t *offset,
                            uint64_t *bytes)
{
    bool need_cow;
    int ret = 0;
    int64_t align_offset = *offset;
    int64_t align_bytes = *bytes;
    int max_bytes = s->granularity * s->max_iov;

    need_cow = !test_bit(*offset / s->granularity, s->cow_bitmap);
    need_cow |= !test_bit((*offset + *bytes - 1) / s->granularity,
                          s->cow_bitmap);
    if (need_cow) {
        bdrv_round_to_clusters(blk_bs(s->target), *offset, *bytes,
                               &align_offset, &align_bytes);
    }

    if (align_bytes > max_bytes) {
        align_bytes = max_bytes;
        if (need_cow) {
            align_bytes = QEMU_ALIGN_DOWN(align_bytes, s->target_cluster_size);
        }
    }
    /* Clipping may result in align_bytes unaligned to the chunk boundary, but
     * that doesn't matter because it's already the end of the source image. */
    align_bytes = mirror_clip_bytes(s, align_offset, align_bytes);

    ret = align_offset + align_bytes - (*offset + *bytes);
    *offset = align_offset;
    *bytes = align_bytes;
    assert(ret >= 0);
    return ret;
}

static inline void coroutine_fn
mirror_wait_for_any_operation(MirrorBlockJob *s, bool active)
{
    MirrorOp *op;

    QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
        /* Do not wait on pseudo ops, because it may in turn wait on
         * some other operation to start, which may in fact be the
         * caller of this function.  Since there is only one pseudo op
         * at any given time, we will always find some real operation
         * to wait on. */
        if (!op->is_pseudo_op && op->is_in_flight &&
            op->is_active_write == active)
        {
            qemu_co_queue_wait(&op->waiting_requests, NULL);
            return;
        }
    }
    abort();
}

static inline void coroutine_fn
mirror_wait_for_free_in_flight_slot(MirrorBlockJob *s)
{
    /* Only non-active operations use up in-flight slots */
    mirror_wait_for_any_operation(s, false);
}

/* Perform a mirror copy operation.
 *
 * *op->bytes_handled is set to the number of bytes copied after and
 * including offset, excluding any bytes copied prior to offset due
 * to alignment.  This will be op->bytes if no alignment is necessary,
 * or (new_end - op->offset) if the tail is rounded up or down due to
 * alignment or buffer limit.
 */
static void coroutine_fn mirror_co_read(void *opaque)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    int nb_chunks;
    uint64_t ret;
    uint64_t max_bytes;

    max_bytes = s->granularity * s->max_iov;

    /* We can only handle as much as buf_size at a time. */
    op->bytes = MIN(s->buf_size, MIN(max_bytes, op->bytes));
    assert(op->bytes);
    assert(op->bytes < BDRV_REQUEST_MAX_BYTES);
    *op->bytes_handled = op->bytes;

    if (s->cow_bitmap) {
        *op->bytes_handled += mirror_cow_align(s, &op->offset, &op->bytes);
    }
    /* Cannot exceed BDRV_REQUEST_MAX_BYTES + INT_MAX */
    assert(*op->bytes_handled <= UINT_MAX);
    assert(op->bytes <= s->buf_size);
    /* The offset is granularity-aligned because:
     * 1) Caller passes in aligned values;
     * 2) mirror_cow_align is used only when target cluster is larger. */
    assert(QEMU_IS_ALIGNED(op->offset, s->granularity));
    /* The range is sector-aligned, since bdrv_getlength() rounds up. */
    assert(QEMU_IS_ALIGNED(op->bytes, BDRV_SECTOR_SIZE));
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    while (s->buf_free_count < nb_chunks) {
        trace_mirror_yield_in_flight(s, op->offset, s->in_flight);
        mirror_wait_for_free_in_flight_slot(s);
    }

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = op->bytes - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
    }

    /* Copy the dirty cluster.  */
    s->in_flight++;
    s->bytes_in_flight += op->bytes;
    op->is_in_flight = true;
    trace_mirror_one_iteration(s, op->offset, op->bytes);

    ret = bdrv_co_preadv(s->mirror_top_bs->backing, op->offset, op->bytes,
                         &op->qiov, 0);
    mirror_read_complete(op, ret);
}

static void coroutine_fn mirror_co_zero(void *opaque)
{
    MirrorOp *op = opaque;
    int ret;

    op->s->in_flight++;
    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;
    op->is_in_flight = true;

    ret = blk_co_pwrite_zeroes(op->s->target, op->offset, op->bytes,
                               op->s->unmap ? BDRV_REQ_MAY_UNMAP : 0);
    mirror_write_complete(op, ret);
}

static void coroutine_fn mirror_co_discard(void *opaque)
{
    MirrorOp *op = opaque;
    int ret;

    op->s->in_flight++;
    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;
    op->is_in_flight = true;

    ret = blk_co_pdiscard(op->s->target, op->offset, op->bytes);
    mirror_write_complete(op, ret);
}

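/* Allocate a MirrorOp for [offset, offset + bytes) and spawn a coroutine to
 * perform the requested copy, write-zeroes or discard operation.  Ownership
 * of the op moves to the coroutine; the number of bytes actually handled is
 * reported back through the op's bytes_handled pointer. */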
static unsigned mirror_perform(MirrorBlockJob *s, int64_t offset,
                               unsigned bytes, MirrorMethod mirror_method)
{
    MirrorOp *op;
    Coroutine *co;
    int64_t bytes_handled = -1;

    op = g_new(MirrorOp, 1);
    *op = (MirrorOp){
        .s              = s,
        .offset         = offset,
        .bytes          = bytes,
        .bytes_handled  = &bytes_handled,
    };
    qemu_co_queue_init(&op->waiting_requests);

    switch (mirror_method) {
    case MIRROR_METHOD_COPY:
        co = qemu_coroutine_create(mirror_co_read, op);
        break;
    case MIRROR_METHOD_ZERO:
        co = qemu_coroutine_create(mirror_co_zero, op);
        break;
    case MIRROR_METHOD_DISCARD:
        co = qemu_coroutine_create(mirror_co_discard, op);
        break;
    default:
        abort();
    }
    op->co = co;

    QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);
    qemu_coroutine_enter(co);
    /* At this point, ownership of op has been moved to the coroutine
     * and the object may already be freed */

    /* Assert that this value has been set */
    assert(bytes_handled >= 0);

    /* Same assertion as in mirror_co_read() (and for mirror_co_zero()
     * and mirror_co_discard(), bytes_handled == op->bytes, which
     * is the @bytes parameter given to this function) */
    assert(bytes_handled <= UINT_MAX);
    return bytes_handled;
}

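/* Perform one iteration of the background mirroring loop: pick a run of
 * dirty chunks from the dirty bitmap and submit copy, zero or discard
 * operations for them.  Returns the rate-limit delay in nanoseconds that
 * the caller should sleep for. */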
static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->mirror_top_bs->backing->bs;
    MirrorOp *pseudo_op;
    int64_t offset;
    uint64_t delay_ns = 0, ret = 0;
    /* At least the first dirty chunk is mirrored in one iteration. */
    int nb_chunks = 1;
    bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
    int max_io_bytes = MAX(s->buf_size / MAX_IN_FLIGHT, MAX_IO_BYTES);

    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    offset = bdrv_dirty_iter_next(s->dbi);
    if (offset < 0) {
        bdrv_set_dirty_iter(s->dbi, 0);
        offset = bdrv_dirty_iter_next(s->dbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(offset >= 0);
    }
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    mirror_wait_on_conflicts(NULL, s, offset, 1);

    job_pause_point(&s->common.job);

    /* Find the number of consecutive dirty chunks following the first dirty
     * one, and wait for the in-flight requests in them. */
    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    while (nb_chunks * s->granularity < s->buf_size) {
        int64_t next_dirty;
        int64_t next_offset = offset + nb_chunks * s->granularity;
        int64_t next_chunk = next_offset / s->granularity;
        if (next_offset >= s->bdev_length ||
            !bdrv_dirty_bitmap_get_locked(s->dirty_bitmap, next_offset)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
            break;
        }

        next_dirty = bdrv_dirty_iter_next(s->dbi);
        if (next_dirty > next_offset || next_dirty < 0) {
            /* The bitmap iterator's cache is stale, refresh it */
            bdrv_set_dirty_iter(s->dbi, next_offset);
            next_dirty = bdrv_dirty_iter_next(s->dbi);
        }
        assert(next_dirty == next_offset);
        nb_chunks++;
    }

    /* Clear dirty bits before querying the block status, because
     * calling bdrv_block_status_above could yield - if some blocks are
     * marked dirty in this window, we need to know.
     */
    bdrv_reset_dirty_bitmap_locked(s->dirty_bitmap, offset,
                                   nb_chunks * s->granularity);
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    /* Before claiming an area in the in-flight bitmap, we have to
     * create a MirrorOp for it so that conflicting requests can wait
     * for it.  mirror_perform() will create the real MirrorOps later,
     * for now we just create a pseudo operation that will wake up all
     * conflicting requests once all real operations have been
     * launched. */
    pseudo_op = g_new(MirrorOp, 1);
    *pseudo_op = (MirrorOp){
        .offset         = offset,
        .bytes          = nb_chunks * s->granularity,
        .is_pseudo_op   = true,
    };
    qemu_co_queue_init(&pseudo_op->waiting_requests);
    QTAILQ_INSERT_TAIL(&s->ops_in_flight, pseudo_op, next);

    bitmap_set(s->in_flight_bitmap, offset / s->granularity, nb_chunks);
    while (nb_chunks > 0 && offset < s->bdev_length) {
        int ret;
        int64_t io_bytes;
        int64_t io_bytes_acct;
        MirrorMethod mirror_method = MIRROR_METHOD_COPY;

        assert(!(offset % s->granularity));
        ret = bdrv_block_status_above(source, NULL, offset,
                                      nb_chunks * s->granularity,
                                      &io_bytes, NULL, NULL);
        if (ret < 0) {
            io_bytes = MIN(nb_chunks * s->granularity, max_io_bytes);
        } else if (ret & BDRV_BLOCK_DATA) {
            io_bytes = MIN(io_bytes, max_io_bytes);
        }

        io_bytes -= io_bytes % s->granularity;
        if (io_bytes < s->granularity) {
            io_bytes = s->granularity;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_offset;
            int64_t target_bytes;
            bdrv_round_to_clusters(blk_bs(s->target), offset, io_bytes,
                                   &target_offset, &target_bytes);
            if (target_offset == offset &&
                target_bytes == io_bytes) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                    MIRROR_METHOD_ZERO :
                                    MIRROR_METHOD_DISCARD;
            }
        }

        while (s->in_flight >= MAX_IN_FLIGHT) {
            trace_mirror_yield_in_flight(s, offset, s->in_flight);
            mirror_wait_for_free_in_flight_slot(s);
        }

        if (s->ret < 0) {
            ret = 0;
            goto fail;
        }

        io_bytes = mirror_clip_bytes(s, offset, io_bytes);
        io_bytes = mirror_perform(s, offset, io_bytes, mirror_method);
        if (mirror_method != MIRROR_METHOD_COPY && write_zeroes_ok) {
            io_bytes_acct = 0;
        } else {
            io_bytes_acct = io_bytes;
        }
        assert(io_bytes);
        offset += io_bytes;
        nb_chunks -= DIV_ROUND_UP(io_bytes, s->granularity);
        delay_ns = block_job_ratelimit_get_delay(&s->common, io_bytes_acct);
    }

    ret = delay_ns;
fail:
    QTAILQ_REMOVE(&s->ops_in_flight, pseudo_op, next);
    qemu_co_queue_restart_all(&pseudo_op->waiting_requests);
    g_free(pseudo_op);

    return ret;
}

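/* Carve s->buf into granularity-sized chunks and put them all on the free
 * list. */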
static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}

/* This is also used for the .pause callback. There is no matching
 * mirror_resume() because mirror_run() will begin iterating again
 * when the job is resumed.
 */
static void coroutine_fn mirror_wait_for_all_io(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        mirror_wait_for_free_in_flight_slot(s);
    }
}

/**
 * mirror_exit_common: handle both abort() and prepare() cases.
 * for .prepare, returns 0 on success and -errno on failure.
 * for .abort cases, denoted by abort = true, MUST return 0.
 */
static int mirror_exit_common(Job *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockJob *bjob = &s->common;
    MirrorBDSOpaque *bs_opaque;
    AioContext *replace_aio_context = NULL;
    BlockDriverState *src;
    BlockDriverState *target_bs;
    BlockDriverState *mirror_top_bs;
    Error *local_err = NULL;
    bool abort = job->ret < 0;
    int ret = 0;

    if (s->prepared) {
        return 0;
    }
    s->prepared = true;

    mirror_top_bs = s->mirror_top_bs;
    bs_opaque = mirror_top_bs->opaque;
    src = mirror_top_bs->backing->bs;
    target_bs = blk_bs(s->target);

    if (bdrv_chain_contains(src, target_bs)) {
        bdrv_unfreeze_backing_chain(mirror_top_bs, target_bs);
    }

    bdrv_release_dirty_bitmap(s->dirty_bitmap);

    /* Make sure that the source BDS doesn't go away during bdrv_replace_node,
     * before we can call bdrv_drained_end */
    bdrv_ref(src);
    bdrv_ref(mirror_top_bs);
    bdrv_ref(target_bs);

    /*
     * Remove target parent that still uses BLK_PERM_WRITE/RESIZE before
     * inserting target_bs at s->to_replace, where we might not be able to get
     * these permissions.
     */
    blk_unref(s->target);
    s->target = NULL;

    /* We don't access the source any more. Dropping any WRITE/RESIZE is
     * required before it could become a backing file of target_bs. Not having
     * these permissions any more means that we can't allow any new requests on
     * mirror_top_bs from now on, so keep it drained. */
    bdrv_drained_begin(mirror_top_bs);
    bs_opaque->stop = true;
    bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
                             &error_abort);
    if (!abort && s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
        BlockDriverState *backing = s->is_none_mode ? src : s->base;
        BlockDriverState *unfiltered_target = bdrv_skip_filters(target_bs);

        if (bdrv_cow_bs(unfiltered_target) != backing) {
            bdrv_set_backing_hd(unfiltered_target, backing, &local_err);
            if (local_err) {
                error_report_err(local_err);
                local_err = NULL;
                ret = -EPERM;
            }
        }
    } else if (!abort && s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) {
        assert(!bdrv_backing_chain_next(target_bs));
        ret = bdrv_open_backing_file(bdrv_skip_filters(target_bs), NULL,
                                     "backing", &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            local_err = NULL;
        }
    }

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && !abort) {
        BlockDriverState *to_replace = s->to_replace ?: src;
        bool ro = bdrv_is_read_only(to_replace);

        if (ro != bdrv_is_read_only(target_bs)) {
            bdrv_reopen_set_read_only(target_bs, ro, NULL);
        }

        /* The mirror job has no requests in flight any more, but we need to
         * drain potential other users of the BDS before changing the graph. */
        assert(s->in_drain);
        bdrv_drained_begin(target_bs);
        /*
         * Cannot use check_to_replace_node() here, because that would
         * check for an op blocker on @to_replace, and we have our own
         * there.
         */
        if (bdrv_recurse_can_replace(src, to_replace)) {
            bdrv_replace_node(to_replace, target_bs, &local_err);
        } else {
            error_setg(&local_err, "Can no longer replace '%s' by '%s', "
                       "because it can no longer be guaranteed that doing so "
                       "would not lead to an abrupt change of visible data",
                       to_replace->node_name, target_bs->node_name);
        }
        bdrv_drained_end(target_bs);
        if (local_err) {
            error_report_err(local_err);
            ret = -EPERM;
        }
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_unref(target_bs);

    /*
     * Remove the mirror filter driver from the graph. Before this, get rid of
     * the blockers on the intermediate nodes so that the resulting state is
     * valid.
     */
    block_job_remove_all_bdrv(bjob);
    bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort);

    bs_opaque->job = NULL;

    bdrv_drained_end(src);
    bdrv_drained_end(mirror_top_bs);
    s->in_drain = false;
    bdrv_unref(mirror_top_bs);
    bdrv_unref(src);

    return ret;
}

static int mirror_prepare(Job *job)
{
    return mirror_exit_common(job);
}

static void mirror_abort(Job *job)
{
    int ret = mirror_exit_common(job);
    assert(ret == 0);
}

static void coroutine_fn mirror_throttle(MirrorBlockJob *s)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    if (now - s->last_pause_ns > BLOCK_JOB_SLICE_TIME) {
        s->last_pause_ns = now;
        job_sleep_ns(&s->common.job, 0);
    } else {
        job_pause_point(&s->common.job);
    }
}

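/* Prepare for the initial copy.  If the target must be zero-initialized but
 * cannot write zeroes efficiently, just mark the whole device dirty;
 * otherwise write the zeroes explicitly if needed, then seed the dirty
 * bitmap from the allocation status of the source chain above the base. */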
static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
{
    int64_t offset;
    BlockDriverState *bs = s->mirror_top_bs->backing->bs;
    BlockDriverState *target_bs = blk_bs(s->target);
    int ret;
    int64_t count;

    if (s->zero_target) {
        if (!bdrv_can_write_zeroes_with_unmap(target_bs)) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, s->bdev_length);
            return 0;
        }

        s->initial_zeroing_ongoing = true;
        for (offset = 0; offset < s->bdev_length; ) {
            int bytes = MIN(s->bdev_length - offset,
                            QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

            mirror_throttle(s);

            if (job_is_cancelled(&s->common.job)) {
                s->initial_zeroing_ongoing = false;
                return 0;
            }

            if (s->in_flight >= MAX_IN_FLIGHT) {
                trace_mirror_yield(s, UINT64_MAX, s->buf_free_count,
                                   s->in_flight);
                mirror_wait_for_free_in_flight_slot(s);
                continue;
            }

            mirror_perform(s, offset, bytes, MIRROR_METHOD_ZERO);
            offset += bytes;
        }

        mirror_wait_for_all_io(s);
        s->initial_zeroing_ongoing = false;
    }

    /* Loop over the sectors and initialize the dirty bitmap.  */
    for (offset = 0; offset < s->bdev_length; ) {
        /* Just to make sure we are not exceeding the int limit. */
        int bytes = MIN(s->bdev_length - offset,
                        QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

        mirror_throttle(s);

        if (job_is_cancelled(&s->common.job)) {
            return 0;
        }

        ret = bdrv_is_allocated_above(bs, s->base_overlay, true, offset, bytes,
                                      &count);
        if (ret < 0) {
            return ret;
        }

        assert(count);
        if (ret > 0) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, offset, count);
        }
        offset += count;
    }
    return 0;
}

/* Called when going out of the streaming phase to flush the bulk of the
 * data to the medium, or just before completing.
 */
static int mirror_flush(MirrorBlockJob *s)
{
    int ret = blk_flush(s->target);
    if (ret < 0) {
        if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) {
            s->ret = ret;
        }
    }
    return ret;
}

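/* The main loop of the mirror job: do the initial copy, then keep copying
 * dirty areas until the source and target are in sync and the job either
 * completes or is cancelled. */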
static int coroutine_fn mirror_run(Job *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockDriverState *bs = s->mirror_top_bs->backing->bs;
    BlockDriverState *target_bs = blk_bs(s->target);
    bool need_drain = true;
    BlockDeviceIoStatus iostatus;
    int64_t length;
    int64_t target_length;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for an empty string */
    int ret = 0;

    if (job_is_cancelled(&s->common.job)) {
        goto immediate_exit;
    }

    s->bdev_length = bdrv_getlength(bs);
    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    }

    target_length = blk_getlength(s->target);
    if (target_length < 0) {
        ret = target_length;
        goto immediate_exit;
    }

    /* Active commit must resize the base image if its size differs from the
     * active layer. */
    if (s->base == blk_bs(s->target)) {
        if (s->bdev_length > target_length) {
            ret = blk_co_truncate(s->target, s->bdev_length, false,
                                  PREALLOC_MODE_OFF, 0, NULL);
            if (ret < 0) {
                goto immediate_exit;
            }
        }
    } else if (s->bdev_length != target_length) {
        error_setg(errp, "Source and target image have different sizes");
        ret = -EINVAL;
        goto immediate_exit;
    }

    if (s->bdev_length == 0) {
        /* Transition to the READY state and wait for complete. */
        job_transition_to_ready(&s->common.job);
        s->actively_synced = true;
        while (!job_cancel_requested(&s->common.job) && !s->should_complete) {
            job_yield(&s->common.job);
        }
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW.  Instead, we copy sectors around the
     * dirty data if needed.  We need a bitmap to do that.
     */
    bdrv_get_backing_filename(target_bs, backing_filename,
                              sizeof(backing_filename));
    if (!bdrv_get_info(target_bs, &bdi) && bdi.cluster_size) {
        s->target_cluster_size = bdi.cluster_size;
    } else {
        s->target_cluster_size = BDRV_SECTOR_SIZE;
    }
    if (backing_filename[0] && !bdrv_backing_chain_next(target_bs) &&
        s->granularity < s->target_cluster_size) {
        s->buf_size = MAX(s->buf_size, s->target_cluster_size);
        s->cow_bitmap = bitmap_new(length);
    }
    s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);

    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    mirror_free_init(s);

    s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (!s->is_none_mode) {
        ret = mirror_dirty_init(s);
        if (ret < 0 || job_is_cancelled(&s->common.job)) {
            goto immediate_exit;
        }
    }

    assert(!s->dbi);
    s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap);
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt, delta;
        bool should_complete;

        /* Do not start passive operations while there are active
         * writes in progress */
        while (s->in_active_write_counter) {
            mirror_wait_for_any_operation(s, true);
        }

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        job_pause_point(&s->common.job);

        if (job_is_cancelled(&s->common.job)) {
            ret = 0;
            goto immediate_exit;
        }

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* cnt is the number of dirty bytes remaining and s->bytes_in_flight is
         * the number of bytes currently being processed; together those are
         * the current remaining operation length */
        job_progress_set_remaining(&s->common.job, s->bytes_in_flight + cnt);

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every BLOCK_JOB_SLICE_TIME nanoseconds, or when there is
         * an error, or when the source is clean, whichever comes first. */
        delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns;
        WITH_JOB_LOCK_GUARD() {
            iostatus = s->common.iostatus;
        }
        if (delta < BLOCK_JOB_SLICE_TIME &&
            iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, cnt, s->buf_free_count, s->in_flight);
                mirror_wait_for_free_in_flight_slot(s);
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            if (!job_is_ready(&s->common.job)) {
                if (mirror_flush(s) < 0) {
                    /* Go check s->ret.  */
                    continue;
                }
                /* We're out of the streaming phase.  From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion.  This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                job_transition_to_ready(&s->common.job);
                if (s->copy_mode != MIRROR_COPY_MODE_BACKGROUND) {
                    s->actively_synced = true;
                }
            }

            should_complete = s->should_complete ||
                job_cancel_requested(&s->common.job);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that the guest can submit I/O while the mirror job
             * runs, so pause it now.  Before deciding whether to switch
             * to the target, check one last time whether new I/O has
             * arrived in the meantime, and if not, flush the data to disk.
             */
            trace_mirror_before_drain(s, cnt);

            s->in_drain = true;
            bdrv_drained_begin(bs);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            if (cnt > 0 || mirror_flush(s) < 0) {
                bdrv_drained_end(bs);
                s->in_drain = false;
                continue;
            }

            /* The two disks are in sync.  Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            need_drain = false;
            break;
        }

        if (job_is_ready(&s->common.job) && !should_complete) {
            delay_ns = (s->in_flight == 0 &&
                        cnt == 0 ? BLOCK_JOB_SLICE_TIME : 0);
        }
        trace_mirror_before_sleep(s, cnt, job_is_ready(&s->common.job),
                                  delay_ns);
        job_sleep_ns(&s->common.job, delay_ns);
        s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong.  Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || job_is_cancelled(&s->common.job));
        assert(need_drain);
        mirror_wait_for_all_io(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_dirty_iter_free(s->dbi);

    if (need_drain) {
        s->in_drain = true;
        bdrv_drained_begin(bs);
    }

    return ret;
}

static void mirror_complete(Job *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);

    if (!job_is_ready(job)) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    /* block all operations on to_replace bs */
    if (s->replaces) {
        AioContext *replace_aio_context;

        s->to_replace = bdrv_find_node(s->replaces);
        if (!s->to_replace) {
            error_setg(errp, "Node name '%s' not found", s->replaces);
            return;
        }

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        /* TODO Translate this into child freeze system. */
        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);

        aio_context_release(replace_aio_context);
    }

    s->should_complete = true;

    /* If the job is paused, it will be re-entered when it is resumed */
    WITH_JOB_LOCK_GUARD() {
        if (!job->paused) {
            job_enter_cond_locked(job, NULL);
        }
    }
}

static void coroutine_fn mirror_pause(Job *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);

    mirror_wait_for_all_io(s);
}

static bool mirror_drained_poll(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    /* If the job is neither paused nor cancelled, we can't be sure that it
     * won't issue more requests. We make an exception if we've reached this
     * point from one of our own drain sections, to avoid a deadlock waiting
     * for ourselves.
     */
    WITH_JOB_LOCK_GUARD() {
        if (!s->common.job.paused && !job_is_cancelled_locked(&job->job)
            && !s->in_drain) {
            return true;
        }
    }

    return !!s->in_flight;
}

static bool mirror_cancel(Job *job, bool force)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockDriverState *target = blk_bs(s->target);

    /*
     * Before the job is READY, we treat any cancellation like a
     * force-cancellation.
     */
    force = force || !job_is_ready(job);

    if (force) {
        bdrv_cancel_in_flight(target);
    }
    return force;
}

static bool commit_active_cancel(Job *job, bool force)
{
    /* Same as above in mirror_cancel() */
    return force || !job_is_ready(job);
}

static const BlockJobDriver mirror_job_driver = {
    .job_driver = {
        .instance_size          = sizeof(MirrorBlockJob),
        .job_type               = JOB_TYPE_MIRROR,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .run                    = mirror_run,
        .prepare                = mirror_prepare,
        .abort                  = mirror_abort,
        .pause                  = mirror_pause,
        .complete               = mirror_complete,
        .cancel                 = mirror_cancel,
    },
    .drained_poll           = mirror_drained_poll,
};

static const BlockJobDriver commit_active_job_driver = {
    .job_driver = {
        .instance_size          = sizeof(MirrorBlockJob),
        .job_type               = JOB_TYPE_COMMIT,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .run                    = mirror_run,
        .prepare                = mirror_prepare,
        .abort                  = mirror_abort,
        .pause                  = mirror_pause,
        .complete               = mirror_complete,
        .cancel                 = commit_active_cancel,
    },
    .drained_poll           = mirror_drained_poll,
};

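/* Mirror a guest write to the target in active (write-blocking) mode.  The
 * range is shrunk to skip already-dirty unaligned head and tail padding;
 * on failure, the affected area is marked dirty again. */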
static void coroutine_fn
do_sync_target_write(MirrorBlockJob *job, MirrorMethod method,
                     uint64_t offset, uint64_t bytes,
                     QEMUIOVector *qiov, int flags)
{
    int ret;
    size_t qiov_offset = 0;
    int64_t bitmap_offset, bitmap_end;

    if (!QEMU_IS_ALIGNED(offset, job->granularity) &&
        bdrv_dirty_bitmap_get(job->dirty_bitmap, offset))
    {
            /*
             * Dirty unaligned padding: ignore it.
             *
             * Reasoning:
             * 1. If we copy it, we can't reset the corresponding bit in the
             *    dirty_bitmap, as there may be some "dirty" bytes that have
             *    still not been copied.
             * 2. It's already dirty, so skipping it does not make the mirror
             *    job's progress diverge.
             *
             * Note that because of this, a guest write may contribute nothing
             * to the mirror's convergence; that is fine, because we have the
             * background mirroring process. If under bad circumstances (high
             * guest I/O load) the background process starves, we will not
             * converge anyway, even if every write contributed, as the guest
             * is not guaranteed to rewrite the whole disk.
             */
            qiov_offset = QEMU_ALIGN_UP(offset, job->granularity) - offset;
            if (bytes <= qiov_offset) {
                /* nothing to do after shrink */
                return;
            }
            offset += qiov_offset;
            bytes -= qiov_offset;
    }

    if (!QEMU_IS_ALIGNED(offset + bytes, job->granularity) &&
        bdrv_dirty_bitmap_get(job->dirty_bitmap, offset + bytes - 1))
    {
        uint64_t tail = (offset + bytes) % job->granularity;

        if (bytes <= tail) {
            /* nothing to do after shrink */
            return;
        }
        bytes -= tail;
    }

    /*
     * The tails are either clean or have been shrunk away, so for bitmap
     * resetting we can safely align the range down.
     */
    bitmap_offset = QEMU_ALIGN_UP(offset, job->granularity);
    bitmap_end = QEMU_ALIGN_DOWN(offset + bytes, job->granularity);
    if (bitmap_offset < bitmap_end) {
        bdrv_reset_dirty_bitmap(job->dirty_bitmap, bitmap_offset,
                                bitmap_end - bitmap_offset);
    }

    job_progress_increase_remaining(&job->common.job, bytes);

    switch (method) {
    case MIRROR_METHOD_COPY:
        ret = blk_co_pwritev_part(job->target, offset, bytes,
                                  qiov, qiov_offset, flags);
        break;

    case MIRROR_METHOD_ZERO:
        assert(!qiov);
        ret = blk_co_pwrite_zeroes(job->target, offset, bytes, flags);
        break;

    case MIRROR_METHOD_DISCARD:
        assert(!qiov);
        ret = blk_co_pdiscard(job->target, offset, bytes);
        break;

    default:
        abort();
    }

    if (ret >= 0) {
        job_progress_update(&job->common.job, bytes);
    } else {
        BlockErrorAction action;

        /*
         * We failed, so we should mark the whole area dirty, aligned up.
         * Note that we don't care about any shrunk tails: they were dirty
         * when the function started, and they must still be dirty, as we've
         * locked the region for the in-flight op.
         */
        bitmap_offset = QEMU_ALIGN_DOWN(offset, job->granularity);
        bitmap_end = QEMU_ALIGN_UP(offset + bytes, job->granularity);
        bdrv_set_dirty_bitmap(job->dirty_bitmap, bitmap_offset,
                              bitmap_end - bitmap_offset);
        job->actively_synced = false;

        action = mirror_error_action(job, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT) {
            if (!job->ret) {
                job->ret = ret;
            }
        }
    }
}

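/* Register an active write: create a MirrorOp for it, wait for conflicting
 * in-flight operations, and claim the covered chunks in the in-flight
 * bitmap.  active_write_settle() undoes all of this. */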
static MirrorOp *coroutine_fn active_write_prepare(MirrorBlockJob *s,
                                                   uint64_t offset,
                                                   uint64_t bytes)
{
    MirrorOp *op;
    uint64_t start_chunk = offset / s->granularity;
    uint64_t end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity);

    op = g_new(MirrorOp, 1);
    *op = (MirrorOp){
        .s                  = s,
        .offset             = offset,
        .bytes              = bytes,
        .is_active_write    = true,
        .is_in_flight       = true,
        .co                 = qemu_coroutine_self(),
    };
    qemu_co_queue_init(&op->waiting_requests);
    QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);

    s->in_active_write_counter++;

    mirror_wait_on_conflicts(op, s, offset, bytes);

    bitmap_set(s->in_flight_bitmap, start_chunk, end_chunk - start_chunk);

    return op;
}

static void coroutine_fn active_write_settle(MirrorOp *op)
{
    uint64_t start_chunk = op->offset / op->s->granularity;
    uint64_t end_chunk = DIV_ROUND_UP(op->offset + op->bytes,
                                      op->s->granularity);

    if (!--op->s->in_active_write_counter && op->s->actively_synced) {
        BdrvChild *source = op->s->mirror_top_bs->backing;

        if (QLIST_FIRST(&source->bs->parents) == source &&
            QLIST_NEXT(source, next_parent) == NULL)
        {
            /* Assert that we are back in sync once all active write
             * operations are settled.
             * Note that we can only assert this if the mirror node
             * is the source node's only parent. */
            assert(!bdrv_get_dirty_count(op->s->dirty_bitmap));
        }
    }
    bitmap_clear(op->s->in_flight_bitmap, start_chunk, end_chunk - start_chunk);
    QTAILQ_REMOVE(&op->s->ops_in_flight, op, next);
    qemu_co_queue_restart_all(&op->waiting_requests);
    g_free(op);
}

static int coroutine_fn bdrv_mirror_top_preadv(BlockDriverState *bs,
    int64_t offset, int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
}

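/* Forward a guest write, write-zeroes or discard request to the source and,
 * in active mirroring mode, also apply it synchronously to the target. */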
static int coroutine_fn bdrv_mirror_top_do_write(BlockDriverState *bs,
    MirrorMethod method, uint64_t offset, uint64_t bytes, QEMUIOVector *qiov,
    int flags)
{
    MirrorOp *op = NULL;
    MirrorBDSOpaque *s = bs->opaque;
    int ret = 0;
    bool copy_to_target;

    copy_to_target = s->job->ret >= 0 &&
                     !job_is_cancelled(&s->job->common.job) &&
                     s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING;

    if (copy_to_target) {
        op = active_write_prepare(s->job, offset, bytes);
    }

    switch (method) {
    case MIRROR_METHOD_COPY:
        ret = bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags);
        break;

    case MIRROR_METHOD_ZERO:
        ret = bdrv_co_pwrite_zeroes(bs->backing, offset, bytes, flags);
        break;

    case MIRROR_METHOD_DISCARD:
        ret = bdrv_co_pdiscard(bs->backing, offset, bytes);
        break;

    default:
        abort();
    }

    if (ret < 0) {
        goto out;
    }

    if (copy_to_target) {
        do_sync_target_write(s->job, method, offset, bytes, qiov, flags);
    }

out:
    if (copy_to_target) {
        active_write_settle(op);
    }
    return ret;
}

static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs,
    int64_t offset, int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    MirrorBDSOpaque *s = bs->opaque;
    QEMUIOVector bounce_qiov;
    void *bounce_buf;
    int ret = 0;
    bool copy_to_target;

    copy_to_target = s->job->ret >= 0 &&
                     !job_is_cancelled(&s->job->common.job) &&
                     s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING;

    if (copy_to_target) {
        /* The guest might concurrently modify the data to write; but
         * the data on source and destination must match, so we have
         * to use a bounce buffer if we are going to write to the
         * target now. */
        bounce_buf = qemu_blockalign(bs, bytes);
        iov_to_buf_full(qiov->iov, qiov->niov, 0, bounce_buf, bytes);

        qemu_iovec_init(&bounce_qiov, 1);
        qemu_iovec_add(&bounce_qiov, bounce_buf, bytes);
        qiov = &bounce_qiov;

        flags &= ~BDRV_REQ_REGISTERED_BUF;
    }

    ret = bdrv_mirror_top_do_write(bs, MIRROR_METHOD_COPY, offset, bytes, qiov,
                                   flags);

    if (copy_to_target) {
        qemu_iovec_destroy(&bounce_qiov);
        qemu_vfree(bounce_buf);
    }

    return ret;
}

static int coroutine_fn bdrv_mirror_top_flush(BlockDriverState *bs)
{
    if (bs->backing == NULL) {
        /* we can be here after failed bdrv_append in mirror_start_job */
        return 0;
    }
    return bdrv_co_flush(bs->backing->bs);
}

static int coroutine_fn bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int64_t bytes, BdrvRequestFlags flags)
{
    return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_ZERO, offset, bytes, NULL,
                                    flags);
}

static int coroutine_fn bdrv_mirror_top_pdiscard(BlockDriverState *bs,
    int64_t offset, int64_t bytes)
{
    return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_DISCARD, offset, bytes,
                                    NULL, 0);
}

static void bdrv_mirror_top_refresh_filename(BlockDriverState *bs)
{
    if (bs->backing == NULL) {
        /* we can be here after failed bdrv_attach_child in
         * bdrv_set_backing_hd */
        return;
    }
    pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
            bs->backing->bs->filename);
}

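/* Compute the permissions that the mirror filter needs on its backing
 * child.  Once the job is about to be stopped, nothing needs to be
 * forwarded to the real image any more. */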
static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c,
                                       BdrvChildRole role,
                                       BlockReopenQueue *reopen_queue,
                                       uint64_t perm, uint64_t shared,
                                       uint64_t *nperm, uint64_t *nshared)
{
    MirrorBDSOpaque *s = bs->opaque;

    if (s->stop) {
        /*
         * If the job is to be stopped, we do not need to forward
         * anything to the real image.
         */
        *nperm = 0;
        *nshared = BLK_PERM_ALL;
        return;
    }

    bdrv_default_perms(bs, c, role, reopen_queue,
                       perm, shared, nperm, nshared);

    if (s->is_commit) {
        /*
         * For commit jobs, we cannot take CONSISTENT_READ, because
         * that permission is unshared for everything above the base
         * node (except for filters on the base node).
         * We also have to force-share the WRITE permission, or
         * otherwise we would block ourselves at the base node (if
         * writes are blocked for a node, they are also blocked for
         * its backing file).
         * (We could also share RESIZE, because it may be needed for
         * the target if its size is less than the top node's; but
         * bdrv_default_perms_for_cow() automatically shares RESIZE
         * for backing nodes if WRITE is shared, so there is no need
         * to do it here.)
         */
        *nperm &= ~BLK_PERM_CONSISTENT_READ;
        *nshared |= BLK_PERM_WRITE;
    }
}

1579 /* Dummy node that provides consistent read to its users without requiring
1580  * it from its backing file, and allows writes on the backing file chain. */
1581 static BlockDriver bdrv_mirror_top = {
1582     .format_name                = "mirror_top",
1583     .bdrv_co_preadv             = bdrv_mirror_top_preadv,
1584     .bdrv_co_pwritev            = bdrv_mirror_top_pwritev,
1585     .bdrv_co_pwrite_zeroes      = bdrv_mirror_top_pwrite_zeroes,
1586     .bdrv_co_pdiscard           = bdrv_mirror_top_pdiscard,
1587     .bdrv_co_flush              = bdrv_mirror_top_flush,
1588     .bdrv_refresh_filename      = bdrv_mirror_top_refresh_filename,
1589     .bdrv_child_perm            = bdrv_mirror_top_child_perm,
1590 
1591     .is_filter                  = true,
1592     .filtered_child_is_backing  = true,
1593 };
1594 
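/*
 * A rough, illustrative sketch of the graph built below: bdrv_append()
 * inserts the mirror_top filter between the source node and its parents,
 * so that every guest request passes through the callbacks above:
 *
 *   parents (guest device, ...)
 *              |
 *         mirror_top   (bdrv_mirror_top filter)
 *              |  backing
 *          source (bs)
 *              |  backing
 *             ...
 */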
1595 static BlockJob *mirror_start_job(
1596                              const char *job_id, BlockDriverState *bs,
1597                              int creation_flags, BlockDriverState *target,
1598                              const char *replaces, int64_t speed,
1599                              uint32_t granularity, int64_t buf_size,
1600                              BlockMirrorBackingMode backing_mode,
1601                              bool zero_target,
1602                              BlockdevOnError on_source_error,
1603                              BlockdevOnError on_target_error,
1604                              bool unmap,
1605                              BlockCompletionFunc *cb,
1606                              void *opaque,
1607                              const BlockJobDriver *driver,
1608                              bool is_none_mode, BlockDriverState *base,
1609                              bool auto_complete, const char *filter_node_name,
1610                              bool is_mirror, MirrorCopyMode copy_mode,
1611                              Error **errp)
1612 {
1613     MirrorBlockJob *s;
1614     MirrorBDSOpaque *bs_opaque;
1615     BlockDriverState *mirror_top_bs;
1616     bool target_is_backing;
1617     uint64_t target_perms, target_shared_perms;
1618     int ret;
1619 
1620     if (granularity == 0) {
1621         granularity = bdrv_get_default_bitmap_granularity(target);
1622     }
1623 
1624     assert(is_power_of_2(granularity));
1625 
1626     if (buf_size < 0) {
1627         error_setg(errp, "Invalid parameter 'buf-size'");
1628         return NULL;
1629     }
1630 
1631     if (buf_size == 0) {
1632         buf_size = DEFAULT_MIRROR_BUF_SIZE;
1633     }
1634 
1635     if (bdrv_skip_filters(bs) == bdrv_skip_filters(target)) {
1636         error_setg(errp, "Can't mirror node into itself");
1637         return NULL;
1638     }
1639 
1640     target_is_backing = bdrv_chain_contains(bs, target);
1641 
1642     /* In the case of active commit, add a dummy driver that provides
1643      * consistent reads on the top while they are disabled in the
1644      * intermediate nodes, and that makes the backing chain writable. */
1645     mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name,
1646                                          BDRV_O_RDWR, errp);
1647     if (mirror_top_bs == NULL) {
1648         return NULL;
1649     }
1650     if (!filter_node_name) {
1651         mirror_top_bs->implicit = true;
1652     }
1653 
1654     /* So that we can always drop this node */
1655     mirror_top_bs->never_freeze = true;
1656 
1657     mirror_top_bs->total_sectors = bs->total_sectors;
1658     mirror_top_bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED;
1659     mirror_top_bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED |
1660                                           BDRV_REQ_NO_FALLBACK;
1661     bs_opaque = g_new0(MirrorBDSOpaque, 1);
1662     mirror_top_bs->opaque = bs_opaque;
1663 
1664     bs_opaque->is_commit = target_is_backing;
1665 
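    /*
     * Append mirror_top above bs under drain, so that no request is in
     * flight while the parent links are moved over to the filter node.
     */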
1666     bdrv_drained_begin(bs);
1667     ret = bdrv_append(mirror_top_bs, bs, errp);
1668     bdrv_drained_end(bs);
1669 
1670     if (ret < 0) {
1671         bdrv_unref(mirror_top_bs);
1672         return NULL;
1673     }
1674 
1675     /* Make sure that the source is not resized while the job is running */
1676     s = block_job_create(job_id, driver, NULL, mirror_top_bs,
1677                          BLK_PERM_CONSISTENT_READ,
1678                          BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
1679                          BLK_PERM_WRITE, speed,
1680                          creation_flags, cb, opaque, errp);
1681     if (!s) {
1682         goto fail;
1683     }
1684     bs_opaque->job = s;
1685 
1686     /* The block job now has a reference to this node */
1687     bdrv_unref(mirror_top_bs);
1688 
1689     s->mirror_top_bs = mirror_top_bs;
1690 
1691     /* No resize for the target either; while the mirror is still running, a
1692      * consistent read isn't necessarily possible. We could possibly allow
1693      * writes and graph modifications, though it would likely defeat the
1694      * purpose of a mirror, so leave them blocked for now.
1695      *
1696      * In the case of active commit, things look a bit different, though,
1697      * because the target is an already populated backing file in active use.
1698      * We can allow anything except resize there. */
1699 
1700     target_perms = BLK_PERM_WRITE;
1701     target_shared_perms = BLK_PERM_WRITE_UNCHANGED;
1702 
1703     if (target_is_backing) {
1704         int64_t bs_size, target_size;
1705         bs_size = bdrv_getlength(bs);
1706         if (bs_size < 0) {
1707             error_setg_errno(errp, -bs_size,
1708                              "Could not inquire top image size");
1709             goto fail;
1710         }
1711 
1712         target_size = bdrv_getlength(target);
1713         if (target_size < 0) {
1714             error_setg_errno(errp, -target_size,
1715                              "Could not inquire base image size");
1716             goto fail;
1717         }
1718 
1719         if (target_size < bs_size) {
1720             target_perms |= BLK_PERM_RESIZE;
1721         }
1722 
1723         target_shared_perms |= BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE;
1724     } else if (bdrv_chain_contains(bs, bdrv_skip_filters(target))) {
1725         /*
1726          * We may want to allow this in the future, but it would
1727          * require taking some extra care.
1728          */
1729         error_setg(errp, "Cannot mirror to a filter on top of a node in the "
1730                    "source's backing chain");
1731         goto fail;
1732     }
1733 
1734     s->target = blk_new(s->common.job.aio_context,
1735                         target_perms, target_shared_perms);
1736     ret = blk_insert_bs(s->target, target, errp);
1737     if (ret < 0) {
1738         goto fail;
1739     }
1740     if (is_mirror) {
1741         /* XXX: The mirror target could be an NBD server of the target QEMU
1742          * in the case of non-shared block migration. To allow migration
1743          * completion, we have to allow "inactivate" of the target BB. When
1744          * that happens, we know the job is drained and the vcpus are
1745          * stopped, so no write operation will be performed. The block layer
1746          * already has assertions to ensure that. */
1747         blk_set_force_allow_inactivate(s->target);
1748     }
1749     blk_set_allow_aio_context_change(s->target, true);
1750     blk_set_disable_request_queuing(s->target, true);
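    /*
     * Presumably to avoid deadlocks: the job must still be able to issue
     * requests to the target while the BlockBackend is drained (e.g.
     * during completion), so those requests must not be queued.
     */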
1751 
1752     s->replaces = g_strdup(replaces);
1753     s->on_source_error = on_source_error;
1754     s->on_target_error = on_target_error;
1755     s->is_none_mode = is_none_mode;
1756     s->backing_mode = backing_mode;
1757     s->zero_target = zero_target;
1758     s->copy_mode = copy_mode;
1759     s->base = base;
1760     s->base_overlay = bdrv_find_overlay(bs, base);
1761     s->granularity = granularity;
1762     s->buf_size = ROUND_UP(buf_size, granularity);
1763     s->unmap = unmap;
1764     if (auto_complete) {
1765         s->should_complete = true;
1766     }
1767 
1768     s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
1769     if (!s->dirty_bitmap) {
1770         goto fail;
1771     }
1772     if (s->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING) {
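    /*
     * In write-blocking mode, guest writes are copied to the target
     * synchronously, so they must not re-dirty the bitmap; it only needs
     * to track the data that was dirty before the job started.
     */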
1773         bdrv_disable_dirty_bitmap(s->dirty_bitmap);
1774     }
1775 
1776     ret = block_job_add_bdrv(&s->common, "source", bs, 0,
1777                              BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE |
1778                              BLK_PERM_CONSISTENT_READ,
1779                              errp);
1780     if (ret < 0) {
1781         goto fail;
1782     }
1783 
1784     /* Required permissions are already taken with blk_new() */
1785     block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL,
1786                        &error_abort);
1787 
1788     /* In commit_active_start() all intermediate nodes disappear, so
1789      * any jobs in them must be blocked */
1790     if (target_is_backing) {
1791         BlockDriverState *iter, *filtered_target;
1792         uint64_t iter_shared_perms;
1793 
1794         /*
1795          * The topmost node with
1796          * bdrv_skip_filters(filtered_target) == bdrv_skip_filters(target)
1797          */
1798         filtered_target = bdrv_cow_bs(bdrv_find_overlay(bs, target));
1799 
1800         assert(bdrv_skip_filters(filtered_target) ==
1801                bdrv_skip_filters(target));
1802 
1803         /*
1804          * XXX BLK_PERM_WRITE needs to be allowed so we don't block
1805          * ourselves at s->base (if writes are blocked for a node, they are
1806          * also blocked for its backing file). The other option would be a
1807          * second filter driver above s->base (== target).
1808          */
1809         iter_shared_perms = BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE;
1810 
1811         for (iter = bdrv_filter_or_cow_bs(bs); iter != target;
1812              iter = bdrv_filter_or_cow_bs(iter))
1813         {
1814             if (iter == filtered_target) {
1815                 /*
1816                  * From here on, all nodes are filters on the base.
1817                  * This allows us to share BLK_PERM_CONSISTENT_READ.
1818                  */
1819                 iter_shared_perms |= BLK_PERM_CONSISTENT_READ;
1820             }
1821 
1822             ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
1823                                      iter_shared_perms, errp);
1824             if (ret < 0) {
1825                 goto fail;
1826             }
1827         }
1828 
1829         if (bdrv_freeze_backing_chain(mirror_top_bs, target, errp) < 0) {
1830             goto fail;
1831         }
1832     }
1833 
1834     QTAILQ_INIT(&s->ops_in_flight);
1835 
1836     trace_mirror_start(bs, s, opaque);
1837     job_start(&s->common.job);
1838 
1839     return &s->common;
1840 
1841 fail:
1842     if (s) {
1843         /* Make sure this BDS does not go away until we have completed the graph
1844          * changes below */
1845         bdrv_ref(mirror_top_bs);
1846 
1847         g_free(s->replaces);
1848         blk_unref(s->target);
1849         bs_opaque->job = NULL;
1850         if (s->dirty_bitmap) {
1851             bdrv_release_dirty_bitmap(s->dirty_bitmap);
1852         }
1853         job_early_fail(&s->common.job);
1854     }
1855 
1856     bs_opaque->stop = true;
1857     bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
1858                              &error_abort);
1859     bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort);
1860 
1861     bdrv_unref(mirror_top_bs);
1862 
1863     return NULL;
1864 }
1865 
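/*
 * For illustration (all names hypothetical): mirror_start() backs the QMP
 * 'drive-mirror' and 'blockdev-mirror' commands, e.g.:
 *
 *   { "execute": "blockdev-mirror",
 *     "arguments": { "job-id": "job0", "device": "drive0",
 *                    "target": "target0", "sync": "full" } }
 */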
1866 void mirror_start(const char *job_id, BlockDriverState *bs,
1867                   BlockDriverState *target, const char *replaces,
1868                   int creation_flags, int64_t speed,
1869                   uint32_t granularity, int64_t buf_size,
1870                   MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
1871                   bool zero_target,
1872                   BlockdevOnError on_source_error,
1873                   BlockdevOnError on_target_error,
1874                   bool unmap, const char *filter_node_name,
1875                   MirrorCopyMode copy_mode, Error **errp)
1876 {
1877     bool is_none_mode;
1878     BlockDriverState *base;
1879 
1880     GLOBAL_STATE_CODE();
1881 
1882     if ((mode == MIRROR_SYNC_MODE_INCREMENTAL) ||
1883         (mode == MIRROR_SYNC_MODE_BITMAP)) {
1884         error_setg(errp, "Sync mode '%s' not supported",
1885                    MirrorSyncMode_str(mode));
1886         return;
1887     }
1888     is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
1889     base = mode == MIRROR_SYNC_MODE_TOP ? bdrv_backing_chain_next(bs) : NULL;
1890     mirror_start_job(job_id, bs, creation_flags, target, replaces,
1891                      speed, granularity, buf_size, backing_mode, zero_target,
1892                      on_source_error, on_target_error, unmap, NULL, NULL,
1893                      &mirror_job_driver, is_none_mode, base, false,
1894                      filter_node_name, true, copy_mode, errp);
1895 }
1896 
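/*
 * For illustration (all names hypothetical): commit_active_start() backs
 * the QMP 'block-commit' command when the top image is the active layer,
 * e.g.:
 *
 *   { "execute": "block-commit",
 *     "arguments": { "job-id": "job0", "device": "drive0" } }
 */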
1897 BlockJob *commit_active_start(const char *job_id, BlockDriverState *bs,
1898                               BlockDriverState *base, int creation_flags,
1899                               int64_t speed, BlockdevOnError on_error,
1900                               const char *filter_node_name,
1901                               BlockCompletionFunc *cb, void *opaque,
1902                               bool auto_complete, Error **errp)
1903 {
1904     bool base_read_only;
1905     BlockJob *job;
1906 
1907     GLOBAL_STATE_CODE();
1908 
1909     base_read_only = bdrv_is_read_only(base);
1910 
1911     if (base_read_only) {
1912         if (bdrv_reopen_set_read_only(base, false, errp) < 0) {
1913             return NULL;
1914         }
1915     }
1916 
1917     job = mirror_start_job(
1918                      job_id, bs, creation_flags, base, NULL, speed, 0, 0,
1919                      MIRROR_LEAVE_BACKING_CHAIN, false,
1920                      on_error, on_error, true, cb, opaque,
1921                      &commit_active_job_driver, false, base, auto_complete,
1922                      filter_node_name, false, MIRROR_COPY_MODE_BACKGROUND,
1923                      errp);
1924     if (!job) {
1925         goto error_restore_flags;
1926     }
1927 
1928     return job;
1929 
1930 error_restore_flags:
1931     /* Ignore the error and errp of bdrv_reopen_set_read_only(), because we
1932      * want to propagate the original error */
1933     if (base_read_only) {
1934         bdrv_reopen_set_read_only(base, true, NULL);
1935     }
1936     return NULL;
1937 }
1938