xref: /openbmc/qemu/block/mirror.c (revision d341d9f3)
1 /*
2  * Image mirroring
3  *
4  * Copyright Red Hat, Inc. 2012
5  *
6  * Authors:
7  *  Paolo Bonzini  <pbonzini@redhat.com>
8  *
9  * This work is licensed under the terms of the GNU LGPL, version 2 or later.
10  * See the COPYING.LIB file in the top-level directory.
11  *
12  */
13 
14 #include "qemu/osdep.h"
15 #include "trace.h"
16 #include "block/blockjob.h"
17 #include "block/block_int.h"
18 #include "sysemu/block-backend.h"
19 #include "qapi/qmp/qerror.h"
20 #include "qemu/ratelimit.h"
21 #include "qemu/bitmap.h"
22 #include "qemu/error-report.h"
23 
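/* SLICE_TIME bounds both one rate-limit slice and how long the job works
 * before pausing briefly; MAX_IN_FLIGHT caps concurrent copy operations;
 * the default copy buffer is 10 MiB. */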
24 #define SLICE_TIME    100000000ULL /* ns */
25 #define MAX_IN_FLIGHT 16
26 #define DEFAULT_MIRROR_BUF_SIZE   (10 << 20)
27 
28 /* The mirroring buffer is divided into granularity-sized chunks.
29  * Free chunks are kept on a free list.
30  */
31 typedef struct MirrorBuffer {
32     QSIMPLEQ_ENTRY(MirrorBuffer) next;
33 } MirrorBuffer;
34 
35 typedef struct MirrorBlockJob {
36     BlockJob common;
37     RateLimit limit;
38     BlockDriverState *target;
39     BlockDriverState *base;
40     /* The name of the graph node to replace */
41     char *replaces;
42     /* The BDS to replace */
43     BlockDriverState *to_replace;
44     /* Used to block operations on the drive-mirror-replace target */
45     Error *replace_blocker;
46     bool is_none_mode;
47     BlockdevOnError on_source_error, on_target_error;
48     bool synced;
49     bool should_complete;
50     int64_t sector_num;
51     int64_t granularity;
52     size_t buf_size;
53     int64_t bdev_length;
54     unsigned long *cow_bitmap;
55     BdrvDirtyBitmap *dirty_bitmap;
56     HBitmapIter hbi;
57     uint8_t *buf;
58     QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
59     int buf_free_count;
60 
61     unsigned long *in_flight_bitmap;
62     int in_flight;
63     int sectors_in_flight;
64     int ret;
65     bool unmap;
66     bool waiting_for_io;
67 } MirrorBlockJob;
68 
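/* State for one in-flight copy operation; passed as the opaque pointer to
 * the AIO completion callbacks below. */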
69 typedef struct MirrorOp {
70     MirrorBlockJob *s;
71     QEMUIOVector qiov;
72     int64_t sector_num;
73     int nb_sectors;
74 } MirrorOp;
75 
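/* Map an I/O error to a BlockErrorAction according to the job's error
 * policy.  Any error means the target can no longer be assumed to be in
 * sync, so clear s->synced first. */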
76 static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
77                                             int error)
78 {
79     s->synced = false;
80     if (read) {
81         return block_job_error_action(&s->common, s->common.bs,
82                                       s->on_source_error, true, error);
83     } else {
84         return block_job_error_action(&s->common, s->target,
85                                       s->on_target_error, false, error);
86     }
87 }
88 
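/* Completion bookkeeping shared by all copy paths: return the buffer chunks
 * to the free list, clear the in-flight bits for the affected chunks, record
 * progress on success, and wake the job coroutine if it is waiting for an
 * operation to finish. */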
89 static void mirror_iteration_done(MirrorOp *op, int ret)
90 {
91     MirrorBlockJob *s = op->s;
92     struct iovec *iov;
93     int64_t chunk_num;
94     int i, nb_chunks, sectors_per_chunk;
95 
96     trace_mirror_iteration_done(s, op->sector_num, op->nb_sectors, ret);
97 
98     s->in_flight--;
99     s->sectors_in_flight -= op->nb_sectors;
100     iov = op->qiov.iov;
101     for (i = 0; i < op->qiov.niov; i++) {
102         MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
103         QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
104         s->buf_free_count++;
105     }
106 
107     sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
108     chunk_num = op->sector_num / sectors_per_chunk;
109     nb_chunks = op->nb_sectors / sectors_per_chunk;
110     bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
111     if (ret >= 0) {
112         if (s->cow_bitmap) {
113             bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
114         }
115         s->common.offset += (uint64_t)op->nb_sectors * BDRV_SECTOR_SIZE;
116     }
117 
118     qemu_iovec_destroy(&op->qiov);
119     g_free(op);
120 
121     if (s->waiting_for_io) {
122         qemu_coroutine_enter(s->common.co, NULL);
123     }
124 }
125 
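/* AIO callback for writes (and zeroes/discards) on the target.  On failure,
 * re-dirty the sectors so that they are retried and consult the target
 * error policy. */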
126 static void mirror_write_complete(void *opaque, int ret)
127 {
128     MirrorOp *op = opaque;
129     MirrorBlockJob *s = op->s;
130     if (ret < 0) {
131         BlockErrorAction action;
132 
133         bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors);
134         action = mirror_error_action(s, false, -ret);
135         if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
136             s->ret = ret;
137         }
138     }
139     mirror_iteration_done(op, ret);
140 }
141 
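/* AIO callback for reads from the source.  On success, chain into the write
 * to the target; on failure, re-dirty the sectors and consult the source
 * error policy. */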
142 static void mirror_read_complete(void *opaque, int ret)
143 {
144     MirrorOp *op = opaque;
145     MirrorBlockJob *s = op->s;
146     if (ret < 0) {
147         BlockErrorAction action;
148 
149         bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors);
150         action = mirror_error_action(s, true, -ret);
151         if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
152             s->ret = ret;
153         }
154 
155         mirror_iteration_done(op, ret);
156         return;
157     }
158     bdrv_aio_writev(s->target, op->sector_num, &op->qiov, op->nb_sectors,
159                     mirror_write_complete, op);
160 }
161 
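/* Copy one batch of dirty data: take the next dirty chunk from the dirty
 * bitmap, extend it with adjacent dirty chunks (and round it up to cluster
 * boundaries when the target cannot do COW itself), then start the
 * asynchronous copy.  Returns the delay requested by the rate limiter, in
 * nanoseconds. */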
162 static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
163 {
164     BlockDriverState *source = s->common.bs;
165     int nb_sectors, sectors_per_chunk, nb_chunks, max_iov;
166     int64_t end, sector_num, next_chunk, next_sector, hbitmap_next_sector;
167     uint64_t delay_ns = 0;
168     MirrorOp *op;
169     int pnum;
170     int64_t ret;
171 
172     max_iov = MIN(source->bl.max_iov, s->target->bl.max_iov);
173 
174     s->sector_num = hbitmap_iter_next(&s->hbi);
175     if (s->sector_num < 0) {
176         bdrv_dirty_iter_init(s->dirty_bitmap, &s->hbi);
177         s->sector_num = hbitmap_iter_next(&s->hbi);
178         trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
179         assert(s->sector_num >= 0);
180     }
181 
182     hbitmap_next_sector = s->sector_num;
183     sector_num = s->sector_num;
184     sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
185     end = s->bdev_length / BDRV_SECTOR_SIZE;
186 
187     /* Extend the QEMUIOVector to include all adjacent blocks that will
188      * be copied in this operation.
189      *
190      * We have to do this if we have no backing file yet in the destination, and
191      * the cluster size is larger than the granularity; then we do COW ourselves.
192      * The first time a cluster is copied, copy it entirely.  Note that,
193      * because both the granularity and the cluster size are powers of two,
194      * the number of sectors to copy cannot exceed one cluster.
195      *
196      * We also want to extend the QEMUIOVector to include more adjacent
197      * dirty blocks if possible, to limit the number of I/O operations and
198      * run efficiently even with a small granularity.
199      */
200     nb_chunks = 0;
201     nb_sectors = 0;
202     next_sector = sector_num;
203     next_chunk = sector_num / sectors_per_chunk;
204 
205     /* Wait for I/O to this cluster (from a previous iteration) to be done.  */
206     while (test_bit(next_chunk, s->in_flight_bitmap)) {
207         trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
208         s->waiting_for_io = true;
209         qemu_coroutine_yield();
210         s->waiting_for_io = false;
211     }
212 
213     do {
214         int added_sectors, added_chunks;
215 
216         if (!bdrv_get_dirty(source, s->dirty_bitmap, next_sector) ||
217             test_bit(next_chunk, s->in_flight_bitmap)) {
218             assert(nb_sectors > 0);
219             break;
220         }
221 
222         added_sectors = sectors_per_chunk;
223         if (s->cow_bitmap && !test_bit(next_chunk, s->cow_bitmap)) {
224             bdrv_round_to_clusters(s->target,
225                                    next_sector, added_sectors,
226                                    &next_sector, &added_sectors);
227 
228             /* On the first iteration, the rounding may make us copy
229              * sectors before the first dirty one.
230              */
231             if (next_sector < sector_num) {
232                 assert(nb_sectors == 0);
233                 sector_num = next_sector;
234                 next_chunk = next_sector / sectors_per_chunk;
235             }
236         }
237 
238         added_sectors = MIN(added_sectors, end - (sector_num + nb_sectors));
239         added_chunks = (added_sectors + sectors_per_chunk - 1) / sectors_per_chunk;
240 
241         /* When doing COW, it may happen that there are not enough free buffer
242          * chunks for a full cluster.  Wait if that is the case.
243          */
244         while (nb_chunks == 0 && s->buf_free_count < added_chunks) {
245             trace_mirror_yield_buf_busy(s, nb_chunks, s->in_flight);
246             s->waiting_for_io = true;
247             qemu_coroutine_yield();
248             s->waiting_for_io = false;
249         }
250         if (s->buf_free_count < nb_chunks + added_chunks) {
251             trace_mirror_break_buf_busy(s, nb_chunks, s->in_flight);
252             break;
253         }
254         if (max_iov < nb_chunks + added_chunks) {
255             trace_mirror_break_iov_max(s, nb_chunks, added_chunks);
256             break;
257         }
258 
259         /* We have enough free buffer chunks to copy these sectors.  */
260         bitmap_set(s->in_flight_bitmap, next_chunk, added_chunks);
261 
262         nb_sectors += added_sectors;
263         nb_chunks += added_chunks;
264         next_sector += added_sectors;
265         next_chunk += added_chunks;
266         if (!s->synced && s->common.speed) {
267             delay_ns = ratelimit_calculate_delay(&s->limit, added_sectors);
268         }
269     } while (delay_ns == 0 && next_sector < end);
270 
271     /* Allocate a MirrorOp that is passed to the AIO completion callbacks.  */
272     op = g_new(MirrorOp, 1);
273     op->s = s;
274     op->sector_num = sector_num;
275     op->nb_sectors = nb_sectors;
276 
277     /* Now make a QEMUIOVector taking enough granularity-sized chunks
278      * from s->buf_free.
279      */
280     qemu_iovec_init(&op->qiov, nb_chunks);
281     next_sector = sector_num;
282     while (nb_chunks-- > 0) {
283         MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
284         size_t remaining = (nb_sectors * BDRV_SECTOR_SIZE) - op->qiov.size;
285 
286         QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
287         s->buf_free_count--;
288         qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
289 
290         /* Advance the HBitmapIter in parallel, so that we do not examine
291          * the same sector twice.
292          */
293         if (next_sector > hbitmap_next_sector
294             && bdrv_get_dirty(source, s->dirty_bitmap, next_sector)) {
295             hbitmap_next_sector = hbitmap_iter_next(&s->hbi);
296         }
297 
298         next_sector += sectors_per_chunk;
299     }
300 
301     bdrv_reset_dirty_bitmap(s->dirty_bitmap, sector_num, nb_sectors);
302 
303     /* Copy the dirty cluster.  */
304     s->in_flight++;
305     s->sectors_in_flight += nb_sectors;
306     trace_mirror_one_iteration(s, sector_num, nb_sectors);
307 
308     ret = bdrv_get_block_status_above(source, NULL, sector_num,
309                                       nb_sectors, &pnum);
310     if (ret < 0 || pnum < nb_sectors ||
311             (ret & BDRV_BLOCK_DATA && !(ret & BDRV_BLOCK_ZERO))) {
312         bdrv_aio_readv(source, sector_num, &op->qiov, nb_sectors,
313                        mirror_read_complete, op);
314     } else if (ret & BDRV_BLOCK_ZERO) {
315         bdrv_aio_write_zeroes(s->target, sector_num, op->nb_sectors,
316                               s->unmap ? BDRV_REQ_MAY_UNMAP : 0,
317                               mirror_write_complete, op);
318     } else {
319         assert(!(ret & BDRV_BLOCK_DATA));
320         bdrv_aio_discard(s->target, sector_num, op->nb_sectors,
321                          mirror_write_complete, op);
322     }
323     return delay_ns;
324 }
325 
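/* Carve s->buf into granularity-sized chunks and put them all on the free
 * list; each chunk doubles as a MirrorBuffer node while it is free. */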
326 static void mirror_free_init(MirrorBlockJob *s)
327 {
328     int granularity = s->granularity;
329     size_t buf_size = s->buf_size;
330     uint8_t *buf = s->buf;
331 
332     assert(s->buf_free_count == 0);
333     QSIMPLEQ_INIT(&s->buf_free);
334     while (buf_size != 0) {
335         MirrorBuffer *cur = (MirrorBuffer *)buf;
336         QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
337         s->buf_free_count++;
338         buf_size -= granularity;
339         buf += granularity;
340     }
341 }
342 
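/* Yield until every in-flight operation has completed. */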
343 static void mirror_drain(MirrorBlockJob *s)
344 {
345     while (s->in_flight > 0) {
346         s->waiting_for_io = true;
347         qemu_coroutine_yield();
348         s->waiting_for_io = false;
349     }
350 }
351 
352 typedef struct {
353     int ret;
354 } MirrorExitData;
355 
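/* Runs in the main loop after mirror_run finishes.  On successful completion
 * the target replaces the source (or the node named by 'replaces') in the
 * graph; in all cases the references and blockers taken by the job are
 * released. */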
356 static void mirror_exit(BlockJob *job, void *opaque)
357 {
358     MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
359     MirrorExitData *data = opaque;
360     AioContext *replace_aio_context = NULL;
361     BlockDriverState *src = s->common.bs;
362 
363     /* Make sure that the source BDS doesn't go away until we have called
364      * block_job_completed(). */
365     bdrv_ref(src);
366 
367     if (s->to_replace) {
368         replace_aio_context = bdrv_get_aio_context(s->to_replace);
369         aio_context_acquire(replace_aio_context);
370     }
371 
372     if (s->should_complete && data->ret == 0) {
373         BlockDriverState *to_replace = s->common.bs;
374         if (s->to_replace) {
375             to_replace = s->to_replace;
376         }
377 
378         /* This was checked in mirror_start_job(), but meanwhile one of the
379          * nodes could have been newly attached to a BlockBackend. */
380         if (to_replace->blk && s->target->blk) {
381             error_report("block job: Can't create node with two BlockBackends");
382             data->ret = -EINVAL;
383             goto out;
384         }
385 
386         if (bdrv_get_flags(s->target) != bdrv_get_flags(to_replace)) {
387             bdrv_reopen(s->target, bdrv_get_flags(to_replace), NULL);
388         }
389         bdrv_replace_in_backing_chain(to_replace, s->target);
390     }
391 
392 out:
393     if (s->to_replace) {
394         bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
395         error_free(s->replace_blocker);
396         bdrv_unref(s->to_replace);
397     }
398     if (replace_aio_context) {
399         aio_context_release(replace_aio_context);
400     }
401     g_free(s->replaces);
402     bdrv_op_unblock_all(s->target, s->common.blocker);
403     bdrv_unref(s->target);
404     block_job_completed(&s->common, data->ret);
405     g_free(data);
406     bdrv_drained_end(src);
407     bdrv_unref(src);
408 }
409 
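/* The job coroutine.  Populate the dirty bitmap for the 'full' and 'top'
 * sync modes, then loop copying dirty data until the job is cancelled or
 * completed, and finally defer cleanup to mirror_exit. */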
410 static void coroutine_fn mirror_run(void *opaque)
411 {
412     MirrorBlockJob *s = opaque;
413     MirrorExitData *data;
414     BlockDriverState *bs = s->common.bs;
415     int64_t sector_num, end, length;
416     uint64_t last_pause_ns;
417     BlockDriverInfo bdi;
417     char backing_filename[2]; /* we only need 2 characters because we only
418                                  check whether the string is empty */
420     int ret = 0;
421     int n;
422 
423     if (block_job_is_cancelled(&s->common)) {
424         goto immediate_exit;
425     }
426 
427     s->bdev_length = bdrv_getlength(bs);
428     if (s->bdev_length < 0) {
429         ret = s->bdev_length;
430         goto immediate_exit;
431     } else if (s->bdev_length == 0) {
432         /* Report BLOCK_JOB_READY and wait to be completed or cancelled. */
433         block_job_event_ready(&s->common);
434         s->synced = true;
435         while (!block_job_is_cancelled(&s->common) && !s->should_complete) {
436             block_job_yield(&s->common);
437         }
438         s->common.cancelled = false;
439         goto immediate_exit;
440     }
441 
442     length = DIV_ROUND_UP(s->bdev_length, s->granularity);
443     s->in_flight_bitmap = bitmap_new(length);
444 
445     /* If we have no backing file yet in the destination, we cannot let
446      * the destination do COW.  Instead, we copy sectors around the
447      * dirty data if needed.  We need a bitmap to do that.
448      */
449     bdrv_get_backing_filename(s->target, backing_filename,
450                               sizeof(backing_filename));
451     if (backing_filename[0] && !s->target->backing) {
452         ret = bdrv_get_info(s->target, &bdi);
453         if (ret < 0) {
454             goto immediate_exit;
455         }
456         if (s->granularity < bdi.cluster_size) {
457             s->buf_size = MAX(s->buf_size, bdi.cluster_size);
458             s->cow_bitmap = bitmap_new(length);
459         }
460     }
461 
462     end = s->bdev_length / BDRV_SECTOR_SIZE;
463     s->buf = qemu_try_blockalign(bs, s->buf_size);
464     if (s->buf == NULL) {
465         ret = -ENOMEM;
466         goto immediate_exit;
467     }
468 
469     mirror_free_init(s);
470 
471     last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
472     if (!s->is_none_mode) {
473         /* First part: loop over the sectors and initialize the dirty bitmap.  */
474         BlockDriverState *base = s->base;
475         bool mark_all_dirty = s->base == NULL && !bdrv_has_zero_init(s->target);
476 
477         for (sector_num = 0; sector_num < end; ) {
478             /* Just to make sure we are not exceeding the int limit. */
479             int nb_sectors = MIN(INT_MAX >> BDRV_SECTOR_BITS,
480                                  end - sector_num);
481             int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
482 
483             if (now - last_pause_ns > SLICE_TIME) {
484                 last_pause_ns = now;
485                 block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, 0);
486             }
487 
488             if (block_job_is_cancelled(&s->common)) {
489                 goto immediate_exit;
490             }
491 
492             ret = bdrv_is_allocated_above(bs, base, sector_num, nb_sectors, &n);
493 
494             if (ret < 0) {
495                 goto immediate_exit;
496             }
497 
498             assert(n > 0);
499             if (ret == 1 || mark_all_dirty) {
500                 bdrv_set_dirty_bitmap(s->dirty_bitmap, sector_num, n);
501             }
502             sector_num += n;
503         }
504     }
505 
506     bdrv_dirty_iter_init(s->dirty_bitmap, &s->hbi);
507     for (;;) {
508         uint64_t delay_ns = 0;
509         int64_t cnt;
510         bool should_complete;
511 
512         if (s->ret < 0) {
513             ret = s->ret;
514             goto immediate_exit;
515         }
516 
517         cnt = bdrv_get_dirty_count(s->dirty_bitmap);
518         /* s->common.offset contains the number of bytes already processed,
519          * cnt is the number of dirty sectors remaining and
520          * s->sectors_in_flight is the number of sectors currently being
521          * processed; together those are the current total operation length */
522         s->common.len = s->common.offset +
523                         (cnt + s->sectors_in_flight) * BDRV_SECTOR_SIZE;
524 
525         /* Note that even when no rate limit is applied we need to yield
526          * periodically with no pending I/O so that bdrv_drain_all() returns.
527          * We do so every SLICE_TIME nanoseconds, or when there is an error,
528          * or when the source is clean, whichever comes first.
529          */
530         if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - last_pause_ns < SLICE_TIME &&
531             s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
532             if (s->in_flight == MAX_IN_FLIGHT || s->buf_free_count == 0 ||
533                 (cnt == 0 && s->in_flight > 0)) {
534                 trace_mirror_yield(s, s->in_flight, s->buf_free_count, cnt);
535                 s->waiting_for_io = true;
536                 qemu_coroutine_yield();
537                 s->waiting_for_io = false;
538                 continue;
539             } else if (cnt != 0) {
540                 delay_ns = mirror_iteration(s);
541             }
542         }
543 
544         should_complete = false;
545         if (s->in_flight == 0 && cnt == 0) {
546             trace_mirror_before_flush(s);
547             ret = bdrv_flush(s->target);
548             if (ret < 0) {
549                 if (mirror_error_action(s, false, -ret) ==
550                     BLOCK_ERROR_ACTION_REPORT) {
551                     goto immediate_exit;
552                 }
553             } else {
554                 /* We're out of the streaming phase.  From now on, if the job
555                  * is cancelled we will actually complete all pending I/O and
556                  * report completion.  This way, block-job-cancel will leave
557                  * the target in a consistent state.
558                  */
559                 if (!s->synced) {
560                     block_job_event_ready(&s->common);
561                     s->synced = true;
562                 }
563 
564                 should_complete = s->should_complete ||
565                     block_job_is_cancelled(&s->common);
566                 cnt = bdrv_get_dirty_count(s->dirty_bitmap);
567             }
568         }
569 
570         if (cnt == 0 && should_complete) {
571             /* The dirty bitmap is not updated while operations are pending.
572              * If we're about to exit, wait for pending operations before
573              * calling bdrv_get_dirty_count(bs), or we may exit while the
574              * source has dirty data to copy!
575              *
576              * Note that I/O can still be submitted by the guest while
577              * this completion check runs.
578              */
579             trace_mirror_before_drain(s, cnt);
580             bdrv_drain(bs);
581             cnt = bdrv_get_dirty_count(s->dirty_bitmap);
582         }
583 
584         ret = 0;
585         trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
586         if (!s->synced) {
587             block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
588             if (block_job_is_cancelled(&s->common)) {
589                 break;
590             }
591         } else if (!should_complete) {
592             delay_ns = (s->in_flight == 0 && cnt == 0 ? SLICE_TIME : 0);
593             block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
594         } else if (cnt == 0) {
595             /* The two disks are in sync.  Exit and report successful
596              * completion.
597              */
598             assert(QLIST_EMPTY(&bs->tracked_requests));
599             s->common.cancelled = false;
600             break;
601         }
602         last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
603     }
604 
605 immediate_exit:
606     if (s->in_flight > 0) {
607         /* We get here only if something went wrong.  Either the job failed,
608          * or it was cancelled prematurely so that we do not guarantee that
609          * the target is a copy of the source.
610          */
611         assert(ret < 0 || (!s->synced && block_job_is_cancelled(&s->common)));
612         mirror_drain(s);
613     }
614 
615     assert(s->in_flight == 0);
616     qemu_vfree(s->buf);
617     g_free(s->cow_bitmap);
618     g_free(s->in_flight_bitmap);
619     bdrv_release_dirty_bitmap(bs, s->dirty_bitmap);
620     if (s->target->blk) {
621         blk_iostatus_disable(s->target->blk);
622     }
623 
624     data = g_malloc(sizeof(*data));
625     data->ret = ret;
626     /* Before we switch to target in mirror_exit, make sure data doesn't
627      * change. */
628     bdrv_drained_begin(s->common.bs);
629     block_job_defer_to_main_loop(&s->common, mirror_exit, data);
630 }
631 
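/* 'speed' is in bytes per second; the rate limiter counts sectors, so
 * convert before handing it over. */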
632 static void mirror_set_speed(BlockJob *job, int64_t speed, Error **errp)
633 {
634     MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
635 
636     if (speed < 0) {
637         error_setg(errp, QERR_INVALID_PARAMETER, "speed");
638         return;
639     }
640     ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
641 }
642 
643 static void mirror_iostatus_reset(BlockJob *job)
644 {
645     MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
646 
647     if (s->target->blk) {
648         blk_iostatus_reset(s->target->blk);
649     }
650 }
651 
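/* Handle block-job-complete: make sure the target's backing file is open,
 * pin the node named by 'replaces' (if any) so it cannot go away before
 * mirror_exit runs, and ask the job coroutine to finish. */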
652 static void mirror_complete(BlockJob *job, Error **errp)
653 {
654     MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
655     Error *local_err = NULL;
656     int ret;
657 
658     ret = bdrv_open_backing_file(s->target, NULL, "backing", &local_err);
659     if (ret < 0) {
660         error_propagate(errp, local_err);
661         return;
662     }
663     if (!s->synced) {
664         error_setg(errp, QERR_BLOCK_JOB_NOT_READY, job->id);
665         return;
666     }
667 
668     /* Look up the node to be replaced and block all operations on it */
669     if (s->replaces) {
670         AioContext *replace_aio_context;
671 
672         s->to_replace = bdrv_find_node(s->replaces);
673         if (!s->to_replace) {
674             error_setg(errp, "Node name '%s' not found", s->replaces);
675             return;
676         }
677 
678         replace_aio_context = bdrv_get_aio_context(s->to_replace);
679         aio_context_acquire(replace_aio_context);
680 
681         error_setg(&s->replace_blocker,
682                    "block device is in use by block-job-complete");
683         bdrv_op_block_all(s->to_replace, s->replace_blocker);
684         bdrv_ref(s->to_replace);
685 
686         aio_context_release(replace_aio_context);
687     }
688 
689     s->should_complete = true;
690     block_job_enter(&s->common);
691 }
692 
693 static const BlockJobDriver mirror_job_driver = {
694     .instance_size = sizeof(MirrorBlockJob),
695     .job_type      = BLOCK_JOB_TYPE_MIRROR,
696     .set_speed     = mirror_set_speed,
697     .iostatus_reset = mirror_iostatus_reset,
698     .complete      = mirror_complete,
699 };
700 
701 static const BlockJobDriver commit_active_job_driver = {
702     .instance_size = sizeof(MirrorBlockJob),
703     .job_type      = BLOCK_JOB_TYPE_COMMIT,
704     .set_speed     = mirror_set_speed,
705     .iostatus_reset = mirror_iostatus_reset,
707     .complete      = mirror_complete,
708 };
709 
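/* Common setup shared by drive-mirror and active commit.  For sync=top,
 * 'base' bounds the initial dirty bitmap population; 'is_none_mode' skips
 * that population entirely. */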
710 static void mirror_start_job(BlockDriverState *bs, BlockDriverState *target,
711                              const char *replaces,
712                              int64_t speed, uint32_t granularity,
713                              int64_t buf_size,
714                              BlockdevOnError on_source_error,
715                              BlockdevOnError on_target_error,
716                              bool unmap,
717                              BlockCompletionFunc *cb,
718                              void *opaque, Error **errp,
719                              const BlockJobDriver *driver,
720                              bool is_none_mode, BlockDriverState *base)
721 {
722     MirrorBlockJob *s;
723     BlockDriverState *replaced_bs;
724 
725     if (granularity == 0) {
726         granularity = bdrv_get_default_bitmap_granularity(target);
727     }
728 
729     assert((granularity & (granularity - 1)) == 0);
730 
731     if ((on_source_error == BLOCKDEV_ON_ERROR_STOP ||
732          on_source_error == BLOCKDEV_ON_ERROR_ENOSPC) &&
733         (!bs->blk || !blk_iostatus_is_enabled(bs->blk))) {
734         error_setg(errp, QERR_INVALID_PARAMETER, "on-source-error");
735         return;
736     }
737 
738     if (buf_size < 0) {
739         error_setg(errp, "Invalid parameter 'buf-size'");
740         return;
741     }
742 
743     if (buf_size == 0) {
744         buf_size = DEFAULT_MIRROR_BUF_SIZE;
745     }
746 
747     /* We can't support this case as long as the block layer can't handle
748      * multiple BlockBackends per BlockDriverState. */
749     if (replaces) {
750         replaced_bs = bdrv_lookup_bs(replaces, replaces, errp);
751         if (replaced_bs == NULL) {
752             return;
753         }
754     } else {
755         replaced_bs = bs;
756     }
757     if (replaced_bs->blk && target->blk) {
758         error_setg(errp, "Can't create node with two BlockBackends");
759         return;
760     }
761 
762     s = block_job_create(driver, bs, speed, cb, opaque, errp);
763     if (!s) {
764         return;
765     }
766 
767     s->replaces = g_strdup(replaces);
768     s->on_source_error = on_source_error;
769     s->on_target_error = on_target_error;
770     s->target = target;
771     s->is_none_mode = is_none_mode;
772     s->base = base;
773     s->granularity = granularity;
774     s->buf_size = ROUND_UP(buf_size, granularity);
775     s->unmap = unmap;
776 
777     s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
778     if (!s->dirty_bitmap) {
779         g_free(s->replaces);
780         block_job_unref(&s->common);
781         return;
782     }
783 
784     bdrv_op_block_all(s->target, s->common.blocker);
785 
786     bdrv_set_enable_write_cache(s->target, true);
787     if (s->target->blk) {
788         blk_set_on_error(s->target->blk, on_target_error, on_target_error);
789         blk_iostatus_enable(s->target->blk);
790     }
791     s->common.co = qemu_coroutine_create(mirror_run);
792     trace_mirror_start(bs, s, s->common.co, opaque);
793     qemu_coroutine_enter(s->common.co, s);
794 }
795 
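/* QMP drive-mirror entry point.  Sync mode 'incremental' is rejected here;
 * 'top' copies only sectors allocated above the backing file, and 'none'
 * starts with an empty dirty bitmap. */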
796 void mirror_start(BlockDriverState *bs, BlockDriverState *target,
797                   const char *replaces,
798                   int64_t speed, uint32_t granularity, int64_t buf_size,
799                   MirrorSyncMode mode, BlockdevOnError on_source_error,
800                   BlockdevOnError on_target_error,
801                   bool unmap,
802                   BlockCompletionFunc *cb,
803                   void *opaque, Error **errp)
804 {
805     bool is_none_mode;
806     BlockDriverState *base;
807 
808     if (mode == MIRROR_SYNC_MODE_INCREMENTAL) {
809         error_setg(errp, "Sync mode 'incremental' not supported");
810         return;
811     }
812     is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
813     base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL;
814     mirror_start_job(bs, target, replaces,
815                      speed, granularity, buf_size,
816                      on_source_error, on_target_error, unmap, cb, opaque, errp,
817                      &mirror_job_driver, is_none_mode, base);
818 }
819 
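/* Active commit reuses the mirror machinery with the backing image as the
 * target: reopen the base with the top image's flags, grow it if the top is
 * larger, then start a job whose completion replaces the top with the base
 * in the backing chain. */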
820 void commit_active_start(BlockDriverState *bs, BlockDriverState *base,
821                          int64_t speed,
822                          BlockdevOnError on_error,
823                          BlockCompletionFunc *cb,
824                          void *opaque, Error **errp)
825 {
826     int64_t length, base_length;
827     int orig_base_flags;
828     int ret;
829     Error *local_err = NULL;
830 
831     orig_base_flags = bdrv_get_flags(base);
832 
833     if (bdrv_reopen(base, bs->open_flags, errp)) {
834         return;
835     }
836 
837     length = bdrv_getlength(bs);
838     if (length < 0) {
839         error_setg_errno(errp, -length,
840                          "Unable to determine length of %s", bs->filename);
841         goto error_restore_flags;
842     }
843 
844     base_length = bdrv_getlength(base);
845     if (base_length < 0) {
846         error_setg_errno(errp, -base_length,
847                          "Unable to determine length of %s", base->filename);
848         goto error_restore_flags;
849     }
850 
851     if (length > base_length) {
852         ret = bdrv_truncate(base, length);
853         if (ret < 0) {
854             error_setg_errno(errp, -ret,
855                             "Top image %s is larger than base image %s, and "
856                              "resize of base image failed",
857                              bs->filename, base->filename);
858             goto error_restore_flags;
859         }
860     }
861 
862     bdrv_ref(base);
863     mirror_start_job(bs, base, NULL, speed, 0, 0,
864                      on_error, on_error, false, cb, opaque, &local_err,
865                      &commit_active_job_driver, false, base);
866     if (local_err) {
867         error_propagate(errp, local_err);
868         goto error_restore_flags;
869     }
870 
871     return;
872 
873 error_restore_flags:
874     /* ignore error and errp for bdrv_reopen, because we want to propagate
875      * the original error */
876     bdrv_reopen(base, orig_base_flags, NULL);
877     return;
878 }
879