/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/coroutine.h"
#include "qemu/range.h"
#include "trace.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "block/dirty-bitmap.h"
#include "system/block-backend.h"
#include "qapi/error.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"
#include "qemu/memalign.h"

#define MAX_IN_FLIGHT 16
#define MAX_IO_BYTES (1 << 20) /* 1 MiB */
#define DEFAULT_MIRROR_BUF_SIZE (MAX_IN_FLIGHT * MAX_IO_BYTES)
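
/*
 * With these defaults, the background copy buffer is
 * MAX_IN_FLIGHT * MAX_IO_BYTES = 16 * 1 MiB = 16 MiB, enough to keep
 * MAX_IN_FLIGHT maximum-sized copy operations in flight at once.
 */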

/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;
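
/*
 * A MirrorBuffer is not allocated separately: while a chunk of the mirroring
 * buffer is free, its first bytes are reused as the QSIMPLEQ link (see the
 * casts in mirror_free_init() and mirror_iteration_done()).
 */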

typedef struct MirrorOp MirrorOp;

typedef struct MirrorBlockJob {
    BlockJob common;
    BlockBackend *target;
    BlockDriverState *mirror_top_bs;
    BlockDriverState *base;
    BlockDriverState *base_overlay;

    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    MirrorSyncMode sync_mode;
    BlockMirrorBackingMode backing_mode;
    /* Whether the target should be assumed to be already zero initialized */
    bool target_is_zero;
    /*
     * To be accessed with atomics. Written only under the BQL (required by the
     * current implementation of mirror_change()).
     */
    MirrorCopyMode copy_mode;
    BlockdevOnError on_source_error, on_target_error;
    /*
     * To be accessed with atomics.
     *
     * Set when the target is synced (dirty bitmap is clean, nothing in flight)
     * and the job is running in active mode.
     */
    bool actively_synced;
    bool should_complete;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    unsigned long *zero_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    BdrvDirtyBitmapIter *dbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    uint64_t last_pause_ns;
    unsigned long *in_flight_bitmap;
    unsigned in_flight;
    int64_t bytes_in_flight;
    QTAILQ_HEAD(, MirrorOp) ops_in_flight;
    int ret;
    bool unmap;
    int target_cluster_size;
    int max_iov;
    bool initial_zeroing_ongoing;
    int in_active_write_counter;
    int64_t active_write_bytes_in_flight;
    bool prepared;
    bool in_drain;
    bool base_ro;
} MirrorBlockJob;

typedef struct MirrorBDSOpaque {
    MirrorBlockJob *job;
    bool stop;
    bool is_commit;
} MirrorBDSOpaque;

struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t offset;
    uint64_t bytes;

    /*
     * These pointers are set by mirror_co_read(), mirror_co_zero(), and
     * mirror_co_discard() before yielding for the first time
     */
    int64_t *bytes_handled;
    bool *io_skipped;

    bool is_pseudo_op;
    bool is_active_write;
    bool is_in_flight;
    CoQueue waiting_requests;
    Coroutine *co;
    MirrorOp *waiting_for_op;

    QTAILQ_ENTRY(MirrorOp) next;
};

typedef enum MirrorMethod {
    MIRROR_METHOD_COPY,
    MIRROR_METHOD_ZERO,
    MIRROR_METHOD_DISCARD,
} MirrorMethod;

static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    qatomic_set(&s->actively_synced, false);
    if (read) {
        return block_job_error_action(&s->common, s->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&s->common, s->on_target_error,
                                      false, error);
    }
}

static void coroutine_fn mirror_wait_on_conflicts(MirrorOp *self,
                                                  MirrorBlockJob *s,
                                                  uint64_t offset,
                                                  uint64_t bytes)
{
    uint64_t self_start_chunk = offset / s->granularity;
    uint64_t self_end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity);
    uint64_t self_nb_chunks = self_end_chunk - self_start_chunk;

    while (find_next_bit(s->in_flight_bitmap, self_end_chunk,
                         self_start_chunk) < self_end_chunk &&
           s->ret >= 0)
    {
        MirrorOp *op;

        QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
            uint64_t op_start_chunk = op->offset / s->granularity;
            uint64_t op_nb_chunks = DIV_ROUND_UP(op->offset + op->bytes,
                                                 s->granularity) -
                                    op_start_chunk;

            if (op == self) {
                continue;
            }

            if (ranges_overlap(self_start_chunk, self_nb_chunks,
                               op_start_chunk, op_nb_chunks))
            {
                if (self) {
                    /*
                     * If the operation is already (indirectly) waiting for us,
                     * or will wait for us as soon as it wakes up, then just go
                     * on (instead of producing a deadlock in the former case).
                     */
                    if (op->waiting_for_op) {
                        continue;
                    }

                    self->waiting_for_op = op;
                }

                qemu_co_queue_wait(&op->waiting_requests, NULL);

                if (self) {
                    self->waiting_for_op = NULL;
                }

                break;
            }
        }
    }
}
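
/*
 * For illustration (hypothetical numbers): with a 64 KiB granularity, an
 * in-flight op covering bytes [0, 96 KiB) occupies chunks 0 and 1, so a
 * request for [64 KiB, 128 KiB) (chunk 1) conflicts and must wait, while a
 * request for [128 KiB, 192 KiB) (chunk 2) proceeds immediately.
 */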

static void coroutine_fn mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks;

    trace_mirror_iteration_done(s, op->offset, op->bytes, ret);

    s->in_flight--;
    s->bytes_in_flight -= op->bytes;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    chunk_num = op->offset / s->granularity;
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    QTAILQ_REMOVE(&s->ops_in_flight, op, next);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        if (!s->initial_zeroing_ongoing) {
            job_progress_update(&s->common.job, op->bytes);
        }
    }
    qemu_iovec_destroy(&op->qiov);

    qemu_co_queue_restart_all(&op->waiting_requests);
    g_free(op);
}

static void coroutine_fn mirror_write_complete(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;

    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }

    mirror_iteration_done(op, ret);
}

static void coroutine_fn mirror_read_complete(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;

    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
        return;
    }

    ret = blk_co_pwritev(s->target, op->offset, op->qiov.size, &op->qiov, 0);
    mirror_write_complete(op, ret);
}

/* Clip bytes relative to offset to not exceed end-of-file */
static inline int64_t mirror_clip_bytes(MirrorBlockJob *s,
                                        int64_t offset,
                                        int64_t bytes)
{
    return MIN(bytes, s->bdev_length - offset);
}

/* Round offset and/or bytes to target cluster if COW is needed, and
 * return the offset of the adjusted tail against original. */
static int coroutine_fn mirror_cow_align(MirrorBlockJob *s, int64_t *offset,
                                         uint64_t *bytes)
{
    bool need_cow;
    int ret = 0;
    int64_t align_offset = *offset;
    int64_t align_bytes = *bytes;
    int max_bytes = s->granularity * s->max_iov;

    need_cow = !test_bit(*offset / s->granularity, s->cow_bitmap);
    need_cow |= !test_bit((*offset + *bytes - 1) / s->granularity,
                          s->cow_bitmap);
    if (need_cow) {
        bdrv_round_to_subclusters(blk_bs(s->target), *offset, *bytes,
                                  &align_offset, &align_bytes);
    }

    if (align_bytes > max_bytes) {
        align_bytes = max_bytes;
        if (need_cow) {
            align_bytes = QEMU_ALIGN_DOWN(align_bytes, s->target_cluster_size);
        }
    }
    /* Clipping may result in align_bytes unaligned to chunk boundary, but
     * that doesn't matter because it's already the end of source image. */
    align_bytes = mirror_clip_bytes(s, align_offset, align_bytes);

    ret = align_offset + align_bytes - (*offset + *bytes);
    *offset = align_offset;
    *bytes = align_bytes;
    assert(ret >= 0);
    return ret;
}
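
/*
 * For illustration (hypothetical numbers): with a 64 KiB granularity and a
 * 128 KiB target cluster size, a not-yet-copied request for
 * [128 KiB, 192 KiB) is widened to the whole cluster [128 KiB, 256 KiB) so
 * that the target never sees a partial-cluster write; the return value
 * (64 KiB here) is how far the tail moved past the caller's original end.
 */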

static inline void coroutine_fn
mirror_wait_for_free_in_flight_slot(MirrorBlockJob *s)
{
    MirrorOp *op;

    QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
        /*
         * Do not wait on pseudo ops, because it may in turn wait on
         * some other operation to start, which may in fact be the
         * caller of this function. Since there is only one pseudo op
         * at any given time, we will always find some real operation
         * to wait on.
         * Also, do not wait on active operations, because they do not
         * use up in-flight slots.
         */
        if (!op->is_pseudo_op && op->is_in_flight && !op->is_active_write) {
            qemu_co_queue_wait(&op->waiting_requests, NULL);
            return;
        }
    }
    abort();
}

/* Perform a mirror copy operation.
 *
 * *op->bytes_handled is set to the number of bytes copied after and
 * including offset, excluding any bytes copied prior to offset due
 * to alignment. This will be op->bytes if no alignment is necessary,
 * or (new_end - op->offset) if the tail is rounded up or down due to
 * alignment or buffer limit.
 */
static void coroutine_fn mirror_co_read(void *opaque)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    int nb_chunks;
    int ret = -1;
    uint64_t max_bytes;

    max_bytes = s->granularity * s->max_iov;

    /* We can only handle as much as buf_size at a time. */
    op->bytes = MIN(s->buf_size, MIN(max_bytes, op->bytes));
    assert(op->bytes);
    assert(op->bytes < BDRV_REQUEST_MAX_BYTES);
    *op->bytes_handled = op->bytes;

    if (s->cow_bitmap) {
        *op->bytes_handled += mirror_cow_align(s, &op->offset, &op->bytes);
    }
    /* Cannot exceed BDRV_REQUEST_MAX_BYTES + INT_MAX */
    assert(*op->bytes_handled <= UINT_MAX);
    assert(op->bytes <= s->buf_size);
    /* The offset is granularity-aligned because:
     * 1) Caller passes in aligned values;
     * 2) mirror_cow_align is used only when target cluster is larger. */
    assert(QEMU_IS_ALIGNED(op->offset, s->granularity));
    /* The range is sector-aligned, since bdrv_getlength() rounds up. */
    assert(QEMU_IS_ALIGNED(op->bytes, BDRV_SECTOR_SIZE));
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    while (s->buf_free_count < nb_chunks) {
        trace_mirror_yield_in_flight(s, op->offset, s->in_flight);
        mirror_wait_for_free_in_flight_slot(s);
    }

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = op->bytes - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
    }

    /* Copy the dirty cluster. */
    s->in_flight++;
    s->bytes_in_flight += op->bytes;
    op->is_in_flight = true;
    trace_mirror_one_iteration(s, op->offset, op->bytes);

    WITH_GRAPH_RDLOCK_GUARD() {
        ret = bdrv_co_preadv(s->mirror_top_bs->backing, op->offset, op->bytes,
                             &op->qiov, 0);
    }
    mirror_read_complete(op, ret);
}

static void coroutine_fn mirror_co_zero(void *opaque)
{
    MirrorOp *op = opaque;
    bool write_needed = true;
    int ret = 0;

    op->s->in_flight++;
    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;
    op->is_in_flight = true;

    if (op->s->zero_bitmap) {
        unsigned long end = DIV_ROUND_UP(op->offset + op->bytes,
                                         op->s->granularity);
        assert(QEMU_IS_ALIGNED(op->offset, op->s->granularity));
        assert(QEMU_IS_ALIGNED(op->bytes, op->s->granularity) ||
               op->offset + op->bytes == op->s->bdev_length);
        if (find_next_zero_bit(op->s->zero_bitmap, end,
                               op->offset / op->s->granularity) == end) {
            write_needed = false;
            *op->io_skipped = true;
        }
    }
    if (write_needed) {
        ret = blk_co_pwrite_zeroes(op->s->target, op->offset, op->bytes,
                                   op->s->unmap ? BDRV_REQ_MAY_UNMAP : 0);
    }
    if (ret >= 0 && op->s->zero_bitmap) {
        bitmap_set(op->s->zero_bitmap, op->offset / op->s->granularity,
                   DIV_ROUND_UP(op->bytes, op->s->granularity));
    }
    mirror_write_complete(op, ret);
}

static void coroutine_fn mirror_co_discard(void *opaque)
{
    MirrorOp *op = opaque;
    int ret;

    op->s->in_flight++;
    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;
    op->is_in_flight = true;

    ret = blk_co_pdiscard(op->s->target, op->offset, op->bytes);
    mirror_write_complete(op, ret);
}

static unsigned mirror_perform(MirrorBlockJob *s, int64_t offset,
                               unsigned bytes, MirrorMethod mirror_method,
                               bool *io_skipped)
{
    MirrorOp *op;
    Coroutine *co;
    int64_t bytes_handled = -1;

    assert(QEMU_IS_ALIGNED(offset, s->granularity));
    assert(QEMU_IS_ALIGNED(bytes, s->granularity) ||
           offset + bytes == s->bdev_length);
    op = g_new(MirrorOp, 1);
    *op = (MirrorOp){
        .s = s,
        .offset = offset,
        .bytes = bytes,
        .bytes_handled = &bytes_handled,
        .io_skipped = io_skipped,
    };
    qemu_co_queue_init(&op->waiting_requests);

    switch (mirror_method) {
    case MIRROR_METHOD_COPY:
        if (s->zero_bitmap) {
            bitmap_clear(s->zero_bitmap, offset / s->granularity,
                         DIV_ROUND_UP(bytes, s->granularity));
        }
        co = qemu_coroutine_create(mirror_co_read, op);
        break;
    case MIRROR_METHOD_ZERO:
        /* s->zero_bitmap handled in mirror_co_zero */
        co = qemu_coroutine_create(mirror_co_zero, op);
        break;
    case MIRROR_METHOD_DISCARD:
        if (s->zero_bitmap) {
            bitmap_clear(s->zero_bitmap, offset / s->granularity,
                         DIV_ROUND_UP(bytes, s->granularity));
        }
        co = qemu_coroutine_create(mirror_co_discard, op);
        break;
    default:
        abort();
    }
    op->co = co;

    QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);
    qemu_coroutine_enter(co);
    /* At this point, ownership of op has been moved to the coroutine
     * and the object may already be freed */

    /* Assert that this value has been set */
    assert(bytes_handled >= 0);

    /* Same assertion as in mirror_co_read() (and for mirror_co_read()
     * and mirror_co_discard(), bytes_handled == op->bytes, which
     * is the @bytes parameter given to this function) */
    assert(bytes_handled <= UINT_MAX);
    return bytes_handled;
}

static void coroutine_fn GRAPH_UNLOCKED mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source;
    MirrorOp *pseudo_op;
    int64_t offset;
    /* At least the first dirty chunk is mirrored in one iteration. */
    int nb_chunks = 1;
    bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
    int max_io_bytes = MAX(s->buf_size / MAX_IN_FLIGHT, MAX_IO_BYTES);

    bdrv_graph_co_rdlock();
    source = s->mirror_top_bs->backing->bs;
    bdrv_graph_co_rdunlock();

    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    offset = bdrv_dirty_iter_next(s->dbi);
    if (offset < 0) {
        bdrv_set_dirty_iter(s->dbi, 0);
        offset = bdrv_dirty_iter_next(s->dbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(offset >= 0);
    }
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    /*
     * Wait for concurrent requests to @offset. The next loop will limit the
     * copied area based on in_flight_bitmap so we only copy an area that does
     * not overlap with concurrent in-flight requests. Still, we would like to
     * copy something, so wait until there are at least no more requests to the
     * very beginning of the area.
     */
    mirror_wait_on_conflicts(NULL, s, offset, 1);

    job_pause_point(&s->common.job);

    /* Find the number of consecutive dirty chunks following the first dirty
     * one, and wait for in flight requests in them. */
    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    while (nb_chunks * s->granularity < s->buf_size) {
        int64_t next_dirty;
        int64_t next_offset = offset + nb_chunks * s->granularity;
        int64_t next_chunk = next_offset / s->granularity;
        if (next_offset >= s->bdev_length ||
            !bdrv_dirty_bitmap_get_locked(s->dirty_bitmap, next_offset)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
            break;
        }

        next_dirty = bdrv_dirty_iter_next(s->dbi);
        if (next_dirty > next_offset || next_dirty < 0) {
            /* The bitmap iterator's cache is stale, refresh it */
            bdrv_set_dirty_iter(s->dbi, next_offset);
            next_dirty = bdrv_dirty_iter_next(s->dbi);
        }
        assert(next_dirty == next_offset);
        nb_chunks++;
    }

    /* Clear dirty bits before querying the block status, because
     * calling bdrv_block_status_above could yield - if some blocks are
     * marked dirty in this window, we need to know.
     */
    bdrv_reset_dirty_bitmap_locked(s->dirty_bitmap, offset,
                                   nb_chunks * s->granularity);
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    /* Before claiming an area in the in-flight bitmap, we have to
     * create a MirrorOp for it so that conflicting requests can wait
     * for it. mirror_perform() will create the real MirrorOps later,
     * for now we just create a pseudo operation that will wake up all
     * conflicting requests once all real operations have been
     * launched. */
    pseudo_op = g_new(MirrorOp, 1);
    *pseudo_op = (MirrorOp){
        .offset = offset,
        .bytes = nb_chunks * s->granularity,
        .is_pseudo_op = true,
    };
    qemu_co_queue_init(&pseudo_op->waiting_requests);
    QTAILQ_INSERT_TAIL(&s->ops_in_flight, pseudo_op, next);

    bitmap_set(s->in_flight_bitmap, offset / s->granularity, nb_chunks);
    while (nb_chunks > 0 && offset < s->bdev_length) {
        int ret = -1;
        int64_t io_bytes;
        int64_t io_bytes_acct;
        bool io_skipped = false;
        MirrorMethod mirror_method = MIRROR_METHOD_COPY;

        assert(!(offset % s->granularity));
        WITH_GRAPH_RDLOCK_GUARD() {
            ret = bdrv_co_block_status_above(source, NULL, offset,
                                             nb_chunks * s->granularity,
                                             &io_bytes, NULL, NULL);
        }
        if (ret < 0) {
            io_bytes = MIN(nb_chunks * s->granularity, max_io_bytes);
        } else if (ret & BDRV_BLOCK_DATA) {
            io_bytes = MIN(io_bytes, max_io_bytes);
        }

        io_bytes -= io_bytes % s->granularity;
        if (io_bytes < s->granularity) {
            io_bytes = s->granularity;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_offset;
            int64_t target_bytes;
            WITH_GRAPH_RDLOCK_GUARD() {
                bdrv_round_to_subclusters(blk_bs(s->target), offset, io_bytes,
                                          &target_offset, &target_bytes);
            }
            if (target_offset == offset &&
                target_bytes == io_bytes) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                    MIRROR_METHOD_ZERO :
                                    MIRROR_METHOD_DISCARD;
            }
        }

        while (s->in_flight >= MAX_IN_FLIGHT) {
            trace_mirror_yield_in_flight(s, offset, s->in_flight);
            mirror_wait_for_free_in_flight_slot(s);
        }

        if (s->ret < 0) {
            ret = 0;
            goto fail;
        }

        io_bytes = mirror_clip_bytes(s, offset, io_bytes);
        io_bytes = mirror_perform(s, offset, io_bytes, mirror_method,
                                  &io_skipped);
        if (io_skipped ||
            (mirror_method != MIRROR_METHOD_COPY && write_zeroes_ok)) {
            io_bytes_acct = 0;
        } else {
            io_bytes_acct = io_bytes;
        }
        assert(io_bytes);
        offset += io_bytes;
        nb_chunks -= DIV_ROUND_UP(io_bytes, s->granularity);
        block_job_ratelimit_processed_bytes(&s->common, io_bytes_acct);
    }

fail:
    QTAILQ_REMOVE(&s->ops_in_flight, pseudo_op, next);
    qemu_co_queue_restart_all(&pseudo_op->waiting_requests);
    g_free(pseudo_op);
}

static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}
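
/*
 * Note that the loop above only terminates if s->buf_size is an exact
 * multiple of the granularity, and it relies on each granularity-sized
 * chunk being large enough to hold the MirrorBuffer link that threads it
 * onto s->buf_free.
 */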

/* This is also used for the .pause callback. There is no matching
 * mirror_resume() because mirror_run() will begin iterating again
 * when the job is resumed.
 */
static void coroutine_fn mirror_wait_for_all_io(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        mirror_wait_for_free_in_flight_slot(s);
    }
}

/**
 * mirror_exit_common: handle both abort() and prepare() cases.
 * for .prepare, returns 0 on success and -errno on failure.
 * for .abort cases, denoted by abort = true, MUST return 0.
 */
static int mirror_exit_common(Job *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockJob *bjob = &s->common;
    MirrorBDSOpaque *bs_opaque;
    BlockDriverState *src;
    BlockDriverState *target_bs;
    BlockDriverState *mirror_top_bs;
    Error *local_err = NULL;
    bool abort = job->ret < 0;
    int ret = 0;

    GLOBAL_STATE_CODE();

    if (s->prepared) {
        return 0;
    }
    s->prepared = true;

    bdrv_graph_rdlock_main_loop();

    mirror_top_bs = s->mirror_top_bs;
    bs_opaque = mirror_top_bs->opaque;
    src = mirror_top_bs->backing->bs;
    target_bs = blk_bs(s->target);

    if (bdrv_chain_contains(src, target_bs)) {
        bdrv_unfreeze_backing_chain(mirror_top_bs, target_bs);
    }

    bdrv_release_dirty_bitmap(s->dirty_bitmap);

    /* Make sure that the source BDS doesn't go away during bdrv_replace_node,
     * before we can call bdrv_drained_end */
    bdrv_ref(src);
    bdrv_ref(mirror_top_bs);
    bdrv_ref(target_bs);

    bdrv_graph_rdunlock_main_loop();

    /*
     * Remove target parent that still uses BLK_PERM_WRITE/RESIZE before
     * inserting target_bs at s->to_replace, where we might not be able to get
     * these permissions.
     */
    blk_unref(s->target);
    s->target = NULL;

    /* We don't access the source any more. Dropping any WRITE/RESIZE is
     * required before it could become a backing file of target_bs. Not having
     * these permissions any more means that we can't allow any new requests on
     * mirror_top_bs from now on, so keep it drained. */
    bdrv_drained_begin(mirror_top_bs);
    bdrv_drained_begin(target_bs);
    bs_opaque->stop = true;

    bdrv_graph_rdlock_main_loop();
    bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
                             &error_abort);
    bdrv_graph_rdunlock_main_loop();

    if (!abort && s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
        BlockDriverState *backing;
        BlockDriverState *unfiltered_target;

        bdrv_graph_wrlock_drained();
        unfiltered_target = bdrv_skip_filters(target_bs);

        backing = s->sync_mode == MIRROR_SYNC_MODE_NONE ? src : s->base;
        if (bdrv_cow_bs(unfiltered_target) != backing) {
            bdrv_set_backing_hd(unfiltered_target, backing, &local_err);
            if (local_err) {
                error_report_err(local_err);
                local_err = NULL;
                ret = -EPERM;
            }
        }
        bdrv_graph_wrunlock();
    } else if (!abort && s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) {
        bdrv_graph_rdlock_main_loop();
        assert(!bdrv_backing_chain_next(target_bs));
        ret = bdrv_open_backing_file(bdrv_skip_filters(target_bs), NULL,
                                     "backing", &local_err);
        bdrv_graph_rdunlock_main_loop();
        if (ret < 0) {
            error_report_err(local_err);
            local_err = NULL;
        }
    }

    if (s->should_complete && !abort) {
        BlockDriverState *to_replace = s->to_replace ?: src;
        bool ro = bdrv_is_read_only(to_replace);

        if (ro != bdrv_is_read_only(target_bs)) {
            bdrv_reopen_set_read_only(target_bs, ro, NULL);
        }

        /* The mirror job has no requests in flight any more, but we need to
         * drain potential other users of the BDS before changing the graph. */
        assert(s->in_drain);
        bdrv_drained_begin(to_replace);
        /*
         * Cannot use check_to_replace_node() here, because that would
         * check for an op blocker on @to_replace, and we have our own
         * there.
         */
        bdrv_graph_wrlock();
        if (bdrv_recurse_can_replace(src, to_replace)) {
            bdrv_replace_node(to_replace, target_bs, &local_err);
        } else {
            error_setg(&local_err, "Can no longer replace '%s' by '%s', "
                       "because it can no longer be guaranteed that doing so "
                       "would not lead to an abrupt change of visible data",
                       to_replace->node_name, target_bs->node_name);
        }
        bdrv_graph_wrunlock();
        bdrv_drained_end(to_replace);
        if (local_err) {
            error_report_err(local_err);
            ret = -EPERM;
        }
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    g_free(s->replaces);

    /*
     * Remove the mirror filter driver from the graph. Before this, get rid of
     * the blockers on the intermediate nodes so that the resulting state is
     * valid.
     */
    block_job_remove_all_bdrv(bjob);
    bdrv_graph_wrlock();
    bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort);
    bdrv_graph_wrunlock();

    if (abort && s->base_ro && !bdrv_is_read_only(target_bs)) {
        bdrv_reopen_set_read_only(target_bs, true, NULL);
    }

    bdrv_drained_end(target_bs);
    bdrv_unref(target_bs);

    bs_opaque->job = NULL;

    bdrv_drained_end(src);
    bdrv_drained_end(mirror_top_bs);
    s->in_drain = false;
    bdrv_unref(mirror_top_bs);
    bdrv_unref(src);

    return ret;
}

static int mirror_prepare(Job *job)
{
    return mirror_exit_common(job);
}

static void mirror_abort(Job *job)
{
    int ret = mirror_exit_common(job);
    assert(ret == 0);
}

static void coroutine_fn mirror_throttle(MirrorBlockJob *s)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    if (now - s->last_pause_ns > BLOCK_JOB_SLICE_TIME) {
        s->last_pause_ns = now;
        job_sleep_ns(&s->common.job, 0);
    } else {
        job_pause_point(&s->common.job);
    }
}

static int coroutine_fn GRAPH_UNLOCKED mirror_dirty_init(MirrorBlockJob *s)
{
    int64_t offset;
    BlockDriverState *bs;
    BlockDriverState *target_bs = blk_bs(s->target);
    int ret = -EIO;
    int64_t count;
    bool punch_holes =
        target_bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP &&
        bdrv_can_write_zeroes_with_unmap(target_bs);
    int64_t bitmap_length = DIV_ROUND_UP(s->bdev_length, s->granularity);

    /* Determine if the image is already zero, regardless of sync mode. */
    s->zero_bitmap = bitmap_new(bitmap_length);
    bdrv_graph_co_rdlock();
    bs = s->mirror_top_bs->backing->bs;
    if (s->target_is_zero) {
        ret = 1;
    } else {
        ret = bdrv_co_is_all_zeroes(target_bs);
    }
    bdrv_graph_co_rdunlock();

    /* Determine if a pre-zeroing pass is necessary. */
    if (ret < 0) {
        return ret;
    } else if (s->sync_mode == MIRROR_SYNC_MODE_TOP) {
        /*
         * In TOP mode, there is no benefit to a pre-zeroing pass, but
         * the zero bitmap can be set if the destination already reads
         * as zero and we are not punching holes.
         */
        if (ret > 0 && !punch_holes) {
            bitmap_set(s->zero_bitmap, 0, bitmap_length);
        }
    } else if (ret == 0 || punch_holes) {
        /*
         * Here, we are in FULL mode; our goal is to avoid writing
         * zeroes if the destination already reads as zero, except
         * when we are trying to punch holes. This is possible if
         * zeroing happened externally (ret > 0) or if we have a fast
         * way to pre-zero the image (the dirty bitmap will be
         * populated later by the non-zero portions, the same as for
         * TOP mode). If pre-zeroing is not fast, or we need to visit
         * the entire image in order to punch holes even in the
         * non-allocated regions of the source, then just mark the
         * entire image dirty and leave the zero bitmap clear at this
         * point in time. Otherwise, it can be faster to pre-zero the
         * image now, even if we re-write the allocated portions of
         * the disk later, and the pre-zero pass will populate the
         * zero bitmap.
         */
        if (!bdrv_can_write_zeroes_with_unmap(target_bs) || punch_holes) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, s->bdev_length);
            return 0;
        }

        s->initial_zeroing_ongoing = true;
        for (offset = 0; offset < s->bdev_length; ) {
            int bytes = MIN(s->bdev_length - offset,
                            QEMU_ALIGN_DOWN(INT_MAX, s->granularity));
            bool ignored;

            mirror_throttle(s);

            if (job_is_cancelled(&s->common.job)) {
                s->initial_zeroing_ongoing = false;
                return 0;
            }

            if (s->in_flight >= MAX_IN_FLIGHT) {
                trace_mirror_yield(s, UINT64_MAX, s->buf_free_count,
                                   s->in_flight);
                mirror_wait_for_free_in_flight_slot(s);
                continue;
            }

            mirror_perform(s, offset, bytes, MIRROR_METHOD_ZERO, &ignored);
            offset += bytes;
        }

        mirror_wait_for_all_io(s);
        s->initial_zeroing_ongoing = false;
    } else {
        /* In FULL mode, and image already reads as zero. */
        bitmap_set(s->zero_bitmap, 0, bitmap_length);
    }

    /* First part, loop on the sectors and initialize the dirty bitmap. */
    for (offset = 0; offset < s->bdev_length; ) {
        /* Just to make sure we are not exceeding int limit. */
        int bytes = MIN(s->bdev_length - offset,
                        QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

        mirror_throttle(s);

        if (job_is_cancelled(&s->common.job)) {
            return 0;
        }

        WITH_GRAPH_RDLOCK_GUARD() {
            ret = bdrv_co_is_allocated_above(bs, s->base_overlay, true, offset,
                                             bytes, &count);
        }
        if (ret < 0) {
            return ret;
        }

        assert(count);
        if (ret > 0) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, offset, count);
        }
        offset += count;
    }
    return 0;
}

/* Called when going out of the streaming phase to flush the bulk of the
 * data to the medium, or just before completing.
 */
static int coroutine_fn mirror_flush(MirrorBlockJob *s)
{
    int ret = blk_co_flush(s->target);
    if (ret < 0) {
        if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) {
            s->ret = ret;
        }
    }
    return ret;
}

static int coroutine_fn mirror_run(Job *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockDriverState *bs;
    MirrorBDSOpaque *mirror_top_opaque = s->mirror_top_bs->opaque;
    BlockDriverState *target_bs = blk_bs(s->target);
    bool need_drain = true;
    BlockDeviceIoStatus iostatus = BLOCK_DEVICE_IO_STATUS__MAX;
    int64_t length;
    int64_t target_length;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for a NULL string */
    int ret = 0;

    bdrv_graph_co_rdlock();
    bs = bdrv_filter_bs(s->mirror_top_bs);
    bdrv_graph_co_rdunlock();

    if (job_is_cancelled(&s->common.job)) {
        goto immediate_exit;
    }

    bdrv_graph_co_rdlock();
    s->bdev_length = bdrv_co_getlength(bs);
    bdrv_graph_co_rdunlock();

    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    }

    target_length = blk_co_getlength(s->target);
    if (target_length < 0) {
        ret = target_length;
        goto immediate_exit;
    }

    /* Active commit must resize the base image if its size differs from the
     * active layer. */
    if (s->base == blk_bs(s->target)) {
        if (s->bdev_length > target_length) {
            ret = blk_co_truncate(s->target, s->bdev_length, false,
                                  PREALLOC_MODE_OFF, 0, NULL);
            if (ret < 0) {
                goto immediate_exit;
            }
        }
    } else if (s->bdev_length != target_length) {
        error_setg(errp, "Source and target image have different sizes");
        ret = -EINVAL;
        goto immediate_exit;
    }

    if (s->bdev_length == 0) {
        /* Transition to the READY state and wait for complete. */
        job_transition_to_ready(&s->common.job);
        qatomic_set(&s->actively_synced, true);
        while (!job_cancel_requested(&s->common.job) && !s->should_complete) {
            job_yield(&s->common.job);
        }
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW. Instead, we copy sectors around the
     * dirty data if needed. We need a bitmap to do that.
     */
    bdrv_get_backing_filename(target_bs, backing_filename,
                              sizeof(backing_filename));
    bdrv_graph_co_rdlock();
    if (!bdrv_co_get_info(target_bs, &bdi) && bdi.cluster_size) {
        s->target_cluster_size = bdi.cluster_size;
    } else {
        s->target_cluster_size = BDRV_SECTOR_SIZE;
    }
    if (backing_filename[0] && !bdrv_backing_chain_next(target_bs) &&
        s->granularity < s->target_cluster_size) {
        s->buf_size = MAX(s->buf_size, s->target_cluster_size);
        s->cow_bitmap = bitmap_new(length);
    }
    s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);
    bdrv_graph_co_rdunlock();

    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    mirror_free_init(s);

    s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (s->sync_mode != MIRROR_SYNC_MODE_NONE) {
        ret = mirror_dirty_init(s);
        if (ret < 0 || job_is_cancelled(&s->common.job)) {
            goto immediate_exit;
        }
    }

    /*
     * Only now the job is fully initialised and mirror_top_bs should start
     * accessing it.
     */
    mirror_top_opaque->job = s;

    assert(!s->dbi);
    s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap);
    for (;;) {
        int64_t cnt, delta;
        bool should_complete;

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        job_pause_point(&s->common.job);

        if (job_is_cancelled(&s->common.job)) {
            ret = 0;
            goto immediate_exit;
        }

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* cnt is the number of dirty bytes remaining and s->bytes_in_flight is
         * the number of bytes currently being processed; together those are
         * the current remaining operation length */
        job_progress_set_remaining(&s->common.job,
                                   s->bytes_in_flight + cnt +
                                   s->active_write_bytes_in_flight);

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every BLOCK_JOB_SLICE_TIME nanoseconds, or when there is
         * an error, or when the source is clean, whichever comes first. */
        delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns;
        WITH_JOB_LOCK_GUARD() {
            iostatus = s->common.iostatus;
        }
        if (delta < BLOCK_JOB_SLICE_TIME &&
            iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, cnt, s->buf_free_count, s->in_flight);
                mirror_wait_for_free_in_flight_slot(s);
                continue;
            } else if (cnt != 0) {
                mirror_iteration(s);
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            if (!job_is_ready(&s->common.job)) {
                if (mirror_flush(s) < 0) {
                    /* Go check s->ret. */
                    continue;
                }
                /* We're out of the streaming phase. From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion. This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                job_transition_to_ready(&s->common.job);
            }
            if (qatomic_read(&s->copy_mode) != MIRROR_COPY_MODE_BACKGROUND) {
                qatomic_set(&s->actively_synced, true);
            }

            should_complete = s->should_complete ||
                job_cancel_requested(&s->common.job);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs, so pause it now. Before deciding
             * whether to switch to target check one last time if I/O has
             * come in the meanwhile, and if not flush the data to disk.
             */
            trace_mirror_before_drain(s, cnt);

            s->in_drain = true;
            bdrv_drained_begin(bs);

            /* Must be zero because we are drained */
            assert(s->in_active_write_counter == 0);

            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            if (cnt > 0 || mirror_flush(s) < 0) {
                bdrv_drained_end(bs);
                s->in_drain = false;
                continue;
            }

            /* The two disks are in sync. Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            need_drain = false;
            break;
        }

        if (job_is_ready(&s->common.job) && !should_complete) {
            if (s->in_flight == 0 && cnt == 0) {
                trace_mirror_before_sleep(s, cnt, job_is_ready(&s->common.job),
                                          BLOCK_JOB_SLICE_TIME);
                job_sleep_ns(&s->common.job, BLOCK_JOB_SLICE_TIME);
            }
        } else {
            block_job_ratelimit_sleep(&s->common);
        }
        s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong. Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || job_is_cancelled(&s->common.job));
        assert(need_drain);
        mirror_wait_for_all_io(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->zero_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_dirty_iter_free(s->dbi);

    if (need_drain) {
        s->in_drain = true;
        bdrv_drained_begin(bs);
    }

    return ret;
}

static void mirror_complete(Job *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);

    if (!job_is_ready(job)) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    /* block all operations on to_replace bs */
    if (s->replaces) {
        s->to_replace = bdrv_find_node(s->replaces);
        if (!s->to_replace) {
            error_setg(errp, "Node name '%s' not found", s->replaces);
            return;
        }

        /* TODO Translate this into child freeze system. */
        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);
    }

    s->should_complete = true;

    /* If the job is paused, it will be re-entered when it is resumed */
    WITH_JOB_LOCK_GUARD() {
        if (!job->paused) {
            job_enter_cond_locked(job, NULL);
        }
    }
}

static void coroutine_fn mirror_pause(Job *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);

    mirror_wait_for_all_io(s);
}

static bool mirror_drained_poll(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    /* If the job isn't paused nor cancelled, we can't be sure that it won't
     * issue more requests. We make an exception if we've reached this point
     * from one of our own drain sections, to avoid a deadlock waiting for
     * ourselves.
     */
    WITH_JOB_LOCK_GUARD() {
        if (!s->common.job.paused && !job_is_cancelled_locked(&job->job)
            && !s->in_drain) {
            return true;
        }
    }

    return !!s->in_flight;
}

static bool mirror_cancel(Job *job, bool force)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockDriverState *target = blk_bs(s->target);

    /*
     * Before the job is READY, we treat any cancellation like a
     * force-cancellation.
     */
    force = force || !job_is_ready(job);

    if (force) {
        bdrv_cancel_in_flight(target);
    }
    return force;
}

static bool commit_active_cancel(Job *job, bool force)
{
    /* Same as above in mirror_cancel() */
    return force || !job_is_ready(job);
}

static void mirror_change(BlockJob *job, BlockJobChangeOptions *opts,
                          Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    BlockJobChangeOptionsMirror *change_opts = &opts->u.mirror;
    MirrorCopyMode current;

    /*
     * The implementation relies on the fact that copy_mode is only written
     * under the BQL. Otherwise, further synchronization would be required.
     */

    GLOBAL_STATE_CODE();

    if (qatomic_read(&s->copy_mode) == change_opts->copy_mode) {
        return;
    }

    if (change_opts->copy_mode != MIRROR_COPY_MODE_WRITE_BLOCKING) {
        error_setg(errp, "Change to copy mode '%s' is not implemented",
                   MirrorCopyMode_str(change_opts->copy_mode));
        return;
    }

    current = qatomic_cmpxchg(&s->copy_mode, MIRROR_COPY_MODE_BACKGROUND,
                              change_opts->copy_mode);
    if (current != MIRROR_COPY_MODE_BACKGROUND) {
        error_setg(errp, "Expected current copy mode '%s', got '%s'",
                   MirrorCopyMode_str(MIRROR_COPY_MODE_BACKGROUND),
                   MirrorCopyMode_str(current));
    }
}

static void mirror_query(BlockJob *job, BlockJobInfo *info)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    info->u.mirror = (BlockJobInfoMirror) {
        .actively_synced = qatomic_read(&s->actively_synced),
    };
}

static const BlockJobDriver mirror_job_driver = {
    .job_driver = {
        .instance_size = sizeof(MirrorBlockJob),
        .job_type = JOB_TYPE_MIRROR,
        .free = block_job_free,
        .user_resume = block_job_user_resume,
        .run = mirror_run,
        .prepare = mirror_prepare,
        .abort = mirror_abort,
        .pause = mirror_pause,
        .complete = mirror_complete,
        .cancel = mirror_cancel,
    },
    .drained_poll = mirror_drained_poll,
    .change = mirror_change,
    .query = mirror_query,
};

static const BlockJobDriver commit_active_job_driver = {
    .job_driver = {
        .instance_size = sizeof(MirrorBlockJob),
        .job_type = JOB_TYPE_COMMIT,
        .free = block_job_free,
        .user_resume = block_job_user_resume,
        .run = mirror_run,
        .prepare = mirror_prepare,
        .abort = mirror_abort,
        .pause = mirror_pause,
        .complete = mirror_complete,
        .cancel = commit_active_cancel,
    },
    .drained_poll = mirror_drained_poll,
};

static void coroutine_fn
do_sync_target_write(MirrorBlockJob *job, MirrorMethod method,
                     uint64_t offset, uint64_t bytes,
                     QEMUIOVector *qiov, int flags)
{
    int ret;
    size_t qiov_offset = 0;
    int64_t dirty_bitmap_offset, dirty_bitmap_end;
    int64_t zero_bitmap_offset, zero_bitmap_end;

    if (!QEMU_IS_ALIGNED(offset, job->granularity) &&
        bdrv_dirty_bitmap_get(job->dirty_bitmap, offset))
    {
        /*
         * Dirty unaligned padding: ignore it.
         *
         * Reasoning:
         * 1. If we copy it, we can't reset corresponding bit in
         *    dirty_bitmap as there may be some "dirty" bytes still not
         *    copied.
         * 2. It's already dirty, so skipping it we don't diverge mirror
         *    progress.
         *
         * Note, that because of this, guest write may have no contribution
         * into mirror converge, but that's not bad, as we have background
         * process of mirroring. If under some bad circumstances (high guest
         * IO load) background process starve, we will not converge anyway,
         * even if each write will contribute, as guest is not guaranteed to
         * rewrite the whole disk.
         */
        qiov_offset = QEMU_ALIGN_UP(offset, job->granularity) - offset;
        if (bytes <= qiov_offset) {
            /* nothing to do after shrink */
            return;
        }
        offset += qiov_offset;
        bytes -= qiov_offset;
    }

    if (!QEMU_IS_ALIGNED(offset + bytes, job->granularity) &&
        bdrv_dirty_bitmap_get(job->dirty_bitmap, offset + bytes - 1))
    {
        uint64_t tail = (offset + bytes) % job->granularity;

        if (bytes <= tail) {
            /* nothing to do after shrink */
            return;
        }
        bytes -= tail;
    }
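
    /*
     * For illustration (hypothetical numbers): with a 64 KiB granularity, a
     * guest write to [48 KiB, 144 KiB) whose head and tail chunks are both
     * still dirty is shrunk to the aligned middle [64 KiB, 128 KiB); the
     * skipped padding stays dirty and is left to the background copy.
     */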
1478
1479 /*
1480 * Tails are either clean or shrunk, so for dirty bitmap resetting
1481 * we safely align the range narrower. But for zero bitmap, round
1482 * range wider for checking or clearing, and narrower for setting.
1483 */
1484 dirty_bitmap_offset = QEMU_ALIGN_UP(offset, job->granularity);
1485 dirty_bitmap_end = QEMU_ALIGN_DOWN(offset + bytes, job->granularity);
1486 if (dirty_bitmap_offset < dirty_bitmap_end) {
1487 bdrv_reset_dirty_bitmap(job->dirty_bitmap, dirty_bitmap_offset,
1488 dirty_bitmap_end - dirty_bitmap_offset);
1489 }
1490 zero_bitmap_offset = offset / job->granularity;
1491 zero_bitmap_end = DIV_ROUND_UP(offset + bytes, job->granularity);
1492
1493 job_progress_increase_remaining(&job->common.job, bytes);
1494 job->active_write_bytes_in_flight += bytes;
1495
1496 switch (method) {
1497 case MIRROR_METHOD_COPY:
1498 if (job->zero_bitmap) {
1499 bitmap_clear(job->zero_bitmap, zero_bitmap_offset,
1500 zero_bitmap_end - zero_bitmap_offset);
1501 }
1502 ret = blk_co_pwritev_part(job->target, offset, bytes,
1503 qiov, qiov_offset, flags);
1504 break;
1505
1506 case MIRROR_METHOD_ZERO:
1507 if (job->zero_bitmap) {
1508 if (find_next_zero_bit(job->zero_bitmap, zero_bitmap_end,
1509 zero_bitmap_offset) == zero_bitmap_end) {
1510 ret = 0;
1511 break;
1512 }
1513 }
1514 assert(!qiov);
1515 ret = blk_co_pwrite_zeroes(job->target, offset, bytes, flags);
1516 if (job->zero_bitmap && ret >= 0) {
1517 bitmap_set(job->zero_bitmap, dirty_bitmap_offset / job->granularity,
1518 (dirty_bitmap_end - dirty_bitmap_offset) /
1519 job->granularity);
1520 }
1521 break;
1522
1523 case MIRROR_METHOD_DISCARD:
1524 if (job->zero_bitmap) {
1525 bitmap_clear(job->zero_bitmap, zero_bitmap_offset,
1526 zero_bitmap_end - zero_bitmap_offset);
1527 }
1528 assert(!qiov);
1529 ret = blk_co_pdiscard(job->target, offset, bytes);
1530 break;
1531
1532 default:
1533 abort();
1534 }
1535
1536 job->active_write_bytes_in_flight -= bytes;
1537 if (ret >= 0) {
1538 job_progress_update(&job->common.job, bytes);
1539 } else {
1540 BlockErrorAction action;
1541
1542 /*
1543 * We failed, so we should mark dirty the whole area, aligned up.
1544 * Note that we don't care about shrunk tails if any: they were dirty
1545 * at function start, and they must be still dirty, as we've locked
1546 * the region for in-flight op.
1547 */
1548 dirty_bitmap_offset = QEMU_ALIGN_DOWN(offset, job->granularity);
1549 dirty_bitmap_end = QEMU_ALIGN_UP(offset + bytes, job->granularity);
1550 bdrv_set_dirty_bitmap(job->dirty_bitmap, dirty_bitmap_offset,
1551 dirty_bitmap_end - dirty_bitmap_offset);
1552 qatomic_set(&job->actively_synced, false);
1553
1554 action = mirror_error_action(job, false, -ret);
1555 if (action == BLOCK_ERROR_ACTION_REPORT) {
1556 if (!job->ret) {
1557 job->ret = ret;
1558 }
1559 }
1560 }
1561 }
1562
active_write_prepare(MirrorBlockJob * s,uint64_t offset,uint64_t bytes)1563 static MirrorOp *coroutine_fn active_write_prepare(MirrorBlockJob *s,
1564 uint64_t offset,
1565 uint64_t bytes)
1566 {
1567 MirrorOp *op;
1568 uint64_t start_chunk = offset / s->granularity;
1569 uint64_t end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity);
1570
1571 op = g_new(MirrorOp, 1);
1572 *op = (MirrorOp){
1573 .s = s,
1574 .offset = offset,
1575 .bytes = bytes,
1576 .is_active_write = true,
1577 .is_in_flight = true,
1578 .co = qemu_coroutine_self(),
1579 };
1580 qemu_co_queue_init(&op->waiting_requests);
1581 QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);
1582
1583 s->in_active_write_counter++;
1584
1585 /*
1586 * Wait for concurrent requests affecting the area. If there are already
1587 * running requests that are copying off now-to-be stale data in the area,
1588 * we must wait for them to finish before we begin writing fresh data to the
1589 * target so that the write operations appear in the correct order.
1590 * Note that background requests (see mirror_iteration()) in contrast only
1591 * wait for conflicting requests at the start of the dirty area, and then
1592 * (based on the in_flight_bitmap) truncate the area to copy so it will not
1593 * conflict with any requests beyond that. For active writes, however, we
1594 * cannot truncate that area. The request from our parent must be blocked
1595 * until the area is copied in full. Therefore, we must wait for the whole
1596 * area to become free of concurrent requests.
1597 */
1598 mirror_wait_on_conflicts(op, s, offset, bytes);
1599
1600 bitmap_set(s->in_flight_bitmap, start_chunk, end_chunk - start_chunk);
1601
1602 return op;
1603 }
1604
active_write_settle(MirrorOp * op)1605 static void coroutine_fn GRAPH_RDLOCK active_write_settle(MirrorOp *op)
1606 {
1607 uint64_t start_chunk = op->offset / op->s->granularity;
1608 uint64_t end_chunk = DIV_ROUND_UP(op->offset + op->bytes,
1609 op->s->granularity);
1610
1611 if (!--op->s->in_active_write_counter &&
1612 qatomic_read(&op->s->actively_synced)) {
1613 BdrvChild *source = op->s->mirror_top_bs->backing;
1614
1615 if (QLIST_FIRST(&source->bs->parents) == source &&
1616 QLIST_NEXT(source, next_parent) == NULL)
1617 {
1618 /* Assert that we are back in sync once all active write
1619 * operations are settled.
1620 * Note that we can only assert this if the mirror node
1621 * is the source node's only parent. */
1622 assert(!bdrv_get_dirty_count(op->s->dirty_bitmap));
1623 }
1624 }
1625 bitmap_clear(op->s->in_flight_bitmap, start_chunk, end_chunk - start_chunk);
1626 QTAILQ_REMOVE(&op->s->ops_in_flight, op, next);
1627 qemu_co_queue_restart_all(&op->waiting_requests);
1628 g_free(op);
1629 }
1630
1631 static int coroutine_fn GRAPH_RDLOCK
bdrv_mirror_top_preadv(BlockDriverState * bs,int64_t offset,int64_t bytes,QEMUIOVector * qiov,BdrvRequestFlags flags)1632 bdrv_mirror_top_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
1633 QEMUIOVector *qiov, BdrvRequestFlags flags)
1634 {
1635 return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
1636 }
1637
should_copy_to_target(MirrorBDSOpaque * s)1638 static bool should_copy_to_target(MirrorBDSOpaque *s)
1639 {
1640 return s->job && s->job->ret >= 0 &&
1641 !job_is_cancelled(&s->job->common.job) &&
1642 qatomic_read(&s->job->copy_mode) == MIRROR_COPY_MODE_WRITE_BLOCKING;
1643 }
1644
1645 static int coroutine_fn GRAPH_RDLOCK
bdrv_mirror_top_do_write(BlockDriverState * bs,MirrorMethod method,bool copy_to_target,uint64_t offset,uint64_t bytes,QEMUIOVector * qiov,int flags)1646 bdrv_mirror_top_do_write(BlockDriverState *bs, MirrorMethod method,
1647 bool copy_to_target, uint64_t offset, uint64_t bytes,
1648 QEMUIOVector *qiov, int flags)
1649 {
1650 MirrorOp *op = NULL;
1651 MirrorBDSOpaque *s = bs->opaque;
1652 int ret = 0;
1653
1654 if (copy_to_target) {
1655 op = active_write_prepare(s->job, offset, bytes);
1656 }
1657
1658 switch (method) {
1659 case MIRROR_METHOD_COPY:
1660 ret = bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags);
1661 break;
1662
1663 case MIRROR_METHOD_ZERO:
1664 ret = bdrv_co_pwrite_zeroes(bs->backing, offset, bytes, flags);
1665 break;
1666
1667 case MIRROR_METHOD_DISCARD:
1668 ret = bdrv_co_pdiscard(bs->backing, offset, bytes);
1669 break;
1670
1671 default:
1672 abort();
1673 }
1674
1675 if (!copy_to_target && s->job && s->job->dirty_bitmap) {
1676 qatomic_set(&s->job->actively_synced, false);
1677 bdrv_set_dirty_bitmap(s->job->dirty_bitmap, offset, bytes);
1678 }
1679
1680 if (ret < 0) {
1681 goto out;
1682 }
1683
1684 if (copy_to_target) {
1685 do_sync_target_write(s->job, method, offset, bytes, qiov, flags);
1686 }
1687
1688 out:
1689 if (copy_to_target) {
1690 active_write_settle(op);
1691 }
1692 return ret;
1693 }
1694
1695 static int coroutine_fn GRAPH_RDLOCK
bdrv_mirror_top_pwritev(BlockDriverState * bs,int64_t offset,int64_t bytes,QEMUIOVector * qiov,BdrvRequestFlags flags)1696 bdrv_mirror_top_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
1697 QEMUIOVector *qiov, BdrvRequestFlags flags)
1698 {
1699 QEMUIOVector bounce_qiov;
1700 void *bounce_buf;
1701 int ret = 0;
1702 bool copy_to_target = should_copy_to_target(bs->opaque);
1703
1704 if (copy_to_target) {
1705 /* The guest might concurrently modify the data to write; but
1706 * the data on source and destination must match, so we have
1707 * to use a bounce buffer if we are going to write to the
1708 * target now. */
1709 bounce_buf = qemu_blockalign(bs, bytes);
1710 iov_to_buf_full(qiov->iov, qiov->niov, 0, bounce_buf, bytes);
1711
1712 qemu_iovec_init(&bounce_qiov, 1);
1713 qemu_iovec_add(&bounce_qiov, bounce_buf, bytes);
1714 qiov = &bounce_qiov;
1715
1716 flags &= ~BDRV_REQ_REGISTERED_BUF;
1717 }

    ret = bdrv_mirror_top_do_write(bs, MIRROR_METHOD_COPY, copy_to_target,
                                   offset, bytes, qiov, flags);

    if (copy_to_target) {
        qemu_iovec_destroy(&bounce_qiov);
        qemu_vfree(bounce_buf);
    }

    return ret;
}

static int coroutine_fn GRAPH_RDLOCK bdrv_mirror_top_flush(BlockDriverState *bs)
{
    if (bs->backing == NULL) {
        /* we can be here after failed bdrv_append in mirror_start_job */
        return 0;
    }
    return bdrv_co_flush(bs->backing->bs);
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
                              int64_t bytes, BdrvRequestFlags flags)
{
    bool copy_to_target = should_copy_to_target(bs->opaque);
    return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_ZERO, copy_to_target,
                                    offset, bytes, NULL, flags);
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_mirror_top_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes)
{
    bool copy_to_target = should_copy_to_target(bs->opaque);
    return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_DISCARD, copy_to_target,
                                    offset, bytes, NULL, 0);
}

static void GRAPH_RDLOCK bdrv_mirror_top_refresh_filename(BlockDriverState *bs)
{
    if (bs->backing == NULL) {
        /* we can be here after failed bdrv_attach_child in
         * bdrv_set_backing_hd */
        return;
    }
    pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
            bs->backing->bs->filename);
}

static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c,
                                       BdrvChildRole role,
                                       BlockReopenQueue *reopen_queue,
                                       uint64_t perm, uint64_t shared,
                                       uint64_t *nperm, uint64_t *nshared)
{
    MirrorBDSOpaque *s = bs->opaque;

    if (s->stop) {
        /*
         * If the job is to be stopped, we do not need to forward
         * anything to the real image.
         */
        *nperm = 0;
        *nshared = BLK_PERM_ALL;
        return;
    }

    bdrv_default_perms(bs, c, role, reopen_queue,
                       perm, shared, nperm, nshared);

    if (s->is_commit) {
        /*
         * For commit jobs, we cannot take CONSISTENT_READ, because
         * that permission is unshared for everything above the base
         * node (except for filters on the base node).
         * We also have to force-share the WRITE permission, or
         * otherwise we would block ourselves at the base node (if
         * writes are blocked for a node, they are also blocked for
         * its backing file).
         * (We could also share RESIZE, because it may be needed for
         * the target if its size is less than the top node's; but
         * bdrv_default_perms_for_cow() automatically shares RESIZE
         * for backing nodes if WRITE is shared, so there is no need
         * to do it here.)
         */
        *nperm &= ~BLK_PERM_CONSISTENT_READ;
        *nshared |= BLK_PERM_WRITE;
    }
}

/* Dummy node that provides consistent read to its users without requiring it
 * from its backing file and that allows writes on the backing file chain. */
static BlockDriver bdrv_mirror_top = {
    .format_name                = "mirror_top",
    .bdrv_co_preadv             = bdrv_mirror_top_preadv,
    .bdrv_co_pwritev            = bdrv_mirror_top_pwritev,
    .bdrv_co_pwrite_zeroes      = bdrv_mirror_top_pwrite_zeroes,
    .bdrv_co_pdiscard           = bdrv_mirror_top_pdiscard,
    .bdrv_co_flush              = bdrv_mirror_top_flush,
    .bdrv_refresh_filename      = bdrv_mirror_top_refresh_filename,
    .bdrv_child_perm            = bdrv_mirror_top_child_perm,

    .is_filter                  = true,
    .filtered_child_is_backing  = true,
};
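
/*
 * Rough sketch of the graph while a mirror job runs (illustrative only):
 *
 *     guest / other parents
 *              |
 *         mirror_top   (this filter: intercepts guest writes)
 *              | backing
 *           source  ==(job copies dirty ranges)==>  target
 */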

static BlockJob *mirror_start_job(
                             const char *job_id, BlockDriverState *bs,
                             int creation_flags, BlockDriverState *target,
                             const char *replaces, int64_t speed,
                             uint32_t granularity, int64_t buf_size,
                             MirrorSyncMode sync_mode,
                             BlockMirrorBackingMode backing_mode,
                             bool target_is_zero,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             bool unmap,
                             BlockCompletionFunc *cb,
                             void *opaque,
                             const BlockJobDriver *driver,
                             BlockDriverState *base,
                             bool auto_complete, const char *filter_node_name,
                             bool is_mirror, MirrorCopyMode copy_mode,
                             bool base_ro,
                             Error **errp)
{
    MirrorBlockJob *s;
    MirrorBDSOpaque *bs_opaque;
    BlockDriverState *mirror_top_bs;
    bool target_is_backing;
    uint64_t target_perms, target_shared_perms;
    int ret;

    GLOBAL_STATE_CODE();

    if (granularity == 0) {
        granularity = bdrv_get_default_bitmap_granularity(target);
    }

    assert(is_power_of_2(granularity));

    if (buf_size < 0) {
        error_setg(errp, "Invalid parameter 'buf-size'");
        return NULL;
    }

    if (buf_size == 0) {
        buf_size = DEFAULT_MIRROR_BUF_SIZE;
    }
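
    /*
     * With the defaults from the top of this file, that is
     * MAX_IN_FLIGHT * MAX_IO_BYTES = 16 * 1 MiB = 16 MiB of buffer.
     */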

    bdrv_graph_rdlock_main_loop();
    if (bdrv_skip_filters(bs) == bdrv_skip_filters(target)) {
        error_setg(errp, "Can't mirror node into itself");
        bdrv_graph_rdunlock_main_loop();
        return NULL;
    }

    target_is_backing = bdrv_chain_contains(bs, target);
    bdrv_graph_rdunlock_main_loop();

    /* In the case of active commit, add dummy driver to provide consistent
     * reads on the top, while disabling it in the intermediate nodes, and make
     * the backing chain writable. */
    mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name,
                                         BDRV_O_RDWR, errp);
    if (mirror_top_bs == NULL) {
        return NULL;
    }
    if (!filter_node_name) {
        mirror_top_bs->implicit = true;
    }

    /* So that we can always drop this node */
    mirror_top_bs->never_freeze = true;

    mirror_top_bs->total_sectors = bs->total_sectors;
    mirror_top_bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED;
    mirror_top_bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED |
                                          BDRV_REQ_NO_FALLBACK;
    bs_opaque = g_new0(MirrorBDSOpaque, 1);
    mirror_top_bs->opaque = bs_opaque;

    bs_opaque->is_commit = target_is_backing;

    bdrv_drained_begin(bs);
    ret = bdrv_append(mirror_top_bs, bs, errp);
    bdrv_drained_end(bs);

    if (ret < 0) {
        bdrv_unref(mirror_top_bs);
        return NULL;
    }
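
    /*
     * At this point bdrv_append() has inserted mirror_top_bs between bs and
     * its parents: guest writes now reach bdrv_mirror_top_pwritev() first,
     * which is what allows the job to track them (and mirror them directly
     * in active mode).
     */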

    /* Make sure that the source is not resized while the job is running */
    s = block_job_create(job_id, driver, NULL, mirror_top_bs,
                         BLK_PERM_CONSISTENT_READ,
                         BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
                         BLK_PERM_WRITE, speed,
                         creation_flags, cb, opaque, errp);
    if (!s) {
        goto fail;
    }

    /* The block job now has a reference to this node */
    bdrv_unref(mirror_top_bs);

    s->mirror_top_bs = mirror_top_bs;
    s->base_ro = base_ro;

    /* No resize for the target either; while the mirror is still running, a
     * consistent read isn't necessarily possible. We could possibly allow
     * writes and graph modifications, though it would likely defeat the
     * purpose of a mirror, so leave them blocked for now.
     *
     * In the case of active commit, things look a bit different, though,
     * because the target is an already populated backing file in active use.
     * We can allow anything except resize there. */

    target_perms = BLK_PERM_WRITE;
    target_shared_perms = BLK_PERM_WRITE_UNCHANGED;

    if (target_is_backing) {
        int64_t bs_size, target_size;
        bs_size = bdrv_getlength(bs);
        if (bs_size < 0) {
            error_setg_errno(errp, -bs_size,
                             "Could not inquire top image size");
            goto fail;
        }

        target_size = bdrv_getlength(target);
        if (target_size < 0) {
            error_setg_errno(errp, -target_size,
                             "Could not inquire base image size");
            goto fail;
        }

        if (target_size < bs_size) {
            target_perms |= BLK_PERM_RESIZE;
        }
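
        /*
         * Example: committing a 10 GiB top image onto an 8 GiB base requires
         * growing the base, hence BLK_PERM_RESIZE above.
         */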

        target_shared_perms |= BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE;
    } else {
        bdrv_graph_rdlock_main_loop();
        if (bdrv_chain_contains(bs, bdrv_skip_filters(target))) {
            /*
             * We may want to allow this in the future, but it would
             * require taking some extra care.
             */
            error_setg(errp, "Cannot mirror to a filter on top of a node in "
                       "the source's backing chain");
            bdrv_graph_rdunlock_main_loop();
            goto fail;
        }
        bdrv_graph_rdunlock_main_loop();
    }

    s->target = blk_new(s->common.job.aio_context,
                        target_perms, target_shared_perms);
    ret = blk_insert_bs(s->target, target, errp);
    if (ret < 0) {
        goto fail;
    }
    if (is_mirror) {
        /* XXX: The mirror target could be an NBD server of the target QEMU
         * in the case of non-shared block migration. To allow migration
         * completion, we have to allow "inactivate" of the target BB. When
         * that happens, we know the job is drained, and the vcpus are
         * stopped, so no write operation will be performed. The block layer
         * already has assertions to ensure that. */
        blk_set_force_allow_inactivate(s->target);
    }
    blk_set_allow_aio_context_change(s->target, true);
    blk_set_disable_request_queuing(s->target, true);
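
    /*
     * Request queuing is disabled here presumably so that the job's own
     * requests to the target are not parked while the BlockBackend is
     * drained, which could otherwise deadlock a drained section.
     */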

    bdrv_graph_rdlock_main_loop();
    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->sync_mode = sync_mode;
    s->backing_mode = backing_mode;
    s->target_is_zero = target_is_zero;
    qatomic_set(&s->copy_mode, copy_mode);
    s->base = base;
    s->base_overlay = bdrv_find_overlay(bs, base);
    s->granularity = granularity;
    s->buf_size = ROUND_UP(buf_size, granularity);
    s->unmap = unmap;
    if (auto_complete) {
        s->should_complete = true;
    }
    bdrv_graph_rdunlock_main_loop();

    s->dirty_bitmap = bdrv_create_dirty_bitmap(s->mirror_top_bs, granularity,
                                               NULL, errp);
    if (!s->dirty_bitmap) {
        goto fail;
    }

    /*
     * The dirty bitmap is set by bdrv_mirror_top_do_write() when not in active
     * mode.
     */
    bdrv_disable_dirty_bitmap(s->dirty_bitmap);

    bdrv_graph_wrlock_drained();
    ret = block_job_add_bdrv(&s->common, "source", bs, 0,
                             BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE |
                             BLK_PERM_CONSISTENT_READ,
                             errp);
    if (ret < 0) {
        bdrv_graph_wrunlock();
        goto fail;
    }

    /* Required permissions are already taken with blk_new() */
    block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL,
                       &error_abort);

    /* In commit_active_start() all intermediate nodes disappear, so
     * any jobs in them must be blocked */
    if (target_is_backing) {
        BlockDriverState *iter, *filtered_target;
        uint64_t iter_shared_perms;

        /*
         * The topmost node with
         * bdrv_skip_filters(filtered_target) == bdrv_skip_filters(target)
         */
        filtered_target = bdrv_cow_bs(bdrv_find_overlay(bs, target));

        assert(bdrv_skip_filters(filtered_target) ==
               bdrv_skip_filters(target));

        /*
         * XXX BLK_PERM_WRITE needs to be allowed so we don't block
         * ourselves at s->base (if writes are blocked for a node, they are
         * also blocked for its backing file). The other option would be a
         * second filter driver above s->base (== target).
         */
        iter_shared_perms = BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE;

        for (iter = bdrv_filter_or_cow_bs(bs); iter != target;
             iter = bdrv_filter_or_cow_bs(iter))
        {
            if (iter == filtered_target) {
                /*
                 * From here on, all nodes are filters on the base.
                 * This allows us to share BLK_PERM_CONSISTENT_READ.
                 */
                iter_shared_perms |= BLK_PERM_CONSISTENT_READ;
            }

            ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                                     iter_shared_perms, errp);
            if (ret < 0) {
                bdrv_graph_wrunlock();
                goto fail;
            }
        }

        if (bdrv_freeze_backing_chain(mirror_top_bs, target, errp) < 0) {
            bdrv_graph_wrunlock();
            goto fail;
        }
    }
    bdrv_graph_wrunlock();
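
    /*
     * At this point the job holds references on the source, the target and
     * (for active commit) every intermediate node, so none of them can
     * disappear behind the job's back while it runs.
     */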

    QTAILQ_INIT(&s->ops_in_flight);

    trace_mirror_start(bs, s, opaque);
    job_start(&s->common.job);

    return &s->common;

fail:
    if (s) {
        /* Make sure this BDS does not go away until we have completed the graph
         * changes below */
        bdrv_ref(mirror_top_bs);

        g_free(s->replaces);
        blk_unref(s->target);
        bs_opaque->job = NULL;
        if (s->dirty_bitmap) {
            bdrv_release_dirty_bitmap(s->dirty_bitmap);
        }
        job_early_fail(&s->common.job);
    }

    bs_opaque->stop = true;
    bdrv_drained_begin(bs);
    bdrv_graph_wrlock();
    assert(mirror_top_bs->backing->bs == bs);
    bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
                             &error_abort);
    bdrv_replace_node(mirror_top_bs, bs, &error_abort);
    bdrv_graph_wrunlock();
    bdrv_drained_end(bs);

    bdrv_unref(mirror_top_bs);

    return NULL;
}

void mirror_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, const char *replaces,
                  int creation_flags, int64_t speed,
                  uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
                  bool target_is_zero,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap, const char *filter_node_name,
                  MirrorCopyMode copy_mode, Error **errp)
{
    BlockDriverState *base;

    GLOBAL_STATE_CODE();

    if ((mode == MIRROR_SYNC_MODE_INCREMENTAL) ||
        (mode == MIRROR_SYNC_MODE_BITMAP)) {
        error_setg(errp, "Sync mode '%s' not supported",
                   MirrorSyncMode_str(mode));
        return;
    }

    bdrv_graph_rdlock_main_loop();
    base = mode == MIRROR_SYNC_MODE_TOP ? bdrv_backing_chain_next(bs) : NULL;
    bdrv_graph_rdunlock_main_loop();
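
    /*
     * With MIRROR_SYNC_MODE_TOP only the top image is copied, so the next
     * node in the backing chain serves as the job's base; for the other
     * supported modes there is no base.
     */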

    mirror_start_job(job_id, bs, creation_flags, target, replaces,
                     speed, granularity, buf_size, mode, backing_mode,
                     target_is_zero, on_source_error, on_target_error, unmap,
                     NULL, NULL, &mirror_job_driver, base, false,
                     filter_node_name, true, copy_mode, false, errp);
}

BlockJob *commit_active_start(const char *job_id, BlockDriverState *bs,
                              BlockDriverState *base, int creation_flags,
                              int64_t speed, BlockdevOnError on_error,
                              const char *filter_node_name,
                              BlockCompletionFunc *cb, void *opaque,
                              bool auto_complete, Error **errp)
{
    bool base_read_only;
    BlockJob *job;

    GLOBAL_STATE_CODE();

    base_read_only = bdrv_is_read_only(base);

    if (base_read_only) {
        if (bdrv_reopen_set_read_only(base, false, errp) < 0) {
            return NULL;
        }
    }
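
    /*
     * Active commit writes the top image's data into base, so base must be
     * made writable here. On failure the original read-only flag is
     * restored under error_restore_flags below; on success, the base_ro
     * argument passed to mirror_start_job() is understood to let the job
     * restore it when it finishes.
     */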

    job = mirror_start_job(
                     job_id, bs, creation_flags, base, NULL, speed, 0, 0,
                     MIRROR_SYNC_MODE_TOP, MIRROR_LEAVE_BACKING_CHAIN, false,
                     on_error, on_error, true, cb, opaque,
                     &commit_active_job_driver, base, auto_complete,
                     filter_node_name, false, MIRROR_COPY_MODE_BACKGROUND,
                     base_read_only, errp);
    if (!job) {
        goto error_restore_flags;
    }

    return job;

error_restore_flags:
    /* ignore error and errp for bdrv_reopen, because we want to propagate
     * the original error */
    if (base_read_only) {
        bdrv_reopen_set_read_only(base, true, NULL);
    }
    return NULL;
}