/*
 * QEMU backup
 *
 * Copyright (C) 2013 Proxmox Server Solutions
 *
 * Authors:
 *  Dietmar Maurer (dietmar@proxmox.com)
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "block/block.h"
#include "block/block_int.h"
#include "block/blockjob_int.h"
#include "block/block_backup.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/cutils.h"
#include "sysemu/block-backend.h"
#include "qemu/bitmap.h"
#include "qemu/error-report.h"

#define BACKUP_CLUSTER_SIZE_DEFAULT (1 << 16)
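/* i.e. 64 KiB; the fallback used when the target reports no cluster size */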

typedef struct CowRequest {
    int64_t start_byte;
    int64_t end_byte;
    QLIST_ENTRY(CowRequest) list;
    CoQueue wait_queue; /* coroutines blocked on this request */
} CowRequest;

typedef struct BackupBlockJob {
    BlockJob common;
    BlockBackend *target;

    BdrvDirtyBitmap *sync_bitmap;
    BdrvDirtyBitmap *copy_bitmap;

    MirrorSyncMode sync_mode;
    BitmapSyncMode bitmap_mode;
    BlockdevOnError on_source_error;
    BlockdevOnError on_target_error;
    CoRwlock flush_rwlock;
    uint64_t len;
    uint64_t bytes_read;
    int64_t cluster_size;
    NotifierWithReturn before_write;
    QLIST_HEAD(, CowRequest) inflight_reqs;

    bool use_copy_range;
    int64_t copy_range_size;

    BdrvRequestFlags write_flags;
    bool initializing_bitmap;
} BackupBlockJob;

static const BlockJobDriver backup_job_driver;

/* See if in-flight requests overlap and wait for them to complete */
static void coroutine_fn wait_for_overlapping_requests(BackupBlockJob *job,
                                                       int64_t start,
                                                       int64_t end)
{
    CowRequest *req;
    bool retry;

    do {
        retry = false;
        QLIST_FOREACH(req, &job->inflight_reqs, list) {
            if (end > req->start_byte && start < req->end_byte) {
                qemu_co_queue_wait(&req->wait_queue, NULL);
                retry = true;
                break;
            }
        }
    } while (retry);
}
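
/*
 * Editor's example (illustrative, not part of the original source): the
 * half-open byte ranges [start, end) and [req->start_byte, req->end_byte)
 * intersect exactly when end > req->start_byte && start < req->end_byte.
 * With 64 KiB clusters, an in-flight request covering [0, 131072) makes a
 * new request for [65536, 196608) block on its wait_queue; after wakeup the
 * loop re-scans the whole list in case another overlap appeared meanwhile.
 */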

/* Keep track of an in-flight request */
static void cow_request_begin(CowRequest *req, BackupBlockJob *job,
                              int64_t start, int64_t end)
{
    req->start_byte = start;
    req->end_byte = end;
    qemu_co_queue_init(&req->wait_queue);
    QLIST_INSERT_HEAD(&job->inflight_reqs, req, list);
}

/* Forget about a completed request */
static void cow_request_end(CowRequest *req)
{
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}

/* Copy range to target with a bounce buffer and return the bytes copied. If
 * an error occurs, return a negative error number. */
static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job,
                                                      int64_t start,
                                                      int64_t end,
                                                      bool is_write_notifier,
                                                      bool *error_is_read,
                                                      void **bounce_buffer)
{
    int ret;
    BlockBackend *blk = job->common.blk;
    int nbytes;
    int read_flags = is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0;

    assert(QEMU_IS_ALIGNED(start, job->cluster_size));
    bdrv_reset_dirty_bitmap(job->copy_bitmap, start, job->cluster_size);
    nbytes = MIN(job->cluster_size, job->len - start);
    if (!*bounce_buffer) {
        *bounce_buffer = blk_blockalign(blk, job->cluster_size);
    }

    ret = blk_co_pread(blk, start, nbytes, *bounce_buffer, read_flags);
    if (ret < 0) {
        trace_backup_do_cow_read_fail(job, start, ret);
        if (error_is_read) {
            *error_is_read = true;
        }
        goto fail;
    }

    ret = blk_co_pwrite(job->target, start, nbytes, *bounce_buffer,
                        job->write_flags);
    if (ret < 0) {
        trace_backup_do_cow_write_fail(job, start, ret);
        if (error_is_read) {
            *error_is_read = false;
        }
        goto fail;
    }

    return nbytes;
fail:
    bdrv_set_dirty_bitmap(job->copy_bitmap, start, job->cluster_size);
    return ret;
}
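
/*
 * Editor's note (added commentary): the cluster's bit in copy_bitmap is
 * cleared *before* the copy and restored only on failure, so that once a
 * request completes, later backup_do_cow() calls skip the cluster, while a
 * failed cluster stays dirty and gets retried.  Example: with cluster_size
 * = 64 KiB and job->len = 1000000, the final cluster starts at 983040 and
 * nbytes = MIN(65536, 1000000 - 983040) = 16960, i.e. only the bytes that
 * actually exist are read and written.
 */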

/* Copy range to target and return the bytes copied. If an error occurs,
 * return a negative error number. */
static int coroutine_fn backup_cow_with_offload(BackupBlockJob *job,
                                                int64_t start,
                                                int64_t end,
                                                bool is_write_notifier)
{
    int ret;
    int nr_clusters;
    BlockBackend *blk = job->common.blk;
    int nbytes;
    int read_flags = is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0;

    assert(QEMU_IS_ALIGNED(job->copy_range_size, job->cluster_size));
    assert(QEMU_IS_ALIGNED(start, job->cluster_size));
    nbytes = MIN(job->copy_range_size, end - start);
    nr_clusters = DIV_ROUND_UP(nbytes, job->cluster_size);
    bdrv_reset_dirty_bitmap(job->copy_bitmap, start,
                            job->cluster_size * nr_clusters);
    ret = blk_co_copy_range(blk, start, job->target, start, nbytes,
                            read_flags, job->write_flags);
    if (ret < 0) {
        trace_backup_do_cow_copy_range_fail(job, start, ret);
        bdrv_set_dirty_bitmap(job->copy_bitmap, start,
                              job->cluster_size * nr_clusters);
        return ret;
    }

    return nbytes;
}
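
/*
 * Editor's example (illustrative): with cluster_size = 64 KiB,
 * copy_range_size = 128 KiB and a dirty range of 256 KiB, each call
 * offloads nbytes = MIN(131072, 262144) = 131072, i.e. nr_clusters = 2
 * clusters per blk_co_copy_range() call; backup_do_cow() then advances
 * by the returned byte count and calls in again for the remainder.
 */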

/*
 * Check whether the cluster starting at @offset is allocated, and return
 * via @pnum the number of contiguous clusters sharing that allocation.
 */
static int backup_is_cluster_allocated(BackupBlockJob *s, int64_t offset,
                                       int64_t *pnum)
{
    BlockDriverState *bs = blk_bs(s->common.blk);
    int64_t count, total_count = 0;
    int64_t bytes = s->len - offset;
    int ret;

    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));

    while (true) {
        ret = bdrv_is_allocated(bs, offset, bytes, &count);
        if (ret < 0) {
            return ret;
        }

        total_count += count;

        if (ret || count == 0) {
            /*
             * ret: partial segment(s) are considered allocated.
             * otherwise: unallocated tail is treated as an entire segment.
             */
            *pnum = DIV_ROUND_UP(total_count, s->cluster_size);
            return ret;
        }

        /* Unallocated segment(s) with uncertain following segment(s) */
        if (total_count >= s->cluster_size) {
            *pnum = total_count / s->cluster_size;
            return 0;
        }

        offset += count;
        bytes -= count;
    }
}
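
/*
 * Editor's example (illustrative): with 64 KiB clusters, if
 * bdrv_is_allocated() reports a 4 KiB allocated extent at @offset, the
 * function returns 1 immediately with *pnum = DIV_ROUND_UP(4096, 65536)
 * = 1: a partially allocated cluster counts as allocated.  If it instead
 * reports 192 KiB unallocated, the result is 0 with *pnum = 3 whole
 * clusters that may be skipped.
 */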

/**
 * Reset bits in copy_bitmap starting at @offset if they represent
 * unallocated data in the image.  May reset subsequent contiguous bits.
 * @return 0 when the cluster at @offset was unallocated,
 *         1 otherwise, and a negative errno on error.
 */
static int64_t backup_bitmap_reset_unallocated(BackupBlockJob *s,
                                               int64_t offset, int64_t *count)
{
    int ret;
    int64_t clusters, bytes, estimate;

    ret = backup_is_cluster_allocated(s, offset, &clusters);
    if (ret < 0) {
        return ret;
    }

    bytes = clusters * s->cluster_size;

    if (!ret) {
        bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
        estimate = bdrv_get_dirty_count(s->copy_bitmap);
        job_progress_set_remaining(&s->common.job, estimate);
    }

    *count = bytes;
    return ret;
}

static int coroutine_fn backup_do_cow(BackupBlockJob *job,
                                      int64_t offset, uint64_t bytes,
                                      bool *error_is_read,
                                      bool is_write_notifier)
{
    CowRequest cow_request;
    int ret = 0;
    int64_t start, end; /* bytes */
    void *bounce_buffer = NULL;
    int64_t status_bytes;

    qemu_co_rwlock_rdlock(&job->flush_rwlock);

    start = QEMU_ALIGN_DOWN(offset, job->cluster_size);
    end = QEMU_ALIGN_UP(bytes + offset, job->cluster_size);

    trace_backup_do_cow_enter(job, start, offset, bytes);

    wait_for_overlapping_requests(job, start, end);
    cow_request_begin(&cow_request, job, start, end);

    while (start < end) {
        int64_t dirty_end;

        if (!bdrv_dirty_bitmap_get(job->copy_bitmap, start)) {
            trace_backup_do_cow_skip(job, start);
            start += job->cluster_size;
            continue; /* already copied */
        }

        dirty_end = bdrv_dirty_bitmap_next_zero(job->copy_bitmap, start,
                                                (end - start));
        if (dirty_end < 0) {
            dirty_end = end;
        }

        if (job->initializing_bitmap) {
            ret = backup_bitmap_reset_unallocated(job, start, &status_bytes);
            if (ret == 0) {
                trace_backup_do_cow_skip_range(job, start, status_bytes);
                start += status_bytes;
                continue;
            }
            /* Clamp to known allocated region */
            dirty_end = MIN(dirty_end, start + status_bytes);
        }

        trace_backup_do_cow_process(job, start);

        if (job->use_copy_range) {
            ret = backup_cow_with_offload(job, start, dirty_end,
                                          is_write_notifier);
            if (ret < 0) {
                job->use_copy_range = false;
            }
        }
        if (!job->use_copy_range) {
            ret = backup_cow_with_bounce_buffer(job, start, dirty_end,
                                                is_write_notifier,
                                                error_is_read, &bounce_buffer);
        }
        if (ret < 0) {
            break;
        }

        /* Publish progress; guest I/O counts as progress too.  Note that
         * the offset field is an opaque progress value rather than a disk
         * offset.
         */
        start += ret;
        job->bytes_read += ret;
        job_progress_update(&job->common.job, ret);
        ret = 0;
    }

    if (bounce_buffer) {
        qemu_vfree(bounce_buffer);
    }

    cow_request_end(&cow_request);

    trace_backup_do_cow_return(job, offset, bytes, ret);

    qemu_co_rwlock_unlock(&job->flush_rwlock);

    return ret;
}
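
/*
 * Editor's example (illustrative): with cluster_size = 64 KiB, a guest
 * write of 4096 bytes at offset 100000 is widened to the cluster-aligned
 * range start = QEMU_ALIGN_DOWN(100000, 65536) = 65536 and
 * end = QEMU_ALIGN_UP(104096, 65536) = 131072, so exactly the one
 * cluster containing the request is copied before the write proceeds.
 */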

static int coroutine_fn backup_before_write_notify(
        NotifierWithReturn *notifier,
        void *opaque)
{
    BackupBlockJob *job = container_of(notifier, BackupBlockJob, before_write);
    BdrvTrackedRequest *req = opaque;

    assert(req->bs == blk_bs(job->common.blk));
    assert(QEMU_IS_ALIGNED(req->offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(req->bytes, BDRV_SECTOR_SIZE));

    return backup_do_cow(job, req->offset, req->bytes, NULL, true);
}

static void backup_cleanup_sync_bitmap(BackupBlockJob *job, int ret)
{
    BdrvDirtyBitmap *bm;
    BlockDriverState *bs = blk_bs(job->common.blk);
    bool sync = (((ret == 0) || (job->bitmap_mode == BITMAP_SYNC_MODE_ALWAYS))
                 && (job->bitmap_mode != BITMAP_SYNC_MODE_NEVER));

    if (sync) {
        /*
         * We succeeded, or we always intended to sync the bitmap.
         * Delete this bitmap and install the child.
         */
        bm = bdrv_dirty_bitmap_abdicate(bs, job->sync_bitmap, NULL);
    } else {
        /*
         * We failed, or we never intended to sync the bitmap anyway.
         * Merge the successor back into the parent, keeping all data.
         */
        bm = bdrv_reclaim_dirty_bitmap(bs, job->sync_bitmap, NULL);
    }

    assert(bm);

    if (ret < 0 && job->bitmap_mode == BITMAP_SYNC_MODE_ALWAYS) {
        /* If we failed and synced, merge in the bits we didn't copy: */
        bdrv_dirty_bitmap_merge_internal(bm, job->copy_bitmap,
                                         NULL, true);
    }
}
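
/*
 * Editor's summary of the cases above (added commentary):
 *
 *   bitmap_mode    job result    action on sync_bitmap
 *   ON_SUCCESS     success       abdicate (successor replaces it)
 *   ON_SUCCESS     failure       reclaim (merge successor back)
 *   ALWAYS         success       abdicate
 *   ALWAYS         failure       abdicate, then merge copy_bitmap so
 *                                uncopied clusters remain dirty
 *   NEVER          any           reclaim
 */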

static void backup_commit(Job *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
    if (s->sync_bitmap) {
        backup_cleanup_sync_bitmap(s, 0);
    }
}

static void backup_abort(Job *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
    if (s->sync_bitmap) {
        backup_cleanup_sync_bitmap(s, -1);
    }
}

static void backup_clean(Job *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
    BlockDriverState *bs = blk_bs(s->common.blk);

    if (s->copy_bitmap) {
        bdrv_release_dirty_bitmap(bs, s->copy_bitmap);
        s->copy_bitmap = NULL;
    }

    assert(s->target);
    blk_unref(s->target);
    s->target = NULL;
}

void backup_do_checkpoint(BlockJob *job, Error **errp)
{
    BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);

    assert(block_job_driver(job) == &backup_job_driver);

    if (backup_job->sync_mode != MIRROR_SYNC_MODE_NONE) {
        error_setg(errp, "The backup job only supports block checkpoint in"
                   " sync=none mode");
        return;
    }

    bdrv_set_dirty_bitmap(backup_job->copy_bitmap, 0, backup_job->len);
}

static void backup_drain(BlockJob *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);

    /* Need to keep a reference in case blk_drain triggers execution
     * of backup_complete...
     */
    if (s->target) {
        BlockBackend *target = s->target;
        blk_ref(target);
        blk_drain(target);
        blk_unref(target);
    }
}

static BlockErrorAction backup_error_action(BackupBlockJob *job,
                                            bool read, int error)
{
    if (read) {
        return block_job_error_action(&job->common, job->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&job->common, job->on_target_error,
                                      false, error);
    }
}

static bool coroutine_fn yield_and_check(BackupBlockJob *job)
{
    uint64_t delay_ns;

    if (job_is_cancelled(&job->common.job)) {
        return true;
    }

    /* We need to yield even for delay_ns = 0 so that bdrv_drain_all() can
     * return. Without a yield, the VM would not reboot. */
    delay_ns = block_job_ratelimit_get_delay(&job->common, job->bytes_read);
    job->bytes_read = 0;
    job_sleep_ns(&job->common.job, delay_ns);

    if (job_is_cancelled(&job->common.job)) {
        return true;
    }

    return false;
}

static int coroutine_fn backup_loop(BackupBlockJob *job)
{
    bool error_is_read;
    int64_t offset;
    BdrvDirtyBitmapIter *bdbi;
    int ret = 0;

    bdbi = bdrv_dirty_iter_new(job->copy_bitmap);
    while ((offset = bdrv_dirty_iter_next(bdbi)) != -1) {
        do {
            if (yield_and_check(job)) {
                goto out;
            }
            ret = backup_do_cow(job, offset,
                                job->cluster_size, &error_is_read, false);
            if (ret < 0 && backup_error_action(job, error_is_read, -ret) ==
                           BLOCK_ERROR_ACTION_REPORT)
            {
                goto out;
            }
        } while (ret < 0);
    }

 out:
    bdrv_dirty_iter_free(bdbi);
    return ret;
}

static void backup_init_copy_bitmap(BackupBlockJob *job)
{
    bool ret;
    uint64_t estimate;

    if (job->sync_mode == MIRROR_SYNC_MODE_BITMAP) {
        ret = bdrv_dirty_bitmap_merge_internal(job->copy_bitmap,
                                               job->sync_bitmap,
                                               NULL, true);
        assert(ret);
    } else {
        if (job->sync_mode == MIRROR_SYNC_MODE_TOP) {
            /*
             * We can't hog the coroutine to initialize this thoroughly.
             * Set a flag and resume work when we are able to yield safely.
             */
            job->initializing_bitmap = true;
        }
        bdrv_set_dirty_bitmap(job->copy_bitmap, 0, job->len);
    }

    estimate = bdrv_get_dirty_count(job->copy_bitmap);
    job_progress_set_remaining(&job->common.job, estimate);
}
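
/*
 * Editor's summary (added commentary): how copy_bitmap starts out per
 * sync mode:
 *
 *   MIRROR_SYNC_MODE_BITMAP  copy of the user-supplied sync_bitmap
 *   MIRROR_SYNC_MODE_TOP     fully set, then trimmed lazily as
 *                            unallocated clusters are discovered
 *   MIRROR_SYNC_MODE_FULL    fully set
 *   MIRROR_SYNC_MODE_NONE    fully set, but only the before_write
 *                            notifier's CoW ever consumes it
 */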

static int coroutine_fn backup_run(Job *job, Error **errp)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
    BlockDriverState *bs = blk_bs(s->common.blk);
    int ret = 0;

    QLIST_INIT(&s->inflight_reqs);
    qemu_co_rwlock_init(&s->flush_rwlock);

    backup_init_copy_bitmap(s);

    s->before_write.notify = backup_before_write_notify;
    bdrv_add_before_write_notifier(bs, &s->before_write);

    if (s->sync_mode == MIRROR_SYNC_MODE_TOP) {
        int64_t offset = 0;
        int64_t count;

        for (offset = 0; offset < s->len; ) {
            if (yield_and_check(s)) {
                ret = -ECANCELED;
                goto out;
            }

            ret = backup_bitmap_reset_unallocated(s, offset, &count);
            if (ret < 0) {
                goto out;
            }

            offset += count;
        }
        s->initializing_bitmap = false;
    }

    if (s->sync_mode == MIRROR_SYNC_MODE_NONE) {
        /* All bits are set in copy_bitmap so that any cluster may be
         * copied on demand; nothing is copied unless the guest writes. */
        while (!job_is_cancelled(job)) {
            /* Yield until the job is cancelled.  We just let our before_write
             * notify callback service CoW requests. */
            job_yield(job);
        }
    } else {
        ret = backup_loop(s);
    }

 out:
    notifier_with_return_remove(&s->before_write);

    /* wait until pending backup_do_cow() calls have completed */
    qemu_co_rwlock_wrlock(&s->flush_rwlock);
    qemu_co_rwlock_unlock(&s->flush_rwlock);

    return ret;
}

static const BlockJobDriver backup_job_driver = {
    .job_driver = {
        .instance_size          = sizeof(BackupBlockJob),
        .job_type               = JOB_TYPE_BACKUP,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .drain                  = block_job_drain,
        .run                    = backup_run,
        .commit                 = backup_commit,
        .abort                  = backup_abort,
        .clean                  = backup_clean,
    },
    .drain                  = backup_drain,
};

static int64_t backup_calculate_cluster_size(BlockDriverState *target,
                                             Error **errp)
{
    int ret;
    BlockDriverInfo bdi;

    /*
     * If there is no backing file on the target, we cannot rely on COW if our
     * backup cluster size is smaller than the target cluster size. Even for
     * targets with a backing file, try to avoid COW if possible.
     */
    ret = bdrv_get_info(target, &bdi);
    if (ret == -ENOTSUP && !target->backing) {
        /* Cluster size is not defined */
        warn_report("The target block device doesn't provide "
                    "information about the block size and it doesn't have a "
                    "backing file. The default block size of %u bytes is "
                    "used. If the actual block size of the target exceeds "
                    "this default, the backup may be unusable",
                    BACKUP_CLUSTER_SIZE_DEFAULT);
        return BACKUP_CLUSTER_SIZE_DEFAULT;
    } else if (ret < 0 && !target->backing) {
        error_setg_errno(errp, -ret,
            "Couldn't determine the cluster size of the target image, "
            "which has no backing file");
        error_append_hint(errp,
            "Aborting, since this may create an unusable destination image\n");
        return ret;
    } else if (ret < 0 && target->backing) {
        /* Not fatal; just trudge on ahead. */
        return BACKUP_CLUSTER_SIZE_DEFAULT;
    }

    return MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
}
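
/*
 * Editor's example (illustrative): a qcow2 target reporting a 128 KiB
 * cluster size yields MAX(65536, 131072) = 131072, so the backup copies
 * in whole target clusters; a target whose driver returns -ENOTSUP and
 * that has no backing file falls back to the 64 KiB default, with a
 * warning that the result may be unusable if the real block size is
 * larger.
 */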

BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, int64_t speed,
                  MirrorSyncMode sync_mode, BdrvDirtyBitmap *sync_bitmap,
                  BitmapSyncMode bitmap_mode,
                  bool compress,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  int creation_flags,
                  BlockCompletionFunc *cb, void *opaque,
                  JobTxn *txn, Error **errp)
{
    int64_t len;
    BackupBlockJob *job = NULL;
    int ret;
    int64_t cluster_size;
    BdrvDirtyBitmap *copy_bitmap = NULL;

    assert(bs);
    assert(target);

    /* QMP interface protects us from these cases */
    assert(sync_mode != MIRROR_SYNC_MODE_INCREMENTAL);
    assert(sync_bitmap || sync_mode != MIRROR_SYNC_MODE_BITMAP);

    if (bs == target) {
        error_setg(errp, "Source and target cannot be the same");
        return NULL;
    }

    if (!bdrv_is_inserted(bs)) {
        error_setg(errp, "Device is not inserted: %s",
                   bdrv_get_device_name(bs));
        return NULL;
    }

    if (!bdrv_is_inserted(target)) {
        error_setg(errp, "Device is not inserted: %s",
                   bdrv_get_device_name(target));
        return NULL;
    }

    if (compress && target->drv->bdrv_co_pwritev_compressed == NULL) {
        error_setg(errp, "Compression is not supported for this drive %s",
                   bdrv_get_device_name(target));
        return NULL;
    }

    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_BACKUP_SOURCE, errp)) {
        return NULL;
    }

    if (bdrv_op_is_blocked(target, BLOCK_OP_TYPE_BACKUP_TARGET, errp)) {
        return NULL;
    }

    if (sync_bitmap) {
        /* If we need to write to this bitmap, check that we can: */
        if (bitmap_mode != BITMAP_SYNC_MODE_NEVER &&
            bdrv_dirty_bitmap_check(sync_bitmap, BDRV_BITMAP_DEFAULT, errp)) {
            return NULL;
        }

        /* Create a new bitmap, and freeze/disable this one. */
        if (bdrv_dirty_bitmap_create_successor(bs, sync_bitmap, errp) < 0) {
            return NULL;
        }
    }

    len = bdrv_getlength(bs);
    if (len < 0) {
        error_setg_errno(errp, -len, "unable to get length for '%s'",
                         bdrv_get_device_name(bs));
        goto error;
    }

    cluster_size = backup_calculate_cluster_size(target, errp);
    if (cluster_size < 0) {
        goto error;
    }

    copy_bitmap = bdrv_create_dirty_bitmap(bs, cluster_size, NULL, errp);
    if (!copy_bitmap) {
        goto error;
    }
    bdrv_disable_dirty_bitmap(copy_bitmap);

    /* job->len is fixed, so we can't allow resize */
    job = block_job_create(job_id, &backup_job_driver, txn, bs,
                           BLK_PERM_CONSISTENT_READ,
                           BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE |
                           BLK_PERM_WRITE_UNCHANGED | BLK_PERM_GRAPH_MOD,
                           speed, creation_flags, cb, opaque, errp);
    if (!job) {
        goto error;
    }

    /* The target must match the source in size, so no resize here either */
    job->target = blk_new(job->common.job.aio_context,
                          BLK_PERM_WRITE,
                          BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE |
                          BLK_PERM_WRITE_UNCHANGED | BLK_PERM_GRAPH_MOD);
    ret = blk_insert_bs(job->target, target, errp);
    if (ret < 0) {
        goto error;
    }
    blk_set_disable_request_queuing(job->target, true);

    job->on_source_error = on_source_error;
    job->on_target_error = on_target_error;
    job->sync_mode = sync_mode;
    job->sync_bitmap = sync_bitmap;
    job->bitmap_mode = bitmap_mode;

    /*
     * Set write flags:
     * 1. Detect image-fleecing (and similar) schemes
     * 2. Handle compression
     */
    job->write_flags =
        (bdrv_chain_contains(target, bs) ? BDRV_REQ_SERIALISING : 0) |
        (compress ? BDRV_REQ_WRITE_COMPRESSED : 0);

    job->cluster_size = cluster_size;
    job->copy_bitmap = copy_bitmap;
    copy_bitmap = NULL;
    job->use_copy_range = !compress; /* compression isn't supported for it */
    job->copy_range_size = MIN_NON_ZERO(blk_get_max_transfer(job->common.blk),
                                        blk_get_max_transfer(job->target));
    job->copy_range_size = MAX(job->cluster_size,
                               QEMU_ALIGN_UP(job->copy_range_size,
                                             job->cluster_size));
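    /*
     * Editor's example (illustrative): if both source and target report a
     * 1 MiB max transfer and cluster_size is 64 KiB, copy_range_size stays
     * 1 MiB (already cluster-aligned); a 100000-byte limit would be rounded
     * up to QEMU_ALIGN_UP(100000, 65536) = 131072; and when neither end
     * reports a limit, MIN_NON_ZERO() yields 0 and the MAX() above raises
     * copy_range_size to one cluster.
     */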

    /* Required permissions are already taken with target's blk_new() */
    block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL,
                       &error_abort);
    job->len = len;

    return &job->common;

 error:
    if (copy_bitmap) {
        assert(!job || !job->copy_bitmap);
        bdrv_release_dirty_bitmap(bs, copy_bitmap);
    }
    if (sync_bitmap) {
        bdrv_reclaim_dirty_bitmap(bs, sync_bitmap, NULL);
    }
    if (job) {
        backup_clean(&job->common.job);
        job_early_fail(&job->common.job);
    }

    return NULL;
}