xref: /openbmc/qemu/block/backup.c (revision 6b8f9c6e)
/*
 * QEMU backup
 *
 * Copyright (C) 2013 Proxmox Server Solutions
 *
 * Authors:
 *  Dietmar Maurer (dietmar@proxmox.com)
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "block/block.h"
#include "block/block_int.h"
#include "block/blockjob_int.h"
#include "block/block_backup.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/cutils.h"
#include "sysemu/block-backend.h"
#include "qemu/bitmap.h"
#include "qemu/error-report.h"

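/* Default copy granularity: 64 KiB per cluster. Used when the target reports
 * no cluster size of its own, and as a lower bound otherwise; see the
 * bdrv_get_info() logic in backup_job_create() below. */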
#define BACKUP_CLUSTER_SIZE_DEFAULT (1 << 16)

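/*
 * One in-flight copy-on-write request, covering the byte range
 * [start_byte, end_byte). A request that overlaps an existing one blocks
 * on its wait_queue until it completes; see
 * wait_for_overlapping_requests().
 */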
typedef struct CowRequest {
    int64_t start_byte;
    int64_t end_byte;
    QLIST_ENTRY(CowRequest) list;
    CoQueue wait_queue; /* coroutines blocked on this request */
} CowRequest;

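/*
 * Per-job state. copy_bitmap holds one bit per cluster_size-sized cluster
 * of the source: a set bit means the cluster still has to be copied; it is
 * cleared when a copy starts and set again if that copy fails and must be
 * retried.
 */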
typedef struct BackupBlockJob {
    BlockJob common;
    BlockBackend *target;
    /* bitmap for sync=incremental */
    BdrvDirtyBitmap *sync_bitmap;
    MirrorSyncMode sync_mode;
    BlockdevOnError on_source_error;
    BlockdevOnError on_target_error;
    CoRwlock flush_rwlock;
    uint64_t len;
    uint64_t bytes_read;
    int64_t cluster_size;
    bool compress;
    NotifierWithReturn before_write;
    QLIST_HEAD(, CowRequest) inflight_reqs;

    HBitmap *copy_bitmap;
    bool use_copy_range;
    int64_t copy_range_size;

    bool serialize_target_writes;
} BackupBlockJob;

static const BlockJobDriver backup_job_driver;

/* See if in-flight requests overlap and wait for them to complete */
static void coroutine_fn wait_for_overlapping_requests(BackupBlockJob *job,
                                                       int64_t start,
                                                       int64_t end)
{
    CowRequest *req;
    bool retry;

    do {
        retry = false;
        QLIST_FOREACH(req, &job->inflight_reqs, list) {
            if (end > req->start_byte && start < req->end_byte) {
                qemu_co_queue_wait(&req->wait_queue, NULL);
                retry = true;
                break;
            }
        }
    } while (retry);
}

/* Keep track of an in-flight request */
static void cow_request_begin(CowRequest *req, BackupBlockJob *job,
                              int64_t start, int64_t end)
{
    req->start_byte = start;
    req->end_byte = end;
    qemu_co_queue_init(&req->wait_queue);
    QLIST_INSERT_HEAD(&job->inflight_reqs, req, list);
}

/* Forget about a completed request */
static void cow_request_end(CowRequest *req)
{
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}

/* Copy a range to the target with a bounce buffer and return the number of
 * bytes copied. On error, return a negative error number. */
static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job,
                                                      int64_t start,
                                                      int64_t end,
                                                      bool is_write_notifier,
                                                      bool *error_is_read,
                                                      void **bounce_buffer)
{
    int ret;
    BlockBackend *blk = job->common.blk;
    int nbytes;
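    /* A read triggered from the before-write notifier must not serialise
     * against the guest write that is waiting for it; target writes are
     * serialised when the target is part of the source's backing chain
     * (image fleecing), see backup_job_create(). */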
    int read_flags = is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0;
    int write_flags = job->serialize_target_writes ? BDRV_REQ_SERIALISING : 0;

    hbitmap_reset(job->copy_bitmap, start / job->cluster_size, 1);
    nbytes = MIN(job->cluster_size, job->len - start);
    if (!*bounce_buffer) {
        *bounce_buffer = blk_blockalign(blk, job->cluster_size);
    }

    ret = blk_co_pread(blk, start, nbytes, *bounce_buffer, read_flags);
    if (ret < 0) {
        trace_backup_do_cow_read_fail(job, start, ret);
        if (error_is_read) {
            *error_is_read = true;
        }
        goto fail;
    }

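    /* All-zero clusters become an efficient zero write (which the target may
     * unmap); anything else is written normally, compressed if the job was
     * created with compress=true. */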
    if (buffer_is_zero(*bounce_buffer, nbytes)) {
        ret = blk_co_pwrite_zeroes(job->target, start,
                                   nbytes, write_flags | BDRV_REQ_MAY_UNMAP);
    } else {
        ret = blk_co_pwrite(job->target, start,
                            nbytes, *bounce_buffer, write_flags |
                            (job->compress ? BDRV_REQ_WRITE_COMPRESSED : 0));
    }
    if (ret < 0) {
        trace_backup_do_cow_write_fail(job, start, ret);
        if (error_is_read) {
            *error_is_read = false;
        }
        goto fail;
    }

    return nbytes;
fail:
    hbitmap_set(job->copy_bitmap, start / job->cluster_size, 1);
    return ret;
}

/* Copy a range to the target and return the number of bytes copied. On
 * error, return a negative error number. */
static int coroutine_fn backup_cow_with_offload(BackupBlockJob *job,
                                                int64_t start,
                                                int64_t end,
                                                bool is_write_notifier)
{
    int ret;
    int nr_clusters;
    BlockBackend *blk = job->common.blk;
    int nbytes;
    int read_flags = is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0;
    int write_flags = job->serialize_target_writes ? BDRV_REQ_SERIALISING : 0;

    assert(QEMU_IS_ALIGNED(job->copy_range_size, job->cluster_size));
    nbytes = MIN(job->copy_range_size, end - start);
    nr_clusters = DIV_ROUND_UP(nbytes, job->cluster_size);
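    /* Clear the bits for the whole batch up front; if the offloaded copy
     * fails they are set again below, so the clusters will be retried via
     * the bounce-buffer path. */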
    hbitmap_reset(job->copy_bitmap, start / job->cluster_size,
                  nr_clusters);
    ret = blk_co_copy_range(blk, start, job->target, start, nbytes,
                            read_flags, write_flags);
    if (ret < 0) {
        trace_backup_do_cow_copy_range_fail(job, start, ret);
        hbitmap_set(job->copy_bitmap, start / job->cluster_size,
                    nr_clusters);
        return ret;
    }

    return nbytes;
}

static int coroutine_fn backup_do_cow(BackupBlockJob *job,
                                      int64_t offset, uint64_t bytes,
                                      bool *error_is_read,
                                      bool is_write_notifier)
{
    CowRequest cow_request;
    int ret = 0;
    int64_t start, end; /* bytes */
    void *bounce_buffer = NULL;

    qemu_co_rwlock_rdlock(&job->flush_rwlock);

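    /* Widen the request to cluster boundaries; the copy_bitmap and the
     * target are only ever touched in whole clusters. */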
    start = QEMU_ALIGN_DOWN(offset, job->cluster_size);
    end = QEMU_ALIGN_UP(bytes + offset, job->cluster_size);

    trace_backup_do_cow_enter(job, start, offset, bytes);

    wait_for_overlapping_requests(job, start, end);
    cow_request_begin(&cow_request, job, start, end);

    while (start < end) {
        if (!hbitmap_get(job->copy_bitmap, start / job->cluster_size)) {
            trace_backup_do_cow_skip(job, start);
            start += job->cluster_size;
            continue; /* already copied */
        }

        trace_backup_do_cow_process(job, start);

        if (job->use_copy_range) {
            ret = backup_cow_with_offload(job, start, end, is_write_notifier);
            if (ret < 0) {
                job->use_copy_range = false;
            }
        }
        if (!job->use_copy_range) {
            ret = backup_cow_with_bounce_buffer(job, start, end, is_write_notifier,
                                                error_is_read, &bounce_buffer);
        }
        if (ret < 0) {
            break;
        }

        /* Publish progress; guest I/O counts as progress too.  Note that the
         * offset field is an opaque progress value, not a disk offset.
         */
        start += ret;
        job->bytes_read += ret;
        job_progress_update(&job->common.job, ret);
        ret = 0;
    }

    if (bounce_buffer) {
        qemu_vfree(bounce_buffer);
    }

    cow_request_end(&cow_request);

    trace_backup_do_cow_return(job, offset, bytes, ret);

    qemu_co_rwlock_unlock(&job->flush_rwlock);

    return ret;
}

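/*
 * Called from the source's before-write notifier: copy out the original
 * contents of the range the guest is about to overwrite, so the old data
 * reaches the target before the new write lands (copy-before-write).
 */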
static int coroutine_fn backup_before_write_notify(
        NotifierWithReturn *notifier,
        void *opaque)
{
    BackupBlockJob *job = container_of(notifier, BackupBlockJob, before_write);
    BdrvTrackedRequest *req = opaque;

    assert(req->bs == blk_bs(job->common.blk));
    assert(QEMU_IS_ALIGNED(req->offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(req->bytes, BDRV_SECTOR_SIZE));

    return backup_do_cow(job, req->offset, req->bytes, NULL, true);
}

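/*
 * On success the frozen sync_bitmap can be dropped: its successor, which
 * has been tracking guest writes while the job ran, takes its place. On
 * failure the successor is merged back into sync_bitmap so that no dirty
 * information is lost.
 */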
static void backup_cleanup_sync_bitmap(BackupBlockJob *job, int ret)
{
    BdrvDirtyBitmap *bm;
    BlockDriverState *bs = blk_bs(job->common.blk);

    if (ret < 0) {
        /* Merge the successor back into the parent, delete nothing. */
        bm = bdrv_reclaim_dirty_bitmap(bs, job->sync_bitmap, NULL);
        assert(bm);
    } else {
        /* Everything is fine, delete this bitmap and install the backup. */
        bm = bdrv_dirty_bitmap_abdicate(bs, job->sync_bitmap, NULL);
        assert(bm);
    }
}

static void backup_commit(Job *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
    if (s->sync_bitmap) {
        backup_cleanup_sync_bitmap(s, 0);
    }
}

static void backup_abort(Job *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
    if (s->sync_bitmap) {
        backup_cleanup_sync_bitmap(s, -1);
    }
}

static void backup_clean(Job *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
    assert(s->target);
    blk_unref(s->target);
    s->target = NULL;
}

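/*
 * Re-arm the whole copy_bitmap so that every cluster is copied out again
 * before it is next overwritten. Only meaningful for sync=none, where the
 * job acts as a pure copy-before-write filter.
 */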
void backup_do_checkpoint(BlockJob *job, Error **errp)
{
    BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);
    int64_t len;

    assert(block_job_driver(job) == &backup_job_driver);

    if (backup_job->sync_mode != MIRROR_SYNC_MODE_NONE) {
        error_setg(errp, "The backup job only supports block checkpoint in"
                   " sync=none mode");
        return;
    }

    len = DIV_ROUND_UP(backup_job->len, backup_job->cluster_size);
    hbitmap_set(backup_job->copy_bitmap, 0, len);
}

static void backup_drain(BlockJob *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);

    /* Need to keep a reference in case blk_drain triggers execution
     * of backup_complete...
     */
    if (s->target) {
        BlockBackend *target = s->target;
        blk_ref(target);
        blk_drain(target);
        blk_unref(target);
    }
}

static BlockErrorAction backup_error_action(BackupBlockJob *job,
                                            bool read, int error)
{
    if (read) {
        return block_job_error_action(&job->common, job->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&job->common, job->on_target_error,
                                      false, error);
    }
}

static bool coroutine_fn yield_and_check(BackupBlockJob *job)
{
    uint64_t delay_ns;

    if (job_is_cancelled(&job->common.job)) {
        return true;
    }

    /* We need to yield even for delay_ns = 0 so that bdrv_drain_all() can
     * return. Without a yield, the VM would not reboot. */
    delay_ns = block_job_ratelimit_get_delay(&job->common, job->bytes_read);
    job->bytes_read = 0;
    job_sleep_ns(&job->common.job, delay_ns);

    if (job_is_cancelled(&job->common.job)) {
        return true;
    }

    return false;
}

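/* Copy exactly the clusters that are marked in copy_bitmap, retrying each
 * cluster until it succeeds or the configured error action gives up. */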
static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
{
    int ret;
    bool error_is_read;
    int64_t cluster;
    HBitmapIter hbi;

    hbitmap_iter_init(&hbi, job->copy_bitmap, 0);
    while ((cluster = hbitmap_iter_next(&hbi)) != -1) {
        do {
            if (yield_and_check(job)) {
                return 0;
            }
            ret = backup_do_cow(job, cluster * job->cluster_size,
                                job->cluster_size, &error_is_read, false);
            if (ret < 0 && backup_error_action(job, error_is_read, -ret) ==
                           BLOCK_ERROR_ACTION_REPORT)
            {
                return ret;
            }
        } while (ret < 0);
    }

    return 0;
}

/* Initialize copy_bitmap from sync_bitmap, rounding dirty extents out to
 * cluster boundaries */
static void backup_incremental_init_copy_bitmap(BackupBlockJob *job)
{
    BdrvDirtyBitmapIter *dbi;
    int64_t offset;
    int64_t end = DIV_ROUND_UP(bdrv_dirty_bitmap_size(job->sync_bitmap),
                               job->cluster_size);

    dbi = bdrv_dirty_iter_new(job->sync_bitmap);
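    /* For each run of dirty bits, mark every cluster the run touches, then
     * restart the iterator at the next cluster boundary so that a partially
     * dirty cluster is never skipped. */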
    while ((offset = bdrv_dirty_iter_next(dbi)) != -1) {
        int64_t cluster = offset / job->cluster_size;
        int64_t next_cluster;

        offset += bdrv_dirty_bitmap_granularity(job->sync_bitmap);
        if (offset >= bdrv_dirty_bitmap_size(job->sync_bitmap)) {
            hbitmap_set(job->copy_bitmap, cluster, end - cluster);
            break;
        }

        offset = bdrv_dirty_bitmap_next_zero(job->sync_bitmap, offset,
                                             UINT64_MAX);
        if (offset == -1) {
            hbitmap_set(job->copy_bitmap, cluster, end - cluster);
            break;
        }

        next_cluster = DIV_ROUND_UP(offset, job->cluster_size);
        hbitmap_set(job->copy_bitmap, cluster, next_cluster - cluster);
        if (next_cluster >= end) {
            break;
        }

        bdrv_set_dirty_iter(dbi, next_cluster * job->cluster_size);
    }

    /* TODO job_progress_set_remaining() would make more sense */
    job_progress_update(&job->common.job,
        job->len - hbitmap_count(job->copy_bitmap) * job->cluster_size);

    bdrv_dirty_iter_free(dbi);
}

static int coroutine_fn backup_run(Job *job, Error **errp)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
    BlockDriverState *bs = blk_bs(s->common.blk);
    int64_t offset, nb_clusters;
    int ret = 0;

    QLIST_INIT(&s->inflight_reqs);
    qemu_co_rwlock_init(&s->flush_rwlock);

    nb_clusters = DIV_ROUND_UP(s->len, s->cluster_size);
    job_progress_set_remaining(job, s->len);

    s->copy_bitmap = hbitmap_alloc(nb_clusters, 0);
    if (s->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        backup_incremental_init_copy_bitmap(s);
    } else {
        hbitmap_set(s->copy_bitmap, 0, nb_clusters);
    }

    s->before_write.notify = backup_before_write_notify;
    bdrv_add_before_write_notifier(bs, &s->before_write);

    if (s->sync_mode == MIRROR_SYNC_MODE_NONE) {
        /* All bits are set in copy_bitmap to allow any cluster to be copied.
         * This does not actually require them to be copied. */
        while (!job_is_cancelled(job)) {
            /* Yield until the job is cancelled.  We just let our before_write
             * notify callback service CoW requests. */
            job_yield(job);
        }
    } else if (s->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        ret = backup_run_incremental(s);
    } else {
        /* Both FULL and TOP sync modes require copying. */
        for (offset = 0; offset < s->len;
             offset += s->cluster_size) {
            bool error_is_read;
            int alloced = 0;

            if (yield_and_check(s)) {
                break;
            }

            if (s->sync_mode == MIRROR_SYNC_MODE_TOP) {
                int i;
                int64_t n;

                /* Check to see if these blocks are already in the
                 * backing file. */

                for (i = 0; i < s->cluster_size;) {
                    /* bdrv_is_allocated() only returns true/false based
                     * on the first set of sectors it comes across that
                     * are all in the same state.
                     * For that reason we must verify each sector in the
                     * backup cluster length.  We end up copying more than
                     * needed but at some point that is always the case. */
                    alloced =
                        bdrv_is_allocated(bs, offset + i,
                                          s->cluster_size - i, &n);
                    i += n;

                    if (alloced || n == 0) {
                        break;
                    }
                }

                /* If the above loop never found any sectors that are in
                 * the topmost image, skip this backup. */
                if (alloced == 0) {
                    continue;
                }
            }
            /* In FULL sync mode we copy the whole drive. */
            if (alloced < 0) {
                ret = alloced;
            } else {
                ret = backup_do_cow(s, offset, s->cluster_size,
                                    &error_is_read, false);
            }
            if (ret < 0) {
                /* Depending on error action, fail now or retry cluster */
                BlockErrorAction action =
                    backup_error_action(s, error_is_read, -ret);
                if (action == BLOCK_ERROR_ACTION_REPORT) {
                    break;
                } else {
                    offset -= s->cluster_size;
                    continue;
                }
            }
        }
    }

    notifier_with_return_remove(&s->before_write);

    /* wait until pending backup_do_cow() calls have completed */
    qemu_co_rwlock_wrlock(&s->flush_rwlock);
    qemu_co_rwlock_unlock(&s->flush_rwlock);
    hbitmap_free(s->copy_bitmap);

    return ret;
}

static const BlockJobDriver backup_job_driver = {
    .job_driver = {
        .instance_size          = sizeof(BackupBlockJob),
        .job_type               = JOB_TYPE_BACKUP,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .drain                  = block_job_drain,
        .run                    = backup_run,
        .commit                 = backup_commit,
        .abort                  = backup_abort,
        .clean                  = backup_clean,
    },
    .drain                  = backup_drain,
};

BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, int64_t speed,
                  MirrorSyncMode sync_mode, BdrvDirtyBitmap *sync_bitmap,
                  bool compress,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  int creation_flags,
                  BlockCompletionFunc *cb, void *opaque,
                  JobTxn *txn, Error **errp)
{
    int64_t len;
    BlockDriverInfo bdi;
    BackupBlockJob *job = NULL;
    int ret;

    assert(bs);
    assert(target);

    if (bs == target) {
        error_setg(errp, "Source and target cannot be the same");
        return NULL;
    }

    if (!bdrv_is_inserted(bs)) {
        error_setg(errp, "Device is not inserted: %s",
                   bdrv_get_device_name(bs));
        return NULL;
    }

    if (!bdrv_is_inserted(target)) {
        error_setg(errp, "Device is not inserted: %s",
                   bdrv_get_device_name(target));
        return NULL;
    }

    if (compress && target->drv->bdrv_co_pwritev_compressed == NULL) {
        error_setg(errp, "Compression is not supported for this drive %s",
                   bdrv_get_device_name(target));
        return NULL;
    }

    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_BACKUP_SOURCE, errp)) {
        return NULL;
    }

    if (bdrv_op_is_blocked(target, BLOCK_OP_TYPE_BACKUP_TARGET, errp)) {
        return NULL;
    }

    if (sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        if (!sync_bitmap) {
            error_setg(errp, "must provide a valid bitmap name for "
                             "\"incremental\" sync mode");
            return NULL;
        }

        /* Create a new bitmap, and freeze/disable this one. */
        if (bdrv_dirty_bitmap_create_successor(bs, sync_bitmap, errp) < 0) {
            return NULL;
        }
    } else if (sync_bitmap) {
        error_setg(errp,
                   "a sync_bitmap was provided to backup_run, "
                   "but received an incompatible sync_mode (%s)",
                   MirrorSyncMode_str(sync_mode));
        return NULL;
    }

    len = bdrv_getlength(bs);
    if (len < 0) {
        error_setg_errno(errp, -len, "unable to get length for '%s'",
                         bdrv_get_device_name(bs));
        goto error;
    }

    /* job->len is fixed, so we can't allow resize */
    job = block_job_create(job_id, &backup_job_driver, txn, bs,
                           BLK_PERM_CONSISTENT_READ,
                           BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE |
                           BLK_PERM_WRITE_UNCHANGED | BLK_PERM_GRAPH_MOD,
                           speed, creation_flags, cb, opaque, errp);
    if (!job) {
        goto error;
    }

    /* The target must match the source in size, so no resize here either */
    job->target = blk_new(BLK_PERM_WRITE,
                          BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE |
                          BLK_PERM_WRITE_UNCHANGED | BLK_PERM_GRAPH_MOD);
    ret = blk_insert_bs(job->target, target, errp);
    if (ret < 0) {
        goto error;
    }

    job->on_source_error = on_source_error;
    job->on_target_error = on_target_error;
    job->sync_mode = sync_mode;
    job->sync_bitmap = sync_mode == MIRROR_SYNC_MODE_INCREMENTAL ?
                       sync_bitmap : NULL;
    job->compress = compress;

    /* Detect image-fleecing (and similar) schemes */
    job->serialize_target_writes = bdrv_chain_contains(target, bs);

    /* If there is no backing file on the target, we cannot rely on COW if our
     * backup cluster size is smaller than the target cluster size. Even for
     * targets with a backing file, try to avoid COW if possible. */
    ret = bdrv_get_info(target, &bdi);
    if (ret == -ENOTSUP && !target->backing) {
        /* Cluster size is not defined */
        warn_report("The target block device doesn't provide "
                    "information about the block size and it doesn't have a "
                    "backing file. The default block size of %u bytes is "
                    "used. If the actual block size of the target exceeds "
                    "this default, the backup may be unusable",
                    BACKUP_CLUSTER_SIZE_DEFAULT);
        job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
    } else if (ret < 0 && !target->backing) {
        error_setg_errno(errp, -ret,
            "Couldn't determine the cluster size of the target image, "
            "which has no backing file");
        error_append_hint(errp,
            "Aborting, since this may create an unusable destination image\n");
        goto error;
    } else if (ret < 0 && target->backing) {
        /* Not fatal; just trudge on ahead. */
        job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
    } else {
        job->cluster_size = MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
    }
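    /* Prefer offloaded copying; each copy_range request is capped at the
     * smaller of the source and target transfer limits, rounded up to a
     * whole number of clusters (at least one). */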
    job->use_copy_range = true;
    job->copy_range_size = MIN_NON_ZERO(blk_get_max_transfer(job->common.blk),
                                        blk_get_max_transfer(job->target));
    job->copy_range_size = MAX(job->cluster_size,
                               QEMU_ALIGN_UP(job->copy_range_size,
                                             job->cluster_size));

    /* Required permissions are already taken with target's blk_new() */
    block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL,
                       &error_abort);
    job->len = len;

    return &job->common;

 error:
    if (sync_bitmap) {
        bdrv_reclaim_dirty_bitmap(bs, sync_bitmap, NULL);
    }
    if (job) {
        backup_clean(&job->common.job);
        job_early_fail(&job->common.job);
    }

    return NULL;
}