/*
 * QEMU backup
 *
 * Copyright (C) 2013 Proxmox Server Solutions
 *
 * Authors:
 *  Dietmar Maurer (dietmar@proxmox.com)
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "block/block.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "block/block_backup.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/cutils.h"
#include "sysemu/block-backend.h"
#include "qemu/bitmap.h"

#define BACKUP_CLUSTER_SIZE_DEFAULT (1 << 16)
#define SLICE_TIME 100000000ULL /* ns */

typedef struct BackupBlockJob {
    BlockJob common;
    BlockBackend *target;
    /* bitmap for sync=incremental */
    BdrvDirtyBitmap *sync_bitmap;
    MirrorSyncMode sync_mode;
    RateLimit limit;
    BlockdevOnError on_source_error;
    BlockdevOnError on_target_error;
    CoRwlock flush_rwlock;
    uint64_t sectors_read;
    unsigned long *done_bitmap;
    int64_t cluster_size;
    bool compress;
    NotifierWithReturn before_write;
    QLIST_HEAD(, CowRequest) inflight_reqs;
} BackupBlockJob;

/* Size of a cluster in sectors, instead of bytes. */
static inline int64_t cluster_size_sectors(BackupBlockJob *job)
{
    return job->cluster_size / BDRV_SECTOR_SIZE;
}

/* See if in-flight requests overlap and wait for them to complete */
static void coroutine_fn wait_for_overlapping_requests(BackupBlockJob *job,
                                                       int64_t start,
                                                       int64_t end)
{
    CowRequest *req;
    bool retry;

    do {
        retry = false;
        QLIST_FOREACH(req, &job->inflight_reqs, list) {
            if (end > req->start && start < req->end) {
                qemu_co_queue_wait(&req->wait_queue);
                retry = true;
                break;
            }
        }
    } while (retry);
}

/* Keep track of an in-flight request */
static void cow_request_begin(CowRequest *req, BackupBlockJob *job,
                              int64_t start, int64_t end)
{
    req->start = start;
    req->end = end;
    qemu_co_queue_init(&req->wait_queue);
    QLIST_INSERT_HEAD(&job->inflight_reqs, req, list);
}

/* Forget about a completed request */
static void cow_request_end(CowRequest *req)
{
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}

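/* Copy the clusters covering [sector_num, sector_num + nb_sectors) from the
 * source to the target.  The range is rounded out to cluster boundaries;
 * clusters already marked in done_bitmap are skipped.  Takes flush_rwlock
 * for reading so that backup_run() can later drain all in-flight copies.
 * On failure, *error_is_read (if non-NULL) records which side failed. */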
static int coroutine_fn backup_do_cow(BackupBlockJob *job,
                                      int64_t sector_num, int nb_sectors,
                                      bool *error_is_read,
                                      bool is_write_notifier)
{
    BlockBackend *blk = job->common.blk;
    CowRequest cow_request;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    void *bounce_buffer = NULL;
    int ret = 0;
    int64_t sectors_per_cluster = cluster_size_sectors(job);
    int64_t start, end;
    int n;

    qemu_co_rwlock_rdlock(&job->flush_rwlock);

    start = sector_num / sectors_per_cluster;
    end = DIV_ROUND_UP(sector_num + nb_sectors, sectors_per_cluster);

    trace_backup_do_cow_enter(job, start, sector_num, nb_sectors);

    wait_for_overlapping_requests(job, start, end);
    cow_request_begin(&cow_request, job, start, end);

    for (; start < end; start++) {
        if (test_bit(start, job->done_bitmap)) {
            trace_backup_do_cow_skip(job, start);
            continue; /* already copied */
        }

        trace_backup_do_cow_process(job, start);

        n = MIN(sectors_per_cluster,
                job->common.len / BDRV_SECTOR_SIZE -
                start * sectors_per_cluster);

        if (!bounce_buffer) {
            bounce_buffer = blk_blockalign(blk, job->cluster_size);
        }
        iov.iov_base = bounce_buffer;
        iov.iov_len = n * BDRV_SECTOR_SIZE;
        qemu_iovec_init_external(&bounce_qiov, &iov, 1);

        ret = blk_co_preadv(blk, start * job->cluster_size,
                            bounce_qiov.size, &bounce_qiov,
                            is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0);
        if (ret < 0) {
            trace_backup_do_cow_read_fail(job, start, ret);
            if (error_is_read) {
                *error_is_read = true;
            }
            goto out;
        }

        if (buffer_is_zero(iov.iov_base, iov.iov_len)) {
            ret = blk_co_pwrite_zeroes(job->target, start * job->cluster_size,
                                       bounce_qiov.size, BDRV_REQ_MAY_UNMAP);
        } else {
            ret = blk_co_pwritev(job->target, start * job->cluster_size,
                                 bounce_qiov.size, &bounce_qiov,
                                 job->compress ? BDRV_REQ_WRITE_COMPRESSED : 0);
        }
        if (ret < 0) {
            trace_backup_do_cow_write_fail(job, start, ret);
            if (error_is_read) {
                *error_is_read = false;
            }
            goto out;
        }

        set_bit(start, job->done_bitmap);

        /* Publish progress; guest I/O counts as progress too.  Note that the
         * offset field is an opaque progress value and not a disk offset.
         */
        job->sectors_read += n;
        job->common.offset += n * BDRV_SECTOR_SIZE;
    }

out:
    if (bounce_buffer) {
        qemu_vfree(bounce_buffer);
    }

    cow_request_end(&cow_request);

    trace_backup_do_cow_return(job, sector_num, nb_sectors, ret);

    qemu_co_rwlock_unlock(&job->flush_rwlock);

    return ret;
}

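/* before_write notifier: invoked ahead of every guest write to the source,
 * it copies out the affected clusters first so the backup still reflects
 * the point in time at which the job started. */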
static int coroutine_fn backup_before_write_notify(
        NotifierWithReturn *notifier,
        void *opaque)
{
    BackupBlockJob *job = container_of(notifier, BackupBlockJob, before_write);
    BdrvTrackedRequest *req = opaque;
    int64_t sector_num = req->offset >> BDRV_SECTOR_BITS;
    int nb_sectors = req->bytes >> BDRV_SECTOR_BITS;

    assert(req->bs == blk_bs(job->common.blk));
    assert((req->offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((req->bytes & (BDRV_SECTOR_SIZE - 1)) == 0);

    return backup_do_cow(job, sector_num, nb_sectors, NULL, true);
}

static void backup_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);

    if (speed < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }
    ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
}

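/* Reunite the sync bitmap with the successor created in backup_start().  On
 * failure or cancellation the successor is merged back so no dirty
 * information is lost; on success it replaces the old bitmap, leaving only
 * the writes made while the backup ran marked dirty. */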
static void backup_cleanup_sync_bitmap(BackupBlockJob *job, int ret)
{
    BdrvDirtyBitmap *bm;
    BlockDriverState *bs = blk_bs(job->common.blk);

    if (ret < 0 || block_job_is_cancelled(&job->common)) {
        /* Merge the successor back into the parent, delete nothing. */
        bm = bdrv_reclaim_dirty_bitmap(bs, job->sync_bitmap, NULL);
        assert(bm);
    } else {
        /* Everything is fine, delete this bitmap and install the backup. */
        bm = bdrv_dirty_bitmap_abdicate(bs, job->sync_bitmap, NULL);
        assert(bm);
    }
}

static void backup_commit(BlockJob *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
    if (s->sync_bitmap) {
        backup_cleanup_sync_bitmap(s, 0);
    }
}

static void backup_abort(BlockJob *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
    if (s->sync_bitmap) {
        backup_cleanup_sync_bitmap(s, -1);
    }
}

static void backup_attached_aio_context(BlockJob *job, AioContext *aio_context)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);

    blk_set_aio_context(s->target, aio_context);
}

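/* Clear the tracking bitmap of a sync=none backup job, so that subsequent
 * guest writes are copied out again from this new point in time. */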
void backup_do_checkpoint(BlockJob *job, Error **errp)
{
    BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);
    int64_t len;

    assert(job->driver->job_type == BLOCK_JOB_TYPE_BACKUP);

    if (backup_job->sync_mode != MIRROR_SYNC_MODE_NONE) {
        error_setg(errp, "The backup job only supports block checkpoint in"
                   " sync=none mode");
        return;
    }

    len = DIV_ROUND_UP(backup_job->common.len, backup_job->cluster_size);
    bitmap_zero(backup_job->done_bitmap, len);
}

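/* Exported variants of the CoW request helpers above: convert a sector
 * range into cluster units before waiting on or registering requests. */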
void backup_wait_for_overlapping_requests(BlockJob *job, int64_t sector_num,
                                          int nb_sectors)
{
    BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);
    int64_t sectors_per_cluster = cluster_size_sectors(backup_job);
    int64_t start, end;

    assert(job->driver->job_type == BLOCK_JOB_TYPE_BACKUP);

    start = sector_num / sectors_per_cluster;
    end = DIV_ROUND_UP(sector_num + nb_sectors, sectors_per_cluster);
    wait_for_overlapping_requests(backup_job, start, end);
}

void backup_cow_request_begin(CowRequest *req, BlockJob *job,
                              int64_t sector_num,
                              int nb_sectors)
{
    BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);
    int64_t sectors_per_cluster = cluster_size_sectors(backup_job);
    int64_t start, end;

    assert(job->driver->job_type == BLOCK_JOB_TYPE_BACKUP);

    start = sector_num / sectors_per_cluster;
    end = DIV_ROUND_UP(sector_num + nb_sectors, sectors_per_cluster);
    cow_request_begin(req, backup_job, start, end);
}

void backup_cow_request_end(CowRequest *req)
{
    cow_request_end(req);
}

static const BlockJobDriver backup_job_driver = {
    .instance_size          = sizeof(BackupBlockJob),
    .job_type               = BLOCK_JOB_TYPE_BACKUP,
    .set_speed              = backup_set_speed,
    .commit                 = backup_commit,
    .abort                  = backup_abort,
    .attached_aio_context   = backup_attached_aio_context,
};

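/* Map an I/O error onto a BlockErrorAction according to the job's
 * on_source_error (reads) or on_target_error (writes) policy. */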
static BlockErrorAction backup_error_action(BackupBlockJob *job,
                                            bool read, int error)
{
    if (read) {
        return block_job_error_action(&job->common, job->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&job->common, job->on_target_error,
                                      false, error);
    }
}

typedef struct {
    int ret;
} BackupCompleteData;

static void backup_complete(BlockJob *job, void *opaque)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
    BackupCompleteData *data = opaque;

    blk_unref(s->target);

    block_job_completed(job, data->ret);
    g_free(data);
}

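/* Sleep for one slice, honouring the configured rate limit, and report
 * whether the job has been cancelled. */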
static bool coroutine_fn yield_and_check(BackupBlockJob *job)
{
    if (block_job_is_cancelled(&job->common)) {
        return true;
    }

    /* We need to yield so that bdrv_drain_all() returns.
     * (without this yield, the VM does not reboot)
     */
    if (job->common.speed) {
        uint64_t delay_ns = ratelimit_calculate_delay(&job->limit,
                                                      job->sectors_read);
        job->sectors_read = 0;
        block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, delay_ns);
    } else {
        block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, 0);
    }

    if (block_job_is_cancelled(&job->common)) {
        return true;
    }

    return false;
}

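/* sync=incremental: copy only the clusters marked dirty in sync_bitmap,
 * crediting skipped clusters to the progress meter so it stays in step. */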
static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
{
    bool error_is_read;
    int ret = 0;
    int clusters_per_iter;
    uint32_t granularity;
    int64_t sector;
    int64_t cluster;
    int64_t end;
    int64_t last_cluster = -1;
    int64_t sectors_per_cluster = cluster_size_sectors(job);
    HBitmapIter hbi;

    granularity = bdrv_dirty_bitmap_granularity(job->sync_bitmap);
    clusters_per_iter = MAX((granularity / job->cluster_size), 1);
    bdrv_dirty_iter_init(job->sync_bitmap, &hbi);

    /* Find the next dirty sector(s) */
    while ((sector = hbitmap_iter_next(&hbi)) != -1) {
        cluster = sector / sectors_per_cluster;

        /* Fake progress updates for any clusters we skipped */
        if (cluster != last_cluster + 1) {
            job->common.offset += ((cluster - last_cluster - 1) *
                                   job->cluster_size);
        }

        for (end = cluster + clusters_per_iter; cluster < end; cluster++) {
            do {
                if (yield_and_check(job)) {
                    return ret;
                }
                ret = backup_do_cow(job, cluster * sectors_per_cluster,
                                    sectors_per_cluster, &error_is_read,
                                    false);
                if ((ret < 0) &&
                    backup_error_action(job, error_is_read, -ret) ==
                    BLOCK_ERROR_ACTION_REPORT) {
                    return ret;
                }
            } while (ret < 0);
        }

        /* If the bitmap granularity is smaller than the backup granularity,
         * we need to advance the iterator pointer to the next cluster. */
        if (granularity < job->cluster_size) {
            bdrv_set_dirty_iter(&hbi, cluster * sectors_per_cluster);
        }

        last_cluster = cluster - 1;
    }

    /* Play some final catchup with the progress meter */
    end = DIV_ROUND_UP(job->common.len, job->cluster_size);
    if (last_cluster + 1 < end) {
        job->common.offset += ((end - last_cluster - 1) * job->cluster_size);
    }

    return ret;
}

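/* Coroutine entry point: install the before_write notifier and CoW request
 * tracking, copy data according to sync_mode, then drain any in-flight
 * backup_do_cow() calls before completing in the main loop. */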
static void coroutine_fn backup_run(void *opaque)
{
    BackupBlockJob *job = opaque;
    BackupCompleteData *data;
    BlockDriverState *bs = blk_bs(job->common.blk);
    BlockBackend *target = job->target;
    int64_t start, end;
    int64_t sectors_per_cluster = cluster_size_sectors(job);
    int ret = 0;

    QLIST_INIT(&job->inflight_reqs);
    qemu_co_rwlock_init(&job->flush_rwlock);

    start = 0;
    end = DIV_ROUND_UP(job->common.len, job->cluster_size);

    job->done_bitmap = bitmap_new(end);

    job->before_write.notify = backup_before_write_notify;
    bdrv_add_before_write_notifier(bs, &job->before_write);

    if (job->sync_mode == MIRROR_SYNC_MODE_NONE) {
        while (!block_job_is_cancelled(&job->common)) {
            /* Yield until the job is cancelled.  We just let our before_write
             * notify callback service CoW requests. */
            block_job_yield(&job->common);
        }
    } else if (job->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        ret = backup_run_incremental(job);
    } else {
        /* Both FULL and TOP sync modes require copying. */
        for (; start < end; start++) {
            bool error_is_read;
            if (yield_and_check(job)) {
                break;
            }

            if (job->sync_mode == MIRROR_SYNC_MODE_TOP) {
                int i, n;
                int alloced = 0;

                /* Check to see if these blocks are already in the
                 * backing file. */

                for (i = 0; i < sectors_per_cluster;) {
                    /* bdrv_is_allocated() only returns true/false based
                     * on the first set of sectors it comes across that
                     * are all in the same state.
                     * For that reason we must verify each sector in the
                     * backup cluster length.  We end up copying more than
                     * needed but at some point that is always the case. */
                    alloced =
                        bdrv_is_allocated(bs,
                                start * sectors_per_cluster + i,
                                sectors_per_cluster - i, &n);
                    i += n;

                    if (alloced == 1 || n == 0) {
                        break;
                    }
                }

                /* If the above loop never found any sectors that are in
                 * the topmost image, skip this backup. */
                if (alloced == 0) {
                    continue;
                }
            }
            /* In FULL sync mode we copy the whole drive. */
            ret = backup_do_cow(job, start * sectors_per_cluster,
                                sectors_per_cluster, &error_is_read, false);
            if (ret < 0) {
                /* Depending on error action, fail now or retry cluster */
                BlockErrorAction action =
                    backup_error_action(job, error_is_read, -ret);
                if (action == BLOCK_ERROR_ACTION_REPORT) {
                    break;
                } else {
                    start--;
                    continue;
                }
            }
        }
    }

    notifier_with_return_remove(&job->before_write);

    /* wait until pending backup_do_cow() calls have completed */
    qemu_co_rwlock_wrlock(&job->flush_rwlock);
    qemu_co_rwlock_unlock(&job->flush_rwlock);
    g_free(job->done_bitmap);

    bdrv_op_unblock_all(blk_bs(target), job->common.blocker);

    data = g_malloc(sizeof(*data));
    data->ret = ret;
    block_job_defer_to_main_loop(&job->common, backup_complete, data);
}

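/* Create and start a backup block job: validate source and target, set up
 * the incremental bitmap successor if requested, choose a cluster size
 * compatible with the target, and enter the backup_run() coroutine. */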
void backup_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, int64_t speed,
                  MirrorSyncMode sync_mode, BdrvDirtyBitmap *sync_bitmap,
                  bool compress,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  BlockCompletionFunc *cb, void *opaque,
                  BlockJobTxn *txn, Error **errp)
{
    int64_t len;
    BlockDriverInfo bdi;
    BackupBlockJob *job = NULL;
    int ret;

    assert(bs);
    assert(target);

    if (bs == target) {
        error_setg(errp, "Source and target cannot be the same");
        return;
    }

    if (!bdrv_is_inserted(bs)) {
        error_setg(errp, "Device is not inserted: %s",
                   bdrv_get_device_name(bs));
        return;
    }

    if (!bdrv_is_inserted(target)) {
        error_setg(errp, "Device is not inserted: %s",
                   bdrv_get_device_name(target));
        return;
    }

    if (compress && target->drv->bdrv_co_pwritev_compressed == NULL) {
        error_setg(errp, "Compression is not supported for this drive %s",
                   bdrv_get_device_name(target));
        return;
    }

    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_BACKUP_SOURCE, errp)) {
        return;
    }

    if (bdrv_op_is_blocked(target, BLOCK_OP_TYPE_BACKUP_TARGET, errp)) {
        return;
    }

    if (sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        if (!sync_bitmap) {
            error_setg(errp, "must provide a valid bitmap name for "
                             "\"incremental\" sync mode");
            return;
        }

        /* Create a new bitmap, and freeze/disable this one. */
        if (bdrv_dirty_bitmap_create_successor(bs, sync_bitmap, errp) < 0) {
            return;
        }
    } else if (sync_bitmap) {
        error_setg(errp,
                   "a sync_bitmap was provided to backup_start, "
                   "but received an incompatible sync_mode (%s)",
                   MirrorSyncMode_lookup[sync_mode]);
        return;
    }

    len = bdrv_getlength(bs);
    if (len < 0) {
        error_setg_errno(errp, -len, "unable to get length for '%s'",
                         bdrv_get_device_name(bs));
        goto error;
    }

    job = block_job_create(job_id, &backup_job_driver, bs, speed,
                           cb, opaque, errp);
    if (!job) {
        goto error;
    }

    job->target = blk_new();
    blk_insert_bs(job->target, target);

    job->on_source_error = on_source_error;
    job->on_target_error = on_target_error;
    job->sync_mode = sync_mode;
    job->sync_bitmap = sync_mode == MIRROR_SYNC_MODE_INCREMENTAL ?
                       sync_bitmap : NULL;
    job->compress = compress;

    /* If there is no backing file on the target, we cannot rely on COW if our
     * backup cluster size is smaller than the target cluster size. Even for
     * targets with a backing file, try to avoid COW if possible. */
    ret = bdrv_get_info(target, &bdi);
    if (ret < 0 && !target->backing) {
        error_setg_errno(errp, -ret,
            "Couldn't determine the cluster size of the target image, "
            "which has no backing file");
        error_append_hint(errp,
            "Aborting, since this may create an unusable destination image\n");
        goto error;
    } else if (ret < 0 && target->backing) {
        /* Not fatal; just trudge on ahead. */
        job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
    } else {
        job->cluster_size = MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
    }

    bdrv_op_block_all(target, job->common.blocker);
    job->common.len = len;
    job->common.co = qemu_coroutine_create(backup_run, job);
    block_job_txn_add_job(txn, &job->common);
    qemu_coroutine_enter(job->common.co);
    return;

 error:
    if (sync_bitmap) {
        bdrv_reclaim_dirty_bitmap(bs, sync_bitmap, NULL);
    }
    if (job) {
        blk_unref(job->target);
        block_job_unref(&job->common);
    }
}