/*
 * copy-before-write filter driver
 *
 * The driver performs the Copy-Before-Write (CBW) operation: it is injected
 * above some node, and before each write it copies the _old_ data to the
 * target node.
 *
 * Copyright (c) 2018-2021 Virtuozzo International GmbH.
 *
 * Author:
 *  Sementsov-Ogievskiy Vladimir <vsementsov@virtuozzo.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
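
/*
 * Illustrative use (a sketch; node names and values below are examples, not
 * taken from this file): the filter can be inserted explicitly with
 * blockdev-add, e.g. for image fleecing, given an existing source node
 * "disk0" and target node "tmp0":
 *
 *   { "execute": "blockdev-add",
 *     "arguments": { "driver": "copy-before-write", "node-name": "cbw0",
 *                    "file": "disk0", "target": "tmp0",
 *                    "on-cbw-error": "break-snapshot",
 *                    "cbw-timeout": 45 } }
 *
 * More commonly the filter is inserted internally, via bdrv_cbw_append()
 * below.
 */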

#include "qemu/osdep.h"
#include "qapi/qmp/qjson.h"

#include "sysemu/block-backend.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "block/block_int.h"
#include "block/qdict.h"
#include "block/block-copy.h"
#include "block/dirty-bitmap.h"

#include "block/copy-before-write.h"
#include "block/reqlist.h"

#include "qapi/qapi-visit-block-core.h"

typedef struct BDRVCopyBeforeWriteState {
    BlockCopyState *bcs;
    BdrvChild *target;
    OnCbwError on_cbw_error;
    uint64_t cbw_timeout_ns;
    bool discard_source;

    /*
     * @lock: protects access to @access_bitmap, @done_bitmap and
     * @frozen_read_reqs
     */
    CoMutex lock;

    /*
     * @access_bitmap: represents areas allowed for reading by the fleecing
     * user. Reading from non-dirty areas leads to -EACCES.
     */
    BdrvDirtyBitmap *access_bitmap;

    /*
     * @done_bitmap: represents areas that were successfully copied to @target
     * by copy-before-write operations.
     */
    BdrvDirtyBitmap *done_bitmap;

    /*
     * @frozen_read_reqs: current read requests for the fleecing user in the
     * bs->file node. These areas must not be rewritten by the guest. There
     * can be multiple overlapping read requests.
     */
    BlockReqList frozen_read_reqs;

    /*
     * @snapshot_error is normally zero. But on the first copy-before-write
     * failure when @on_cbw_error == ON_CBW_ERROR_BREAK_SNAPSHOT,
     * @snapshot_error takes the value of that error (<0). After that, all
     * in-flight and further snapshot-API requests will fail with that error.
     */
    int snapshot_error;
} BDRVCopyBeforeWriteState;

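/* Regular guest reads are passed straight through to the filtered child. */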
static int coroutine_fn GRAPH_RDLOCK
cbw_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
              QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    return bdrv_co_preadv(bs->file, offset, bytes, qiov, flags);
}

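/*
 * Completion callback for block_copy() calls started in
 * cbw_do_copy_before_write(); it drops the in-flight reference taken there.
 */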
static void block_copy_cb(void *opaque)
{
    BlockDriverState *bs = opaque;

    bdrv_dec_in_flight(bs);
}

/*
 * Do the copy-before-write operation.
 *
 * On failure, the guest request must fail too.
 *
 * On success, we also wait for all in-flight fleecing read requests in the
 * source node, and it's guaranteed that after a successful return from
 * cbw_do_copy_before_write() there are no such requests and none will appear.
 */
static coroutine_fn int cbw_do_copy_before_write(BlockDriverState *bs,
        uint64_t offset, uint64_t bytes, BdrvRequestFlags flags)
{
    BDRVCopyBeforeWriteState *s = bs->opaque;
    int ret;
    uint64_t off, end;
    int64_t cluster_size = block_copy_cluster_size(s->bcs);

    if (flags & BDRV_REQ_WRITE_UNCHANGED) {
        return 0;
    }

    if (s->snapshot_error) {
        return 0;
    }

    off = QEMU_ALIGN_DOWN(offset, cluster_size);
    end = QEMU_ALIGN_UP(offset + bytes, cluster_size);

    /*
     * Increase in_flight, so that in case of a timed-out block-copy, the
     * remaining background block_copy() request (which can't be immediately
     * cancelled by the timeout) is accounted for in bs->in_flight. This way
     * we are sure that on bs close() we will first wait for all timed-out
     * but still running block_copy calls.
     */
    bdrv_inc_in_flight(bs);
    ret = block_copy(s->bcs, off, end - off, true, s->cbw_timeout_ns,
                     block_copy_cb, bs);
    if (ret < 0 && s->on_cbw_error == ON_CBW_ERROR_BREAK_GUEST_WRITE) {
        return ret;
    }

    WITH_QEMU_LOCK_GUARD(&s->lock) {
        if (ret < 0) {
            assert(s->on_cbw_error == ON_CBW_ERROR_BREAK_SNAPSHOT);
            if (!s->snapshot_error) {
                s->snapshot_error = ret;
            }
        } else {
            bdrv_set_dirty_bitmap(s->done_bitmap, off, end - off);
        }
        reqlist_wait_all(&s->frozen_read_reqs, off, end - off, &s->lock);
    }

    return 0;
}

static int coroutine_fn GRAPH_RDLOCK
cbw_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes)
{
    int ret = cbw_do_copy_before_write(bs, offset, bytes, 0);
    if (ret < 0) {
        return ret;
    }

    return bdrv_co_pdiscard(bs->file, offset, bytes);
}

static int coroutine_fn GRAPH_RDLOCK
cbw_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes,
                     BdrvRequestFlags flags)
{
    int ret = cbw_do_copy_before_write(bs, offset, bytes, flags);
    if (ret < 0) {
        return ret;
    }

    return bdrv_co_pwrite_zeroes(bs->file, offset, bytes, flags);
}

static coroutine_fn GRAPH_RDLOCK
int cbw_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
                   QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    int ret = cbw_do_copy_before_write(bs, offset, bytes, flags);
    if (ret < 0) {
        return ret;
    }

    return bdrv_co_pwritev(bs->file, offset, bytes, qiov, flags);
}

static int coroutine_fn GRAPH_RDLOCK cbw_co_flush(BlockDriverState *bs)
{
    if (!bs->file) {
        return 0;
    }

    return bdrv_co_flush(bs->file->bs);
}

/*
 * If @offset is not accessible, return NULL.
 *
 * Otherwise, set @pnum to a number of bytes that are accessible from @file
 * (@file is set to bs->file or to s->target). Return a newly allocated
 * BlockReq object that should then be passed to cbw_snapshot_read_unlock().
 *
 * It's guaranteed that guest writes will not interfere with the region until
 * cbw_snapshot_read_unlock() is called.
 */
static BlockReq * coroutine_fn GRAPH_RDLOCK
cbw_snapshot_read_lock(BlockDriverState *bs, int64_t offset, int64_t bytes,
                       int64_t *pnum, BdrvChild **file)
{
    BDRVCopyBeforeWriteState *s = bs->opaque;
    BlockReq *req = g_new(BlockReq, 1);
    bool done;

    QEMU_LOCK_GUARD(&s->lock);

    if (s->snapshot_error) {
        g_free(req);
        return NULL;
    }

    if (bdrv_dirty_bitmap_next_zero(s->access_bitmap, offset, bytes) != -1) {
        g_free(req);
        return NULL;
    }

    done = bdrv_dirty_bitmap_status(s->done_bitmap, offset, bytes, pnum);
    if (done) {
        /*
         * Special invalid BlockReq, that is handled in
         * cbw_snapshot_read_unlock(). We don't need to lock anything to read
         * from s->target.
         */
        *req = (BlockReq) {.offset = -1, .bytes = -1};
        *file = s->target;
    } else {
        reqlist_init_req(&s->frozen_read_reqs, req, offset, bytes);
        *file = bs->file;
    }

    return req;
}

static coroutine_fn void
cbw_snapshot_read_unlock(BlockDriverState *bs, BlockReq *req)
{
    BDRVCopyBeforeWriteState *s = bs->opaque;

    if (req->offset == -1 && req->bytes == -1) {
        g_free(req);
        return;
    }

    QEMU_LOCK_GUARD(&s->lock);

    reqlist_remove_req(req);
    g_free(req);
}

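/*
 * Read from the snapshot view: per cbw_snapshot_read_lock(), already-copied
 * areas are read from the target child, the rest from the source child while
 * it is frozen against guest writes.
 */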
static int coroutine_fn GRAPH_RDLOCK
cbw_co_preadv_snapshot(BlockDriverState *bs, int64_t offset, int64_t bytes,
                       QEMUIOVector *qiov, size_t qiov_offset)
{
    BlockReq *req;
    BdrvChild *file;
    int ret;

    /* TODO: upgrade to async loop using AioTask */
    while (bytes) {
        int64_t cur_bytes;

        req = cbw_snapshot_read_lock(bs, offset, bytes, &cur_bytes, &file);
        if (!req) {
            return -EACCES;
        }

        ret = bdrv_co_preadv_part(file, offset, cur_bytes,
                                  qiov, qiov_offset, 0);
        cbw_snapshot_read_unlock(bs, req);
        if (ret < 0) {
            return ret;
        }

        bytes -= cur_bytes;
        offset += cur_bytes;
        qiov_offset += cur_bytes;
    }

    return 0;
}

static int coroutine_fn GRAPH_RDLOCK
cbw_co_snapshot_block_status(BlockDriverState *bs,
                             bool want_zero, int64_t offset, int64_t bytes,
                             int64_t *pnum, int64_t *map,
                             BlockDriverState **file)
{
    BDRVCopyBeforeWriteState *s = bs->opaque;
    BlockReq *req;
    int ret;
    int64_t cur_bytes;
    BdrvChild *child;

    req = cbw_snapshot_read_lock(bs, offset, bytes, &cur_bytes, &child);
    if (!req) {
        return -EACCES;
    }

    ret = bdrv_co_block_status(child->bs, offset, cur_bytes, pnum, map, file);
    if (child == s->target) {
        /*
         * We refer to s->target only for areas that we've written to it.
         * And we cannot report unallocated blocks in s->target: this would
         * break the generic block-status-above logic, which would go to the
         * copy-before-write filtered child in that case.
         */
        assert(ret & BDRV_BLOCK_ALLOCATED);
    }

    cbw_snapshot_read_unlock(bs, req);

    return ret;
}

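/*
 * The fleecing user discards areas of the snapshot it no longer needs: the
 * affected clusters (rounded inward to cluster boundaries) are removed from
 * @access_bitmap and from the block-copy state, and discarded on the target.
 */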
static int coroutine_fn GRAPH_RDLOCK
cbw_co_pdiscard_snapshot(BlockDriverState *bs, int64_t offset, int64_t bytes)
{
    BDRVCopyBeforeWriteState *s = bs->opaque;
    uint32_t cluster_size = block_copy_cluster_size(s->bcs);
    int64_t aligned_offset = QEMU_ALIGN_UP(offset, cluster_size);
    int64_t aligned_end = QEMU_ALIGN_DOWN(offset + bytes, cluster_size);
    int64_t aligned_bytes;

    if (aligned_end <= aligned_offset) {
        return 0;
    }
    aligned_bytes = aligned_end - aligned_offset;

    WITH_QEMU_LOCK_GUARD(&s->lock) {
        bdrv_reset_dirty_bitmap(s->access_bitmap, aligned_offset,
                                aligned_bytes);
    }

    block_copy_reset(s->bcs, aligned_offset, aligned_bytes);

    return bdrv_co_pdiscard(s->target, aligned_offset, aligned_bytes);
}

static void GRAPH_RDLOCK cbw_refresh_filename(BlockDriverState *bs)
{
    pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
            bs->file->bs->filename);
}

static void GRAPH_RDLOCK
cbw_child_perm(BlockDriverState *bs, BdrvChild *c, BdrvChildRole role,
               BlockReopenQueue *reopen_queue,
               uint64_t perm, uint64_t shared,
               uint64_t *nperm, uint64_t *nshared)
{
    BDRVCopyBeforeWriteState *s = bs->opaque;

    if (!(role & BDRV_CHILD_FILTERED)) {
        /*
         * Target child
         *
         * Share writes to the target (child_file), so as not to interfere
         * with guest writes to its disk, which may be in the target's backing
         * chain. Can't resize during a backup block job because we check the
         * size only upfront.
         */
        *nshared = BLK_PERM_ALL & ~BLK_PERM_RESIZE;
        *nperm = BLK_PERM_WRITE;
    } else {
        /* Source child */
        bdrv_default_perms(bs, c, role, reopen_queue,
                           perm, shared, nperm, nshared);

        if (!QLIST_EMPTY(&bs->parents)) {
            /*
             * Note that the source child may be shared with a backup job.
             * The backup job creates its own blk parent on the
             * copy-before-write node, so this works even if the source node
             * does not have any parents before the backup starts.
             */
            *nperm = *nperm | BLK_PERM_CONSISTENT_READ;
            if (s->discard_source) {
                *nperm = *nperm | BLK_PERM_WRITE;
            }

            *nshared &= ~(BLK_PERM_WRITE | BLK_PERM_RESIZE);
        }
    }
}

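/*
 * Build a BlockdevOptionsCbw from @options via the QAPI visitor. Keys that
 * are consumed that way are deleted from @options; "file" and "target" are
 * left in place for the child-open calls in cbw_open().
 */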
static BlockdevOptions *cbw_parse_options(QDict *options, Error **errp)
{
    BlockdevOptions *opts = NULL;
    Visitor *v = NULL;

    qdict_put_str(options, "driver", "copy-before-write");

    v = qobject_input_visitor_new_flat_confused(options, errp);
    if (!v) {
        goto out;
    }

    visit_type_BlockdevOptions(v, NULL, &opts, errp);
    if (!opts) {
        goto out;
    }

    /*
     * Delete the options that we have parsed through the BlockdevOptions
     * object above.
     */
    qdict_extract_subqdict(options, NULL, "bitmap");
    qdict_del(options, "on-cbw-error");
    qdict_del(options, "cbw-timeout");

out:
    visit_free(v);
    qdict_del(options, "driver");

    return opts;
}

static int cbw_open(BlockDriverState *bs, QDict *options, int flags,
                    Error **errp)
{
    ERRP_GUARD();
    BDRVCopyBeforeWriteState *s = bs->opaque;
    BdrvDirtyBitmap *bitmap = NULL;
    int64_t cluster_size;
    g_autoptr(BlockdevOptions) full_opts = NULL;
    BlockdevOptionsCbw *opts;
    int ret;

    full_opts = cbw_parse_options(options, errp);
    if (!full_opts) {
        return -EINVAL;
    }
    assert(full_opts->driver == BLOCKDEV_DRIVER_COPY_BEFORE_WRITE);
    opts = &full_opts->u.copy_before_write;

    ret = bdrv_open_file_child(NULL, options, "file", bs, errp);
    if (ret < 0) {
        return ret;
    }

    s->target = bdrv_open_child(NULL, options, "target", bs, &child_of_bds,
                                BDRV_CHILD_DATA, false, errp);
    if (!s->target) {
        return -EINVAL;
    }

    GRAPH_RDLOCK_GUARD_MAINLOOP();

    if (opts->bitmap) {
        bitmap = block_dirty_bitmap_lookup(opts->bitmap->node,
                                           opts->bitmap->name, NULL, errp);
        if (!bitmap) {
            return -EINVAL;
        }
    }
    s->on_cbw_error = opts->has_on_cbw_error ? opts->on_cbw_error :
            ON_CBW_ERROR_BREAK_GUEST_WRITE;
    s->cbw_timeout_ns = opts->has_cbw_timeout ?
            opts->cbw_timeout * NANOSECONDS_PER_SECOND : 0;

    bs->total_sectors = bs->file->bs->total_sectors;
    bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED |
            (BDRV_REQ_FUA & bs->file->bs->supported_write_flags);
    bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED |
            ((BDRV_REQ_FUA | BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK) &
             bs->file->bs->supported_zero_flags);

    s->discard_source = flags & BDRV_O_CBW_DISCARD_SOURCE;
    s->bcs = block_copy_state_new(bs->file, s->target, bs, bitmap,
                                  flags & BDRV_O_CBW_DISCARD_SOURCE, errp);
    if (!s->bcs) {
        error_prepend(errp, "Cannot create block-copy-state: ");
        return -EINVAL;
    }

    cluster_size = block_copy_cluster_size(s->bcs);

    s->done_bitmap = bdrv_create_dirty_bitmap(bs, cluster_size, NULL, errp);
    if (!s->done_bitmap) {
        return -EINVAL;
    }
    bdrv_disable_dirty_bitmap(s->done_bitmap);

    /* s->access_bitmap starts equal to bcs bitmap */
    s->access_bitmap = bdrv_create_dirty_bitmap(bs, cluster_size, NULL, errp);
    if (!s->access_bitmap) {
        return -EINVAL;
    }
    bdrv_disable_dirty_bitmap(s->access_bitmap);
    bdrv_dirty_bitmap_merge_internal(s->access_bitmap,
                                     block_copy_dirty_bitmap(s->bcs), NULL,
                                     true);

    qemu_co_mutex_init(&s->lock);
    QLIST_INIT(&s->frozen_read_reqs);
    return 0;
}

static void cbw_close(BlockDriverState *bs)
{
    BDRVCopyBeforeWriteState *s = bs->opaque;

    bdrv_release_dirty_bitmap(s->access_bitmap);
    bdrv_release_dirty_bitmap(s->done_bitmap);

    block_copy_state_free(s->bcs);
    s->bcs = NULL;
}

static BlockDriver bdrv_cbw_filter = {
    .format_name = "copy-before-write",
    .instance_size = sizeof(BDRVCopyBeforeWriteState),

    .bdrv_open = cbw_open,
    .bdrv_close = cbw_close,

    .bdrv_co_preadv = cbw_co_preadv,
    .bdrv_co_pwritev = cbw_co_pwritev,
    .bdrv_co_pwrite_zeroes = cbw_co_pwrite_zeroes,
    .bdrv_co_pdiscard = cbw_co_pdiscard,
    .bdrv_co_flush = cbw_co_flush,

    .bdrv_co_preadv_snapshot = cbw_co_preadv_snapshot,
    .bdrv_co_pdiscard_snapshot = cbw_co_pdiscard_snapshot,
    .bdrv_co_snapshot_block_status = cbw_co_snapshot_block_status,

    .bdrv_refresh_filename = cbw_refresh_filename,

    .bdrv_child_perm = cbw_child_perm,

    .is_filter = true,
};

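/*
 * Insert the copy-before-write filter above @source, copying old data to
 * @target. On success, return the new filter node and set *bcs to its
 * BlockCopyState; undo with bdrv_cbw_drop(). This is how the backup job sets
 * the filter up internally.
 */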
BlockDriverState *bdrv_cbw_append(BlockDriverState *source,
                                  BlockDriverState *target,
                                  const char *filter_node_name,
                                  bool discard_source,
                                  BlockCopyState **bcs,
                                  Error **errp)
{
    BDRVCopyBeforeWriteState *state;
    BlockDriverState *top;
    QDict *opts;
    int flags = BDRV_O_RDWR | (discard_source ? BDRV_O_CBW_DISCARD_SOURCE : 0);

    assert(source->total_sectors == target->total_sectors);
    GLOBAL_STATE_CODE();

    opts = qdict_new();
    qdict_put_str(opts, "driver", "copy-before-write");
    if (filter_node_name) {
        qdict_put_str(opts, "node-name", filter_node_name);
    }
    qdict_put_str(opts, "file", bdrv_get_node_name(source));
    qdict_put_str(opts, "target", bdrv_get_node_name(target));

    top = bdrv_insert_node(source, opts, flags, errp);
    if (!top) {
        return NULL;
    }

    state = top->opaque;
    *bcs = state->bcs;

    return top;
}

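/* Remove a filter inserted by bdrv_cbw_append() and drop its reference. */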
void bdrv_cbw_drop(BlockDriverState *bs)
{
    GLOBAL_STATE_CODE();
    bdrv_drop_filter(bs, &error_abort);
    bdrv_unref(bs);
}

static void cbw_init(void)
{
    bdrv_register(&bdrv_cbw_filter);
}

block_init(cbw_init);