1 /* 2 * Block driver for the QCOW version 2 format 3 * 4 * Copyright (c) 2004-2006 Fabrice Bellard 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a copy 7 * of this software and associated documentation files (the "Software"), to deal 8 * in the Software without restriction, including without limitation the rights 9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 * copies of the Software, and to permit persons to whom the Software is 11 * furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 * THE SOFTWARE. 23 */ 24 25 #include "qemu/osdep.h" 26 27 #include "block/qdict.h" 28 #include "sysemu/block-backend.h" 29 #include "qemu/main-loop.h" 30 #include "qemu/module.h" 31 #include "qcow2.h" 32 #include "qemu/error-report.h" 33 #include "qapi/error.h" 34 #include "qapi/qapi-events-block-core.h" 35 #include "qapi/qmp/qdict.h" 36 #include "qapi/qmp/qstring.h" 37 #include "trace.h" 38 #include "qemu/option_int.h" 39 #include "qemu/cutils.h" 40 #include "qemu/bswap.h" 41 #include "qemu/memalign.h" 42 #include "qapi/qobject-input-visitor.h" 43 #include "qapi/qapi-visit-block-core.h" 44 #include "crypto.h" 45 #include "block/aio_task.h" 46 #include "block/dirty-bitmap.h" 47 48 /* 49 Differences with QCOW: 50 51 - Support for multiple incremental snapshots. 52 - Memory management by reference counts. 53 - Clusters which have a reference count of one have the bit 54 QCOW_OFLAG_COPIED to optimize write performance. 55 - Size of compressed clusters is stored in sectors to reduce bit usage 56 in the cluster offsets. 57 - Support for storing additional data (such as the VM state) in the 58 snapshots. 59 - If a backing store is used, the cluster size is not constrained 60 (could be backported to QCOW). 61 - L2 tables have always a size of one cluster. 
62 */ 63 64 65 typedef struct { 66 uint32_t magic; 67 uint32_t len; 68 } QEMU_PACKED QCowExtension; 69 70 #define QCOW2_EXT_MAGIC_END 0 71 #define QCOW2_EXT_MAGIC_BACKING_FORMAT 0xe2792aca 72 #define QCOW2_EXT_MAGIC_FEATURE_TABLE 0x6803f857 73 #define QCOW2_EXT_MAGIC_CRYPTO_HEADER 0x0537be77 74 #define QCOW2_EXT_MAGIC_BITMAPS 0x23852875 75 #define QCOW2_EXT_MAGIC_DATA_FILE 0x44415441 76 77 static int coroutine_fn 78 qcow2_co_preadv_compressed(BlockDriverState *bs, 79 uint64_t l2_entry, 80 uint64_t offset, 81 uint64_t bytes, 82 QEMUIOVector *qiov, 83 size_t qiov_offset); 84 85 static int qcow2_probe(const uint8_t *buf, int buf_size, const char *filename) 86 { 87 const QCowHeader *cow_header = (const void *)buf; 88 89 if (buf_size >= sizeof(QCowHeader) && 90 be32_to_cpu(cow_header->magic) == QCOW_MAGIC && 91 be32_to_cpu(cow_header->version) >= 2) 92 return 100; 93 else 94 return 0; 95 } 96 97 98 static int qcow2_crypto_hdr_read_func(QCryptoBlock *block, size_t offset, 99 uint8_t *buf, size_t buflen, 100 void *opaque, Error **errp) 101 { 102 BlockDriverState *bs = opaque; 103 BDRVQcow2State *s = bs->opaque; 104 ssize_t ret; 105 106 if ((offset + buflen) > s->crypto_header.length) { 107 error_setg(errp, "Request for data outside of extension header"); 108 return -1; 109 } 110 111 ret = bdrv_pread(bs->file, s->crypto_header.offset + offset, buflen, buf, 112 0); 113 if (ret < 0) { 114 error_setg_errno(errp, -ret, "Could not read encryption header"); 115 return -1; 116 } 117 return 0; 118 } 119 120 121 static int coroutine_fn GRAPH_RDLOCK 122 qcow2_crypto_hdr_init_func(QCryptoBlock *block, size_t headerlen, void *opaque, 123 Error **errp) 124 { 125 BlockDriverState *bs = opaque; 126 BDRVQcow2State *s = bs->opaque; 127 int64_t ret; 128 int64_t clusterlen; 129 130 ret = qcow2_alloc_clusters(bs, headerlen); 131 if (ret < 0) { 132 error_setg_errno(errp, -ret, 133 "Cannot allocate cluster for LUKS header size %zu", 134 headerlen); 135 return -1; 136 } 137 138 s->crypto_header.length = headerlen; 139 s->crypto_header.offset = ret; 140 141 /* 142 * Zero fill all space in cluster so it has predictable 143 * content, as we may not initialize some regions of the 144 * header (eg only 1 out of 8 key slots will be initialized) 145 */ 146 clusterlen = size_to_clusters(s, headerlen) * s->cluster_size; 147 assert(qcow2_pre_write_overlap_check(bs, 0, ret, clusterlen, false) == 0); 148 ret = bdrv_co_pwrite_zeroes(bs->file, ret, clusterlen, 0); 149 if (ret < 0) { 150 error_setg_errno(errp, -ret, "Could not zero fill encryption header"); 151 return -1; 152 } 153 154 return 0; 155 } 156 157 158 /* The graph lock must be held when called in coroutine context */ 159 static int coroutine_mixed_fn 160 qcow2_crypto_hdr_write_func(QCryptoBlock *block, size_t offset, 161 const uint8_t *buf, size_t buflen, 162 void *opaque, Error **errp) 163 { 164 BlockDriverState *bs = opaque; 165 BDRVQcow2State *s = bs->opaque; 166 ssize_t ret; 167 168 if ((offset + buflen) > s->crypto_header.length) { 169 error_setg(errp, "Request for data outside of extension header"); 170 return -1; 171 } 172 173 ret = bdrv_pwrite(bs->file, s->crypto_header.offset + offset, buflen, buf, 174 0); 175 if (ret < 0) { 176 error_setg_errno(errp, -ret, "Could not read encryption header"); 177 return -1; 178 } 179 return 0; 180 } 181 182 static QDict* 183 qcow2_extract_crypto_opts(QemuOpts *opts, const char *fmt, Error **errp) 184 { 185 QDict *cryptoopts_qdict; 186 QDict *opts_qdict; 187 188 /* Extract "encrypt." 
options into a qdict */ 189 opts_qdict = qemu_opts_to_qdict(opts, NULL); 190 qdict_extract_subqdict(opts_qdict, &cryptoopts_qdict, "encrypt."); 191 qobject_unref(opts_qdict); 192 qdict_put_str(cryptoopts_qdict, "format", fmt); 193 return cryptoopts_qdict; 194 } 195 196 /* 197 * read qcow2 extension and fill bs 198 * start reading from start_offset 199 * finish reading upon magic of value 0 or when end_offset reached 200 * unknown magic is skipped (future extension this version knows nothing about) 201 * return 0 upon success, non-0 otherwise 202 */ 203 static int coroutine_fn GRAPH_RDLOCK 204 qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset, 205 uint64_t end_offset, void **p_feature_table, 206 int flags, bool *need_update_header, Error **errp) 207 { 208 BDRVQcow2State *s = bs->opaque; 209 QCowExtension ext; 210 uint64_t offset; 211 int ret; 212 Qcow2BitmapHeaderExt bitmaps_ext; 213 214 if (need_update_header != NULL) { 215 *need_update_header = false; 216 } 217 218 #ifdef DEBUG_EXT 219 printf("qcow2_read_extensions: start=%ld end=%ld\n", start_offset, end_offset); 220 #endif 221 offset = start_offset; 222 while (offset < end_offset) { 223 224 #ifdef DEBUG_EXT 225 /* Sanity check */ 226 if (offset > s->cluster_size) 227 printf("qcow2_read_extension: suspicious offset %lu\n", offset); 228 229 printf("attempting to read extended header in offset %lu\n", offset); 230 #endif 231 232 ret = bdrv_co_pread(bs->file, offset, sizeof(ext), &ext, 0); 233 if (ret < 0) { 234 error_setg_errno(errp, -ret, "qcow2_read_extension: ERROR: " 235 "pread fail from offset %" PRIu64, offset); 236 return 1; 237 } 238 ext.magic = be32_to_cpu(ext.magic); 239 ext.len = be32_to_cpu(ext.len); 240 offset += sizeof(ext); 241 #ifdef DEBUG_EXT 242 printf("ext.magic = 0x%x\n", ext.magic); 243 #endif 244 if (offset > end_offset || ext.len > end_offset - offset) { 245 error_setg(errp, "Header extension too large"); 246 return -EINVAL; 247 } 248 249 switch (ext.magic) { 250 case QCOW2_EXT_MAGIC_END: 251 return 0; 252 253 case QCOW2_EXT_MAGIC_BACKING_FORMAT: 254 if (ext.len >= sizeof(bs->backing_format)) { 255 error_setg(errp, "ERROR: ext_backing_format: len=%" PRIu32 256 " too large (>=%zu)", ext.len, 257 sizeof(bs->backing_format)); 258 return 2; 259 } 260 ret = bdrv_co_pread(bs->file, offset, ext.len, bs->backing_format, 0); 261 if (ret < 0) { 262 error_setg_errno(errp, -ret, "ERROR: ext_backing_format: " 263 "Could not read format name"); 264 return 3; 265 } 266 bs->backing_format[ext.len] = '\0'; 267 s->image_backing_format = g_strdup(bs->backing_format); 268 #ifdef DEBUG_EXT 269 printf("Qcow2: Got format extension %s\n", bs->backing_format); 270 #endif 271 break; 272 273 case QCOW2_EXT_MAGIC_FEATURE_TABLE: 274 if (p_feature_table != NULL) { 275 void *feature_table = g_malloc0(ext.len + 2 * sizeof(Qcow2Feature)); 276 ret = bdrv_co_pread(bs->file, offset, ext.len, feature_table, 0); 277 if (ret < 0) { 278 error_setg_errno(errp, -ret, "ERROR: ext_feature_table: " 279 "Could not read table"); 280 g_free(feature_table); 281 return ret; 282 } 283 284 *p_feature_table = feature_table; 285 } 286 break; 287 288 case QCOW2_EXT_MAGIC_CRYPTO_HEADER: { 289 unsigned int cflags = 0; 290 if (s->crypt_method_header != QCOW_CRYPT_LUKS) { 291 error_setg(errp, "CRYPTO header extension only " 292 "expected with LUKS encryption method"); 293 return -EINVAL; 294 } 295 if (ext.len != sizeof(Qcow2CryptoHeaderExtension)) { 296 error_setg(errp, "CRYPTO header extension size %u, " 297 "but expected size %zu", ext.len, 298 
sizeof(Qcow2CryptoHeaderExtension)); 299 return -EINVAL; 300 } 301 302 ret = bdrv_co_pread(bs->file, offset, ext.len, &s->crypto_header, 0); 303 if (ret < 0) { 304 error_setg_errno(errp, -ret, 305 "Unable to read CRYPTO header extension"); 306 return ret; 307 } 308 s->crypto_header.offset = be64_to_cpu(s->crypto_header.offset); 309 s->crypto_header.length = be64_to_cpu(s->crypto_header.length); 310 311 if ((s->crypto_header.offset % s->cluster_size) != 0) { 312 error_setg(errp, "Encryption header offset '%" PRIu64 "' is " 313 "not a multiple of cluster size '%u'", 314 s->crypto_header.offset, s->cluster_size); 315 return -EINVAL; 316 } 317 318 if (flags & BDRV_O_NO_IO) { 319 cflags |= QCRYPTO_BLOCK_OPEN_NO_IO; 320 } 321 s->crypto = qcrypto_block_open(s->crypto_opts, "encrypt.", 322 qcow2_crypto_hdr_read_func, 323 bs, cflags, QCOW2_MAX_THREADS, errp); 324 if (!s->crypto) { 325 return -EINVAL; 326 } 327 } break; 328 329 case QCOW2_EXT_MAGIC_BITMAPS: 330 if (ext.len != sizeof(bitmaps_ext)) { 331 error_setg_errno(errp, -ret, "bitmaps_ext: " 332 "Invalid extension length"); 333 return -EINVAL; 334 } 335 336 if (!(s->autoclear_features & QCOW2_AUTOCLEAR_BITMAPS)) { 337 if (s->qcow_version < 3) { 338 /* Let's be a bit more specific */ 339 warn_report("This qcow2 v2 image contains bitmaps, but " 340 "they may have been modified by a program " 341 "without persistent bitmap support; so now " 342 "they must all be considered inconsistent"); 343 } else { 344 warn_report("a program lacking bitmap support " 345 "modified this file, so all bitmaps are now " 346 "considered inconsistent"); 347 } 348 error_printf("Some clusters may be leaked, " 349 "run 'qemu-img check -r' on the image " 350 "file to fix."); 351 if (need_update_header != NULL) { 352 /* Updating is needed to drop invalid bitmap extension. 
*/ 353 *need_update_header = true; 354 } 355 break; 356 } 357 358 ret = bdrv_co_pread(bs->file, offset, ext.len, &bitmaps_ext, 0); 359 if (ret < 0) { 360 error_setg_errno(errp, -ret, "bitmaps_ext: " 361 "Could not read ext header"); 362 return ret; 363 } 364 365 if (bitmaps_ext.reserved32 != 0) { 366 error_setg_errno(errp, -ret, "bitmaps_ext: " 367 "Reserved field is not zero"); 368 return -EINVAL; 369 } 370 371 bitmaps_ext.nb_bitmaps = be32_to_cpu(bitmaps_ext.nb_bitmaps); 372 bitmaps_ext.bitmap_directory_size = 373 be64_to_cpu(bitmaps_ext.bitmap_directory_size); 374 bitmaps_ext.bitmap_directory_offset = 375 be64_to_cpu(bitmaps_ext.bitmap_directory_offset); 376 377 if (bitmaps_ext.nb_bitmaps > QCOW2_MAX_BITMAPS) { 378 error_setg(errp, 379 "bitmaps_ext: Image has %" PRIu32 " bitmaps, " 380 "exceeding the QEMU supported maximum of %d", 381 bitmaps_ext.nb_bitmaps, QCOW2_MAX_BITMAPS); 382 return -EINVAL; 383 } 384 385 if (bitmaps_ext.nb_bitmaps == 0) { 386 error_setg(errp, "found bitmaps extension with zero bitmaps"); 387 return -EINVAL; 388 } 389 390 if (offset_into_cluster(s, bitmaps_ext.bitmap_directory_offset)) { 391 error_setg(errp, "bitmaps_ext: " 392 "invalid bitmap directory offset"); 393 return -EINVAL; 394 } 395 396 if (bitmaps_ext.bitmap_directory_size > 397 QCOW2_MAX_BITMAP_DIRECTORY_SIZE) { 398 error_setg(errp, "bitmaps_ext: " 399 "bitmap directory size (%" PRIu64 ") exceeds " 400 "the maximum supported size (%d)", 401 bitmaps_ext.bitmap_directory_size, 402 QCOW2_MAX_BITMAP_DIRECTORY_SIZE); 403 return -EINVAL; 404 } 405 406 s->nb_bitmaps = bitmaps_ext.nb_bitmaps; 407 s->bitmap_directory_offset = 408 bitmaps_ext.bitmap_directory_offset; 409 s->bitmap_directory_size = 410 bitmaps_ext.bitmap_directory_size; 411 412 #ifdef DEBUG_EXT 413 printf("Qcow2: Got bitmaps extension: " 414 "offset=%" PRIu64 " nb_bitmaps=%" PRIu32 "\n", 415 s->bitmap_directory_offset, s->nb_bitmaps); 416 #endif 417 break; 418 419 case QCOW2_EXT_MAGIC_DATA_FILE: 420 { 421 s->image_data_file = g_malloc0(ext.len + 1); 422 ret = bdrv_co_pread(bs->file, offset, ext.len, s->image_data_file, 0); 423 if (ret < 0) { 424 error_setg_errno(errp, -ret, 425 "ERROR: Could not read data file name"); 426 return ret; 427 } 428 #ifdef DEBUG_EXT 429 printf("Qcow2: Got external data file %s\n", s->image_data_file); 430 #endif 431 break; 432 } 433 434 default: 435 /* unknown magic - save it in case we need to rewrite the header */ 436 /* If you add a new feature, make sure to also update the fast 437 * path of qcow2_make_empty() to deal with it. 
*/ 438 { 439 Qcow2UnknownHeaderExtension *uext; 440 441 uext = g_malloc0(sizeof(*uext) + ext.len); 442 uext->magic = ext.magic; 443 uext->len = ext.len; 444 QLIST_INSERT_HEAD(&s->unknown_header_ext, uext, next); 445 446 ret = bdrv_co_pread(bs->file, offset, uext->len, uext->data, 0); 447 if (ret < 0) { 448 error_setg_errno(errp, -ret, "ERROR: unknown extension: " 449 "Could not read data"); 450 return ret; 451 } 452 } 453 break; 454 } 455 456 offset += ((ext.len + 7) & ~7); 457 } 458 459 return 0; 460 } 461 462 static void cleanup_unknown_header_ext(BlockDriverState *bs) 463 { 464 BDRVQcow2State *s = bs->opaque; 465 Qcow2UnknownHeaderExtension *uext, *next; 466 467 QLIST_FOREACH_SAFE(uext, &s->unknown_header_ext, next, next) { 468 QLIST_REMOVE(uext, next); 469 g_free(uext); 470 } 471 } 472 473 static void report_unsupported_feature(Error **errp, Qcow2Feature *table, 474 uint64_t mask) 475 { 476 g_autoptr(GString) features = g_string_sized_new(60); 477 478 while (table && table->name[0] != '\0') { 479 if (table->type == QCOW2_FEAT_TYPE_INCOMPATIBLE) { 480 if (mask & (1ULL << table->bit)) { 481 if (features->len > 0) { 482 g_string_append(features, ", "); 483 } 484 g_string_append_printf(features, "%.46s", table->name); 485 mask &= ~(1ULL << table->bit); 486 } 487 } 488 table++; 489 } 490 491 if (mask) { 492 if (features->len > 0) { 493 g_string_append(features, ", "); 494 } 495 g_string_append_printf(features, 496 "Unknown incompatible feature: %" PRIx64, mask); 497 } 498 499 error_setg(errp, "Unsupported qcow2 feature(s): %s", features->str); 500 } 501 502 /* 503 * Sets the dirty bit and flushes afterwards if necessary. 504 * 505 * The incompatible_features bit is only set if the image file header was 506 * updated successfully. Therefore it is not required to check the return 507 * value of this function. 508 */ 509 int qcow2_mark_dirty(BlockDriverState *bs) 510 { 511 BDRVQcow2State *s = bs->opaque; 512 uint64_t val; 513 int ret; 514 515 assert(s->qcow_version >= 3); 516 517 if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) { 518 return 0; /* already dirty */ 519 } 520 521 val = cpu_to_be64(s->incompatible_features | QCOW2_INCOMPAT_DIRTY); 522 ret = bdrv_pwrite_sync(bs->file, 523 offsetof(QCowHeader, incompatible_features), 524 sizeof(val), &val, 0); 525 if (ret < 0) { 526 return ret; 527 } 528 529 /* Only treat image as dirty if the header was updated successfully */ 530 s->incompatible_features |= QCOW2_INCOMPAT_DIRTY; 531 return 0; 532 } 533 534 /* 535 * Clears the dirty bit and flushes before if necessary. Only call this 536 * function when there are no pending requests, it does not guard against 537 * concurrent requests dirtying the image. 538 */ 539 static int GRAPH_RDLOCK qcow2_mark_clean(BlockDriverState *bs) 540 { 541 BDRVQcow2State *s = bs->opaque; 542 543 if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) { 544 int ret; 545 546 s->incompatible_features &= ~QCOW2_INCOMPAT_DIRTY; 547 548 ret = qcow2_flush_caches(bs); 549 if (ret < 0) { 550 return ret; 551 } 552 553 return qcow2_update_header(bs); 554 } 555 return 0; 556 } 557 558 /* 559 * Marks the image as corrupt. 560 */ 561 int qcow2_mark_corrupt(BlockDriverState *bs) 562 { 563 BDRVQcow2State *s = bs->opaque; 564 565 s->incompatible_features |= QCOW2_INCOMPAT_CORRUPT; 566 return qcow2_update_header(bs); 567 } 568 569 /* 570 * Marks the image as consistent, i.e., unsets the corrupt bit, and flushes 571 * before if necessary. 
572 */ 573 static int coroutine_fn GRAPH_RDLOCK 574 qcow2_mark_consistent(BlockDriverState *bs) 575 { 576 BDRVQcow2State *s = bs->opaque; 577 578 if (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT) { 579 int ret = qcow2_flush_caches(bs); 580 if (ret < 0) { 581 return ret; 582 } 583 584 s->incompatible_features &= ~QCOW2_INCOMPAT_CORRUPT; 585 return qcow2_update_header(bs); 586 } 587 return 0; 588 } 589 590 static void qcow2_add_check_result(BdrvCheckResult *out, 591 const BdrvCheckResult *src, 592 bool set_allocation_info) 593 { 594 out->corruptions += src->corruptions; 595 out->leaks += src->leaks; 596 out->check_errors += src->check_errors; 597 out->corruptions_fixed += src->corruptions_fixed; 598 out->leaks_fixed += src->leaks_fixed; 599 600 if (set_allocation_info) { 601 out->image_end_offset = src->image_end_offset; 602 out->bfi = src->bfi; 603 } 604 } 605 606 static int coroutine_fn GRAPH_RDLOCK 607 qcow2_co_check_locked(BlockDriverState *bs, BdrvCheckResult *result, 608 BdrvCheckMode fix) 609 { 610 BdrvCheckResult snapshot_res = {}; 611 BdrvCheckResult refcount_res = {}; 612 int ret; 613 614 memset(result, 0, sizeof(*result)); 615 616 ret = qcow2_check_read_snapshot_table(bs, &snapshot_res, fix); 617 if (ret < 0) { 618 qcow2_add_check_result(result, &snapshot_res, false); 619 return ret; 620 } 621 622 ret = qcow2_check_refcounts(bs, &refcount_res, fix); 623 qcow2_add_check_result(result, &refcount_res, true); 624 if (ret < 0) { 625 qcow2_add_check_result(result, &snapshot_res, false); 626 return ret; 627 } 628 629 ret = qcow2_check_fix_snapshot_table(bs, &snapshot_res, fix); 630 qcow2_add_check_result(result, &snapshot_res, false); 631 if (ret < 0) { 632 return ret; 633 } 634 635 if (fix && result->check_errors == 0 && result->corruptions == 0) { 636 ret = qcow2_mark_clean(bs); 637 if (ret < 0) { 638 return ret; 639 } 640 return qcow2_mark_consistent(bs); 641 } 642 return ret; 643 } 644 645 static int coroutine_fn GRAPH_RDLOCK 646 qcow2_co_check(BlockDriverState *bs, BdrvCheckResult *result, 647 BdrvCheckMode fix) 648 { 649 BDRVQcow2State *s = bs->opaque; 650 int ret; 651 652 qemu_co_mutex_lock(&s->lock); 653 ret = qcow2_co_check_locked(bs, result, fix); 654 qemu_co_mutex_unlock(&s->lock); 655 return ret; 656 } 657 658 int qcow2_validate_table(BlockDriverState *bs, uint64_t offset, 659 uint64_t entries, size_t entry_len, 660 int64_t max_size_bytes, const char *table_name, 661 Error **errp) 662 { 663 BDRVQcow2State *s = bs->opaque; 664 665 if (entries > max_size_bytes / entry_len) { 666 error_setg(errp, "%s too large", table_name); 667 return -EFBIG; 668 } 669 670 /* Use signed INT64_MAX as the maximum even for uint64_t header fields, 671 * because values will be passed to qemu functions taking int64_t. 
*/ 672 if ((INT64_MAX - entries * entry_len < offset) || 673 (offset_into_cluster(s, offset) != 0)) { 674 error_setg(errp, "%s offset invalid", table_name); 675 return -EINVAL; 676 } 677 678 return 0; 679 } 680 681 static const char *const mutable_opts[] = { 682 QCOW2_OPT_LAZY_REFCOUNTS, 683 QCOW2_OPT_DISCARD_REQUEST, 684 QCOW2_OPT_DISCARD_SNAPSHOT, 685 QCOW2_OPT_DISCARD_OTHER, 686 QCOW2_OPT_DISCARD_NO_UNREF, 687 QCOW2_OPT_OVERLAP, 688 QCOW2_OPT_OVERLAP_TEMPLATE, 689 QCOW2_OPT_OVERLAP_MAIN_HEADER, 690 QCOW2_OPT_OVERLAP_ACTIVE_L1, 691 QCOW2_OPT_OVERLAP_ACTIVE_L2, 692 QCOW2_OPT_OVERLAP_REFCOUNT_TABLE, 693 QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK, 694 QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE, 695 QCOW2_OPT_OVERLAP_INACTIVE_L1, 696 QCOW2_OPT_OVERLAP_INACTIVE_L2, 697 QCOW2_OPT_OVERLAP_BITMAP_DIRECTORY, 698 QCOW2_OPT_CACHE_SIZE, 699 QCOW2_OPT_L2_CACHE_SIZE, 700 QCOW2_OPT_L2_CACHE_ENTRY_SIZE, 701 QCOW2_OPT_REFCOUNT_CACHE_SIZE, 702 QCOW2_OPT_CACHE_CLEAN_INTERVAL, 703 NULL 704 }; 705 706 static QemuOptsList qcow2_runtime_opts = { 707 .name = "qcow2", 708 .head = QTAILQ_HEAD_INITIALIZER(qcow2_runtime_opts.head), 709 .desc = { 710 { 711 .name = QCOW2_OPT_LAZY_REFCOUNTS, 712 .type = QEMU_OPT_BOOL, 713 .help = "Postpone refcount updates", 714 }, 715 { 716 .name = QCOW2_OPT_DISCARD_REQUEST, 717 .type = QEMU_OPT_BOOL, 718 .help = "Pass guest discard requests to the layer below", 719 }, 720 { 721 .name = QCOW2_OPT_DISCARD_SNAPSHOT, 722 .type = QEMU_OPT_BOOL, 723 .help = "Generate discard requests when snapshot related space " 724 "is freed", 725 }, 726 { 727 .name = QCOW2_OPT_DISCARD_OTHER, 728 .type = QEMU_OPT_BOOL, 729 .help = "Generate discard requests when other clusters are freed", 730 }, 731 { 732 .name = QCOW2_OPT_DISCARD_NO_UNREF, 733 .type = QEMU_OPT_BOOL, 734 .help = "Do not unreference discarded clusters", 735 }, 736 { 737 .name = QCOW2_OPT_OVERLAP, 738 .type = QEMU_OPT_STRING, 739 .help = "Selects which overlap checks to perform from a range of " 740 "templates (none, constant, cached, all)", 741 }, 742 { 743 .name = QCOW2_OPT_OVERLAP_TEMPLATE, 744 .type = QEMU_OPT_STRING, 745 .help = "Selects which overlap checks to perform from a range of " 746 "templates (none, constant, cached, all)", 747 }, 748 { 749 .name = QCOW2_OPT_OVERLAP_MAIN_HEADER, 750 .type = QEMU_OPT_BOOL, 751 .help = "Check for unintended writes into the main qcow2 header", 752 }, 753 { 754 .name = QCOW2_OPT_OVERLAP_ACTIVE_L1, 755 .type = QEMU_OPT_BOOL, 756 .help = "Check for unintended writes into the active L1 table", 757 }, 758 { 759 .name = QCOW2_OPT_OVERLAP_ACTIVE_L2, 760 .type = QEMU_OPT_BOOL, 761 .help = "Check for unintended writes into an active L2 table", 762 }, 763 { 764 .name = QCOW2_OPT_OVERLAP_REFCOUNT_TABLE, 765 .type = QEMU_OPT_BOOL, 766 .help = "Check for unintended writes into the refcount table", 767 }, 768 { 769 .name = QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK, 770 .type = QEMU_OPT_BOOL, 771 .help = "Check for unintended writes into a refcount block", 772 }, 773 { 774 .name = QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE, 775 .type = QEMU_OPT_BOOL, 776 .help = "Check for unintended writes into the snapshot table", 777 }, 778 { 779 .name = QCOW2_OPT_OVERLAP_INACTIVE_L1, 780 .type = QEMU_OPT_BOOL, 781 .help = "Check for unintended writes into an inactive L1 table", 782 }, 783 { 784 .name = QCOW2_OPT_OVERLAP_INACTIVE_L2, 785 .type = QEMU_OPT_BOOL, 786 .help = "Check for unintended writes into an inactive L2 table", 787 }, 788 { 789 .name = QCOW2_OPT_OVERLAP_BITMAP_DIRECTORY, 790 .type = QEMU_OPT_BOOL, 791 .help = "Check for unintended writes into 
the bitmap directory", 792 }, 793 { 794 .name = QCOW2_OPT_CACHE_SIZE, 795 .type = QEMU_OPT_SIZE, 796 .help = "Maximum combined metadata (L2 tables and refcount blocks) " 797 "cache size", 798 }, 799 { 800 .name = QCOW2_OPT_L2_CACHE_SIZE, 801 .type = QEMU_OPT_SIZE, 802 .help = "Maximum L2 table cache size", 803 }, 804 { 805 .name = QCOW2_OPT_L2_CACHE_ENTRY_SIZE, 806 .type = QEMU_OPT_SIZE, 807 .help = "Size of each entry in the L2 cache", 808 }, 809 { 810 .name = QCOW2_OPT_REFCOUNT_CACHE_SIZE, 811 .type = QEMU_OPT_SIZE, 812 .help = "Maximum refcount block cache size", 813 }, 814 { 815 .name = QCOW2_OPT_CACHE_CLEAN_INTERVAL, 816 .type = QEMU_OPT_NUMBER, 817 .help = "Clean unused cache entries after this time (in seconds)", 818 }, 819 BLOCK_CRYPTO_OPT_DEF_KEY_SECRET("encrypt.", 820 "ID of secret providing qcow2 AES key or LUKS passphrase"), 821 { /* end of list */ } 822 }, 823 }; 824 825 static const char *overlap_bool_option_names[QCOW2_OL_MAX_BITNR] = { 826 [QCOW2_OL_MAIN_HEADER_BITNR] = QCOW2_OPT_OVERLAP_MAIN_HEADER, 827 [QCOW2_OL_ACTIVE_L1_BITNR] = QCOW2_OPT_OVERLAP_ACTIVE_L1, 828 [QCOW2_OL_ACTIVE_L2_BITNR] = QCOW2_OPT_OVERLAP_ACTIVE_L2, 829 [QCOW2_OL_REFCOUNT_TABLE_BITNR] = QCOW2_OPT_OVERLAP_REFCOUNT_TABLE, 830 [QCOW2_OL_REFCOUNT_BLOCK_BITNR] = QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK, 831 [QCOW2_OL_SNAPSHOT_TABLE_BITNR] = QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE, 832 [QCOW2_OL_INACTIVE_L1_BITNR] = QCOW2_OPT_OVERLAP_INACTIVE_L1, 833 [QCOW2_OL_INACTIVE_L2_BITNR] = QCOW2_OPT_OVERLAP_INACTIVE_L2, 834 [QCOW2_OL_BITMAP_DIRECTORY_BITNR] = QCOW2_OPT_OVERLAP_BITMAP_DIRECTORY, 835 }; 836 837 static void cache_clean_timer_cb(void *opaque) 838 { 839 BlockDriverState *bs = opaque; 840 BDRVQcow2State *s = bs->opaque; 841 qcow2_cache_clean_unused(s->l2_table_cache); 842 qcow2_cache_clean_unused(s->refcount_block_cache); 843 timer_mod(s->cache_clean_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 844 (int64_t) s->cache_clean_interval * 1000); 845 } 846 847 static void cache_clean_timer_init(BlockDriverState *bs, AioContext *context) 848 { 849 BDRVQcow2State *s = bs->opaque; 850 if (s->cache_clean_interval > 0) { 851 s->cache_clean_timer = 852 aio_timer_new_with_attrs(context, QEMU_CLOCK_VIRTUAL, 853 SCALE_MS, QEMU_TIMER_ATTR_EXTERNAL, 854 cache_clean_timer_cb, bs); 855 timer_mod(s->cache_clean_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 856 (int64_t) s->cache_clean_interval * 1000); 857 } 858 } 859 860 static void cache_clean_timer_del(BlockDriverState *bs) 861 { 862 BDRVQcow2State *s = bs->opaque; 863 if (s->cache_clean_timer) { 864 timer_free(s->cache_clean_timer); 865 s->cache_clean_timer = NULL; 866 } 867 } 868 869 static void qcow2_detach_aio_context(BlockDriverState *bs) 870 { 871 cache_clean_timer_del(bs); 872 } 873 874 static void qcow2_attach_aio_context(BlockDriverState *bs, 875 AioContext *new_context) 876 { 877 cache_clean_timer_init(bs, new_context); 878 } 879 880 static bool read_cache_sizes(BlockDriverState *bs, QemuOpts *opts, 881 uint64_t *l2_cache_size, 882 uint64_t *l2_cache_entry_size, 883 uint64_t *refcount_cache_size, Error **errp) 884 { 885 BDRVQcow2State *s = bs->opaque; 886 uint64_t combined_cache_size, l2_cache_max_setting; 887 bool l2_cache_size_set, refcount_cache_size_set, combined_cache_size_set; 888 bool l2_cache_entry_size_set; 889 int min_refcount_cache = MIN_REFCOUNT_CACHE_SIZE * s->cluster_size; 890 uint64_t virtual_disk_size = bs->total_sectors * BDRV_SECTOR_SIZE; 891 uint64_t max_l2_entries = DIV_ROUND_UP(virtual_disk_size, s->cluster_size); 892 /* An L2 table is always one 
cluster in size so the max cache size 893 * should be a multiple of the cluster size. */ 894 uint64_t max_l2_cache = ROUND_UP(max_l2_entries * l2_entry_size(s), 895 s->cluster_size); 896 897 combined_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_CACHE_SIZE); 898 l2_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_L2_CACHE_SIZE); 899 refcount_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_REFCOUNT_CACHE_SIZE); 900 l2_cache_entry_size_set = qemu_opt_get(opts, QCOW2_OPT_L2_CACHE_ENTRY_SIZE); 901 902 combined_cache_size = qemu_opt_get_size(opts, QCOW2_OPT_CACHE_SIZE, 0); 903 l2_cache_max_setting = qemu_opt_get_size(opts, QCOW2_OPT_L2_CACHE_SIZE, 904 DEFAULT_L2_CACHE_MAX_SIZE); 905 *refcount_cache_size = qemu_opt_get_size(opts, 906 QCOW2_OPT_REFCOUNT_CACHE_SIZE, 0); 907 908 *l2_cache_entry_size = qemu_opt_get_size( 909 opts, QCOW2_OPT_L2_CACHE_ENTRY_SIZE, s->cluster_size); 910 911 *l2_cache_size = MIN(max_l2_cache, l2_cache_max_setting); 912 913 if (combined_cache_size_set) { 914 if (l2_cache_size_set && refcount_cache_size_set) { 915 error_setg(errp, QCOW2_OPT_CACHE_SIZE ", " QCOW2_OPT_L2_CACHE_SIZE 916 " and " QCOW2_OPT_REFCOUNT_CACHE_SIZE " may not be set " 917 "at the same time"); 918 return false; 919 } else if (l2_cache_size_set && 920 (l2_cache_max_setting > combined_cache_size)) { 921 error_setg(errp, QCOW2_OPT_L2_CACHE_SIZE " may not exceed " 922 QCOW2_OPT_CACHE_SIZE); 923 return false; 924 } else if (*refcount_cache_size > combined_cache_size) { 925 error_setg(errp, QCOW2_OPT_REFCOUNT_CACHE_SIZE " may not exceed " 926 QCOW2_OPT_CACHE_SIZE); 927 return false; 928 } 929 930 if (l2_cache_size_set) { 931 *refcount_cache_size = combined_cache_size - *l2_cache_size; 932 } else if (refcount_cache_size_set) { 933 *l2_cache_size = combined_cache_size - *refcount_cache_size; 934 } else { 935 /* Assign as much memory as possible to the L2 cache, and 936 * use the remainder for the refcount cache */ 937 if (combined_cache_size >= max_l2_cache + min_refcount_cache) { 938 *l2_cache_size = max_l2_cache; 939 *refcount_cache_size = combined_cache_size - *l2_cache_size; 940 } else { 941 *refcount_cache_size = 942 MIN(combined_cache_size, min_refcount_cache); 943 *l2_cache_size = combined_cache_size - *refcount_cache_size; 944 } 945 } 946 } 947 948 /* 949 * If the L2 cache is not enough to cover the whole disk then 950 * default to 4KB entries. Smaller entries reduce the cost of 951 * loads and evictions and increase I/O performance. 
952 */ 953 if (*l2_cache_size < max_l2_cache && !l2_cache_entry_size_set) { 954 *l2_cache_entry_size = MIN(s->cluster_size, 4096); 955 } 956 957 /* l2_cache_size and refcount_cache_size are ensured to have at least 958 * their minimum values in qcow2_update_options_prepare() */ 959 960 if (*l2_cache_entry_size < (1 << MIN_CLUSTER_BITS) || 961 *l2_cache_entry_size > s->cluster_size || 962 !is_power_of_2(*l2_cache_entry_size)) { 963 error_setg(errp, "L2 cache entry size must be a power of two " 964 "between %d and the cluster size (%d)", 965 1 << MIN_CLUSTER_BITS, s->cluster_size); 966 return false; 967 } 968 969 return true; 970 } 971 972 typedef struct Qcow2ReopenState { 973 Qcow2Cache *l2_table_cache; 974 Qcow2Cache *refcount_block_cache; 975 int l2_slice_size; /* Number of entries in a slice of the L2 table */ 976 bool use_lazy_refcounts; 977 int overlap_check; 978 bool discard_passthrough[QCOW2_DISCARD_MAX]; 979 bool discard_no_unref; 980 uint64_t cache_clean_interval; 981 QCryptoBlockOpenOptions *crypto_opts; /* Disk encryption runtime options */ 982 } Qcow2ReopenState; 983 984 static int GRAPH_RDLOCK 985 qcow2_update_options_prepare(BlockDriverState *bs, Qcow2ReopenState *r, 986 QDict *options, int flags, Error **errp) 987 { 988 BDRVQcow2State *s = bs->opaque; 989 QemuOpts *opts = NULL; 990 const char *opt_overlap_check, *opt_overlap_check_template; 991 int overlap_check_template = 0; 992 uint64_t l2_cache_size, l2_cache_entry_size, refcount_cache_size; 993 int i; 994 const char *encryptfmt; 995 QDict *encryptopts = NULL; 996 int ret; 997 998 qdict_extract_subqdict(options, &encryptopts, "encrypt."); 999 encryptfmt = qdict_get_try_str(encryptopts, "format"); 1000 1001 opts = qemu_opts_create(&qcow2_runtime_opts, NULL, 0, &error_abort); 1002 if (!qemu_opts_absorb_qdict(opts, options, errp)) { 1003 ret = -EINVAL; 1004 goto fail; 1005 } 1006 1007 /* get L2 table/refcount block cache size from command line options */ 1008 if (!read_cache_sizes(bs, opts, &l2_cache_size, &l2_cache_entry_size, 1009 &refcount_cache_size, errp)) { 1010 ret = -EINVAL; 1011 goto fail; 1012 } 1013 1014 l2_cache_size /= l2_cache_entry_size; 1015 if (l2_cache_size < MIN_L2_CACHE_SIZE) { 1016 l2_cache_size = MIN_L2_CACHE_SIZE; 1017 } 1018 if (l2_cache_size > INT_MAX) { 1019 error_setg(errp, "L2 cache size too big"); 1020 ret = -EINVAL; 1021 goto fail; 1022 } 1023 1024 refcount_cache_size /= s->cluster_size; 1025 if (refcount_cache_size < MIN_REFCOUNT_CACHE_SIZE) { 1026 refcount_cache_size = MIN_REFCOUNT_CACHE_SIZE; 1027 } 1028 if (refcount_cache_size > INT_MAX) { 1029 error_setg(errp, "Refcount cache size too big"); 1030 ret = -EINVAL; 1031 goto fail; 1032 } 1033 1034 /* alloc new L2 table/refcount block cache, flush old one */ 1035 if (s->l2_table_cache) { 1036 ret = qcow2_cache_flush(bs, s->l2_table_cache); 1037 if (ret) { 1038 error_setg_errno(errp, -ret, "Failed to flush the L2 table cache"); 1039 goto fail; 1040 } 1041 } 1042 1043 if (s->refcount_block_cache) { 1044 ret = qcow2_cache_flush(bs, s->refcount_block_cache); 1045 if (ret) { 1046 error_setg_errno(errp, -ret, 1047 "Failed to flush the refcount block cache"); 1048 goto fail; 1049 } 1050 } 1051 1052 r->l2_slice_size = l2_cache_entry_size / l2_entry_size(s); 1053 r->l2_table_cache = qcow2_cache_create(bs, l2_cache_size, 1054 l2_cache_entry_size); 1055 r->refcount_block_cache = qcow2_cache_create(bs, refcount_cache_size, 1056 s->cluster_size); 1057 if (r->l2_table_cache == NULL || r->refcount_block_cache == NULL) { 1058 error_setg(errp, "Could not allocate 
metadata caches"); 1059 ret = -ENOMEM; 1060 goto fail; 1061 } 1062 1063 /* New interval for cache cleanup timer */ 1064 r->cache_clean_interval = 1065 qemu_opt_get_number(opts, QCOW2_OPT_CACHE_CLEAN_INTERVAL, 1066 DEFAULT_CACHE_CLEAN_INTERVAL); 1067 #ifndef CONFIG_LINUX 1068 if (r->cache_clean_interval != 0) { 1069 error_setg(errp, QCOW2_OPT_CACHE_CLEAN_INTERVAL 1070 " not supported on this host"); 1071 ret = -EINVAL; 1072 goto fail; 1073 } 1074 #endif 1075 if (r->cache_clean_interval > UINT_MAX) { 1076 error_setg(errp, "Cache clean interval too big"); 1077 ret = -EINVAL; 1078 goto fail; 1079 } 1080 1081 /* lazy-refcounts; flush if going from enabled to disabled */ 1082 r->use_lazy_refcounts = qemu_opt_get_bool(opts, QCOW2_OPT_LAZY_REFCOUNTS, 1083 (s->compatible_features & QCOW2_COMPAT_LAZY_REFCOUNTS)); 1084 if (r->use_lazy_refcounts && s->qcow_version < 3) { 1085 error_setg(errp, "Lazy refcounts require a qcow2 image with at least " 1086 "qemu 1.1 compatibility level"); 1087 ret = -EINVAL; 1088 goto fail; 1089 } 1090 1091 if (s->use_lazy_refcounts && !r->use_lazy_refcounts) { 1092 ret = qcow2_mark_clean(bs); 1093 if (ret < 0) { 1094 error_setg_errno(errp, -ret, "Failed to disable lazy refcounts"); 1095 goto fail; 1096 } 1097 } 1098 1099 /* Overlap check options */ 1100 opt_overlap_check = qemu_opt_get(opts, QCOW2_OPT_OVERLAP); 1101 opt_overlap_check_template = qemu_opt_get(opts, QCOW2_OPT_OVERLAP_TEMPLATE); 1102 if (opt_overlap_check_template && opt_overlap_check && 1103 strcmp(opt_overlap_check_template, opt_overlap_check)) 1104 { 1105 error_setg(errp, "Conflicting values for qcow2 options '" 1106 QCOW2_OPT_OVERLAP "' ('%s') and '" QCOW2_OPT_OVERLAP_TEMPLATE 1107 "' ('%s')", opt_overlap_check, opt_overlap_check_template); 1108 ret = -EINVAL; 1109 goto fail; 1110 } 1111 if (!opt_overlap_check) { 1112 opt_overlap_check = opt_overlap_check_template ?: "cached"; 1113 } 1114 1115 if (!strcmp(opt_overlap_check, "none")) { 1116 overlap_check_template = 0; 1117 } else if (!strcmp(opt_overlap_check, "constant")) { 1118 overlap_check_template = QCOW2_OL_CONSTANT; 1119 } else if (!strcmp(opt_overlap_check, "cached")) { 1120 overlap_check_template = QCOW2_OL_CACHED; 1121 } else if (!strcmp(opt_overlap_check, "all")) { 1122 overlap_check_template = QCOW2_OL_ALL; 1123 } else { 1124 error_setg(errp, "Unsupported value '%s' for qcow2 option " 1125 "'overlap-check'. 
Allowed are any of the following: " 1126 "none, constant, cached, all", opt_overlap_check); 1127 ret = -EINVAL; 1128 goto fail; 1129 } 1130 1131 r->overlap_check = 0; 1132 for (i = 0; i < QCOW2_OL_MAX_BITNR; i++) { 1133 /* overlap-check defines a template bitmask, but every flag may be 1134 * overwritten through the associated boolean option */ 1135 r->overlap_check |= 1136 qemu_opt_get_bool(opts, overlap_bool_option_names[i], 1137 overlap_check_template & (1 << i)) << i; 1138 } 1139 1140 r->discard_passthrough[QCOW2_DISCARD_NEVER] = false; 1141 r->discard_passthrough[QCOW2_DISCARD_ALWAYS] = true; 1142 r->discard_passthrough[QCOW2_DISCARD_REQUEST] = 1143 qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_REQUEST, 1144 flags & BDRV_O_UNMAP); 1145 r->discard_passthrough[QCOW2_DISCARD_SNAPSHOT] = 1146 qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_SNAPSHOT, true); 1147 r->discard_passthrough[QCOW2_DISCARD_OTHER] = 1148 qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_OTHER, false); 1149 1150 r->discard_no_unref = qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_NO_UNREF, 1151 false); 1152 if (r->discard_no_unref && s->qcow_version < 3) { 1153 error_setg(errp, 1154 "discard-no-unref is only supported since qcow2 version 3"); 1155 ret = -EINVAL; 1156 goto fail; 1157 } 1158 1159 switch (s->crypt_method_header) { 1160 case QCOW_CRYPT_NONE: 1161 if (encryptfmt) { 1162 error_setg(errp, "No encryption in image header, but options " 1163 "specified format '%s'", encryptfmt); 1164 ret = -EINVAL; 1165 goto fail; 1166 } 1167 break; 1168 1169 case QCOW_CRYPT_AES: 1170 if (encryptfmt && !g_str_equal(encryptfmt, "aes")) { 1171 error_setg(errp, 1172 "Header reported 'aes' encryption format but " 1173 "options specify '%s'", encryptfmt); 1174 ret = -EINVAL; 1175 goto fail; 1176 } 1177 qdict_put_str(encryptopts, "format", "qcow"); 1178 r->crypto_opts = block_crypto_open_opts_init(encryptopts, errp); 1179 if (!r->crypto_opts) { 1180 ret = -EINVAL; 1181 goto fail; 1182 } 1183 break; 1184 1185 case QCOW_CRYPT_LUKS: 1186 if (encryptfmt && !g_str_equal(encryptfmt, "luks")) { 1187 error_setg(errp, 1188 "Header reported 'luks' encryption format but " 1189 "options specify '%s'", encryptfmt); 1190 ret = -EINVAL; 1191 goto fail; 1192 } 1193 qdict_put_str(encryptopts, "format", "luks"); 1194 r->crypto_opts = block_crypto_open_opts_init(encryptopts, errp); 1195 if (!r->crypto_opts) { 1196 ret = -EINVAL; 1197 goto fail; 1198 } 1199 break; 1200 1201 default: 1202 error_setg(errp, "Unsupported encryption method %d", 1203 s->crypt_method_header); 1204 ret = -EINVAL; 1205 goto fail; 1206 } 1207 1208 ret = 0; 1209 fail: 1210 qobject_unref(encryptopts); 1211 qemu_opts_del(opts); 1212 opts = NULL; 1213 return ret; 1214 } 1215 1216 static void qcow2_update_options_commit(BlockDriverState *bs, 1217 Qcow2ReopenState *r) 1218 { 1219 BDRVQcow2State *s = bs->opaque; 1220 int i; 1221 1222 if (s->l2_table_cache) { 1223 qcow2_cache_destroy(s->l2_table_cache); 1224 } 1225 if (s->refcount_block_cache) { 1226 qcow2_cache_destroy(s->refcount_block_cache); 1227 } 1228 s->l2_table_cache = r->l2_table_cache; 1229 s->refcount_block_cache = r->refcount_block_cache; 1230 s->l2_slice_size = r->l2_slice_size; 1231 1232 s->overlap_check = r->overlap_check; 1233 s->use_lazy_refcounts = r->use_lazy_refcounts; 1234 1235 for (i = 0; i < QCOW2_DISCARD_MAX; i++) { 1236 s->discard_passthrough[i] = r->discard_passthrough[i]; 1237 } 1238 1239 s->discard_no_unref = r->discard_no_unref; 1240 1241 if (s->cache_clean_interval != r->cache_clean_interval) { 1242 cache_clean_timer_del(bs); 
1243 s->cache_clean_interval = r->cache_clean_interval; 1244 cache_clean_timer_init(bs, bdrv_get_aio_context(bs)); 1245 } 1246 1247 qapi_free_QCryptoBlockOpenOptions(s->crypto_opts); 1248 s->crypto_opts = r->crypto_opts; 1249 } 1250 1251 static void qcow2_update_options_abort(BlockDriverState *bs, 1252 Qcow2ReopenState *r) 1253 { 1254 if (r->l2_table_cache) { 1255 qcow2_cache_destroy(r->l2_table_cache); 1256 } 1257 if (r->refcount_block_cache) { 1258 qcow2_cache_destroy(r->refcount_block_cache); 1259 } 1260 qapi_free_QCryptoBlockOpenOptions(r->crypto_opts); 1261 } 1262 1263 static int coroutine_fn GRAPH_RDLOCK 1264 qcow2_update_options(BlockDriverState *bs, QDict *options, int flags, 1265 Error **errp) 1266 { 1267 Qcow2ReopenState r = {}; 1268 int ret; 1269 1270 ret = qcow2_update_options_prepare(bs, &r, options, flags, errp); 1271 if (ret >= 0) { 1272 qcow2_update_options_commit(bs, &r); 1273 } else { 1274 qcow2_update_options_abort(bs, &r); 1275 } 1276 1277 return ret; 1278 } 1279 1280 static int validate_compression_type(BDRVQcow2State *s, Error **errp) 1281 { 1282 switch (s->compression_type) { 1283 case QCOW2_COMPRESSION_TYPE_ZLIB: 1284 #ifdef CONFIG_ZSTD 1285 case QCOW2_COMPRESSION_TYPE_ZSTD: 1286 #endif 1287 break; 1288 1289 default: 1290 error_setg(errp, "qcow2: unknown compression type: %u", 1291 s->compression_type); 1292 return -ENOTSUP; 1293 } 1294 1295 /* 1296 * if the compression type differs from QCOW2_COMPRESSION_TYPE_ZLIB 1297 * the incompatible feature flag must be set 1298 */ 1299 if (s->compression_type == QCOW2_COMPRESSION_TYPE_ZLIB) { 1300 if (s->incompatible_features & QCOW2_INCOMPAT_COMPRESSION) { 1301 error_setg(errp, "qcow2: Compression type incompatible feature " 1302 "bit must not be set"); 1303 return -EINVAL; 1304 } 1305 } else { 1306 if (!(s->incompatible_features & QCOW2_INCOMPAT_COMPRESSION)) { 1307 error_setg(errp, "qcow2: Compression type incompatible feature " 1308 "bit must be set"); 1309 return -EINVAL; 1310 } 1311 } 1312 1313 return 0; 1314 } 1315 1316 /* Called with s->lock held. 
*/ 1317 static int coroutine_fn GRAPH_RDLOCK 1318 qcow2_do_open(BlockDriverState *bs, QDict *options, int flags, 1319 bool open_data_file, Error **errp) 1320 { 1321 ERRP_GUARD(); 1322 BDRVQcow2State *s = bs->opaque; 1323 unsigned int len, i; 1324 int ret = 0; 1325 QCowHeader header; 1326 uint64_t ext_end; 1327 uint64_t l1_vm_state_index; 1328 bool update_header = false; 1329 1330 ret = bdrv_co_pread(bs->file, 0, sizeof(header), &header, 0); 1331 if (ret < 0) { 1332 error_setg_errno(errp, -ret, "Could not read qcow2 header"); 1333 goto fail; 1334 } 1335 header.magic = be32_to_cpu(header.magic); 1336 header.version = be32_to_cpu(header.version); 1337 header.backing_file_offset = be64_to_cpu(header.backing_file_offset); 1338 header.backing_file_size = be32_to_cpu(header.backing_file_size); 1339 header.size = be64_to_cpu(header.size); 1340 header.cluster_bits = be32_to_cpu(header.cluster_bits); 1341 header.crypt_method = be32_to_cpu(header.crypt_method); 1342 header.l1_table_offset = be64_to_cpu(header.l1_table_offset); 1343 header.l1_size = be32_to_cpu(header.l1_size); 1344 header.refcount_table_offset = be64_to_cpu(header.refcount_table_offset); 1345 header.refcount_table_clusters = 1346 be32_to_cpu(header.refcount_table_clusters); 1347 header.snapshots_offset = be64_to_cpu(header.snapshots_offset); 1348 header.nb_snapshots = be32_to_cpu(header.nb_snapshots); 1349 1350 if (header.magic != QCOW_MAGIC) { 1351 error_setg(errp, "Image is not in qcow2 format"); 1352 ret = -EINVAL; 1353 goto fail; 1354 } 1355 if (header.version < 2 || header.version > 3) { 1356 error_setg(errp, "Unsupported qcow2 version %" PRIu32, header.version); 1357 ret = -ENOTSUP; 1358 goto fail; 1359 } 1360 1361 s->qcow_version = header.version; 1362 1363 /* Initialise cluster size */ 1364 if (header.cluster_bits < MIN_CLUSTER_BITS || 1365 header.cluster_bits > MAX_CLUSTER_BITS) { 1366 error_setg(errp, "Unsupported cluster size: 2^%" PRIu32, 1367 header.cluster_bits); 1368 ret = -EINVAL; 1369 goto fail; 1370 } 1371 1372 s->cluster_bits = header.cluster_bits; 1373 s->cluster_size = 1 << s->cluster_bits; 1374 1375 /* Initialise version 3 header fields */ 1376 if (header.version == 2) { 1377 header.incompatible_features = 0; 1378 header.compatible_features = 0; 1379 header.autoclear_features = 0; 1380 header.refcount_order = 4; 1381 header.header_length = 72; 1382 } else { 1383 header.incompatible_features = 1384 be64_to_cpu(header.incompatible_features); 1385 header.compatible_features = be64_to_cpu(header.compatible_features); 1386 header.autoclear_features = be64_to_cpu(header.autoclear_features); 1387 header.refcount_order = be32_to_cpu(header.refcount_order); 1388 header.header_length = be32_to_cpu(header.header_length); 1389 1390 if (header.header_length < 104) { 1391 error_setg(errp, "qcow2 header too short"); 1392 ret = -EINVAL; 1393 goto fail; 1394 } 1395 } 1396 1397 if (header.header_length > s->cluster_size) { 1398 error_setg(errp, "qcow2 header exceeds cluster size"); 1399 ret = -EINVAL; 1400 goto fail; 1401 } 1402 1403 if (header.header_length > sizeof(header)) { 1404 s->unknown_header_fields_size = header.header_length - sizeof(header); 1405 s->unknown_header_fields = g_malloc(s->unknown_header_fields_size); 1406 ret = bdrv_co_pread(bs->file, sizeof(header), 1407 s->unknown_header_fields_size, 1408 s->unknown_header_fields, 0); 1409 if (ret < 0) { 1410 error_setg_errno(errp, -ret, "Could not read unknown qcow2 header " 1411 "fields"); 1412 goto fail; 1413 } 1414 } 1415 1416 if (header.backing_file_offset > 
s->cluster_size) { 1417 error_setg(errp, "Invalid backing file offset"); 1418 ret = -EINVAL; 1419 goto fail; 1420 } 1421 1422 if (header.backing_file_offset) { 1423 ext_end = header.backing_file_offset; 1424 } else { 1425 ext_end = 1 << header.cluster_bits; 1426 } 1427 1428 /* Handle feature bits */ 1429 s->incompatible_features = header.incompatible_features; 1430 s->compatible_features = header.compatible_features; 1431 s->autoclear_features = header.autoclear_features; 1432 1433 /* 1434 * Handle compression type 1435 * Older qcow2 images don't contain the compression type header. 1436 * Distinguish them by the header length and use 1437 * the only valid (default) compression type in that case 1438 */ 1439 if (header.header_length > offsetof(QCowHeader, compression_type)) { 1440 s->compression_type = header.compression_type; 1441 } else { 1442 s->compression_type = QCOW2_COMPRESSION_TYPE_ZLIB; 1443 } 1444 1445 ret = validate_compression_type(s, errp); 1446 if (ret) { 1447 goto fail; 1448 } 1449 1450 if (s->incompatible_features & ~QCOW2_INCOMPAT_MASK) { 1451 void *feature_table = NULL; 1452 qcow2_read_extensions(bs, header.header_length, ext_end, 1453 &feature_table, flags, NULL, NULL); 1454 report_unsupported_feature(errp, feature_table, 1455 s->incompatible_features & 1456 ~QCOW2_INCOMPAT_MASK); 1457 ret = -ENOTSUP; 1458 g_free(feature_table); 1459 goto fail; 1460 } 1461 1462 if (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT) { 1463 /* Corrupt images may not be written to unless they are being repaired 1464 */ 1465 if ((flags & BDRV_O_RDWR) && !(flags & BDRV_O_CHECK)) { 1466 error_setg(errp, "qcow2: Image is corrupt; cannot be opened " 1467 "read/write"); 1468 ret = -EACCES; 1469 goto fail; 1470 } 1471 } 1472 1473 s->subclusters_per_cluster = 1474 has_subclusters(s) ? QCOW_EXTL2_SUBCLUSTERS_PER_CLUSTER : 1; 1475 s->subcluster_size = s->cluster_size / s->subclusters_per_cluster; 1476 s->subcluster_bits = ctz32(s->subcluster_size); 1477 1478 if (s->subcluster_size < (1 << MIN_CLUSTER_BITS)) { 1479 error_setg(errp, "Unsupported subcluster size: %d", s->subcluster_size); 1480 ret = -EINVAL; 1481 goto fail; 1482 } 1483 1484 /* Check support for various header values */ 1485 if (header.refcount_order > 6) { 1486 error_setg(errp, "Reference count entry width too large; may not " 1487 "exceed 64 bits"); 1488 ret = -EINVAL; 1489 goto fail; 1490 } 1491 s->refcount_order = header.refcount_order; 1492 s->refcount_bits = 1 << s->refcount_order; 1493 s->refcount_max = UINT64_C(1) << (s->refcount_bits - 1); 1494 s->refcount_max += s->refcount_max - 1; 1495 1496 s->crypt_method_header = header.crypt_method; 1497 if (s->crypt_method_header) { 1498 if (bdrv_uses_whitelist() && 1499 s->crypt_method_header == QCOW_CRYPT_AES) { 1500 error_setg(errp, 1501 "Use of AES-CBC encrypted qcow2 images is no longer " 1502 "supported in system emulators"); 1503 error_append_hint(errp, 1504 "You can use 'qemu-img convert' to convert your " 1505 "image to an alternative supported format, such " 1506 "as unencrypted qcow2, or raw with the LUKS " 1507 "format instead.\n"); 1508 ret = -ENOSYS; 1509 goto fail; 1510 } 1511 1512 if (s->crypt_method_header == QCOW_CRYPT_AES) { 1513 s->crypt_physical_offset = false; 1514 } else { 1515 /* Assuming LUKS and any future crypt methods we 1516 * add will all use physical offsets, due to the 1517 * fact that the alternative is insecure... 
*/ 1518 s->crypt_physical_offset = true; 1519 } 1520 1521 bs->encrypted = true; 1522 } 1523 1524 s->l2_bits = s->cluster_bits - ctz32(l2_entry_size(s)); 1525 s->l2_size = 1 << s->l2_bits; 1526 /* 2^(s->refcount_order - 3) is the refcount width in bytes */ 1527 s->refcount_block_bits = s->cluster_bits - (s->refcount_order - 3); 1528 s->refcount_block_size = 1 << s->refcount_block_bits; 1529 bs->total_sectors = header.size / BDRV_SECTOR_SIZE; 1530 s->csize_shift = (62 - (s->cluster_bits - 8)); 1531 s->csize_mask = (1 << (s->cluster_bits - 8)) - 1; 1532 s->cluster_offset_mask = (1LL << s->csize_shift) - 1; 1533 1534 s->refcount_table_offset = header.refcount_table_offset; 1535 s->refcount_table_size = 1536 header.refcount_table_clusters << (s->cluster_bits - 3); 1537 1538 if (header.refcount_table_clusters == 0 && !(flags & BDRV_O_CHECK)) { 1539 error_setg(errp, "Image does not contain a reference count table"); 1540 ret = -EINVAL; 1541 goto fail; 1542 } 1543 1544 ret = qcow2_validate_table(bs, s->refcount_table_offset, 1545 header.refcount_table_clusters, 1546 s->cluster_size, QCOW_MAX_REFTABLE_SIZE, 1547 "Reference count table", errp); 1548 if (ret < 0) { 1549 goto fail; 1550 } 1551 1552 if (!(flags & BDRV_O_CHECK)) { 1553 /* 1554 * The total size in bytes of the snapshot table is checked in 1555 * qcow2_read_snapshots() because the size of each snapshot is 1556 * variable and we don't know it yet. 1557 * Here we only check the offset and number of snapshots. 1558 */ 1559 ret = qcow2_validate_table(bs, header.snapshots_offset, 1560 header.nb_snapshots, 1561 sizeof(QCowSnapshotHeader), 1562 sizeof(QCowSnapshotHeader) * 1563 QCOW_MAX_SNAPSHOTS, 1564 "Snapshot table", errp); 1565 if (ret < 0) { 1566 goto fail; 1567 } 1568 } 1569 1570 /* read the level 1 table */ 1571 ret = qcow2_validate_table(bs, header.l1_table_offset, 1572 header.l1_size, L1E_SIZE, 1573 QCOW_MAX_L1_SIZE, "Active L1 table", errp); 1574 if (ret < 0) { 1575 goto fail; 1576 } 1577 s->l1_size = header.l1_size; 1578 s->l1_table_offset = header.l1_table_offset; 1579 1580 l1_vm_state_index = size_to_l1(s, header.size); 1581 if (l1_vm_state_index > INT_MAX) { 1582 error_setg(errp, "Image is too big"); 1583 ret = -EFBIG; 1584 goto fail; 1585 } 1586 s->l1_vm_state_index = l1_vm_state_index; 1587 1588 /* the L1 table must contain at least enough entries to put 1589 header.size bytes */ 1590 if (s->l1_size < s->l1_vm_state_index) { 1591 error_setg(errp, "L1 table is too small"); 1592 ret = -EINVAL; 1593 goto fail; 1594 } 1595 1596 if (s->l1_size > 0) { 1597 s->l1_table = qemu_try_blockalign(bs->file->bs, s->l1_size * L1E_SIZE); 1598 if (s->l1_table == NULL) { 1599 error_setg(errp, "Could not allocate L1 table"); 1600 ret = -ENOMEM; 1601 goto fail; 1602 } 1603 ret = bdrv_co_pread(bs->file, s->l1_table_offset, s->l1_size * L1E_SIZE, 1604 s->l1_table, 0); 1605 if (ret < 0) { 1606 error_setg_errno(errp, -ret, "Could not read L1 table"); 1607 goto fail; 1608 } 1609 for(i = 0;i < s->l1_size; i++) { 1610 s->l1_table[i] = be64_to_cpu(s->l1_table[i]); 1611 } 1612 } 1613 1614 /* Parse driver-specific options */ 1615 ret = qcow2_update_options(bs, options, flags, errp); 1616 if (ret < 0) { 1617 goto fail; 1618 } 1619 1620 s->flags = flags; 1621 1622 ret = qcow2_refcount_init(bs); 1623 if (ret != 0) { 1624 error_setg_errno(errp, -ret, "Could not initialize refcount handling"); 1625 goto fail; 1626 } 1627 1628 QLIST_INIT(&s->cluster_allocs); 1629 QTAILQ_INIT(&s->discards); 1630 1631 /* read qcow2 extensions */ 1632 if (qcow2_read_extensions(bs, 
header.header_length, ext_end, NULL, 1633 flags, &update_header, errp)) { 1634 ret = -EINVAL; 1635 goto fail; 1636 } 1637 1638 if (open_data_file) { 1639 /* Open external data file */ 1640 bdrv_graph_co_rdunlock(); 1641 s->data_file = bdrv_co_open_child(NULL, options, "data-file", bs, 1642 &child_of_bds, BDRV_CHILD_DATA, 1643 true, errp); 1644 bdrv_graph_co_rdlock(); 1645 if (*errp) { 1646 ret = -EINVAL; 1647 goto fail; 1648 } 1649 1650 if (s->incompatible_features & QCOW2_INCOMPAT_DATA_FILE) { 1651 if (!s->data_file && s->image_data_file) { 1652 bdrv_graph_co_rdunlock(); 1653 s->data_file = bdrv_co_open_child(s->image_data_file, options, 1654 "data-file", bs, 1655 &child_of_bds, 1656 BDRV_CHILD_DATA, false, errp); 1657 bdrv_graph_co_rdlock(); 1658 if (!s->data_file) { 1659 ret = -EINVAL; 1660 goto fail; 1661 } 1662 } 1663 if (!s->data_file) { 1664 error_setg(errp, "'data-file' is required for this image"); 1665 ret = -EINVAL; 1666 goto fail; 1667 } 1668 1669 /* No data here */ 1670 bs->file->role &= ~BDRV_CHILD_DATA; 1671 1672 /* Must succeed because we have given up permissions if anything */ 1673 bdrv_child_refresh_perms(bs, bs->file, &error_abort); 1674 } else { 1675 if (s->data_file) { 1676 error_setg(errp, "'data-file' can only be set for images with " 1677 "an external data file"); 1678 ret = -EINVAL; 1679 goto fail; 1680 } 1681 1682 s->data_file = bs->file; 1683 1684 if (data_file_is_raw(bs)) { 1685 error_setg(errp, "data-file-raw requires a data file"); 1686 ret = -EINVAL; 1687 goto fail; 1688 } 1689 } 1690 } 1691 1692 /* qcow2_read_extension may have set up the crypto context 1693 * if the crypt method needs a header region, some methods 1694 * don't need header extensions, so must check here 1695 */ 1696 if (s->crypt_method_header && !s->crypto) { 1697 if (s->crypt_method_header == QCOW_CRYPT_AES) { 1698 unsigned int cflags = 0; 1699 if (flags & BDRV_O_NO_IO) { 1700 cflags |= QCRYPTO_BLOCK_OPEN_NO_IO; 1701 } 1702 s->crypto = qcrypto_block_open(s->crypto_opts, "encrypt.", 1703 NULL, NULL, cflags, 1704 QCOW2_MAX_THREADS, errp); 1705 if (!s->crypto) { 1706 ret = -EINVAL; 1707 goto fail; 1708 } 1709 } else if (!(flags & BDRV_O_NO_IO)) { 1710 error_setg(errp, "Missing CRYPTO header for crypt method %d", 1711 s->crypt_method_header); 1712 ret = -EINVAL; 1713 goto fail; 1714 } 1715 } 1716 1717 /* read the backing file name */ 1718 if (header.backing_file_offset != 0) { 1719 len = header.backing_file_size; 1720 if (len > MIN(1023, s->cluster_size - header.backing_file_offset) || 1721 len >= sizeof(bs->backing_file)) { 1722 error_setg(errp, "Backing file name too long"); 1723 ret = -EINVAL; 1724 goto fail; 1725 } 1726 1727 s->image_backing_file = g_malloc(len + 1); 1728 ret = bdrv_co_pread(bs->file, header.backing_file_offset, len, 1729 s->image_backing_file, 0); 1730 if (ret < 0) { 1731 error_setg_errno(errp, -ret, "Could not read backing file name"); 1732 goto fail; 1733 } 1734 s->image_backing_file[len] = '\0'; 1735 1736 /* 1737 * Update only when something has changed. This function is called by 1738 * qcow2_co_invalidate_cache(), and we do not want to reset 1739 * auto_backing_file unless necessary. 
1740 */ 1741 if (!g_str_equal(s->image_backing_file, bs->backing_file)) { 1742 pstrcpy(bs->backing_file, sizeof(bs->backing_file), 1743 s->image_backing_file); 1744 pstrcpy(bs->auto_backing_file, sizeof(bs->auto_backing_file), 1745 s->image_backing_file); 1746 } 1747 } 1748 1749 /* 1750 * Internal snapshots; skip reading them in check mode, because 1751 * we do not need them then, and we do not want to abort because 1752 * of a broken table. 1753 */ 1754 if (!(flags & BDRV_O_CHECK)) { 1755 s->snapshots_offset = header.snapshots_offset; 1756 s->nb_snapshots = header.nb_snapshots; 1757 1758 ret = qcow2_read_snapshots(bs, errp); 1759 if (ret < 0) { 1760 goto fail; 1761 } 1762 } 1763 1764 /* Clear unknown autoclear feature bits */ 1765 update_header |= s->autoclear_features & ~QCOW2_AUTOCLEAR_MASK; 1766 update_header = update_header && bdrv_is_writable(bs); 1767 if (update_header) { 1768 s->autoclear_features &= QCOW2_AUTOCLEAR_MASK; 1769 } 1770 1771 /* == Handle persistent dirty bitmaps == 1772 * 1773 * We want load dirty bitmaps in three cases: 1774 * 1775 * 1. Normal open of the disk in active mode, not related to invalidation 1776 * after migration. 1777 * 1778 * 2. Invalidation of the target vm after pre-copy phase of migration, if 1779 * bitmaps are _not_ migrating through migration channel, i.e. 1780 * 'dirty-bitmaps' capability is disabled. 1781 * 1782 * 3. Invalidation of source vm after failed or canceled migration. 1783 * This is a very interesting case. There are two possible types of 1784 * bitmaps: 1785 * 1786 * A. Stored on inactivation and removed. They should be loaded from the 1787 * image. 1788 * 1789 * B. Not stored: not-persistent bitmaps and bitmaps, migrated through 1790 * the migration channel (with dirty-bitmaps capability). 1791 * 1792 * On the other hand, there are two possible sub-cases: 1793 * 1794 * 3.1 disk was changed by somebody else while were inactive. In this 1795 * case all in-RAM dirty bitmaps (both persistent and not) are 1796 * definitely invalid. And we don't have any method to determine 1797 * this. 1798 * 1799 * Simple and safe thing is to just drop all the bitmaps of type B on 1800 * inactivation. But in this case we lose bitmaps in valid 4.2 case. 1801 * 1802 * On the other hand, resuming source vm, if disk was already changed 1803 * is a bad thing anyway: not only bitmaps, the whole vm state is 1804 * out of sync with disk. 1805 * 1806 * This means, that user or management tool, who for some reason 1807 * decided to resume source vm, after disk was already changed by 1808 * target vm, should at least drop all dirty bitmaps by hand. 1809 * 1810 * So, we can ignore this case for now, but TODO: "generation" 1811 * extension for qcow2, to determine, that image was changed after 1812 * last inactivation. And if it is changed, we will drop (or at least 1813 * mark as 'invalid' all the bitmaps of type B, both persistent 1814 * and not). 1815 * 1816 * 3.2 disk was _not_ changed while were inactive. Bitmaps may be saved 1817 * to disk ('dirty-bitmaps' capability disabled), or not saved 1818 * ('dirty-bitmaps' capability enabled), but we don't need to care 1819 * of: let's load bitmaps as always: stored bitmaps will be loaded, 1820 * and not stored has flag IN_USE=1 in the image and will be skipped 1821 * on loading. 1822 * 1823 * One remaining possible case when we don't want load bitmaps: 1824 * 1825 * 4. 
Open disk in inactive mode in target vm (bitmaps are migrating or 1826 * will be loaded on invalidation, no needs try loading them before) 1827 */ 1828 1829 if (!(bdrv_get_flags(bs) & BDRV_O_INACTIVE)) { 1830 /* It's case 1, 2 or 3.2. Or 3.1 which is BUG in management layer. */ 1831 bool header_updated; 1832 if (!qcow2_load_dirty_bitmaps(bs, &header_updated, errp)) { 1833 ret = -EINVAL; 1834 goto fail; 1835 } 1836 1837 update_header = update_header && !header_updated; 1838 } 1839 1840 if (update_header) { 1841 ret = qcow2_update_header(bs); 1842 if (ret < 0) { 1843 error_setg_errno(errp, -ret, "Could not update qcow2 header"); 1844 goto fail; 1845 } 1846 } 1847 1848 bs->supported_zero_flags = header.version >= 3 ? 1849 BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK : 0; 1850 bs->supported_truncate_flags = BDRV_REQ_ZERO_WRITE; 1851 1852 /* Repair image if dirty */ 1853 if (!(flags & BDRV_O_CHECK) && bdrv_is_writable(bs) && 1854 (s->incompatible_features & QCOW2_INCOMPAT_DIRTY)) { 1855 BdrvCheckResult result = {0}; 1856 1857 ret = qcow2_co_check_locked(bs, &result, 1858 BDRV_FIX_ERRORS | BDRV_FIX_LEAKS); 1859 if (ret < 0 || result.check_errors) { 1860 if (ret >= 0) { 1861 ret = -EIO; 1862 } 1863 error_setg_errno(errp, -ret, "Could not repair dirty image"); 1864 goto fail; 1865 } 1866 } 1867 1868 #ifdef DEBUG_ALLOC 1869 { 1870 BdrvCheckResult result = {0}; 1871 qcow2_check_refcounts(bs, &result, 0); 1872 } 1873 #endif 1874 1875 qemu_co_queue_init(&s->thread_task_queue); 1876 1877 return ret; 1878 1879 fail: 1880 g_free(s->image_data_file); 1881 if (open_data_file && has_data_file(bs)) { 1882 bdrv_graph_co_rdunlock(); 1883 bdrv_co_unref_child(bs, s->data_file); 1884 bdrv_graph_co_rdlock(); 1885 s->data_file = NULL; 1886 } 1887 g_free(s->unknown_header_fields); 1888 cleanup_unknown_header_ext(bs); 1889 qcow2_free_snapshots(bs); 1890 qcow2_refcount_close(bs); 1891 qemu_vfree(s->l1_table); 1892 /* else pre-write overlap checks in cache_destroy may crash */ 1893 s->l1_table = NULL; 1894 cache_clean_timer_del(bs); 1895 if (s->l2_table_cache) { 1896 qcow2_cache_destroy(s->l2_table_cache); 1897 } 1898 if (s->refcount_block_cache) { 1899 qcow2_cache_destroy(s->refcount_block_cache); 1900 } 1901 qcrypto_block_free(s->crypto); 1902 qapi_free_QCryptoBlockOpenOptions(s->crypto_opts); 1903 return ret; 1904 } 1905 1906 typedef struct QCow2OpenCo { 1907 BlockDriverState *bs; 1908 QDict *options; 1909 int flags; 1910 Error **errp; 1911 int ret; 1912 } QCow2OpenCo; 1913 1914 static void coroutine_fn qcow2_open_entry(void *opaque) 1915 { 1916 QCow2OpenCo *qoc = opaque; 1917 BDRVQcow2State *s = qoc->bs->opaque; 1918 1919 GRAPH_RDLOCK_GUARD(); 1920 1921 qemu_co_mutex_lock(&s->lock); 1922 qoc->ret = qcow2_do_open(qoc->bs, qoc->options, qoc->flags, true, 1923 qoc->errp); 1924 qemu_co_mutex_unlock(&s->lock); 1925 1926 aio_wait_kick(); 1927 } 1928 1929 static int qcow2_open(BlockDriverState *bs, QDict *options, int flags, 1930 Error **errp) 1931 { 1932 BDRVQcow2State *s = bs->opaque; 1933 QCow2OpenCo qoc = { 1934 .bs = bs, 1935 .options = options, 1936 .flags = flags, 1937 .errp = errp, 1938 .ret = -EINPROGRESS 1939 }; 1940 int ret; 1941 1942 ret = bdrv_open_file_child(NULL, options, "file", bs, errp); 1943 if (ret < 0) { 1944 return ret; 1945 } 1946 1947 /* Initialise locks */ 1948 qemu_co_mutex_init(&s->lock); 1949 1950 assert(!qemu_in_coroutine()); 1951 assert(qemu_get_current_aio_context() == qemu_get_aio_context()); 1952 1953 aio_co_enter(bdrv_get_aio_context(bs), 1954 qemu_coroutine_create(qcow2_open_entry, &qoc)); 
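    /*
     * qcow2_open_entry() replaces the -EINPROGRESS marker in qoc.ret with its
     * real return value and calls aio_wait_kick(), so poll here until the
     * coroutine has finished.
     */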
1955 AIO_WAIT_WHILE_UNLOCKED(NULL, qoc.ret == -EINPROGRESS); 1956 1957 return qoc.ret; 1958 } 1959 1960 static void qcow2_refresh_limits(BlockDriverState *bs, Error **errp) 1961 { 1962 BDRVQcow2State *s = bs->opaque; 1963 1964 if (bs->encrypted) { 1965 /* Encryption works on a sector granularity */ 1966 bs->bl.request_alignment = qcrypto_block_get_sector_size(s->crypto); 1967 } 1968 bs->bl.pwrite_zeroes_alignment = s->subcluster_size; 1969 bs->bl.pdiscard_alignment = s->cluster_size; 1970 } 1971 1972 static int GRAPH_UNLOCKED 1973 qcow2_reopen_prepare(BDRVReopenState *state,BlockReopenQueue *queue, 1974 Error **errp) 1975 { 1976 BDRVQcow2State *s = state->bs->opaque; 1977 Qcow2ReopenState *r; 1978 int ret; 1979 1980 GLOBAL_STATE_CODE(); 1981 GRAPH_RDLOCK_GUARD_MAINLOOP(); 1982 1983 r = g_new0(Qcow2ReopenState, 1); 1984 state->opaque = r; 1985 1986 ret = qcow2_update_options_prepare(state->bs, r, state->options, 1987 state->flags, errp); 1988 if (ret < 0) { 1989 goto fail; 1990 } 1991 1992 /* We need to write out any unwritten data if we reopen read-only. */ 1993 if ((state->flags & BDRV_O_RDWR) == 0) { 1994 ret = qcow2_reopen_bitmaps_ro(state->bs, errp); 1995 if (ret < 0) { 1996 goto fail; 1997 } 1998 1999 ret = bdrv_flush(state->bs); 2000 if (ret < 0) { 2001 goto fail; 2002 } 2003 2004 ret = qcow2_mark_clean(state->bs); 2005 if (ret < 0) { 2006 goto fail; 2007 } 2008 } 2009 2010 /* 2011 * Without an external data file, s->data_file points to the same BdrvChild 2012 * as bs->file. It needs to be resynced after reopen because bs->file may 2013 * be changed. We can't use it in the meantime. 2014 */ 2015 if (!has_data_file(state->bs)) { 2016 assert(s->data_file == state->bs->file); 2017 s->data_file = NULL; 2018 } 2019 2020 return 0; 2021 2022 fail: 2023 qcow2_update_options_abort(state->bs, r); 2024 g_free(r); 2025 return ret; 2026 } 2027 2028 static void qcow2_reopen_commit(BDRVReopenState *state) 2029 { 2030 BDRVQcow2State *s = state->bs->opaque; 2031 2032 qcow2_update_options_commit(state->bs, state->opaque); 2033 if (!s->data_file) { 2034 /* 2035 * If we don't have an external data file, s->data_file was cleared by 2036 * qcow2_reopen_prepare() and needs to be updated. 2037 */ 2038 s->data_file = state->bs->file; 2039 } 2040 g_free(state->opaque); 2041 } 2042 2043 static void qcow2_reopen_commit_post(BDRVReopenState *state) 2044 { 2045 GRAPH_RDLOCK_GUARD_MAINLOOP(); 2046 2047 if (state->flags & BDRV_O_RDWR) { 2048 Error *local_err = NULL; 2049 2050 if (qcow2_reopen_bitmaps_rw(state->bs, &local_err) < 0) { 2051 /* 2052 * This is not fatal, bitmaps just left read-only, so all following 2053 * writes will fail. User can remove read-only bitmaps to unblock 2054 * writes or retry reopen. 2055 */ 2056 error_reportf_err(local_err, 2057 "%s: Failed to make dirty bitmaps writable: ", 2058 bdrv_get_node_name(state->bs)); 2059 } 2060 } 2061 } 2062 2063 static void qcow2_reopen_abort(BDRVReopenState *state) 2064 { 2065 BDRVQcow2State *s = state->bs->opaque; 2066 2067 if (!s->data_file) { 2068 /* 2069 * If we don't have an external data file, s->data_file was cleared by 2070 * qcow2_reopen_prepare() and needs to be restored. 
2071 */ 2072 s->data_file = state->bs->file; 2073 } 2074 qcow2_update_options_abort(state->bs, state->opaque); 2075 g_free(state->opaque); 2076 } 2077 2078 static void qcow2_join_options(QDict *options, QDict *old_options) 2079 { 2080 bool has_new_overlap_template = 2081 qdict_haskey(options, QCOW2_OPT_OVERLAP) || 2082 qdict_haskey(options, QCOW2_OPT_OVERLAP_TEMPLATE); 2083 bool has_new_total_cache_size = 2084 qdict_haskey(options, QCOW2_OPT_CACHE_SIZE); 2085 bool has_all_cache_options; 2086 2087 /* New overlap template overrides all old overlap options */ 2088 if (has_new_overlap_template) { 2089 qdict_del(old_options, QCOW2_OPT_OVERLAP); 2090 qdict_del(old_options, QCOW2_OPT_OVERLAP_TEMPLATE); 2091 qdict_del(old_options, QCOW2_OPT_OVERLAP_MAIN_HEADER); 2092 qdict_del(old_options, QCOW2_OPT_OVERLAP_ACTIVE_L1); 2093 qdict_del(old_options, QCOW2_OPT_OVERLAP_ACTIVE_L2); 2094 qdict_del(old_options, QCOW2_OPT_OVERLAP_REFCOUNT_TABLE); 2095 qdict_del(old_options, QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK); 2096 qdict_del(old_options, QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE); 2097 qdict_del(old_options, QCOW2_OPT_OVERLAP_INACTIVE_L1); 2098 qdict_del(old_options, QCOW2_OPT_OVERLAP_INACTIVE_L2); 2099 } 2100 2101 /* New total cache size overrides all old options */ 2102 if (qdict_haskey(options, QCOW2_OPT_CACHE_SIZE)) { 2103 qdict_del(old_options, QCOW2_OPT_L2_CACHE_SIZE); 2104 qdict_del(old_options, QCOW2_OPT_REFCOUNT_CACHE_SIZE); 2105 } 2106 2107 qdict_join(options, old_options, false); 2108 2109 /* 2110 * If after merging all cache size options are set, an old total size is 2111 * overwritten. Do keep all options, however, if all three are new. The 2112 * resulting error message is what we want to happen. 2113 */ 2114 has_all_cache_options = 2115 qdict_haskey(options, QCOW2_OPT_CACHE_SIZE) || 2116 qdict_haskey(options, QCOW2_OPT_L2_CACHE_SIZE) || 2117 qdict_haskey(options, QCOW2_OPT_REFCOUNT_CACHE_SIZE); 2118 2119 if (has_all_cache_options && !has_new_total_cache_size) { 2120 qdict_del(options, QCOW2_OPT_CACHE_SIZE); 2121 } 2122 } 2123 2124 static int coroutine_fn GRAPH_RDLOCK 2125 qcow2_co_block_status(BlockDriverState *bs, bool want_zero, int64_t offset, 2126 int64_t count, int64_t *pnum, int64_t *map, 2127 BlockDriverState **file) 2128 { 2129 BDRVQcow2State *s = bs->opaque; 2130 uint64_t host_offset; 2131 unsigned int bytes; 2132 QCow2SubclusterType type; 2133 int ret, status = 0; 2134 2135 qemu_co_mutex_lock(&s->lock); 2136 2137 if (!s->metadata_preallocation_checked) { 2138 ret = qcow2_detect_metadata_preallocation(bs); 2139 s->metadata_preallocation = (ret == 1); 2140 s->metadata_preallocation_checked = true; 2141 } 2142 2143 bytes = MIN(INT_MAX, count); 2144 ret = qcow2_get_host_offset(bs, offset, &bytes, &host_offset, &type); 2145 qemu_co_mutex_unlock(&s->lock); 2146 if (ret < 0) { 2147 return ret; 2148 } 2149 2150 *pnum = bytes; 2151 2152 if ((type == QCOW2_SUBCLUSTER_NORMAL || 2153 type == QCOW2_SUBCLUSTER_ZERO_ALLOC || 2154 type == QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC) && !s->crypto) { 2155 *map = host_offset; 2156 *file = s->data_file->bs; 2157 status |= BDRV_BLOCK_OFFSET_VALID; 2158 } 2159 if (type == QCOW2_SUBCLUSTER_ZERO_PLAIN || 2160 type == QCOW2_SUBCLUSTER_ZERO_ALLOC) { 2161 status |= BDRV_BLOCK_ZERO; 2162 } else if (type != QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN && 2163 type != QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC) { 2164 status |= BDRV_BLOCK_DATA; 2165 } 2166 if (s->metadata_preallocation && (status & BDRV_BLOCK_DATA) && 2167 (status & BDRV_BLOCK_OFFSET_VALID)) 2168 { 2169 status |= 
BDRV_BLOCK_RECURSE; 2170 } 2171 if (type == QCOW2_SUBCLUSTER_COMPRESSED) { 2172 status |= BDRV_BLOCK_COMPRESSED; 2173 } 2174 return status; 2175 } 2176 2177 static int coroutine_fn GRAPH_RDLOCK 2178 qcow2_handle_l2meta(BlockDriverState *bs, QCowL2Meta **pl2meta, bool link_l2) 2179 { 2180 int ret = 0; 2181 QCowL2Meta *l2meta = *pl2meta; 2182 2183 while (l2meta != NULL) { 2184 QCowL2Meta *next; 2185 2186 if (link_l2) { 2187 ret = qcow2_alloc_cluster_link_l2(bs, l2meta); 2188 if (ret) { 2189 goto out; 2190 } 2191 } else { 2192 qcow2_alloc_cluster_abort(bs, l2meta); 2193 } 2194 2195 /* Take the request off the list of running requests */ 2196 QLIST_REMOVE(l2meta, next_in_flight); 2197 2198 qemu_co_queue_restart_all(&l2meta->dependent_requests); 2199 2200 next = l2meta->next; 2201 g_free(l2meta); 2202 l2meta = next; 2203 } 2204 out: 2205 *pl2meta = l2meta; 2206 return ret; 2207 } 2208 2209 static int coroutine_fn GRAPH_RDLOCK 2210 qcow2_co_preadv_encrypted(BlockDriverState *bs, 2211 uint64_t host_offset, 2212 uint64_t offset, 2213 uint64_t bytes, 2214 QEMUIOVector *qiov, 2215 uint64_t qiov_offset) 2216 { 2217 int ret; 2218 BDRVQcow2State *s = bs->opaque; 2219 uint8_t *buf; 2220 2221 assert(bs->encrypted && s->crypto); 2222 assert(bytes <= QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); 2223 2224 /* 2225 * For encrypted images, read everything into a temporary 2226 * contiguous buffer on which the AES functions can work. 2227 * Also, decryption in a separate buffer is better as it 2228 * prevents the guest from learning information about the 2229 * encrypted nature of the virtual disk. 2230 */ 2231 2232 buf = qemu_try_blockalign(s->data_file->bs, bytes); 2233 if (buf == NULL) { 2234 return -ENOMEM; 2235 } 2236 2237 BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_AIO); 2238 ret = bdrv_co_pread(s->data_file, host_offset, bytes, buf, 0); 2239 if (ret < 0) { 2240 goto fail; 2241 } 2242 2243 if (qcow2_co_decrypt(bs, host_offset, offset, buf, bytes) < 0) 2244 { 2245 ret = -EIO; 2246 goto fail; 2247 } 2248 qemu_iovec_from_buf(qiov, qiov_offset, buf, bytes); 2249 2250 fail: 2251 qemu_vfree(buf); 2252 2253 return ret; 2254 } 2255 2256 typedef struct Qcow2AioTask { 2257 AioTask task; 2258 2259 BlockDriverState *bs; 2260 QCow2SubclusterType subcluster_type; /* only for read */ 2261 uint64_t host_offset; /* or l2_entry for compressed read */ 2262 uint64_t offset; 2263 uint64_t bytes; 2264 QEMUIOVector *qiov; 2265 uint64_t qiov_offset; 2266 QCowL2Meta *l2meta; /* only for write */ 2267 } Qcow2AioTask; 2268 2269 static coroutine_fn int qcow2_co_preadv_task_entry(AioTask *task); 2270 static coroutine_fn int qcow2_add_task(BlockDriverState *bs, 2271 AioTaskPool *pool, 2272 AioTaskFunc func, 2273 QCow2SubclusterType subcluster_type, 2274 uint64_t host_offset, 2275 uint64_t offset, 2276 uint64_t bytes, 2277 QEMUIOVector *qiov, 2278 size_t qiov_offset, 2279 QCowL2Meta *l2meta) 2280 { 2281 Qcow2AioTask local_task; 2282 Qcow2AioTask *task = pool ? g_new(Qcow2AioTask, 1) : &local_task; 2283 2284 *task = (Qcow2AioTask) { 2285 .task.func = func, 2286 .bs = bs, 2287 .subcluster_type = subcluster_type, 2288 .qiov = qiov, 2289 .host_offset = host_offset, 2290 .offset = offset, 2291 .bytes = bytes, 2292 .qiov_offset = qiov_offset, 2293 .l2meta = l2meta, 2294 }; 2295 2296 trace_qcow2_add_task(qemu_coroutine_self(), bs, pool, 2297 func == qcow2_co_preadv_task_entry ? 
"read" : "write", 2298 subcluster_type, host_offset, offset, bytes, 2299 qiov, qiov_offset); 2300 2301 if (!pool) { 2302 return func(&task->task); 2303 } 2304 2305 aio_task_pool_start_task(pool, &task->task); 2306 2307 return 0; 2308 } 2309 2310 static int coroutine_fn GRAPH_RDLOCK 2311 qcow2_co_preadv_task(BlockDriverState *bs, QCow2SubclusterType subc_type, 2312 uint64_t host_offset, uint64_t offset, uint64_t bytes, 2313 QEMUIOVector *qiov, size_t qiov_offset) 2314 { 2315 BDRVQcow2State *s = bs->opaque; 2316 2317 switch (subc_type) { 2318 case QCOW2_SUBCLUSTER_ZERO_PLAIN: 2319 case QCOW2_SUBCLUSTER_ZERO_ALLOC: 2320 /* Both zero types are handled in qcow2_co_preadv_part */ 2321 g_assert_not_reached(); 2322 2323 case QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN: 2324 case QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC: 2325 assert(bs->backing); /* otherwise handled in qcow2_co_preadv_part */ 2326 2327 BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_BACKING_AIO); 2328 return bdrv_co_preadv_part(bs->backing, offset, bytes, 2329 qiov, qiov_offset, 0); 2330 2331 case QCOW2_SUBCLUSTER_COMPRESSED: 2332 return qcow2_co_preadv_compressed(bs, host_offset, 2333 offset, bytes, qiov, qiov_offset); 2334 2335 case QCOW2_SUBCLUSTER_NORMAL: 2336 if (bs->encrypted) { 2337 return qcow2_co_preadv_encrypted(bs, host_offset, 2338 offset, bytes, qiov, qiov_offset); 2339 } 2340 2341 BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_AIO); 2342 return bdrv_co_preadv_part(s->data_file, host_offset, 2343 bytes, qiov, qiov_offset, 0); 2344 2345 default: 2346 g_assert_not_reached(); 2347 } 2348 2349 g_assert_not_reached(); 2350 } 2351 2352 /* 2353 * This function can count as GRAPH_RDLOCK because qcow2_co_preadv_part() holds 2354 * the graph lock and keeps it until this coroutine has terminated. 2355 */ 2356 static int coroutine_fn GRAPH_RDLOCK qcow2_co_preadv_task_entry(AioTask *task) 2357 { 2358 Qcow2AioTask *t = container_of(task, Qcow2AioTask, task); 2359 2360 assert(!t->l2meta); 2361 2362 return qcow2_co_preadv_task(t->bs, t->subcluster_type, 2363 t->host_offset, t->offset, t->bytes, 2364 t->qiov, t->qiov_offset); 2365 } 2366 2367 static int coroutine_fn GRAPH_RDLOCK 2368 qcow2_co_preadv_part(BlockDriverState *bs, int64_t offset, int64_t bytes, 2369 QEMUIOVector *qiov, size_t qiov_offset, 2370 BdrvRequestFlags flags) 2371 { 2372 BDRVQcow2State *s = bs->opaque; 2373 int ret = 0; 2374 unsigned int cur_bytes; /* number of bytes in current iteration */ 2375 uint64_t host_offset = 0; 2376 QCow2SubclusterType type; 2377 AioTaskPool *aio = NULL; 2378 2379 while (bytes != 0 && aio_task_pool_status(aio) == 0) { 2380 /* prepare next request */ 2381 cur_bytes = MIN(bytes, INT_MAX); 2382 if (s->crypto) { 2383 cur_bytes = MIN(cur_bytes, 2384 QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); 2385 } 2386 2387 qemu_co_mutex_lock(&s->lock); 2388 ret = qcow2_get_host_offset(bs, offset, &cur_bytes, 2389 &host_offset, &type); 2390 qemu_co_mutex_unlock(&s->lock); 2391 if (ret < 0) { 2392 goto out; 2393 } 2394 2395 if (type == QCOW2_SUBCLUSTER_ZERO_PLAIN || 2396 type == QCOW2_SUBCLUSTER_ZERO_ALLOC || 2397 (type == QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN && !bs->backing) || 2398 (type == QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC && !bs->backing)) 2399 { 2400 qemu_iovec_memset(qiov, qiov_offset, 0, cur_bytes); 2401 } else { 2402 if (!aio && cur_bytes != bytes) { 2403 aio = aio_task_pool_new(QCOW2_MAX_WORKERS); 2404 } 2405 ret = qcow2_add_task(bs, aio, qcow2_co_preadv_task_entry, type, 2406 host_offset, offset, cur_bytes, 2407 qiov, qiov_offset, NULL); 2408 if (ret < 0) { 2409 goto out; 2410 } 2411 
} 2412 2413 bytes -= cur_bytes; 2414 offset += cur_bytes; 2415 qiov_offset += cur_bytes; 2416 } 2417 2418 out: 2419 if (aio) { 2420 aio_task_pool_wait_all(aio); 2421 if (ret == 0) { 2422 ret = aio_task_pool_status(aio); 2423 } 2424 g_free(aio); 2425 } 2426 2427 return ret; 2428 } 2429 2430 /* Check if it's possible to merge a write request with the writing of 2431 * the data from the COW regions */ 2432 static bool merge_cow(uint64_t offset, unsigned bytes, 2433 QEMUIOVector *qiov, size_t qiov_offset, 2434 QCowL2Meta *l2meta) 2435 { 2436 QCowL2Meta *m; 2437 2438 for (m = l2meta; m != NULL; m = m->next) { 2439 /* If both COW regions are empty then there's nothing to merge */ 2440 if (m->cow_start.nb_bytes == 0 && m->cow_end.nb_bytes == 0) { 2441 continue; 2442 } 2443 2444 /* If COW regions are handled already, skip this too */ 2445 if (m->skip_cow) { 2446 continue; 2447 } 2448 2449 /* 2450 * The write request should start immediately after the first 2451 * COW region. This does not always happen because the area 2452 * touched by the request can be larger than the one defined 2453 * by @m (a single request can span an area consisting of a 2454 * mix of previously unallocated and allocated clusters, that 2455 * is why @l2meta is a list). 2456 */ 2457 if (l2meta_cow_start(m) + m->cow_start.nb_bytes != offset) { 2458 /* In this case the request starts before this region */ 2459 assert(offset < l2meta_cow_start(m)); 2460 assert(m->cow_start.nb_bytes == 0); 2461 continue; 2462 } 2463 2464 /* The write request should end immediately before the second 2465 * COW region (see above for why it does not always happen) */ 2466 if (m->offset + m->cow_end.offset != offset + bytes) { 2467 assert(offset + bytes > m->offset + m->cow_end.offset); 2468 assert(m->cow_end.nb_bytes == 0); 2469 continue; 2470 } 2471 2472 /* Make sure that adding both COW regions to the QEMUIOVector 2473 * does not exceed IOV_MAX */ 2474 if (qemu_iovec_subvec_niov(qiov, qiov_offset, bytes) > IOV_MAX - 2) { 2475 continue; 2476 } 2477 2478 m->data_qiov = qiov; 2479 m->data_qiov_offset = qiov_offset; 2480 return true; 2481 } 2482 2483 return false; 2484 } 2485 2486 /* 2487 * Return 1 if the COW regions read as zeroes, 0 if not, < 0 on error. 2488 * Note that returning 0 does not guarantee non-zero data. 2489 */ 2490 static int coroutine_fn GRAPH_RDLOCK 2491 is_zero_cow(BlockDriverState *bs, QCowL2Meta *m) 2492 { 2493 /* 2494 * This check is designed for optimization shortcut so it must be 2495 * efficient. 2496 * Instead of is_zero(), use bdrv_co_is_zero_fast() as it is 2497 * faster (but not as accurate and can result in false negatives). 
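     * A false negative only costs performance: handle_alloc_space() then
     * skips the zero-write shortcut for this allocation and the data is
     * copied by the regular COW path instead.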
2498 */ 2499 int ret = bdrv_co_is_zero_fast(bs, m->offset + m->cow_start.offset, 2500 m->cow_start.nb_bytes); 2501 if (ret <= 0) { 2502 return ret; 2503 } 2504 2505 return bdrv_co_is_zero_fast(bs, m->offset + m->cow_end.offset, 2506 m->cow_end.nb_bytes); 2507 } 2508 2509 static int coroutine_fn GRAPH_RDLOCK 2510 handle_alloc_space(BlockDriverState *bs, QCowL2Meta *l2meta) 2511 { 2512 BDRVQcow2State *s = bs->opaque; 2513 QCowL2Meta *m; 2514 2515 if (!(s->data_file->bs->supported_zero_flags & BDRV_REQ_NO_FALLBACK)) { 2516 return 0; 2517 } 2518 2519 if (bs->encrypted) { 2520 return 0; 2521 } 2522 2523 for (m = l2meta; m != NULL; m = m->next) { 2524 int ret; 2525 uint64_t start_offset = m->alloc_offset + m->cow_start.offset; 2526 unsigned nb_bytes = m->cow_end.offset + m->cow_end.nb_bytes - 2527 m->cow_start.offset; 2528 2529 if (!m->cow_start.nb_bytes && !m->cow_end.nb_bytes) { 2530 continue; 2531 } 2532 2533 ret = is_zero_cow(bs, m); 2534 if (ret < 0) { 2535 return ret; 2536 } else if (ret == 0) { 2537 continue; 2538 } 2539 2540 /* 2541 * instead of writing zero COW buffers, 2542 * efficiently zero out the whole clusters 2543 */ 2544 2545 ret = qcow2_pre_write_overlap_check(bs, 0, start_offset, nb_bytes, 2546 true); 2547 if (ret < 0) { 2548 return ret; 2549 } 2550 2551 BLKDBG_CO_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC_SPACE); 2552 ret = bdrv_co_pwrite_zeroes(s->data_file, start_offset, nb_bytes, 2553 BDRV_REQ_NO_FALLBACK); 2554 if (ret < 0) { 2555 if (ret != -ENOTSUP && ret != -EAGAIN) { 2556 return ret; 2557 } 2558 continue; 2559 } 2560 2561 trace_qcow2_skip_cow(qemu_coroutine_self(), m->offset, m->nb_clusters); 2562 m->skip_cow = true; 2563 } 2564 return 0; 2565 } 2566 2567 /* 2568 * qcow2_co_pwritev_task 2569 * Called with s->lock unlocked 2570 * l2meta - if not NULL, qcow2_co_pwritev_task() will consume it. Caller must 2571 * not use it somehow after qcow2_co_pwritev_task() call 2572 */ 2573 static coroutine_fn GRAPH_RDLOCK 2574 int qcow2_co_pwritev_task(BlockDriverState *bs, uint64_t host_offset, 2575 uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, 2576 uint64_t qiov_offset, QCowL2Meta *l2meta) 2577 { 2578 int ret; 2579 BDRVQcow2State *s = bs->opaque; 2580 void *crypt_buf = NULL; 2581 QEMUIOVector encrypted_qiov; 2582 2583 if (bs->encrypted) { 2584 assert(s->crypto); 2585 assert(bytes <= QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); 2586 crypt_buf = qemu_try_blockalign(bs->file->bs, bytes); 2587 if (crypt_buf == NULL) { 2588 ret = -ENOMEM; 2589 goto out_unlocked; 2590 } 2591 qemu_iovec_to_buf(qiov, qiov_offset, crypt_buf, bytes); 2592 2593 if (qcow2_co_encrypt(bs, host_offset, offset, crypt_buf, bytes) < 0) { 2594 ret = -EIO; 2595 goto out_unlocked; 2596 } 2597 2598 qemu_iovec_init_buf(&encrypted_qiov, crypt_buf, bytes); 2599 qiov = &encrypted_qiov; 2600 qiov_offset = 0; 2601 } 2602 2603 /* Try to efficiently initialize the physical space with zeroes */ 2604 ret = handle_alloc_space(bs, l2meta); 2605 if (ret < 0) { 2606 goto out_unlocked; 2607 } 2608 2609 /* 2610 * If we need to do COW, check if it's possible to merge the 2611 * writing of the guest data together with that of the COW regions. 2612 * If it's not possible (or not necessary) then write the 2613 * guest data now. 
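     * Note that merge_cow() does not perform any I/O itself: it merely
     * attaches the guest qiov to the matching QCowL2Meta, so that the COW
     * code can later write the head COW region, the guest data and the tail
     * COW region as one request when the L2 entries are linked.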
2614 */ 2615 if (!merge_cow(offset, bytes, qiov, qiov_offset, l2meta)) { 2616 BLKDBG_CO_EVENT(bs->file, BLKDBG_WRITE_AIO); 2617 trace_qcow2_writev_data(qemu_coroutine_self(), host_offset); 2618 ret = bdrv_co_pwritev_part(s->data_file, host_offset, 2619 bytes, qiov, qiov_offset, 0); 2620 if (ret < 0) { 2621 goto out_unlocked; 2622 } 2623 } 2624 2625 qemu_co_mutex_lock(&s->lock); 2626 2627 ret = qcow2_handle_l2meta(bs, &l2meta, true); 2628 goto out_locked; 2629 2630 out_unlocked: 2631 qemu_co_mutex_lock(&s->lock); 2632 2633 out_locked: 2634 qcow2_handle_l2meta(bs, &l2meta, false); 2635 qemu_co_mutex_unlock(&s->lock); 2636 2637 qemu_vfree(crypt_buf); 2638 2639 return ret; 2640 } 2641 2642 /* 2643 * This function can count as GRAPH_RDLOCK because qcow2_co_pwritev_part() holds 2644 * the graph lock and keeps it until this coroutine has terminated. 2645 */ 2646 static coroutine_fn GRAPH_RDLOCK int qcow2_co_pwritev_task_entry(AioTask *task) 2647 { 2648 Qcow2AioTask *t = container_of(task, Qcow2AioTask, task); 2649 2650 assert(!t->subcluster_type); 2651 2652 return qcow2_co_pwritev_task(t->bs, t->host_offset, 2653 t->offset, t->bytes, t->qiov, t->qiov_offset, 2654 t->l2meta); 2655 } 2656 2657 static int coroutine_fn GRAPH_RDLOCK 2658 qcow2_co_pwritev_part(BlockDriverState *bs, int64_t offset, int64_t bytes, 2659 QEMUIOVector *qiov, size_t qiov_offset, 2660 BdrvRequestFlags flags) 2661 { 2662 BDRVQcow2State *s = bs->opaque; 2663 int offset_in_cluster; 2664 int ret; 2665 unsigned int cur_bytes; /* number of sectors in current iteration */ 2666 uint64_t host_offset; 2667 QCowL2Meta *l2meta = NULL; 2668 AioTaskPool *aio = NULL; 2669 2670 trace_qcow2_writev_start_req(qemu_coroutine_self(), offset, bytes); 2671 2672 while (bytes != 0 && aio_task_pool_status(aio) == 0) { 2673 2674 l2meta = NULL; 2675 2676 trace_qcow2_writev_start_part(qemu_coroutine_self()); 2677 offset_in_cluster = offset_into_cluster(s, offset); 2678 cur_bytes = MIN(bytes, INT_MAX); 2679 if (bs->encrypted) { 2680 cur_bytes = MIN(cur_bytes, 2681 QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size 2682 - offset_in_cluster); 2683 } 2684 2685 qemu_co_mutex_lock(&s->lock); 2686 2687 ret = qcow2_alloc_host_offset(bs, offset, &cur_bytes, 2688 &host_offset, &l2meta); 2689 if (ret < 0) { 2690 goto out_locked; 2691 } 2692 2693 ret = qcow2_pre_write_overlap_check(bs, 0, host_offset, 2694 cur_bytes, true); 2695 if (ret < 0) { 2696 goto out_locked; 2697 } 2698 2699 qemu_co_mutex_unlock(&s->lock); 2700 2701 if (!aio && cur_bytes != bytes) { 2702 aio = aio_task_pool_new(QCOW2_MAX_WORKERS); 2703 } 2704 ret = qcow2_add_task(bs, aio, qcow2_co_pwritev_task_entry, 0, 2705 host_offset, offset, 2706 cur_bytes, qiov, qiov_offset, l2meta); 2707 l2meta = NULL; /* l2meta is consumed by qcow2_co_pwritev_task() */ 2708 if (ret < 0) { 2709 goto fail_nometa; 2710 } 2711 2712 bytes -= cur_bytes; 2713 offset += cur_bytes; 2714 qiov_offset += cur_bytes; 2715 trace_qcow2_writev_done_part(qemu_coroutine_self(), cur_bytes); 2716 } 2717 ret = 0; 2718 2719 qemu_co_mutex_lock(&s->lock); 2720 2721 out_locked: 2722 qcow2_handle_l2meta(bs, &l2meta, false); 2723 2724 qemu_co_mutex_unlock(&s->lock); 2725 2726 fail_nometa: 2727 if (aio) { 2728 aio_task_pool_wait_all(aio); 2729 if (ret == 0) { 2730 ret = aio_task_pool_status(aio); 2731 } 2732 g_free(aio); 2733 } 2734 2735 trace_qcow2_writev_done_req(qemu_coroutine_self(), ret); 2736 2737 return ret; 2738 } 2739 2740 static int GRAPH_RDLOCK qcow2_inactivate(BlockDriverState *bs) 2741 { 2742 BDRVQcow2State *s = bs->opaque; 2743 int ret, 
result = 0; 2744 Error *local_err = NULL; 2745 2746 qcow2_store_persistent_dirty_bitmaps(bs, true, &local_err); 2747 if (local_err != NULL) { 2748 result = -EINVAL; 2749 error_reportf_err(local_err, "Lost persistent bitmaps during " 2750 "inactivation of node '%s': ", 2751 bdrv_get_device_or_node_name(bs)); 2752 } 2753 2754 ret = qcow2_cache_flush(bs, s->l2_table_cache); 2755 if (ret) { 2756 result = ret; 2757 error_report("Failed to flush the L2 table cache: %s", 2758 strerror(-ret)); 2759 } 2760 2761 ret = qcow2_cache_flush(bs, s->refcount_block_cache); 2762 if (ret) { 2763 result = ret; 2764 error_report("Failed to flush the refcount block cache: %s", 2765 strerror(-ret)); 2766 } 2767 2768 if (result == 0) { 2769 qcow2_mark_clean(bs); 2770 } 2771 2772 return result; 2773 } 2774 2775 static void coroutine_mixed_fn GRAPH_RDLOCK 2776 qcow2_do_close(BlockDriverState *bs, bool close_data_file) 2777 { 2778 BDRVQcow2State *s = bs->opaque; 2779 qemu_vfree(s->l1_table); 2780 /* else pre-write overlap checks in cache_destroy may crash */ 2781 s->l1_table = NULL; 2782 2783 if (!(s->flags & BDRV_O_INACTIVE)) { 2784 qcow2_inactivate(bs); 2785 } 2786 2787 cache_clean_timer_del(bs); 2788 qcow2_cache_destroy(s->l2_table_cache); 2789 qcow2_cache_destroy(s->refcount_block_cache); 2790 2791 qcrypto_block_free(s->crypto); 2792 s->crypto = NULL; 2793 qapi_free_QCryptoBlockOpenOptions(s->crypto_opts); 2794 2795 g_free(s->unknown_header_fields); 2796 cleanup_unknown_header_ext(bs); 2797 2798 g_free(s->image_data_file); 2799 g_free(s->image_backing_file); 2800 g_free(s->image_backing_format); 2801 2802 if (close_data_file && has_data_file(bs)) { 2803 GLOBAL_STATE_CODE(); 2804 bdrv_graph_rdunlock_main_loop(); 2805 bdrv_graph_wrlock(NULL); 2806 bdrv_unref_child(bs, s->data_file); 2807 bdrv_graph_wrunlock(); 2808 s->data_file = NULL; 2809 bdrv_graph_rdlock_main_loop(); 2810 } 2811 2812 qcow2_refcount_close(bs); 2813 qcow2_free_snapshots(bs); 2814 } 2815 2816 static void GRAPH_UNLOCKED qcow2_close(BlockDriverState *bs) 2817 { 2818 GLOBAL_STATE_CODE(); 2819 GRAPH_RDLOCK_GUARD_MAINLOOP(); 2820 2821 qcow2_do_close(bs, true); 2822 } 2823 2824 static void coroutine_fn GRAPH_RDLOCK 2825 qcow2_co_invalidate_cache(BlockDriverState *bs, Error **errp) 2826 { 2827 ERRP_GUARD(); 2828 BDRVQcow2State *s = bs->opaque; 2829 BdrvChild *data_file; 2830 int flags = s->flags; 2831 QCryptoBlock *crypto = NULL; 2832 QDict *options; 2833 int ret; 2834 2835 /* 2836 * Backing files are read-only which makes all of their metadata immutable, 2837 * that means we don't have to worry about reopening them here. 2838 */ 2839 2840 crypto = s->crypto; 2841 s->crypto = NULL; 2842 2843 /* 2844 * Do not reopen s->data_file (i.e., have qcow2_do_close() not close it, 2845 * and then prevent qcow2_do_open() from opening it), because this function 2846 * runs in the I/O path and as such we must not invoke global-state 2847 * functions like bdrv_unref_child() and bdrv_open_child(). 
2848 */ 2849 2850 qcow2_do_close(bs, false); 2851 2852 data_file = s->data_file; 2853 memset(s, 0, sizeof(BDRVQcow2State)); 2854 s->data_file = data_file; 2855 2856 options = qdict_clone_shallow(bs->options); 2857 2858 flags &= ~BDRV_O_INACTIVE; 2859 qemu_co_mutex_lock(&s->lock); 2860 ret = qcow2_do_open(bs, options, flags, false, errp); 2861 qemu_co_mutex_unlock(&s->lock); 2862 qobject_unref(options); 2863 if (ret < 0) { 2864 error_prepend(errp, "Could not reopen qcow2 layer: "); 2865 bs->drv = NULL; 2866 return; 2867 } 2868 2869 s->crypto = crypto; 2870 } 2871 2872 static size_t header_ext_add(char *buf, uint32_t magic, const void *s, 2873 size_t len, size_t buflen) 2874 { 2875 QCowExtension *ext_backing_fmt = (QCowExtension*) buf; 2876 size_t ext_len = sizeof(QCowExtension) + ((len + 7) & ~7); 2877 2878 if (buflen < ext_len) { 2879 return -ENOSPC; 2880 } 2881 2882 *ext_backing_fmt = (QCowExtension) { 2883 .magic = cpu_to_be32(magic), 2884 .len = cpu_to_be32(len), 2885 }; 2886 2887 if (len) { 2888 memcpy(buf + sizeof(QCowExtension), s, len); 2889 } 2890 2891 return ext_len; 2892 } 2893 2894 /* 2895 * Updates the qcow2 header, including the variable length parts of it, i.e. 2896 * the backing file name and all extensions. qcow2 was not designed to allow 2897 * such changes, so if we run out of space (we can only use the first cluster) 2898 * this function may fail. 2899 * 2900 * Returns 0 on success, -errno in error cases. 2901 */ 2902 int qcow2_update_header(BlockDriverState *bs) 2903 { 2904 BDRVQcow2State *s = bs->opaque; 2905 QCowHeader *header; 2906 char *buf; 2907 size_t buflen = s->cluster_size; 2908 int ret; 2909 uint64_t total_size; 2910 uint32_t refcount_table_clusters; 2911 size_t header_length; 2912 Qcow2UnknownHeaderExtension *uext; 2913 2914 buf = qemu_blockalign(bs, buflen); 2915 2916 /* Header structure */ 2917 header = (QCowHeader*) buf; 2918 2919 if (buflen < sizeof(*header)) { 2920 ret = -ENOSPC; 2921 goto fail; 2922 } 2923 2924 header_length = sizeof(*header) + s->unknown_header_fields_size; 2925 total_size = bs->total_sectors * BDRV_SECTOR_SIZE; 2926 refcount_table_clusters = s->refcount_table_size >> (s->cluster_bits - 3); 2927 2928 ret = validate_compression_type(s, NULL); 2929 if (ret) { 2930 goto fail; 2931 } 2932 2933 *header = (QCowHeader) { 2934 /* Version 2 fields */ 2935 .magic = cpu_to_be32(QCOW_MAGIC), 2936 .version = cpu_to_be32(s->qcow_version), 2937 .backing_file_offset = 0, 2938 .backing_file_size = 0, 2939 .cluster_bits = cpu_to_be32(s->cluster_bits), 2940 .size = cpu_to_be64(total_size), 2941 .crypt_method = cpu_to_be32(s->crypt_method_header), 2942 .l1_size = cpu_to_be32(s->l1_size), 2943 .l1_table_offset = cpu_to_be64(s->l1_table_offset), 2944 .refcount_table_offset = cpu_to_be64(s->refcount_table_offset), 2945 .refcount_table_clusters = cpu_to_be32(refcount_table_clusters), 2946 .nb_snapshots = cpu_to_be32(s->nb_snapshots), 2947 .snapshots_offset = cpu_to_be64(s->snapshots_offset), 2948 2949 /* Version 3 fields */ 2950 .incompatible_features = cpu_to_be64(s->incompatible_features), 2951 .compatible_features = cpu_to_be64(s->compatible_features), 2952 .autoclear_features = cpu_to_be64(s->autoclear_features), 2953 .refcount_order = cpu_to_be32(s->refcount_order), 2954 .header_length = cpu_to_be32(header_length), 2955 .compression_type = s->compression_type, 2956 }; 2957 2958 /* For older versions, write a shorter header */ 2959 switch (s->qcow_version) { 2960 case 2: 2961 ret = offsetof(QCowHeader, incompatible_features); 2962 break; 2963 case 3: 2964 
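        /* v3 images get the full structure, including the fields added with
         * version 3 (feature bits, refcount order, header length and
         * compression type) */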
ret = sizeof(*header); 2965 break; 2966 default: 2967 ret = -EINVAL; 2968 goto fail; 2969 } 2970 2971 buf += ret; 2972 buflen -= ret; 2973 memset(buf, 0, buflen); 2974 2975 /* Preserve any unknown field in the header */ 2976 if (s->unknown_header_fields_size) { 2977 if (buflen < s->unknown_header_fields_size) { 2978 ret = -ENOSPC; 2979 goto fail; 2980 } 2981 2982 memcpy(buf, s->unknown_header_fields, s->unknown_header_fields_size); 2983 buf += s->unknown_header_fields_size; 2984 buflen -= s->unknown_header_fields_size; 2985 } 2986 2987 /* Backing file format header extension */ 2988 if (s->image_backing_format) { 2989 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_BACKING_FORMAT, 2990 s->image_backing_format, 2991 strlen(s->image_backing_format), 2992 buflen); 2993 if (ret < 0) { 2994 goto fail; 2995 } 2996 2997 buf += ret; 2998 buflen -= ret; 2999 } 3000 3001 /* External data file header extension */ 3002 if (has_data_file(bs) && s->image_data_file) { 3003 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_DATA_FILE, 3004 s->image_data_file, strlen(s->image_data_file), 3005 buflen); 3006 if (ret < 0) { 3007 goto fail; 3008 } 3009 3010 buf += ret; 3011 buflen -= ret; 3012 } 3013 3014 /* Full disk encryption header pointer extension */ 3015 if (s->crypto_header.offset != 0) { 3016 s->crypto_header.offset = cpu_to_be64(s->crypto_header.offset); 3017 s->crypto_header.length = cpu_to_be64(s->crypto_header.length); 3018 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_CRYPTO_HEADER, 3019 &s->crypto_header, sizeof(s->crypto_header), 3020 buflen); 3021 s->crypto_header.offset = be64_to_cpu(s->crypto_header.offset); 3022 s->crypto_header.length = be64_to_cpu(s->crypto_header.length); 3023 if (ret < 0) { 3024 goto fail; 3025 } 3026 buf += ret; 3027 buflen -= ret; 3028 } 3029 3030 /* 3031 * Feature table. A mere 8 feature names occupies 392 bytes, and 3032 * when coupled with the v3 minimum header of 104 bytes plus the 3033 * 8-byte end-of-extension marker, that would leave only 8 bytes 3034 * for a backing file name in an image with 512-byte clusters. 3035 * Thus, we choose to omit this header for cluster sizes 4k and 3036 * smaller. 
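     * (Each Qcow2Feature entry is 48 bytes: one byte of feature type, one
     * byte of bit number and a 46-byte name, so eight entries plus the
     * 8-byte extension header add up to 8 * 48 + 8 = 392 bytes.)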
3037 */ 3038 if (s->qcow_version >= 3 && s->cluster_size > 4096) { 3039 static const Qcow2Feature features[] = { 3040 { 3041 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE, 3042 .bit = QCOW2_INCOMPAT_DIRTY_BITNR, 3043 .name = "dirty bit", 3044 }, 3045 { 3046 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE, 3047 .bit = QCOW2_INCOMPAT_CORRUPT_BITNR, 3048 .name = "corrupt bit", 3049 }, 3050 { 3051 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE, 3052 .bit = QCOW2_INCOMPAT_DATA_FILE_BITNR, 3053 .name = "external data file", 3054 }, 3055 { 3056 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE, 3057 .bit = QCOW2_INCOMPAT_COMPRESSION_BITNR, 3058 .name = "compression type", 3059 }, 3060 { 3061 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE, 3062 .bit = QCOW2_INCOMPAT_EXTL2_BITNR, 3063 .name = "extended L2 entries", 3064 }, 3065 { 3066 .type = QCOW2_FEAT_TYPE_COMPATIBLE, 3067 .bit = QCOW2_COMPAT_LAZY_REFCOUNTS_BITNR, 3068 .name = "lazy refcounts", 3069 }, 3070 { 3071 .type = QCOW2_FEAT_TYPE_AUTOCLEAR, 3072 .bit = QCOW2_AUTOCLEAR_BITMAPS_BITNR, 3073 .name = "bitmaps", 3074 }, 3075 { 3076 .type = QCOW2_FEAT_TYPE_AUTOCLEAR, 3077 .bit = QCOW2_AUTOCLEAR_DATA_FILE_RAW_BITNR, 3078 .name = "raw external data", 3079 }, 3080 }; 3081 3082 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_FEATURE_TABLE, 3083 features, sizeof(features), buflen); 3084 if (ret < 0) { 3085 goto fail; 3086 } 3087 buf += ret; 3088 buflen -= ret; 3089 } 3090 3091 /* Bitmap extension */ 3092 if (s->nb_bitmaps > 0) { 3093 Qcow2BitmapHeaderExt bitmaps_header = { 3094 .nb_bitmaps = cpu_to_be32(s->nb_bitmaps), 3095 .bitmap_directory_size = 3096 cpu_to_be64(s->bitmap_directory_size), 3097 .bitmap_directory_offset = 3098 cpu_to_be64(s->bitmap_directory_offset) 3099 }; 3100 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_BITMAPS, 3101 &bitmaps_header, sizeof(bitmaps_header), 3102 buflen); 3103 if (ret < 0) { 3104 goto fail; 3105 } 3106 buf += ret; 3107 buflen -= ret; 3108 } 3109 3110 /* Keep unknown header extensions */ 3111 QLIST_FOREACH(uext, &s->unknown_header_ext, next) { 3112 ret = header_ext_add(buf, uext->magic, uext->data, uext->len, buflen); 3113 if (ret < 0) { 3114 goto fail; 3115 } 3116 3117 buf += ret; 3118 buflen -= ret; 3119 } 3120 3121 /* End of header extensions */ 3122 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_END, NULL, 0, buflen); 3123 if (ret < 0) { 3124 goto fail; 3125 } 3126 3127 buf += ret; 3128 buflen -= ret; 3129 3130 /* Backing file name */ 3131 if (s->image_backing_file) { 3132 size_t backing_file_len = strlen(s->image_backing_file); 3133 3134 if (buflen < backing_file_len) { 3135 ret = -ENOSPC; 3136 goto fail; 3137 } 3138 3139 /* Using strncpy is ok here, since buf is not NUL-terminated. 
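     * The name length is stored separately in backing_file_size, so the
     * header never needs a terminating NUL, and the buflen check above
     * guarantees that the copy fits.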
*/ 3140 strncpy(buf, s->image_backing_file, buflen); 3141 3142 header->backing_file_offset = cpu_to_be64(buf - ((char*) header)); 3143 header->backing_file_size = cpu_to_be32(backing_file_len); 3144 } 3145 3146 /* Write the new header */ 3147 ret = bdrv_pwrite(bs->file, 0, s->cluster_size, header, 0); 3148 if (ret < 0) { 3149 goto fail; 3150 } 3151 3152 ret = 0; 3153 fail: 3154 qemu_vfree(header); 3155 return ret; 3156 } 3157 3158 static int qcow2_change_backing_file(BlockDriverState *bs, 3159 const char *backing_file, const char *backing_fmt) 3160 { 3161 BDRVQcow2State *s = bs->opaque; 3162 3163 /* Adding a backing file means that the external data file alone won't be 3164 * enough to make sense of the content */ 3165 if (backing_file && data_file_is_raw(bs)) { 3166 return -EINVAL; 3167 } 3168 3169 if (backing_file && strlen(backing_file) > 1023) { 3170 return -EINVAL; 3171 } 3172 3173 pstrcpy(bs->auto_backing_file, sizeof(bs->auto_backing_file), 3174 backing_file ?: ""); 3175 pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: ""); 3176 pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: ""); 3177 3178 g_free(s->image_backing_file); 3179 g_free(s->image_backing_format); 3180 3181 s->image_backing_file = backing_file ? g_strdup(bs->backing_file) : NULL; 3182 s->image_backing_format = backing_fmt ? g_strdup(bs->backing_format) : NULL; 3183 3184 return qcow2_update_header(bs); 3185 } 3186 3187 static int coroutine_fn GRAPH_RDLOCK 3188 qcow2_set_up_encryption(BlockDriverState *bs, 3189 QCryptoBlockCreateOptions *cryptoopts, 3190 Error **errp) 3191 { 3192 BDRVQcow2State *s = bs->opaque; 3193 QCryptoBlock *crypto = NULL; 3194 int fmt, ret; 3195 3196 switch (cryptoopts->format) { 3197 case Q_CRYPTO_BLOCK_FORMAT_LUKS: 3198 fmt = QCOW_CRYPT_LUKS; 3199 break; 3200 case Q_CRYPTO_BLOCK_FORMAT_QCOW: 3201 fmt = QCOW_CRYPT_AES; 3202 break; 3203 default: 3204 error_setg(errp, "Crypto format not supported in qcow2"); 3205 return -EINVAL; 3206 } 3207 3208 s->crypt_method_header = fmt; 3209 3210 crypto = qcrypto_block_create(cryptoopts, "encrypt.", 3211 qcow2_crypto_hdr_init_func, 3212 qcow2_crypto_hdr_write_func, 3213 bs, errp); 3214 if (!crypto) { 3215 return -EINVAL; 3216 } 3217 3218 ret = qcow2_update_header(bs); 3219 if (ret < 0) { 3220 error_setg_errno(errp, -ret, "Could not write encryption header"); 3221 goto out; 3222 } 3223 3224 ret = 0; 3225 out: 3226 qcrypto_block_free(crypto); 3227 return ret; 3228 } 3229 3230 /** 3231 * Preallocates metadata structures for data clusters between @offset (in the 3232 * guest disk) and @new_length (which is thus generally the new guest disk 3233 * size). 3234 * 3235 * Returns: 0 on success, -errno on failure. 
3236 */ 3237 static int coroutine_fn GRAPH_RDLOCK 3238 preallocate_co(BlockDriverState *bs, uint64_t offset, uint64_t new_length, 3239 PreallocMode mode, Error **errp) 3240 { 3241 BDRVQcow2State *s = bs->opaque; 3242 uint64_t bytes; 3243 uint64_t host_offset = 0; 3244 int64_t file_length; 3245 unsigned int cur_bytes; 3246 int ret; 3247 QCowL2Meta *meta = NULL, *m; 3248 3249 assert(offset <= new_length); 3250 bytes = new_length - offset; 3251 3252 while (bytes) { 3253 cur_bytes = MIN(bytes, QEMU_ALIGN_DOWN(INT_MAX, s->cluster_size)); 3254 ret = qcow2_alloc_host_offset(bs, offset, &cur_bytes, 3255 &host_offset, &meta); 3256 if (ret < 0) { 3257 error_setg_errno(errp, -ret, "Allocating clusters failed"); 3258 goto out; 3259 } 3260 3261 for (m = meta; m != NULL; m = m->next) { 3262 m->prealloc = true; 3263 } 3264 3265 ret = qcow2_handle_l2meta(bs, &meta, true); 3266 if (ret < 0) { 3267 error_setg_errno(errp, -ret, "Mapping clusters failed"); 3268 goto out; 3269 } 3270 3271 /* TODO Preallocate data if requested */ 3272 3273 bytes -= cur_bytes; 3274 offset += cur_bytes; 3275 } 3276 3277 /* 3278 * It is expected that the image file is large enough to actually contain 3279 * all of the allocated clusters (otherwise we get failing reads after 3280 * EOF). Extend the image to the last allocated sector. 3281 */ 3282 file_length = bdrv_co_getlength(s->data_file->bs); 3283 if (file_length < 0) { 3284 error_setg_errno(errp, -file_length, "Could not get file size"); 3285 ret = file_length; 3286 goto out; 3287 } 3288 3289 if (host_offset + cur_bytes > file_length) { 3290 if (mode == PREALLOC_MODE_METADATA) { 3291 mode = PREALLOC_MODE_OFF; 3292 } 3293 ret = bdrv_co_truncate(s->data_file, host_offset + cur_bytes, false, 3294 mode, 0, errp); 3295 if (ret < 0) { 3296 goto out; 3297 } 3298 } 3299 3300 ret = 0; 3301 3302 out: 3303 qcow2_handle_l2meta(bs, &meta, false); 3304 return ret; 3305 } 3306 3307 /* qcow2_refcount_metadata_size: 3308 * @clusters: number of clusters to refcount (including data and L1/L2 tables) 3309 * @cluster_size: size of a cluster, in bytes 3310 * @refcount_order: refcount bits power-of-2 exponent 3311 * @generous_increase: allow for the refcount table to be 1.5x as large as it 3312 * needs to be 3313 * 3314 * Returns: Number of bytes required for refcount blocks and table metadata. 3315 */ 3316 int64_t qcow2_refcount_metadata_size(int64_t clusters, size_t cluster_size, 3317 int refcount_order, bool generous_increase, 3318 uint64_t *refblock_count) 3319 { 3320 /* 3321 * Every host cluster is reference-counted, including metadata (even 3322 * refcount metadata is recursively included). 3323 * 3324 * An accurate formula for the size of refcount metadata size is difficult 3325 * to derive. An easier method of calculation is finding the fixed point 3326 * where no further refcount blocks or table clusters are required to 3327 * reference count every cluster. 
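     *
     * For example, with 64 KiB clusters and the default 16-bit refcounts,
     * refcounts_per_block is 64 KiB * 8 / 16 = 32768 and
     * blocks_per_table_cluster is 64 KiB / 8 = 8192; refcounting roughly
     * 16 million clusters (1 TiB worth of 64 KiB clusters) converges after
     * a few iterations on 513 refcount block clusters plus a single table
     * cluster, i.e. about 32 MiB of refcount metadata.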
3328 */ 3329 int64_t blocks_per_table_cluster = cluster_size / REFTABLE_ENTRY_SIZE; 3330 int64_t refcounts_per_block = cluster_size * 8 / (1 << refcount_order); 3331 int64_t table = 0; /* number of refcount table clusters */ 3332 int64_t blocks = 0; /* number of refcount block clusters */ 3333 int64_t last; 3334 int64_t n = 0; 3335 3336 do { 3337 last = n; 3338 blocks = DIV_ROUND_UP(clusters + table + blocks, refcounts_per_block); 3339 table = DIV_ROUND_UP(blocks, blocks_per_table_cluster); 3340 n = clusters + blocks + table; 3341 3342 if (n == last && generous_increase) { 3343 clusters += DIV_ROUND_UP(table, 2); 3344 n = 0; /* force another loop */ 3345 generous_increase = false; 3346 } 3347 } while (n != last); 3348 3349 if (refblock_count) { 3350 *refblock_count = blocks; 3351 } 3352 3353 return (blocks + table) * cluster_size; 3354 } 3355 3356 /** 3357 * qcow2_calc_prealloc_size: 3358 * @total_size: virtual disk size in bytes 3359 * @cluster_size: cluster size in bytes 3360 * @refcount_order: refcount bits power-of-2 exponent 3361 * @extended_l2: true if the image has extended L2 entries 3362 * 3363 * Returns: Total number of bytes required for the fully allocated image 3364 * (including metadata). 3365 */ 3366 static int64_t qcow2_calc_prealloc_size(int64_t total_size, 3367 size_t cluster_size, 3368 int refcount_order, 3369 bool extended_l2) 3370 { 3371 int64_t meta_size = 0; 3372 uint64_t nl1e, nl2e; 3373 int64_t aligned_total_size = ROUND_UP(total_size, cluster_size); 3374 size_t l2e_size = extended_l2 ? L2E_SIZE_EXTENDED : L2E_SIZE_NORMAL; 3375 3376 /* header: 1 cluster */ 3377 meta_size += cluster_size; 3378 3379 /* total size of L2 tables */ 3380 nl2e = aligned_total_size / cluster_size; 3381 nl2e = ROUND_UP(nl2e, cluster_size / l2e_size); 3382 meta_size += nl2e * l2e_size; 3383 3384 /* total size of L1 tables */ 3385 nl1e = nl2e * l2e_size / cluster_size; 3386 nl1e = ROUND_UP(nl1e, cluster_size / L1E_SIZE); 3387 meta_size += nl1e * L1E_SIZE; 3388 3389 /* total size of refcount table and blocks */ 3390 meta_size += qcow2_refcount_metadata_size( 3391 (meta_size + aligned_total_size) / cluster_size, 3392 cluster_size, refcount_order, false, NULL); 3393 3394 return meta_size + aligned_total_size; 3395 } 3396 3397 static bool validate_cluster_size(size_t cluster_size, bool extended_l2, 3398 Error **errp) 3399 { 3400 int cluster_bits = ctz32(cluster_size); 3401 if (cluster_bits < MIN_CLUSTER_BITS || cluster_bits > MAX_CLUSTER_BITS || 3402 (1 << cluster_bits) != cluster_size) 3403 { 3404 error_setg(errp, "Cluster size must be a power of two between %d and " 3405 "%dk", 1 << MIN_CLUSTER_BITS, 1 << (MAX_CLUSTER_BITS - 10)); 3406 return false; 3407 } 3408 3409 if (extended_l2) { 3410 unsigned min_cluster_size = 3411 (1 << MIN_CLUSTER_BITS) * QCOW_EXTL2_SUBCLUSTERS_PER_CLUSTER; 3412 if (cluster_size < min_cluster_size) { 3413 error_setg(errp, "Extended L2 entries are only supported with " 3414 "cluster sizes of at least %u bytes", min_cluster_size); 3415 return false; 3416 } 3417 } 3418 3419 return true; 3420 } 3421 3422 static size_t qcow2_opt_get_cluster_size_del(QemuOpts *opts, bool extended_l2, 3423 Error **errp) 3424 { 3425 size_t cluster_size; 3426 3427 cluster_size = qemu_opt_get_size_del(opts, BLOCK_OPT_CLUSTER_SIZE, 3428 DEFAULT_CLUSTER_SIZE); 3429 if (!validate_cluster_size(cluster_size, extended_l2, errp)) { 3430 return 0; 3431 } 3432 return cluster_size; 3433 } 3434 3435 static int qcow2_opt_get_version_del(QemuOpts *opts, Error **errp) 3436 { 3437 char *buf; 3438 int ret; 3439 
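    /*
     * Map the "compat" creation option to an internal version number:
     * "0.10" selects the original v2 layout, "1.1" (also the default when
     * the option is absent) selects v3.
     */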
3440 buf = qemu_opt_get_del(opts, BLOCK_OPT_COMPAT_LEVEL); 3441 if (!buf) { 3442 ret = 3; /* default */ 3443 } else if (!strcmp(buf, "0.10")) { 3444 ret = 2; 3445 } else if (!strcmp(buf, "1.1")) { 3446 ret = 3; 3447 } else { 3448 error_setg(errp, "Invalid compatibility level: '%s'", buf); 3449 ret = -EINVAL; 3450 } 3451 g_free(buf); 3452 return ret; 3453 } 3454 3455 static uint64_t qcow2_opt_get_refcount_bits_del(QemuOpts *opts, int version, 3456 Error **errp) 3457 { 3458 uint64_t refcount_bits; 3459 3460 refcount_bits = qemu_opt_get_number_del(opts, BLOCK_OPT_REFCOUNT_BITS, 16); 3461 if (refcount_bits > 64 || !is_power_of_2(refcount_bits)) { 3462 error_setg(errp, "Refcount width must be a power of two and may not " 3463 "exceed 64 bits"); 3464 return 0; 3465 } 3466 3467 if (version < 3 && refcount_bits != 16) { 3468 error_setg(errp, "Different refcount widths than 16 bits require " 3469 "compatibility level 1.1 or above (use compat=1.1 or " 3470 "greater)"); 3471 return 0; 3472 } 3473 3474 return refcount_bits; 3475 } 3476 3477 static int coroutine_fn GRAPH_UNLOCKED 3478 qcow2_co_create(BlockdevCreateOptions *create_options, Error **errp) 3479 { 3480 BlockdevCreateOptionsQcow2 *qcow2_opts; 3481 QDict *options; 3482 3483 /* 3484 * Open the image file and write a minimal qcow2 header. 3485 * 3486 * We keep things simple and start with a zero-sized image. We also 3487 * do without refcount blocks or a L1 table for now. We'll fix the 3488 * inconsistency later. 3489 * 3490 * We do need a refcount table because growing the refcount table means 3491 * allocating two new refcount blocks - the second of which would be at 3492 * 2 GB for 64k clusters, and we don't want to have a 2 GB initial file 3493 * size for any qcow2 image. 3494 */ 3495 BlockBackend *blk = NULL; 3496 BlockDriverState *bs = NULL; 3497 BlockDriverState *data_bs = NULL; 3498 QCowHeader *header; 3499 size_t cluster_size; 3500 int version; 3501 int refcount_order; 3502 uint64_t *refcount_table; 3503 int ret; 3504 uint8_t compression_type = QCOW2_COMPRESSION_TYPE_ZLIB; 3505 3506 assert(create_options->driver == BLOCKDEV_DRIVER_QCOW2); 3507 qcow2_opts = &create_options->u.qcow2; 3508 3509 bs = bdrv_co_open_blockdev_ref(qcow2_opts->file, errp); 3510 if (bs == NULL) { 3511 return -EIO; 3512 } 3513 3514 /* Validate options and set default values */ 3515 if (!QEMU_IS_ALIGNED(qcow2_opts->size, BDRV_SECTOR_SIZE)) { 3516 error_setg(errp, "Image size must be a multiple of %u bytes", 3517 (unsigned) BDRV_SECTOR_SIZE); 3518 ret = -EINVAL; 3519 goto out; 3520 } 3521 3522 if (qcow2_opts->has_version) { 3523 switch (qcow2_opts->version) { 3524 case BLOCKDEV_QCOW2_VERSION_V2: 3525 version = 2; 3526 break; 3527 case BLOCKDEV_QCOW2_VERSION_V3: 3528 version = 3; 3529 break; 3530 default: 3531 g_assert_not_reached(); 3532 } 3533 } else { 3534 version = 3; 3535 } 3536 3537 if (qcow2_opts->has_cluster_size) { 3538 cluster_size = qcow2_opts->cluster_size; 3539 } else { 3540 cluster_size = DEFAULT_CLUSTER_SIZE; 3541 } 3542 3543 if (!qcow2_opts->has_extended_l2) { 3544 qcow2_opts->extended_l2 = false; 3545 } 3546 if (qcow2_opts->extended_l2) { 3547 if (version < 3) { 3548 error_setg(errp, "Extended L2 entries are only supported with " 3549 "compatibility level 1.1 and above (use version=v3 or " 3550 "greater)"); 3551 ret = -EINVAL; 3552 goto out; 3553 } 3554 } 3555 3556 if (!validate_cluster_size(cluster_size, qcow2_opts->extended_l2, errp)) { 3557 ret = -EINVAL; 3558 goto out; 3559 } 3560 3561 if (!qcow2_opts->has_preallocation) { 3562 
qcow2_opts->preallocation = PREALLOC_MODE_OFF; 3563 } 3564 if (qcow2_opts->backing_file && 3565 qcow2_opts->preallocation != PREALLOC_MODE_OFF && 3566 !qcow2_opts->extended_l2) 3567 { 3568 error_setg(errp, "Backing file and preallocation can only be used at " 3569 "the same time if extended_l2 is on"); 3570 ret = -EINVAL; 3571 goto out; 3572 } 3573 if (qcow2_opts->has_backing_fmt && !qcow2_opts->backing_file) { 3574 error_setg(errp, "Backing format cannot be used without backing file"); 3575 ret = -EINVAL; 3576 goto out; 3577 } 3578 3579 if (!qcow2_opts->has_lazy_refcounts) { 3580 qcow2_opts->lazy_refcounts = false; 3581 } 3582 if (version < 3 && qcow2_opts->lazy_refcounts) { 3583 error_setg(errp, "Lazy refcounts only supported with compatibility " 3584 "level 1.1 and above (use version=v3 or greater)"); 3585 ret = -EINVAL; 3586 goto out; 3587 } 3588 3589 if (!qcow2_opts->has_refcount_bits) { 3590 qcow2_opts->refcount_bits = 16; 3591 } 3592 if (qcow2_opts->refcount_bits > 64 || 3593 !is_power_of_2(qcow2_opts->refcount_bits)) 3594 { 3595 error_setg(errp, "Refcount width must be a power of two and may not " 3596 "exceed 64 bits"); 3597 ret = -EINVAL; 3598 goto out; 3599 } 3600 if (version < 3 && qcow2_opts->refcount_bits != 16) { 3601 error_setg(errp, "Different refcount widths than 16 bits require " 3602 "compatibility level 1.1 or above (use version=v3 or " 3603 "greater)"); 3604 ret = -EINVAL; 3605 goto out; 3606 } 3607 refcount_order = ctz32(qcow2_opts->refcount_bits); 3608 3609 if (qcow2_opts->data_file_raw && !qcow2_opts->data_file) { 3610 error_setg(errp, "data-file-raw requires data-file"); 3611 ret = -EINVAL; 3612 goto out; 3613 } 3614 if (qcow2_opts->data_file_raw && qcow2_opts->backing_file) { 3615 error_setg(errp, "Backing file and data-file-raw cannot be used at " 3616 "the same time"); 3617 ret = -EINVAL; 3618 goto out; 3619 } 3620 if (qcow2_opts->data_file_raw && 3621 qcow2_opts->preallocation == PREALLOC_MODE_OFF) 3622 { 3623 /* 3624 * data-file-raw means that "the external data file can be 3625 * read as a consistent standalone raw image without looking 3626 * at the qcow2 metadata." It does not say that the metadata 3627 * must be ignored, though (and the qcow2 driver in fact does 3628 * not ignore it), so the L1/L2 tables must be present and 3629 * give a 1:1 mapping, so you get the same result regardless 3630 * of whether you look at the metadata or whether you ignore 3631 * it. 3632 */ 3633 qcow2_opts->preallocation = PREALLOC_MODE_METADATA; 3634 3635 /* 3636 * Cannot use preallocation with backing files, but giving a 3637 * backing file when specifying data_file_raw is an error 3638 * anyway. 
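     *
     * For instance, a creation request along the lines of
     *   qemu-img create -f qcow2 -o data_file=ext.raw,data_file_raw=on img.qcow2 1G
     * has no backing file and, arriving here with preallocation still off,
     * is silently upgraded to metadata preallocation.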
3639 */ 3640 assert(!qcow2_opts->backing_file); 3641 } 3642 3643 if (qcow2_opts->data_file) { 3644 if (version < 3) { 3645 error_setg(errp, "External data files are only supported with " 3646 "compatibility level 1.1 and above (use version=v3 or " 3647 "greater)"); 3648 ret = -EINVAL; 3649 goto out; 3650 } 3651 data_bs = bdrv_co_open_blockdev_ref(qcow2_opts->data_file, errp); 3652 if (data_bs == NULL) { 3653 ret = -EIO; 3654 goto out; 3655 } 3656 } 3657 3658 if (qcow2_opts->has_compression_type && 3659 qcow2_opts->compression_type != QCOW2_COMPRESSION_TYPE_ZLIB) { 3660 3661 ret = -EINVAL; 3662 3663 if (version < 3) { 3664 error_setg(errp, "Non-zlib compression type is only supported with " 3665 "compatibility level 1.1 and above (use version=v3 or " 3666 "greater)"); 3667 goto out; 3668 } 3669 3670 switch (qcow2_opts->compression_type) { 3671 #ifdef CONFIG_ZSTD 3672 case QCOW2_COMPRESSION_TYPE_ZSTD: 3673 break; 3674 #endif 3675 default: 3676 error_setg(errp, "Unknown compression type"); 3677 goto out; 3678 } 3679 3680 compression_type = qcow2_opts->compression_type; 3681 } 3682 3683 /* Create BlockBackend to write to the image */ 3684 blk = blk_co_new_with_bs(bs, BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL, 3685 errp); 3686 if (!blk) { 3687 ret = -EPERM; 3688 goto out; 3689 } 3690 blk_set_allow_write_beyond_eof(blk, true); 3691 3692 /* Write the header */ 3693 QEMU_BUILD_BUG_ON((1 << MIN_CLUSTER_BITS) < sizeof(*header)); 3694 header = g_malloc0(cluster_size); 3695 *header = (QCowHeader) { 3696 .magic = cpu_to_be32(QCOW_MAGIC), 3697 .version = cpu_to_be32(version), 3698 .cluster_bits = cpu_to_be32(ctz32(cluster_size)), 3699 .size = cpu_to_be64(0), 3700 .l1_table_offset = cpu_to_be64(0), 3701 .l1_size = cpu_to_be32(0), 3702 .refcount_table_offset = cpu_to_be64(cluster_size), 3703 .refcount_table_clusters = cpu_to_be32(1), 3704 .refcount_order = cpu_to_be32(refcount_order), 3705 /* don't deal with endianness since compression_type is 1 byte long */ 3706 .compression_type = compression_type, 3707 .header_length = cpu_to_be32(sizeof(*header)), 3708 }; 3709 3710 /* We'll update this to correct value later */ 3711 header->crypt_method = cpu_to_be32(QCOW_CRYPT_NONE); 3712 3713 if (qcow2_opts->lazy_refcounts) { 3714 header->compatible_features |= 3715 cpu_to_be64(QCOW2_COMPAT_LAZY_REFCOUNTS); 3716 } 3717 if (data_bs) { 3718 header->incompatible_features |= 3719 cpu_to_be64(QCOW2_INCOMPAT_DATA_FILE); 3720 } 3721 if (qcow2_opts->data_file_raw) { 3722 header->autoclear_features |= 3723 cpu_to_be64(QCOW2_AUTOCLEAR_DATA_FILE_RAW); 3724 } 3725 if (compression_type != QCOW2_COMPRESSION_TYPE_ZLIB) { 3726 header->incompatible_features |= 3727 cpu_to_be64(QCOW2_INCOMPAT_COMPRESSION); 3728 } 3729 3730 if (qcow2_opts->extended_l2) { 3731 header->incompatible_features |= 3732 cpu_to_be64(QCOW2_INCOMPAT_EXTL2); 3733 } 3734 3735 ret = blk_co_pwrite(blk, 0, cluster_size, header, 0); 3736 g_free(header); 3737 if (ret < 0) { 3738 error_setg_errno(errp, -ret, "Could not write qcow2 header"); 3739 goto out; 3740 } 3741 3742 /* Write a refcount table with one refcount block */ 3743 refcount_table = g_malloc0(2 * cluster_size); 3744 refcount_table[0] = cpu_to_be64(2 * cluster_size); 3745 ret = blk_co_pwrite(blk, cluster_size, 2 * cluster_size, refcount_table, 0); 3746 g_free(refcount_table); 3747 3748 if (ret < 0) { 3749 error_setg_errno(errp, -ret, "Could not write refcount table"); 3750 goto out; 3751 } 3752 3753 blk_co_unref(blk); 3754 blk = NULL; 3755 3756 /* 3757 * And now open the image and make it consistent 
first (i.e. increase the 3758 * refcount of the cluster that is occupied by the header and the refcount 3759 * table) 3760 */ 3761 options = qdict_new(); 3762 qdict_put_str(options, "driver", "qcow2"); 3763 qdict_put_str(options, "file", bs->node_name); 3764 if (data_bs) { 3765 qdict_put_str(options, "data-file", data_bs->node_name); 3766 } 3767 blk = blk_co_new_open(NULL, NULL, options, 3768 BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_NO_FLUSH, 3769 errp); 3770 if (blk == NULL) { 3771 ret = -EIO; 3772 goto out; 3773 } 3774 3775 bdrv_graph_co_rdlock(); 3776 ret = qcow2_alloc_clusters(blk_bs(blk), 3 * cluster_size); 3777 if (ret < 0) { 3778 bdrv_graph_co_rdunlock(); 3779 error_setg_errno(errp, -ret, "Could not allocate clusters for qcow2 " 3780 "header and refcount table"); 3781 goto out; 3782 3783 } else if (ret != 0) { 3784 error_report("Huh, first cluster in empty image is already in use?"); 3785 abort(); 3786 } 3787 3788 /* Set the external data file if necessary */ 3789 if (data_bs) { 3790 BDRVQcow2State *s = blk_bs(blk)->opaque; 3791 s->image_data_file = g_strdup(data_bs->filename); 3792 } 3793 3794 /* Create a full header (including things like feature table) */ 3795 ret = qcow2_update_header(blk_bs(blk)); 3796 bdrv_graph_co_rdunlock(); 3797 3798 if (ret < 0) { 3799 error_setg_errno(errp, -ret, "Could not update qcow2 header"); 3800 goto out; 3801 } 3802 3803 /* Okay, now that we have a valid image, let's give it the right size */ 3804 ret = blk_co_truncate(blk, qcow2_opts->size, false, 3805 qcow2_opts->preallocation, 0, errp); 3806 if (ret < 0) { 3807 error_prepend(errp, "Could not resize image: "); 3808 goto out; 3809 } 3810 3811 /* Want a backing file? There you go. */ 3812 if (qcow2_opts->backing_file) { 3813 const char *backing_format = NULL; 3814 3815 if (qcow2_opts->has_backing_fmt) { 3816 backing_format = BlockdevDriver_str(qcow2_opts->backing_fmt); 3817 } 3818 3819 ret = bdrv_change_backing_file(blk_bs(blk), qcow2_opts->backing_file, 3820 backing_format, false); 3821 if (ret < 0) { 3822 error_setg_errno(errp, -ret, "Could not assign backing file '%s' " 3823 "with format '%s'", qcow2_opts->backing_file, 3824 backing_format); 3825 goto out; 3826 } 3827 } 3828 3829 /* Want encryption? There you go. */ 3830 if (qcow2_opts->encrypt) { 3831 bdrv_graph_co_rdlock(); 3832 ret = qcow2_set_up_encryption(blk_bs(blk), qcow2_opts->encrypt, errp); 3833 bdrv_graph_co_rdunlock(); 3834 3835 if (ret < 0) { 3836 goto out; 3837 } 3838 } 3839 3840 blk_co_unref(blk); 3841 blk = NULL; 3842 3843 /* Reopen the image without BDRV_O_NO_FLUSH to flush it before returning. 3844 * Using BDRV_O_NO_IO, since encryption is now setup we don't want to 3845 * have to setup decryption context. We're not doing any I/O on the top 3846 * level BlockDriverState, only lower layers, where BDRV_O_NO_IO does 3847 * not have effect. 
3848 */ 3849 options = qdict_new(); 3850 qdict_put_str(options, "driver", "qcow2"); 3851 qdict_put_str(options, "file", bs->node_name); 3852 if (data_bs) { 3853 qdict_put_str(options, "data-file", data_bs->node_name); 3854 } 3855 blk = blk_co_new_open(NULL, NULL, options, 3856 BDRV_O_RDWR | BDRV_O_NO_BACKING | BDRV_O_NO_IO, 3857 errp); 3858 if (blk == NULL) { 3859 ret = -EIO; 3860 goto out; 3861 } 3862 3863 ret = 0; 3864 out: 3865 blk_co_unref(blk); 3866 bdrv_co_unref(bs); 3867 bdrv_co_unref(data_bs); 3868 return ret; 3869 } 3870 3871 static int coroutine_fn GRAPH_UNLOCKED 3872 qcow2_co_create_opts(BlockDriver *drv, const char *filename, QemuOpts *opts, 3873 Error **errp) 3874 { 3875 BlockdevCreateOptions *create_options = NULL; 3876 QDict *qdict; 3877 Visitor *v; 3878 BlockDriverState *bs = NULL; 3879 BlockDriverState *data_bs = NULL; 3880 const char *val; 3881 int ret; 3882 3883 /* Only the keyval visitor supports the dotted syntax needed for 3884 * encryption, so go through a QDict before getting a QAPI type. Ignore 3885 * options meant for the protocol layer so that the visitor doesn't 3886 * complain. */ 3887 qdict = qemu_opts_to_qdict_filtered(opts, NULL, bdrv_qcow2.create_opts, 3888 true); 3889 3890 /* Handle encryption options */ 3891 val = qdict_get_try_str(qdict, BLOCK_OPT_ENCRYPT); 3892 if (val && !strcmp(val, "on")) { 3893 qdict_put_str(qdict, BLOCK_OPT_ENCRYPT, "qcow"); 3894 } else if (val && !strcmp(val, "off")) { 3895 qdict_del(qdict, BLOCK_OPT_ENCRYPT); 3896 } 3897 3898 val = qdict_get_try_str(qdict, BLOCK_OPT_ENCRYPT_FORMAT); 3899 if (val && !strcmp(val, "aes")) { 3900 qdict_put_str(qdict, BLOCK_OPT_ENCRYPT_FORMAT, "qcow"); 3901 } 3902 3903 /* Convert compat=0.10/1.1 into compat=v2/v3, to be renamed into 3904 * version=v2/v3 below. 
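 * For example, a legacy "compat=1.1" option first becomes "compat=v3" here and is then renamed to "version=v3" by the opt_renames table below.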
*/ 3905 val = qdict_get_try_str(qdict, BLOCK_OPT_COMPAT_LEVEL); 3906 if (val && !strcmp(val, "0.10")) { 3907 qdict_put_str(qdict, BLOCK_OPT_COMPAT_LEVEL, "v2"); 3908 } else if (val && !strcmp(val, "1.1")) { 3909 qdict_put_str(qdict, BLOCK_OPT_COMPAT_LEVEL, "v3"); 3910 } 3911 3912 /* Change legacy command line options into QMP ones */ 3913 static const QDictRenames opt_renames[] = { 3914 { BLOCK_OPT_BACKING_FILE, "backing-file" }, 3915 { BLOCK_OPT_BACKING_FMT, "backing-fmt" }, 3916 { BLOCK_OPT_CLUSTER_SIZE, "cluster-size" }, 3917 { BLOCK_OPT_LAZY_REFCOUNTS, "lazy-refcounts" }, 3918 { BLOCK_OPT_EXTL2, "extended-l2" }, 3919 { BLOCK_OPT_REFCOUNT_BITS, "refcount-bits" }, 3920 { BLOCK_OPT_ENCRYPT, BLOCK_OPT_ENCRYPT_FORMAT }, 3921 { BLOCK_OPT_COMPAT_LEVEL, "version" }, 3922 { BLOCK_OPT_DATA_FILE_RAW, "data-file-raw" }, 3923 { BLOCK_OPT_COMPRESSION_TYPE, "compression-type" }, 3924 { NULL, NULL }, 3925 }; 3926 3927 if (!qdict_rename_keys(qdict, opt_renames, errp)) { 3928 ret = -EINVAL; 3929 goto finish; 3930 } 3931 3932 /* Create and open the file (protocol layer) */ 3933 ret = bdrv_co_create_file(filename, opts, errp); 3934 if (ret < 0) { 3935 goto finish; 3936 } 3937 3938 bs = bdrv_co_open(filename, NULL, NULL, 3939 BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, errp); 3940 if (bs == NULL) { 3941 ret = -EIO; 3942 goto finish; 3943 } 3944 3945 /* Create and open an external data file (protocol layer) */ 3946 val = qdict_get_try_str(qdict, BLOCK_OPT_DATA_FILE); 3947 if (val) { 3948 ret = bdrv_co_create_file(val, opts, errp); 3949 if (ret < 0) { 3950 goto finish; 3951 } 3952 3953 data_bs = bdrv_co_open(val, NULL, NULL, 3954 BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, 3955 errp); 3956 if (data_bs == NULL) { 3957 ret = -EIO; 3958 goto finish; 3959 } 3960 3961 qdict_del(qdict, BLOCK_OPT_DATA_FILE); 3962 qdict_put_str(qdict, "data-file", data_bs->node_name); 3963 } 3964 3965 /* Set 'driver' and 'node' options */ 3966 qdict_put_str(qdict, "driver", "qcow2"); 3967 qdict_put_str(qdict, "file", bs->node_name); 3968 3969 /* Now get the QAPI type BlockdevCreateOptions */ 3970 v = qobject_input_visitor_new_flat_confused(qdict, errp); 3971 if (!v) { 3972 ret = -EINVAL; 3973 goto finish; 3974 } 3975 3976 visit_type_BlockdevCreateOptions(v, NULL, &create_options, errp); 3977 visit_free(v); 3978 if (!create_options) { 3979 ret = -EINVAL; 3980 goto finish; 3981 } 3982 3983 /* Silently round up size */ 3984 create_options->u.qcow2.size = ROUND_UP(create_options->u.qcow2.size, 3985 BDRV_SECTOR_SIZE); 3986 3987 /* Create the qcow2 image (format layer) */ 3988 ret = qcow2_co_create(create_options, errp); 3989 finish: 3990 if (ret < 0) { 3991 bdrv_graph_co_rdlock(); 3992 bdrv_co_delete_file_noerr(bs); 3993 bdrv_co_delete_file_noerr(data_bs); 3994 bdrv_graph_co_rdunlock(); 3995 } else { 3996 ret = 0; 3997 } 3998 3999 qobject_unref(qdict); 4000 bdrv_co_unref(bs); 4001 bdrv_co_unref(data_bs); 4002 qapi_free_BlockdevCreateOptions(create_options); 4003 return ret; 4004 } 4005 4006 4007 static bool coroutine_fn GRAPH_RDLOCK 4008 is_zero(BlockDriverState *bs, int64_t offset, int64_t bytes) 4009 { 4010 int64_t nr; 4011 int res; 4012 4013 /* Clamp to image length, before checking status of underlying sectors */ 4014 if (offset + bytes > bs->total_sectors * BDRV_SECTOR_SIZE) { 4015 bytes = bs->total_sectors * BDRV_SECTOR_SIZE - offset; 4016 } 4017 4018 if (!bytes) { 4019 return true; 4020 } 4021 4022 /* 4023 * bdrv_block_status_above doesn't merge different types of zeros, for 4024 * example, zeros which come from the region 
which is unallocated in 4025 * the whole backing chain, and zeros which come because of a short 4026 * backing file. So, we need a loop. 4027 */ 4028 do { 4029 res = bdrv_co_block_status_above(bs, NULL, offset, bytes, &nr, NULL, NULL); 4030 offset += nr; 4031 bytes -= nr; 4032 } while (res >= 0 && (res & BDRV_BLOCK_ZERO) && nr && bytes); 4033 4034 return res >= 0 && (res & BDRV_BLOCK_ZERO) && bytes == 0; 4035 } 4036 4037 static int coroutine_fn GRAPH_RDLOCK 4038 qcow2_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes, 4039 BdrvRequestFlags flags) 4040 { 4041 int ret; 4042 BDRVQcow2State *s = bs->opaque; 4043 4044 uint32_t head = offset_into_subcluster(s, offset); 4045 uint32_t tail = ROUND_UP(offset + bytes, s->subcluster_size) - 4046 (offset + bytes); 4047 4048 trace_qcow2_pwrite_zeroes_start_req(qemu_coroutine_self(), offset, bytes); 4049 if (offset + bytes == bs->total_sectors * BDRV_SECTOR_SIZE) { 4050 tail = 0; 4051 } 4052 4053 if (head || tail) { 4054 uint64_t off; 4055 unsigned int nr; 4056 QCow2SubclusterType type; 4057 4058 assert(head + bytes + tail <= s->subcluster_size); 4059 4060 /* check whether remainder of cluster already reads as zero */ 4061 if (!(is_zero(bs, offset - head, head) && 4062 is_zero(bs, offset + bytes, tail))) { 4063 return -ENOTSUP; 4064 } 4065 4066 qemu_co_mutex_lock(&s->lock); 4067 /* We can have new write after previous check */ 4068 offset -= head; 4069 bytes = s->subcluster_size; 4070 nr = s->subcluster_size; 4071 ret = qcow2_get_host_offset(bs, offset, &nr, &off, &type); 4072 if (ret < 0 || 4073 (type != QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN && 4074 type != QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC && 4075 type != QCOW2_SUBCLUSTER_ZERO_PLAIN && 4076 type != QCOW2_SUBCLUSTER_ZERO_ALLOC)) { 4077 qemu_co_mutex_unlock(&s->lock); 4078 return ret < 0 ? ret : -ENOTSUP; 4079 } 4080 } else { 4081 qemu_co_mutex_lock(&s->lock); 4082 } 4083 4084 trace_qcow2_pwrite_zeroes(qemu_coroutine_self(), offset, bytes); 4085 4086 /* Whatever is left can use real zero subclusters */ 4087 ret = qcow2_subcluster_zeroize(bs, offset, bytes, flags); 4088 qemu_co_mutex_unlock(&s->lock); 4089 4090 return ret; 4091 } 4092 4093 static int coroutine_fn GRAPH_RDLOCK 4094 qcow2_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes) 4095 { 4096 int ret; 4097 BDRVQcow2State *s = bs->opaque; 4098 4099 /* If the image does not support QCOW_OFLAG_ZERO then discarding 4100 * clusters could expose stale data from the backing file. 
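 * (Without the zero flag, a discarded cluster simply becomes unallocated again, so subsequent reads would be served from the backing file instead of returning zeroes.)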
*/ 4101 if (s->qcow_version < 3 && bs->backing) { 4102 return -ENOTSUP; 4103 } 4104 4105 if (!QEMU_IS_ALIGNED(offset | bytes, s->cluster_size)) { 4106 assert(bytes < s->cluster_size); 4107 /* Ignore partial clusters, except for the special case of the 4108 * complete partial cluster at the end of an unaligned file */ 4109 if (!QEMU_IS_ALIGNED(offset, s->cluster_size) || 4110 offset + bytes != bs->total_sectors * BDRV_SECTOR_SIZE) { 4111 return -ENOTSUP; 4112 } 4113 } 4114 4115 qemu_co_mutex_lock(&s->lock); 4116 ret = qcow2_cluster_discard(bs, offset, bytes, QCOW2_DISCARD_REQUEST, 4117 false); 4118 qemu_co_mutex_unlock(&s->lock); 4119 return ret; 4120 } 4121 4122 static int coroutine_fn GRAPH_RDLOCK 4123 qcow2_co_copy_range_from(BlockDriverState *bs, 4124 BdrvChild *src, int64_t src_offset, 4125 BdrvChild *dst, int64_t dst_offset, 4126 int64_t bytes, BdrvRequestFlags read_flags, 4127 BdrvRequestFlags write_flags) 4128 { 4129 BDRVQcow2State *s = bs->opaque; 4130 int ret; 4131 unsigned int cur_bytes; /* number of bytes in current iteration */ 4132 BdrvChild *child = NULL; 4133 BdrvRequestFlags cur_write_flags; 4134 4135 assert(!bs->encrypted); 4136 qemu_co_mutex_lock(&s->lock); 4137 4138 while (bytes != 0) { 4139 uint64_t copy_offset = 0; 4140 QCow2SubclusterType type; 4141 /* prepare next request */ 4142 cur_bytes = MIN(bytes, INT_MAX); 4143 cur_write_flags = write_flags; 4144 4145 ret = qcow2_get_host_offset(bs, src_offset, &cur_bytes, 4146 &copy_offset, &type); 4147 if (ret < 0) { 4148 goto out; 4149 } 4150 4151 switch (type) { 4152 case QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN: 4153 case QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC: 4154 if (bs->backing && bs->backing->bs) { 4155 int64_t backing_length = bdrv_co_getlength(bs->backing->bs); 4156 if (src_offset >= backing_length) { 4157 cur_write_flags |= BDRV_REQ_ZERO_WRITE; 4158 } else { 4159 child = bs->backing; 4160 cur_bytes = MIN(cur_bytes, backing_length - src_offset); 4161 copy_offset = src_offset; 4162 } 4163 } else { 4164 cur_write_flags |= BDRV_REQ_ZERO_WRITE; 4165 } 4166 break; 4167 4168 case QCOW2_SUBCLUSTER_ZERO_PLAIN: 4169 case QCOW2_SUBCLUSTER_ZERO_ALLOC: 4170 cur_write_flags |= BDRV_REQ_ZERO_WRITE; 4171 break; 4172 4173 case QCOW2_SUBCLUSTER_COMPRESSED: 4174 ret = -ENOTSUP; 4175 goto out; 4176 4177 case QCOW2_SUBCLUSTER_NORMAL: 4178 child = s->data_file; 4179 break; 4180 4181 default: 4182 abort(); 4183 } 4184 qemu_co_mutex_unlock(&s->lock); 4185 ret = bdrv_co_copy_range_from(child, 4186 copy_offset, 4187 dst, dst_offset, 4188 cur_bytes, read_flags, cur_write_flags); 4189 qemu_co_mutex_lock(&s->lock); 4190 if (ret < 0) { 4191 goto out; 4192 } 4193 4194 bytes -= cur_bytes; 4195 src_offset += cur_bytes; 4196 dst_offset += cur_bytes; 4197 } 4198 ret = 0; 4199 4200 out: 4201 qemu_co_mutex_unlock(&s->lock); 4202 return ret; 4203 } 4204 4205 static int coroutine_fn GRAPH_RDLOCK 4206 qcow2_co_copy_range_to(BlockDriverState *bs, 4207 BdrvChild *src, int64_t src_offset, 4208 BdrvChild *dst, int64_t dst_offset, 4209 int64_t bytes, BdrvRequestFlags read_flags, 4210 BdrvRequestFlags write_flags) 4211 { 4212 BDRVQcow2State *s = bs->opaque; 4213 int ret; 4214 unsigned int cur_bytes; /* number of bytes in current iteration */ 4215 uint64_t host_offset; 4216 QCowL2Meta *l2meta = NULL; 4217 4218 assert(!bs->encrypted); 4219 4220 qemu_co_mutex_lock(&s->lock); 4221 4222 while (bytes != 0) { 4223 4224 l2meta = NULL; 4225 4226 cur_bytes = MIN(bytes, INT_MAX); 4227 4228 /* TODO: 4229 * If src->bs == dst->bs, we could simply copy by incrementing 4230 * the refcnt,
without copying user data. 4231 * Or if src->bs == dst->bs->backing->bs, we could copy by discarding. */ 4232 ret = qcow2_alloc_host_offset(bs, dst_offset, &cur_bytes, 4233 &host_offset, &l2meta); 4234 if (ret < 0) { 4235 goto fail; 4236 } 4237 4238 ret = qcow2_pre_write_overlap_check(bs, 0, host_offset, cur_bytes, 4239 true); 4240 if (ret < 0) { 4241 goto fail; 4242 } 4243 4244 qemu_co_mutex_unlock(&s->lock); 4245 ret = bdrv_co_copy_range_to(src, src_offset, s->data_file, host_offset, 4246 cur_bytes, read_flags, write_flags); 4247 qemu_co_mutex_lock(&s->lock); 4248 if (ret < 0) { 4249 goto fail; 4250 } 4251 4252 ret = qcow2_handle_l2meta(bs, &l2meta, true); 4253 if (ret) { 4254 goto fail; 4255 } 4256 4257 bytes -= cur_bytes; 4258 src_offset += cur_bytes; 4259 dst_offset += cur_bytes; 4260 } 4261 ret = 0; 4262 4263 fail: 4264 qcow2_handle_l2meta(bs, &l2meta, false); 4265 4266 qemu_co_mutex_unlock(&s->lock); 4267 4268 trace_qcow2_writev_done_req(qemu_coroutine_self(), ret); 4269 4270 return ret; 4271 } 4272 4273 static int coroutine_fn GRAPH_RDLOCK 4274 qcow2_co_truncate(BlockDriverState *bs, int64_t offset, bool exact, 4275 PreallocMode prealloc, BdrvRequestFlags flags, Error **errp) 4276 { 4277 BDRVQcow2State *s = bs->opaque; 4278 uint64_t old_length; 4279 int64_t new_l1_size; 4280 int ret; 4281 QDict *options; 4282 4283 if (prealloc != PREALLOC_MODE_OFF && prealloc != PREALLOC_MODE_METADATA && 4284 prealloc != PREALLOC_MODE_FALLOC && prealloc != PREALLOC_MODE_FULL) 4285 { 4286 error_setg(errp, "Unsupported preallocation mode '%s'", 4287 PreallocMode_str(prealloc)); 4288 return -ENOTSUP; 4289 } 4290 4291 if (!QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE)) { 4292 error_setg(errp, "The new size must be a multiple of %u", 4293 (unsigned) BDRV_SECTOR_SIZE); 4294 return -EINVAL; 4295 } 4296 4297 qemu_co_mutex_lock(&s->lock); 4298 4299 /* 4300 * Even though we store snapshot size for all images, it was not 4301 * required until v3, so it is not safe to proceed for v2. 4302 */ 4303 if (s->nb_snapshots && s->qcow_version < 3) { 4304 error_setg(errp, "Can't resize a v2 image which has snapshots"); 4305 ret = -ENOTSUP; 4306 goto fail; 4307 } 4308 4309 /* See qcow2-bitmap.c for which bitmap scenarios prevent a resize. 
*/ 4310 if (qcow2_truncate_bitmaps_check(bs, errp)) { 4311 ret = -ENOTSUP; 4312 goto fail; 4313 } 4314 4315 old_length = bs->total_sectors * BDRV_SECTOR_SIZE; 4316 new_l1_size = size_to_l1(s, offset); 4317 4318 if (offset < old_length) { 4319 int64_t last_cluster, old_file_size; 4320 if (prealloc != PREALLOC_MODE_OFF) { 4321 error_setg(errp, 4322 "Preallocation can't be used for shrinking an image"); 4323 ret = -EINVAL; 4324 goto fail; 4325 } 4326 4327 ret = qcow2_cluster_discard(bs, ROUND_UP(offset, s->cluster_size), 4328 old_length - ROUND_UP(offset, 4329 s->cluster_size), 4330 QCOW2_DISCARD_ALWAYS, true); 4331 if (ret < 0) { 4332 error_setg_errno(errp, -ret, "Failed to discard cropped clusters"); 4333 goto fail; 4334 } 4335 4336 ret = qcow2_shrink_l1_table(bs, new_l1_size); 4337 if (ret < 0) { 4338 error_setg_errno(errp, -ret, 4339 "Failed to reduce the number of L2 tables"); 4340 goto fail; 4341 } 4342 4343 ret = qcow2_shrink_reftable(bs); 4344 if (ret < 0) { 4345 error_setg_errno(errp, -ret, 4346 "Failed to discard unused refblocks"); 4347 goto fail; 4348 } 4349 4350 old_file_size = bdrv_co_getlength(bs->file->bs); 4351 if (old_file_size < 0) { 4352 error_setg_errno(errp, -old_file_size, 4353 "Failed to inquire current file length"); 4354 ret = old_file_size; 4355 goto fail; 4356 } 4357 last_cluster = qcow2_get_last_cluster(bs, old_file_size); 4358 if (last_cluster < 0) { 4359 error_setg_errno(errp, -last_cluster, 4360 "Failed to find the last cluster"); 4361 ret = last_cluster; 4362 goto fail; 4363 } 4364 if ((last_cluster + 1) * s->cluster_size < old_file_size) { 4365 Error *local_err = NULL; 4366 4367 /* 4368 * Do not pass @exact here: It will not help the user if 4369 * we get an error here just because they wanted to shrink 4370 * their qcow2 image (on a block device) with qemu-img. 4371 * (And on the qcow2 layer, the @exact requirement is 4372 * always fulfilled, so there is no need to pass it on.) 4373 */ 4374 bdrv_co_truncate(bs->file, (last_cluster + 1) * s->cluster_size, 4375 false, PREALLOC_MODE_OFF, 0, &local_err); 4376 if (local_err) { 4377 warn_reportf_err(local_err, 4378 "Failed to truncate the tail of the image: "); 4379 } 4380 } 4381 } else { 4382 ret = qcow2_grow_l1_table(bs, new_l1_size, true); 4383 if (ret < 0) { 4384 error_setg_errno(errp, -ret, "Failed to grow the L1 table"); 4385 goto fail; 4386 } 4387 4388 if (data_file_is_raw(bs) && prealloc == PREALLOC_MODE_OFF) { 4389 /* 4390 * When creating a qcow2 image with data-file-raw, we enforce 4391 * at least prealloc=metadata, so that the L1/L2 tables are 4392 * fully allocated and reading from the data file will return 4393 * the same data as reading from the qcow2 image. When the 4394 * image is grown, we must consequently preallocate the 4395 * metadata structures to cover the added area. 4396 */ 4397 prealloc = PREALLOC_MODE_METADATA; 4398 } 4399 } 4400 4401 switch (prealloc) { 4402 case PREALLOC_MODE_OFF: 4403 if (has_data_file(bs)) { 4404 /* 4405 * If the caller wants an exact resize, the external data 4406 * file should be resized to the exact target size, too, 4407 * so we pass @exact here. 
4408 */ 4409 ret = bdrv_co_truncate(s->data_file, offset, exact, prealloc, 0, 4410 errp); 4411 if (ret < 0) { 4412 goto fail; 4413 } 4414 } 4415 break; 4416 4417 case PREALLOC_MODE_METADATA: 4418 ret = preallocate_co(bs, old_length, offset, prealloc, errp); 4419 if (ret < 0) { 4420 goto fail; 4421 } 4422 break; 4423 4424 case PREALLOC_MODE_FALLOC: 4425 case PREALLOC_MODE_FULL: 4426 { 4427 int64_t allocation_start, host_offset, guest_offset; 4428 int64_t clusters_allocated; 4429 int64_t old_file_size, last_cluster, new_file_size; 4430 uint64_t nb_new_data_clusters, nb_new_l2_tables; 4431 bool subclusters_need_allocation = false; 4432 4433 /* With a data file, preallocation means just allocating the metadata 4434 * and forwarding the truncate request to the data file */ 4435 if (has_data_file(bs)) { 4436 ret = preallocate_co(bs, old_length, offset, prealloc, errp); 4437 if (ret < 0) { 4438 goto fail; 4439 } 4440 break; 4441 } 4442 4443 old_file_size = bdrv_co_getlength(bs->file->bs); 4444 if (old_file_size < 0) { 4445 error_setg_errno(errp, -old_file_size, 4446 "Failed to inquire current file length"); 4447 ret = old_file_size; 4448 goto fail; 4449 } 4450 4451 last_cluster = qcow2_get_last_cluster(bs, old_file_size); 4452 if (last_cluster >= 0) { 4453 old_file_size = (last_cluster + 1) * s->cluster_size; 4454 } else { 4455 old_file_size = ROUND_UP(old_file_size, s->cluster_size); 4456 } 4457 4458 nb_new_data_clusters = (ROUND_UP(offset, s->cluster_size) - 4459 start_of_cluster(s, old_length)) >> s->cluster_bits; 4460 4461 /* This is an overestimation; we will not actually allocate space for 4462 * these in the file but just make sure the new refcount structures are 4463 * able to cover them so we will not have to allocate new refblocks 4464 * while entering the data blocks in the potentially new L2 tables. 4465 * (We do not actually care where the L2 tables are placed. Maybe they 4466 * are already allocated or they can be placed somewhere before 4467 * @old_file_size. It does not matter because they will be fully 4468 * allocated automatically, so they do not need to be covered by the 4469 * preallocation. All that matters is that we will not have to allocate 4470 * new refcount structures for them.) */ 4471 nb_new_l2_tables = DIV_ROUND_UP(nb_new_data_clusters, 4472 s->cluster_size / l2_entry_size(s)); 4473 /* The cluster range may not be aligned to L2 boundaries, so add one L2 4474 * table for a potential head/tail */ 4475 nb_new_l2_tables++; 4476 4477 allocation_start = qcow2_refcount_area(bs, old_file_size, 4478 nb_new_data_clusters + 4479 nb_new_l2_tables, 4480 true, 0, 0); 4481 if (allocation_start < 0) { 4482 error_setg_errno(errp, -allocation_start, 4483 "Failed to resize refcount structures"); 4484 ret = allocation_start; 4485 goto fail; 4486 } 4487 4488 clusters_allocated = qcow2_alloc_clusters_at(bs, allocation_start, 4489 nb_new_data_clusters); 4490 if (clusters_allocated < 0) { 4491 error_setg_errno(errp, -clusters_allocated, 4492 "Failed to allocate data clusters"); 4493 ret = clusters_allocated; 4494 goto fail; 4495 } 4496 4497 assert(clusters_allocated == nb_new_data_clusters); 4498 4499 /* Allocate the data area */ 4500 new_file_size = allocation_start + 4501 nb_new_data_clusters * s->cluster_size; 4502 /* 4503 * Image file grows, so @exact does not matter. 4504 * 4505 * If we need to zero out the new area, try first whether the protocol 4506 * driver can already take care of this. 
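 * If the protocol driver handles it, BDRV_REQ_ZERO_WRITE is cleared below and the qcow2 layer does not need to zero the area again; otherwise the flag stays set and the new space is zeroed with zero subclusters further down.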
4507 */ 4508 if (flags & BDRV_REQ_ZERO_WRITE) { 4509 ret = bdrv_co_truncate(bs->file, new_file_size, false, prealloc, 4510 BDRV_REQ_ZERO_WRITE, NULL); 4511 if (ret >= 0) { 4512 flags &= ~BDRV_REQ_ZERO_WRITE; 4513 /* Ensure that we read zeroes and not backing file data */ 4514 subclusters_need_allocation = true; 4515 } 4516 } else { 4517 ret = -1; 4518 } 4519 if (ret < 0) { 4520 ret = bdrv_co_truncate(bs->file, new_file_size, false, prealloc, 0, 4521 errp); 4522 } 4523 if (ret < 0) { 4524 error_prepend(errp, "Failed to resize underlying file: "); 4525 qcow2_free_clusters(bs, allocation_start, 4526 nb_new_data_clusters * s->cluster_size, 4527 QCOW2_DISCARD_OTHER); 4528 goto fail; 4529 } 4530 4531 /* Create the necessary L2 entries */ 4532 host_offset = allocation_start; 4533 guest_offset = old_length; 4534 while (nb_new_data_clusters) { 4535 int64_t nb_clusters = MIN( 4536 nb_new_data_clusters, 4537 s->l2_slice_size - offset_to_l2_slice_index(s, guest_offset)); 4538 unsigned cow_start_length = offset_into_cluster(s, guest_offset); 4539 QCowL2Meta allocation; 4540 guest_offset = start_of_cluster(s, guest_offset); 4541 allocation = (QCowL2Meta) { 4542 .offset = guest_offset, 4543 .alloc_offset = host_offset, 4544 .nb_clusters = nb_clusters, 4545 .cow_start = { 4546 .offset = 0, 4547 .nb_bytes = cow_start_length, 4548 }, 4549 .cow_end = { 4550 .offset = nb_clusters << s->cluster_bits, 4551 .nb_bytes = 0, 4552 }, 4553 .prealloc = !subclusters_need_allocation, 4554 }; 4555 qemu_co_queue_init(&allocation.dependent_requests); 4556 4557 ret = qcow2_alloc_cluster_link_l2(bs, &allocation); 4558 if (ret < 0) { 4559 error_setg_errno(errp, -ret, "Failed to update L2 tables"); 4560 qcow2_free_clusters(bs, host_offset, 4561 nb_new_data_clusters * s->cluster_size, 4562 QCOW2_DISCARD_OTHER); 4563 goto fail; 4564 } 4565 4566 guest_offset += nb_clusters * s->cluster_size; 4567 host_offset += nb_clusters * s->cluster_size; 4568 nb_new_data_clusters -= nb_clusters; 4569 } 4570 break; 4571 } 4572 4573 default: 4574 g_assert_not_reached(); 4575 } 4576 4577 if ((flags & BDRV_REQ_ZERO_WRITE) && offset > old_length) { 4578 uint64_t zero_start = QEMU_ALIGN_UP(old_length, s->subcluster_size); 4579 4580 /* 4581 * Use zero clusters as much as we can. qcow2_subcluster_zeroize() 4582 * requires a subcluster-aligned start. The end may be unaligned if 4583 * it is at the end of the image (which it is here). 
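 * The possibly unaligned head [old_length, zero_start) cannot be covered by zero subclusters and is therefore written out as explicit zeroes below.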
4584 */ 4585 if (offset > zero_start) { 4586 ret = qcow2_subcluster_zeroize(bs, zero_start, offset - zero_start, 4587 0); 4588 if (ret < 0) { 4589 error_setg_errno(errp, -ret, "Failed to zero out new clusters"); 4590 goto fail; 4591 } 4592 } 4593 4594 /* Write explicit zeros for the unaligned head */ 4595 if (zero_start > old_length) { 4596 uint64_t len = MIN(zero_start, offset) - old_length; 4597 uint8_t *buf = qemu_blockalign0(bs, len); 4598 QEMUIOVector qiov; 4599 qemu_iovec_init_buf(&qiov, buf, len); 4600 4601 qemu_co_mutex_unlock(&s->lock); 4602 ret = qcow2_co_pwritev_part(bs, old_length, len, &qiov, 0, 0); 4603 qemu_co_mutex_lock(&s->lock); 4604 4605 qemu_vfree(buf); 4606 if (ret < 0) { 4607 error_setg_errno(errp, -ret, "Failed to zero out the new area"); 4608 goto fail; 4609 } 4610 } 4611 } 4612 4613 if (prealloc != PREALLOC_MODE_OFF) { 4614 /* Flush metadata before actually changing the image size */ 4615 ret = qcow2_write_caches(bs); 4616 if (ret < 0) { 4617 error_setg_errno(errp, -ret, 4618 "Failed to flush the preallocated area to disk"); 4619 goto fail; 4620 } 4621 } 4622 4623 bs->total_sectors = offset / BDRV_SECTOR_SIZE; 4624 4625 /* write updated header.size */ 4626 offset = cpu_to_be64(offset); 4627 ret = bdrv_co_pwrite_sync(bs->file, offsetof(QCowHeader, size), 4628 sizeof(offset), &offset, 0); 4629 if (ret < 0) { 4630 error_setg_errno(errp, -ret, "Failed to update the image size"); 4631 goto fail; 4632 } 4633 4634 s->l1_vm_state_index = new_l1_size; 4635 4636 /* Update cache sizes */ 4637 options = qdict_clone_shallow(bs->options); 4638 ret = qcow2_update_options(bs, options, s->flags, errp); 4639 qobject_unref(options); 4640 if (ret < 0) { 4641 goto fail; 4642 } 4643 ret = 0; 4644 fail: 4645 qemu_co_mutex_unlock(&s->lock); 4646 return ret; 4647 } 4648 4649 static int coroutine_fn GRAPH_RDLOCK 4650 qcow2_co_pwritev_compressed_task(BlockDriverState *bs, 4651 uint64_t offset, uint64_t bytes, 4652 QEMUIOVector *qiov, size_t qiov_offset) 4653 { 4654 BDRVQcow2State *s = bs->opaque; 4655 int ret; 4656 ssize_t out_len; 4657 uint8_t *buf, *out_buf; 4658 uint64_t cluster_offset; 4659 4660 assert(bytes == s->cluster_size || (bytes < s->cluster_size && 4661 (offset + bytes == bs->total_sectors << BDRV_SECTOR_BITS))); 4662 4663 buf = qemu_blockalign(bs, s->cluster_size); 4664 if (bytes < s->cluster_size) { 4665 /* Zero-pad last write if image size is not cluster aligned */ 4666 memset(buf + bytes, 0, s->cluster_size - bytes); 4667 } 4668 qemu_iovec_to_buf(qiov, qiov_offset, buf, bytes); 4669 4670 out_buf = g_malloc(s->cluster_size); 4671 4672 out_len = qcow2_co_compress(bs, out_buf, s->cluster_size - 1, 4673 buf, s->cluster_size); 4674 if (out_len == -ENOMEM) { 4675 /* could not compress: write normal cluster */ 4676 ret = qcow2_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset, 0); 4677 if (ret < 0) { 4678 goto fail; 4679 } 4680 goto success; 4681 } else if (out_len < 0) { 4682 ret = -EINVAL; 4683 goto fail; 4684 } 4685 4686 qemu_co_mutex_lock(&s->lock); 4687 ret = qcow2_alloc_compressed_cluster_offset(bs, offset, out_len, 4688 &cluster_offset); 4689 if (ret < 0) { 4690 qemu_co_mutex_unlock(&s->lock); 4691 goto fail; 4692 } 4693 4694 ret = qcow2_pre_write_overlap_check(bs, 0, cluster_offset, out_len, true); 4695 qemu_co_mutex_unlock(&s->lock); 4696 if (ret < 0) { 4697 goto fail; 4698 } 4699 4700 BLKDBG_CO_EVENT(s->data_file, BLKDBG_WRITE_COMPRESSED); 4701 ret = bdrv_co_pwrite(s->data_file, cluster_offset, out_len, out_buf, 0); 4702 if (ret < 0) { 4703 goto fail; 4704 } 4705 
success: 4706 ret = 0; 4707 fail: 4708 qemu_vfree(buf); 4709 g_free(out_buf); 4710 return ret; 4711 } 4712 4713 /* 4714 * This function can count as GRAPH_RDLOCK because 4715 * qcow2_co_pwritev_compressed_part() holds the graph lock and keeps it until 4716 * this coroutine has terminated. 4717 */ 4718 static int coroutine_fn GRAPH_RDLOCK 4719 qcow2_co_pwritev_compressed_task_entry(AioTask *task) 4720 { 4721 Qcow2AioTask *t = container_of(task, Qcow2AioTask, task); 4722 4723 assert(!t->subcluster_type && !t->l2meta); 4724 4725 return qcow2_co_pwritev_compressed_task(t->bs, t->offset, t->bytes, t->qiov, 4726 t->qiov_offset); 4727 } 4728 4729 /* 4730 * XXX: put compressed sectors first, then all the cluster aligned 4731 * tables to avoid losing bytes in alignment 4732 */ 4733 static int coroutine_fn GRAPH_RDLOCK 4734 qcow2_co_pwritev_compressed_part(BlockDriverState *bs, 4735 int64_t offset, int64_t bytes, 4736 QEMUIOVector *qiov, size_t qiov_offset) 4737 { 4738 BDRVQcow2State *s = bs->opaque; 4739 AioTaskPool *aio = NULL; 4740 int ret = 0; 4741 4742 if (has_data_file(bs)) { 4743 return -ENOTSUP; 4744 } 4745 4746 if (bytes == 0) { 4747 /* 4748 * align end of file to a sector boundary to ease reading with 4749 * sector based I/Os 4750 */ 4751 int64_t len = bdrv_co_getlength(bs->file->bs); 4752 if (len < 0) { 4753 return len; 4754 } 4755 return bdrv_co_truncate(bs->file, len, false, PREALLOC_MODE_OFF, 0, 4756 NULL); 4757 } 4758 4759 if (offset_into_cluster(s, offset)) { 4760 return -EINVAL; 4761 } 4762 4763 if (offset_into_cluster(s, bytes) && 4764 (offset + bytes) != (bs->total_sectors << BDRV_SECTOR_BITS)) { 4765 return -EINVAL; 4766 } 4767 4768 while (bytes && aio_task_pool_status(aio) == 0) { 4769 uint64_t chunk_size = MIN(bytes, s->cluster_size); 4770 4771 if (!aio && chunk_size != bytes) { 4772 aio = aio_task_pool_new(QCOW2_MAX_WORKERS); 4773 } 4774 4775 ret = qcow2_add_task(bs, aio, qcow2_co_pwritev_compressed_task_entry, 4776 0, 0, offset, chunk_size, qiov, qiov_offset, NULL); 4777 if (ret < 0) { 4778 break; 4779 } 4780 qiov_offset += chunk_size; 4781 offset += chunk_size; 4782 bytes -= chunk_size; 4783 } 4784 4785 if (aio) { 4786 aio_task_pool_wait_all(aio); 4787 if (ret == 0) { 4788 ret = aio_task_pool_status(aio); 4789 } 4790 g_free(aio); 4791 } 4792 4793 return ret; 4794 } 4795 4796 static int coroutine_fn GRAPH_RDLOCK 4797 qcow2_co_preadv_compressed(BlockDriverState *bs, 4798 uint64_t l2_entry, 4799 uint64_t offset, 4800 uint64_t bytes, 4801 QEMUIOVector *qiov, 4802 size_t qiov_offset) 4803 { 4804 BDRVQcow2State *s = bs->opaque; 4805 int ret = 0, csize; 4806 uint64_t coffset; 4807 uint8_t *buf, *out_buf; 4808 int offset_in_cluster = offset_into_cluster(s, offset); 4809 4810 qcow2_parse_compressed_l2_entry(bs, l2_entry, &coffset, &csize); 4811 4812 buf = g_try_malloc(csize); 4813 if (!buf) { 4814 return -ENOMEM; 4815 } 4816 4817 out_buf = qemu_blockalign(bs, s->cluster_size); 4818 4819 BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_COMPRESSED); 4820 ret = bdrv_co_pread(bs->file, coffset, csize, buf, 0); 4821 if (ret < 0) { 4822 goto fail; 4823 } 4824 4825 if (qcow2_co_decompress(bs, out_buf, s->cluster_size, buf, csize) < 0) { 4826 ret = -EIO; 4827 goto fail; 4828 } 4829 4830 qemu_iovec_from_buf(qiov, qiov_offset, out_buf + offset_in_cluster, bytes); 4831 4832 fail: 4833 qemu_vfree(out_buf); 4834 g_free(buf); 4835 4836 return ret; 4837 } 4838 4839 static int GRAPH_RDLOCK make_completely_empty(BlockDriverState *bs) 4840 { 4841 BDRVQcow2State *s = bs->opaque; 4842 Error *local_err = NULL; 4843 
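/* Target layout produced below: cluster 0 keeps the image header, cluster 1 becomes the new single-cluster refcount table, cluster 2 the first refblock, and the l1_clusters starting at cluster 3 hold the (zeroed) L1 table. */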
int ret, l1_clusters; 4844 int64_t offset; 4845 uint64_t *new_reftable = NULL; 4846 uint64_t rt_entry, l1_size2; 4847 struct { 4848 uint64_t l1_offset; 4849 uint64_t reftable_offset; 4850 uint32_t reftable_clusters; 4851 } QEMU_PACKED l1_ofs_rt_ofs_cls; 4852 4853 ret = qcow2_cache_empty(bs, s->l2_table_cache); 4854 if (ret < 0) { 4855 goto fail; 4856 } 4857 4858 ret = qcow2_cache_empty(bs, s->refcount_block_cache); 4859 if (ret < 0) { 4860 goto fail; 4861 } 4862 4863 /* Refcounts will be broken utterly */ 4864 ret = qcow2_mark_dirty(bs); 4865 if (ret < 0) { 4866 goto fail; 4867 } 4868 4869 BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE); 4870 4871 l1_clusters = DIV_ROUND_UP(s->l1_size, s->cluster_size / L1E_SIZE); 4872 l1_size2 = (uint64_t)s->l1_size * L1E_SIZE; 4873 4874 /* After this call, neither the in-memory nor the on-disk refcount 4875 * information accurately describe the actual references */ 4876 4877 ret = bdrv_pwrite_zeroes(bs->file, s->l1_table_offset, 4878 l1_clusters * s->cluster_size, 0); 4879 if (ret < 0) { 4880 goto fail_broken_refcounts; 4881 } 4882 memset(s->l1_table, 0, l1_size2); 4883 4884 BLKDBG_EVENT(bs->file, BLKDBG_EMPTY_IMAGE_PREPARE); 4885 4886 /* Overwrite enough clusters at the beginning of the sectors to place 4887 * the refcount table, a refcount block and the L1 table in; this may 4888 * overwrite parts of the existing refcount and L1 table, which is not 4889 * an issue because the dirty flag is set, complete data loss is in fact 4890 * desired and partial data loss is consequently fine as well */ 4891 ret = bdrv_pwrite_zeroes(bs->file, s->cluster_size, 4892 (2 + l1_clusters) * s->cluster_size, 0); 4893 /* This call (even if it failed overall) may have overwritten on-disk 4894 * refcount structures; in that case, the in-memory refcount information 4895 * will probably differ from the on-disk information which makes the BDS 4896 * unusable */ 4897 if (ret < 0) { 4898 goto fail_broken_refcounts; 4899 } 4900 4901 BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE); 4902 BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_UPDATE); 4903 4904 /* "Create" an empty reftable (one cluster) directly after the image 4905 * header and an empty L1 table three clusters after the image header; 4906 * the cluster between those two will be used as the first refblock */ 4907 l1_ofs_rt_ofs_cls.l1_offset = cpu_to_be64(3 * s->cluster_size); 4908 l1_ofs_rt_ofs_cls.reftable_offset = cpu_to_be64(s->cluster_size); 4909 l1_ofs_rt_ofs_cls.reftable_clusters = cpu_to_be32(1); 4910 ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_table_offset), 4911 sizeof(l1_ofs_rt_ofs_cls), &l1_ofs_rt_ofs_cls, 0); 4912 if (ret < 0) { 4913 goto fail_broken_refcounts; 4914 } 4915 4916 s->l1_table_offset = 3 * s->cluster_size; 4917 4918 new_reftable = g_try_new0(uint64_t, s->cluster_size / REFTABLE_ENTRY_SIZE); 4919 if (!new_reftable) { 4920 ret = -ENOMEM; 4921 goto fail_broken_refcounts; 4922 } 4923 4924 s->refcount_table_offset = s->cluster_size; 4925 s->refcount_table_size = s->cluster_size / REFTABLE_ENTRY_SIZE; 4926 s->max_refcount_table_index = 0; 4927 4928 g_free(s->refcount_table); 4929 s->refcount_table = new_reftable; 4930 new_reftable = NULL; 4931 4932 /* Now the in-memory refcount information again corresponds to the on-disk 4933 * information (reftable is empty and no refblocks (the refblock cache is 4934 * empty)); however, this means some clusters (e.g. 
the image header) are 4935 * referenced, but not refcounted, but the normal qcow2 code assumes that 4936 * the in-memory information is always correct */ 4937 4938 BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC); 4939 4940 /* Enter the first refblock into the reftable */ 4941 rt_entry = cpu_to_be64(2 * s->cluster_size); 4942 ret = bdrv_pwrite_sync(bs->file, s->cluster_size, sizeof(rt_entry), 4943 &rt_entry, 0); 4944 if (ret < 0) { 4945 goto fail_broken_refcounts; 4946 } 4947 s->refcount_table[0] = 2 * s->cluster_size; 4948 4949 s->free_cluster_index = 0; 4950 assert(3 + l1_clusters <= s->refcount_block_size); 4951 offset = qcow2_alloc_clusters(bs, 3 * s->cluster_size + l1_size2); 4952 if (offset < 0) { 4953 ret = offset; 4954 goto fail_broken_refcounts; 4955 } else if (offset > 0) { 4956 error_report("First cluster in emptied image is in use"); 4957 abort(); 4958 } 4959 4960 /* Now finally the in-memory information corresponds to the on-disk 4961 * structures and is correct */ 4962 ret = qcow2_mark_clean(bs); 4963 if (ret < 0) { 4964 goto fail; 4965 } 4966 4967 ret = bdrv_truncate(bs->file, (3 + l1_clusters) * s->cluster_size, false, 4968 PREALLOC_MODE_OFF, 0, &local_err); 4969 if (ret < 0) { 4970 error_report_err(local_err); 4971 goto fail; 4972 } 4973 4974 return 0; 4975 4976 fail_broken_refcounts: 4977 /* The BDS is unusable at this point. If we wanted to make it usable, we 4978 * would have to call qcow2_refcount_close(), qcow2_refcount_init(), 4979 * qcow2_check_refcounts(), qcow2_refcount_close() and qcow2_refcount_init() 4980 * again. However, because the functions which could have caused this error 4981 * path to be taken are used by those functions as well, it's very likely 4982 * that that sequence will fail as well. Therefore, just eject the BDS. */ 4983 bs->drv = NULL; 4984 4985 fail: 4986 g_free(new_reftable); 4987 return ret; 4988 } 4989 4990 static int GRAPH_RDLOCK qcow2_make_empty(BlockDriverState *bs) 4991 { 4992 BDRVQcow2State *s = bs->opaque; 4993 uint64_t offset, end_offset; 4994 int step = QEMU_ALIGN_DOWN(INT_MAX, s->cluster_size); 4995 int l1_clusters, ret = 0; 4996 4997 l1_clusters = DIV_ROUND_UP(s->l1_size, s->cluster_size / L1E_SIZE); 4998 4999 if (s->qcow_version >= 3 && !s->snapshots && !s->nb_bitmaps && 5000 3 + l1_clusters <= s->refcount_block_size && 5001 s->crypt_method_header != QCOW_CRYPT_LUKS && 5002 !has_data_file(bs)) { 5003 /* The following function only works for qcow2 v3 images (it 5004 * requires the dirty flag) and only as long as there are no 5005 * features that reserve extra clusters (such as snapshots, 5006 * LUKS header, or persistent bitmaps), because it completely 5007 * empties the image. Furthermore, the L1 table and three 5008 * additional clusters (image header, refcount table, one 5009 * refcount block) have to fit inside one refcount block. It 5010 * only resets the image file, i.e. does not work with an 5011 * external data file. */ 5012 return make_completely_empty(bs); 5013 } 5014 5015 /* This fallback code simply discards every active cluster; this is slow, 5016 * but works in all cases */ 5017 end_offset = bs->total_sectors * BDRV_SECTOR_SIZE; 5018 for (offset = 0; offset < end_offset; offset += step) { 5019 /* As this function is generally used after committing an external 5020 * snapshot, QCOW2_DISCARD_SNAPSHOT seems appropriate. Also, the 5021 * default action for this kind of discard is to pass the discard, 5022 * which will ideally result in an actually smaller image file, as 5023 * is probably desired. 
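 * (The loop below walks the image in cluster-aligned chunks of at most @step bytes, which keeps each discard request below INT_MAX.)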
*/ 5024 ret = qcow2_cluster_discard(bs, offset, MIN(step, end_offset - offset), 5025 QCOW2_DISCARD_SNAPSHOT, true); 5026 if (ret < 0) { 5027 break; 5028 } 5029 } 5030 5031 return ret; 5032 } 5033 5034 static coroutine_fn GRAPH_RDLOCK int qcow2_co_flush_to_os(BlockDriverState *bs) 5035 { 5036 BDRVQcow2State *s = bs->opaque; 5037 int ret; 5038 5039 qemu_co_mutex_lock(&s->lock); 5040 ret = qcow2_write_caches(bs); 5041 qemu_co_mutex_unlock(&s->lock); 5042 5043 return ret; 5044 } 5045 5046 static BlockMeasureInfo *qcow2_measure(QemuOpts *opts, BlockDriverState *in_bs, 5047 Error **errp) 5048 { 5049 Error *local_err = NULL; 5050 BlockMeasureInfo *info; 5051 uint64_t required = 0; /* bytes that contribute to required size */ 5052 uint64_t virtual_size; /* disk size as seen by guest */ 5053 uint64_t refcount_bits; 5054 uint64_t l2_tables; 5055 uint64_t luks_payload_size = 0; 5056 size_t cluster_size; 5057 int version; 5058 char *optstr; 5059 PreallocMode prealloc; 5060 bool has_backing_file; 5061 bool has_luks; 5062 bool extended_l2; 5063 size_t l2e_size; 5064 5065 /* Parse image creation options */ 5066 extended_l2 = qemu_opt_get_bool_del(opts, BLOCK_OPT_EXTL2, false); 5067 5068 cluster_size = qcow2_opt_get_cluster_size_del(opts, extended_l2, 5069 &local_err); 5070 if (local_err) { 5071 goto err; 5072 } 5073 5074 version = qcow2_opt_get_version_del(opts, &local_err); 5075 if (local_err) { 5076 goto err; 5077 } 5078 5079 refcount_bits = qcow2_opt_get_refcount_bits_del(opts, version, &local_err); 5080 if (local_err) { 5081 goto err; 5082 } 5083 5084 optstr = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC); 5085 prealloc = qapi_enum_parse(&PreallocMode_lookup, optstr, 5086 PREALLOC_MODE_OFF, &local_err); 5087 g_free(optstr); 5088 if (local_err) { 5089 goto err; 5090 } 5091 5092 optstr = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE); 5093 has_backing_file = !!optstr; 5094 g_free(optstr); 5095 5096 optstr = qemu_opt_get_del(opts, BLOCK_OPT_ENCRYPT_FORMAT); 5097 has_luks = optstr && strcmp(optstr, "luks") == 0; 5098 g_free(optstr); 5099 5100 if (has_luks) { 5101 g_autoptr(QCryptoBlockCreateOptions) create_opts = NULL; 5102 QDict *cryptoopts = qcow2_extract_crypto_opts(opts, "luks", errp); 5103 size_t headerlen; 5104 5105 create_opts = block_crypto_create_opts_init(cryptoopts, errp); 5106 qobject_unref(cryptoopts); 5107 if (!create_opts) { 5108 goto err; 5109 } 5110 5111 if (!qcrypto_block_calculate_payload_offset(create_opts, 5112 "encrypt.", 5113 &headerlen, 5114 &local_err)) { 5115 goto err; 5116 } 5117 5118 luks_payload_size = ROUND_UP(headerlen, cluster_size); 5119 } 5120 5121 virtual_size = qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0); 5122 virtual_size = ROUND_UP(virtual_size, cluster_size); 5123 5124 /* Check that virtual disk size is valid */ 5125 l2e_size = extended_l2 ? L2E_SIZE_EXTENDED : L2E_SIZE_NORMAL; 5126 l2_tables = DIV_ROUND_UP(virtual_size / cluster_size, 5127 cluster_size / l2e_size); 5128 if (l2_tables * L1E_SIZE > QCOW_MAX_L1_SIZE) { 5129 error_setg(&local_err, "The image size is too large " 5130 "(try using a larger cluster size)"); 5131 goto err; 5132 } 5133 5134 /* Account for input image */ 5135 if (in_bs) { 5136 int64_t ssize = bdrv_getlength(in_bs); 5137 if (ssize < 0) { 5138 error_setg_errno(&local_err, -ssize, 5139 "Unable to get image virtual_size"); 5140 goto err; 5141 } 5142 5143 virtual_size = ROUND_UP(ssize, cluster_size); 5144 5145 if (has_backing_file) { 5146 /* We don't know how much of the backing chain is shared by the input 5147 * image and the new image file.
In the worst case the new image's 5148 * backing file has nothing in common with the input image. Be 5149 * conservative and assume all clusters need to be written. 5150 */ 5151 required = virtual_size; 5152 } else { 5153 int64_t offset; 5154 int64_t pnum = 0; 5155 5156 for (offset = 0; offset < ssize; offset += pnum) { 5157 int ret; 5158 5159 ret = bdrv_block_status_above(in_bs, NULL, offset, 5160 ssize - offset, &pnum, NULL, 5161 NULL); 5162 if (ret < 0) { 5163 error_setg_errno(&local_err, -ret, 5164 "Unable to get block status"); 5165 goto err; 5166 } 5167 5168 if (ret & BDRV_BLOCK_ZERO) { 5169 /* Skip zero regions (safe with no backing file) */ 5170 } else if ((ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED)) == 5171 (BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED)) { 5172 /* Extend pnum to end of cluster for next iteration */ 5173 pnum = ROUND_UP(offset + pnum, cluster_size) - offset; 5174 5175 /* Count clusters we've seen */ 5176 required += offset % cluster_size + pnum; 5177 } 5178 } 5179 } 5180 } 5181 5182 /* Take into account preallocation. Nothing special is needed for 5183 * PREALLOC_MODE_METADATA since metadata is always counted. 5184 */ 5185 if (prealloc == PREALLOC_MODE_FULL || prealloc == PREALLOC_MODE_FALLOC) { 5186 required = virtual_size; 5187 } 5188 5189 info = g_new0(BlockMeasureInfo, 1); 5190 info->fully_allocated = luks_payload_size + 5191 qcow2_calc_prealloc_size(virtual_size, cluster_size, 5192 ctz32(refcount_bits), extended_l2); 5193 5194 /* 5195 * Remove data clusters that are not required. This overestimates the 5196 * required size because metadata needed for the fully allocated file is 5197 * still counted. Show bitmaps only if both source and destination 5198 * would support them. 5199 */ 5200 info->required = info->fully_allocated - virtual_size + required; 5201 info->has_bitmaps = version >= 3 && in_bs && 5202 bdrv_supports_persistent_dirty_bitmap(in_bs); 5203 if (info->has_bitmaps) { 5204 info->bitmaps = qcow2_get_persistent_dirty_bitmap_size(in_bs, 5205 cluster_size); 5206 } 5207 return info; 5208 5209 err: 5210 error_propagate(errp, local_err); 5211 return NULL; 5212 } 5213 5214 static int coroutine_fn 5215 qcow2_co_get_info(BlockDriverState *bs, BlockDriverInfo *bdi) 5216 { 5217 BDRVQcow2State *s = bs->opaque; 5218 bdi->cluster_size = s->cluster_size; 5219 bdi->subcluster_size = s->subcluster_size; 5220 bdi->vm_state_offset = qcow2_vm_state_offset(s); 5221 bdi->is_dirty = s->incompatible_features & QCOW2_INCOMPAT_DIRTY; 5222 return 0; 5223 } 5224 5225 static ImageInfoSpecific *qcow2_get_specific_info(BlockDriverState *bs, 5226 Error **errp) 5227 { 5228 BDRVQcow2State *s = bs->opaque; 5229 ImageInfoSpecific *spec_info; 5230 QCryptoBlockInfo *encrypt_info = NULL; 5231 5232 if (s->crypto != NULL) { 5233 encrypt_info = qcrypto_block_get_info(s->crypto, errp); 5234 if (!encrypt_info) { 5235 return NULL; 5236 } 5237 } 5238 5239 spec_info = g_new(ImageInfoSpecific, 1); 5240 *spec_info = (ImageInfoSpecific){ 5241 .type = IMAGE_INFO_SPECIFIC_KIND_QCOW2, 5242 .u.qcow2.data = g_new0(ImageInfoSpecificQCow2, 1), 5243 }; 5244 if (s->qcow_version == 2) { 5245 *spec_info->u.qcow2.data = (ImageInfoSpecificQCow2){ 5246 .compat = g_strdup("0.10"), 5247 .refcount_bits = s->refcount_bits, 5248 }; 5249 } else if (s->qcow_version == 3) { 5250 Qcow2BitmapInfoList *bitmaps; 5251 if (!qcow2_get_bitmap_info_list(bs, &bitmaps, errp)) { 5252 qapi_free_ImageInfoSpecific(spec_info); 5253 qapi_free_QCryptoBlockInfo(encrypt_info); 5254 return NULL; 5255 } 5256 *spec_info->u.qcow2.data = 
(ImageInfoSpecificQCow2){ 5257 .compat = g_strdup("1.1"), 5258 .lazy_refcounts = s->compatible_features & 5259 QCOW2_COMPAT_LAZY_REFCOUNTS, 5260 .has_lazy_refcounts = true, 5261 .corrupt = s->incompatible_features & 5262 QCOW2_INCOMPAT_CORRUPT, 5263 .has_corrupt = true, 5264 .has_extended_l2 = true, 5265 .extended_l2 = has_subclusters(s), 5266 .refcount_bits = s->refcount_bits, 5267 .has_bitmaps = !!bitmaps, 5268 .bitmaps = bitmaps, 5269 .data_file = g_strdup(s->image_data_file), 5270 .has_data_file_raw = has_data_file(bs), 5271 .data_file_raw = data_file_is_raw(bs), 5272 .compression_type = s->compression_type, 5273 }; 5274 } else { 5275 /* if this assertion fails, this probably means a new version was 5276 * added without having it covered here */ 5277 assert(false); 5278 } 5279 5280 if (encrypt_info) { 5281 ImageInfoSpecificQCow2Encryption *qencrypt = 5282 g_new(ImageInfoSpecificQCow2Encryption, 1); 5283 switch (encrypt_info->format) { 5284 case Q_CRYPTO_BLOCK_FORMAT_QCOW: 5285 qencrypt->format = BLOCKDEV_QCOW2_ENCRYPTION_FORMAT_AES; 5286 break; 5287 case Q_CRYPTO_BLOCK_FORMAT_LUKS: 5288 qencrypt->format = BLOCKDEV_QCOW2_ENCRYPTION_FORMAT_LUKS; 5289 qencrypt->u.luks = encrypt_info->u.luks; 5290 break; 5291 default: 5292 abort(); 5293 } 5294 /* Since we did shallow copy above, erase any pointers 5295 * in the original info */ 5296 memset(&encrypt_info->u, 0, sizeof(encrypt_info->u)); 5297 qapi_free_QCryptoBlockInfo(encrypt_info); 5298 5299 spec_info->u.qcow2.data->encrypt = qencrypt; 5300 } 5301 5302 return spec_info; 5303 } 5304 5305 static int coroutine_mixed_fn qcow2_has_zero_init(BlockDriverState *bs) 5306 { 5307 BDRVQcow2State *s = bs->opaque; 5308 bool preallocated; 5309 5310 if (qemu_in_coroutine()) { 5311 qemu_co_mutex_lock(&s->lock); 5312 } 5313 /* 5314 * Check preallocation status: Preallocated images have all L2 5315 * tables allocated, nonpreallocated images have none. It is 5316 * therefore enough to check the first one. 5317 */ 5318 preallocated = s->l1_size > 0 && s->l1_table[0] != 0; 5319 if (qemu_in_coroutine()) { 5320 qemu_co_mutex_unlock(&s->lock); 5321 } 5322 5323 if (!preallocated) { 5324 return 1; 5325 } else if (bs->encrypted) { 5326 return 0; 5327 } else { 5328 return bdrv_has_zero_init(s->data_file->bs); 5329 } 5330 } 5331 5332 /* 5333 * Check the request to vmstate. 
On success return 5334 * qcow2_vm_state_offset(bs) + @pos 5335 */ 5336 static int64_t qcow2_check_vmstate_request(BlockDriverState *bs, 5337 QEMUIOVector *qiov, int64_t pos) 5338 { 5339 BDRVQcow2State *s = bs->opaque; 5340 int64_t vmstate_offset = qcow2_vm_state_offset(s); 5341 int ret; 5342 5343 /* Incoming requests must be OK */ 5344 bdrv_check_qiov_request(pos, qiov->size, qiov, 0, &error_abort); 5345 5346 if (INT64_MAX - pos < vmstate_offset) { 5347 return -EIO; 5348 } 5349 5350 pos += vmstate_offset; 5351 ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL); 5352 if (ret < 0) { 5353 return ret; 5354 } 5355 5356 return pos; 5357 } 5358 5359 static int coroutine_fn GRAPH_RDLOCK 5360 qcow2_co_save_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) 5361 { 5362 int64_t offset = qcow2_check_vmstate_request(bs, qiov, pos); 5363 if (offset < 0) { 5364 return offset; 5365 } 5366 5367 BLKDBG_CO_EVENT(bs->file, BLKDBG_VMSTATE_SAVE); 5368 return bs->drv->bdrv_co_pwritev_part(bs, offset, qiov->size, qiov, 0, 0); 5369 } 5370 5371 static int coroutine_fn GRAPH_RDLOCK 5372 qcow2_co_load_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) 5373 { 5374 int64_t offset = qcow2_check_vmstate_request(bs, qiov, pos); 5375 if (offset < 0) { 5376 return offset; 5377 } 5378 5379 BLKDBG_CO_EVENT(bs->file, BLKDBG_VMSTATE_LOAD); 5380 return bs->drv->bdrv_co_preadv_part(bs, offset, qiov->size, qiov, 0, 0); 5381 } 5382 5383 static int GRAPH_RDLOCK qcow2_has_compressed_clusters(BlockDriverState *bs) 5384 { 5385 int64_t offset = 0; 5386 int64_t bytes = bdrv_getlength(bs); 5387 5388 if (bytes < 0) { 5389 return bytes; 5390 } 5391 5392 while (bytes != 0) { 5393 int ret; 5394 QCow2SubclusterType type; 5395 unsigned int cur_bytes = MIN(INT_MAX, bytes); 5396 uint64_t host_offset; 5397 5398 ret = qcow2_get_host_offset(bs, offset, &cur_bytes, &host_offset, 5399 &type); 5400 if (ret < 0) { 5401 return ret; 5402 } 5403 5404 if (type == QCOW2_SUBCLUSTER_COMPRESSED) { 5405 return 1; 5406 } 5407 5408 offset += cur_bytes; 5409 bytes -= cur_bytes; 5410 } 5411 5412 return 0; 5413 } 5414 5415 /* 5416 * Downgrades an image's version. To achieve this, any incompatible features 5417 * have to be removed. 5418 */ 5419 static int GRAPH_RDLOCK 5420 qcow2_downgrade(BlockDriverState *bs, int target_version, 5421 BlockDriverAmendStatusCB *status_cb, void *cb_opaque, 5422 Error **errp) 5423 { 5424 BDRVQcow2State *s = bs->opaque; 5425 int current_version = s->qcow_version; 5426 int ret; 5427 int i; 5428 5429 /* This is qcow2_downgrade(), not qcow2_upgrade() */ 5430 assert(target_version < current_version); 5431 5432 /* There are no other versions (now) that you can downgrade to */ 5433 assert(target_version == 2); 5434 5435 if (s->refcount_order != 4) { 5436 error_setg(errp, "compat=0.10 requires refcount_bits=16"); 5437 return -ENOTSUP; 5438 } 5439 5440 if (has_data_file(bs)) { 5441 error_setg(errp, "Cannot downgrade an image with a data file"); 5442 return -ENOTSUP; 5443 } 5444 5445 /* 5446 * If any internal snapshot has a different size than the current 5447 * image size, or VM state size that exceeds 32 bits, downgrading 5448 * is unsafe. Even though we would still use v3-compliant output 5449 * to preserve that data, other v2 programs might not realize 5450 * those optional fields are important. 
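 * (The mandatory part of a v2 snapshot table entry only has a 32-bit VM state size field; the 64-bit VM state size and the disk size live in extra data that v2 readers may ignore.)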
5451 */ 5452 for (i = 0; i < s->nb_snapshots; i++) { 5453 if (s->snapshots[i].vm_state_size > UINT32_MAX || 5454 s->snapshots[i].disk_size != bs->total_sectors * BDRV_SECTOR_SIZE) { 5455 error_setg(errp, "Internal snapshots prevent downgrade of image"); 5456 return -ENOTSUP; 5457 } 5458 } 5459 5460 /* clear incompatible features */ 5461 if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) { 5462 ret = qcow2_mark_clean(bs); 5463 if (ret < 0) { 5464 error_setg_errno(errp, -ret, "Failed to make the image clean"); 5465 return ret; 5466 } 5467 } 5468 5469 /* with QCOW2_INCOMPAT_CORRUPT, it is pretty much impossible to get here in 5470 * the first place; if that happens nonetheless, returning -ENOTSUP is the 5471 * best thing to do anyway */ 5472 5473 if (s->incompatible_features & ~QCOW2_INCOMPAT_COMPRESSION) { 5474 error_setg(errp, "Cannot downgrade an image with incompatible features " 5475 "0x%" PRIx64 " set", 5476 s->incompatible_features & ~QCOW2_INCOMPAT_COMPRESSION); 5477 return -ENOTSUP; 5478 } 5479 5480 /* since we can ignore compatible features, we can set them to 0 as well */ 5481 s->compatible_features = 0; 5482 /* if lazy refcounts have been used, they have already been fixed through 5483 * clearing the dirty flag */ 5484 5485 /* clearing autoclear features is trivial */ 5486 s->autoclear_features = 0; 5487 5488 ret = qcow2_expand_zero_clusters(bs, status_cb, cb_opaque); 5489 if (ret < 0) { 5490 error_setg_errno(errp, -ret, "Failed to turn zero into data clusters"); 5491 return ret; 5492 } 5493 5494 if (s->incompatible_features & QCOW2_INCOMPAT_COMPRESSION) { 5495 ret = qcow2_has_compressed_clusters(bs); 5496 if (ret < 0) { 5497 error_setg(errp, "Failed to check block status"); 5498 return -EINVAL; 5499 } 5500 if (ret) { 5501 error_setg(errp, "Cannot downgrade an image with zstd compression " 5502 "type and existing compressed clusters"); 5503 return -ENOTSUP; 5504 } 5505 /* 5506 * No compressed clusters for now, so just choose default zlib 5507 * compression. 5508 */ 5509 s->incompatible_features &= ~QCOW2_INCOMPAT_COMPRESSION; 5510 s->compression_type = QCOW2_COMPRESSION_TYPE_ZLIB; 5511 } 5512 5513 assert(s->incompatible_features == 0); 5514 5515 s->qcow_version = target_version; 5516 ret = qcow2_update_header(bs); 5517 if (ret < 0) { 5518 s->qcow_version = current_version; 5519 error_setg_errno(errp, -ret, "Failed to update the image header"); 5520 return ret; 5521 } 5522 return 0; 5523 } 5524 5525 /* 5526 * Upgrades an image's version. While newer versions encompass all 5527 * features of older versions, some things may have to be presented 5528 * differently. 5529 */ 5530 static int GRAPH_RDLOCK 5531 qcow2_upgrade(BlockDriverState *bs, int target_version, 5532 BlockDriverAmendStatusCB *status_cb, void *cb_opaque, 5533 Error **errp) 5534 { 5535 BDRVQcow2State *s = bs->opaque; 5536 bool need_snapshot_update; 5537 int current_version = s->qcow_version; 5538 int i; 5539 int ret; 5540 5541 /* This is qcow2_upgrade(), not qcow2_downgrade() */ 5542 assert(target_version > current_version); 5543 5544 /* There are no other versions (yet) that you can upgrade to */ 5545 assert(target_version == 3); 5546 5547 status_cb(bs, 0, 2, cb_opaque); 5548 5549 /* 5550 * In v2, snapshots do not need to have extra data. v3 requires 5551 * the 64-bit VM state size and the virtual disk size to be 5552 * present. 5553 * qcow2_write_snapshots() will always write the list in the 5554 * v3-compliant format.
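 * The loop below therefore looks for snapshot entries whose extra data is too short to hold both 64-bit fields and rewrites the snapshot table if any are found.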
5555 */ 5556 need_snapshot_update = false; 5557 for (i = 0; i < s->nb_snapshots; i++) { 5558 if (s->snapshots[i].extra_data_size < 5559 sizeof_field(QCowSnapshotExtraData, vm_state_size_large) + 5560 sizeof_field(QCowSnapshotExtraData, disk_size)) 5561 { 5562 need_snapshot_update = true; 5563 break; 5564 } 5565 } 5566 if (need_snapshot_update) { 5567 ret = qcow2_write_snapshots(bs); 5568 if (ret < 0) { 5569 error_setg_errno(errp, -ret, "Failed to update the snapshot table"); 5570 return ret; 5571 } 5572 } 5573 status_cb(bs, 1, 2, cb_opaque); 5574 5575 s->qcow_version = target_version; 5576 ret = qcow2_update_header(bs); 5577 if (ret < 0) { 5578 s->qcow_version = current_version; 5579 error_setg_errno(errp, -ret, "Failed to update the image header"); 5580 return ret; 5581 } 5582 status_cb(bs, 2, 2, cb_opaque); 5583 5584 return 0; 5585 } 5586 5587 typedef enum Qcow2AmendOperation { 5588 /* This is the value Qcow2AmendHelperCBInfo::last_operation will be 5589 * statically initialized to so that the helper CB can discern the first 5590 * invocation from an operation change */ 5591 QCOW2_NO_OPERATION = 0, 5592 5593 QCOW2_UPGRADING, 5594 QCOW2_UPDATING_ENCRYPTION, 5595 QCOW2_CHANGING_REFCOUNT_ORDER, 5596 QCOW2_DOWNGRADING, 5597 } Qcow2AmendOperation; 5598 5599 typedef struct Qcow2AmendHelperCBInfo { 5600 /* The code coordinating the amend operations should only modify 5601 * these four fields; the rest will be managed by the CB */ 5602 BlockDriverAmendStatusCB *original_status_cb; 5603 void *original_cb_opaque; 5604 5605 Qcow2AmendOperation current_operation; 5606 5607 /* Total number of operations to perform (only set once) */ 5608 int total_operations; 5609 5610 /* The following fields are managed by the CB */ 5611 5612 /* Number of operations completed */ 5613 int operations_completed; 5614 5615 /* Cumulative offset of all completed operations */ 5616 int64_t offset_completed; 5617 5618 Qcow2AmendOperation last_operation; 5619 int64_t last_work_size; 5620 } Qcow2AmendHelperCBInfo; 5621 5622 static void qcow2_amend_helper_cb(BlockDriverState *bs, 5623 int64_t operation_offset, 5624 int64_t operation_work_size, void *opaque) 5625 { 5626 Qcow2AmendHelperCBInfo *info = opaque; 5627 int64_t current_work_size; 5628 int64_t projected_work_size; 5629 5630 if (info->current_operation != info->last_operation) { 5631 if (info->last_operation != QCOW2_NO_OPERATION) { 5632 info->offset_completed += info->last_work_size; 5633 info->operations_completed++; 5634 } 5635 5636 info->last_operation = info->current_operation; 5637 } 5638 5639 assert(info->total_operations > 0); 5640 assert(info->operations_completed < info->total_operations); 5641 5642 info->last_work_size = operation_work_size; 5643 5644 current_work_size = info->offset_completed + operation_work_size; 5645 5646 /* current_work_size is the total work size for (operations_completed + 1) 5647 * operations (which includes this one), so multiply it by the number of 5648 * operations not covered and divide it by the number of operations 5649 * covered to get a projection for the operations not covered */ 5650 projected_work_size = current_work_size * (info->total_operations - 5651 info->operations_completed - 1) 5652 / (info->operations_completed + 1); 5653 5654 info->original_status_cb(bs, info->offset_completed + operation_offset, 5655 current_work_size + projected_work_size, 5656 info->original_cb_opaque); 5657 } 5658 5659 static int GRAPH_RDLOCK 5660 qcow2_amend_options(BlockDriverState *bs, QemuOpts *opts, 5661 BlockDriverAmendStatusCB 

static int GRAPH_RDLOCK
qcow2_amend_options(BlockDriverState *bs, QemuOpts *opts,
                    BlockDriverAmendStatusCB *status_cb, void *cb_opaque,
                    bool force, Error **errp)
{
    BDRVQcow2State *s = bs->opaque;
    int old_version = s->qcow_version, new_version = old_version;
    uint64_t new_size = 0;
    const char *backing_file = NULL, *backing_format = NULL, *data_file = NULL;
    bool lazy_refcounts = s->use_lazy_refcounts;
    bool data_file_raw = data_file_is_raw(bs);
    const char *compat = NULL;
    int refcount_bits = s->refcount_bits;
    int ret;
    QemuOptDesc *desc = opts->list->desc;
    Qcow2AmendHelperCBInfo helper_cb_info;
    bool encryption_update = false;

    while (desc && desc->name) {
        if (!qemu_opt_find(opts, desc->name)) {
            /* only change explicitly defined options */
            desc++;
            continue;
        }

        if (!strcmp(desc->name, BLOCK_OPT_COMPAT_LEVEL)) {
            compat = qemu_opt_get(opts, BLOCK_OPT_COMPAT_LEVEL);
            if (!compat) {
                /* preserve default */
            } else if (!strcmp(compat, "0.10") || !strcmp(compat, "v2")) {
                new_version = 2;
            } else if (!strcmp(compat, "1.1") || !strcmp(compat, "v3")) {
                new_version = 3;
            } else {
                error_setg(errp, "Unknown compatibility level %s", compat);
                return -EINVAL;
            }
        } else if (!strcmp(desc->name, BLOCK_OPT_SIZE)) {
            new_size = qemu_opt_get_size(opts, BLOCK_OPT_SIZE, 0);
        } else if (!strcmp(desc->name, BLOCK_OPT_BACKING_FILE)) {
            backing_file = qemu_opt_get(opts, BLOCK_OPT_BACKING_FILE);
        } else if (!strcmp(desc->name, BLOCK_OPT_BACKING_FMT)) {
            backing_format = qemu_opt_get(opts, BLOCK_OPT_BACKING_FMT);
        } else if (g_str_has_prefix(desc->name, "encrypt.")) {
            if (!s->crypto) {
                error_setg(errp,
                           "Can't amend encryption options - encryption not present");
                return -EINVAL;
            }
            if (s->crypt_method_header != QCOW_CRYPT_LUKS) {
                error_setg(errp,
                           "Only LUKS encryption options can be amended");
                return -ENOTSUP;
            }
            encryption_update = true;
        } else if (!strcmp(desc->name, BLOCK_OPT_LAZY_REFCOUNTS)) {
            lazy_refcounts = qemu_opt_get_bool(opts, BLOCK_OPT_LAZY_REFCOUNTS,
                                               lazy_refcounts);
        } else if (!strcmp(desc->name, BLOCK_OPT_REFCOUNT_BITS)) {
            refcount_bits = qemu_opt_get_number(opts, BLOCK_OPT_REFCOUNT_BITS,
                                                refcount_bits);

            if (refcount_bits <= 0 || refcount_bits > 64 ||
                !is_power_of_2(refcount_bits))
            {
                error_setg(errp, "Refcount width must be a power of two and "
                           "may not exceed 64 bits");
                return -EINVAL;
            }
        } else if (!strcmp(desc->name, BLOCK_OPT_DATA_FILE)) {
            data_file = qemu_opt_get(opts, BLOCK_OPT_DATA_FILE);
            if (data_file && !has_data_file(bs)) {
                error_setg(errp, "data-file can only be set for images that "
                           "use an external data file");
                return -EINVAL;
            }
        } else if (!strcmp(desc->name, BLOCK_OPT_DATA_FILE_RAW)) {
            data_file_raw = qemu_opt_get_bool(opts, BLOCK_OPT_DATA_FILE_RAW,
                                              data_file_raw);
            if (data_file_raw && !data_file_is_raw(bs)) {
                error_setg(errp, "data-file-raw cannot be set on existing "
                           "images");
                return -EINVAL;
            }
        } else {
            /* if this point is reached, this probably means a new option was
             * added without having it covered here */
            abort();
        }

        desc++;
    }

    helper_cb_info = (Qcow2AmendHelperCBInfo){
        .original_status_cb = status_cb,
        .original_cb_opaque = cb_opaque,
        .total_operations = (new_version != old_version)
                          + (s->refcount_bits != refcount_bits)
                          + (encryption_update == true)
    };
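
    /*
     * For example (illustrative only): "qemu-img amend -o compat=0.10" on a
     * v3 image reaches this point with new_version == 2 < old_version == 3
     * and takes the downgrade path at the end of this function, while
     * "-o compat=1.1" on a v2 image takes the upgrade path right below.
     */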

    /* Upgrade first (some features may require compat=1.1) */
    if (new_version > old_version) {
        helper_cb_info.current_operation = QCOW2_UPGRADING;
        ret = qcow2_upgrade(bs, new_version, &qcow2_amend_helper_cb,
                            &helper_cb_info, errp);
        if (ret < 0) {
            return ret;
        }
    }

    if (encryption_update) {
        QDict *amend_opts_dict;
        QCryptoBlockAmendOptions *amend_opts;

        helper_cb_info.current_operation = QCOW2_UPDATING_ENCRYPTION;
        amend_opts_dict = qcow2_extract_crypto_opts(opts, "luks", errp);
        if (!amend_opts_dict) {
            return -EINVAL;
        }
        amend_opts = block_crypto_amend_opts_init(amend_opts_dict, errp);
        qobject_unref(amend_opts_dict);
        if (!amend_opts) {
            return -EINVAL;
        }
        ret = qcrypto_block_amend_options(s->crypto,
                                          qcow2_crypto_hdr_read_func,
                                          qcow2_crypto_hdr_write_func,
                                          bs,
                                          amend_opts,
                                          force,
                                          errp);
        qapi_free_QCryptoBlockAmendOptions(amend_opts);
        if (ret < 0) {
            return ret;
        }
    }

    if (s->refcount_bits != refcount_bits) {
        int refcount_order = ctz32(refcount_bits);

        if (new_version < 3 && refcount_bits != 16) {
            error_setg(errp, "Refcount widths other than 16 bits require "
                       "compatibility level 1.1 or above (use compat=1.1 or "
                       "greater)");
            return -EINVAL;
        }

        helper_cb_info.current_operation = QCOW2_CHANGING_REFCOUNT_ORDER;
        ret = qcow2_change_refcount_order(bs, refcount_order,
                                          &qcow2_amend_helper_cb,
                                          &helper_cb_info, errp);
        if (ret < 0) {
            return ret;
        }
    }

    /* data-file-raw blocks backing files, so clear it first if requested */
    if (data_file_raw) {
        s->autoclear_features |= QCOW2_AUTOCLEAR_DATA_FILE_RAW;
    } else {
        s->autoclear_features &= ~QCOW2_AUTOCLEAR_DATA_FILE_RAW;
    }

    if (data_file) {
        g_free(s->image_data_file);
        s->image_data_file = *data_file ? g_strdup(data_file) : NULL;
    }

    ret = qcow2_update_header(bs);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to update the image header");
        return ret;
    }

    if (backing_file || backing_format) {
        if (g_strcmp0(backing_file, s->image_backing_file) ||
            g_strcmp0(backing_format, s->image_backing_format)) {
            error_setg(errp, "Cannot amend the backing file");
            error_append_hint(errp,
                              "You can use 'qemu-img rebase' instead.\n");
            return -EINVAL;
        }
    }

    if (s->use_lazy_refcounts != lazy_refcounts) {
        if (lazy_refcounts) {
            if (new_version < 3) {
                error_setg(errp, "Lazy refcounts only supported with "
                           "compatibility level 1.1 and above (use compat=1.1 "
                           "or greater)");
                return -EINVAL;
            }
            s->compatible_features |= QCOW2_COMPAT_LAZY_REFCOUNTS;
            ret = qcow2_update_header(bs);
            if (ret < 0) {
                s->compatible_features &= ~QCOW2_COMPAT_LAZY_REFCOUNTS;
                error_setg_errno(errp, -ret, "Failed to update the image header");
                return ret;
            }
            s->use_lazy_refcounts = true;
        } else {
            /* make image clean first */
            ret = qcow2_mark_clean(bs);
            if (ret < 0) {
                error_setg_errno(errp, -ret, "Failed to make the image clean");
                return ret;
            }
            /* now disallow lazy refcounts */
            s->compatible_features &= ~QCOW2_COMPAT_LAZY_REFCOUNTS;
            ret = qcow2_update_header(bs);
            if (ret < 0) {
                s->compatible_features |= QCOW2_COMPAT_LAZY_REFCOUNTS;
                error_setg_errno(errp, -ret, "Failed to update the image header");
                return ret;
            }
            s->use_lazy_refcounts = false;
        }
    }
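
    /*
     * Illustrative example (command line made up for the sake of the
     * comment): "qemu-img amend -o size=32G image.qcow2" is one way to end
     * up with a non-zero new_size here; the resize itself is then delegated
     * to blk_truncate() below.
     */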

    if (new_size) {
        BlockBackend *blk = blk_new_with_bs(bs, BLK_PERM_RESIZE, BLK_PERM_ALL,
                                            errp);
        if (!blk) {
            return -EPERM;
        }

        /*
         * Amending image options should ensure that the image has
         * exactly the given new values, so pass exact=true here.
         */
        ret = blk_truncate(blk, new_size, true, PREALLOC_MODE_OFF, 0, errp);
        blk_unref(blk);
        if (ret < 0) {
            return ret;
        }
    }

    /* Downgrade last (so unsupported features can be removed before) */
    if (new_version < old_version) {
        helper_cb_info.current_operation = QCOW2_DOWNGRADING;
        ret = qcow2_downgrade(bs, new_version, &qcow2_amend_helper_cb,
                              &helper_cb_info, errp);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}

static int coroutine_fn qcow2_co_amend(BlockDriverState *bs,
                                       BlockdevAmendOptions *opts,
                                       bool force,
                                       Error **errp)
{
    BlockdevAmendOptionsQcow2 *qopts = &opts->u.qcow2;
    BDRVQcow2State *s = bs->opaque;
    int ret = 0;

    if (qopts->encrypt) {
        if (!s->crypto) {
            error_setg(errp, "image is not encrypted, can't amend");
            return -EOPNOTSUPP;
        }

        if (qopts->encrypt->format != Q_CRYPTO_BLOCK_FORMAT_LUKS) {
            error_setg(errp,
                       "Amend can't be used to change the qcow2 encryption format");
            return -EOPNOTSUPP;
        }

        if (s->crypt_method_header != QCOW_CRYPT_LUKS) {
            error_setg(errp,
                       "Only LUKS encryption options can be amended for qcow2 with blockdev-amend");
            return -EOPNOTSUPP;
        }

        ret = qcrypto_block_amend_options(s->crypto,
                                          qcow2_crypto_hdr_read_func,
                                          qcow2_crypto_hdr_write_func,
                                          bs,
                                          qopts->encrypt,
                                          force,
                                          errp);
    }
    return ret;
}

/*
 * If offset or size are negative, respectively, they will not be included in
 * the BLOCK_IMAGE_CORRUPTED event emitted.
 * fatal will be ignored for read-only BDS; corruptions found there will always
 * be considered non-fatal.
 */
void qcow2_signal_corruption(BlockDriverState *bs, bool fatal, int64_t offset,
                             int64_t size, const char *message_format, ...)
{
    BDRVQcow2State *s = bs->opaque;
    const char *node_name;
    char *message;
    va_list ap;

    fatal = fatal && bdrv_is_writable(bs);

    if (s->signaled_corruption &&
        (!fatal || (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT)))
    {
        return;
    }

    va_start(ap, message_format);
    message = g_strdup_vprintf(message_format, ap);
    va_end(ap);

    if (fatal) {
        fprintf(stderr, "qcow2: Marking image as corrupt: %s; further "
                "corruption events will be suppressed\n", message);
    } else {
        fprintf(stderr, "qcow2: Image is corrupt: %s; further non-fatal "
                "corruption events will be suppressed\n", message);
    }

    node_name = bdrv_get_node_name(bs);
    qapi_event_send_block_image_corrupted(bdrv_get_device_name(bs),
                                          *node_name ? node_name : NULL,
                                          message, offset >= 0, offset,
                                          size >= 0, size,
                                          fatal);
    g_free(message);

    if (fatal) {
        qcow2_mark_corrupt(bs);
        bs->drv = NULL; /* make BDS unusable */
    }

    s->signaled_corruption = true;
}

#define QCOW_COMMON_OPTIONS                                         \
    {                                                               \
        .name = BLOCK_OPT_SIZE,                                     \
        .type = QEMU_OPT_SIZE,                                      \
        .help = "Virtual disk size"                                 \
    },                                                              \
    {                                                               \
        .name = BLOCK_OPT_COMPAT_LEVEL,                             \
        .type = QEMU_OPT_STRING,                                    \
        .help = "Compatibility level (v2 [0.10] or v3 [1.1])"       \
    },                                                              \
    {                                                               \
        .name = BLOCK_OPT_BACKING_FILE,                             \
        .type = QEMU_OPT_STRING,                                    \
        .help = "File name of a base image"                         \
    },                                                              \
    {                                                               \
        .name = BLOCK_OPT_BACKING_FMT,                              \
        .type = QEMU_OPT_STRING,                                    \
        .help = "Image format of the base image"                    \
    },                                                              \
    {                                                               \
        .name = BLOCK_OPT_DATA_FILE,                                \
        .type = QEMU_OPT_STRING,                                    \
        .help = "File name of an external data file"                \
    },                                                              \
    {                                                               \
        .name = BLOCK_OPT_DATA_FILE_RAW,                            \
        .type = QEMU_OPT_BOOL,                                      \
        .help = "The external data file must stay valid "           \
                "as a raw image"                                    \
    },                                                              \
    {                                                               \
        .name = BLOCK_OPT_LAZY_REFCOUNTS,                           \
        .type = QEMU_OPT_BOOL,                                      \
        .help = "Postpone refcount updates",                        \
        .def_value_str = "off"                                      \
    },                                                              \
    {                                                               \
        .name = BLOCK_OPT_REFCOUNT_BITS,                            \
        .type = QEMU_OPT_NUMBER,                                    \
        .help = "Width of a reference count entry in bits",         \
        .def_value_str = "16"                                       \
    }

static QemuOptsList qcow2_create_opts = {
    .name = "qcow2-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(qcow2_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_ENCRYPT,
            .type = QEMU_OPT_BOOL,
            .help = "Encrypt the image with format 'aes'. (Deprecated "
                    "in favor of " BLOCK_OPT_ENCRYPT_FORMAT "=aes)",
        },
        {
            .name = BLOCK_OPT_ENCRYPT_FORMAT,
            .type = QEMU_OPT_STRING,
            .help = "Encrypt the image, format choices: 'aes', 'luks'",
        },
        BLOCK_CRYPTO_OPT_DEF_KEY_SECRET("encrypt.",
            "ID of secret providing qcow AES key or LUKS passphrase"),
        BLOCK_CRYPTO_OPT_DEF_LUKS_CIPHER_ALG("encrypt."),
        BLOCK_CRYPTO_OPT_DEF_LUKS_CIPHER_MODE("encrypt."),
        BLOCK_CRYPTO_OPT_DEF_LUKS_IVGEN_ALG("encrypt."),
        BLOCK_CRYPTO_OPT_DEF_LUKS_IVGEN_HASH_ALG("encrypt."),
        BLOCK_CRYPTO_OPT_DEF_LUKS_HASH_ALG("encrypt."),
        BLOCK_CRYPTO_OPT_DEF_LUKS_ITER_TIME("encrypt."),
        {
            .name = BLOCK_OPT_CLUSTER_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "qcow2 cluster size",
            .def_value_str = stringify(DEFAULT_CLUSTER_SIZE)
        },
        {
            .name = BLOCK_OPT_EXTL2,
            .type = QEMU_OPT_BOOL,
            .help = "Extended L2 tables",
            .def_value_str = "off"
        },
        {
            .name = BLOCK_OPT_PREALLOC,
            .type = QEMU_OPT_STRING,
            .help = "Preallocation mode (allowed values: off, "
                    "metadata, falloc, full)"
        },
        {
            .name = BLOCK_OPT_COMPRESSION_TYPE,
            .type = QEMU_OPT_STRING,
            .help = "Compression method used for image cluster "
                    "compression",
            .def_value_str = "zlib"
        },
        QCOW_COMMON_OPTIONS,
        { /* end of list */ }
    }
};
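
/*
 * Purely illustrative: the option names above are the ones accepted on the
 * qemu-img command line, e.g.
 *   qemu-img create -f qcow2 -o compat=v3,lazy_refcounts=on,cluster_size=64k \
 *       test.qcow2 10G
 * (the file name and sizes are made up for the example).
 */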

static QemuOptsList qcow2_amend_opts = {
    .name = "qcow2-amend-opts",
    .head = QTAILQ_HEAD_INITIALIZER(qcow2_amend_opts.head),
    .desc = {
        BLOCK_CRYPTO_OPT_DEF_LUKS_STATE("encrypt."),
        BLOCK_CRYPTO_OPT_DEF_LUKS_KEYSLOT("encrypt."),
        BLOCK_CRYPTO_OPT_DEF_LUKS_OLD_SECRET("encrypt."),
        BLOCK_CRYPTO_OPT_DEF_LUKS_NEW_SECRET("encrypt."),
        BLOCK_CRYPTO_OPT_DEF_LUKS_ITER_TIME("encrypt."),
        QCOW_COMMON_OPTIONS,
        { /* end of list */ }
    }
};

static const char *const qcow2_strong_runtime_opts[] = {
    "encrypt." BLOCK_CRYPTO_OPT_QCOW_KEY_SECRET,

    NULL
};

BlockDriver bdrv_qcow2 = {
    .format_name = "qcow2",
    .instance_size = sizeof(BDRVQcow2State),
    .bdrv_probe = qcow2_probe,
    .bdrv_open = qcow2_open,
    .bdrv_close = qcow2_close,
    .bdrv_reopen_prepare = qcow2_reopen_prepare,
    .bdrv_reopen_commit = qcow2_reopen_commit,
    .bdrv_reopen_commit_post = qcow2_reopen_commit_post,
    .bdrv_reopen_abort = qcow2_reopen_abort,
    .bdrv_join_options = qcow2_join_options,
    .bdrv_child_perm = bdrv_default_perms,
    .bdrv_co_create_opts = qcow2_co_create_opts,
    .bdrv_co_create = qcow2_co_create,
    .bdrv_has_zero_init = qcow2_has_zero_init,
    .bdrv_co_block_status = qcow2_co_block_status,

    .bdrv_co_preadv_part = qcow2_co_preadv_part,
    .bdrv_co_pwritev_part = qcow2_co_pwritev_part,
    .bdrv_co_flush_to_os = qcow2_co_flush_to_os,

    .bdrv_co_pwrite_zeroes = qcow2_co_pwrite_zeroes,
    .bdrv_co_pdiscard = qcow2_co_pdiscard,
    .bdrv_co_copy_range_from = qcow2_co_copy_range_from,
    .bdrv_co_copy_range_to = qcow2_co_copy_range_to,
    .bdrv_co_truncate = qcow2_co_truncate,
    .bdrv_co_pwritev_compressed_part = qcow2_co_pwritev_compressed_part,
    .bdrv_make_empty = qcow2_make_empty,

    .bdrv_snapshot_create = qcow2_snapshot_create,
    .bdrv_snapshot_goto = qcow2_snapshot_goto,
    .bdrv_snapshot_delete = qcow2_snapshot_delete,
    .bdrv_snapshot_list = qcow2_snapshot_list,
    .bdrv_snapshot_load_tmp = qcow2_snapshot_load_tmp,
    .bdrv_measure = qcow2_measure,
    .bdrv_co_get_info = qcow2_co_get_info,
    .bdrv_get_specific_info = qcow2_get_specific_info,

    .bdrv_co_save_vmstate = qcow2_co_save_vmstate,
    .bdrv_co_load_vmstate = qcow2_co_load_vmstate,

    .is_format = true,
    .supports_backing = true,
    .bdrv_change_backing_file = qcow2_change_backing_file,

    .bdrv_refresh_limits = qcow2_refresh_limits,
    .bdrv_co_invalidate_cache = qcow2_co_invalidate_cache,
    .bdrv_inactivate = qcow2_inactivate,

    .create_opts = &qcow2_create_opts,
    .amend_opts = &qcow2_amend_opts,
    .strong_runtime_opts = qcow2_strong_runtime_opts,
    .mutable_opts = mutable_opts,
    .bdrv_co_check = qcow2_co_check,
    .bdrv_amend_options = qcow2_amend_options,
    .bdrv_co_amend = qcow2_co_amend,

    .bdrv_detach_aio_context = qcow2_detach_aio_context,
    .bdrv_attach_aio_context = qcow2_attach_aio_context,

    .bdrv_supports_persistent_dirty_bitmap =
        qcow2_supports_persistent_dirty_bitmap,
    .bdrv_co_can_store_new_dirty_bitmap = qcow2_co_can_store_new_dirty_bitmap,
    .bdrv_co_remove_persistent_dirty_bitmap =
        qcow2_co_remove_persistent_dirty_bitmap,
};

static void bdrv_qcow2_init(void)
{
    bdrv_register(&bdrv_qcow2);
}

block_init(bdrv_qcow2_init);