/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"

#include "block/qdict.h"
#include "system/block-backend.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qcow2.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qapi/qapi-events-block-core.h"
#include "qobject/qdict.h"
#include "qobject/qstring.h"
#include "trace.h"
#include "qemu/option_int.h"
#include "qemu/cutils.h"
#include "qemu/bswap.h"
#include "qemu/memalign.h"
#include "qapi/qobject-input-visitor.h"
#include "qapi/qapi-visit-block-core.h"
#include "crypto.h"
#include "block/aio_task.h"
#include "block/dirty-bitmap.h"

/*
  Differences with QCOW:

  - Support for multiple incremental snapshots.
  - Memory management by reference counts.
  - Clusters with a reference count of one have the QCOW_OFLAG_COPIED bit
    set to optimize write performance.
  - Size of compressed clusters is stored in sectors to reduce bit usage
    in the cluster offsets.
  - Support for storing additional data (such as the VM state) in the
    snapshots.
  - If a backing store is used, the cluster size is not constrained
    (could be backported to QCOW).
  - L2 tables always have a size of one cluster.
*/
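/*
 * Editor's note (illustrative sketch, not part of the original driver):
 * the "reference count of one" optimisation listed above works because a
 * cluster whose refcount is exactly 1 cannot be shared with any snapshot,
 * so it may be rewritten in place.  Assuming the usual qcow2 layout where
 * QCOW_OFLAG_COPIED is the top bit of an L1/L2 entry, a reader's check
 * looks roughly like:
 *
 *     bool can_write_in_place = l2_entry & QCOW_OFLAG_COPIED;
 *
 * If the bit is clear, the cluster has to be copied (COW) before writing.
 */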


typedef struct {
    uint32_t magic;
    uint32_t len;
} QEMU_PACKED QCowExtension;

#define QCOW2_EXT_MAGIC_END 0
#define QCOW2_EXT_MAGIC_BACKING_FORMAT 0xe2792aca
#define QCOW2_EXT_MAGIC_FEATURE_TABLE 0x6803f857
#define QCOW2_EXT_MAGIC_CRYPTO_HEADER 0x0537be77
#define QCOW2_EXT_MAGIC_BITMAPS 0x23852875
#define QCOW2_EXT_MAGIC_DATA_FILE 0x44415441

static int coroutine_fn
qcow2_co_preadv_compressed(BlockDriverState *bs,
                           uint64_t l2_entry,
                           uint64_t offset,
                           uint64_t bytes,
                           QEMUIOVector *qiov,
                           size_t qiov_offset);

static int qcow2_probe(const uint8_t *buf, int buf_size, const char *filename)
{
    const QCowHeader *cow_header = (const void *)buf;

    if (buf_size >= sizeof(QCowHeader) &&
        be32_to_cpu(cow_header->magic) == QCOW_MAGIC &&
        be32_to_cpu(cow_header->version) >= 2)
        return 100;
    else
        return 0;
}


static int GRAPH_RDLOCK
qcow2_crypto_hdr_read_func(QCryptoBlock *block, size_t offset,
                           uint8_t *buf, size_t buflen,
                           void *opaque, Error **errp)
{
    BlockDriverState *bs = opaque;
    BDRVQcow2State *s = bs->opaque;
    ssize_t ret;

    if ((offset + buflen) > s->crypto_header.length) {
        error_setg(errp, "Request for data outside of extension header");
        return -1;
    }

    ret = bdrv_pread(bs->file, s->crypto_header.offset + offset, buflen, buf,
                     0);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not read encryption header");
        return -1;
    }
    return 0;
}


static int coroutine_fn GRAPH_RDLOCK
qcow2_crypto_hdr_init_func(QCryptoBlock *block, size_t headerlen, void *opaque,
                           Error **errp)
{
    BlockDriverState *bs = opaque;
    BDRVQcow2State *s = bs->opaque;
    int64_t ret;
    int64_t clusterlen;

    ret = qcow2_alloc_clusters(bs, headerlen);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "Cannot allocate cluster for LUKS header size %zu",
                         headerlen);
        return -1;
    }

    s->crypto_header.length = headerlen;
    s->crypto_header.offset = ret;

    /*
     * Zero fill all space in cluster so it has predictable
     * content, as we may not initialize some regions of the
     * header (eg only 1 out of 8 key slots will be initialized)
     */
    clusterlen = size_to_clusters(s, headerlen) * s->cluster_size;
    assert(qcow2_pre_write_overlap_check(bs, 0, ret, clusterlen, false) == 0);
    ret = bdrv_co_pwrite_zeroes(bs->file, ret, clusterlen, 0);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not zero fill encryption header");
        return -1;
    }

    return 0;
}


/* The graph lock must be held when called in coroutine context */
static int coroutine_mixed_fn GRAPH_RDLOCK
qcow2_crypto_hdr_write_func(QCryptoBlock *block, size_t offset,
                            const uint8_t *buf, size_t buflen,
                            void *opaque, Error **errp)
{
    BlockDriverState *bs = opaque;
    BDRVQcow2State *s = bs->opaque;
    ssize_t ret;

    if ((offset + buflen) > s->crypto_header.length) {
        error_setg(errp, "Request for data outside of extension header");
        return -1;
    }

    ret = bdrv_pwrite(bs->file, s->crypto_header.offset + offset, buflen, buf,
                      0);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not write encryption header");
        return -1;
    }
    return 0;
}

static QDict*
qcow2_extract_crypto_opts(QemuOpts *opts, const char *fmt, Error **errp)
{
    QDict *cryptoopts_qdict;
    QDict *opts_qdict;

    /* Extract "encrypt." options into a qdict */
    opts_qdict = qemu_opts_to_qdict(opts, NULL);
    qdict_extract_subqdict(opts_qdict, &cryptoopts_qdict, "encrypt.");
    qobject_unref(opts_qdict);
    qdict_put_str(cryptoopts_qdict, "format", fmt);
    return cryptoopts_qdict;
}

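/*
 * Illustrative example (editor's addition, based only on the code above):
 * for runtime options such as
 *
 *     encrypt.format=luks,encrypt.key-secret=sec0
 *
 * qcow2_extract_crypto_opts() returns a QDict roughly equivalent to
 *
 *     { "key-secret": "sec0", "format": "luks" }
 *
 * i.e. the "encrypt." prefix is stripped by qdict_extract_subqdict() and
 * the caller-provided format name is added under the "format" key.
 */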
/*
 * read qcow2 extension and fill bs
 * start reading from start_offset
 * finish reading upon magic of value 0 or when end_offset reached
 * unknown magic is skipped (future extension this version knows nothing about)
 * return 0 upon success, non-0 otherwise
 */
static int coroutine_fn GRAPH_RDLOCK
qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset,
                      uint64_t end_offset, void **p_feature_table,
                      int flags, bool *need_update_header, Error **errp)
{
    BDRVQcow2State *s = bs->opaque;
    QCowExtension ext;
    uint64_t offset;
    int ret;
    Qcow2BitmapHeaderExt bitmaps_ext;

    if (need_update_header != NULL) {
        *need_update_header = false;
    }

#ifdef DEBUG_EXT
    printf("qcow2_read_extensions: start=%ld end=%ld\n", start_offset, end_offset);
#endif
    offset = start_offset;
    while (offset < end_offset) {

#ifdef DEBUG_EXT
        /* Sanity check */
        if (offset > s->cluster_size)
            printf("qcow2_read_extension: suspicious offset %lu\n", offset);

        printf("attempting to read extended header in offset %lu\n", offset);
#endif

        ret = bdrv_co_pread(bs->file, offset, sizeof(ext), &ext, 0);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "qcow2_read_extension: ERROR: "
                             "pread fail from offset %" PRIu64, offset);
            return 1;
        }
        ext.magic = be32_to_cpu(ext.magic);
        ext.len = be32_to_cpu(ext.len);
        offset += sizeof(ext);
#ifdef DEBUG_EXT
        printf("ext.magic = 0x%x\n", ext.magic);
#endif
        if (offset > end_offset || ext.len > end_offset - offset) {
            error_setg(errp, "Header extension too large");
            return -EINVAL;
        }

        switch (ext.magic) {
        case QCOW2_EXT_MAGIC_END:
            return 0;

        case QCOW2_EXT_MAGIC_BACKING_FORMAT:
            if (ext.len >= sizeof(bs->backing_format)) {
                error_setg(errp, "ERROR: ext_backing_format: len=%" PRIu32
                           " too large (>=%zu)", ext.len,
                           sizeof(bs->backing_format));
                return 2;
            }
            ret = bdrv_co_pread(bs->file, offset, ext.len, bs->backing_format, 0);
            if (ret < 0) {
                error_setg_errno(errp, -ret, "ERROR: ext_backing_format: "
                                 "Could not read format name");
                return 3;
            }
            bs->backing_format[ext.len] = '\0';
            s->image_backing_format = g_strdup(bs->backing_format);
#ifdef DEBUG_EXT
            printf("Qcow2: Got format extension %s\n", bs->backing_format);
#endif
            break;

        case QCOW2_EXT_MAGIC_FEATURE_TABLE:
            if (p_feature_table != NULL) {
                void *feature_table = g_malloc0(ext.len + 2 * sizeof(Qcow2Feature));
                ret = bdrv_co_pread(bs->file, offset, ext.len, feature_table, 0);
                if (ret < 0) {
                    error_setg_errno(errp, -ret, "ERROR: ext_feature_table: "
                                     "Could not read table");
                    g_free(feature_table);
                    return ret;
                }

                *p_feature_table = feature_table;
            }
            break;

        case QCOW2_EXT_MAGIC_CRYPTO_HEADER: {
            unsigned int cflags = 0;
            if (s->crypt_method_header != QCOW_CRYPT_LUKS) {
                error_setg(errp, "CRYPTO header extension only "
                           "expected with LUKS encryption method");
                return -EINVAL;
            }
            if (ext.len != sizeof(Qcow2CryptoHeaderExtension)) {
                error_setg(errp, "CRYPTO header extension size %u, "
                           "but expected size %zu", ext.len,
                           sizeof(Qcow2CryptoHeaderExtension));
                return -EINVAL;
            }

            ret = bdrv_co_pread(bs->file, offset, ext.len, &s->crypto_header, 0);
            if (ret < 0) {
                error_setg_errno(errp, -ret,
                                 "Unable to read CRYPTO header extension");
                return ret;
            }
            s->crypto_header.offset = be64_to_cpu(s->crypto_header.offset);
            s->crypto_header.length = be64_to_cpu(s->crypto_header.length);

            if ((s->crypto_header.offset % s->cluster_size) != 0) {
                error_setg(errp, "Encryption header offset '%" PRIu64 "' is "
                           "not a multiple of cluster size '%u'",
                           s->crypto_header.offset, s->cluster_size);
                return -EINVAL;
            }

            if (flags & BDRV_O_NO_IO) {
                cflags |= QCRYPTO_BLOCK_OPEN_NO_IO;
            }
            s->crypto = qcrypto_block_open(s->crypto_opts, "encrypt.",
                                           qcow2_crypto_hdr_read_func,
                                           bs, cflags, errp);
            if (!s->crypto) {
                return -EINVAL;
            }
        }   break;

        case QCOW2_EXT_MAGIC_BITMAPS:
            if (ext.len != sizeof(bitmaps_ext)) {
                error_setg(errp, "bitmaps_ext: "
                           "Invalid extension length");
                return -EINVAL;
            }

            if (!(s->autoclear_features & QCOW2_AUTOCLEAR_BITMAPS)) {
                if (s->qcow_version < 3) {
                    /* Let's be a bit more specific */
                    warn_report("This qcow2 v2 image contains bitmaps, but "
                                "they may have been modified by a program "
                                "without persistent bitmap support; so now "
                                "they must all be considered inconsistent");
                } else {
                    warn_report("a program lacking bitmap support "
                                "modified this file, so all bitmaps are now "
                                "considered inconsistent");
                }
                error_printf("Some clusters may be leaked, "
                             "run 'qemu-img check -r' on the image "
                             "file to fix.");
                if (need_update_header != NULL) {
                    /* Updating is needed to drop invalid bitmap extension. */
                    *need_update_header = true;
                }
                break;
            }

            ret = bdrv_co_pread(bs->file, offset, ext.len, &bitmaps_ext, 0);
            if (ret < 0) {
                error_setg_errno(errp, -ret, "bitmaps_ext: "
                                 "Could not read ext header");
                return ret;
            }

            if (bitmaps_ext.reserved32 != 0) {
                error_setg(errp, "bitmaps_ext: "
                           "Reserved field is not zero");
                return -EINVAL;
            }

            bitmaps_ext.nb_bitmaps = be32_to_cpu(bitmaps_ext.nb_bitmaps);
            bitmaps_ext.bitmap_directory_size =
                be64_to_cpu(bitmaps_ext.bitmap_directory_size);
            bitmaps_ext.bitmap_directory_offset =
                be64_to_cpu(bitmaps_ext.bitmap_directory_offset);

            if (bitmaps_ext.nb_bitmaps > QCOW2_MAX_BITMAPS) {
                error_setg(errp,
                           "bitmaps_ext: Image has %" PRIu32 " bitmaps, "
                           "exceeding the QEMU supported maximum of %d",
                           bitmaps_ext.nb_bitmaps, QCOW2_MAX_BITMAPS);
                return -EINVAL;
            }

            if (bitmaps_ext.nb_bitmaps == 0) {
                error_setg(errp, "found bitmaps extension with zero bitmaps");
                return -EINVAL;
            }

            if (offset_into_cluster(s, bitmaps_ext.bitmap_directory_offset)) {
                error_setg(errp, "bitmaps_ext: "
                           "invalid bitmap directory offset");
                return -EINVAL;
            }

            if (bitmaps_ext.bitmap_directory_size >
                QCOW2_MAX_BITMAP_DIRECTORY_SIZE) {
                error_setg(errp, "bitmaps_ext: "
                           "bitmap directory size (%" PRIu64 ") exceeds "
                           "the maximum supported size (%d)",
                           bitmaps_ext.bitmap_directory_size,
                           QCOW2_MAX_BITMAP_DIRECTORY_SIZE);
                return -EINVAL;
            }

            s->nb_bitmaps = bitmaps_ext.nb_bitmaps;
            s->bitmap_directory_offset =
                bitmaps_ext.bitmap_directory_offset;
            s->bitmap_directory_size =
                bitmaps_ext.bitmap_directory_size;

#ifdef DEBUG_EXT
            printf("Qcow2: Got bitmaps extension: "
                   "offset=%" PRIu64 " nb_bitmaps=%" PRIu32 "\n",
                   s->bitmap_directory_offset, s->nb_bitmaps);
#endif
            break;

        case QCOW2_EXT_MAGIC_DATA_FILE:
        {
            s->image_data_file = g_malloc0(ext.len + 1);
            ret = bdrv_co_pread(bs->file, offset, ext.len, s->image_data_file, 0);
            if (ret < 0) {
                error_setg_errno(errp, -ret,
                                 "ERROR: Could not read data file name");
                return ret;
            }
#ifdef DEBUG_EXT
            printf("Qcow2: Got external data file %s\n", s->image_data_file);
#endif
            break;
        }

        default:
            /* unknown magic - save it in case we need to rewrite the header */
            /* If you add a new feature, make sure to also update the fast
             * path of qcow2_make_empty() to deal with it. */
            {
                Qcow2UnknownHeaderExtension *uext;

                uext = g_malloc0(sizeof(*uext) + ext.len);
                uext->magic = ext.magic;
                uext->len = ext.len;
                QLIST_INSERT_HEAD(&s->unknown_header_ext, uext, next);

                ret = bdrv_co_pread(bs->file, offset, uext->len, uext->data, 0);
                if (ret < 0) {
                    error_setg_errno(errp, -ret, "ERROR: unknown extension: "
                                     "Could not read data");
                    return ret;
                }
            }
            break;
        }

        offset += ((ext.len + 7) & ~7);
    }

    return 0;
}

static void cleanup_unknown_header_ext(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;
    Qcow2UnknownHeaderExtension *uext, *next;

    QLIST_FOREACH_SAFE(uext, &s->unknown_header_ext, next, next) {
        QLIST_REMOVE(uext, next);
        g_free(uext);
    }
}

static void report_unsupported_feature(Error **errp, Qcow2Feature *table,
                                       uint64_t mask)
{
    g_autoptr(GString) features = g_string_sized_new(60);

    while (table && table->name[0] != '\0') {
        if (table->type == QCOW2_FEAT_TYPE_INCOMPATIBLE) {
            if (mask & (1ULL << table->bit)) {
                if (features->len > 0) {
                    g_string_append(features, ", ");
                }
                g_string_append_printf(features, "%.46s", table->name);
                mask &= ~(1ULL << table->bit);
            }
        }
        table++;
    }

    if (mask) {
        if (features->len > 0) {
            g_string_append(features, ", ");
        }
        g_string_append_printf(features,
                               "Unknown incompatible feature: %" PRIx64, mask);
    }

    error_setg(errp, "Unsupported qcow2 feature(s): %s", features->str);
}

/*
 * Sets the dirty bit and flushes afterwards if necessary.
 *
 * The incompatible_features bit is only set if the image file header was
 * updated successfully.  Therefore it is not required to check the return
 * value of this function.
 */
int qcow2_mark_dirty(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t val;
    int ret;

    assert(s->qcow_version >= 3);

    if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) {
        return 0; /* already dirty */
    }

    val = cpu_to_be64(s->incompatible_features | QCOW2_INCOMPAT_DIRTY);
    ret = bdrv_pwrite_sync(bs->file,
                           offsetof(QCowHeader, incompatible_features),
                           sizeof(val), &val, 0);
    if (ret < 0) {
        return ret;
    }

    /* Only treat image as dirty if the header was updated successfully */
    s->incompatible_features |= QCOW2_INCOMPAT_DIRTY;
    return 0;
}

/*
 * Clears the dirty bit and flushes before if necessary.  Only call this
 * function when there are no pending requests, it does not guard against
 * concurrent requests dirtying the image.
 */
static int GRAPH_RDLOCK qcow2_mark_clean(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;

    if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) {
        int ret;

        s->incompatible_features &= ~QCOW2_INCOMPAT_DIRTY;

        ret = qcow2_flush_caches(bs);
        if (ret < 0) {
            return ret;
        }

        return qcow2_update_header(bs);
    }
    return 0;
}

/*
 * Marks the image as corrupt.
 */
int qcow2_mark_corrupt(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;

    s->incompatible_features |= QCOW2_INCOMPAT_CORRUPT;
    return qcow2_update_header(bs);
}
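/*
 * Editor's sketch (illustrative only, assumptions labelled): the typical
 * lifecycle of the dirty bit when lazy refcounts are enabled is
 *
 *     qcow2_mark_dirty(bs);    // before refcount updates are postponed
 *     ... guest writes; on-disk refcounts may be stale ...
 *     qcow2_mark_clean(bs);    // flushes caches first, then clears the bit
 *
 * If the process crashes while the bit is set, the image is repaired on
 * the next read-write open (see the BDRV_FIX_ERRORS | BDRV_FIX_LEAKS check
 * in qcow2_do_open() below).
 */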

/*
 * Marks the image as consistent, i.e., unsets the corrupt bit, and flushes
 * before if necessary.
 */
static int coroutine_fn GRAPH_RDLOCK
qcow2_mark_consistent(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;

    if (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT) {
        int ret = qcow2_flush_caches(bs);
        if (ret < 0) {
            return ret;
        }

        s->incompatible_features &= ~QCOW2_INCOMPAT_CORRUPT;
        return qcow2_update_header(bs);
    }
    return 0;
}

static void qcow2_add_check_result(BdrvCheckResult *out,
                                   const BdrvCheckResult *src,
                                   bool set_allocation_info)
{
    out->corruptions += src->corruptions;
    out->leaks += src->leaks;
    out->check_errors += src->check_errors;
    out->corruptions_fixed += src->corruptions_fixed;
    out->leaks_fixed += src->leaks_fixed;

    if (set_allocation_info) {
        out->image_end_offset = src->image_end_offset;
        out->bfi = src->bfi;
    }
}

static int coroutine_fn GRAPH_RDLOCK
qcow2_co_check_locked(BlockDriverState *bs, BdrvCheckResult *result,
                      BdrvCheckMode fix)
{
    BdrvCheckResult snapshot_res = {};
    BdrvCheckResult refcount_res = {};
    int ret;

    memset(result, 0, sizeof(*result));

    ret = qcow2_check_read_snapshot_table(bs, &snapshot_res, fix);
    if (ret < 0) {
        qcow2_add_check_result(result, &snapshot_res, false);
        return ret;
    }

    ret = qcow2_check_refcounts(bs, &refcount_res, fix);
    qcow2_add_check_result(result, &refcount_res, true);
    if (ret < 0) {
        qcow2_add_check_result(result, &snapshot_res, false);
        return ret;
    }

    ret = qcow2_check_fix_snapshot_table(bs, &snapshot_res, fix);
    qcow2_add_check_result(result, &snapshot_res, false);
    if (ret < 0) {
        return ret;
    }

    if (fix && result->check_errors == 0 && result->corruptions == 0) {
        ret = qcow2_mark_clean(bs);
        if (ret < 0) {
            return ret;
        }
        return qcow2_mark_consistent(bs);
    }
    return ret;
}

static int coroutine_fn GRAPH_RDLOCK
qcow2_co_check(BlockDriverState *bs, BdrvCheckResult *result,
               BdrvCheckMode fix)
{
    BDRVQcow2State *s = bs->opaque;
    int ret;

    qemu_co_mutex_lock(&s->lock);
    ret = qcow2_co_check_locked(bs, result, fix);
    qemu_co_mutex_unlock(&s->lock);
    return ret;
}

int qcow2_validate_table(BlockDriverState *bs, uint64_t offset,
                         uint64_t entries, size_t entry_len,
                         int64_t max_size_bytes, const char *table_name,
                         Error **errp)
{
    BDRVQcow2State *s = bs->opaque;

    if (entries > max_size_bytes / entry_len) {
        error_setg(errp, "%s too large", table_name);
        return -EFBIG;
    }

    /* Use signed INT64_MAX as the maximum even for uint64_t header fields,
     * because values will be passed to qemu functions taking int64_t. */
    if ((INT64_MAX - entries * entry_len < offset) ||
        (offset_into_cluster(s, offset) != 0)) {
        error_setg(errp, "%s offset invalid", table_name);
        return -EINVAL;
    }

    return 0;
}

static const char *const mutable_opts[] = {
    QCOW2_OPT_LAZY_REFCOUNTS,
    QCOW2_OPT_DISCARD_REQUEST,
    QCOW2_OPT_DISCARD_SNAPSHOT,
    QCOW2_OPT_DISCARD_OTHER,
    QCOW2_OPT_DISCARD_NO_UNREF,
    QCOW2_OPT_OVERLAP,
    QCOW2_OPT_OVERLAP_TEMPLATE,
    QCOW2_OPT_OVERLAP_MAIN_HEADER,
    QCOW2_OPT_OVERLAP_ACTIVE_L1,
    QCOW2_OPT_OVERLAP_ACTIVE_L2,
    QCOW2_OPT_OVERLAP_REFCOUNT_TABLE,
    QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK,
    QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE,
    QCOW2_OPT_OVERLAP_INACTIVE_L1,
    QCOW2_OPT_OVERLAP_INACTIVE_L2,
    QCOW2_OPT_OVERLAP_BITMAP_DIRECTORY,
    QCOW2_OPT_CACHE_SIZE,
    QCOW2_OPT_L2_CACHE_SIZE,
    QCOW2_OPT_L2_CACHE_ENTRY_SIZE,
    QCOW2_OPT_REFCOUNT_CACHE_SIZE,
    QCOW2_OPT_CACHE_CLEAN_INTERVAL,
    NULL
};

static QemuOptsList qcow2_runtime_opts = {
    .name = "qcow2",
    .head = QTAILQ_HEAD_INITIALIZER(qcow2_runtime_opts.head),
    .desc = {
        {
            .name = QCOW2_OPT_LAZY_REFCOUNTS,
            .type = QEMU_OPT_BOOL,
            .help = "Postpone refcount updates",
        },
        {
            .name = QCOW2_OPT_DISCARD_REQUEST,
            .type = QEMU_OPT_BOOL,
            .help = "Pass guest discard requests to the layer below",
        },
        {
            .name = QCOW2_OPT_DISCARD_SNAPSHOT,
            .type = QEMU_OPT_BOOL,
            .help = "Generate discard requests when snapshot related space "
                    "is freed",
        },
        {
            .name = QCOW2_OPT_DISCARD_OTHER,
            .type = QEMU_OPT_BOOL,
            .help = "Generate discard requests when other clusters are freed",
        },
        {
            .name = QCOW2_OPT_DISCARD_NO_UNREF,
            .type = QEMU_OPT_BOOL,
            .help = "Do not unreference discarded clusters",
        },
        {
            .name = QCOW2_OPT_OVERLAP,
            .type = QEMU_OPT_STRING,
            .help = "Selects which overlap checks to perform from a range of "
                    "templates (none, constant, cached, all)",
        },
        {
            .name = QCOW2_OPT_OVERLAP_TEMPLATE,
            .type = QEMU_OPT_STRING,
            .help = "Selects which overlap checks to perform from a range of "
                    "templates (none, constant, cached, all)",
        },
        {
            .name = QCOW2_OPT_OVERLAP_MAIN_HEADER,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into the main qcow2 header",
        },
        {
            .name = QCOW2_OPT_OVERLAP_ACTIVE_L1,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into the active L1 table",
        },
        {
            .name = QCOW2_OPT_OVERLAP_ACTIVE_L2,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into an active L2 table",
        },
        {
            .name = QCOW2_OPT_OVERLAP_REFCOUNT_TABLE,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into the refcount table",
        },
        {
            .name = QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into a refcount block",
        },
        {
            .name = QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into the snapshot table",
        },
        {
            .name = QCOW2_OPT_OVERLAP_INACTIVE_L1,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into an inactive L1 table",
        },
        {
            .name = QCOW2_OPT_OVERLAP_INACTIVE_L2,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into an inactive L2 table",
        },
        {
            .name = QCOW2_OPT_OVERLAP_BITMAP_DIRECTORY,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into the bitmap directory",
        },
        {
            .name = QCOW2_OPT_CACHE_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Maximum combined metadata (L2 tables and refcount blocks) "
                    "cache size",
        },
        {
            .name = QCOW2_OPT_L2_CACHE_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Maximum L2 table cache size",
        },
        {
            .name = QCOW2_OPT_L2_CACHE_ENTRY_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Size of each entry in the L2 cache",
        },
        {
            .name = QCOW2_OPT_REFCOUNT_CACHE_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Maximum refcount block cache size",
        },
        {
            .name = QCOW2_OPT_CACHE_CLEAN_INTERVAL,
            .type = QEMU_OPT_NUMBER,
            .help = "Clean unused cache entries after this time (in seconds)",
        },
        BLOCK_CRYPTO_OPT_DEF_KEY_SECRET("encrypt.",
            "ID of secret providing qcow2 AES key or LUKS passphrase"),
        { /* end of list */ }
    },
};

static const char *overlap_bool_option_names[QCOW2_OL_MAX_BITNR] = {
    [QCOW2_OL_MAIN_HEADER_BITNR] = QCOW2_OPT_OVERLAP_MAIN_HEADER,
    [QCOW2_OL_ACTIVE_L1_BITNR] = QCOW2_OPT_OVERLAP_ACTIVE_L1,
    [QCOW2_OL_ACTIVE_L2_BITNR] = QCOW2_OPT_OVERLAP_ACTIVE_L2,
    [QCOW2_OL_REFCOUNT_TABLE_BITNR] = QCOW2_OPT_OVERLAP_REFCOUNT_TABLE,
    [QCOW2_OL_REFCOUNT_BLOCK_BITNR] = QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK,
    [QCOW2_OL_SNAPSHOT_TABLE_BITNR] = QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE,
    [QCOW2_OL_INACTIVE_L1_BITNR] = QCOW2_OPT_OVERLAP_INACTIVE_L1,
    [QCOW2_OL_INACTIVE_L2_BITNR] = QCOW2_OPT_OVERLAP_INACTIVE_L2,
    [QCOW2_OL_BITMAP_DIRECTORY_BITNR] = QCOW2_OPT_OVERLAP_BITMAP_DIRECTORY,
};

static void cache_clean_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;
    BDRVQcow2State *s = bs->opaque;
    qcow2_cache_clean_unused(s->l2_table_cache);
    qcow2_cache_clean_unused(s->refcount_block_cache);
    timer_mod(s->cache_clean_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
              (int64_t) s->cache_clean_interval * 1000);
}

static void cache_clean_timer_init(BlockDriverState *bs, AioContext *context)
{
    BDRVQcow2State *s = bs->opaque;
    if (s->cache_clean_interval > 0) {
        s->cache_clean_timer =
            aio_timer_new_with_attrs(context, QEMU_CLOCK_VIRTUAL,
                                     SCALE_MS, QEMU_TIMER_ATTR_EXTERNAL,
                                     cache_clean_timer_cb, bs);
        timer_mod(s->cache_clean_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                  (int64_t) s->cache_clean_interval * 1000);
    }
}

static void cache_clean_timer_del(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;
    if (s->cache_clean_timer) {
        timer_free(s->cache_clean_timer);
        s->cache_clean_timer = NULL;
    }
}

static void qcow2_detach_aio_context(BlockDriverState *bs)
{
    cache_clean_timer_del(bs);
}

static void qcow2_attach_aio_context(BlockDriverState *bs,
                                     AioContext *new_context)
{
    cache_clean_timer_init(bs, new_context);
}

static bool read_cache_sizes(BlockDriverState *bs, QemuOpts *opts,
                             uint64_t *l2_cache_size,
                             uint64_t *l2_cache_entry_size,
                             uint64_t *refcount_cache_size, Error **errp)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t combined_cache_size, l2_cache_max_setting;
    bool l2_cache_size_set, refcount_cache_size_set, combined_cache_size_set;
    bool l2_cache_entry_size_set;
    int min_refcount_cache = MIN_REFCOUNT_CACHE_SIZE * s->cluster_size;
    uint64_t virtual_disk_size = bs->total_sectors * BDRV_SECTOR_SIZE;
    uint64_t max_l2_entries = DIV_ROUND_UP(virtual_disk_size, s->cluster_size);
    /* An L2 table is always one cluster in size so the max cache size
     * should be a multiple of the cluster size. */
    uint64_t max_l2_cache = ROUND_UP(max_l2_entries * l2_entry_size(s),
                                     s->cluster_size);

    combined_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_CACHE_SIZE);
    l2_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_L2_CACHE_SIZE);
    refcount_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_REFCOUNT_CACHE_SIZE);
    l2_cache_entry_size_set = qemu_opt_get(opts, QCOW2_OPT_L2_CACHE_ENTRY_SIZE);

    combined_cache_size = qemu_opt_get_size(opts, QCOW2_OPT_CACHE_SIZE, 0);
    l2_cache_max_setting = qemu_opt_get_size(opts, QCOW2_OPT_L2_CACHE_SIZE,
                                             DEFAULT_L2_CACHE_MAX_SIZE);
    *refcount_cache_size = qemu_opt_get_size(opts,
                                             QCOW2_OPT_REFCOUNT_CACHE_SIZE, 0);

    *l2_cache_entry_size = qemu_opt_get_size(
        opts, QCOW2_OPT_L2_CACHE_ENTRY_SIZE, s->cluster_size);

    *l2_cache_size = MIN(max_l2_cache, l2_cache_max_setting);

    if (combined_cache_size_set) {
        if (l2_cache_size_set && refcount_cache_size_set) {
            error_setg(errp, QCOW2_OPT_CACHE_SIZE ", " QCOW2_OPT_L2_CACHE_SIZE
                       " and " QCOW2_OPT_REFCOUNT_CACHE_SIZE " may not be set "
                       "at the same time");
            return false;
        } else if (l2_cache_size_set &&
                   (l2_cache_max_setting > combined_cache_size)) {
            error_setg(errp, QCOW2_OPT_L2_CACHE_SIZE " may not exceed "
                       QCOW2_OPT_CACHE_SIZE);
            return false;
        } else if (*refcount_cache_size > combined_cache_size) {
            error_setg(errp, QCOW2_OPT_REFCOUNT_CACHE_SIZE " may not exceed "
                       QCOW2_OPT_CACHE_SIZE);
            return false;
        }

        if (l2_cache_size_set) {
            *refcount_cache_size = combined_cache_size - *l2_cache_size;
        } else if (refcount_cache_size_set) {
            *l2_cache_size = combined_cache_size - *refcount_cache_size;
        } else {
            /* Assign as much memory as possible to the L2 cache, and
             * use the remainder for the refcount cache */
            if (combined_cache_size >= max_l2_cache + min_refcount_cache) {
                *l2_cache_size = max_l2_cache;
                *refcount_cache_size = combined_cache_size - *l2_cache_size;
            } else {
                *refcount_cache_size =
                    MIN(combined_cache_size, min_refcount_cache);
                *l2_cache_size = combined_cache_size - *refcount_cache_size;
            }
        }
    }

    /*
     * If the L2 cache is not enough to cover the whole disk then
     * default to 4KB entries. Smaller entries reduce the cost of
     * loads and evictions and increase I/O performance.
     */
    if (*l2_cache_size < max_l2_cache && !l2_cache_entry_size_set) {
        *l2_cache_entry_size = MIN(s->cluster_size, 4096);
    }

    /* l2_cache_size and refcount_cache_size are ensured to have at least
     * their minimum values in qcow2_update_options_prepare() */

    if (*l2_cache_entry_size < (1 << MIN_CLUSTER_BITS) ||
        *l2_cache_entry_size > s->cluster_size ||
        !is_power_of_2(*l2_cache_entry_size)) {
        error_setg(errp, "L2 cache entry size must be a power of two "
                   "between %d and the cluster size (%d)",
                   1 << MIN_CLUSTER_BITS, s->cluster_size);
        return false;
    }

    return true;
}
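/*
 * Worked example (editor's addition, assuming the default 64 KiB clusters
 * and standard 8-byte L2 entries, i.e. no extended L2 entries): one L2
 * cluster holds 65536 / 8 = 8192 entries and therefore maps
 * 8192 * 64 KiB = 512 MiB of guest data.  Covering a 1 TiB disk thus needs
 * 2048 L2 clusters, so max_l2_cache = 2048 * 64 KiB = 128 MiB.  Since the
 * built-in l2-cache-size default is considerably smaller than that, the
 * 4 KiB entry size chosen above usually applies for large images.
 */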
953 */ 954 if (*l2_cache_size < max_l2_cache && !l2_cache_entry_size_set) { 955 *l2_cache_entry_size = MIN(s->cluster_size, 4096); 956 } 957 958 /* l2_cache_size and refcount_cache_size are ensured to have at least 959 * their minimum values in qcow2_update_options_prepare() */ 960 961 if (*l2_cache_entry_size < (1 << MIN_CLUSTER_BITS) || 962 *l2_cache_entry_size > s->cluster_size || 963 !is_power_of_2(*l2_cache_entry_size)) { 964 error_setg(errp, "L2 cache entry size must be a power of two " 965 "between %d and the cluster size (%d)", 966 1 << MIN_CLUSTER_BITS, s->cluster_size); 967 return false; 968 } 969 970 return true; 971 } 972 973 typedef struct Qcow2ReopenState { 974 Qcow2Cache *l2_table_cache; 975 Qcow2Cache *refcount_block_cache; 976 int l2_slice_size; /* Number of entries in a slice of the L2 table */ 977 bool use_lazy_refcounts; 978 int overlap_check; 979 bool discard_passthrough[QCOW2_DISCARD_MAX]; 980 bool discard_no_unref; 981 uint64_t cache_clean_interval; 982 QCryptoBlockOpenOptions *crypto_opts; /* Disk encryption runtime options */ 983 } Qcow2ReopenState; 984 985 static int GRAPH_RDLOCK 986 qcow2_update_options_prepare(BlockDriverState *bs, Qcow2ReopenState *r, 987 QDict *options, int flags, Error **errp) 988 { 989 BDRVQcow2State *s = bs->opaque; 990 QemuOpts *opts = NULL; 991 const char *opt_overlap_check, *opt_overlap_check_template; 992 int overlap_check_template = 0; 993 uint64_t l2_cache_size, l2_cache_entry_size, refcount_cache_size; 994 int i; 995 const char *encryptfmt; 996 QDict *encryptopts = NULL; 997 int ret; 998 999 qdict_extract_subqdict(options, &encryptopts, "encrypt."); 1000 encryptfmt = qdict_get_try_str(encryptopts, "format"); 1001 1002 opts = qemu_opts_create(&qcow2_runtime_opts, NULL, 0, &error_abort); 1003 if (!qemu_opts_absorb_qdict(opts, options, errp)) { 1004 ret = -EINVAL; 1005 goto fail; 1006 } 1007 1008 /* get L2 table/refcount block cache size from command line options */ 1009 if (!read_cache_sizes(bs, opts, &l2_cache_size, &l2_cache_entry_size, 1010 &refcount_cache_size, errp)) { 1011 ret = -EINVAL; 1012 goto fail; 1013 } 1014 1015 l2_cache_size /= l2_cache_entry_size; 1016 if (l2_cache_size < MIN_L2_CACHE_SIZE) { 1017 l2_cache_size = MIN_L2_CACHE_SIZE; 1018 } 1019 if (l2_cache_size > INT_MAX) { 1020 error_setg(errp, "L2 cache size too big"); 1021 ret = -EINVAL; 1022 goto fail; 1023 } 1024 1025 refcount_cache_size /= s->cluster_size; 1026 if (refcount_cache_size < MIN_REFCOUNT_CACHE_SIZE) { 1027 refcount_cache_size = MIN_REFCOUNT_CACHE_SIZE; 1028 } 1029 if (refcount_cache_size > INT_MAX) { 1030 error_setg(errp, "Refcount cache size too big"); 1031 ret = -EINVAL; 1032 goto fail; 1033 } 1034 1035 /* alloc new L2 table/refcount block cache, flush old one */ 1036 if (s->l2_table_cache) { 1037 ret = qcow2_cache_flush(bs, s->l2_table_cache); 1038 if (ret) { 1039 error_setg_errno(errp, -ret, "Failed to flush the L2 table cache"); 1040 goto fail; 1041 } 1042 } 1043 1044 if (s->refcount_block_cache) { 1045 ret = qcow2_cache_flush(bs, s->refcount_block_cache); 1046 if (ret) { 1047 error_setg_errno(errp, -ret, 1048 "Failed to flush the refcount block cache"); 1049 goto fail; 1050 } 1051 } 1052 1053 r->l2_slice_size = l2_cache_entry_size / l2_entry_size(s); 1054 r->l2_table_cache = qcow2_cache_create(bs, l2_cache_size, 1055 l2_cache_entry_size); 1056 r->refcount_block_cache = qcow2_cache_create(bs, refcount_cache_size, 1057 s->cluster_size); 1058 if (r->l2_table_cache == NULL || r->refcount_block_cache == NULL) { 1059 error_setg(errp, "Could not 
        ret = -ENOMEM;
        goto fail;
    }

    /* New interval for cache cleanup timer */
    r->cache_clean_interval =
        qemu_opt_get_number(opts, QCOW2_OPT_CACHE_CLEAN_INTERVAL,
                            DEFAULT_CACHE_CLEAN_INTERVAL);
#ifndef CONFIG_LINUX
    if (r->cache_clean_interval != 0) {
        error_setg(errp, QCOW2_OPT_CACHE_CLEAN_INTERVAL
                   " not supported on this host");
        ret = -EINVAL;
        goto fail;
    }
#endif
    if (r->cache_clean_interval > UINT_MAX) {
        error_setg(errp, "Cache clean interval too big");
        ret = -EINVAL;
        goto fail;
    }

    /* lazy-refcounts; flush if going from enabled to disabled */
    r->use_lazy_refcounts = qemu_opt_get_bool(opts, QCOW2_OPT_LAZY_REFCOUNTS,
        (s->compatible_features & QCOW2_COMPAT_LAZY_REFCOUNTS));
    if (r->use_lazy_refcounts && s->qcow_version < 3) {
        error_setg(errp, "Lazy refcounts require a qcow2 image with at least "
                   "qemu 1.1 compatibility level");
        ret = -EINVAL;
        goto fail;
    }

    if (s->use_lazy_refcounts && !r->use_lazy_refcounts) {
        ret = qcow2_mark_clean(bs);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Failed to disable lazy refcounts");
            goto fail;
        }
    }

    /* Overlap check options */
    opt_overlap_check = qemu_opt_get(opts, QCOW2_OPT_OVERLAP);
    opt_overlap_check_template = qemu_opt_get(opts, QCOW2_OPT_OVERLAP_TEMPLATE);
    if (opt_overlap_check_template && opt_overlap_check &&
        strcmp(opt_overlap_check_template, opt_overlap_check))
    {
        error_setg(errp, "Conflicting values for qcow2 options '"
                   QCOW2_OPT_OVERLAP "' ('%s') and '" QCOW2_OPT_OVERLAP_TEMPLATE
                   "' ('%s')", opt_overlap_check, opt_overlap_check_template);
        ret = -EINVAL;
        goto fail;
    }
    if (!opt_overlap_check) {
        opt_overlap_check = opt_overlap_check_template ?: "cached";
    }

    if (!strcmp(opt_overlap_check, "none")) {
        overlap_check_template = 0;
    } else if (!strcmp(opt_overlap_check, "constant")) {
        overlap_check_template = QCOW2_OL_CONSTANT;
    } else if (!strcmp(opt_overlap_check, "cached")) {
        overlap_check_template = QCOW2_OL_CACHED;
    } else if (!strcmp(opt_overlap_check, "all")) {
        overlap_check_template = QCOW2_OL_ALL;
    } else {
        error_setg(errp, "Unsupported value '%s' for qcow2 option "
                   "'overlap-check'. Allowed are any of the following: "
                   "none, constant, cached, all", opt_overlap_check);
        ret = -EINVAL;
        goto fail;
    }

    r->overlap_check = 0;
    for (i = 0; i < QCOW2_OL_MAX_BITNR; i++) {
        /* overlap-check defines a template bitmask, but every flag may be
         * overwritten through the associated boolean option */
        r->overlap_check |=
            qemu_opt_get_bool(opts, overlap_bool_option_names[i],
                              overlap_check_template & (1 << i)) << i;
    }

    r->discard_passthrough[QCOW2_DISCARD_NEVER] = false;
    r->discard_passthrough[QCOW2_DISCARD_ALWAYS] = true;
    r->discard_passthrough[QCOW2_DISCARD_REQUEST] =
        qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_REQUEST,
                          flags & BDRV_O_UNMAP);
    r->discard_passthrough[QCOW2_DISCARD_SNAPSHOT] =
        qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_SNAPSHOT, true);
    r->discard_passthrough[QCOW2_DISCARD_OTHER] =
        qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_OTHER, false);

    r->discard_no_unref = qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_NO_UNREF,
                                            false);
    if (r->discard_no_unref && s->qcow_version < 3) {
        error_setg(errp,
                   "discard-no-unref is only supported since qcow2 version 3");
        ret = -EINVAL;
        goto fail;
    }

    switch (s->crypt_method_header) {
    case QCOW_CRYPT_NONE:
        if (encryptfmt) {
            error_setg(errp, "No encryption in image header, but options "
                       "specified format '%s'", encryptfmt);
            ret = -EINVAL;
            goto fail;
        }
        break;

    case QCOW_CRYPT_AES:
        if (encryptfmt && !g_str_equal(encryptfmt, "aes")) {
            error_setg(errp,
                       "Header reported 'aes' encryption format but "
                       "options specify '%s'", encryptfmt);
            ret = -EINVAL;
            goto fail;
        }
        qdict_put_str(encryptopts, "format", "qcow");
        r->crypto_opts = block_crypto_open_opts_init(encryptopts, errp);
        if (!r->crypto_opts) {
            ret = -EINVAL;
            goto fail;
        }
        break;

    case QCOW_CRYPT_LUKS:
        if (encryptfmt && !g_str_equal(encryptfmt, "luks")) {
            error_setg(errp,
                       "Header reported 'luks' encryption format but "
                       "options specify '%s'", encryptfmt);
            ret = -EINVAL;
            goto fail;
        }
        qdict_put_str(encryptopts, "format", "luks");
        r->crypto_opts = block_crypto_open_opts_init(encryptopts, errp);
        if (!r->crypto_opts) {
            ret = -EINVAL;
            goto fail;
        }
        break;

    default:
        error_setg(errp, "Unsupported encryption method %d",
                   s->crypt_method_header);
        ret = -EINVAL;
        goto fail;
    }

    ret = 0;
fail:
    qobject_unref(encryptopts);
    qemu_opts_del(opts);
    opts = NULL;
    return ret;
}

static void qcow2_update_options_commit(BlockDriverState *bs,
                                        Qcow2ReopenState *r)
{
    BDRVQcow2State *s = bs->opaque;
    int i;

    if (s->l2_table_cache) {
        qcow2_cache_destroy(s->l2_table_cache);
    }
    if (s->refcount_block_cache) {
        qcow2_cache_destroy(s->refcount_block_cache);
    }
    s->l2_table_cache = r->l2_table_cache;
    s->refcount_block_cache = r->refcount_block_cache;
    s->l2_slice_size = r->l2_slice_size;

    s->overlap_check = r->overlap_check;
    s->use_lazy_refcounts = r->use_lazy_refcounts;

    for (i = 0; i < QCOW2_DISCARD_MAX; i++) {
        s->discard_passthrough[i] = r->discard_passthrough[i];
    }

    s->discard_no_unref = r->discard_no_unref;

    if (s->cache_clean_interval != r->cache_clean_interval) {
        cache_clean_timer_del(bs);
        s->cache_clean_interval = r->cache_clean_interval;
        cache_clean_timer_init(bs, bdrv_get_aio_context(bs));
    }

    qapi_free_QCryptoBlockOpenOptions(s->crypto_opts);
    s->crypto_opts = r->crypto_opts;
}

static void qcow2_update_options_abort(BlockDriverState *bs,
                                       Qcow2ReopenState *r)
{
    if (r->l2_table_cache) {
        qcow2_cache_destroy(r->l2_table_cache);
    }
    if (r->refcount_block_cache) {
        qcow2_cache_destroy(r->refcount_block_cache);
    }
    qapi_free_QCryptoBlockOpenOptions(r->crypto_opts);
}

static int coroutine_fn GRAPH_RDLOCK
qcow2_update_options(BlockDriverState *bs, QDict *options, int flags,
                     Error **errp)
{
    Qcow2ReopenState r = {};
    int ret;

    ret = qcow2_update_options_prepare(bs, &r, options, flags, errp);
    if (ret >= 0) {
        qcow2_update_options_commit(bs, &r);
    } else {
        qcow2_update_options_abort(bs, &r);
    }

    return ret;
}

static int validate_compression_type(BDRVQcow2State *s, Error **errp)
{
    switch (s->compression_type) {
    case QCOW2_COMPRESSION_TYPE_ZLIB:
#ifdef CONFIG_ZSTD
    case QCOW2_COMPRESSION_TYPE_ZSTD:
#endif
        break;

    default:
        error_setg(errp, "qcow2: unknown compression type: %u",
                   s->compression_type);
        return -ENOTSUP;
    }

    /*
     * if the compression type differs from QCOW2_COMPRESSION_TYPE_ZLIB
     * the incompatible feature flag must be set
     */
    if (s->compression_type == QCOW2_COMPRESSION_TYPE_ZLIB) {
        if (s->incompatible_features & QCOW2_INCOMPAT_COMPRESSION) {
            error_setg(errp, "qcow2: Compression type incompatible feature "
                       "bit must not be set");
            return -EINVAL;
        }
    } else {
        if (!(s->incompatible_features & QCOW2_INCOMPAT_COMPRESSION)) {
            error_setg(errp, "qcow2: Compression type incompatible feature "
                       "bit must be set");
            return -EINVAL;
        }
    }

    return 0;
}

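/*
 * Editor's note (summary of the rule enforced above, not additional code):
 *
 *     compression_type == zlib  <=>  QCOW2_INCOMPAT_COMPRESSION not set
 *
 * zlib is the implicit default that every qcow2 reader understands, so only
 * a non-default type (e.g. zstd) must be advertised through the
 * incompatible-feature bit; conversely, an image that sets the bit but still
 * claims zlib is rejected as inconsistent.
 */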
/* Called with s->lock held.  */
static int coroutine_fn GRAPH_RDLOCK
qcow2_do_open(BlockDriverState *bs, QDict *options, int flags,
              bool open_data_file, Error **errp)
{
    ERRP_GUARD();
    BDRVQcow2State *s = bs->opaque;
    unsigned int len, i;
    int ret = 0;
    QCowHeader header;
    uint64_t ext_end;
    uint64_t l1_vm_state_index;
    bool update_header = false;

    ret = bdrv_co_pread(bs->file, 0, sizeof(header), &header, 0);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not read qcow2 header");
        goto fail;
    }
    header.magic = be32_to_cpu(header.magic);
    header.version = be32_to_cpu(header.version);
    header.backing_file_offset = be64_to_cpu(header.backing_file_offset);
    header.backing_file_size = be32_to_cpu(header.backing_file_size);
    header.size = be64_to_cpu(header.size);
    header.cluster_bits = be32_to_cpu(header.cluster_bits);
    header.crypt_method = be32_to_cpu(header.crypt_method);
    header.l1_table_offset = be64_to_cpu(header.l1_table_offset);
    header.l1_size = be32_to_cpu(header.l1_size);
    header.refcount_table_offset = be64_to_cpu(header.refcount_table_offset);
    header.refcount_table_clusters =
        be32_to_cpu(header.refcount_table_clusters);
    header.snapshots_offset = be64_to_cpu(header.snapshots_offset);
    header.nb_snapshots = be32_to_cpu(header.nb_snapshots);

    if (header.magic != QCOW_MAGIC) {
        error_setg(errp, "Image is not in qcow2 format");
        ret = -EINVAL;
        goto fail;
    }
    if (header.version < 2 || header.version > 3) {
        error_setg(errp, "Unsupported qcow2 version %" PRIu32, header.version);
        ret = -ENOTSUP;
        goto fail;
    }

    s->qcow_version = header.version;

    /* Initialise cluster size */
    if (header.cluster_bits < MIN_CLUSTER_BITS ||
        header.cluster_bits > MAX_CLUSTER_BITS) {
        error_setg(errp, "Unsupported cluster size: 2^%" PRIu32,
                   header.cluster_bits);
        ret = -EINVAL;
        goto fail;
    }

    s->cluster_bits = header.cluster_bits;
    s->cluster_size = 1 << s->cluster_bits;

    /* Initialise version 3 header fields */
    if (header.version == 2) {
        header.incompatible_features = 0;
        header.compatible_features = 0;
        header.autoclear_features = 0;
        header.refcount_order = 4;
        header.header_length = 72;
    } else {
        header.incompatible_features =
            be64_to_cpu(header.incompatible_features);
        header.compatible_features = be64_to_cpu(header.compatible_features);
        header.autoclear_features = be64_to_cpu(header.autoclear_features);
        header.refcount_order = be32_to_cpu(header.refcount_order);
        header.header_length = be32_to_cpu(header.header_length);

        if (header.header_length < 104) {
            error_setg(errp, "qcow2 header too short");
            ret = -EINVAL;
            goto fail;
        }
    }

    if (header.header_length > s->cluster_size) {
        error_setg(errp, "qcow2 header exceeds cluster size");
        ret = -EINVAL;
        goto fail;
    }

    if (header.header_length > sizeof(header)) {
        s->unknown_header_fields_size = header.header_length - sizeof(header);
        s->unknown_header_fields = g_malloc(s->unknown_header_fields_size);
        ret = bdrv_co_pread(bs->file, sizeof(header),
                            s->unknown_header_fields_size,
                            s->unknown_header_fields, 0);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Could not read unknown qcow2 header "
                             "fields");
            goto fail;
        }
    }

    if (header.backing_file_offset > s->cluster_size) {
        error_setg(errp, "Invalid backing file offset");
        ret = -EINVAL;
        goto fail;
    }

    if (header.backing_file_offset) {
        ext_end = header.backing_file_offset;
    } else {
        ext_end = 1 << header.cluster_bits;
    }

    /* Handle feature bits */
    s->incompatible_features = header.incompatible_features;
    s->compatible_features = header.compatible_features;
    s->autoclear_features = header.autoclear_features;

    /*
     * Handle compression type
     * Older qcow2 images don't contain the compression type header.
     * Distinguish them by the header length and use
     * the only valid (default) compression type in that case
     */
    if (header.header_length > offsetof(QCowHeader, compression_type)) {
        s->compression_type = header.compression_type;
    } else {
        s->compression_type = QCOW2_COMPRESSION_TYPE_ZLIB;
    }

    ret = validate_compression_type(s, errp);
    if (ret) {
        goto fail;
    }

    if (s->incompatible_features & ~QCOW2_INCOMPAT_MASK) {
        void *feature_table = NULL;
        qcow2_read_extensions(bs, header.header_length, ext_end,
                              &feature_table, flags, NULL, NULL);
        report_unsupported_feature(errp, feature_table,
                                   s->incompatible_features &
                                   ~QCOW2_INCOMPAT_MASK);
        ret = -ENOTSUP;
        g_free(feature_table);
        goto fail;
    }

    if (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT) {
        /* Corrupt images may not be written to unless they are being repaired
         */
        if ((flags & BDRV_O_RDWR) && !(flags & BDRV_O_CHECK)) {
            error_setg(errp, "qcow2: Image is corrupt; cannot be opened "
                       "read/write");
            ret = -EACCES;
            goto fail;
        }
    }

    s->subclusters_per_cluster =
        has_subclusters(s) ? QCOW_EXTL2_SUBCLUSTERS_PER_CLUSTER : 1;
    s->subcluster_size = s->cluster_size / s->subclusters_per_cluster;
    s->subcluster_bits = ctz32(s->subcluster_size);

    if (s->subcluster_size < (1 << MIN_CLUSTER_BITS)) {
        error_setg(errp, "Unsupported subcluster size: %d", s->subcluster_size);
        ret = -EINVAL;
        goto fail;
    }

    /* Check support for various header values */
    if (header.refcount_order > 6) {
        error_setg(errp, "Reference count entry width too large; may not "
                   "exceed 64 bits");
        ret = -EINVAL;
        goto fail;
    }
    s->refcount_order = header.refcount_order;
    s->refcount_bits = 1 << s->refcount_order;
    s->refcount_max = UINT64_C(1) << (s->refcount_bits - 1);
    s->refcount_max += s->refcount_max - 1;
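    /*
     * Worked example (editor's addition): refcount_order is the log2 of the
     * refcount width in bits, so for the default refcount_order = 4:
     *
     *     refcount_bits = 1 << 4  = 16
     *     refcount_max  = 1 << 15 = 0x8000
     *     refcount_max += refcount_max - 1   ->  0xffff
     *
     * i.e. refcount_max is the largest value representable in refcount_bits
     * bits, computed in two steps so that refcount_order = 6 (64-bit
     * refcounts) does not overflow a 64-bit shift.
     */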

    s->crypt_method_header = header.crypt_method;
    if (s->crypt_method_header) {
        if (bdrv_uses_whitelist() &&
            s->crypt_method_header == QCOW_CRYPT_AES) {
            error_setg(errp,
                       "Use of AES-CBC encrypted qcow2 images is no longer "
                       "supported in system emulators");
            error_append_hint(errp,
                              "You can use 'qemu-img convert' to convert your "
                              "image to an alternative supported format, such "
                              "as unencrypted qcow2, or raw with the LUKS "
                              "format instead.\n");
            ret = -ENOSYS;
            goto fail;
        }

        if (s->crypt_method_header == QCOW_CRYPT_AES) {
            s->crypt_physical_offset = false;
        } else {
            /* Assuming LUKS and any future crypt methods we
             * add will all use physical offsets, due to the
             * fact that the alternative is insecure... */
            s->crypt_physical_offset = true;
        }

        bs->encrypted = true;
    }

    s->l2_bits = s->cluster_bits - ctz32(l2_entry_size(s));
    s->l2_size = 1 << s->l2_bits;
    /* 2^(s->refcount_order - 3) is the refcount width in bytes */
    s->refcount_block_bits = s->cluster_bits - (s->refcount_order - 3);
    s->refcount_block_size = 1 << s->refcount_block_bits;
    bs->total_sectors = header.size / BDRV_SECTOR_SIZE;
    s->csize_shift = (62 - (s->cluster_bits - 8));
    s->csize_mask = (1 << (s->cluster_bits - 8)) - 1;
    s->cluster_offset_mask = (1LL << s->csize_shift) - 1;

    s->refcount_table_offset = header.refcount_table_offset;
    s->refcount_table_size =
        header.refcount_table_clusters << (s->cluster_bits - 3);

    if (header.refcount_table_clusters == 0 && !(flags & BDRV_O_CHECK)) {
        error_setg(errp, "Image does not contain a reference count table");
        ret = -EINVAL;
        goto fail;
    }

    ret = qcow2_validate_table(bs, s->refcount_table_offset,
                               header.refcount_table_clusters,
                               s->cluster_size, QCOW_MAX_REFTABLE_SIZE,
                               "Reference count table", errp);
    if (ret < 0) {
        goto fail;
    }

    if (!(flags & BDRV_O_CHECK)) {
        /*
         * The total size in bytes of the snapshot table is checked in
         * qcow2_read_snapshots() because the size of each snapshot is
         * variable and we don't know it yet.
         * Here we only check the offset and number of snapshots.
         */
        ret = qcow2_validate_table(bs, header.snapshots_offset,
                                   header.nb_snapshots,
                                   sizeof(QCowSnapshotHeader),
                                   sizeof(QCowSnapshotHeader) *
                                       QCOW_MAX_SNAPSHOTS,
                                   "Snapshot table", errp);
        if (ret < 0) {
            goto fail;
        }
    }

    /* read the level 1 table */
    ret = qcow2_validate_table(bs, header.l1_table_offset,
                               header.l1_size, L1E_SIZE,
                               QCOW_MAX_L1_SIZE, "Active L1 table", errp);
    if (ret < 0) {
        goto fail;
    }
    s->l1_size = header.l1_size;
    s->l1_table_offset = header.l1_table_offset;

    l1_vm_state_index = size_to_l1(s, header.size);
    if (l1_vm_state_index > INT_MAX) {
        error_setg(errp, "Image is too big");
        ret = -EFBIG;
        goto fail;
    }
    s->l1_vm_state_index = l1_vm_state_index;

    /* the L1 table must contain at least enough entries to put
       header.size bytes */
    if (s->l1_size < s->l1_vm_state_index) {
        error_setg(errp, "L1 table is too small");
        ret = -EINVAL;
        goto fail;
    }

    if (s->l1_size > 0) {
        s->l1_table = qemu_try_blockalign(bs->file->bs, s->l1_size * L1E_SIZE);
        if (s->l1_table == NULL) {
            error_setg(errp, "Could not allocate L1 table");
            ret = -ENOMEM;
            goto fail;
        }
        ret = bdrv_co_pread(bs->file, s->l1_table_offset, s->l1_size * L1E_SIZE,
                            s->l1_table, 0);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Could not read L1 table");
            goto fail;
        }
        for (i = 0; i < s->l1_size; i++) {
            s->l1_table[i] = be64_to_cpu(s->l1_table[i]);
        }
    }

    /* Parse driver-specific options */
    ret = qcow2_update_options(bs, options, flags, errp);
    if (ret < 0) {
        goto fail;
    }

    s->flags = flags;

    ret = qcow2_refcount_init(bs);
    if (ret != 0) {
        error_setg_errno(errp, -ret, "Could not initialize refcount handling");
        goto fail;
    }

    QLIST_INIT(&s->cluster_allocs);
    QTAILQ_INIT(&s->discards);

    /* read qcow2 extensions */
    if (qcow2_read_extensions(bs, header.header_length, ext_end, NULL,
                              flags, &update_header, errp)) {
        ret = -EINVAL;
        goto fail;
    }

    if (open_data_file && (flags & BDRV_O_NO_IO)) {
        /*
         * Don't open the data file for 'qemu-img info' so that it can be used
         * to verify that an untrusted qcow2 image doesn't refer to external
         * files.
         *
         * Note: This still makes has_data_file() return true.
         */
        if (s->incompatible_features & QCOW2_INCOMPAT_DATA_FILE) {
            s->data_file = NULL;
        } else {
            s->data_file = bs->file;
        }
        qdict_extract_subqdict(options, NULL, "data-file.");
        qdict_del(options, "data-file");
    } else if (open_data_file) {
        /* Open external data file */
        bdrv_graph_co_rdunlock();
        s->data_file = bdrv_co_open_child(NULL, options, "data-file", bs,
                                          &child_of_bds, BDRV_CHILD_DATA,
                                          true, errp);
        bdrv_graph_co_rdlock();
        if (*errp) {
            ret = -EINVAL;
            goto fail;
        }

        if (s->incompatible_features & QCOW2_INCOMPAT_DATA_FILE) {
            if (!s->data_file && s->image_data_file) {
                bdrv_graph_co_rdunlock();
                s->data_file = bdrv_co_open_child(s->image_data_file, options,
                                                  "data-file", bs,
                                                  &child_of_bds,
                                                  BDRV_CHILD_DATA, false, errp);
                bdrv_graph_co_rdlock();
                if (!s->data_file) {
                    ret = -EINVAL;
                    goto fail;
                }
            }
            if (!s->data_file) {
                error_setg(errp, "'data-file' is required for this image");
                ret = -EINVAL;
                goto fail;
            }

            /* No data here */
            bs->file->role &= ~BDRV_CHILD_DATA;

            /* Must succeed because we have given up permissions if anything */
            bdrv_child_refresh_perms(bs, bs->file, &error_abort);
        } else {
            if (s->data_file) {
                error_setg(errp, "'data-file' can only be set for images with "
                           "an external data file");
                ret = -EINVAL;
                goto fail;
            }

            s->data_file = bs->file;

            if (data_file_is_raw(bs)) {
                error_setg(errp, "data-file-raw requires a data file");
                ret = -EINVAL;
                goto fail;
            }
        }
    }

    /*
     * qcow2_read_extensions() may already have set up the crypto context
     * if the crypt method needs a header region; some methods don't need
     * header extensions, so we must check here whether it is still missing.
     */
    if (s->crypt_method_header && !s->crypto) {
        if (s->crypt_method_header == QCOW_CRYPT_AES) {
            unsigned int cflags = 0;
            if (flags & BDRV_O_NO_IO) {
                cflags |= QCRYPTO_BLOCK_OPEN_NO_IO;
            }
            s->crypto = qcrypto_block_open(s->crypto_opts, "encrypt.",
                                           NULL, NULL, cflags, errp);
            if (!s->crypto) {
                ret = -EINVAL;
                goto fail;
            }
        } else {
            error_setg(errp, "Missing CRYPTO header for crypt method %d",
                       s->crypt_method_header);
            ret = -EINVAL;
            goto fail;
        }
    }

    /* read the backing file name */
    if (header.backing_file_offset != 0) {
        len = header.backing_file_size;
        if (len > MIN(1023, s->cluster_size - header.backing_file_offset) ||
            len >= sizeof(bs->backing_file)) {
            error_setg(errp, "Backing file name too long");
            ret = -EINVAL;
            goto fail;
        }

        s->image_backing_file = g_malloc(len + 1);
        ret = bdrv_co_pread(bs->file, header.backing_file_offset, len,
                            s->image_backing_file, 0);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Could not read backing file name");
            goto fail;
        }
        s->image_backing_file[len] = '\0';

        /*
         * Update only when something has changed.  This function is called by
         * qcow2_co_invalidate_cache(), and we do not want to reset
         * auto_backing_file unless necessary.
         */
        if (!g_str_equal(s->image_backing_file, bs->backing_file)) {
            pstrcpy(bs->backing_file, sizeof(bs->backing_file),
                    s->image_backing_file);
            pstrcpy(bs->auto_backing_file, sizeof(bs->auto_backing_file),
                    s->image_backing_file);
        }
    }

    /*
     * Internal snapshots; skip reading them in check mode, because
     * we do not need them then, and we do not want to abort because
     * of a broken table.
     */
    if (!(flags & BDRV_O_CHECK)) {
        s->snapshots_offset = header.snapshots_offset;
        s->nb_snapshots = header.nb_snapshots;

        ret = qcow2_read_snapshots(bs, errp);
        if (ret < 0) {
            goto fail;
        }
    }

    /* Clear unknown autoclear feature bits */
    update_header |= s->autoclear_features & ~QCOW2_AUTOCLEAR_MASK;
    update_header = update_header && bdrv_is_writable(bs);
    if (update_header) {
        s->autoclear_features &= QCOW2_AUTOCLEAR_MASK;
    }

    /* == Handle persistent dirty bitmaps ==
     *
     * We want to load dirty bitmaps in three cases:
     *
     * 1. Normal open of the disk in active mode, not related to invalidation
     *    after migration.
     *
     * 2. Invalidation of the target vm after the pre-copy phase of migration,
     *    if bitmaps are _not_ migrating through the migration channel, i.e.
     *    the 'dirty-bitmaps' capability is disabled.
     *
     * 3. Invalidation of the source vm after a failed or canceled migration.
     *    This is a very interesting case. There are two possible types of
     *    bitmaps:
     *
     *    A. Stored on inactivation and removed. They should be loaded from
     *       the image.
     *
     *    B. Not stored: not-persistent bitmaps, and bitmaps migrated through
     *       the migration channel (with the dirty-bitmaps capability).
     *
     *    On the other hand, there are two possible sub-cases:
     *
     *    3.1 The disk was changed by somebody else while we were inactive. In
     *        this case all in-RAM dirty bitmaps (both persistent and not) are
     *        definitely invalid. And we don't have any method to determine
     *        this.
     *
     *        The simple and safe thing is to just drop all bitmaps of type B
     *        on inactivation. But in that case we lose bitmaps in the valid
     *        3.2 case.
     *
     *        On the other hand, resuming the source vm when the disk has
     *        already been changed is a bad thing anyway: not only the
     *        bitmaps, the whole vm state is out of sync with the disk.
     *
     *        This means that a user or management tool who for some reason
     *        decided to resume the source vm after the disk was already
     *        changed by the target vm should at least drop all dirty bitmaps
     *        by hand.
     *
     *        So we can ignore this case for now, but TODO: a "generation"
     *        extension for qcow2, to determine that the image was changed
     *        after the last inactivation. And if it was changed, we will drop
     *        (or at least mark as 'invalid') all bitmaps of type B, both
     *        persistent and not.
     *
     *    3.2 The disk was _not_ changed while we were inactive. Bitmaps may
     *        have been saved to disk ('dirty-bitmaps' capability disabled) or
     *        not saved ('dirty-bitmaps' capability enabled), but we don't
     *        need to care: let's load bitmaps as always. Stored bitmaps will
     *        be loaded, and bitmaps that were not stored have the IN_USE=1
     *        flag in the image and will be skipped on loading.
     *
     * One remaining case in which we do not want to load bitmaps:
     *
Open disk in inactive mode in target vm (bitmaps are migrating or 1841 * will be loaded on invalidation, no needs try loading them before) 1842 */ 1843 1844 if (!(bdrv_get_flags(bs) & BDRV_O_INACTIVE)) { 1845 /* It's case 1, 2 or 3.2. Or 3.1 which is BUG in management layer. */ 1846 bool header_updated; 1847 if (!qcow2_load_dirty_bitmaps(bs, &header_updated, errp)) { 1848 ret = -EINVAL; 1849 goto fail; 1850 } 1851 1852 update_header = update_header && !header_updated; 1853 } 1854 1855 if (update_header) { 1856 ret = qcow2_update_header(bs); 1857 if (ret < 0) { 1858 error_setg_errno(errp, -ret, "Could not update qcow2 header"); 1859 goto fail; 1860 } 1861 } 1862 1863 bs->supported_zero_flags = header.version >= 3 ? 1864 BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK : 0; 1865 bs->supported_truncate_flags = BDRV_REQ_ZERO_WRITE; 1866 1867 /* Repair image if dirty */ 1868 if (!(flags & BDRV_O_CHECK) && bdrv_is_writable(bs) && 1869 (s->incompatible_features & QCOW2_INCOMPAT_DIRTY)) { 1870 BdrvCheckResult result = {0}; 1871 1872 ret = qcow2_co_check_locked(bs, &result, 1873 BDRV_FIX_ERRORS | BDRV_FIX_LEAKS); 1874 if (ret < 0 || result.check_errors) { 1875 if (ret >= 0) { 1876 ret = -EIO; 1877 } 1878 error_setg_errno(errp, -ret, "Could not repair dirty image"); 1879 goto fail; 1880 } 1881 } 1882 1883 #ifdef DEBUG_ALLOC 1884 { 1885 BdrvCheckResult result = {0}; 1886 qcow2_check_refcounts(bs, &result, 0); 1887 } 1888 #endif 1889 1890 qemu_co_queue_init(&s->thread_task_queue); 1891 1892 return ret; 1893 1894 fail: 1895 g_free(s->image_data_file); 1896 if (open_data_file && has_data_file(bs)) { 1897 bdrv_graph_co_rdunlock(); 1898 bdrv_drain_all_begin(); 1899 bdrv_co_unref_child(bs, s->data_file); 1900 bdrv_drain_all_end(); 1901 bdrv_graph_co_rdlock(); 1902 s->data_file = NULL; 1903 } 1904 g_free(s->unknown_header_fields); 1905 cleanup_unknown_header_ext(bs); 1906 qcow2_free_snapshots(bs); 1907 qcow2_refcount_close(bs); 1908 qemu_vfree(s->l1_table); 1909 /* else pre-write overlap checks in cache_destroy may crash */ 1910 s->l1_table = NULL; 1911 cache_clean_timer_del(bs); 1912 if (s->l2_table_cache) { 1913 qcow2_cache_destroy(s->l2_table_cache); 1914 } 1915 if (s->refcount_block_cache) { 1916 qcow2_cache_destroy(s->refcount_block_cache); 1917 } 1918 qcrypto_block_free(s->crypto); 1919 qapi_free_QCryptoBlockOpenOptions(s->crypto_opts); 1920 return ret; 1921 } 1922 1923 typedef struct QCow2OpenCo { 1924 BlockDriverState *bs; 1925 QDict *options; 1926 int flags; 1927 Error **errp; 1928 int ret; 1929 } QCow2OpenCo; 1930 1931 static void coroutine_fn qcow2_open_entry(void *opaque) 1932 { 1933 QCow2OpenCo *qoc = opaque; 1934 BDRVQcow2State *s = qoc->bs->opaque; 1935 1936 GRAPH_RDLOCK_GUARD(); 1937 1938 qemu_co_mutex_lock(&s->lock); 1939 qoc->ret = qcow2_do_open(qoc->bs, qoc->options, qoc->flags, true, 1940 qoc->errp); 1941 qemu_co_mutex_unlock(&s->lock); 1942 1943 aio_wait_kick(); 1944 } 1945 1946 static int qcow2_open(BlockDriverState *bs, QDict *options, int flags, 1947 Error **errp) 1948 { 1949 BDRVQcow2State *s = bs->opaque; 1950 QCow2OpenCo qoc = { 1951 .bs = bs, 1952 .options = options, 1953 .flags = flags, 1954 .errp = errp, 1955 .ret = -EINPROGRESS 1956 }; 1957 int ret; 1958 1959 ret = bdrv_open_file_child(NULL, options, "file", bs, errp); 1960 if (ret < 0) { 1961 return ret; 1962 } 1963 1964 /* Initialise locks */ 1965 qemu_co_mutex_init(&s->lock); 1966 1967 assert(!qemu_in_coroutine()); 1968 assert(qemu_get_current_aio_context() == qemu_get_aio_context()); 1969 1970 
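    /*
     * qcow2_do_open() performs coroutine-only I/O, so run qcow2_open_entry()
     * in a coroutine and spin the main loop until it has replaced the
     * -EINPROGRESS marker in qoc.ret with the real return value.
     */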
aio_co_enter(bdrv_get_aio_context(bs), 1971 qemu_coroutine_create(qcow2_open_entry, &qoc)); 1972 AIO_WAIT_WHILE_UNLOCKED(NULL, qoc.ret == -EINPROGRESS); 1973 1974 return qoc.ret; 1975 } 1976 1977 static void qcow2_refresh_limits(BlockDriverState *bs, Error **errp) 1978 { 1979 BDRVQcow2State *s = bs->opaque; 1980 1981 if (s->crypto) { 1982 /* Encryption works on a sector granularity */ 1983 bs->bl.request_alignment = qcrypto_block_get_sector_size(s->crypto); 1984 } 1985 bs->bl.pwrite_zeroes_alignment = s->subcluster_size; 1986 bs->bl.pdiscard_alignment = s->cluster_size; 1987 } 1988 1989 static int GRAPH_UNLOCKED 1990 qcow2_reopen_prepare(BDRVReopenState *state,BlockReopenQueue *queue, 1991 Error **errp) 1992 { 1993 BDRVQcow2State *s = state->bs->opaque; 1994 Qcow2ReopenState *r; 1995 int ret; 1996 1997 GLOBAL_STATE_CODE(); 1998 GRAPH_RDLOCK_GUARD_MAINLOOP(); 1999 2000 r = g_new0(Qcow2ReopenState, 1); 2001 state->opaque = r; 2002 2003 ret = qcow2_update_options_prepare(state->bs, r, state->options, 2004 state->flags, errp); 2005 if (ret < 0) { 2006 goto fail; 2007 } 2008 2009 /* We need to write out any unwritten data if we reopen read-only. */ 2010 if ((state->flags & BDRV_O_RDWR) == 0) { 2011 ret = qcow2_reopen_bitmaps_ro(state->bs, errp); 2012 if (ret < 0) { 2013 goto fail; 2014 } 2015 2016 ret = bdrv_flush(state->bs); 2017 if (ret < 0) { 2018 goto fail; 2019 } 2020 2021 ret = qcow2_mark_clean(state->bs); 2022 if (ret < 0) { 2023 goto fail; 2024 } 2025 } 2026 2027 /* 2028 * Without an external data file, s->data_file points to the same BdrvChild 2029 * as bs->file. It needs to be resynced after reopen because bs->file may 2030 * be changed. We can't use it in the meantime. 2031 */ 2032 if (!has_data_file(state->bs)) { 2033 assert(s->data_file == state->bs->file); 2034 s->data_file = NULL; 2035 } 2036 2037 return 0; 2038 2039 fail: 2040 qcow2_update_options_abort(state->bs, r); 2041 g_free(r); 2042 return ret; 2043 } 2044 2045 static void qcow2_reopen_commit(BDRVReopenState *state) 2046 { 2047 BDRVQcow2State *s = state->bs->opaque; 2048 2049 GRAPH_RDLOCK_GUARD_MAINLOOP(); 2050 2051 qcow2_update_options_commit(state->bs, state->opaque); 2052 if (!s->data_file) { 2053 /* 2054 * If we don't have an external data file, s->data_file was cleared by 2055 * qcow2_reopen_prepare() and needs to be updated. 2056 */ 2057 s->data_file = state->bs->file; 2058 } 2059 g_free(state->opaque); 2060 } 2061 2062 static void qcow2_reopen_commit_post(BDRVReopenState *state) 2063 { 2064 GRAPH_RDLOCK_GUARD_MAINLOOP(); 2065 2066 if (state->flags & BDRV_O_RDWR) { 2067 Error *local_err = NULL; 2068 2069 if (qcow2_reopen_bitmaps_rw(state->bs, &local_err) < 0) { 2070 /* 2071 * This is not fatal, bitmaps just left read-only, so all following 2072 * writes will fail. User can remove read-only bitmaps to unblock 2073 * writes or retry reopen. 2074 */ 2075 error_reportf_err(local_err, 2076 "%s: Failed to make dirty bitmaps writable: ", 2077 bdrv_get_node_name(state->bs)); 2078 } 2079 } 2080 } 2081 2082 static void qcow2_reopen_abort(BDRVReopenState *state) 2083 { 2084 BDRVQcow2State *s = state->bs->opaque; 2085 2086 GRAPH_RDLOCK_GUARD_MAINLOOP(); 2087 2088 if (!s->data_file) { 2089 /* 2090 * If we don't have an external data file, s->data_file was cleared by 2091 * qcow2_reopen_prepare() and needs to be restored. 
2092 */ 2093 s->data_file = state->bs->file; 2094 } 2095 qcow2_update_options_abort(state->bs, state->opaque); 2096 g_free(state->opaque); 2097 } 2098 2099 static void qcow2_join_options(QDict *options, QDict *old_options) 2100 { 2101 bool has_new_overlap_template = 2102 qdict_haskey(options, QCOW2_OPT_OVERLAP) || 2103 qdict_haskey(options, QCOW2_OPT_OVERLAP_TEMPLATE); 2104 bool has_new_total_cache_size = 2105 qdict_haskey(options, QCOW2_OPT_CACHE_SIZE); 2106 bool has_all_cache_options; 2107 2108 /* New overlap template overrides all old overlap options */ 2109 if (has_new_overlap_template) { 2110 qdict_del(old_options, QCOW2_OPT_OVERLAP); 2111 qdict_del(old_options, QCOW2_OPT_OVERLAP_TEMPLATE); 2112 qdict_del(old_options, QCOW2_OPT_OVERLAP_MAIN_HEADER); 2113 qdict_del(old_options, QCOW2_OPT_OVERLAP_ACTIVE_L1); 2114 qdict_del(old_options, QCOW2_OPT_OVERLAP_ACTIVE_L2); 2115 qdict_del(old_options, QCOW2_OPT_OVERLAP_REFCOUNT_TABLE); 2116 qdict_del(old_options, QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK); 2117 qdict_del(old_options, QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE); 2118 qdict_del(old_options, QCOW2_OPT_OVERLAP_INACTIVE_L1); 2119 qdict_del(old_options, QCOW2_OPT_OVERLAP_INACTIVE_L2); 2120 } 2121 2122 /* New total cache size overrides all old options */ 2123 if (qdict_haskey(options, QCOW2_OPT_CACHE_SIZE)) { 2124 qdict_del(old_options, QCOW2_OPT_L2_CACHE_SIZE); 2125 qdict_del(old_options, QCOW2_OPT_REFCOUNT_CACHE_SIZE); 2126 } 2127 2128 qdict_join(options, old_options, false); 2129 2130 /* 2131 * If after merging all cache size options are set, an old total size is 2132 * overwritten. Do keep all options, however, if all three are new. The 2133 * resulting error message is what we want to happen. 2134 */ 2135 has_all_cache_options = 2136 qdict_haskey(options, QCOW2_OPT_CACHE_SIZE) || 2137 qdict_haskey(options, QCOW2_OPT_L2_CACHE_SIZE) || 2138 qdict_haskey(options, QCOW2_OPT_REFCOUNT_CACHE_SIZE); 2139 2140 if (has_all_cache_options && !has_new_total_cache_size) { 2141 qdict_del(options, QCOW2_OPT_CACHE_SIZE); 2142 } 2143 } 2144 2145 static int coroutine_fn GRAPH_RDLOCK 2146 qcow2_co_block_status(BlockDriverState *bs, unsigned int mode, 2147 int64_t offset, int64_t count, int64_t *pnum, 2148 int64_t *map, BlockDriverState **file) 2149 { 2150 BDRVQcow2State *s = bs->opaque; 2151 uint64_t host_offset; 2152 unsigned int bytes; 2153 QCow2SubclusterType type; 2154 int ret, status = 0; 2155 2156 qemu_co_mutex_lock(&s->lock); 2157 2158 if (!s->metadata_preallocation_checked) { 2159 ret = qcow2_detect_metadata_preallocation(bs); 2160 s->metadata_preallocation = (ret == 1); 2161 s->metadata_preallocation_checked = true; 2162 } 2163 2164 bytes = MIN(INT_MAX, count); 2165 ret = qcow2_get_host_offset(bs, offset, &bytes, &host_offset, &type); 2166 qemu_co_mutex_unlock(&s->lock); 2167 if (ret < 0) { 2168 return ret; 2169 } 2170 2171 *pnum = bytes; 2172 2173 if ((type == QCOW2_SUBCLUSTER_NORMAL || 2174 type == QCOW2_SUBCLUSTER_ZERO_ALLOC || 2175 type == QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC) && !s->crypto) { 2176 *map = host_offset; 2177 *file = s->data_file->bs; 2178 status |= BDRV_BLOCK_OFFSET_VALID; 2179 } 2180 if (type == QCOW2_SUBCLUSTER_ZERO_PLAIN || 2181 type == QCOW2_SUBCLUSTER_ZERO_ALLOC) { 2182 status |= BDRV_BLOCK_ZERO; 2183 } else if (type != QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN && 2184 type != QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC) { 2185 status |= BDRV_BLOCK_DATA; 2186 } 2187 if (s->metadata_preallocation && (status & BDRV_BLOCK_DATA) && 2188 (status & BDRV_BLOCK_OFFSET_VALID)) 2189 { 2190 status |= 
BDRV_BLOCK_RECURSE; 2191 } 2192 if (type == QCOW2_SUBCLUSTER_COMPRESSED) { 2193 status |= BDRV_BLOCK_COMPRESSED; 2194 } 2195 return status; 2196 } 2197 2198 static int coroutine_fn GRAPH_RDLOCK 2199 qcow2_handle_l2meta(BlockDriverState *bs, QCowL2Meta **pl2meta, bool link_l2) 2200 { 2201 int ret = 0; 2202 QCowL2Meta *l2meta = *pl2meta; 2203 2204 while (l2meta != NULL) { 2205 QCowL2Meta *next; 2206 2207 if (link_l2) { 2208 ret = qcow2_alloc_cluster_link_l2(bs, l2meta); 2209 if (ret) { 2210 goto out; 2211 } 2212 } else { 2213 qcow2_alloc_cluster_abort(bs, l2meta); 2214 } 2215 2216 /* Take the request off the list of running requests */ 2217 QLIST_REMOVE(l2meta, next_in_flight); 2218 2219 qemu_co_queue_restart_all(&l2meta->dependent_requests); 2220 2221 next = l2meta->next; 2222 g_free(l2meta); 2223 l2meta = next; 2224 } 2225 out: 2226 *pl2meta = l2meta; 2227 return ret; 2228 } 2229 2230 static int coroutine_fn GRAPH_RDLOCK 2231 qcow2_co_preadv_encrypted(BlockDriverState *bs, 2232 uint64_t host_offset, 2233 uint64_t offset, 2234 uint64_t bytes, 2235 QEMUIOVector *qiov, 2236 uint64_t qiov_offset) 2237 { 2238 int ret; 2239 BDRVQcow2State *s = bs->opaque; 2240 uint8_t *buf; 2241 2242 assert(bs->encrypted && s->crypto); 2243 assert(bytes <= QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); 2244 2245 /* 2246 * For encrypted images, read everything into a temporary 2247 * contiguous buffer on which the AES functions can work. 2248 * Also, decryption in a separate buffer is better as it 2249 * prevents the guest from learning information about the 2250 * encrypted nature of the virtual disk. 2251 */ 2252 2253 buf = qemu_try_blockalign(s->data_file->bs, bytes); 2254 if (buf == NULL) { 2255 return -ENOMEM; 2256 } 2257 2258 BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_AIO); 2259 ret = bdrv_co_pread(s->data_file, host_offset, bytes, buf, 0); 2260 if (ret < 0) { 2261 goto fail; 2262 } 2263 2264 if (qcow2_co_decrypt(bs, host_offset, offset, buf, bytes) < 0) 2265 { 2266 ret = -EIO; 2267 goto fail; 2268 } 2269 qemu_iovec_from_buf(qiov, qiov_offset, buf, bytes); 2270 2271 fail: 2272 qemu_vfree(buf); 2273 2274 return ret; 2275 } 2276 2277 typedef struct Qcow2AioTask { 2278 AioTask task; 2279 2280 BlockDriverState *bs; 2281 QCow2SubclusterType subcluster_type; /* only for read */ 2282 uint64_t host_offset; /* or l2_entry for compressed read */ 2283 uint64_t offset; 2284 uint64_t bytes; 2285 QEMUIOVector *qiov; 2286 uint64_t qiov_offset; 2287 QCowL2Meta *l2meta; /* only for write */ 2288 } Qcow2AioTask; 2289 2290 static coroutine_fn int qcow2_co_preadv_task_entry(AioTask *task); 2291 static coroutine_fn int qcow2_add_task(BlockDriverState *bs, 2292 AioTaskPool *pool, 2293 AioTaskFunc func, 2294 QCow2SubclusterType subcluster_type, 2295 uint64_t host_offset, 2296 uint64_t offset, 2297 uint64_t bytes, 2298 QEMUIOVector *qiov, 2299 size_t qiov_offset, 2300 QCowL2Meta *l2meta) 2301 { 2302 Qcow2AioTask local_task; 2303 Qcow2AioTask *task = pool ? g_new(Qcow2AioTask, 1) : &local_task; 2304 2305 *task = (Qcow2AioTask) { 2306 .task.func = func, 2307 .bs = bs, 2308 .subcluster_type = subcluster_type, 2309 .qiov = qiov, 2310 .host_offset = host_offset, 2311 .offset = offset, 2312 .bytes = bytes, 2313 .qiov_offset = qiov_offset, 2314 .l2meta = l2meta, 2315 }; 2316 2317 trace_qcow2_add_task(qemu_coroutine_self(), bs, pool, 2318 func == qcow2_co_preadv_task_entry ? 
"read" : "write", 2319 subcluster_type, host_offset, offset, bytes, 2320 qiov, qiov_offset); 2321 2322 if (!pool) { 2323 return func(&task->task); 2324 } 2325 2326 aio_task_pool_start_task(pool, &task->task); 2327 2328 return 0; 2329 } 2330 2331 static int coroutine_fn GRAPH_RDLOCK 2332 qcow2_co_preadv_task(BlockDriverState *bs, QCow2SubclusterType subc_type, 2333 uint64_t host_offset, uint64_t offset, uint64_t bytes, 2334 QEMUIOVector *qiov, size_t qiov_offset) 2335 { 2336 BDRVQcow2State *s = bs->opaque; 2337 2338 switch (subc_type) { 2339 case QCOW2_SUBCLUSTER_ZERO_PLAIN: 2340 case QCOW2_SUBCLUSTER_ZERO_ALLOC: 2341 /* Both zero types are handled in qcow2_co_preadv_part */ 2342 g_assert_not_reached(); 2343 2344 case QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN: 2345 case QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC: 2346 assert(bs->backing); /* otherwise handled in qcow2_co_preadv_part */ 2347 2348 BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_BACKING_AIO); 2349 return bdrv_co_preadv_part(bs->backing, offset, bytes, 2350 qiov, qiov_offset, 0); 2351 2352 case QCOW2_SUBCLUSTER_COMPRESSED: 2353 return qcow2_co_preadv_compressed(bs, host_offset, 2354 offset, bytes, qiov, qiov_offset); 2355 2356 case QCOW2_SUBCLUSTER_NORMAL: 2357 if (bs->encrypted) { 2358 return qcow2_co_preadv_encrypted(bs, host_offset, 2359 offset, bytes, qiov, qiov_offset); 2360 } 2361 2362 BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_AIO); 2363 return bdrv_co_preadv_part(s->data_file, host_offset, 2364 bytes, qiov, qiov_offset, 0); 2365 2366 default: 2367 g_assert_not_reached(); 2368 } 2369 2370 g_assert_not_reached(); 2371 } 2372 2373 /* 2374 * This function can count as GRAPH_RDLOCK because qcow2_co_preadv_part() holds 2375 * the graph lock and keeps it until this coroutine has terminated. 2376 */ 2377 static int coroutine_fn GRAPH_RDLOCK qcow2_co_preadv_task_entry(AioTask *task) 2378 { 2379 Qcow2AioTask *t = container_of(task, Qcow2AioTask, task); 2380 2381 assert(!t->l2meta); 2382 2383 return qcow2_co_preadv_task(t->bs, t->subcluster_type, 2384 t->host_offset, t->offset, t->bytes, 2385 t->qiov, t->qiov_offset); 2386 } 2387 2388 static int coroutine_fn GRAPH_RDLOCK 2389 qcow2_co_preadv_part(BlockDriverState *bs, int64_t offset, int64_t bytes, 2390 QEMUIOVector *qiov, size_t qiov_offset, 2391 BdrvRequestFlags flags) 2392 { 2393 BDRVQcow2State *s = bs->opaque; 2394 int ret = 0; 2395 unsigned int cur_bytes; /* number of bytes in current iteration */ 2396 uint64_t host_offset = 0; 2397 QCow2SubclusterType type; 2398 AioTaskPool *aio = NULL; 2399 2400 while (bytes != 0 && aio_task_pool_status(aio) == 0) { 2401 /* prepare next request */ 2402 cur_bytes = MIN(bytes, INT_MAX); 2403 if (s->crypto) { 2404 cur_bytes = MIN(cur_bytes, 2405 QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); 2406 } 2407 2408 qemu_co_mutex_lock(&s->lock); 2409 ret = qcow2_get_host_offset(bs, offset, &cur_bytes, 2410 &host_offset, &type); 2411 qemu_co_mutex_unlock(&s->lock); 2412 if (ret < 0) { 2413 goto out; 2414 } 2415 2416 if (type == QCOW2_SUBCLUSTER_ZERO_PLAIN || 2417 type == QCOW2_SUBCLUSTER_ZERO_ALLOC || 2418 (type == QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN && !bs->backing) || 2419 (type == QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC && !bs->backing)) 2420 { 2421 qemu_iovec_memset(qiov, qiov_offset, 0, cur_bytes); 2422 } else { 2423 if (!aio && cur_bytes != bytes) { 2424 aio = aio_task_pool_new(QCOW2_MAX_WORKERS); 2425 } 2426 ret = qcow2_add_task(bs, aio, qcow2_co_preadv_task_entry, type, 2427 host_offset, offset, cur_bytes, 2428 qiov, qiov_offset, NULL); 2429 if (ret < 0) { 2430 goto out; 2431 } 2432 
} 2433 2434 bytes -= cur_bytes; 2435 offset += cur_bytes; 2436 qiov_offset += cur_bytes; 2437 } 2438 2439 out: 2440 if (aio) { 2441 aio_task_pool_wait_all(aio); 2442 if (ret == 0) { 2443 ret = aio_task_pool_status(aio); 2444 } 2445 g_free(aio); 2446 } 2447 2448 return ret; 2449 } 2450 2451 /* Check if it's possible to merge a write request with the writing of 2452 * the data from the COW regions */ 2453 static bool merge_cow(uint64_t offset, unsigned bytes, 2454 QEMUIOVector *qiov, size_t qiov_offset, 2455 QCowL2Meta *l2meta) 2456 { 2457 QCowL2Meta *m; 2458 2459 for (m = l2meta; m != NULL; m = m->next) { 2460 /* If both COW regions are empty then there's nothing to merge */ 2461 if (m->cow_start.nb_bytes == 0 && m->cow_end.nb_bytes == 0) { 2462 continue; 2463 } 2464 2465 /* If COW regions are handled already, skip this too */ 2466 if (m->skip_cow) { 2467 continue; 2468 } 2469 2470 /* 2471 * The write request should start immediately after the first 2472 * COW region. This does not always happen because the area 2473 * touched by the request can be larger than the one defined 2474 * by @m (a single request can span an area consisting of a 2475 * mix of previously unallocated and allocated clusters, that 2476 * is why @l2meta is a list). 2477 */ 2478 if (l2meta_cow_start(m) + m->cow_start.nb_bytes != offset) { 2479 /* In this case the request starts before this region */ 2480 assert(offset < l2meta_cow_start(m)); 2481 assert(m->cow_start.nb_bytes == 0); 2482 continue; 2483 } 2484 2485 /* The write request should end immediately before the second 2486 * COW region (see above for why it does not always happen) */ 2487 if (m->offset + m->cow_end.offset != offset + bytes) { 2488 assert(offset + bytes > m->offset + m->cow_end.offset); 2489 assert(m->cow_end.nb_bytes == 0); 2490 continue; 2491 } 2492 2493 /* Make sure that adding both COW regions to the QEMUIOVector 2494 * does not exceed IOV_MAX */ 2495 if (qemu_iovec_subvec_niov(qiov, qiov_offset, bytes) > IOV_MAX - 2) { 2496 continue; 2497 } 2498 2499 m->data_qiov = qiov; 2500 m->data_qiov_offset = qiov_offset; 2501 return true; 2502 } 2503 2504 return false; 2505 } 2506 2507 /* 2508 * Return 1 if the COW regions read as zeroes, 0 if not, < 0 on error. 2509 * Note that returning 0 does not guarantee non-zero data. 2510 */ 2511 static int coroutine_fn GRAPH_RDLOCK 2512 is_zero_cow(BlockDriverState *bs, QCowL2Meta *m) 2513 { 2514 /* 2515 * This check is designed for optimization shortcut so it must be 2516 * efficient. 2517 * Instead of is_zero(), use bdrv_co_is_zero_fast() as it is 2518 * faster (but not as accurate and can result in false negatives). 
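     * A false negative here is harmless: the caller simply keeps the COW
     * regions and writes them out instead of taking the zero-write shortcut.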
2519 */ 2520 int ret = bdrv_co_is_zero_fast(bs, m->offset + m->cow_start.offset, 2521 m->cow_start.nb_bytes); 2522 if (ret <= 0) { 2523 return ret; 2524 } 2525 2526 return bdrv_co_is_zero_fast(bs, m->offset + m->cow_end.offset, 2527 m->cow_end.nb_bytes); 2528 } 2529 2530 static int coroutine_fn GRAPH_RDLOCK 2531 handle_alloc_space(BlockDriverState *bs, QCowL2Meta *l2meta) 2532 { 2533 BDRVQcow2State *s = bs->opaque; 2534 QCowL2Meta *m; 2535 2536 if (!(s->data_file->bs->supported_zero_flags & BDRV_REQ_NO_FALLBACK)) { 2537 return 0; 2538 } 2539 2540 if (bs->encrypted) { 2541 return 0; 2542 } 2543 2544 for (m = l2meta; m != NULL; m = m->next) { 2545 int ret; 2546 uint64_t start_offset = m->alloc_offset + m->cow_start.offset; 2547 unsigned nb_bytes = m->cow_end.offset + m->cow_end.nb_bytes - 2548 m->cow_start.offset; 2549 2550 if (!m->cow_start.nb_bytes && !m->cow_end.nb_bytes) { 2551 continue; 2552 } 2553 2554 ret = is_zero_cow(bs, m); 2555 if (ret < 0) { 2556 return ret; 2557 } else if (ret == 0) { 2558 continue; 2559 } 2560 2561 /* 2562 * instead of writing zero COW buffers, 2563 * efficiently zero out the whole clusters 2564 */ 2565 2566 ret = qcow2_pre_write_overlap_check(bs, 0, start_offset, nb_bytes, 2567 true); 2568 if (ret < 0) { 2569 return ret; 2570 } 2571 2572 BLKDBG_CO_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC_SPACE); 2573 ret = bdrv_co_pwrite_zeroes(s->data_file, start_offset, nb_bytes, 2574 BDRV_REQ_NO_FALLBACK); 2575 if (ret < 0) { 2576 if (ret != -ENOTSUP && ret != -EAGAIN) { 2577 return ret; 2578 } 2579 continue; 2580 } 2581 2582 trace_qcow2_skip_cow(qemu_coroutine_self(), m->offset, m->nb_clusters); 2583 m->skip_cow = true; 2584 } 2585 return 0; 2586 } 2587 2588 /* 2589 * qcow2_co_pwritev_task 2590 * Called with s->lock unlocked 2591 * l2meta - if not NULL, qcow2_co_pwritev_task() will consume it. Caller must 2592 * not use it somehow after qcow2_co_pwritev_task() call 2593 */ 2594 static coroutine_fn GRAPH_RDLOCK 2595 int qcow2_co_pwritev_task(BlockDriverState *bs, uint64_t host_offset, 2596 uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, 2597 uint64_t qiov_offset, QCowL2Meta *l2meta) 2598 { 2599 int ret; 2600 BDRVQcow2State *s = bs->opaque; 2601 void *crypt_buf = NULL; 2602 QEMUIOVector encrypted_qiov; 2603 2604 if (bs->encrypted) { 2605 assert(s->crypto); 2606 assert(bytes <= QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); 2607 crypt_buf = qemu_try_blockalign(bs->file->bs, bytes); 2608 if (crypt_buf == NULL) { 2609 ret = -ENOMEM; 2610 goto out_unlocked; 2611 } 2612 qemu_iovec_to_buf(qiov, qiov_offset, crypt_buf, bytes); 2613 2614 if (qcow2_co_encrypt(bs, host_offset, offset, crypt_buf, bytes) < 0) { 2615 ret = -EIO; 2616 goto out_unlocked; 2617 } 2618 2619 qemu_iovec_init_buf(&encrypted_qiov, crypt_buf, bytes); 2620 qiov = &encrypted_qiov; 2621 qiov_offset = 0; 2622 } 2623 2624 /* Try to efficiently initialize the physical space with zeroes */ 2625 ret = handle_alloc_space(bs, l2meta); 2626 if (ret < 0) { 2627 goto out_unlocked; 2628 } 2629 2630 /* 2631 * If we need to do COW, check if it's possible to merge the 2632 * writing of the guest data together with that of the COW regions. 2633 * If it's not possible (or not necessary) then write the 2634 * guest data now. 
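     * (When merging is possible, merge_cow() attaches the guest data to the
     * allocation request via m->data_qiov, and it gets written out together
     * with the COW regions when the L2 metadata is handled below.)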
2635 */ 2636 if (!merge_cow(offset, bytes, qiov, qiov_offset, l2meta)) { 2637 BLKDBG_CO_EVENT(bs->file, BLKDBG_WRITE_AIO); 2638 trace_qcow2_writev_data(qemu_coroutine_self(), host_offset); 2639 ret = bdrv_co_pwritev_part(s->data_file, host_offset, 2640 bytes, qiov, qiov_offset, 0); 2641 if (ret < 0) { 2642 goto out_unlocked; 2643 } 2644 } 2645 2646 qemu_co_mutex_lock(&s->lock); 2647 2648 ret = qcow2_handle_l2meta(bs, &l2meta, true); 2649 goto out_locked; 2650 2651 out_unlocked: 2652 qemu_co_mutex_lock(&s->lock); 2653 2654 out_locked: 2655 qcow2_handle_l2meta(bs, &l2meta, false); 2656 qemu_co_mutex_unlock(&s->lock); 2657 2658 qemu_vfree(crypt_buf); 2659 2660 return ret; 2661 } 2662 2663 /* 2664 * This function can count as GRAPH_RDLOCK because qcow2_co_pwritev_part() holds 2665 * the graph lock and keeps it until this coroutine has terminated. 2666 */ 2667 static coroutine_fn GRAPH_RDLOCK int qcow2_co_pwritev_task_entry(AioTask *task) 2668 { 2669 Qcow2AioTask *t = container_of(task, Qcow2AioTask, task); 2670 2671 assert(!t->subcluster_type); 2672 2673 return qcow2_co_pwritev_task(t->bs, t->host_offset, 2674 t->offset, t->bytes, t->qiov, t->qiov_offset, 2675 t->l2meta); 2676 } 2677 2678 static int coroutine_fn GRAPH_RDLOCK 2679 qcow2_co_pwritev_part(BlockDriverState *bs, int64_t offset, int64_t bytes, 2680 QEMUIOVector *qiov, size_t qiov_offset, 2681 BdrvRequestFlags flags) 2682 { 2683 BDRVQcow2State *s = bs->opaque; 2684 int offset_in_cluster; 2685 int ret; 2686 unsigned int cur_bytes; /* number of sectors in current iteration */ 2687 uint64_t host_offset; 2688 QCowL2Meta *l2meta = NULL; 2689 AioTaskPool *aio = NULL; 2690 2691 trace_qcow2_writev_start_req(qemu_coroutine_self(), offset, bytes); 2692 2693 while (bytes != 0 && aio_task_pool_status(aio) == 0) { 2694 2695 l2meta = NULL; 2696 2697 trace_qcow2_writev_start_part(qemu_coroutine_self()); 2698 offset_in_cluster = offset_into_cluster(s, offset); 2699 cur_bytes = MIN(bytes, INT_MAX); 2700 if (bs->encrypted) { 2701 cur_bytes = MIN(cur_bytes, 2702 QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size 2703 - offset_in_cluster); 2704 } 2705 2706 qemu_co_mutex_lock(&s->lock); 2707 2708 ret = qcow2_alloc_host_offset(bs, offset, &cur_bytes, 2709 &host_offset, &l2meta); 2710 if (ret < 0) { 2711 goto out_locked; 2712 } 2713 2714 ret = qcow2_pre_write_overlap_check(bs, 0, host_offset, 2715 cur_bytes, true); 2716 if (ret < 0) { 2717 goto out_locked; 2718 } 2719 2720 qemu_co_mutex_unlock(&s->lock); 2721 2722 if (!aio && cur_bytes != bytes) { 2723 aio = aio_task_pool_new(QCOW2_MAX_WORKERS); 2724 } 2725 ret = qcow2_add_task(bs, aio, qcow2_co_pwritev_task_entry, 0, 2726 host_offset, offset, 2727 cur_bytes, qiov, qiov_offset, l2meta); 2728 l2meta = NULL; /* l2meta is consumed by qcow2_co_pwritev_task() */ 2729 if (ret < 0) { 2730 goto fail_nometa; 2731 } 2732 2733 bytes -= cur_bytes; 2734 offset += cur_bytes; 2735 qiov_offset += cur_bytes; 2736 trace_qcow2_writev_done_part(qemu_coroutine_self(), cur_bytes); 2737 } 2738 ret = 0; 2739 2740 qemu_co_mutex_lock(&s->lock); 2741 2742 out_locked: 2743 qcow2_handle_l2meta(bs, &l2meta, false); 2744 2745 qemu_co_mutex_unlock(&s->lock); 2746 2747 fail_nometa: 2748 if (aio) { 2749 aio_task_pool_wait_all(aio); 2750 if (ret == 0) { 2751 ret = aio_task_pool_status(aio); 2752 } 2753 g_free(aio); 2754 } 2755 2756 trace_qcow2_writev_done_req(qemu_coroutine_self(), ret); 2757 2758 return ret; 2759 } 2760 2761 static int GRAPH_RDLOCK qcow2_inactivate(BlockDriverState *bs) 2762 { 2763 BDRVQcow2State *s = bs->opaque; 2764 int ret, 
result = 0; 2765 Error *local_err = NULL; 2766 2767 qcow2_store_persistent_dirty_bitmaps(bs, true, &local_err); 2768 if (local_err != NULL) { 2769 result = -EINVAL; 2770 error_reportf_err(local_err, "Lost persistent bitmaps during " 2771 "inactivation of node '%s': ", 2772 bdrv_get_device_or_node_name(bs)); 2773 } 2774 2775 ret = qcow2_cache_flush(bs, s->l2_table_cache); 2776 if (ret) { 2777 result = ret; 2778 error_report("Failed to flush the L2 table cache: %s", 2779 strerror(-ret)); 2780 } 2781 2782 ret = qcow2_cache_flush(bs, s->refcount_block_cache); 2783 if (ret) { 2784 result = ret; 2785 error_report("Failed to flush the refcount block cache: %s", 2786 strerror(-ret)); 2787 } 2788 2789 if (result == 0) { 2790 qcow2_mark_clean(bs); 2791 } 2792 2793 return result; 2794 } 2795 2796 static void coroutine_mixed_fn GRAPH_RDLOCK 2797 qcow2_do_close(BlockDriverState *bs, bool close_data_file) 2798 { 2799 BDRVQcow2State *s = bs->opaque; 2800 qemu_vfree(s->l1_table); 2801 /* else pre-write overlap checks in cache_destroy may crash */ 2802 s->l1_table = NULL; 2803 2804 if (!(s->flags & BDRV_O_INACTIVE)) { 2805 qcow2_inactivate(bs); 2806 } 2807 2808 cache_clean_timer_del(bs); 2809 qcow2_cache_destroy(s->l2_table_cache); 2810 qcow2_cache_destroy(s->refcount_block_cache); 2811 2812 qcrypto_block_free(s->crypto); 2813 s->crypto = NULL; 2814 qapi_free_QCryptoBlockOpenOptions(s->crypto_opts); 2815 2816 g_free(s->unknown_header_fields); 2817 cleanup_unknown_header_ext(bs); 2818 2819 g_free(s->image_data_file); 2820 g_free(s->image_backing_file); 2821 g_free(s->image_backing_format); 2822 2823 if (close_data_file && has_data_file(bs)) { 2824 GLOBAL_STATE_CODE(); 2825 bdrv_graph_rdunlock_main_loop(); 2826 bdrv_graph_wrlock_drained(); 2827 bdrv_unref_child(bs, s->data_file); 2828 bdrv_graph_wrunlock(); 2829 s->data_file = NULL; 2830 bdrv_graph_rdlock_main_loop(); 2831 } 2832 2833 qcow2_refcount_close(bs); 2834 qcow2_free_snapshots(bs); 2835 } 2836 2837 static void GRAPH_UNLOCKED qcow2_close(BlockDriverState *bs) 2838 { 2839 GLOBAL_STATE_CODE(); 2840 GRAPH_RDLOCK_GUARD_MAINLOOP(); 2841 2842 qcow2_do_close(bs, true); 2843 } 2844 2845 static void coroutine_fn GRAPH_RDLOCK 2846 qcow2_co_invalidate_cache(BlockDriverState *bs, Error **errp) 2847 { 2848 ERRP_GUARD(); 2849 BDRVQcow2State *s = bs->opaque; 2850 BdrvChild *data_file; 2851 int flags = s->flags; 2852 QCryptoBlock *crypto = NULL; 2853 QDict *options; 2854 int ret; 2855 2856 /* 2857 * Backing files are read-only which makes all of their metadata immutable, 2858 * that means we don't have to worry about reopening them here. 2859 */ 2860 2861 crypto = s->crypto; 2862 s->crypto = NULL; 2863 2864 /* 2865 * Do not reopen s->data_file (i.e., have qcow2_do_close() not close it, 2866 * and then prevent qcow2_do_open() from opening it), because this function 2867 * runs in the I/O path and as such we must not invoke global-state 2868 * functions like bdrv_unref_child() and bdrv_open_child(). 
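     * Instead, s->data_file is the one field that is preserved across the
     * memset() below, and qcow2_do_open() is called with open_data_file=false
     * so that it keeps using the existing child.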
2869 */ 2870 2871 qcow2_do_close(bs, false); 2872 2873 data_file = s->data_file; 2874 memset(s, 0, sizeof(BDRVQcow2State)); 2875 s->data_file = data_file; 2876 2877 options = qdict_clone_shallow(bs->options); 2878 2879 flags &= ~BDRV_O_INACTIVE; 2880 qemu_co_mutex_lock(&s->lock); 2881 ret = qcow2_do_open(bs, options, flags, false, errp); 2882 qemu_co_mutex_unlock(&s->lock); 2883 qobject_unref(options); 2884 if (ret < 0) { 2885 error_prepend(errp, "Could not reopen qcow2 layer: "); 2886 bs->drv = NULL; 2887 return; 2888 } 2889 2890 s->crypto = crypto; 2891 } 2892 2893 static size_t header_ext_add(char *buf, uint32_t magic, const void *s, 2894 size_t len, size_t buflen) 2895 { 2896 QCowExtension *ext_backing_fmt = (QCowExtension*) buf; 2897 size_t ext_len = sizeof(QCowExtension) + ((len + 7) & ~7); 2898 2899 if (buflen < ext_len) { 2900 return -ENOSPC; 2901 } 2902 2903 *ext_backing_fmt = (QCowExtension) { 2904 .magic = cpu_to_be32(magic), 2905 .len = cpu_to_be32(len), 2906 }; 2907 2908 if (len) { 2909 memcpy(buf + sizeof(QCowExtension), s, len); 2910 } 2911 2912 return ext_len; 2913 } 2914 2915 /* 2916 * Updates the qcow2 header, including the variable length parts of it, i.e. 2917 * the backing file name and all extensions. qcow2 was not designed to allow 2918 * such changes, so if we run out of space (we can only use the first cluster) 2919 * this function may fail. 2920 * 2921 * Returns 0 on success, -errno in error cases. 2922 */ 2923 int qcow2_update_header(BlockDriverState *bs) 2924 { 2925 BDRVQcow2State *s = bs->opaque; 2926 QCowHeader *header; 2927 char *buf; 2928 size_t buflen = s->cluster_size; 2929 int ret; 2930 uint64_t total_size; 2931 uint32_t refcount_table_clusters; 2932 size_t header_length; 2933 Qcow2UnknownHeaderExtension *uext; 2934 2935 buf = qemu_blockalign(bs, buflen); 2936 2937 /* Header structure */ 2938 header = (QCowHeader*) buf; 2939 2940 if (buflen < sizeof(*header)) { 2941 ret = -ENOSPC; 2942 goto fail; 2943 } 2944 2945 header_length = sizeof(*header) + s->unknown_header_fields_size; 2946 total_size = bs->total_sectors * BDRV_SECTOR_SIZE; 2947 refcount_table_clusters = s->refcount_table_size >> (s->cluster_bits - 3); 2948 2949 ret = validate_compression_type(s, NULL); 2950 if (ret) { 2951 goto fail; 2952 } 2953 2954 *header = (QCowHeader) { 2955 /* Version 2 fields */ 2956 .magic = cpu_to_be32(QCOW_MAGIC), 2957 .version = cpu_to_be32(s->qcow_version), 2958 .backing_file_offset = 0, 2959 .backing_file_size = 0, 2960 .cluster_bits = cpu_to_be32(s->cluster_bits), 2961 .size = cpu_to_be64(total_size), 2962 .crypt_method = cpu_to_be32(s->crypt_method_header), 2963 .l1_size = cpu_to_be32(s->l1_size), 2964 .l1_table_offset = cpu_to_be64(s->l1_table_offset), 2965 .refcount_table_offset = cpu_to_be64(s->refcount_table_offset), 2966 .refcount_table_clusters = cpu_to_be32(refcount_table_clusters), 2967 .nb_snapshots = cpu_to_be32(s->nb_snapshots), 2968 .snapshots_offset = cpu_to_be64(s->snapshots_offset), 2969 2970 /* Version 3 fields */ 2971 .incompatible_features = cpu_to_be64(s->incompatible_features), 2972 .compatible_features = cpu_to_be64(s->compatible_features), 2973 .autoclear_features = cpu_to_be64(s->autoclear_features), 2974 .refcount_order = cpu_to_be32(s->refcount_order), 2975 .header_length = cpu_to_be32(header_length), 2976 .compression_type = s->compression_type, 2977 }; 2978 2979 /* For older versions, write a shorter header */ 2980 switch (s->qcow_version) { 2981 case 2: 2982 ret = offsetof(QCowHeader, incompatible_features); 2983 break; 2984 case 3: 2985 
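        /* Version 3 writes the full structure, including the v3-only fields */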
ret = sizeof(*header); 2986 break; 2987 default: 2988 ret = -EINVAL; 2989 goto fail; 2990 } 2991 2992 buf += ret; 2993 buflen -= ret; 2994 memset(buf, 0, buflen); 2995 2996 /* Preserve any unknown field in the header */ 2997 if (s->unknown_header_fields_size) { 2998 if (buflen < s->unknown_header_fields_size) { 2999 ret = -ENOSPC; 3000 goto fail; 3001 } 3002 3003 memcpy(buf, s->unknown_header_fields, s->unknown_header_fields_size); 3004 buf += s->unknown_header_fields_size; 3005 buflen -= s->unknown_header_fields_size; 3006 } 3007 3008 /* Backing file format header extension */ 3009 if (s->image_backing_format) { 3010 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_BACKING_FORMAT, 3011 s->image_backing_format, 3012 strlen(s->image_backing_format), 3013 buflen); 3014 if (ret < 0) { 3015 goto fail; 3016 } 3017 3018 buf += ret; 3019 buflen -= ret; 3020 } 3021 3022 /* External data file header extension */ 3023 if (has_data_file(bs) && s->image_data_file) { 3024 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_DATA_FILE, 3025 s->image_data_file, strlen(s->image_data_file), 3026 buflen); 3027 if (ret < 0) { 3028 goto fail; 3029 } 3030 3031 buf += ret; 3032 buflen -= ret; 3033 } 3034 3035 /* Full disk encryption header pointer extension */ 3036 if (s->crypto_header.offset != 0) { 3037 s->crypto_header.offset = cpu_to_be64(s->crypto_header.offset); 3038 s->crypto_header.length = cpu_to_be64(s->crypto_header.length); 3039 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_CRYPTO_HEADER, 3040 &s->crypto_header, sizeof(s->crypto_header), 3041 buflen); 3042 s->crypto_header.offset = be64_to_cpu(s->crypto_header.offset); 3043 s->crypto_header.length = be64_to_cpu(s->crypto_header.length); 3044 if (ret < 0) { 3045 goto fail; 3046 } 3047 buf += ret; 3048 buflen -= ret; 3049 } 3050 3051 /* 3052 * Feature table. A mere 8 feature names occupies 392 bytes, and 3053 * when coupled with the v3 minimum header of 104 bytes plus the 3054 * 8-byte end-of-extension marker, that would leave only 8 bytes 3055 * for a backing file name in an image with 512-byte clusters. 3056 * Thus, we choose to omit this header for cluster sizes 4k and 3057 * smaller. 
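     * (Each Qcow2Feature entry is 48 bytes, so the 8 entries below take
     * 8 * 48 = 384 bytes plus the 8-byte QCowExtension header = 392; together
     * with the 104-byte header and the 8-byte end marker that is 504 bytes.)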
3058 */ 3059 if (s->qcow_version >= 3 && s->cluster_size > 4096) { 3060 static const Qcow2Feature features[] = { 3061 { 3062 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE, 3063 .bit = QCOW2_INCOMPAT_DIRTY_BITNR, 3064 .name = "dirty bit", 3065 }, 3066 { 3067 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE, 3068 .bit = QCOW2_INCOMPAT_CORRUPT_BITNR, 3069 .name = "corrupt bit", 3070 }, 3071 { 3072 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE, 3073 .bit = QCOW2_INCOMPAT_DATA_FILE_BITNR, 3074 .name = "external data file", 3075 }, 3076 { 3077 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE, 3078 .bit = QCOW2_INCOMPAT_COMPRESSION_BITNR, 3079 .name = "compression type", 3080 }, 3081 { 3082 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE, 3083 .bit = QCOW2_INCOMPAT_EXTL2_BITNR, 3084 .name = "extended L2 entries", 3085 }, 3086 { 3087 .type = QCOW2_FEAT_TYPE_COMPATIBLE, 3088 .bit = QCOW2_COMPAT_LAZY_REFCOUNTS_BITNR, 3089 .name = "lazy refcounts", 3090 }, 3091 { 3092 .type = QCOW2_FEAT_TYPE_AUTOCLEAR, 3093 .bit = QCOW2_AUTOCLEAR_BITMAPS_BITNR, 3094 .name = "bitmaps", 3095 }, 3096 { 3097 .type = QCOW2_FEAT_TYPE_AUTOCLEAR, 3098 .bit = QCOW2_AUTOCLEAR_DATA_FILE_RAW_BITNR, 3099 .name = "raw external data", 3100 }, 3101 }; 3102 3103 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_FEATURE_TABLE, 3104 features, sizeof(features), buflen); 3105 if (ret < 0) { 3106 goto fail; 3107 } 3108 buf += ret; 3109 buflen -= ret; 3110 } 3111 3112 /* Bitmap extension */ 3113 if (s->nb_bitmaps > 0) { 3114 Qcow2BitmapHeaderExt bitmaps_header = { 3115 .nb_bitmaps = cpu_to_be32(s->nb_bitmaps), 3116 .bitmap_directory_size = 3117 cpu_to_be64(s->bitmap_directory_size), 3118 .bitmap_directory_offset = 3119 cpu_to_be64(s->bitmap_directory_offset) 3120 }; 3121 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_BITMAPS, 3122 &bitmaps_header, sizeof(bitmaps_header), 3123 buflen); 3124 if (ret < 0) { 3125 goto fail; 3126 } 3127 buf += ret; 3128 buflen -= ret; 3129 } 3130 3131 /* Keep unknown header extensions */ 3132 QLIST_FOREACH(uext, &s->unknown_header_ext, next) { 3133 ret = header_ext_add(buf, uext->magic, uext->data, uext->len, buflen); 3134 if (ret < 0) { 3135 goto fail; 3136 } 3137 3138 buf += ret; 3139 buflen -= ret; 3140 } 3141 3142 /* End of header extensions */ 3143 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_END, NULL, 0, buflen); 3144 if (ret < 0) { 3145 goto fail; 3146 } 3147 3148 buf += ret; 3149 buflen -= ret; 3150 3151 /* Backing file name */ 3152 if (s->image_backing_file) { 3153 size_t backing_file_len = strlen(s->image_backing_file); 3154 3155 if (buflen < backing_file_len) { 3156 ret = -ENOSPC; 3157 goto fail; 3158 } 3159 3160 /* Using strncpy is ok here, since buf is not NUL-terminated. 
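         * The name length is stored explicitly in backing_file_size below,
         * so the header never needs a terminating NUL.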
*/ 3161 strncpy(buf, s->image_backing_file, buflen); 3162 3163 header->backing_file_offset = cpu_to_be64(buf - ((char*) header)); 3164 header->backing_file_size = cpu_to_be32(backing_file_len); 3165 } 3166 3167 /* Write the new header */ 3168 ret = bdrv_pwrite(bs->file, 0, s->cluster_size, header, 0); 3169 if (ret < 0) { 3170 goto fail; 3171 } 3172 3173 ret = 0; 3174 fail: 3175 qemu_vfree(header); 3176 return ret; 3177 } 3178 3179 static int coroutine_fn GRAPH_RDLOCK 3180 qcow2_co_change_backing_file(BlockDriverState *bs, const char *backing_file, 3181 const char *backing_fmt) 3182 { 3183 BDRVQcow2State *s = bs->opaque; 3184 3185 /* Adding a backing file means that the external data file alone won't be 3186 * enough to make sense of the content */ 3187 if (backing_file && data_file_is_raw(bs)) { 3188 return -EINVAL; 3189 } 3190 3191 if (backing_file && strlen(backing_file) > 1023) { 3192 return -EINVAL; 3193 } 3194 3195 pstrcpy(bs->auto_backing_file, sizeof(bs->auto_backing_file), 3196 backing_file ?: ""); 3197 pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: ""); 3198 pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: ""); 3199 3200 g_free(s->image_backing_file); 3201 g_free(s->image_backing_format); 3202 3203 s->image_backing_file = backing_file ? g_strdup(bs->backing_file) : NULL; 3204 s->image_backing_format = backing_fmt ? g_strdup(bs->backing_format) : NULL; 3205 3206 return qcow2_update_header(bs); 3207 } 3208 3209 static int coroutine_fn GRAPH_RDLOCK 3210 qcow2_set_up_encryption(BlockDriverState *bs, 3211 QCryptoBlockCreateOptions *cryptoopts, 3212 Error **errp) 3213 { 3214 BDRVQcow2State *s = bs->opaque; 3215 QCryptoBlock *crypto = NULL; 3216 int fmt, ret; 3217 3218 switch (cryptoopts->format) { 3219 case QCRYPTO_BLOCK_FORMAT_LUKS: 3220 fmt = QCOW_CRYPT_LUKS; 3221 break; 3222 case QCRYPTO_BLOCK_FORMAT_QCOW: 3223 fmt = QCOW_CRYPT_AES; 3224 break; 3225 default: 3226 error_setg(errp, "Crypto format not supported in qcow2"); 3227 return -EINVAL; 3228 } 3229 3230 s->crypt_method_header = fmt; 3231 3232 crypto = qcrypto_block_create(cryptoopts, "encrypt.", 3233 qcow2_crypto_hdr_init_func, 3234 qcow2_crypto_hdr_write_func, 3235 bs, 0, errp); 3236 if (!crypto) { 3237 return -EINVAL; 3238 } 3239 3240 ret = qcow2_update_header(bs); 3241 if (ret < 0) { 3242 error_setg_errno(errp, -ret, "Could not write encryption header"); 3243 goto out; 3244 } 3245 3246 ret = 0; 3247 out: 3248 qcrypto_block_free(crypto); 3249 return ret; 3250 } 3251 3252 /** 3253 * Preallocates metadata structures for data clusters between @offset (in the 3254 * guest disk) and @new_length (which is thus generally the new guest disk 3255 * size). 3256 * 3257 * Returns: 0 on success, -errno on failure. 
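 * @mode is only applied to the final resize of the data file; for metadata
 * preallocation it is degraded to PREALLOC_MODE_OFF there, since the metadata
 * itself has already been allocated by the loop above.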
3258 */ 3259 static int coroutine_fn GRAPH_RDLOCK 3260 preallocate_co(BlockDriverState *bs, uint64_t offset, uint64_t new_length, 3261 PreallocMode mode, Error **errp) 3262 { 3263 BDRVQcow2State *s = bs->opaque; 3264 uint64_t bytes; 3265 uint64_t host_offset = 0; 3266 int64_t file_length; 3267 unsigned int cur_bytes; 3268 int ret; 3269 QCowL2Meta *meta = NULL, *m; 3270 3271 assert(offset <= new_length); 3272 bytes = new_length - offset; 3273 3274 while (bytes) { 3275 cur_bytes = MIN(bytes, QEMU_ALIGN_DOWN(INT_MAX, s->cluster_size)); 3276 ret = qcow2_alloc_host_offset(bs, offset, &cur_bytes, 3277 &host_offset, &meta); 3278 if (ret < 0) { 3279 error_setg_errno(errp, -ret, "Allocating clusters failed"); 3280 goto out; 3281 } 3282 3283 for (m = meta; m != NULL; m = m->next) { 3284 m->prealloc = true; 3285 } 3286 3287 ret = qcow2_handle_l2meta(bs, &meta, true); 3288 if (ret < 0) { 3289 error_setg_errno(errp, -ret, "Mapping clusters failed"); 3290 goto out; 3291 } 3292 3293 /* TODO Preallocate data if requested */ 3294 3295 bytes -= cur_bytes; 3296 offset += cur_bytes; 3297 } 3298 3299 /* 3300 * It is expected that the image file is large enough to actually contain 3301 * all of the allocated clusters (otherwise we get failing reads after 3302 * EOF). Extend the image to the last allocated sector. 3303 */ 3304 file_length = bdrv_co_getlength(s->data_file->bs); 3305 if (file_length < 0) { 3306 error_setg_errno(errp, -file_length, "Could not get file size"); 3307 ret = file_length; 3308 goto out; 3309 } 3310 3311 if (host_offset + cur_bytes > file_length) { 3312 if (mode == PREALLOC_MODE_METADATA) { 3313 mode = PREALLOC_MODE_OFF; 3314 } 3315 ret = bdrv_co_truncate(s->data_file, host_offset + cur_bytes, false, 3316 mode, 0, errp); 3317 if (ret < 0) { 3318 goto out; 3319 } 3320 } 3321 3322 ret = 0; 3323 3324 out: 3325 qcow2_handle_l2meta(bs, &meta, false); 3326 return ret; 3327 } 3328 3329 /* qcow2_refcount_metadata_size: 3330 * @clusters: number of clusters to refcount (including data and L1/L2 tables) 3331 * @cluster_size: size of a cluster, in bytes 3332 * @refcount_order: refcount bits power-of-2 exponent 3333 * @generous_increase: allow for the refcount table to be 1.5x as large as it 3334 * needs to be 3335 * 3336 * Returns: Number of bytes required for refcount blocks and table metadata. 3337 */ 3338 int64_t qcow2_refcount_metadata_size(int64_t clusters, size_t cluster_size, 3339 int refcount_order, bool generous_increase, 3340 uint64_t *refblock_count) 3341 { 3342 /* 3343 * Every host cluster is reference-counted, including metadata (even 3344 * refcount metadata is recursively included). 3345 * 3346 * An accurate formula for the size of refcount metadata size is difficult 3347 * to derive. An easier method of calculation is finding the fixed point 3348 * where no further refcount blocks or table clusters are required to 3349 * reference count every cluster. 
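     *
     * For example, with 64 KiB clusters and 16-bit refcounts (and
     * generous_increase disabled): refcounts_per_block = 65536 * 8 / 16 =
     * 32768 and blocks_per_table_cluster = 65536 / 8 = 8192. For
     * clusters = 1000000 the first iteration gives blocks = 31, table = 1,
     * n = 1000032; the second iteration reproduces the same values, so the
     * loop terminates with 32 clusters of refcount metadata.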
3350 */ 3351 int64_t blocks_per_table_cluster = cluster_size / REFTABLE_ENTRY_SIZE; 3352 int64_t refcounts_per_block = cluster_size * 8 / (1 << refcount_order); 3353 int64_t table = 0; /* number of refcount table clusters */ 3354 int64_t blocks = 0; /* number of refcount block clusters */ 3355 int64_t last; 3356 int64_t n = 0; 3357 3358 do { 3359 last = n; 3360 blocks = DIV_ROUND_UP(clusters + table + blocks, refcounts_per_block); 3361 table = DIV_ROUND_UP(blocks, blocks_per_table_cluster); 3362 n = clusters + blocks + table; 3363 3364 if (n == last && generous_increase) { 3365 clusters += DIV_ROUND_UP(table, 2); 3366 n = 0; /* force another loop */ 3367 generous_increase = false; 3368 } 3369 } while (n != last); 3370 3371 if (refblock_count) { 3372 *refblock_count = blocks; 3373 } 3374 3375 return (blocks + table) * cluster_size; 3376 } 3377 3378 /** 3379 * qcow2_calc_prealloc_size: 3380 * @total_size: virtual disk size in bytes 3381 * @cluster_size: cluster size in bytes 3382 * @refcount_order: refcount bits power-of-2 exponent 3383 * @extended_l2: true if the image has extended L2 entries 3384 * 3385 * Returns: Total number of bytes required for the fully allocated image 3386 * (including metadata). 3387 */ 3388 static int64_t qcow2_calc_prealloc_size(int64_t total_size, 3389 size_t cluster_size, 3390 int refcount_order, 3391 bool extended_l2) 3392 { 3393 int64_t meta_size = 0; 3394 uint64_t nl1e, nl2e; 3395 int64_t aligned_total_size = ROUND_UP(total_size, cluster_size); 3396 size_t l2e_size = extended_l2 ? L2E_SIZE_EXTENDED : L2E_SIZE_NORMAL; 3397 3398 /* header: 1 cluster */ 3399 meta_size += cluster_size; 3400 3401 /* total size of L2 tables */ 3402 nl2e = aligned_total_size / cluster_size; 3403 nl2e = ROUND_UP(nl2e, cluster_size / l2e_size); 3404 meta_size += nl2e * l2e_size; 3405 3406 /* total size of L1 tables */ 3407 nl1e = nl2e * l2e_size / cluster_size; 3408 nl1e = ROUND_UP(nl1e, cluster_size / L1E_SIZE); 3409 meta_size += nl1e * L1E_SIZE; 3410 3411 /* total size of refcount table and blocks */ 3412 meta_size += qcow2_refcount_metadata_size( 3413 (meta_size + aligned_total_size) / cluster_size, 3414 cluster_size, refcount_order, false, NULL); 3415 3416 return meta_size + aligned_total_size; 3417 } 3418 3419 static bool validate_cluster_size(size_t cluster_size, bool extended_l2, 3420 Error **errp) 3421 { 3422 int cluster_bits = ctz32(cluster_size); 3423 if (cluster_bits < MIN_CLUSTER_BITS || cluster_bits > MAX_CLUSTER_BITS || 3424 (1 << cluster_bits) != cluster_size) 3425 { 3426 error_setg(errp, "Cluster size must be a power of two between %d and " 3427 "%dk", 1 << MIN_CLUSTER_BITS, 1 << (MAX_CLUSTER_BITS - 10)); 3428 return false; 3429 } 3430 3431 if (extended_l2) { 3432 unsigned min_cluster_size = 3433 (1 << MIN_CLUSTER_BITS) * QCOW_EXTL2_SUBCLUSTERS_PER_CLUSTER; 3434 if (cluster_size < min_cluster_size) { 3435 error_setg(errp, "Extended L2 entries are only supported with " 3436 "cluster sizes of at least %u bytes", min_cluster_size); 3437 return false; 3438 } 3439 } 3440 3441 return true; 3442 } 3443 3444 static size_t qcow2_opt_get_cluster_size_del(QemuOpts *opts, bool extended_l2, 3445 Error **errp) 3446 { 3447 size_t cluster_size; 3448 3449 cluster_size = qemu_opt_get_size_del(opts, BLOCK_OPT_CLUSTER_SIZE, 3450 DEFAULT_CLUSTER_SIZE); 3451 if (!validate_cluster_size(cluster_size, extended_l2, errp)) { 3452 return 0; 3453 } 3454 return cluster_size; 3455 } 3456 3457 static int qcow2_opt_get_version_del(QemuOpts *opts, Error **errp) 3458 { 3459 char *buf; 3460 int ret; 3461 
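    /* compat=0.10 maps to qcow2 version 2, compat=1.1 to version 3 */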
3462 buf = qemu_opt_get_del(opts, BLOCK_OPT_COMPAT_LEVEL); 3463 if (!buf) { 3464 ret = 3; /* default */ 3465 } else if (!strcmp(buf, "0.10")) { 3466 ret = 2; 3467 } else if (!strcmp(buf, "1.1")) { 3468 ret = 3; 3469 } else { 3470 error_setg(errp, "Invalid compatibility level: '%s'", buf); 3471 ret = -EINVAL; 3472 } 3473 g_free(buf); 3474 return ret; 3475 } 3476 3477 static uint64_t qcow2_opt_get_refcount_bits_del(QemuOpts *opts, int version, 3478 Error **errp) 3479 { 3480 uint64_t refcount_bits; 3481 3482 refcount_bits = qemu_opt_get_number_del(opts, BLOCK_OPT_REFCOUNT_BITS, 16); 3483 if (refcount_bits > 64 || !is_power_of_2(refcount_bits)) { 3484 error_setg(errp, "Refcount width must be a power of two and may not " 3485 "exceed 64 bits"); 3486 return 0; 3487 } 3488 3489 if (version < 3 && refcount_bits != 16) { 3490 error_setg(errp, "Different refcount widths than 16 bits require " 3491 "compatibility level 1.1 or above (use compat=1.1 or " 3492 "greater)"); 3493 return 0; 3494 } 3495 3496 return refcount_bits; 3497 } 3498 3499 static int coroutine_fn GRAPH_UNLOCKED 3500 qcow2_co_create(BlockdevCreateOptions *create_options, Error **errp) 3501 { 3502 ERRP_GUARD(); 3503 BlockdevCreateOptionsQcow2 *qcow2_opts; 3504 QDict *options; 3505 3506 /* 3507 * Open the image file and write a minimal qcow2 header. 3508 * 3509 * We keep things simple and start with a zero-sized image. We also 3510 * do without refcount blocks or a L1 table for now. We'll fix the 3511 * inconsistency later. 3512 * 3513 * We do need a refcount table because growing the refcount table means 3514 * allocating two new refcount blocks - the second of which would be at 3515 * 2 GB for 64k clusters, and we don't want to have a 2 GB initial file 3516 * size for any qcow2 image. 3517 */ 3518 BlockBackend *blk = NULL; 3519 BlockDriverState *bs = NULL; 3520 BlockDriverState *data_bs = NULL; 3521 QCowHeader *header; 3522 size_t cluster_size; 3523 int version; 3524 int refcount_order; 3525 uint64_t *refcount_table; 3526 int ret; 3527 uint8_t compression_type = QCOW2_COMPRESSION_TYPE_ZLIB; 3528 3529 assert(create_options->driver == BLOCKDEV_DRIVER_QCOW2); 3530 qcow2_opts = &create_options->u.qcow2; 3531 3532 bs = bdrv_co_open_blockdev_ref(qcow2_opts->file, errp); 3533 if (bs == NULL) { 3534 return -EIO; 3535 } 3536 3537 /* Validate options and set default values */ 3538 if (!QEMU_IS_ALIGNED(qcow2_opts->size, BDRV_SECTOR_SIZE)) { 3539 error_setg(errp, "Image size must be a multiple of %u bytes", 3540 (unsigned) BDRV_SECTOR_SIZE); 3541 ret = -EINVAL; 3542 goto out; 3543 } 3544 3545 if (qcow2_opts->has_version) { 3546 switch (qcow2_opts->version) { 3547 case BLOCKDEV_QCOW2_VERSION_V2: 3548 version = 2; 3549 break; 3550 case BLOCKDEV_QCOW2_VERSION_V3: 3551 version = 3; 3552 break; 3553 default: 3554 g_assert_not_reached(); 3555 } 3556 } else { 3557 version = 3; 3558 } 3559 3560 if (qcow2_opts->has_cluster_size) { 3561 cluster_size = qcow2_opts->cluster_size; 3562 } else { 3563 cluster_size = DEFAULT_CLUSTER_SIZE; 3564 } 3565 3566 if (!qcow2_opts->has_extended_l2) { 3567 qcow2_opts->extended_l2 = false; 3568 } 3569 if (qcow2_opts->extended_l2) { 3570 if (version < 3) { 3571 error_setg(errp, "Extended L2 entries are only supported with " 3572 "compatibility level 1.1 and above (use version=v3 or " 3573 "greater)"); 3574 ret = -EINVAL; 3575 goto out; 3576 } 3577 } 3578 3579 if (!validate_cluster_size(cluster_size, qcow2_opts->extended_l2, errp)) { 3580 ret = -EINVAL; 3581 goto out; 3582 } 3583 3584 if (!qcow2_opts->has_preallocation) { 3585 
qcow2_opts->preallocation = PREALLOC_MODE_OFF; 3586 } 3587 if (qcow2_opts->backing_file && 3588 qcow2_opts->preallocation != PREALLOC_MODE_OFF && 3589 !qcow2_opts->extended_l2) 3590 { 3591 error_setg(errp, "Backing file and preallocation can only be used at " 3592 "the same time if extended_l2 is on"); 3593 ret = -EINVAL; 3594 goto out; 3595 } 3596 if (qcow2_opts->has_backing_fmt && !qcow2_opts->backing_file) { 3597 error_setg(errp, "Backing format cannot be used without backing file"); 3598 ret = -EINVAL; 3599 goto out; 3600 } 3601 3602 if (!qcow2_opts->has_lazy_refcounts) { 3603 qcow2_opts->lazy_refcounts = false; 3604 } 3605 if (version < 3 && qcow2_opts->lazy_refcounts) { 3606 error_setg(errp, "Lazy refcounts only supported with compatibility " 3607 "level 1.1 and above (use version=v3 or greater)"); 3608 ret = -EINVAL; 3609 goto out; 3610 } 3611 3612 if (!qcow2_opts->has_refcount_bits) { 3613 qcow2_opts->refcount_bits = 16; 3614 } 3615 if (qcow2_opts->refcount_bits > 64 || 3616 !is_power_of_2(qcow2_opts->refcount_bits)) 3617 { 3618 error_setg(errp, "Refcount width must be a power of two and may not " 3619 "exceed 64 bits"); 3620 ret = -EINVAL; 3621 goto out; 3622 } 3623 if (version < 3 && qcow2_opts->refcount_bits != 16) { 3624 error_setg(errp, "Different refcount widths than 16 bits require " 3625 "compatibility level 1.1 or above (use version=v3 or " 3626 "greater)"); 3627 ret = -EINVAL; 3628 goto out; 3629 } 3630 refcount_order = ctz32(qcow2_opts->refcount_bits); 3631 3632 if (qcow2_opts->data_file_raw && !qcow2_opts->data_file) { 3633 error_setg(errp, "data-file-raw requires data-file"); 3634 ret = -EINVAL; 3635 goto out; 3636 } 3637 if (qcow2_opts->data_file_raw && qcow2_opts->backing_file) { 3638 error_setg(errp, "Backing file and data-file-raw cannot be used at " 3639 "the same time"); 3640 ret = -EINVAL; 3641 goto out; 3642 } 3643 if (qcow2_opts->data_file_raw && 3644 qcow2_opts->preallocation == PREALLOC_MODE_OFF) 3645 { 3646 /* 3647 * data-file-raw means that "the external data file can be 3648 * read as a consistent standalone raw image without looking 3649 * at the qcow2 metadata." It does not say that the metadata 3650 * must be ignored, though (and the qcow2 driver in fact does 3651 * not ignore it), so the L1/L2 tables must be present and 3652 * give a 1:1 mapping, so you get the same result regardless 3653 * of whether you look at the metadata or whether you ignore 3654 * it. 3655 */ 3656 qcow2_opts->preallocation = PREALLOC_MODE_METADATA; 3657 3658 /* 3659 * Cannot use preallocation with backing files, but giving a 3660 * backing file when specifying data_file_raw is an error 3661 * anyway. 
3662 */ 3663 assert(!qcow2_opts->backing_file); 3664 } 3665 3666 if (qcow2_opts->data_file) { 3667 if (version < 3) { 3668 error_setg(errp, "External data files are only supported with " 3669 "compatibility level 1.1 and above (use version=v3 or " 3670 "greater)"); 3671 ret = -EINVAL; 3672 goto out; 3673 } 3674 data_bs = bdrv_co_open_blockdev_ref(qcow2_opts->data_file, errp); 3675 if (data_bs == NULL) { 3676 ret = -EIO; 3677 goto out; 3678 } 3679 } 3680 3681 if (qcow2_opts->has_compression_type && 3682 qcow2_opts->compression_type != QCOW2_COMPRESSION_TYPE_ZLIB) { 3683 3684 ret = -EINVAL; 3685 3686 if (version < 3) { 3687 error_setg(errp, "Non-zlib compression type is only supported with " 3688 "compatibility level 1.1 and above (use version=v3 or " 3689 "greater)"); 3690 goto out; 3691 } 3692 3693 switch (qcow2_opts->compression_type) { 3694 #ifdef CONFIG_ZSTD 3695 case QCOW2_COMPRESSION_TYPE_ZSTD: 3696 break; 3697 #endif 3698 default: 3699 error_setg(errp, "Unknown compression type"); 3700 goto out; 3701 } 3702 3703 compression_type = qcow2_opts->compression_type; 3704 } 3705 3706 /* Create BlockBackend to write to the image */ 3707 blk = blk_co_new_with_bs(bs, BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL, 3708 errp); 3709 if (!blk) { 3710 ret = -EPERM; 3711 goto out; 3712 } 3713 blk_set_allow_write_beyond_eof(blk, true); 3714 3715 /* Write the header */ 3716 QEMU_BUILD_BUG_ON((1 << MIN_CLUSTER_BITS) < sizeof(*header)); 3717 header = g_malloc0(cluster_size); 3718 *header = (QCowHeader) { 3719 .magic = cpu_to_be32(QCOW_MAGIC), 3720 .version = cpu_to_be32(version), 3721 .cluster_bits = cpu_to_be32(ctz32(cluster_size)), 3722 .size = cpu_to_be64(0), 3723 .l1_table_offset = cpu_to_be64(0), 3724 .l1_size = cpu_to_be32(0), 3725 .refcount_table_offset = cpu_to_be64(cluster_size), 3726 .refcount_table_clusters = cpu_to_be32(1), 3727 .refcount_order = cpu_to_be32(refcount_order), 3728 /* don't deal with endianness since compression_type is 1 byte long */ 3729 .compression_type = compression_type, 3730 .header_length = cpu_to_be32(sizeof(*header)), 3731 }; 3732 3733 /* We'll update this to correct value later */ 3734 header->crypt_method = cpu_to_be32(QCOW_CRYPT_NONE); 3735 3736 if (qcow2_opts->lazy_refcounts) { 3737 header->compatible_features |= 3738 cpu_to_be64(QCOW2_COMPAT_LAZY_REFCOUNTS); 3739 } 3740 if (data_bs) { 3741 header->incompatible_features |= 3742 cpu_to_be64(QCOW2_INCOMPAT_DATA_FILE); 3743 } 3744 if (qcow2_opts->data_file_raw) { 3745 header->autoclear_features |= 3746 cpu_to_be64(QCOW2_AUTOCLEAR_DATA_FILE_RAW); 3747 } 3748 if (compression_type != QCOW2_COMPRESSION_TYPE_ZLIB) { 3749 header->incompatible_features |= 3750 cpu_to_be64(QCOW2_INCOMPAT_COMPRESSION); 3751 } 3752 3753 if (qcow2_opts->extended_l2) { 3754 header->incompatible_features |= 3755 cpu_to_be64(QCOW2_INCOMPAT_EXTL2); 3756 } 3757 3758 ret = blk_co_pwrite(blk, 0, cluster_size, header, 0); 3759 g_free(header); 3760 if (ret < 0) { 3761 error_setg_errno(errp, -ret, "Could not write qcow2 header"); 3762 goto out; 3763 } 3764 3765 /* Write a refcount table with one refcount block */ 3766 refcount_table = g_malloc0(2 * cluster_size); 3767 refcount_table[0] = cpu_to_be64(2 * cluster_size); 3768 ret = blk_co_pwrite(blk, cluster_size, 2 * cluster_size, refcount_table, 0); 3769 g_free(refcount_table); 3770 3771 if (ret < 0) { 3772 error_setg_errno(errp, -ret, "Could not write refcount table"); 3773 goto out; 3774 } 3775 3776 blk_co_unref(blk); 3777 blk = NULL; 3778 3779 /* 3780 * And now open the image and make it consistent 
first (i.e. increase the 3781 * refcount of the cluster that is occupied by the header and the refcount 3782 * table) 3783 */ 3784 options = qdict_new(); 3785 qdict_put_str(options, "driver", "qcow2"); 3786 qdict_put_str(options, "file", bs->node_name); 3787 if (data_bs) { 3788 qdict_put_str(options, "data-file", data_bs->node_name); 3789 } 3790 blk = blk_co_new_open(NULL, NULL, options, 3791 BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_NO_FLUSH, 3792 errp); 3793 if (blk == NULL) { 3794 ret = -EIO; 3795 goto out; 3796 } 3797 3798 bdrv_graph_co_rdlock(); 3799 ret = qcow2_alloc_clusters(blk_bs(blk), 3 * cluster_size); 3800 if (ret < 0) { 3801 bdrv_graph_co_rdunlock(); 3802 error_setg_errno(errp, -ret, "Could not allocate clusters for qcow2 " 3803 "header and refcount table"); 3804 goto out; 3805 3806 } else if (ret != 0) { 3807 error_report("Huh, first cluster in empty image is already in use?"); 3808 abort(); 3809 } 3810 3811 /* Set the external data file if necessary */ 3812 if (data_bs) { 3813 BDRVQcow2State *s = blk_bs(blk)->opaque; 3814 s->image_data_file = g_strdup(data_bs->filename); 3815 } 3816 3817 /* Create a full header (including things like feature table) */ 3818 ret = qcow2_update_header(blk_bs(blk)); 3819 bdrv_graph_co_rdunlock(); 3820 3821 if (ret < 0) { 3822 error_setg_errno(errp, -ret, "Could not update qcow2 header"); 3823 goto out; 3824 } 3825 3826 /* Okay, now that we have a valid image, let's give it the right size */ 3827 ret = blk_co_truncate(blk, qcow2_opts->size, false, 3828 qcow2_opts->preallocation, 0, errp); 3829 if (ret < 0) { 3830 error_prepend(errp, "Could not resize image: "); 3831 goto out; 3832 } 3833 3834 /* Want a backing file? There you go. */ 3835 if (qcow2_opts->backing_file) { 3836 const char *backing_format = NULL; 3837 3838 if (qcow2_opts->has_backing_fmt) { 3839 backing_format = BlockdevDriver_str(qcow2_opts->backing_fmt); 3840 } 3841 3842 bdrv_graph_co_rdlock(); 3843 ret = bdrv_co_change_backing_file(blk_bs(blk), qcow2_opts->backing_file, 3844 backing_format, false); 3845 bdrv_graph_co_rdunlock(); 3846 3847 if (ret < 0) { 3848 error_setg_errno(errp, -ret, "Could not assign backing file '%s' " 3849 "with format '%s'", qcow2_opts->backing_file, 3850 backing_format); 3851 goto out; 3852 } 3853 } 3854 3855 /* Want encryption? There you go. */ 3856 if (qcow2_opts->encrypt) { 3857 bdrv_graph_co_rdlock(); 3858 ret = qcow2_set_up_encryption(blk_bs(blk), qcow2_opts->encrypt, errp); 3859 bdrv_graph_co_rdunlock(); 3860 3861 if (ret < 0) { 3862 goto out; 3863 } 3864 } 3865 3866 blk_co_unref(blk); 3867 blk = NULL; 3868 3869 /* Reopen the image without BDRV_O_NO_FLUSH to flush it before returning. 3870 * Using BDRV_O_NO_IO, since encryption is now setup we don't want to 3871 * have to setup decryption context. We're not doing any I/O on the top 3872 * level BlockDriverState, only lower layers, where BDRV_O_NO_IO does 3873 * not have effect. 
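* (The actual flush then presumably happens when this temporary BlockBackend is closed/unref'd before returning.)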
3874 */ 3875 options = qdict_new(); 3876 qdict_put_str(options, "driver", "qcow2"); 3877 qdict_put_str(options, "file", bs->node_name); 3878 if (data_bs) { 3879 qdict_put_str(options, "data-file", data_bs->node_name); 3880 } 3881 blk = blk_co_new_open(NULL, NULL, options, 3882 BDRV_O_RDWR | BDRV_O_NO_BACKING | BDRV_O_NO_IO, 3883 errp); 3884 if (blk == NULL) { 3885 ret = -EIO; 3886 goto out; 3887 } 3888 3889 ret = 0; 3890 out: 3891 blk_co_unref(blk); 3892 bdrv_co_unref(bs); 3893 bdrv_co_unref(data_bs); 3894 return ret; 3895 } 3896 3897 static int coroutine_fn GRAPH_UNLOCKED 3898 qcow2_co_create_opts(BlockDriver *drv, const char *filename, QemuOpts *opts, 3899 Error **errp) 3900 { 3901 BlockdevCreateOptions *create_options = NULL; 3902 QDict *qdict; 3903 Visitor *v; 3904 BlockDriverState *bs = NULL; 3905 BlockDriverState *data_bs = NULL; 3906 const char *val; 3907 int ret; 3908 3909 /* Only the keyval visitor supports the dotted syntax needed for 3910 * encryption, so go through a QDict before getting a QAPI type. Ignore 3911 * options meant for the protocol layer so that the visitor doesn't 3912 * complain. */ 3913 qdict = qemu_opts_to_qdict_filtered(opts, NULL, bdrv_qcow2.create_opts, 3914 true); 3915 3916 /* Handle encryption options */ 3917 val = qdict_get_try_str(qdict, BLOCK_OPT_ENCRYPT); 3918 if (val && !strcmp(val, "on")) { 3919 qdict_put_str(qdict, BLOCK_OPT_ENCRYPT, "qcow"); 3920 } else if (val && !strcmp(val, "off")) { 3921 qdict_del(qdict, BLOCK_OPT_ENCRYPT); 3922 } 3923 3924 val = qdict_get_try_str(qdict, BLOCK_OPT_ENCRYPT_FORMAT); 3925 if (val && !strcmp(val, "aes")) { 3926 qdict_put_str(qdict, BLOCK_OPT_ENCRYPT_FORMAT, "qcow"); 3927 } 3928 3929 /* Convert compat=0.10/1.1 into compat=v2/v3, to be renamed into 3930 * version=v2/v3 below. 
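* (So, for example, compat=1.1 on the qemu-img command line ends up as version=v3 in the QAPI BlockdevCreateOptions.)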
*/ 3931 val = qdict_get_try_str(qdict, BLOCK_OPT_COMPAT_LEVEL); 3932 if (val && !strcmp(val, "0.10")) { 3933 qdict_put_str(qdict, BLOCK_OPT_COMPAT_LEVEL, "v2"); 3934 } else if (val && !strcmp(val, "1.1")) { 3935 qdict_put_str(qdict, BLOCK_OPT_COMPAT_LEVEL, "v3"); 3936 } 3937 3938 /* Change legacy command line options into QMP ones */ 3939 static const QDictRenames opt_renames[] = { 3940 { BLOCK_OPT_BACKING_FILE, "backing-file" }, 3941 { BLOCK_OPT_BACKING_FMT, "backing-fmt" }, 3942 { BLOCK_OPT_CLUSTER_SIZE, "cluster-size" }, 3943 { BLOCK_OPT_LAZY_REFCOUNTS, "lazy-refcounts" }, 3944 { BLOCK_OPT_EXTL2, "extended-l2" }, 3945 { BLOCK_OPT_REFCOUNT_BITS, "refcount-bits" }, 3946 { BLOCK_OPT_ENCRYPT, BLOCK_OPT_ENCRYPT_FORMAT }, 3947 { BLOCK_OPT_COMPAT_LEVEL, "version" }, 3948 { BLOCK_OPT_DATA_FILE_RAW, "data-file-raw" }, 3949 { BLOCK_OPT_COMPRESSION_TYPE, "compression-type" }, 3950 { NULL, NULL }, 3951 }; 3952 3953 if (!qdict_rename_keys(qdict, opt_renames, errp)) { 3954 ret = -EINVAL; 3955 goto finish; 3956 } 3957 3958 /* Create and open the file (protocol layer) */ 3959 ret = bdrv_co_create_file(filename, opts, errp); 3960 if (ret < 0) { 3961 goto finish; 3962 } 3963 3964 bs = bdrv_co_open(filename, NULL, NULL, 3965 BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, errp); 3966 if (bs == NULL) { 3967 ret = -EIO; 3968 goto finish; 3969 } 3970 3971 /* Create and open an external data file (protocol layer) */ 3972 val = qdict_get_try_str(qdict, BLOCK_OPT_DATA_FILE); 3973 if (val) { 3974 ret = bdrv_co_create_file(val, opts, errp); 3975 if (ret < 0) { 3976 goto finish; 3977 } 3978 3979 data_bs = bdrv_co_open(val, NULL, NULL, 3980 BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, 3981 errp); 3982 if (data_bs == NULL) { 3983 ret = -EIO; 3984 goto finish; 3985 } 3986 3987 qdict_del(qdict, BLOCK_OPT_DATA_FILE); 3988 qdict_put_str(qdict, "data-file", data_bs->node_name); 3989 } 3990 3991 /* Set 'driver' and 'node' options */ 3992 qdict_put_str(qdict, "driver", "qcow2"); 3993 qdict_put_str(qdict, "file", bs->node_name); 3994 3995 /* Now get the QAPI type BlockdevCreateOptions */ 3996 v = qobject_input_visitor_new_flat_confused(qdict, errp); 3997 if (!v) { 3998 ret = -EINVAL; 3999 goto finish; 4000 } 4001 4002 visit_type_BlockdevCreateOptions(v, NULL, &create_options, errp); 4003 visit_free(v); 4004 if (!create_options) { 4005 ret = -EINVAL; 4006 goto finish; 4007 } 4008 4009 /* Silently round up size */ 4010 create_options->u.qcow2.size = ROUND_UP(create_options->u.qcow2.size, 4011 BDRV_SECTOR_SIZE); 4012 4013 /* Create the qcow2 image (format layer) */ 4014 ret = qcow2_co_create(create_options, errp); 4015 finish: 4016 if (ret < 0) { 4017 bdrv_graph_co_rdlock(); 4018 bdrv_co_delete_file_noerr(bs); 4019 bdrv_co_delete_file_noerr(data_bs); 4020 bdrv_graph_co_rdunlock(); 4021 } else { 4022 ret = 0; 4023 } 4024 4025 qobject_unref(qdict); 4026 bdrv_co_unref(bs); 4027 bdrv_co_unref(data_bs); 4028 qapi_free_BlockdevCreateOptions(create_options); 4029 return ret; 4030 } 4031 4032 4033 static bool coroutine_fn GRAPH_RDLOCK 4034 is_zero(BlockDriverState *bs, int64_t offset, int64_t bytes) 4035 { 4036 int64_t nr; 4037 int res; 4038 4039 /* Clamp to image length, before checking status of underlying sectors */ 4040 if (offset + bytes > bs->total_sectors * BDRV_SECTOR_SIZE) { 4041 bytes = bs->total_sectors * BDRV_SECTOR_SIZE - offset; 4042 } 4043 4044 if (!bytes) { 4045 return true; 4046 } 4047 4048 /* 4049 * bdrv_block_status_above doesn't merge different types of zeros, for 4050 * example, zeros which come from the region 
which is unallocated in 4051 * the whole backing chain, and zeros which come because of a short 4052 * backing file. So, we need a loop. 4053 */ 4054 do { 4055 res = bdrv_co_block_status_above(bs, NULL, offset, bytes, &nr, NULL, NULL); 4056 offset += nr; 4057 bytes -= nr; 4058 } while (res >= 0 && (res & BDRV_BLOCK_ZERO) && nr && bytes); 4059 4060 return res >= 0 && (res & BDRV_BLOCK_ZERO) && bytes == 0; 4061 } 4062 4063 static int coroutine_fn GRAPH_RDLOCK 4064 qcow2_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes, 4065 BdrvRequestFlags flags) 4066 { 4067 int ret; 4068 BDRVQcow2State *s = bs->opaque; 4069 4070 uint32_t head = offset_into_subcluster(s, offset); 4071 uint32_t tail = ROUND_UP(offset + bytes, s->subcluster_size) - 4072 (offset + bytes); 4073 4074 trace_qcow2_pwrite_zeroes_start_req(qemu_coroutine_self(), offset, bytes); 4075 if (offset + bytes == bs->total_sectors * BDRV_SECTOR_SIZE) { 4076 tail = 0; 4077 } 4078 4079 if (head || tail) { 4080 uint64_t off; 4081 unsigned int nr; 4082 QCow2SubclusterType type; 4083 4084 assert(head + bytes + tail <= s->subcluster_size); 4085 4086 /* check whether remainder of cluster already reads as zero */ 4087 if (!(is_zero(bs, offset - head, head) && 4088 is_zero(bs, offset + bytes, tail))) { 4089 return -ENOTSUP; 4090 } 4091 4092 qemu_co_mutex_lock(&s->lock); 4093 /* We can have new write after previous check */ 4094 offset -= head; 4095 bytes = s->subcluster_size; 4096 nr = s->subcluster_size; 4097 ret = qcow2_get_host_offset(bs, offset, &nr, &off, &type); 4098 if (ret < 0 || 4099 (type != QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN && 4100 type != QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC && 4101 type != QCOW2_SUBCLUSTER_ZERO_PLAIN && 4102 type != QCOW2_SUBCLUSTER_ZERO_ALLOC)) { 4103 qemu_co_mutex_unlock(&s->lock); 4104 return ret < 0 ? ret : -ENOTSUP; 4105 } 4106 } else { 4107 qemu_co_mutex_lock(&s->lock); 4108 } 4109 4110 trace_qcow2_pwrite_zeroes(qemu_coroutine_self(), offset, bytes); 4111 4112 /* Whatever is left can use real zero subclusters */ 4113 ret = qcow2_subcluster_zeroize(bs, offset, bytes, flags); 4114 qemu_co_mutex_unlock(&s->lock); 4115 4116 return ret; 4117 } 4118 4119 static int coroutine_fn GRAPH_RDLOCK 4120 qcow2_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes) 4121 { 4122 int ret; 4123 BDRVQcow2State *s = bs->opaque; 4124 4125 /* If the image does not support QCOW_OFLAG_ZERO then discarding 4126 * clusters could expose stale data from the backing file. 
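* (A v2 image has no zero-cluster flag, so a discarded cluster simply becomes unallocated again and subsequent reads fall through to the backing file.)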
*/ 4127 if (s->qcow_version < 3 && bs->backing) { 4128 return -ENOTSUP; 4129 } 4130 4131 if (!QEMU_IS_ALIGNED(offset | bytes, s->cluster_size)) { 4132 assert(bytes < s->cluster_size); 4133 /* Ignore partial clusters, except for the special case of the 4134 * complete partial cluster at the end of an unaligned file */ 4135 if (!QEMU_IS_ALIGNED(offset, s->cluster_size) || 4136 offset + bytes != bs->total_sectors * BDRV_SECTOR_SIZE) { 4137 return -ENOTSUP; 4138 } 4139 } 4140 4141 qemu_co_mutex_lock(&s->lock); 4142 ret = qcow2_cluster_discard(bs, offset, bytes, QCOW2_DISCARD_REQUEST, 4143 false); 4144 qemu_co_mutex_unlock(&s->lock); 4145 return ret; 4146 } 4147 4148 static int coroutine_fn GRAPH_RDLOCK 4149 qcow2_co_copy_range_from(BlockDriverState *bs, 4150 BdrvChild *src, int64_t src_offset, 4151 BdrvChild *dst, int64_t dst_offset, 4152 int64_t bytes, BdrvRequestFlags read_flags, 4153 BdrvRequestFlags write_flags) 4154 { 4155 BDRVQcow2State *s = bs->opaque; 4156 int ret; 4157 unsigned int cur_bytes; /* number of bytes in current iteration */ 4158 BdrvChild *child = NULL; 4159 BdrvRequestFlags cur_write_flags; 4160 4161 assert(!bs->encrypted); 4162 qemu_co_mutex_lock(&s->lock); 4163 4164 while (bytes != 0) { 4165 uint64_t copy_offset = 0; 4166 QCow2SubclusterType type; 4167 /* prepare next request */ 4168 cur_bytes = MIN(bytes, INT_MAX); 4169 cur_write_flags = write_flags; 4170 4171 ret = qcow2_get_host_offset(bs, src_offset, &cur_bytes, 4172 &copy_offset, &type); 4173 if (ret < 0) { 4174 goto out; 4175 } 4176 4177 switch (type) { 4178 case QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN: 4179 case QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC: 4180 if (bs->backing && bs->backing->bs) { 4181 int64_t backing_length = bdrv_co_getlength(bs->backing->bs); 4182 if (src_offset >= backing_length) { 4183 cur_write_flags |= BDRV_REQ_ZERO_WRITE; 4184 } else { 4185 child = bs->backing; 4186 cur_bytes = MIN(cur_bytes, backing_length - src_offset); 4187 copy_offset = src_offset; 4188 } 4189 } else { 4190 cur_write_flags |= BDRV_REQ_ZERO_WRITE; 4191 } 4192 break; 4193 4194 case QCOW2_SUBCLUSTER_ZERO_PLAIN: 4195 case QCOW2_SUBCLUSTER_ZERO_ALLOC: 4196 cur_write_flags |= BDRV_REQ_ZERO_WRITE; 4197 break; 4198 4199 case QCOW2_SUBCLUSTER_COMPRESSED: 4200 ret = -ENOTSUP; 4201 goto out; 4202 4203 case QCOW2_SUBCLUSTER_NORMAL: 4204 child = s->data_file; 4205 break; 4206 4207 default: 4208 abort(); 4209 } 4210 qemu_co_mutex_unlock(&s->lock); 4211 ret = bdrv_co_copy_range_from(child, 4212 copy_offset, 4213 dst, dst_offset, 4214 cur_bytes, read_flags, cur_write_flags); 4215 qemu_co_mutex_lock(&s->lock); 4216 if (ret < 0) { 4217 goto out; 4218 } 4219 4220 bytes -= cur_bytes; 4221 src_offset += cur_bytes; 4222 dst_offset += cur_bytes; 4223 } 4224 ret = 0; 4225 4226 out: 4227 qemu_co_mutex_unlock(&s->lock); 4228 return ret; 4229 } 4230 4231 static int coroutine_fn GRAPH_RDLOCK 4232 qcow2_co_copy_range_to(BlockDriverState *bs, 4233 BdrvChild *src, int64_t src_offset, 4234 BdrvChild *dst, int64_t dst_offset, 4235 int64_t bytes, BdrvRequestFlags read_flags, 4236 BdrvRequestFlags write_flags) 4237 { 4238 BDRVQcow2State *s = bs->opaque; 4239 int ret; 4240 unsigned int cur_bytes; /* number of bytes in current iteration */ 4241 uint64_t host_offset; 4242 QCowL2Meta *l2meta = NULL; 4243 4244 assert(!bs->encrypted); 4245 4246 qemu_co_mutex_lock(&s->lock); 4247 4248 while (bytes != 0) { 4249 4250 l2meta = NULL; 4251 4252 cur_bytes = MIN(bytes, INT_MAX); 4253 4254 /* TODO: 4255 * If src->bs == dst->bs, we could simply copy by incrementing 4256 * the refcnt,
without copying user data. 4257 * Or if src->bs == dst->bs->backing->bs, we could copy by discarding. */ 4258 ret = qcow2_alloc_host_offset(bs, dst_offset, &cur_bytes, 4259 &host_offset, &l2meta); 4260 if (ret < 0) { 4261 goto fail; 4262 } 4263 4264 ret = qcow2_pre_write_overlap_check(bs, 0, host_offset, cur_bytes, 4265 true); 4266 if (ret < 0) { 4267 goto fail; 4268 } 4269 4270 qemu_co_mutex_unlock(&s->lock); 4271 ret = bdrv_co_copy_range_to(src, src_offset, s->data_file, host_offset, 4272 cur_bytes, read_flags, write_flags); 4273 qemu_co_mutex_lock(&s->lock); 4274 if (ret < 0) { 4275 goto fail; 4276 } 4277 4278 ret = qcow2_handle_l2meta(bs, &l2meta, true); 4279 if (ret) { 4280 goto fail; 4281 } 4282 4283 bytes -= cur_bytes; 4284 src_offset += cur_bytes; 4285 dst_offset += cur_bytes; 4286 } 4287 ret = 0; 4288 4289 fail: 4290 qcow2_handle_l2meta(bs, &l2meta, false); 4291 4292 qemu_co_mutex_unlock(&s->lock); 4293 4294 trace_qcow2_writev_done_req(qemu_coroutine_self(), ret); 4295 4296 return ret; 4297 } 4298 4299 static int coroutine_fn GRAPH_RDLOCK 4300 qcow2_co_truncate(BlockDriverState *bs, int64_t offset, bool exact, 4301 PreallocMode prealloc, BdrvRequestFlags flags, Error **errp) 4302 { 4303 ERRP_GUARD(); 4304 BDRVQcow2State *s = bs->opaque; 4305 uint64_t old_length; 4306 int64_t new_l1_size; 4307 int ret; 4308 QDict *options; 4309 4310 if (prealloc != PREALLOC_MODE_OFF && prealloc != PREALLOC_MODE_METADATA && 4311 prealloc != PREALLOC_MODE_FALLOC && prealloc != PREALLOC_MODE_FULL) 4312 { 4313 error_setg(errp, "Unsupported preallocation mode '%s'", 4314 PreallocMode_str(prealloc)); 4315 return -ENOTSUP; 4316 } 4317 4318 if (!QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE)) { 4319 error_setg(errp, "The new size must be a multiple of %u", 4320 (unsigned) BDRV_SECTOR_SIZE); 4321 return -EINVAL; 4322 } 4323 4324 qemu_co_mutex_lock(&s->lock); 4325 4326 /* 4327 * Even though we store snapshot size for all images, it was not 4328 * required until v3, so it is not safe to proceed for v2. 4329 */ 4330 if (s->nb_snapshots && s->qcow_version < 3) { 4331 error_setg(errp, "Can't resize a v2 image which has snapshots"); 4332 ret = -ENOTSUP; 4333 goto fail; 4334 } 4335 4336 /* See qcow2-bitmap.c for which bitmap scenarios prevent a resize. 
*/ 4337 if (qcow2_truncate_bitmaps_check(bs, errp)) { 4338 ret = -ENOTSUP; 4339 goto fail; 4340 } 4341 4342 old_length = bs->total_sectors * BDRV_SECTOR_SIZE; 4343 new_l1_size = size_to_l1(s, offset); 4344 4345 if (offset < old_length) { 4346 int64_t last_cluster, old_file_size; 4347 if (prealloc != PREALLOC_MODE_OFF) { 4348 error_setg(errp, 4349 "Preallocation can't be used for shrinking an image"); 4350 ret = -EINVAL; 4351 goto fail; 4352 } 4353 4354 ret = qcow2_cluster_discard(bs, ROUND_UP(offset, s->cluster_size), 4355 old_length - ROUND_UP(offset, 4356 s->cluster_size), 4357 QCOW2_DISCARD_ALWAYS, true); 4358 if (ret < 0) { 4359 error_setg_errno(errp, -ret, "Failed to discard cropped clusters"); 4360 goto fail; 4361 } 4362 4363 ret = qcow2_shrink_l1_table(bs, new_l1_size); 4364 if (ret < 0) { 4365 error_setg_errno(errp, -ret, 4366 "Failed to reduce the number of L2 tables"); 4367 goto fail; 4368 } 4369 4370 ret = qcow2_shrink_reftable(bs); 4371 if (ret < 0) { 4372 error_setg_errno(errp, -ret, 4373 "Failed to discard unused refblocks"); 4374 goto fail; 4375 } 4376 4377 old_file_size = bdrv_co_getlength(bs->file->bs); 4378 if (old_file_size < 0) { 4379 error_setg_errno(errp, -old_file_size, 4380 "Failed to inquire current file length"); 4381 ret = old_file_size; 4382 goto fail; 4383 } 4384 last_cluster = qcow2_get_last_cluster(bs, old_file_size); 4385 if (last_cluster < 0) { 4386 error_setg_errno(errp, -last_cluster, 4387 "Failed to find the last cluster"); 4388 ret = last_cluster; 4389 goto fail; 4390 } 4391 if ((last_cluster + 1) * s->cluster_size < old_file_size) { 4392 Error *local_err = NULL; 4393 4394 /* 4395 * Do not pass @exact here: It will not help the user if 4396 * we get an error here just because they wanted to shrink 4397 * their qcow2 image (on a block device) with qemu-img. 4398 * (And on the qcow2 layer, the @exact requirement is 4399 * always fulfilled, so there is no need to pass it on.) 4400 */ 4401 bdrv_co_truncate(bs->file, (last_cluster + 1) * s->cluster_size, 4402 false, PREALLOC_MODE_OFF, 0, &local_err); 4403 if (local_err) { 4404 warn_reportf_err(local_err, 4405 "Failed to truncate the tail of the image: "); 4406 } 4407 } 4408 } else { 4409 ret = qcow2_grow_l1_table(bs, new_l1_size, true); 4410 if (ret < 0) { 4411 error_setg_errno(errp, -ret, "Failed to grow the L1 table"); 4412 goto fail; 4413 } 4414 4415 if (data_file_is_raw(bs) && prealloc == PREALLOC_MODE_OFF) { 4416 /* 4417 * When creating a qcow2 image with data-file-raw, we enforce 4418 * at least prealloc=metadata, so that the L1/L2 tables are 4419 * fully allocated and reading from the data file will return 4420 * the same data as reading from the qcow2 image. When the 4421 * image is grown, we must consequently preallocate the 4422 * metadata structures to cover the added area. 4423 */ 4424 prealloc = PREALLOC_MODE_METADATA; 4425 } 4426 } 4427 4428 switch (prealloc) { 4429 case PREALLOC_MODE_OFF: 4430 if (has_data_file(bs)) { 4431 /* 4432 * If the caller wants an exact resize, the external data 4433 * file should be resized to the exact target size, too, 4434 * so we pass @exact here. 
4435 */ 4436 ret = bdrv_co_truncate(s->data_file, offset, exact, prealloc, 0, 4437 errp); 4438 if (ret < 0) { 4439 goto fail; 4440 } 4441 } 4442 break; 4443 4444 case PREALLOC_MODE_METADATA: 4445 ret = preallocate_co(bs, old_length, offset, prealloc, errp); 4446 if (ret < 0) { 4447 goto fail; 4448 } 4449 break; 4450 4451 case PREALLOC_MODE_FALLOC: 4452 case PREALLOC_MODE_FULL: 4453 { 4454 int64_t allocation_start, host_offset, guest_offset; 4455 int64_t clusters_allocated; 4456 int64_t old_file_size, last_cluster, new_file_size; 4457 uint64_t nb_new_data_clusters, nb_new_l2_tables; 4458 bool subclusters_need_allocation = false; 4459 4460 /* With a data file, preallocation means just allocating the metadata 4461 * and forwarding the truncate request to the data file */ 4462 if (has_data_file(bs)) { 4463 ret = preallocate_co(bs, old_length, offset, prealloc, errp); 4464 if (ret < 0) { 4465 goto fail; 4466 } 4467 break; 4468 } 4469 4470 old_file_size = bdrv_co_getlength(bs->file->bs); 4471 if (old_file_size < 0) { 4472 error_setg_errno(errp, -old_file_size, 4473 "Failed to inquire current file length"); 4474 ret = old_file_size; 4475 goto fail; 4476 } 4477 4478 last_cluster = qcow2_get_last_cluster(bs, old_file_size); 4479 if (last_cluster >= 0) { 4480 old_file_size = (last_cluster + 1) * s->cluster_size; 4481 } else { 4482 old_file_size = ROUND_UP(old_file_size, s->cluster_size); 4483 } 4484 4485 nb_new_data_clusters = (ROUND_UP(offset, s->cluster_size) - 4486 start_of_cluster(s, old_length)) >> s->cluster_bits; 4487 4488 /* This is an overestimation; we will not actually allocate space for 4489 * these in the file but just make sure the new refcount structures are 4490 * able to cover them so we will not have to allocate new refblocks 4491 * while entering the data blocks in the potentially new L2 tables. 4492 * (We do not actually care where the L2 tables are placed. Maybe they 4493 * are already allocated or they can be placed somewhere before 4494 * @old_file_size. It does not matter because they will be fully 4495 * allocated automatically, so they do not need to be covered by the 4496 * preallocation. All that matters is that we will not have to allocate 4497 * new refcount structures for them.) */ 4498 nb_new_l2_tables = DIV_ROUND_UP(nb_new_data_clusters, 4499 s->cluster_size / l2_entry_size(s)); 4500 /* The cluster range may not be aligned to L2 boundaries, so add one L2 4501 * table for a potential head/tail */ 4502 nb_new_l2_tables++; 4503 4504 allocation_start = qcow2_refcount_area(bs, old_file_size, 4505 nb_new_data_clusters + 4506 nb_new_l2_tables, 4507 true, 0, 0); 4508 if (allocation_start < 0) { 4509 error_setg_errno(errp, -allocation_start, 4510 "Failed to resize refcount structures"); 4511 ret = allocation_start; 4512 goto fail; 4513 } 4514 4515 clusters_allocated = qcow2_alloc_clusters_at(bs, allocation_start, 4516 nb_new_data_clusters); 4517 if (clusters_allocated < 0) { 4518 error_setg_errno(errp, -clusters_allocated, 4519 "Failed to allocate data clusters"); 4520 ret = clusters_allocated; 4521 goto fail; 4522 } 4523 4524 assert(clusters_allocated == nb_new_data_clusters); 4525 4526 /* Allocate the data area */ 4527 new_file_size = allocation_start + 4528 nb_new_data_clusters * s->cluster_size; 4529 /* 4530 * Image file grows, so @exact does not matter. 4531 * 4532 * If we need to zero out the new area, try first whether the protocol 4533 * driver can already take care of this. 
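* (If it can, BDRV_REQ_ZERO_WRITE is dropped again below; otherwise the flag stays set and the new area is marked as zero in the qcow2 metadata later on instead.)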
4534 */ 4535 if (flags & BDRV_REQ_ZERO_WRITE) { 4536 ret = bdrv_co_truncate(bs->file, new_file_size, false, prealloc, 4537 BDRV_REQ_ZERO_WRITE, NULL); 4538 if (ret >= 0) { 4539 flags &= ~BDRV_REQ_ZERO_WRITE; 4540 /* Ensure that we read zeroes and not backing file data */ 4541 subclusters_need_allocation = true; 4542 } 4543 } else { 4544 ret = -1; 4545 } 4546 if (ret < 0) { 4547 ret = bdrv_co_truncate(bs->file, new_file_size, false, prealloc, 0, 4548 errp); 4549 } 4550 if (ret < 0) { 4551 error_prepend(errp, "Failed to resize underlying file: "); 4552 qcow2_free_clusters(bs, allocation_start, 4553 nb_new_data_clusters * s->cluster_size, 4554 QCOW2_DISCARD_OTHER); 4555 goto fail; 4556 } 4557 4558 /* Create the necessary L2 entries */ 4559 host_offset = allocation_start; 4560 guest_offset = old_length; 4561 while (nb_new_data_clusters) { 4562 int64_t nb_clusters = MIN( 4563 nb_new_data_clusters, 4564 s->l2_slice_size - offset_to_l2_slice_index(s, guest_offset)); 4565 unsigned cow_start_length = offset_into_cluster(s, guest_offset); 4566 QCowL2Meta allocation; 4567 guest_offset = start_of_cluster(s, guest_offset); 4568 allocation = (QCowL2Meta) { 4569 .offset = guest_offset, 4570 .alloc_offset = host_offset, 4571 .nb_clusters = nb_clusters, 4572 .cow_start = { 4573 .offset = 0, 4574 .nb_bytes = cow_start_length, 4575 }, 4576 .cow_end = { 4577 .offset = nb_clusters << s->cluster_bits, 4578 .nb_bytes = 0, 4579 }, 4580 .prealloc = !subclusters_need_allocation, 4581 }; 4582 qemu_co_queue_init(&allocation.dependent_requests); 4583 4584 ret = qcow2_alloc_cluster_link_l2(bs, &allocation); 4585 if (ret < 0) { 4586 error_setg_errno(errp, -ret, "Failed to update L2 tables"); 4587 qcow2_free_clusters(bs, host_offset, 4588 nb_new_data_clusters * s->cluster_size, 4589 QCOW2_DISCARD_OTHER); 4590 goto fail; 4591 } 4592 4593 guest_offset += nb_clusters * s->cluster_size; 4594 host_offset += nb_clusters * s->cluster_size; 4595 nb_new_data_clusters -= nb_clusters; 4596 } 4597 break; 4598 } 4599 4600 default: 4601 g_assert_not_reached(); 4602 } 4603 4604 if ((flags & BDRV_REQ_ZERO_WRITE) && offset > old_length) { 4605 uint64_t zero_start = QEMU_ALIGN_UP(old_length, s->subcluster_size); 4606 4607 /* 4608 * Use zero clusters as much as we can. qcow2_subcluster_zeroize() 4609 * requires a subcluster-aligned start. The end may be unaligned if 4610 * it is at the end of the image (which it is here). 
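* (Illustration, assuming 2 MiB clusters and no extended L2, i.e. subcluster size == cluster size: growing from 3 MiB to 10 MiB gives zero_start = 4 MiB, so [4 MiB, 10 MiB) is zeroized via the metadata and the 1 MiB head [3 MiB, 4 MiB) is written out explicitly below.)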
4611 */ 4612 if (offset > zero_start) { 4613 ret = qcow2_subcluster_zeroize(bs, zero_start, offset - zero_start, 4614 0); 4615 if (ret < 0) { 4616 error_setg_errno(errp, -ret, "Failed to zero out new clusters"); 4617 goto fail; 4618 } 4619 } 4620 4621 /* Write explicit zeros for the unaligned head */ 4622 if (zero_start > old_length) { 4623 uint64_t len = MIN(zero_start, offset) - old_length; 4624 uint8_t *buf = qemu_blockalign0(bs, len); 4625 QEMUIOVector qiov; 4626 qemu_iovec_init_buf(&qiov, buf, len); 4627 4628 qemu_co_mutex_unlock(&s->lock); 4629 ret = qcow2_co_pwritev_part(bs, old_length, len, &qiov, 0, 0); 4630 qemu_co_mutex_lock(&s->lock); 4631 4632 qemu_vfree(buf); 4633 if (ret < 0) { 4634 error_setg_errno(errp, -ret, "Failed to zero out the new area"); 4635 goto fail; 4636 } 4637 } 4638 } 4639 4640 if (prealloc != PREALLOC_MODE_OFF) { 4641 /* Flush metadata before actually changing the image size */ 4642 ret = qcow2_write_caches(bs); 4643 if (ret < 0) { 4644 error_setg_errno(errp, -ret, 4645 "Failed to flush the preallocated area to disk"); 4646 goto fail; 4647 } 4648 } 4649 4650 bs->total_sectors = offset / BDRV_SECTOR_SIZE; 4651 4652 /* write updated header.size */ 4653 offset = cpu_to_be64(offset); 4654 ret = bdrv_co_pwrite_sync(bs->file, offsetof(QCowHeader, size), 4655 sizeof(offset), &offset, 0); 4656 if (ret < 0) { 4657 error_setg_errno(errp, -ret, "Failed to update the image size"); 4658 goto fail; 4659 } 4660 4661 s->l1_vm_state_index = new_l1_size; 4662 4663 /* Update cache sizes */ 4664 options = qdict_clone_shallow(bs->options); 4665 ret = qcow2_update_options(bs, options, s->flags, errp); 4666 qobject_unref(options); 4667 if (ret < 0) { 4668 goto fail; 4669 } 4670 ret = 0; 4671 fail: 4672 qemu_co_mutex_unlock(&s->lock); 4673 return ret; 4674 } 4675 4676 static int coroutine_fn GRAPH_RDLOCK 4677 qcow2_co_pwritev_compressed_task(BlockDriverState *bs, 4678 uint64_t offset, uint64_t bytes, 4679 QEMUIOVector *qiov, size_t qiov_offset) 4680 { 4681 BDRVQcow2State *s = bs->opaque; 4682 int ret; 4683 ssize_t out_len; 4684 uint8_t *buf, *out_buf; 4685 uint64_t cluster_offset; 4686 4687 assert(bytes == s->cluster_size || (bytes < s->cluster_size && 4688 (offset + bytes == bs->total_sectors << BDRV_SECTOR_BITS))); 4689 4690 buf = qemu_blockalign(bs, s->cluster_size); 4691 if (bytes < s->cluster_size) { 4692 /* Zero-pad last write if image size is not cluster aligned */ 4693 memset(buf + bytes, 0, s->cluster_size - bytes); 4694 } 4695 qemu_iovec_to_buf(qiov, qiov_offset, buf, bytes); 4696 4697 out_buf = g_malloc(s->cluster_size); 4698 4699 out_len = qcow2_co_compress(bs, out_buf, s->cluster_size - 1, 4700 buf, s->cluster_size); 4701 if (out_len == -ENOMEM) { 4702 /* could not compress: write normal cluster */ 4703 ret = qcow2_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset, 0); 4704 if (ret < 0) { 4705 goto fail; 4706 } 4707 goto success; 4708 } else if (out_len < 0) { 4709 ret = -EINVAL; 4710 goto fail; 4711 } 4712 4713 qemu_co_mutex_lock(&s->lock); 4714 ret = qcow2_alloc_compressed_cluster_offset(bs, offset, out_len, 4715 &cluster_offset); 4716 if (ret < 0) { 4717 qemu_co_mutex_unlock(&s->lock); 4718 goto fail; 4719 } 4720 4721 ret = qcow2_pre_write_overlap_check(bs, 0, cluster_offset, out_len, true); 4722 qemu_co_mutex_unlock(&s->lock); 4723 if (ret < 0) { 4724 goto fail; 4725 } 4726 4727 BLKDBG_CO_EVENT(s->data_file, BLKDBG_WRITE_COMPRESSED); 4728 ret = bdrv_co_pwrite(s->data_file, cluster_offset, out_len, out_buf, 0); 4729 if (ret < 0) { 4730 goto fail; 4731 } 4732 
success: 4733 ret = 0; 4734 fail: 4735 qemu_vfree(buf); 4736 g_free(out_buf); 4737 return ret; 4738 } 4739 4740 /* 4741 * This function can count as GRAPH_RDLOCK because 4742 * qcow2_co_pwritev_compressed_part() holds the graph lock and keeps it until 4743 * this coroutine has terminated. 4744 */ 4745 static int coroutine_fn GRAPH_RDLOCK 4746 qcow2_co_pwritev_compressed_task_entry(AioTask *task) 4747 { 4748 Qcow2AioTask *t = container_of(task, Qcow2AioTask, task); 4749 4750 assert(!t->subcluster_type && !t->l2meta); 4751 4752 return qcow2_co_pwritev_compressed_task(t->bs, t->offset, t->bytes, t->qiov, 4753 t->qiov_offset); 4754 } 4755 4756 /* 4757 * XXX: put compressed sectors first, then all the cluster aligned 4758 * tables to avoid losing bytes in alignment 4759 */ 4760 static int coroutine_fn GRAPH_RDLOCK 4761 qcow2_co_pwritev_compressed_part(BlockDriverState *bs, 4762 int64_t offset, int64_t bytes, 4763 QEMUIOVector *qiov, size_t qiov_offset) 4764 { 4765 BDRVQcow2State *s = bs->opaque; 4766 AioTaskPool *aio = NULL; 4767 int ret = 0; 4768 4769 if (has_data_file(bs)) { 4770 return -ENOTSUP; 4771 } 4772 4773 if (bytes == 0) { 4774 /* 4775 * align end of file to a sector boundary to ease reading with 4776 * sector based I/Os 4777 */ 4778 int64_t len = bdrv_co_getlength(bs->file->bs); 4779 if (len < 0) { 4780 return len; 4781 } 4782 return bdrv_co_truncate(bs->file, len, false, PREALLOC_MODE_OFF, 0, 4783 NULL); 4784 } 4785 4786 if (offset_into_cluster(s, offset)) { 4787 return -EINVAL; 4788 } 4789 4790 if (offset_into_cluster(s, bytes) && 4791 (offset + bytes) != (bs->total_sectors << BDRV_SECTOR_BITS)) { 4792 return -EINVAL; 4793 } 4794 4795 while (bytes && aio_task_pool_status(aio) == 0) { 4796 uint64_t chunk_size = MIN(bytes, s->cluster_size); 4797 4798 if (!aio && chunk_size != bytes) { 4799 aio = aio_task_pool_new(QCOW2_MAX_WORKERS); 4800 } 4801 4802 ret = qcow2_add_task(bs, aio, qcow2_co_pwritev_compressed_task_entry, 4803 0, 0, offset, chunk_size, qiov, qiov_offset, NULL); 4804 if (ret < 0) { 4805 break; 4806 } 4807 qiov_offset += chunk_size; 4808 offset += chunk_size; 4809 bytes -= chunk_size; 4810 } 4811 4812 if (aio) { 4813 aio_task_pool_wait_all(aio); 4814 if (ret == 0) { 4815 ret = aio_task_pool_status(aio); 4816 } 4817 g_free(aio); 4818 } 4819 4820 return ret; 4821 } 4822 4823 static int coroutine_fn GRAPH_RDLOCK 4824 qcow2_co_preadv_compressed(BlockDriverState *bs, 4825 uint64_t l2_entry, 4826 uint64_t offset, 4827 uint64_t bytes, 4828 QEMUIOVector *qiov, 4829 size_t qiov_offset) 4830 { 4831 BDRVQcow2State *s = bs->opaque; 4832 int ret = 0, csize; 4833 uint64_t coffset; 4834 uint8_t *buf, *out_buf; 4835 int offset_in_cluster = offset_into_cluster(s, offset); 4836 4837 qcow2_parse_compressed_l2_entry(bs, l2_entry, &coffset, &csize); 4838 4839 buf = g_try_malloc(csize); 4840 if (!buf) { 4841 return -ENOMEM; 4842 } 4843 4844 out_buf = qemu_blockalign(bs, s->cluster_size); 4845 4846 BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_COMPRESSED); 4847 ret = bdrv_co_pread(bs->file, coffset, csize, buf, 0); 4848 if (ret < 0) { 4849 goto fail; 4850 } 4851 4852 if (qcow2_co_decompress(bs, out_buf, s->cluster_size, buf, csize) < 0) { 4853 ret = -EIO; 4854 goto fail; 4855 } 4856 4857 qemu_iovec_from_buf(qiov, qiov_offset, out_buf + offset_in_cluster, bytes); 4858 4859 fail: 4860 qemu_vfree(out_buf); 4861 g_free(buf); 4862 4863 return ret; 4864 } 4865 4866 static int GRAPH_RDLOCK make_completely_empty(BlockDriverState *bs) 4867 { 4868 BDRVQcow2State *s = bs->opaque; 4869 Error *local_err = NULL; 4870 
int ret, l1_clusters; 4871 int64_t offset; 4872 uint64_t *new_reftable = NULL; 4873 uint64_t rt_entry, l1_size2; 4874 struct { 4875 uint64_t l1_offset; 4876 uint64_t reftable_offset; 4877 uint32_t reftable_clusters; 4878 } QEMU_PACKED l1_ofs_rt_ofs_cls; 4879 4880 ret = qcow2_cache_empty(bs, s->l2_table_cache); 4881 if (ret < 0) { 4882 goto fail; 4883 } 4884 4885 ret = qcow2_cache_empty(bs, s->refcount_block_cache); 4886 if (ret < 0) { 4887 goto fail; 4888 } 4889 4890 /* Refcounts will be broken utterly */ 4891 ret = qcow2_mark_dirty(bs); 4892 if (ret < 0) { 4893 goto fail; 4894 } 4895 4896 BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE); 4897 4898 l1_clusters = DIV_ROUND_UP(s->l1_size, s->cluster_size / L1E_SIZE); 4899 l1_size2 = (uint64_t)s->l1_size * L1E_SIZE; 4900 4901 /* After this call, neither the in-memory nor the on-disk refcount 4902 * information accurately describe the actual references */ 4903 4904 ret = bdrv_pwrite_zeroes(bs->file, s->l1_table_offset, 4905 l1_clusters * s->cluster_size, 0); 4906 if (ret < 0) { 4907 goto fail_broken_refcounts; 4908 } 4909 memset(s->l1_table, 0, l1_size2); 4910 4911 BLKDBG_EVENT(bs->file, BLKDBG_EMPTY_IMAGE_PREPARE); 4912 4913 /* Overwrite enough clusters at the beginning of the sectors to place 4914 * the refcount table, a refcount block and the L1 table in; this may 4915 * overwrite parts of the existing refcount and L1 table, which is not 4916 * an issue because the dirty flag is set, complete data loss is in fact 4917 * desired and partial data loss is consequently fine as well */ 4918 ret = bdrv_pwrite_zeroes(bs->file, s->cluster_size, 4919 (2 + l1_clusters) * s->cluster_size, 0); 4920 /* This call (even if it failed overall) may have overwritten on-disk 4921 * refcount structures; in that case, the in-memory refcount information 4922 * will probably differ from the on-disk information which makes the BDS 4923 * unusable */ 4924 if (ret < 0) { 4925 goto fail_broken_refcounts; 4926 } 4927 4928 BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE); 4929 BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_UPDATE); 4930 4931 /* "Create" an empty reftable (one cluster) directly after the image 4932 * header and an empty L1 table three clusters after the image header; 4933 * the cluster between those two will be used as the first refblock */ 4934 l1_ofs_rt_ofs_cls.l1_offset = cpu_to_be64(3 * s->cluster_size); 4935 l1_ofs_rt_ofs_cls.reftable_offset = cpu_to_be64(s->cluster_size); 4936 l1_ofs_rt_ofs_cls.reftable_clusters = cpu_to_be32(1); 4937 ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_table_offset), 4938 sizeof(l1_ofs_rt_ofs_cls), &l1_ofs_rt_ofs_cls, 0); 4939 if (ret < 0) { 4940 goto fail_broken_refcounts; 4941 } 4942 4943 s->l1_table_offset = 3 * s->cluster_size; 4944 4945 new_reftable = g_try_new0(uint64_t, s->cluster_size / REFTABLE_ENTRY_SIZE); 4946 if (!new_reftable) { 4947 ret = -ENOMEM; 4948 goto fail_broken_refcounts; 4949 } 4950 4951 s->refcount_table_offset = s->cluster_size; 4952 s->refcount_table_size = s->cluster_size / REFTABLE_ENTRY_SIZE; 4953 s->max_refcount_table_index = 0; 4954 4955 g_free(s->refcount_table); 4956 s->refcount_table = new_reftable; 4957 new_reftable = NULL; 4958 4959 /* Now the in-memory refcount information again corresponds to the on-disk 4960 * information (reftable is empty and no refblocks (the refblock cache is 4961 * empty)); however, this means some clusters (e.g. 
the image header) are 4962 * referenced, but not refcounted, but the normal qcow2 code assumes that 4963 * the in-memory information is always correct */ 4964 4965 BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC); 4966 4967 /* Enter the first refblock into the reftable */ 4968 rt_entry = cpu_to_be64(2 * s->cluster_size); 4969 ret = bdrv_pwrite_sync(bs->file, s->cluster_size, sizeof(rt_entry), 4970 &rt_entry, 0); 4971 if (ret < 0) { 4972 goto fail_broken_refcounts; 4973 } 4974 s->refcount_table[0] = 2 * s->cluster_size; 4975 4976 s->free_cluster_index = 0; 4977 assert(3 + l1_clusters <= s->refcount_block_size); 4978 offset = qcow2_alloc_clusters(bs, 3 * s->cluster_size + l1_size2); 4979 if (offset < 0) { 4980 ret = offset; 4981 goto fail_broken_refcounts; 4982 } else if (offset > 0) { 4983 error_report("First cluster in emptied image is in use"); 4984 abort(); 4985 } 4986 4987 /* Now finally the in-memory information corresponds to the on-disk 4988 * structures and is correct */ 4989 ret = qcow2_mark_clean(bs); 4990 if (ret < 0) { 4991 goto fail; 4992 } 4993 4994 ret = bdrv_truncate(bs->file, (3 + l1_clusters) * s->cluster_size, false, 4995 PREALLOC_MODE_OFF, 0, &local_err); 4996 if (ret < 0) { 4997 error_report_err(local_err); 4998 goto fail; 4999 } 5000 5001 return 0; 5002 5003 fail_broken_refcounts: 5004 /* The BDS is unusable at this point. If we wanted to make it usable, we 5005 * would have to call qcow2_refcount_close(), qcow2_refcount_init(), 5006 * qcow2_check_refcounts(), qcow2_refcount_close() and qcow2_refcount_init() 5007 * again. However, because the functions which could have caused this error 5008 * path to be taken are used by those functions as well, it's very likely 5009 * that that sequence will fail as well. Therefore, just eject the BDS. */ 5010 bs->drv = NULL; 5011 5012 fail: 5013 g_free(new_reftable); 5014 return ret; 5015 } 5016 5017 static int GRAPH_RDLOCK qcow2_make_empty(BlockDriverState *bs) 5018 { 5019 BDRVQcow2State *s = bs->opaque; 5020 uint64_t offset, end_offset; 5021 int step = QEMU_ALIGN_DOWN(INT_MAX, s->cluster_size); 5022 int l1_clusters, ret = 0; 5023 5024 l1_clusters = DIV_ROUND_UP(s->l1_size, s->cluster_size / L1E_SIZE); 5025 5026 if (s->qcow_version >= 3 && !s->snapshots && !s->nb_bitmaps && 5027 3 + l1_clusters <= s->refcount_block_size && 5028 s->crypt_method_header != QCOW_CRYPT_LUKS && 5029 !has_data_file(bs)) { 5030 /* The following function only works for qcow2 v3 images (it 5031 * requires the dirty flag) and only as long as there are no 5032 * features that reserve extra clusters (such as snapshots, 5033 * LUKS header, or persistent bitmaps), because it completely 5034 * empties the image. Furthermore, the L1 table and three 5035 * additional clusters (image header, refcount table, one 5036 * refcount block) have to fit inside one refcount block. It 5037 * only resets the image file, i.e. does not work with an 5038 * external data file. */ 5039 return make_completely_empty(bs); 5040 } 5041 5042 /* This fallback code simply discards every active cluster; this is slow, 5043 * but works in all cases */ 5044 end_offset = bs->total_sectors * BDRV_SECTOR_SIZE; 5045 for (offset = 0; offset < end_offset; offset += step) { 5046 /* As this function is generally used after committing an external 5047 * snapshot, QCOW2_DISCARD_SNAPSHOT seems appropriate. Also, the 5048 * default action for this kind of discard is to pass the discard, 5049 * which will ideally result in an actually smaller image file, as 5050 * is probably desired. 
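* (Each call below covers at most INT_MAX bytes, aligned down to the cluster size, so even very large images are processed in safe chunks.)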
*/ 5051 ret = qcow2_cluster_discard(bs, offset, MIN(step, end_offset - offset), 5052 QCOW2_DISCARD_SNAPSHOT, true); 5053 if (ret < 0) { 5054 break; 5055 } 5056 } 5057 5058 return ret; 5059 } 5060 5061 static coroutine_fn GRAPH_RDLOCK int qcow2_co_flush_to_os(BlockDriverState *bs) 5062 { 5063 BDRVQcow2State *s = bs->opaque; 5064 int ret; 5065 5066 qemu_co_mutex_lock(&s->lock); 5067 ret = qcow2_write_caches(bs); 5068 qemu_co_mutex_unlock(&s->lock); 5069 5070 return ret; 5071 } 5072 5073 static BlockMeasureInfo *qcow2_measure(QemuOpts *opts, BlockDriverState *in_bs, 5074 Error **errp) 5075 { 5076 Error *local_err = NULL; 5077 BlockMeasureInfo *info; 5078 uint64_t required = 0; /* bytes that contribute to required size */ 5079 uint64_t virtual_size; /* disk size as seen by guest */ 5080 uint64_t refcount_bits; 5081 uint64_t l2_tables; 5082 uint64_t luks_payload_size = 0; 5083 size_t cluster_size; 5084 int version; 5085 char *optstr; 5086 PreallocMode prealloc; 5087 bool has_backing_file; 5088 bool has_luks; 5089 bool extended_l2; 5090 size_t l2e_size; 5091 5092 /* Parse image creation options */ 5093 extended_l2 = qemu_opt_get_bool_del(opts, BLOCK_OPT_EXTL2, false); 5094 5095 cluster_size = qcow2_opt_get_cluster_size_del(opts, extended_l2, 5096 &local_err); 5097 if (local_err) { 5098 goto err; 5099 } 5100 5101 version = qcow2_opt_get_version_del(opts, &local_err); 5102 if (local_err) { 5103 goto err; 5104 } 5105 5106 refcount_bits = qcow2_opt_get_refcount_bits_del(opts, version, &local_err); 5107 if (local_err) { 5108 goto err; 5109 } 5110 5111 optstr = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC); 5112 prealloc = qapi_enum_parse(&PreallocMode_lookup, optstr, 5113 PREALLOC_MODE_OFF, &local_err); 5114 g_free(optstr); 5115 if (local_err) { 5116 goto err; 5117 } 5118 5119 optstr = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE); 5120 has_backing_file = !!optstr; 5121 g_free(optstr); 5122 5123 optstr = qemu_opt_get_del(opts, BLOCK_OPT_ENCRYPT_FORMAT); 5124 has_luks = optstr && strcmp(optstr, "luks") == 0; 5125 g_free(optstr); 5126 5127 if (has_luks) { 5128 g_autoptr(QCryptoBlockCreateOptions) create_opts = NULL; 5129 QDict *cryptoopts = qcow2_extract_crypto_opts(opts, "luks", errp); 5130 size_t headerlen; 5131 5132 create_opts = block_crypto_create_opts_init(cryptoopts, errp); 5133 qobject_unref(cryptoopts); 5134 if (!create_opts) { 5135 goto err; 5136 } 5137 5138 if (!qcrypto_block_calculate_payload_offset(create_opts, 5139 "encrypt.", 5140 &headerlen, 5141 &local_err)) { 5142 goto err; 5143 } 5144 5145 luks_payload_size = ROUND_UP(headerlen, cluster_size); 5146 } 5147 5148 virtual_size = qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0); 5149 virtual_size = ROUND_UP(virtual_size, cluster_size); 5150 5151 /* Check that virtual disk size is valid */ 5152 l2e_size = extended_l2 ? L2E_SIZE_EXTENDED : L2E_SIZE_NORMAL; 5153 l2_tables = DIV_ROUND_UP(virtual_size / cluster_size, 5154 cluster_size / l2e_size); 5155 if (l2_tables * L1E_SIZE > QCOW_MAX_L1_SIZE) { 5156 error_setg(&local_err, "The image size is too large " 5157 "(try using a larger cluster size)"); 5158 goto err; 5159 } 5160 5161 /* Account for input image */ 5162 if (in_bs) { 5163 int64_t ssize = bdrv_getlength(in_bs); 5164 if (ssize < 0) { 5165 error_setg_errno(&local_err, -ssize, 5166 "Unable to get image virtual_size"); 5167 goto err; 5168 } 5169 5170 virtual_size = ROUND_UP(ssize, cluster_size); 5171 5172 if (has_backing_file) { 5173 /* We don't know how much of the backing chain is shared by the input 5174 * image and the new image file.
In the worst case the new image's 5175 * backing file has nothing in common with the input image. Be 5176 * conservative and assume all clusters need to be written. 5177 */ 5178 required = virtual_size; 5179 } else { 5180 int64_t offset; 5181 int64_t pnum = 0; 5182 5183 for (offset = 0; offset < ssize; offset += pnum) { 5184 int ret; 5185 5186 ret = bdrv_block_status_above(in_bs, NULL, offset, 5187 ssize - offset, &pnum, NULL, 5188 NULL); 5189 if (ret < 0) { 5190 error_setg_errno(&local_err, -ret, 5191 "Unable to get block status"); 5192 goto err; 5193 } 5194 5195 if (ret & BDRV_BLOCK_ZERO) { 5196 /* Skip zero regions (safe with no backing file) */ 5197 } else if ((ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED)) == 5198 (BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED)) { 5199 /* Extend pnum to end of cluster for next iteration */ 5200 pnum = ROUND_UP(offset + pnum, cluster_size) - offset; 5201 5202 /* Count clusters we've seen */ 5203 required += offset % cluster_size + pnum; 5204 } 5205 } 5206 } 5207 } 5208 5209 /* Take into account preallocation. Nothing special is needed for 5210 * PREALLOC_MODE_METADATA since metadata is always counted. 5211 */ 5212 if (prealloc == PREALLOC_MODE_FULL || prealloc == PREALLOC_MODE_FALLOC) { 5213 required = virtual_size; 5214 } 5215 5216 info = g_new0(BlockMeasureInfo, 1); 5217 info->fully_allocated = luks_payload_size + 5218 qcow2_calc_prealloc_size(virtual_size, cluster_size, 5219 ctz32(refcount_bits), extended_l2); 5220 5221 /* 5222 * Remove data clusters that are not required. This overestimates the 5223 * required size because metadata needed for the fully allocated file is 5224 * still counted. Show bitmaps only if both source and destination 5225 * would support them. 5226 */ 5227 info->required = info->fully_allocated - virtual_size + required; 5228 info->has_bitmaps = version >= 3 && in_bs && 5229 bdrv_supports_persistent_dirty_bitmap(in_bs); 5230 if (info->has_bitmaps) { 5231 info->bitmaps = qcow2_get_persistent_dirty_bitmap_size(in_bs, 5232 cluster_size); 5233 } 5234 return info; 5235 5236 err: 5237 error_propagate(errp, local_err); 5238 return NULL; 5239 } 5240 5241 static int coroutine_fn 5242 qcow2_co_get_info(BlockDriverState *bs, BlockDriverInfo *bdi) 5243 { 5244 BDRVQcow2State *s = bs->opaque; 5245 bdi->cluster_size = s->cluster_size; 5246 bdi->subcluster_size = s->subcluster_size; 5247 bdi->vm_state_offset = qcow2_vm_state_offset(s); 5248 bdi->is_dirty = s->incompatible_features & QCOW2_INCOMPAT_DIRTY; 5249 return 0; 5250 } 5251 5252 static ImageInfoSpecific * GRAPH_RDLOCK 5253 qcow2_get_specific_info(BlockDriverState *bs, Error **errp) 5254 { 5255 BDRVQcow2State *s = bs->opaque; 5256 ImageInfoSpecific *spec_info; 5257 QCryptoBlockInfo *encrypt_info = NULL; 5258 5259 if (s->crypto != NULL) { 5260 encrypt_info = qcrypto_block_get_info(s->crypto, errp); 5261 if (!encrypt_info) { 5262 return NULL; 5263 } 5264 } 5265 5266 spec_info = g_new(ImageInfoSpecific, 1); 5267 *spec_info = (ImageInfoSpecific){ 5268 .type = IMAGE_INFO_SPECIFIC_KIND_QCOW2, 5269 .u.qcow2.data = g_new0(ImageInfoSpecificQCow2, 1), 5270 }; 5271 if (s->qcow_version == 2) { 5272 *spec_info->u.qcow2.data = (ImageInfoSpecificQCow2){ 5273 .compat = g_strdup("0.10"), 5274 .refcount_bits = s->refcount_bits, 5275 }; 5276 } else if (s->qcow_version == 3) { 5277 Qcow2BitmapInfoList *bitmaps; 5278 if (!qcow2_get_bitmap_info_list(bs, &bitmaps, errp)) { 5279 qapi_free_ImageInfoSpecific(spec_info); 5280 qapi_free_QCryptoBlockInfo(encrypt_info); 5281 return NULL; 5282 } 5283 
*spec_info->u.qcow2.data = (ImageInfoSpecificQCow2){ 5284 .compat = g_strdup("1.1"), 5285 .lazy_refcounts = s->compatible_features & 5286 QCOW2_COMPAT_LAZY_REFCOUNTS, 5287 .has_lazy_refcounts = true, 5288 .corrupt = s->incompatible_features & 5289 QCOW2_INCOMPAT_CORRUPT, 5290 .has_corrupt = true, 5291 .has_extended_l2 = true, 5292 .extended_l2 = has_subclusters(s), 5293 .refcount_bits = s->refcount_bits, 5294 .has_bitmaps = !!bitmaps, 5295 .bitmaps = bitmaps, 5296 .data_file = g_strdup(s->image_data_file), 5297 .has_data_file_raw = has_data_file(bs), 5298 .data_file_raw = data_file_is_raw(bs), 5299 .compression_type = s->compression_type, 5300 }; 5301 } else { 5302 /* if this assertion fails, this probably means a new version was 5303 * added without having it covered here */ 5304 g_assert_not_reached(); 5305 } 5306 5307 if (encrypt_info) { 5308 ImageInfoSpecificQCow2Encryption *qencrypt = 5309 g_new(ImageInfoSpecificQCow2Encryption, 1); 5310 switch (encrypt_info->format) { 5311 case QCRYPTO_BLOCK_FORMAT_QCOW: 5312 qencrypt->format = BLOCKDEV_QCOW2_ENCRYPTION_FORMAT_AES; 5313 break; 5314 case QCRYPTO_BLOCK_FORMAT_LUKS: 5315 qencrypt->format = BLOCKDEV_QCOW2_ENCRYPTION_FORMAT_LUKS; 5316 qencrypt->u.luks = encrypt_info->u.luks; 5317 break; 5318 default: 5319 abort(); 5320 } 5321 /* Since we did shallow copy above, erase any pointers 5322 * in the original info */ 5323 memset(&encrypt_info->u, 0, sizeof(encrypt_info->u)); 5324 qapi_free_QCryptoBlockInfo(encrypt_info); 5325 5326 spec_info->u.qcow2.data->encrypt = qencrypt; 5327 } 5328 5329 return spec_info; 5330 } 5331 5332 static int coroutine_mixed_fn GRAPH_RDLOCK 5333 qcow2_has_zero_init(BlockDriverState *bs) 5334 { 5335 BDRVQcow2State *s = bs->opaque; 5336 bool preallocated; 5337 5338 if (qemu_in_coroutine()) { 5339 qemu_co_mutex_lock(&s->lock); 5340 } 5341 /* 5342 * Check preallocation status: Preallocated images have all L2 5343 * tables allocated, nonpreallocated images have none. It is 5344 * therefore enough to check the first one. 5345 */ 5346 preallocated = s->l1_size > 0 && s->l1_table[0] != 0; 5347 if (qemu_in_coroutine()) { 5348 qemu_co_mutex_unlock(&s->lock); 5349 } 5350 5351 if (!preallocated) { 5352 return 1; 5353 } else if (bs->encrypted) { 5354 return 0; 5355 } else { 5356 return bdrv_has_zero_init(s->data_file->bs); 5357 } 5358 } 5359 5360 /* 5361 * Check the request to vmstate. 
On success return 5362 * qcow2_vm_state_offset(bs) + @pos 5363 */ 5364 static int64_t qcow2_check_vmstate_request(BlockDriverState *bs, 5365 QEMUIOVector *qiov, int64_t pos) 5366 { 5367 BDRVQcow2State *s = bs->opaque; 5368 int64_t vmstate_offset = qcow2_vm_state_offset(s); 5369 int ret; 5370 5371 /* Incoming requests must be OK */ 5372 bdrv_check_qiov_request(pos, qiov->size, qiov, 0, &error_abort); 5373 5374 if (INT64_MAX - pos < vmstate_offset) { 5375 return -EIO; 5376 } 5377 5378 pos += vmstate_offset; 5379 ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL); 5380 if (ret < 0) { 5381 return ret; 5382 } 5383 5384 return pos; 5385 } 5386 5387 static int coroutine_fn GRAPH_RDLOCK 5388 qcow2_co_save_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) 5389 { 5390 int64_t offset = qcow2_check_vmstate_request(bs, qiov, pos); 5391 if (offset < 0) { 5392 return offset; 5393 } 5394 5395 BLKDBG_CO_EVENT(bs->file, BLKDBG_VMSTATE_SAVE); 5396 return bs->drv->bdrv_co_pwritev_part(bs, offset, qiov->size, qiov, 0, 0); 5397 } 5398 5399 static int coroutine_fn GRAPH_RDLOCK 5400 qcow2_co_load_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) 5401 { 5402 int64_t offset = qcow2_check_vmstate_request(bs, qiov, pos); 5403 if (offset < 0) { 5404 return offset; 5405 } 5406 5407 BLKDBG_CO_EVENT(bs->file, BLKDBG_VMSTATE_LOAD); 5408 return bs->drv->bdrv_co_preadv_part(bs, offset, qiov->size, qiov, 0, 0); 5409 } 5410 5411 static int GRAPH_RDLOCK qcow2_has_compressed_clusters(BlockDriverState *bs) 5412 { 5413 int64_t offset = 0; 5414 int64_t bytes = bdrv_getlength(bs); 5415 5416 if (bytes < 0) { 5417 return bytes; 5418 } 5419 5420 while (bytes != 0) { 5421 int ret; 5422 QCow2SubclusterType type; 5423 unsigned int cur_bytes = MIN(INT_MAX, bytes); 5424 uint64_t host_offset; 5425 5426 ret = qcow2_get_host_offset(bs, offset, &cur_bytes, &host_offset, 5427 &type); 5428 if (ret < 0) { 5429 return ret; 5430 } 5431 5432 if (type == QCOW2_SUBCLUSTER_COMPRESSED) { 5433 return 1; 5434 } 5435 5436 offset += cur_bytes; 5437 bytes -= cur_bytes; 5438 } 5439 5440 return 0; 5441 } 5442 5443 /* 5444 * Downgrades an image's version. To achieve this, any incompatible features 5445 * have to be removed. 5446 */ 5447 static int GRAPH_RDLOCK 5448 qcow2_downgrade(BlockDriverState *bs, int target_version, 5449 BlockDriverAmendStatusCB *status_cb, void *cb_opaque, 5450 Error **errp) 5451 { 5452 BDRVQcow2State *s = bs->opaque; 5453 int current_version = s->qcow_version; 5454 int ret; 5455 int i; 5456 5457 /* This is qcow2_downgrade(), not qcow2_upgrade() */ 5458 assert(target_version < current_version); 5459 5460 /* There are no other versions (now) that you can downgrade to */ 5461 assert(target_version == 2); 5462 5463 if (s->refcount_order != 4) { 5464 error_setg(errp, "compat=0.10 requires refcount_bits=16"); 5465 return -ENOTSUP; 5466 } 5467 5468 if (has_data_file(bs)) { 5469 error_setg(errp, "Cannot downgrade an image with a data file"); 5470 return -ENOTSUP; 5471 } 5472 5473 /* 5474 * If any internal snapshot has a different size than the current 5475 * image size, or VM state size that exceeds 32 bits, downgrading 5476 * is unsafe. Even though we would still use v3-compliant output 5477 * to preserve that data, other v2 programs might not realize 5478 * those optional fields are important. 
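* (The v2 snapshot table entry itself only has a 32-bit VM state size field; the 64-bit size and the disk size live in the optional extra data that v3 made mandatory.)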
5479 */ 5480 for (i = 0; i < s->nb_snapshots; i++) { 5481 if (s->snapshots[i].vm_state_size > UINT32_MAX || 5482 s->snapshots[i].disk_size != bs->total_sectors * BDRV_SECTOR_SIZE) { 5483 error_setg(errp, "Internal snapshots prevent downgrade of image"); 5484 return -ENOTSUP; 5485 } 5486 } 5487 5488 /* clear incompatible features */ 5489 if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) { 5490 ret = qcow2_mark_clean(bs); 5491 if (ret < 0) { 5492 error_setg_errno(errp, -ret, "Failed to make the image clean"); 5493 return ret; 5494 } 5495 } 5496 5497 /* with QCOW2_INCOMPAT_CORRUPT, it is pretty much impossible to get here in 5498 * the first place; if that happens nonetheless, returning -ENOTSUP is the 5499 * best thing to do anyway */ 5500 5501 if (s->incompatible_features & ~QCOW2_INCOMPAT_COMPRESSION) { 5502 error_setg(errp, "Cannot downgrade an image with incompatible features " 5503 "0x%" PRIx64 " set", 5504 s->incompatible_features & ~QCOW2_INCOMPAT_COMPRESSION); 5505 return -ENOTSUP; 5506 } 5507 5508 /* since we can ignore compatible features, we can set them to 0 as well */ 5509 s->compatible_features = 0; 5510 /* if lazy refcounts have been used, they have already been fixed through 5511 * clearing the dirty flag */ 5512 5513 /* clearing autoclear features is trivial */ 5514 s->autoclear_features = 0; 5515 5516 ret = qcow2_expand_zero_clusters(bs, status_cb, cb_opaque); 5517 if (ret < 0) { 5518 error_setg_errno(errp, -ret, "Failed to turn zero into data clusters"); 5519 return ret; 5520 } 5521 5522 if (s->incompatible_features & QCOW2_INCOMPAT_COMPRESSION) { 5523 ret = qcow2_has_compressed_clusters(bs); 5524 if (ret < 0) { 5525 error_setg(errp, "Failed to check block status"); 5526 return -EINVAL; 5527 } 5528 if (ret) { 5529 error_setg(errp, "Cannot downgrade an image with zstd compression " 5530 "type and existing compressed clusters"); 5531 return -ENOTSUP; 5532 } 5533 /* 5534 * No compressed clusters for now, so just choose the default zlib 5535 * compression. 5536 */ 5537 s->incompatible_features &= ~QCOW2_INCOMPAT_COMPRESSION; 5538 s->compression_type = QCOW2_COMPRESSION_TYPE_ZLIB; 5539 } 5540 5541 assert(s->incompatible_features == 0); 5542 5543 s->qcow_version = target_version; 5544 ret = qcow2_update_header(bs); 5545 if (ret < 0) { 5546 s->qcow_version = current_version; 5547 error_setg_errno(errp, -ret, "Failed to update the image header"); 5548 return ret; 5549 } 5550 return 0; 5551 } 5552 5553 /* 5554 * Upgrades an image's version. While newer versions encompass all 5555 * features of older versions, some things may have to be presented 5556 * differently. 5557 */ 5558 static int GRAPH_RDLOCK 5559 qcow2_upgrade(BlockDriverState *bs, int target_version, 5560 BlockDriverAmendStatusCB *status_cb, void *cb_opaque, 5561 Error **errp) 5562 { 5563 BDRVQcow2State *s = bs->opaque; 5564 bool need_snapshot_update; 5565 int current_version = s->qcow_version; 5566 int i; 5567 int ret; 5568 5569 /* This is qcow2_upgrade(), not qcow2_downgrade() */ 5570 assert(target_version > current_version); 5571 5572 /* There are no other versions (yet) that you can upgrade to */ 5573 assert(target_version == 3); 5574 5575 status_cb(bs, 0, 2, cb_opaque); 5576 5577 /* 5578 * In v2, snapshots do not need to have extra data. v3 requires 5579 * the 64-bit VM state size and the virtual disk size to be 5580 * present. 5581 * qcow2_write_snapshots() will always write the list in the 5582 * v3-compliant format.
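* (The table therefore only needs to be rewritten if at least one snapshot still lacks those two extra-data fields, which is what the loop below checks.)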
5583 */ 5584 need_snapshot_update = false; 5585 for (i = 0; i < s->nb_snapshots; i++) { 5586 if (s->snapshots[i].extra_data_size < 5587 sizeof_field(QCowSnapshotExtraData, vm_state_size_large) + 5588 sizeof_field(QCowSnapshotExtraData, disk_size)) 5589 { 5590 need_snapshot_update = true; 5591 break; 5592 } 5593 } 5594 if (need_snapshot_update) { 5595 ret = qcow2_write_snapshots(bs); 5596 if (ret < 0) { 5597 error_setg_errno(errp, -ret, "Failed to update the snapshot table"); 5598 return ret; 5599 } 5600 } 5601 status_cb(bs, 1, 2, cb_opaque); 5602 5603 s->qcow_version = target_version; 5604 ret = qcow2_update_header(bs); 5605 if (ret < 0) { 5606 s->qcow_version = current_version; 5607 error_setg_errno(errp, -ret, "Failed to update the image header"); 5608 return ret; 5609 } 5610 status_cb(bs, 2, 2, cb_opaque); 5611 5612 return 0; 5613 } 5614 5615 typedef enum Qcow2AmendOperation { 5616 /* This is the value Qcow2AmendHelperCBInfo::last_operation will be 5617 * statically initialized to so that the helper CB can discern the first 5618 * invocation from an operation change */ 5619 QCOW2_NO_OPERATION = 0, 5620 5621 QCOW2_UPGRADING, 5622 QCOW2_UPDATING_ENCRYPTION, 5623 QCOW2_CHANGING_REFCOUNT_ORDER, 5624 QCOW2_DOWNGRADING, 5625 } Qcow2AmendOperation; 5626 5627 typedef struct Qcow2AmendHelperCBInfo { 5628 /* The code coordinating the amend operations should only modify 5629 * these four fields; the rest will be managed by the CB */ 5630 BlockDriverAmendStatusCB *original_status_cb; 5631 void *original_cb_opaque; 5632 5633 Qcow2AmendOperation current_operation; 5634 5635 /* Total number of operations to perform (only set once) */ 5636 int total_operations; 5637 5638 /* The following fields are managed by the CB */ 5639 5640 /* Number of operations completed */ 5641 int operations_completed; 5642 5643 /* Cumulative offset of all completed operations */ 5644 int64_t offset_completed; 5645 5646 Qcow2AmendOperation last_operation; 5647 int64_t last_work_size; 5648 } Qcow2AmendHelperCBInfo; 5649 5650 static void qcow2_amend_helper_cb(BlockDriverState *bs, 5651 int64_t operation_offset, 5652 int64_t operation_work_size, void *opaque) 5653 { 5654 Qcow2AmendHelperCBInfo *info = opaque; 5655 int64_t current_work_size; 5656 int64_t projected_work_size; 5657 5658 if (info->current_operation != info->last_operation) { 5659 if (info->last_operation != QCOW2_NO_OPERATION) { 5660 info->offset_completed += info->last_work_size; 5661 info->operations_completed++; 5662 } 5663 5664 info->last_operation = info->current_operation; 5665 } 5666 5667 assert(info->total_operations > 0); 5668 assert(info->operations_completed < info->total_operations); 5669 5670 info->last_work_size = operation_work_size; 5671 5672 current_work_size = info->offset_completed + operation_work_size; 5673 5674 /* current_work_size is the total work size for (operations_completed + 1) 5675 * operations (which includes this one), so multiply it by the number of 5676 * operations not covered and divide it by the number of operations 5677 * covered to get a projection for the operations not covered */ 5678 projected_work_size = current_work_size * (info->total_operations - 5679 info->operations_completed - 1) 5680 / (info->operations_completed + 1); 5681 5682 info->original_status_cb(bs, info->offset_completed + operation_offset, 5683 current_work_size + projected_work_size, 5684 info->original_cb_opaque); 5685 } 5686 5687 static int GRAPH_RDLOCK 5688 qcow2_amend_options(BlockDriverState *bs, QemuOpts *opts, 5689 BlockDriverAmendStatusCB 
static void qcow2_amend_helper_cb(BlockDriverState *bs,
                                  int64_t operation_offset,
                                  int64_t operation_work_size, void *opaque)
{
    Qcow2AmendHelperCBInfo *info = opaque;
    int64_t current_work_size;
    int64_t projected_work_size;

    if (info->current_operation != info->last_operation) {
        if (info->last_operation != QCOW2_NO_OPERATION) {
            info->offset_completed += info->last_work_size;
            info->operations_completed++;
        }

        info->last_operation = info->current_operation;
    }

    assert(info->total_operations > 0);
    assert(info->operations_completed < info->total_operations);

    info->last_work_size = operation_work_size;

    current_work_size = info->offset_completed + operation_work_size;

    /* current_work_size is the total work size for (operations_completed + 1)
     * operations (which includes this one), so multiply it by the number of
     * operations not covered and divide it by the number of operations
     * covered to get a projection for the operations not covered */
    projected_work_size = current_work_size * (info->total_operations -
                                               info->operations_completed - 1)
                                            / (info->operations_completed + 1);

    info->original_status_cb(bs, info->offset_completed + operation_offset,
                             current_work_size + projected_work_size,
                             info->original_cb_opaque);
}

static int GRAPH_RDLOCK
qcow2_amend_options(BlockDriverState *bs, QemuOpts *opts,
                    BlockDriverAmendStatusCB *status_cb, void *cb_opaque,
                    bool force, Error **errp)
{
    BDRVQcow2State *s = bs->opaque;
    int old_version = s->qcow_version, new_version = old_version;
    uint64_t new_size = 0;
    const char *backing_file = NULL, *backing_format = NULL, *data_file = NULL;
    bool lazy_refcounts = s->use_lazy_refcounts;
    bool data_file_raw = data_file_is_raw(bs);
    const char *compat = NULL;
    int refcount_bits = s->refcount_bits;
    int ret;
    QemuOptDesc *desc = opts->list->desc;
    Qcow2AmendHelperCBInfo helper_cb_info;
    bool encryption_update = false;

    while (desc && desc->name) {
        if (!qemu_opt_find(opts, desc->name)) {
            /* only change explicitly defined options */
            desc++;
            continue;
        }

        if (!strcmp(desc->name, BLOCK_OPT_COMPAT_LEVEL)) {
            compat = qemu_opt_get(opts, BLOCK_OPT_COMPAT_LEVEL);
            if (!compat) {
                /* preserve default */
            } else if (!strcmp(compat, "0.10") || !strcmp(compat, "v2")) {
                new_version = 2;
            } else if (!strcmp(compat, "1.1") || !strcmp(compat, "v3")) {
                new_version = 3;
            } else {
                error_setg(errp, "Unknown compatibility level %s", compat);
                return -EINVAL;
            }
        } else if (!strcmp(desc->name, BLOCK_OPT_SIZE)) {
            new_size = qemu_opt_get_size(opts, BLOCK_OPT_SIZE, 0);
        } else if (!strcmp(desc->name, BLOCK_OPT_BACKING_FILE)) {
            backing_file = qemu_opt_get(opts, BLOCK_OPT_BACKING_FILE);
        } else if (!strcmp(desc->name, BLOCK_OPT_BACKING_FMT)) {
            backing_format = qemu_opt_get(opts, BLOCK_OPT_BACKING_FMT);
        } else if (g_str_has_prefix(desc->name, "encrypt.")) {
            if (!s->crypto) {
                error_setg(errp,
                           "Can't amend encryption options - encryption not present");
                return -EINVAL;
            }
            if (s->crypt_method_header != QCOW_CRYPT_LUKS) {
                error_setg(errp,
                           "Only LUKS encryption options can be amended");
                return -ENOTSUP;
            }
            encryption_update = true;
        } else if (!strcmp(desc->name, BLOCK_OPT_LAZY_REFCOUNTS)) {
            lazy_refcounts = qemu_opt_get_bool(opts, BLOCK_OPT_LAZY_REFCOUNTS,
                                               lazy_refcounts);
        } else if (!strcmp(desc->name, BLOCK_OPT_REFCOUNT_BITS)) {
            refcount_bits = qemu_opt_get_number(opts, BLOCK_OPT_REFCOUNT_BITS,
                                                refcount_bits);

            if (refcount_bits <= 0 || refcount_bits > 64 ||
                !is_power_of_2(refcount_bits))
            {
                error_setg(errp, "Refcount width must be a power of two and "
                           "may not exceed 64 bits");
                return -EINVAL;
            }
        } else if (!strcmp(desc->name, BLOCK_OPT_DATA_FILE)) {
            data_file = qemu_opt_get(opts, BLOCK_OPT_DATA_FILE);
            if (data_file && !has_data_file(bs)) {
                error_setg(errp, "data-file can only be set for images that "
                           "use an external data file");
                return -EINVAL;
            }
        } else if (!strcmp(desc->name, BLOCK_OPT_DATA_FILE_RAW)) {
            data_file_raw = qemu_opt_get_bool(opts, BLOCK_OPT_DATA_FILE_RAW,
                                              data_file_raw);
            if (data_file_raw && !data_file_is_raw(bs)) {
                error_setg(errp, "data-file-raw cannot be set on existing "
                           "images");
                return -EINVAL;
            }
        } else {
            /* if this point is reached, this probably means a new option was
             * added without having it covered here */
            abort();
        }

        desc++;
    }
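
    /*
     * Illustrative example of the operation count computed below (not taken
     * from any real invocation): amending an unencrypted v2 image with
     * "compat=1.1,refcount_bits=32" changes both the version and the
     * refcount width, so total_operations == 2 and the helper callback
     * splits its progress projection across those two steps.
     */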
    helper_cb_info = (Qcow2AmendHelperCBInfo){
        .original_status_cb = status_cb,
        .original_cb_opaque = cb_opaque,
        .total_operations = (new_version != old_version)
                          + (s->refcount_bits != refcount_bits)
                          + (encryption_update == true)
    };

    /* Upgrade first (some features may require compat=1.1) */
    if (new_version > old_version) {
        helper_cb_info.current_operation = QCOW2_UPGRADING;
        ret = qcow2_upgrade(bs, new_version, &qcow2_amend_helper_cb,
                            &helper_cb_info, errp);
        if (ret < 0) {
            return ret;
        }
    }

    if (encryption_update) {
        QDict *amend_opts_dict;
        QCryptoBlockAmendOptions *amend_opts;

        helper_cb_info.current_operation = QCOW2_UPDATING_ENCRYPTION;
        amend_opts_dict = qcow2_extract_crypto_opts(opts, "luks", errp);
        if (!amend_opts_dict) {
            return -EINVAL;
        }
        amend_opts = block_crypto_amend_opts_init(amend_opts_dict, errp);
        qobject_unref(amend_opts_dict);
        if (!amend_opts) {
            return -EINVAL;
        }
        ret = qcrypto_block_amend_options(s->crypto,
                                          qcow2_crypto_hdr_read_func,
                                          qcow2_crypto_hdr_write_func,
                                          bs,
                                          amend_opts,
                                          force,
                                          errp);
        qapi_free_QCryptoBlockAmendOptions(amend_opts);
        if (ret < 0) {
            return ret;
        }
    }

    if (s->refcount_bits != refcount_bits) {
        int refcount_order = ctz32(refcount_bits);

        if (new_version < 3 && refcount_bits != 16) {
            error_setg(errp, "Refcount widths other than 16 bits require "
                       "compatibility level 1.1 or above (use compat=1.1 or "
                       "greater)");
            return -EINVAL;
        }

        helper_cb_info.current_operation = QCOW2_CHANGING_REFCOUNT_ORDER;
        ret = qcow2_change_refcount_order(bs, refcount_order,
                                          &qcow2_amend_helper_cb,
                                          &helper_cb_info, errp);
        if (ret < 0) {
            return ret;
        }
    }

    /* data-file-raw blocks backing files, so clear it first if requested */
    if (data_file_raw) {
        s->autoclear_features |= QCOW2_AUTOCLEAR_DATA_FILE_RAW;
    } else {
        s->autoclear_features &= ~QCOW2_AUTOCLEAR_DATA_FILE_RAW;
    }

    if (data_file) {
        g_free(s->image_data_file);
        s->image_data_file = *data_file ? g_strdup(data_file) : NULL;
    }
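
    /*
     * Illustrative use of the data-file options handled above (only valid
     * for an image that was created with an external data file; file names
     * invented for the example):
     *
     *     qemu-img amend -o data_file=new-data.raw test.qcow2
     *
     * points the image header at a different external data file. Note that
     * data_file_raw=on is rejected here unless the image already has the
     * raw-data-file flag set, while data_file_raw=off clears it.
     */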

    ret = qcow2_update_header(bs);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to update the image header");
        return ret;
    }

    if (backing_file || backing_format) {
        if (g_strcmp0(backing_file, s->image_backing_file) ||
            g_strcmp0(backing_format, s->image_backing_format)) {
            error_setg(errp, "Cannot amend the backing file");
            error_append_hint(errp,
                              "You can use 'qemu-img rebase' instead.\n");
            return -EINVAL;
        }
    }
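
    /*
     * For illustration only: to actually change the backing file, the image
     * would be rebased outside of the amend path, e.g. with
     *
     *     qemu-img rebase -u -b new-base.qcow2 -F qcow2 test.qcow2
     *
     * where -u only rewrites the header instead of merging in data from the
     * old backing chain.  The file names are invented for the example.
     */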

    if (s->use_lazy_refcounts != lazy_refcounts) {
        if (lazy_refcounts) {
            if (new_version < 3) {
                error_setg(errp, "Lazy refcounts only supported with "
                           "compatibility level 1.1 and above (use compat=1.1 "
                           "or greater)");
                return -EINVAL;
            }
            s->compatible_features |= QCOW2_COMPAT_LAZY_REFCOUNTS;
            ret = qcow2_update_header(bs);
            if (ret < 0) {
                s->compatible_features &= ~QCOW2_COMPAT_LAZY_REFCOUNTS;
                error_setg_errno(errp, -ret, "Failed to update the image header");
                return ret;
            }
            s->use_lazy_refcounts = true;
        } else {
            /* make image clean first */
            ret = qcow2_mark_clean(bs);
            if (ret < 0) {
                error_setg_errno(errp, -ret, "Failed to make the image clean");
                return ret;
            }
            /* now disallow lazy refcounts */
            s->compatible_features &= ~QCOW2_COMPAT_LAZY_REFCOUNTS;
            ret = qcow2_update_header(bs);
            if (ret < 0) {
                s->compatible_features |= QCOW2_COMPAT_LAZY_REFCOUNTS;
                error_setg_errno(errp, -ret, "Failed to update the image header");
                return ret;
            }
            s->use_lazy_refcounts = false;
        }
    }

    if (new_size) {
        BlockBackend *blk = blk_new_with_bs(bs, BLK_PERM_RESIZE, BLK_PERM_ALL,
                                            errp);
        if (!blk) {
            return -EPERM;
        }

        /*
         * Amending image options should ensure that the image has
         * exactly the given new values, so pass exact=true here.
         */
        ret = blk_truncate(blk, new_size, true, PREALLOC_MODE_OFF, 0, errp);
        blk_unref(blk);
        if (ret < 0) {
            return ret;
        }
    }

    /* Downgrade last (so unsupported features can be removed before) */
    if (new_version < old_version) {
        helper_cb_info.current_operation = QCOW2_DOWNGRADING;
        ret = qcow2_downgrade(bs, new_version, &qcow2_amend_helper_cb,
                              &helper_cb_info, errp);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}

static int coroutine_fn qcow2_co_amend(BlockDriverState *bs,
                                       BlockdevAmendOptions *opts,
                                       bool force,
                                       Error **errp)
{
    BlockdevAmendOptionsQcow2 *qopts = &opts->u.qcow2;
    BDRVQcow2State *s = bs->opaque;
    int ret = 0;

    if (qopts->encrypt) {
        if (!s->crypto) {
            error_setg(errp, "image is not encrypted, can't amend");
            return -EOPNOTSUPP;
        }

        if (qopts->encrypt->format != QCRYPTO_BLOCK_FORMAT_LUKS) {
            error_setg(errp,
                       "Amend can't be used to change the qcow2 encryption format");
            return -EOPNOTSUPP;
        }

        if (s->crypt_method_header != QCOW_CRYPT_LUKS) {
            error_setg(errp,
                       "Only LUKS encryption options can be amended for qcow2 with blockdev-amend");
            return -EOPNOTSUPP;
        }

        ret = qcrypto_block_amend_options(s->crypto,
                                          qcow2_crypto_hdr_read_func,
                                          qcow2_crypto_hdr_write_func,
                                          bs,
                                          qopts->encrypt,
                                          force,
                                          errp);
    }
    return ret;
}

/*
 * If offset or size is negative, the corresponding field is omitted from the
 * emitted BLOCK_IMAGE_CORRUPTED event.
 * fatal is ignored for read-only BDS; corruptions found there are always
 * considered non-fatal.
 */
void qcow2_signal_corruption(BlockDriverState *bs, bool fatal, int64_t offset,
                             int64_t size, const char *message_format, ...)
{
    BDRVQcow2State *s = bs->opaque;
    const char *node_name;
    char *message;
    va_list ap;

    fatal = fatal && bdrv_is_writable(bs);

    if (s->signaled_corruption &&
        (!fatal || (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT)))
    {
        return;
    }

    va_start(ap, message_format);
    message = g_strdup_vprintf(message_format, ap);
    va_end(ap);

    if (fatal) {
        fprintf(stderr, "qcow2: Marking image as corrupt: %s; further "
                "corruption events will be suppressed\n", message);
    } else {
        fprintf(stderr, "qcow2: Image is corrupt: %s; further non-fatal "
                "corruption events will be suppressed\n", message);
    }

    node_name = bdrv_get_node_name(bs);
    qapi_event_send_block_image_corrupted(bdrv_get_device_name(bs),
                                          *node_name ? node_name : NULL,
                                          message, offset >= 0, offset,
                                          size >= 0, size,
                                          fatal);
    g_free(message);

    if (fatal) {
        qcow2_mark_corrupt(bs);
        bs->drv = NULL; /* make BDS unusable */
    }

    s->signaled_corruption = true;
}
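
/*
 * For illustration, a fatal corruption reported through the function above
 * surfaces on QMP roughly as follows (all values invented for the example;
 * see qapi/block-core.json for the authoritative event definition):
 *
 *   { "event": "BLOCK_IMAGE_CORRUPTED",
 *     "data": { "device": "", "node-name": "disk0", "fatal": true,
 *               "msg": "L2 table offset 0x42 unaligned (L1 index: 0)",
 *               "offset": 66, "size": 8 },
 *     "timestamp": { "seconds": 1578305931, "microseconds": 740822 } }
 *
 * Optional fields are only present when the corresponding arguments were
 * passed in.
 */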

#define QCOW_COMMON_OPTIONS                                         \
    {                                                               \
        .name = BLOCK_OPT_SIZE,                                     \
        .type = QEMU_OPT_SIZE,                                      \
        .help = "Virtual disk size"                                 \
    },                                                              \
    {                                                               \
        .name = BLOCK_OPT_COMPAT_LEVEL,                             \
        .type = QEMU_OPT_STRING,                                    \
        .help = "Compatibility level (v2 [0.10] or v3 [1.1])"       \
    },                                                              \
    {                                                               \
        .name = BLOCK_OPT_BACKING_FILE,                             \
        .type = QEMU_OPT_STRING,                                    \
        .help = "File name of a base image"                         \
    },                                                              \
    {                                                               \
        .name = BLOCK_OPT_BACKING_FMT,                              \
        .type = QEMU_OPT_STRING,                                    \
        .help = "Image format of the base image"                    \
    },                                                              \
    {                                                               \
        .name = BLOCK_OPT_DATA_FILE,                                \
        .type = QEMU_OPT_STRING,                                    \
        .help = "File name of an external data file"                \
    },                                                              \
    {                                                               \
        .name = BLOCK_OPT_DATA_FILE_RAW,                            \
        .type = QEMU_OPT_BOOL,                                      \
        .help = "The external data file must stay valid "           \
                "as a raw image"                                    \
    },                                                              \
    {                                                               \
        .name = BLOCK_OPT_LAZY_REFCOUNTS,                           \
        .type = QEMU_OPT_BOOL,                                      \
        .help = "Postpone refcount updates",                        \
        .def_value_str = "off"                                      \
    },                                                              \
    {                                                               \
        .name = BLOCK_OPT_REFCOUNT_BITS,                            \
        .type = QEMU_OPT_NUMBER,                                    \
        .help = "Width of a reference count entry in bits",         \
        .def_value_str = "16"                                       \
    }

static QemuOptsList qcow2_create_opts = {
    .name = "qcow2-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(qcow2_create_opts.head),
    .desc = {
        {                                                               \
            .name = BLOCK_OPT_ENCRYPT,                                  \
            .type = QEMU_OPT_BOOL,                                      \
            .help = "Encrypt the image with format 'aes'. (Deprecated " \
                    "in favor of " BLOCK_OPT_ENCRYPT_FORMAT "=aes)",    \
        },                                                              \
        {                                                               \
            .name = BLOCK_OPT_ENCRYPT_FORMAT,                           \
            .type = QEMU_OPT_STRING,                                    \
            .help = "Encrypt the image, format choices: 'aes', 'luks'", \
        },                                                              \
        BLOCK_CRYPTO_OPT_DEF_KEY_SECRET("encrypt.",                     \
            "ID of secret providing qcow AES key or LUKS passphrase"),  \
        BLOCK_CRYPTO_OPT_DEF_LUKS_CIPHER_ALG("encrypt."),               \
        BLOCK_CRYPTO_OPT_DEF_LUKS_CIPHER_MODE("encrypt."),              \
        BLOCK_CRYPTO_OPT_DEF_LUKS_IVGEN_ALG("encrypt."),                \
        BLOCK_CRYPTO_OPT_DEF_LUKS_IVGEN_HASH_ALG("encrypt."),           \
        BLOCK_CRYPTO_OPT_DEF_LUKS_HASH_ALG("encrypt."),                 \
        BLOCK_CRYPTO_OPT_DEF_LUKS_ITER_TIME("encrypt."),                \
        {                                                               \
            .name = BLOCK_OPT_CLUSTER_SIZE,                             \
            .type = QEMU_OPT_SIZE,                                      \
            .help = "qcow2 cluster size",                               \
            .def_value_str = stringify(DEFAULT_CLUSTER_SIZE)            \
        },                                                              \
        {                                                               \
            .name = BLOCK_OPT_EXTL2,                                    \
            .type = QEMU_OPT_BOOL,                                      \
            .help = "Extended L2 tables",                               \
            .def_value_str = "off"                                      \
        },                                                              \
        {                                                               \
            .name = BLOCK_OPT_PREALLOC,                                 \
            .type = QEMU_OPT_STRING,                                    \
            .help = "Preallocation mode (allowed values: off, "         \
                    "metadata, falloc, full)"                           \
        },                                                              \
        {                                                               \
            .name = BLOCK_OPT_COMPRESSION_TYPE,                         \
            .type = QEMU_OPT_STRING,                                    \
            .help = "Compression method used for image cluster "        \
                    "compression",                                      \
            .def_value_str = "zlib"                                     \
        },
        QCOW_COMMON_OPTIONS,
        { /* end of list */ }
    }
};
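
/*
 * Illustrative creation command exercising several of the options declared
 * above (file name and size invented for the example; compression_type=zstd
 * additionally requires a QEMU build with zstd support):
 *
 *     qemu-img create -f qcow2 \
 *         -o compat=v3,cluster_size=65536,lazy_refcounts=on,compression_type=zstd \
 *         test.qcow2 16G
 */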

static QemuOptsList qcow2_amend_opts = {
    .name = "qcow2-amend-opts",
    .head = QTAILQ_HEAD_INITIALIZER(qcow2_amend_opts.head),
    .desc = {
        BLOCK_CRYPTO_OPT_DEF_LUKS_STATE("encrypt."),
        BLOCK_CRYPTO_OPT_DEF_LUKS_KEYSLOT("encrypt."),
        BLOCK_CRYPTO_OPT_DEF_LUKS_OLD_SECRET("encrypt."),
        BLOCK_CRYPTO_OPT_DEF_LUKS_NEW_SECRET("encrypt."),
        BLOCK_CRYPTO_OPT_DEF_LUKS_ITER_TIME("encrypt."),
        QCOW_COMMON_OPTIONS,
        { /* end of list */ }
    }
};

static const char *const qcow2_strong_runtime_opts[] = {
    "encrypt." BLOCK_CRYPTO_OPT_QCOW_KEY_SECRET,

    NULL
};

BlockDriver bdrv_qcow2 = {
    .format_name                        = "qcow2",
    .instance_size                      = sizeof(BDRVQcow2State),
    .bdrv_probe                         = qcow2_probe,
    .bdrv_open                          = qcow2_open,
    .bdrv_close                         = qcow2_close,
    .bdrv_reopen_prepare                = qcow2_reopen_prepare,
    .bdrv_reopen_commit                 = qcow2_reopen_commit,
    .bdrv_reopen_commit_post            = qcow2_reopen_commit_post,
    .bdrv_reopen_abort                  = qcow2_reopen_abort,
    .bdrv_join_options                  = qcow2_join_options,
    .bdrv_child_perm                    = bdrv_default_perms,
    .bdrv_co_create_opts                = qcow2_co_create_opts,
    .bdrv_co_create                     = qcow2_co_create,
    .bdrv_has_zero_init                 = qcow2_has_zero_init,
    .bdrv_co_block_status               = qcow2_co_block_status,

    .bdrv_co_preadv_part                = qcow2_co_preadv_part,
    .bdrv_co_pwritev_part               = qcow2_co_pwritev_part,
    .bdrv_co_flush_to_os                = qcow2_co_flush_to_os,

    .bdrv_co_pwrite_zeroes              = qcow2_co_pwrite_zeroes,
    .bdrv_co_pdiscard                   = qcow2_co_pdiscard,
    .bdrv_co_copy_range_from            = qcow2_co_copy_range_from,
    .bdrv_co_copy_range_to              = qcow2_co_copy_range_to,
    .bdrv_co_truncate                   = qcow2_co_truncate,
    .bdrv_co_pwritev_compressed_part    = qcow2_co_pwritev_compressed_part,
    .bdrv_make_empty                    = qcow2_make_empty,

    .bdrv_snapshot_create               = qcow2_snapshot_create,
    .bdrv_snapshot_goto                 = qcow2_snapshot_goto,
    .bdrv_snapshot_delete               = qcow2_snapshot_delete,
    .bdrv_snapshot_list                 = qcow2_snapshot_list,
    .bdrv_snapshot_load_tmp             = qcow2_snapshot_load_tmp,
    .bdrv_measure                       = qcow2_measure,
    .bdrv_co_get_info                   = qcow2_co_get_info,
    .bdrv_get_specific_info             = qcow2_get_specific_info,

    .bdrv_co_save_vmstate               = qcow2_co_save_vmstate,
    .bdrv_co_load_vmstate               = qcow2_co_load_vmstate,

    .is_format                          = true,
    .supports_backing                   = true,
    .bdrv_co_change_backing_file        = qcow2_co_change_backing_file,

    .bdrv_refresh_limits                = qcow2_refresh_limits,
    .bdrv_co_invalidate_cache           = qcow2_co_invalidate_cache,
    .bdrv_inactivate                    = qcow2_inactivate,

    .create_opts                        = &qcow2_create_opts,
    .amend_opts                         = &qcow2_amend_opts,
    .strong_runtime_opts                = qcow2_strong_runtime_opts,
    .mutable_opts                       = mutable_opts,
    .bdrv_co_check                      = qcow2_co_check,
    .bdrv_amend_options                 = qcow2_amend_options,
    .bdrv_co_amend                      = qcow2_co_amend,

    .bdrv_detach_aio_context            = qcow2_detach_aio_context,
    .bdrv_attach_aio_context            = qcow2_attach_aio_context,

    .bdrv_supports_persistent_dirty_bitmap =
        qcow2_supports_persistent_dirty_bitmap,
    .bdrv_co_can_store_new_dirty_bitmap = qcow2_co_can_store_new_dirty_bitmap,
    .bdrv_co_remove_persistent_dirty_bitmap =
        qcow2_co_remove_persistent_dirty_bitmap,
};

static void bdrv_qcow2_init(void)
{
    bdrv_register(&bdrv_qcow2);
}

block_init(bdrv_qcow2_init);
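
/*
 * For illustration, once registered the driver can be selected explicitly on
 * the command line (node names and path invented for the example):
 *
 *     qemu-system-x86_64 \
 *         -blockdev driver=file,filename=test.qcow2,node-name=proto0 \
 *         -blockdev driver=qcow2,file=proto0,node-name=disk0 \
 *         -device virtio-blk-pci,drive=disk0
 */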