1 /* 2 * Block driver for the QCOW version 2 format 3 * 4 * Copyright (c) 2004-2006 Fabrice Bellard 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a copy 7 * of this software and associated documentation files (the "Software"), to deal 8 * in the Software without restriction, including without limitation the rights 9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 * copies of the Software, and to permit persons to whom the Software is 11 * furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 * THE SOFTWARE. 23 */ 24 #include "qemu/osdep.h" 25 #include "block/block_int.h" 26 #include "sysemu/block-backend.h" 27 #include "qemu/module.h" 28 #include <zlib.h> 29 #include "block/qcow2.h" 30 #include "qemu/error-report.h" 31 #include "qapi/qmp/qerror.h" 32 #include "qapi/qmp/qbool.h" 33 #include "qapi/qmp/types.h" 34 #include "qapi-event.h" 35 #include "trace.h" 36 #include "qemu/option_int.h" 37 #include "qemu/cutils.h" 38 #include "qemu/bswap.h" 39 #include "qapi/opts-visitor.h" 40 #include "qapi-visit.h" 41 #include "block/crypto.h" 42 43 /* 44 Differences with QCOW: 45 46 - Support for multiple incremental snapshots. 47 - Memory management by reference counts. 48 - Clusters which have a reference count of one have the bit 49 QCOW_OFLAG_COPIED to optimize write performance. 50 - Size of compressed clusters is stored in sectors to reduce bit usage 51 in the cluster offsets. 52 - Support for storing additional data (such as the VM state) in the 53 snapshots. 54 - If a backing store is used, the cluster size is not constrained 55 (could be backported to QCOW). 56 - L2 tables have always a size of one cluster. 
57 */ 58 59 60 typedef struct { 61 uint32_t magic; 62 uint32_t len; 63 } QEMU_PACKED QCowExtension; 64 65 #define QCOW2_EXT_MAGIC_END 0 66 #define QCOW2_EXT_MAGIC_BACKING_FORMAT 0xE2792ACA 67 #define QCOW2_EXT_MAGIC_FEATURE_TABLE 0x6803f857 68 #define QCOW2_EXT_MAGIC_CRYPTO_HEADER 0x0537be77 69 #define QCOW2_EXT_MAGIC_BITMAPS 0x23852875 70 71 static int qcow2_probe(const uint8_t *buf, int buf_size, const char *filename) 72 { 73 const QCowHeader *cow_header = (const void *)buf; 74 75 if (buf_size >= sizeof(QCowHeader) && 76 be32_to_cpu(cow_header->magic) == QCOW_MAGIC && 77 be32_to_cpu(cow_header->version) >= 2) 78 return 100; 79 else 80 return 0; 81 } 82 83 84 static ssize_t qcow2_crypto_hdr_read_func(QCryptoBlock *block, size_t offset, 85 uint8_t *buf, size_t buflen, 86 void *opaque, Error **errp) 87 { 88 BlockDriverState *bs = opaque; 89 BDRVQcow2State *s = bs->opaque; 90 ssize_t ret; 91 92 if ((offset + buflen) > s->crypto_header.length) { 93 error_setg(errp, "Request for data outside of extension header"); 94 return -1; 95 } 96 97 ret = bdrv_pread(bs->file, 98 s->crypto_header.offset + offset, buf, buflen); 99 if (ret < 0) { 100 error_setg_errno(errp, -ret, "Could not read encryption header"); 101 return -1; 102 } 103 return ret; 104 } 105 106 107 static ssize_t qcow2_crypto_hdr_init_func(QCryptoBlock *block, size_t headerlen, 108 void *opaque, Error **errp) 109 { 110 BlockDriverState *bs = opaque; 111 BDRVQcow2State *s = bs->opaque; 112 int64_t ret; 113 int64_t clusterlen; 114 115 ret = qcow2_alloc_clusters(bs, headerlen); 116 if (ret < 0) { 117 error_setg_errno(errp, -ret, 118 "Cannot allocate cluster for LUKS header size %zu", 119 headerlen); 120 return -1; 121 } 122 123 s->crypto_header.length = headerlen; 124 s->crypto_header.offset = ret; 125 126 /* Zero fill remaining space in cluster so it has predictable 127 * content in case of future spec changes */ 128 clusterlen = size_to_clusters(s, headerlen) * s->cluster_size; 129 ret = bdrv_pwrite_zeroes(bs->file, 130 ret + headerlen, 131 clusterlen - headerlen, 0); 132 if (ret < 0) { 133 error_setg_errno(errp, -ret, "Could not zero fill encryption header"); 134 return -1; 135 } 136 137 return ret; 138 } 139 140 141 static ssize_t qcow2_crypto_hdr_write_func(QCryptoBlock *block, size_t offset, 142 const uint8_t *buf, size_t buflen, 143 void *opaque, Error **errp) 144 { 145 BlockDriverState *bs = opaque; 146 BDRVQcow2State *s = bs->opaque; 147 ssize_t ret; 148 149 if ((offset + buflen) > s->crypto_header.length) { 150 error_setg(errp, "Request for data outside of extension header"); 151 return -1; 152 } 153 154 ret = bdrv_pwrite(bs->file, 155 s->crypto_header.offset + offset, buf, buflen); 156 if (ret < 0) { 157 error_setg_errno(errp, -ret, "Could not read encryption header"); 158 return -1; 159 } 160 return ret; 161 } 162 163 164 /* 165 * read qcow2 extension and fill bs 166 * start reading from start_offset 167 * finish reading upon magic of value 0 or when end_offset reached 168 * unknown magic is skipped (future extension this version knows nothing about) 169 * return 0 upon success, non-0 otherwise 170 */ 171 static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset, 172 uint64_t end_offset, void **p_feature_table, 173 int flags, bool *need_update_header, 174 Error **errp) 175 { 176 BDRVQcow2State *s = bs->opaque; 177 QCowExtension ext; 178 uint64_t offset; 179 int ret; 180 Qcow2BitmapHeaderExt bitmaps_ext; 181 182 if (need_update_header != NULL) { 183 *need_update_header = false; 184 } 185 186 #ifdef DEBUG_EXT 
187 printf("qcow2_read_extensions: start=%ld end=%ld\n", start_offset, end_offset); 188 #endif 189 offset = start_offset; 190 while (offset < end_offset) { 191 192 #ifdef DEBUG_EXT 193 /* Sanity check */ 194 if (offset > s->cluster_size) 195 printf("qcow2_read_extension: suspicious offset %lu\n", offset); 196 197 printf("attempting to read extended header in offset %lu\n", offset); 198 #endif 199 200 ret = bdrv_pread(bs->file, offset, &ext, sizeof(ext)); 201 if (ret < 0) { 202 error_setg_errno(errp, -ret, "qcow2_read_extension: ERROR: " 203 "pread fail from offset %" PRIu64, offset); 204 return 1; 205 } 206 be32_to_cpus(&ext.magic); 207 be32_to_cpus(&ext.len); 208 offset += sizeof(ext); 209 #ifdef DEBUG_EXT 210 printf("ext.magic = 0x%x\n", ext.magic); 211 #endif 212 if (offset > end_offset || ext.len > end_offset - offset) { 213 error_setg(errp, "Header extension too large"); 214 return -EINVAL; 215 } 216 217 switch (ext.magic) { 218 case QCOW2_EXT_MAGIC_END: 219 return 0; 220 221 case QCOW2_EXT_MAGIC_BACKING_FORMAT: 222 if (ext.len >= sizeof(bs->backing_format)) { 223 error_setg(errp, "ERROR: ext_backing_format: len=%" PRIu32 224 " too large (>=%zu)", ext.len, 225 sizeof(bs->backing_format)); 226 return 2; 227 } 228 ret = bdrv_pread(bs->file, offset, bs->backing_format, ext.len); 229 if (ret < 0) { 230 error_setg_errno(errp, -ret, "ERROR: ext_backing_format: " 231 "Could not read format name"); 232 return 3; 233 } 234 bs->backing_format[ext.len] = '\0'; 235 s->image_backing_format = g_strdup(bs->backing_format); 236 #ifdef DEBUG_EXT 237 printf("Qcow2: Got format extension %s\n", bs->backing_format); 238 #endif 239 break; 240 241 case QCOW2_EXT_MAGIC_FEATURE_TABLE: 242 if (p_feature_table != NULL) { 243 void* feature_table = g_malloc0(ext.len + 2 * sizeof(Qcow2Feature)); 244 ret = bdrv_pread(bs->file, offset , feature_table, ext.len); 245 if (ret < 0) { 246 error_setg_errno(errp, -ret, "ERROR: ext_feature_table: " 247 "Could not read table"); 248 return ret; 249 } 250 251 *p_feature_table = feature_table; 252 } 253 break; 254 255 case QCOW2_EXT_MAGIC_CRYPTO_HEADER: { 256 unsigned int cflags = 0; 257 if (s->crypt_method_header != QCOW_CRYPT_LUKS) { 258 error_setg(errp, "CRYPTO header extension only " 259 "expected with LUKS encryption method"); 260 return -EINVAL; 261 } 262 if (ext.len != sizeof(Qcow2CryptoHeaderExtension)) { 263 error_setg(errp, "CRYPTO header extension size %u, " 264 "but expected size %zu", ext.len, 265 sizeof(Qcow2CryptoHeaderExtension)); 266 return -EINVAL; 267 } 268 269 ret = bdrv_pread(bs->file, offset, &s->crypto_header, ext.len); 270 if (ret < 0) { 271 error_setg_errno(errp, -ret, 272 "Unable to read CRYPTO header extension"); 273 return ret; 274 } 275 be64_to_cpus(&s->crypto_header.offset); 276 be64_to_cpus(&s->crypto_header.length); 277 278 if ((s->crypto_header.offset % s->cluster_size) != 0) { 279 error_setg(errp, "Encryption header offset '%" PRIu64 "' is " 280 "not a multiple of cluster size '%u'", 281 s->crypto_header.offset, s->cluster_size); 282 return -EINVAL; 283 } 284 285 if (flags & BDRV_O_NO_IO) { 286 cflags |= QCRYPTO_BLOCK_OPEN_NO_IO; 287 } 288 s->crypto = qcrypto_block_open(s->crypto_opts, "encrypt.", 289 qcow2_crypto_hdr_read_func, 290 bs, cflags, errp); 291 if (!s->crypto) { 292 return -EINVAL; 293 } 294 } break; 295 296 case QCOW2_EXT_MAGIC_BITMAPS: 297 if (ext.len != sizeof(bitmaps_ext)) { 298 error_setg_errno(errp, -ret, "bitmaps_ext: " 299 "Invalid extension length"); 300 return -EINVAL; 301 } 302 303 if (!(s->autoclear_features & 
QCOW2_AUTOCLEAR_BITMAPS)) { 304 warn_report("a program lacking bitmap support " 305 "modified this file, so all bitmaps are now " 306 "considered inconsistent"); 307 error_printf("Some clusters may be leaked, " 308 "run 'qemu-img check -r' on the image " 309 "file to fix."); 310 if (need_update_header != NULL) { 311 /* Updating is needed to drop invalid bitmap extension. */ 312 *need_update_header = true; 313 } 314 break; 315 } 316 317 ret = bdrv_pread(bs->file, offset, &bitmaps_ext, ext.len); 318 if (ret < 0) { 319 error_setg_errno(errp, -ret, "bitmaps_ext: " 320 "Could not read ext header"); 321 return ret; 322 } 323 324 if (bitmaps_ext.reserved32 != 0) { 325 error_setg_errno(errp, -ret, "bitmaps_ext: " 326 "Reserved field is not zero"); 327 return -EINVAL; 328 } 329 330 be32_to_cpus(&bitmaps_ext.nb_bitmaps); 331 be64_to_cpus(&bitmaps_ext.bitmap_directory_size); 332 be64_to_cpus(&bitmaps_ext.bitmap_directory_offset); 333 334 if (bitmaps_ext.nb_bitmaps > QCOW2_MAX_BITMAPS) { 335 error_setg(errp, 336 "bitmaps_ext: Image has %" PRIu32 " bitmaps, " 337 "exceeding the QEMU supported maximum of %d", 338 bitmaps_ext.nb_bitmaps, QCOW2_MAX_BITMAPS); 339 return -EINVAL; 340 } 341 342 if (bitmaps_ext.nb_bitmaps == 0) { 343 error_setg(errp, "found bitmaps extension with zero bitmaps"); 344 return -EINVAL; 345 } 346 347 if (bitmaps_ext.bitmap_directory_offset & (s->cluster_size - 1)) { 348 error_setg(errp, "bitmaps_ext: " 349 "invalid bitmap directory offset"); 350 return -EINVAL; 351 } 352 353 if (bitmaps_ext.bitmap_directory_size > 354 QCOW2_MAX_BITMAP_DIRECTORY_SIZE) { 355 error_setg(errp, "bitmaps_ext: " 356 "bitmap directory size (%" PRIu64 ") exceeds " 357 "the maximum supported size (%d)", 358 bitmaps_ext.bitmap_directory_size, 359 QCOW2_MAX_BITMAP_DIRECTORY_SIZE); 360 return -EINVAL; 361 } 362 363 s->nb_bitmaps = bitmaps_ext.nb_bitmaps; 364 s->bitmap_directory_offset = 365 bitmaps_ext.bitmap_directory_offset; 366 s->bitmap_directory_size = 367 bitmaps_ext.bitmap_directory_size; 368 369 #ifdef DEBUG_EXT 370 printf("Qcow2: Got bitmaps extension: " 371 "offset=%" PRIu64 " nb_bitmaps=%" PRIu32 "\n", 372 s->bitmap_directory_offset, s->nb_bitmaps); 373 #endif 374 break; 375 376 default: 377 /* unknown magic - save it in case we need to rewrite the header */ 378 { 379 Qcow2UnknownHeaderExtension *uext; 380 381 uext = g_malloc0(sizeof(*uext) + ext.len); 382 uext->magic = ext.magic; 383 uext->len = ext.len; 384 QLIST_INSERT_HEAD(&s->unknown_header_ext, uext, next); 385 386 ret = bdrv_pread(bs->file, offset , uext->data, uext->len); 387 if (ret < 0) { 388 error_setg_errno(errp, -ret, "ERROR: unknown extension: " 389 "Could not read data"); 390 return ret; 391 } 392 } 393 break; 394 } 395 396 offset += ((ext.len + 7) & ~7); 397 } 398 399 return 0; 400 } 401 402 static void cleanup_unknown_header_ext(BlockDriverState *bs) 403 { 404 BDRVQcow2State *s = bs->opaque; 405 Qcow2UnknownHeaderExtension *uext, *next; 406 407 QLIST_FOREACH_SAFE(uext, &s->unknown_header_ext, next, next) { 408 QLIST_REMOVE(uext, next); 409 g_free(uext); 410 } 411 } 412 413 static void report_unsupported_feature(Error **errp, Qcow2Feature *table, 414 uint64_t mask) 415 { 416 char *features = g_strdup(""); 417 char *old; 418 419 while (table && table->name[0] != '\0') { 420 if (table->type == QCOW2_FEAT_TYPE_INCOMPATIBLE) { 421 if (mask & (1ULL << table->bit)) { 422 old = features; 423 features = g_strdup_printf("%s%s%.46s", old, *old ? 
", " : "", 424 table->name); 425 g_free(old); 426 mask &= ~(1ULL << table->bit); 427 } 428 } 429 table++; 430 } 431 432 if (mask) { 433 old = features; 434 features = g_strdup_printf("%s%sUnknown incompatible feature: %" PRIx64, 435 old, *old ? ", " : "", mask); 436 g_free(old); 437 } 438 439 error_setg(errp, "Unsupported qcow2 feature(s): %s", features); 440 g_free(features); 441 } 442 443 /* 444 * Sets the dirty bit and flushes afterwards if necessary. 445 * 446 * The incompatible_features bit is only set if the image file header was 447 * updated successfully. Therefore it is not required to check the return 448 * value of this function. 449 */ 450 int qcow2_mark_dirty(BlockDriverState *bs) 451 { 452 BDRVQcow2State *s = bs->opaque; 453 uint64_t val; 454 int ret; 455 456 assert(s->qcow_version >= 3); 457 458 if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) { 459 return 0; /* already dirty */ 460 } 461 462 val = cpu_to_be64(s->incompatible_features | QCOW2_INCOMPAT_DIRTY); 463 ret = bdrv_pwrite(bs->file, offsetof(QCowHeader, incompatible_features), 464 &val, sizeof(val)); 465 if (ret < 0) { 466 return ret; 467 } 468 ret = bdrv_flush(bs->file->bs); 469 if (ret < 0) { 470 return ret; 471 } 472 473 /* Only treat image as dirty if the header was updated successfully */ 474 s->incompatible_features |= QCOW2_INCOMPAT_DIRTY; 475 return 0; 476 } 477 478 /* 479 * Clears the dirty bit and flushes before if necessary. Only call this 480 * function when there are no pending requests, it does not guard against 481 * concurrent requests dirtying the image. 482 */ 483 static int qcow2_mark_clean(BlockDriverState *bs) 484 { 485 BDRVQcow2State *s = bs->opaque; 486 487 if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) { 488 int ret; 489 490 s->incompatible_features &= ~QCOW2_INCOMPAT_DIRTY; 491 492 ret = bdrv_flush(bs); 493 if (ret < 0) { 494 return ret; 495 } 496 497 return qcow2_update_header(bs); 498 } 499 return 0; 500 } 501 502 /* 503 * Marks the image as corrupt. 504 */ 505 int qcow2_mark_corrupt(BlockDriverState *bs) 506 { 507 BDRVQcow2State *s = bs->opaque; 508 509 s->incompatible_features |= QCOW2_INCOMPAT_CORRUPT; 510 return qcow2_update_header(bs); 511 } 512 513 /* 514 * Marks the image as consistent, i.e., unsets the corrupt bit, and flushes 515 * before if necessary. 516 */ 517 int qcow2_mark_consistent(BlockDriverState *bs) 518 { 519 BDRVQcow2State *s = bs->opaque; 520 521 if (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT) { 522 int ret = bdrv_flush(bs); 523 if (ret < 0) { 524 return ret; 525 } 526 527 s->incompatible_features &= ~QCOW2_INCOMPAT_CORRUPT; 528 return qcow2_update_header(bs); 529 } 530 return 0; 531 } 532 533 static int qcow2_check(BlockDriverState *bs, BdrvCheckResult *result, 534 BdrvCheckMode fix) 535 { 536 int ret = qcow2_check_refcounts(bs, result, fix); 537 if (ret < 0) { 538 return ret; 539 } 540 541 if (fix && result->check_errors == 0 && result->corruptions == 0) { 542 ret = qcow2_mark_clean(bs); 543 if (ret < 0) { 544 return ret; 545 } 546 return qcow2_mark_consistent(bs); 547 } 548 return ret; 549 } 550 551 static int validate_table_offset(BlockDriverState *bs, uint64_t offset, 552 uint64_t entries, size_t entry_len) 553 { 554 BDRVQcow2State *s = bs->opaque; 555 uint64_t size; 556 557 /* Use signed INT64_MAX as the maximum even for uint64_t header fields, 558 * because values will be passed to qemu functions taking int64_t. 
*/ 559 if (entries > INT64_MAX / entry_len) { 560 return -EINVAL; 561 } 562 563 size = entries * entry_len; 564 565 if (INT64_MAX - size < offset) { 566 return -EINVAL; 567 } 568 569 /* Tables must be cluster aligned */ 570 if (offset_into_cluster(s, offset) != 0) { 571 return -EINVAL; 572 } 573 574 return 0; 575 } 576 577 static QemuOptsList qcow2_runtime_opts = { 578 .name = "qcow2", 579 .head = QTAILQ_HEAD_INITIALIZER(qcow2_runtime_opts.head), 580 .desc = { 581 { 582 .name = QCOW2_OPT_LAZY_REFCOUNTS, 583 .type = QEMU_OPT_BOOL, 584 .help = "Postpone refcount updates", 585 }, 586 { 587 .name = QCOW2_OPT_DISCARD_REQUEST, 588 .type = QEMU_OPT_BOOL, 589 .help = "Pass guest discard requests to the layer below", 590 }, 591 { 592 .name = QCOW2_OPT_DISCARD_SNAPSHOT, 593 .type = QEMU_OPT_BOOL, 594 .help = "Generate discard requests when snapshot related space " 595 "is freed", 596 }, 597 { 598 .name = QCOW2_OPT_DISCARD_OTHER, 599 .type = QEMU_OPT_BOOL, 600 .help = "Generate discard requests when other clusters are freed", 601 }, 602 { 603 .name = QCOW2_OPT_OVERLAP, 604 .type = QEMU_OPT_STRING, 605 .help = "Selects which overlap checks to perform from a range of " 606 "templates (none, constant, cached, all)", 607 }, 608 { 609 .name = QCOW2_OPT_OVERLAP_TEMPLATE, 610 .type = QEMU_OPT_STRING, 611 .help = "Selects which overlap checks to perform from a range of " 612 "templates (none, constant, cached, all)", 613 }, 614 { 615 .name = QCOW2_OPT_OVERLAP_MAIN_HEADER, 616 .type = QEMU_OPT_BOOL, 617 .help = "Check for unintended writes into the main qcow2 header", 618 }, 619 { 620 .name = QCOW2_OPT_OVERLAP_ACTIVE_L1, 621 .type = QEMU_OPT_BOOL, 622 .help = "Check for unintended writes into the active L1 table", 623 }, 624 { 625 .name = QCOW2_OPT_OVERLAP_ACTIVE_L2, 626 .type = QEMU_OPT_BOOL, 627 .help = "Check for unintended writes into an active L2 table", 628 }, 629 { 630 .name = QCOW2_OPT_OVERLAP_REFCOUNT_TABLE, 631 .type = QEMU_OPT_BOOL, 632 .help = "Check for unintended writes into the refcount table", 633 }, 634 { 635 .name = QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK, 636 .type = QEMU_OPT_BOOL, 637 .help = "Check for unintended writes into a refcount block", 638 }, 639 { 640 .name = QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE, 641 .type = QEMU_OPT_BOOL, 642 .help = "Check for unintended writes into the snapshot table", 643 }, 644 { 645 .name = QCOW2_OPT_OVERLAP_INACTIVE_L1, 646 .type = QEMU_OPT_BOOL, 647 .help = "Check for unintended writes into an inactive L1 table", 648 }, 649 { 650 .name = QCOW2_OPT_OVERLAP_INACTIVE_L2, 651 .type = QEMU_OPT_BOOL, 652 .help = "Check for unintended writes into an inactive L2 table", 653 }, 654 { 655 .name = QCOW2_OPT_CACHE_SIZE, 656 .type = QEMU_OPT_SIZE, 657 .help = "Maximum combined metadata (L2 tables and refcount blocks) " 658 "cache size", 659 }, 660 { 661 .name = QCOW2_OPT_L2_CACHE_SIZE, 662 .type = QEMU_OPT_SIZE, 663 .help = "Maximum L2 table cache size", 664 }, 665 { 666 .name = QCOW2_OPT_REFCOUNT_CACHE_SIZE, 667 .type = QEMU_OPT_SIZE, 668 .help = "Maximum refcount block cache size", 669 }, 670 { 671 .name = QCOW2_OPT_CACHE_CLEAN_INTERVAL, 672 .type = QEMU_OPT_NUMBER, 673 .help = "Clean unused cache entries after this time (in seconds)", 674 }, 675 BLOCK_CRYPTO_OPT_DEF_KEY_SECRET("encrypt.", 676 "ID of secret providing qcow2 AES key or LUKS passphrase"), 677 { /* end of list */ } 678 }, 679 }; 680 681 static const char *overlap_bool_option_names[QCOW2_OL_MAX_BITNR] = { 682 [QCOW2_OL_MAIN_HEADER_BITNR] = QCOW2_OPT_OVERLAP_MAIN_HEADER, 683 [QCOW2_OL_ACTIVE_L1_BITNR] = 
QCOW2_OPT_OVERLAP_ACTIVE_L1, 684 [QCOW2_OL_ACTIVE_L2_BITNR] = QCOW2_OPT_OVERLAP_ACTIVE_L2, 685 [QCOW2_OL_REFCOUNT_TABLE_BITNR] = QCOW2_OPT_OVERLAP_REFCOUNT_TABLE, 686 [QCOW2_OL_REFCOUNT_BLOCK_BITNR] = QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK, 687 [QCOW2_OL_SNAPSHOT_TABLE_BITNR] = QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE, 688 [QCOW2_OL_INACTIVE_L1_BITNR] = QCOW2_OPT_OVERLAP_INACTIVE_L1, 689 [QCOW2_OL_INACTIVE_L2_BITNR] = QCOW2_OPT_OVERLAP_INACTIVE_L2, 690 }; 691 692 static void cache_clean_timer_cb(void *opaque) 693 { 694 BlockDriverState *bs = opaque; 695 BDRVQcow2State *s = bs->opaque; 696 qcow2_cache_clean_unused(bs, s->l2_table_cache); 697 qcow2_cache_clean_unused(bs, s->refcount_block_cache); 698 timer_mod(s->cache_clean_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 699 (int64_t) s->cache_clean_interval * 1000); 700 } 701 702 static void cache_clean_timer_init(BlockDriverState *bs, AioContext *context) 703 { 704 BDRVQcow2State *s = bs->opaque; 705 if (s->cache_clean_interval > 0) { 706 s->cache_clean_timer = aio_timer_new(context, QEMU_CLOCK_VIRTUAL, 707 SCALE_MS, cache_clean_timer_cb, 708 bs); 709 timer_mod(s->cache_clean_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 710 (int64_t) s->cache_clean_interval * 1000); 711 } 712 } 713 714 static void cache_clean_timer_del(BlockDriverState *bs) 715 { 716 BDRVQcow2State *s = bs->opaque; 717 if (s->cache_clean_timer) { 718 timer_del(s->cache_clean_timer); 719 timer_free(s->cache_clean_timer); 720 s->cache_clean_timer = NULL; 721 } 722 } 723 724 static void qcow2_detach_aio_context(BlockDriverState *bs) 725 { 726 cache_clean_timer_del(bs); 727 } 728 729 static void qcow2_attach_aio_context(BlockDriverState *bs, 730 AioContext *new_context) 731 { 732 cache_clean_timer_init(bs, new_context); 733 } 734 735 static void read_cache_sizes(BlockDriverState *bs, QemuOpts *opts, 736 uint64_t *l2_cache_size, 737 uint64_t *refcount_cache_size, Error **errp) 738 { 739 BDRVQcow2State *s = bs->opaque; 740 uint64_t combined_cache_size; 741 bool l2_cache_size_set, refcount_cache_size_set, combined_cache_size_set; 742 743 combined_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_CACHE_SIZE); 744 l2_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_L2_CACHE_SIZE); 745 refcount_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_REFCOUNT_CACHE_SIZE); 746 747 combined_cache_size = qemu_opt_get_size(opts, QCOW2_OPT_CACHE_SIZE, 0); 748 *l2_cache_size = qemu_opt_get_size(opts, QCOW2_OPT_L2_CACHE_SIZE, 0); 749 *refcount_cache_size = qemu_opt_get_size(opts, 750 QCOW2_OPT_REFCOUNT_CACHE_SIZE, 0); 751 752 if (combined_cache_size_set) { 753 if (l2_cache_size_set && refcount_cache_size_set) { 754 error_setg(errp, QCOW2_OPT_CACHE_SIZE ", " QCOW2_OPT_L2_CACHE_SIZE 755 " and " QCOW2_OPT_REFCOUNT_CACHE_SIZE " may not be set " 756 "the same time"); 757 return; 758 } else if (*l2_cache_size > combined_cache_size) { 759 error_setg(errp, QCOW2_OPT_L2_CACHE_SIZE " may not exceed " 760 QCOW2_OPT_CACHE_SIZE); 761 return; 762 } else if (*refcount_cache_size > combined_cache_size) { 763 error_setg(errp, QCOW2_OPT_REFCOUNT_CACHE_SIZE " may not exceed " 764 QCOW2_OPT_CACHE_SIZE); 765 return; 766 } 767 768 if (l2_cache_size_set) { 769 *refcount_cache_size = combined_cache_size - *l2_cache_size; 770 } else if (refcount_cache_size_set) { 771 *l2_cache_size = combined_cache_size - *refcount_cache_size; 772 } else { 773 *refcount_cache_size = combined_cache_size 774 / (DEFAULT_L2_REFCOUNT_SIZE_RATIO + 1); 775 *l2_cache_size = combined_cache_size - *refcount_cache_size; 776 } 777 } else { 778 if (!l2_cache_size_set && 
!refcount_cache_size_set) { 779 *l2_cache_size = MAX(DEFAULT_L2_CACHE_BYTE_SIZE, 780 (uint64_t)DEFAULT_L2_CACHE_CLUSTERS 781 * s->cluster_size); 782 *refcount_cache_size = *l2_cache_size 783 / DEFAULT_L2_REFCOUNT_SIZE_RATIO; 784 } else if (!l2_cache_size_set) { 785 *l2_cache_size = *refcount_cache_size 786 * DEFAULT_L2_REFCOUNT_SIZE_RATIO; 787 } else if (!refcount_cache_size_set) { 788 *refcount_cache_size = *l2_cache_size 789 / DEFAULT_L2_REFCOUNT_SIZE_RATIO; 790 } 791 } 792 } 793 794 typedef struct Qcow2ReopenState { 795 Qcow2Cache *l2_table_cache; 796 Qcow2Cache *refcount_block_cache; 797 bool use_lazy_refcounts; 798 int overlap_check; 799 bool discard_passthrough[QCOW2_DISCARD_MAX]; 800 uint64_t cache_clean_interval; 801 QCryptoBlockOpenOptions *crypto_opts; /* Disk encryption runtime options */ 802 } Qcow2ReopenState; 803 804 static int qcow2_update_options_prepare(BlockDriverState *bs, 805 Qcow2ReopenState *r, 806 QDict *options, int flags, 807 Error **errp) 808 { 809 BDRVQcow2State *s = bs->opaque; 810 QemuOpts *opts = NULL; 811 const char *opt_overlap_check, *opt_overlap_check_template; 812 int overlap_check_template = 0; 813 uint64_t l2_cache_size, refcount_cache_size; 814 int i; 815 const char *encryptfmt; 816 QDict *encryptopts = NULL; 817 Error *local_err = NULL; 818 int ret; 819 820 qdict_extract_subqdict(options, &encryptopts, "encrypt."); 821 encryptfmt = qdict_get_try_str(encryptopts, "format"); 822 823 opts = qemu_opts_create(&qcow2_runtime_opts, NULL, 0, &error_abort); 824 qemu_opts_absorb_qdict(opts, options, &local_err); 825 if (local_err) { 826 error_propagate(errp, local_err); 827 ret = -EINVAL; 828 goto fail; 829 } 830 831 /* get L2 table/refcount block cache size from command line options */ 832 read_cache_sizes(bs, opts, &l2_cache_size, &refcount_cache_size, 833 &local_err); 834 if (local_err) { 835 error_propagate(errp, local_err); 836 ret = -EINVAL; 837 goto fail; 838 } 839 840 l2_cache_size /= s->cluster_size; 841 if (l2_cache_size < MIN_L2_CACHE_SIZE) { 842 l2_cache_size = MIN_L2_CACHE_SIZE; 843 } 844 if (l2_cache_size > INT_MAX) { 845 error_setg(errp, "L2 cache size too big"); 846 ret = -EINVAL; 847 goto fail; 848 } 849 850 refcount_cache_size /= s->cluster_size; 851 if (refcount_cache_size < MIN_REFCOUNT_CACHE_SIZE) { 852 refcount_cache_size = MIN_REFCOUNT_CACHE_SIZE; 853 } 854 if (refcount_cache_size > INT_MAX) { 855 error_setg(errp, "Refcount cache size too big"); 856 ret = -EINVAL; 857 goto fail; 858 } 859 860 /* alloc new L2 table/refcount block cache, flush old one */ 861 if (s->l2_table_cache) { 862 ret = qcow2_cache_flush(bs, s->l2_table_cache); 863 if (ret) { 864 error_setg_errno(errp, -ret, "Failed to flush the L2 table cache"); 865 goto fail; 866 } 867 } 868 869 if (s->refcount_block_cache) { 870 ret = qcow2_cache_flush(bs, s->refcount_block_cache); 871 if (ret) { 872 error_setg_errno(errp, -ret, 873 "Failed to flush the refcount block cache"); 874 goto fail; 875 } 876 } 877 878 r->l2_table_cache = qcow2_cache_create(bs, l2_cache_size); 879 r->refcount_block_cache = qcow2_cache_create(bs, refcount_cache_size); 880 if (r->l2_table_cache == NULL || r->refcount_block_cache == NULL) { 881 error_setg(errp, "Could not allocate metadata caches"); 882 ret = -ENOMEM; 883 goto fail; 884 } 885 886 /* New interval for cache cleanup timer */ 887 r->cache_clean_interval = 888 qemu_opt_get_number(opts, QCOW2_OPT_CACHE_CLEAN_INTERVAL, 889 s->cache_clean_interval); 890 #ifndef CONFIG_LINUX 891 if (r->cache_clean_interval != 0) { 892 error_setg(errp, 
QCOW2_OPT_CACHE_CLEAN_INTERVAL 893 " not supported on this host"); 894 ret = -EINVAL; 895 goto fail; 896 } 897 #endif 898 if (r->cache_clean_interval > UINT_MAX) { 899 error_setg(errp, "Cache clean interval too big"); 900 ret = -EINVAL; 901 goto fail; 902 } 903 904 /* lazy-refcounts; flush if going from enabled to disabled */ 905 r->use_lazy_refcounts = qemu_opt_get_bool(opts, QCOW2_OPT_LAZY_REFCOUNTS, 906 (s->compatible_features & QCOW2_COMPAT_LAZY_REFCOUNTS)); 907 if (r->use_lazy_refcounts && s->qcow_version < 3) { 908 error_setg(errp, "Lazy refcounts require a qcow2 image with at least " 909 "qemu 1.1 compatibility level"); 910 ret = -EINVAL; 911 goto fail; 912 } 913 914 if (s->use_lazy_refcounts && !r->use_lazy_refcounts) { 915 ret = qcow2_mark_clean(bs); 916 if (ret < 0) { 917 error_setg_errno(errp, -ret, "Failed to disable lazy refcounts"); 918 goto fail; 919 } 920 } 921 922 /* Overlap check options */ 923 opt_overlap_check = qemu_opt_get(opts, QCOW2_OPT_OVERLAP); 924 opt_overlap_check_template = qemu_opt_get(opts, QCOW2_OPT_OVERLAP_TEMPLATE); 925 if (opt_overlap_check_template && opt_overlap_check && 926 strcmp(opt_overlap_check_template, opt_overlap_check)) 927 { 928 error_setg(errp, "Conflicting values for qcow2 options '" 929 QCOW2_OPT_OVERLAP "' ('%s') and '" QCOW2_OPT_OVERLAP_TEMPLATE 930 "' ('%s')", opt_overlap_check, opt_overlap_check_template); 931 ret = -EINVAL; 932 goto fail; 933 } 934 if (!opt_overlap_check) { 935 opt_overlap_check = opt_overlap_check_template ?: "cached"; 936 } 937 938 if (!strcmp(opt_overlap_check, "none")) { 939 overlap_check_template = 0; 940 } else if (!strcmp(opt_overlap_check, "constant")) { 941 overlap_check_template = QCOW2_OL_CONSTANT; 942 } else if (!strcmp(opt_overlap_check, "cached")) { 943 overlap_check_template = QCOW2_OL_CACHED; 944 } else if (!strcmp(opt_overlap_check, "all")) { 945 overlap_check_template = QCOW2_OL_ALL; 946 } else { 947 error_setg(errp, "Unsupported value '%s' for qcow2 option " 948 "'overlap-check'. 
Allowed are any of the following: " 949 "none, constant, cached, all", opt_overlap_check); 950 ret = -EINVAL; 951 goto fail; 952 } 953 954 r->overlap_check = 0; 955 for (i = 0; i < QCOW2_OL_MAX_BITNR; i++) { 956 /* overlap-check defines a template bitmask, but every flag may be 957 * overwritten through the associated boolean option */ 958 r->overlap_check |= 959 qemu_opt_get_bool(opts, overlap_bool_option_names[i], 960 overlap_check_template & (1 << i)) << i; 961 } 962 963 r->discard_passthrough[QCOW2_DISCARD_NEVER] = false; 964 r->discard_passthrough[QCOW2_DISCARD_ALWAYS] = true; 965 r->discard_passthrough[QCOW2_DISCARD_REQUEST] = 966 qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_REQUEST, 967 flags & BDRV_O_UNMAP); 968 r->discard_passthrough[QCOW2_DISCARD_SNAPSHOT] = 969 qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_SNAPSHOT, true); 970 r->discard_passthrough[QCOW2_DISCARD_OTHER] = 971 qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_OTHER, false); 972 973 switch (s->crypt_method_header) { 974 case QCOW_CRYPT_NONE: 975 if (encryptfmt) { 976 error_setg(errp, "No encryption in image header, but options " 977 "specified format '%s'", encryptfmt); 978 ret = -EINVAL; 979 goto fail; 980 } 981 break; 982 983 case QCOW_CRYPT_AES: 984 if (encryptfmt && !g_str_equal(encryptfmt, "aes")) { 985 error_setg(errp, 986 "Header reported 'aes' encryption format but " 987 "options specify '%s'", encryptfmt); 988 ret = -EINVAL; 989 goto fail; 990 } 991 qdict_del(encryptopts, "format"); 992 r->crypto_opts = block_crypto_open_opts_init( 993 Q_CRYPTO_BLOCK_FORMAT_QCOW, encryptopts, errp); 994 break; 995 996 case QCOW_CRYPT_LUKS: 997 if (encryptfmt && !g_str_equal(encryptfmt, "luks")) { 998 error_setg(errp, 999 "Header reported 'luks' encryption format but " 1000 "options specify '%s'", encryptfmt); 1001 ret = -EINVAL; 1002 goto fail; 1003 } 1004 qdict_del(encryptopts, "format"); 1005 r->crypto_opts = block_crypto_open_opts_init( 1006 Q_CRYPTO_BLOCK_FORMAT_LUKS, encryptopts, errp); 1007 break; 1008 1009 default: 1010 error_setg(errp, "Unsupported encryption method %d", 1011 s->crypt_method_header); 1012 break; 1013 } 1014 if (s->crypt_method_header != QCOW_CRYPT_NONE && !r->crypto_opts) { 1015 ret = -EINVAL; 1016 goto fail; 1017 } 1018 1019 ret = 0; 1020 fail: 1021 QDECREF(encryptopts); 1022 qemu_opts_del(opts); 1023 opts = NULL; 1024 return ret; 1025 } 1026 1027 static void qcow2_update_options_commit(BlockDriverState *bs, 1028 Qcow2ReopenState *r) 1029 { 1030 BDRVQcow2State *s = bs->opaque; 1031 int i; 1032 1033 if (s->l2_table_cache) { 1034 qcow2_cache_destroy(bs, s->l2_table_cache); 1035 } 1036 if (s->refcount_block_cache) { 1037 qcow2_cache_destroy(bs, s->refcount_block_cache); 1038 } 1039 s->l2_table_cache = r->l2_table_cache; 1040 s->refcount_block_cache = r->refcount_block_cache; 1041 1042 s->overlap_check = r->overlap_check; 1043 s->use_lazy_refcounts = r->use_lazy_refcounts; 1044 1045 for (i = 0; i < QCOW2_DISCARD_MAX; i++) { 1046 s->discard_passthrough[i] = r->discard_passthrough[i]; 1047 } 1048 1049 if (s->cache_clean_interval != r->cache_clean_interval) { 1050 cache_clean_timer_del(bs); 1051 s->cache_clean_interval = r->cache_clean_interval; 1052 cache_clean_timer_init(bs, bdrv_get_aio_context(bs)); 1053 } 1054 1055 qapi_free_QCryptoBlockOpenOptions(s->crypto_opts); 1056 s->crypto_opts = r->crypto_opts; 1057 } 1058 1059 static void qcow2_update_options_abort(BlockDriverState *bs, 1060 Qcow2ReopenState *r) 1061 { 1062 if (r->l2_table_cache) { 1063 qcow2_cache_destroy(bs, r->l2_table_cache); 1064 } 1065 if 
(r->refcount_block_cache) { 1066 qcow2_cache_destroy(bs, r->refcount_block_cache); 1067 } 1068 qapi_free_QCryptoBlockOpenOptions(r->crypto_opts); 1069 } 1070 1071 static int qcow2_update_options(BlockDriverState *bs, QDict *options, 1072 int flags, Error **errp) 1073 { 1074 Qcow2ReopenState r = {}; 1075 int ret; 1076 1077 ret = qcow2_update_options_prepare(bs, &r, options, flags, errp); 1078 if (ret >= 0) { 1079 qcow2_update_options_commit(bs, &r); 1080 } else { 1081 qcow2_update_options_abort(bs, &r); 1082 } 1083 1084 return ret; 1085 } 1086 1087 static int qcow2_do_open(BlockDriverState *bs, QDict *options, int flags, 1088 Error **errp) 1089 { 1090 BDRVQcow2State *s = bs->opaque; 1091 unsigned int len, i; 1092 int ret = 0; 1093 QCowHeader header; 1094 Error *local_err = NULL; 1095 uint64_t ext_end; 1096 uint64_t l1_vm_state_index; 1097 bool update_header = false; 1098 1099 ret = bdrv_pread(bs->file, 0, &header, sizeof(header)); 1100 if (ret < 0) { 1101 error_setg_errno(errp, -ret, "Could not read qcow2 header"); 1102 goto fail; 1103 } 1104 be32_to_cpus(&header.magic); 1105 be32_to_cpus(&header.version); 1106 be64_to_cpus(&header.backing_file_offset); 1107 be32_to_cpus(&header.backing_file_size); 1108 be64_to_cpus(&header.size); 1109 be32_to_cpus(&header.cluster_bits); 1110 be32_to_cpus(&header.crypt_method); 1111 be64_to_cpus(&header.l1_table_offset); 1112 be32_to_cpus(&header.l1_size); 1113 be64_to_cpus(&header.refcount_table_offset); 1114 be32_to_cpus(&header.refcount_table_clusters); 1115 be64_to_cpus(&header.snapshots_offset); 1116 be32_to_cpus(&header.nb_snapshots); 1117 1118 if (header.magic != QCOW_MAGIC) { 1119 error_setg(errp, "Image is not in qcow2 format"); 1120 ret = -EINVAL; 1121 goto fail; 1122 } 1123 if (header.version < 2 || header.version > 3) { 1124 error_setg(errp, "Unsupported qcow2 version %" PRIu32, header.version); 1125 ret = -ENOTSUP; 1126 goto fail; 1127 } 1128 1129 s->qcow_version = header.version; 1130 1131 /* Initialise cluster size */ 1132 if (header.cluster_bits < MIN_CLUSTER_BITS || 1133 header.cluster_bits > MAX_CLUSTER_BITS) { 1134 error_setg(errp, "Unsupported cluster size: 2^%" PRIu32, 1135 header.cluster_bits); 1136 ret = -EINVAL; 1137 goto fail; 1138 } 1139 1140 s->cluster_bits = header.cluster_bits; 1141 s->cluster_size = 1 << s->cluster_bits; 1142 s->cluster_sectors = 1 << (s->cluster_bits - 9); 1143 1144 /* Initialise version 3 header fields */ 1145 if (header.version == 2) { 1146 header.incompatible_features = 0; 1147 header.compatible_features = 0; 1148 header.autoclear_features = 0; 1149 header.refcount_order = 4; 1150 header.header_length = 72; 1151 } else { 1152 be64_to_cpus(&header.incompatible_features); 1153 be64_to_cpus(&header.compatible_features); 1154 be64_to_cpus(&header.autoclear_features); 1155 be32_to_cpus(&header.refcount_order); 1156 be32_to_cpus(&header.header_length); 1157 1158 if (header.header_length < 104) { 1159 error_setg(errp, "qcow2 header too short"); 1160 ret = -EINVAL; 1161 goto fail; 1162 } 1163 } 1164 1165 if (header.header_length > s->cluster_size) { 1166 error_setg(errp, "qcow2 header exceeds cluster size"); 1167 ret = -EINVAL; 1168 goto fail; 1169 } 1170 1171 if (header.header_length > sizeof(header)) { 1172 s->unknown_header_fields_size = header.header_length - sizeof(header); 1173 s->unknown_header_fields = g_malloc(s->unknown_header_fields_size); 1174 ret = bdrv_pread(bs->file, sizeof(header), s->unknown_header_fields, 1175 s->unknown_header_fields_size); 1176 if (ret < 0) { 1177 error_setg_errno(errp, -ret, 
"Could not read unknown qcow2 header " 1178 "fields"); 1179 goto fail; 1180 } 1181 } 1182 1183 if (header.backing_file_offset > s->cluster_size) { 1184 error_setg(errp, "Invalid backing file offset"); 1185 ret = -EINVAL; 1186 goto fail; 1187 } 1188 1189 if (header.backing_file_offset) { 1190 ext_end = header.backing_file_offset; 1191 } else { 1192 ext_end = 1 << header.cluster_bits; 1193 } 1194 1195 /* Handle feature bits */ 1196 s->incompatible_features = header.incompatible_features; 1197 s->compatible_features = header.compatible_features; 1198 s->autoclear_features = header.autoclear_features; 1199 1200 if (s->incompatible_features & ~QCOW2_INCOMPAT_MASK) { 1201 void *feature_table = NULL; 1202 qcow2_read_extensions(bs, header.header_length, ext_end, 1203 &feature_table, flags, NULL, NULL); 1204 report_unsupported_feature(errp, feature_table, 1205 s->incompatible_features & 1206 ~QCOW2_INCOMPAT_MASK); 1207 ret = -ENOTSUP; 1208 g_free(feature_table); 1209 goto fail; 1210 } 1211 1212 if (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT) { 1213 /* Corrupt images may not be written to unless they are being repaired 1214 */ 1215 if ((flags & BDRV_O_RDWR) && !(flags & BDRV_O_CHECK)) { 1216 error_setg(errp, "qcow2: Image is corrupt; cannot be opened " 1217 "read/write"); 1218 ret = -EACCES; 1219 goto fail; 1220 } 1221 } 1222 1223 /* Check support for various header values */ 1224 if (header.refcount_order > 6) { 1225 error_setg(errp, "Reference count entry width too large; may not " 1226 "exceed 64 bits"); 1227 ret = -EINVAL; 1228 goto fail; 1229 } 1230 s->refcount_order = header.refcount_order; 1231 s->refcount_bits = 1 << s->refcount_order; 1232 s->refcount_max = UINT64_C(1) << (s->refcount_bits - 1); 1233 s->refcount_max += s->refcount_max - 1; 1234 1235 s->crypt_method_header = header.crypt_method; 1236 if (s->crypt_method_header) { 1237 if (bdrv_uses_whitelist() && 1238 s->crypt_method_header == QCOW_CRYPT_AES) { 1239 error_setg(errp, 1240 "Use of AES-CBC encrypted qcow2 images is no longer " 1241 "supported in system emulators"); 1242 error_append_hint(errp, 1243 "You can use 'qemu-img convert' to convert your " 1244 "image to an alternative supported format, such " 1245 "as unencrypted qcow2, or raw with the LUKS " 1246 "format instead.\n"); 1247 ret = -ENOSYS; 1248 goto fail; 1249 } 1250 1251 if (s->crypt_method_header == QCOW_CRYPT_AES) { 1252 s->crypt_physical_offset = false; 1253 } else { 1254 /* Assuming LUKS and any future crypt methods we 1255 * add will all use physical offsets, due to the 1256 * fact that the alternative is insecure... 
*/ 1257 s->crypt_physical_offset = true; 1258 } 1259 1260 bs->encrypted = true; 1261 } 1262 1263 s->l2_bits = s->cluster_bits - 3; /* L2 is always one cluster */ 1264 s->l2_size = 1 << s->l2_bits; 1265 /* 2^(s->refcount_order - 3) is the refcount width in bytes */ 1266 s->refcount_block_bits = s->cluster_bits - (s->refcount_order - 3); 1267 s->refcount_block_size = 1 << s->refcount_block_bits; 1268 bs->total_sectors = header.size / 512; 1269 s->csize_shift = (62 - (s->cluster_bits - 8)); 1270 s->csize_mask = (1 << (s->cluster_bits - 8)) - 1; 1271 s->cluster_offset_mask = (1LL << s->csize_shift) - 1; 1272 1273 s->refcount_table_offset = header.refcount_table_offset; 1274 s->refcount_table_size = 1275 header.refcount_table_clusters << (s->cluster_bits - 3); 1276 1277 if (header.refcount_table_clusters > qcow2_max_refcount_clusters(s)) { 1278 error_setg(errp, "Reference count table too large"); 1279 ret = -EINVAL; 1280 goto fail; 1281 } 1282 1283 ret = validate_table_offset(bs, s->refcount_table_offset, 1284 s->refcount_table_size, sizeof(uint64_t)); 1285 if (ret < 0) { 1286 error_setg(errp, "Invalid reference count table offset"); 1287 goto fail; 1288 } 1289 1290 /* Snapshot table offset/length */ 1291 if (header.nb_snapshots > QCOW_MAX_SNAPSHOTS) { 1292 error_setg(errp, "Too many snapshots"); 1293 ret = -EINVAL; 1294 goto fail; 1295 } 1296 1297 ret = validate_table_offset(bs, header.snapshots_offset, 1298 header.nb_snapshots, 1299 sizeof(QCowSnapshotHeader)); 1300 if (ret < 0) { 1301 error_setg(errp, "Invalid snapshot table offset"); 1302 goto fail; 1303 } 1304 1305 /* read the level 1 table */ 1306 if (header.l1_size > QCOW_MAX_L1_SIZE / sizeof(uint64_t)) { 1307 error_setg(errp, "Active L1 table too large"); 1308 ret = -EFBIG; 1309 goto fail; 1310 } 1311 s->l1_size = header.l1_size; 1312 1313 l1_vm_state_index = size_to_l1(s, header.size); 1314 if (l1_vm_state_index > INT_MAX) { 1315 error_setg(errp, "Image is too big"); 1316 ret = -EFBIG; 1317 goto fail; 1318 } 1319 s->l1_vm_state_index = l1_vm_state_index; 1320 1321 /* the L1 table must contain at least enough entries to put 1322 header.size bytes */ 1323 if (s->l1_size < s->l1_vm_state_index) { 1324 error_setg(errp, "L1 table is too small"); 1325 ret = -EINVAL; 1326 goto fail; 1327 } 1328 1329 ret = validate_table_offset(bs, header.l1_table_offset, 1330 header.l1_size, sizeof(uint64_t)); 1331 if (ret < 0) { 1332 error_setg(errp, "Invalid L1 table offset"); 1333 goto fail; 1334 } 1335 s->l1_table_offset = header.l1_table_offset; 1336 1337 1338 if (s->l1_size > 0) { 1339 s->l1_table = qemu_try_blockalign(bs->file->bs, 1340 align_offset(s->l1_size * sizeof(uint64_t), 512)); 1341 if (s->l1_table == NULL) { 1342 error_setg(errp, "Could not allocate L1 table"); 1343 ret = -ENOMEM; 1344 goto fail; 1345 } 1346 ret = bdrv_pread(bs->file, s->l1_table_offset, s->l1_table, 1347 s->l1_size * sizeof(uint64_t)); 1348 if (ret < 0) { 1349 error_setg_errno(errp, -ret, "Could not read L1 table"); 1350 goto fail; 1351 } 1352 for(i = 0;i < s->l1_size; i++) { 1353 be64_to_cpus(&s->l1_table[i]); 1354 } 1355 } 1356 1357 /* Parse driver-specific options */ 1358 ret = qcow2_update_options(bs, options, flags, errp); 1359 if (ret < 0) { 1360 goto fail; 1361 } 1362 1363 s->cluster_cache_offset = -1; 1364 s->flags = flags; 1365 1366 ret = qcow2_refcount_init(bs); 1367 if (ret != 0) { 1368 error_setg_errno(errp, -ret, "Could not initialize refcount handling"); 1369 goto fail; 1370 } 1371 1372 QLIST_INIT(&s->cluster_allocs); 1373 QTAILQ_INIT(&s->discards); 1374 1375 
/* read qcow2 extensions */ 1376 if (qcow2_read_extensions(bs, header.header_length, ext_end, NULL, 1377 flags, &update_header, &local_err)) { 1378 error_propagate(errp, local_err); 1379 ret = -EINVAL; 1380 goto fail; 1381 } 1382 1383 /* qcow2_read_extension may have set up the crypto context 1384 * if the crypt method needs a header region, some methods 1385 * don't need header extensions, so must check here 1386 */ 1387 if (s->crypt_method_header && !s->crypto) { 1388 if (s->crypt_method_header == QCOW_CRYPT_AES) { 1389 unsigned int cflags = 0; 1390 if (flags & BDRV_O_NO_IO) { 1391 cflags |= QCRYPTO_BLOCK_OPEN_NO_IO; 1392 } 1393 s->crypto = qcrypto_block_open(s->crypto_opts, "encrypt.", 1394 NULL, NULL, cflags, errp); 1395 if (!s->crypto) { 1396 ret = -EINVAL; 1397 goto fail; 1398 } 1399 } else if (!(flags & BDRV_O_NO_IO)) { 1400 error_setg(errp, "Missing CRYPTO header for crypt method %d", 1401 s->crypt_method_header); 1402 ret = -EINVAL; 1403 goto fail; 1404 } 1405 } 1406 1407 /* read the backing file name */ 1408 if (header.backing_file_offset != 0) { 1409 len = header.backing_file_size; 1410 if (len > MIN(1023, s->cluster_size - header.backing_file_offset) || 1411 len >= sizeof(bs->backing_file)) { 1412 error_setg(errp, "Backing file name too long"); 1413 ret = -EINVAL; 1414 goto fail; 1415 } 1416 ret = bdrv_pread(bs->file, header.backing_file_offset, 1417 bs->backing_file, len); 1418 if (ret < 0) { 1419 error_setg_errno(errp, -ret, "Could not read backing file name"); 1420 goto fail; 1421 } 1422 bs->backing_file[len] = '\0'; 1423 s->image_backing_file = g_strdup(bs->backing_file); 1424 } 1425 1426 /* Internal snapshots */ 1427 s->snapshots_offset = header.snapshots_offset; 1428 s->nb_snapshots = header.nb_snapshots; 1429 1430 ret = qcow2_read_snapshots(bs); 1431 if (ret < 0) { 1432 error_setg_errno(errp, -ret, "Could not read snapshots"); 1433 goto fail; 1434 } 1435 1436 /* Clear unknown autoclear feature bits */ 1437 update_header |= s->autoclear_features & ~QCOW2_AUTOCLEAR_MASK; 1438 update_header = 1439 update_header && !bs->read_only && !(flags & BDRV_O_INACTIVE); 1440 if (update_header) { 1441 s->autoclear_features &= QCOW2_AUTOCLEAR_MASK; 1442 } 1443 1444 if (qcow2_load_autoloading_dirty_bitmaps(bs, &local_err)) { 1445 update_header = false; 1446 } 1447 if (local_err != NULL) { 1448 error_propagate(errp, local_err); 1449 ret = -EINVAL; 1450 goto fail; 1451 } 1452 1453 if (update_header) { 1454 ret = qcow2_update_header(bs); 1455 if (ret < 0) { 1456 error_setg_errno(errp, -ret, "Could not update qcow2 header"); 1457 goto fail; 1458 } 1459 } 1460 1461 /* Initialise locks */ 1462 qemu_co_mutex_init(&s->lock); 1463 bs->supported_zero_flags = BDRV_REQ_MAY_UNMAP; 1464 1465 /* Repair image if dirty */ 1466 if (!(flags & (BDRV_O_CHECK | BDRV_O_INACTIVE)) && !bs->read_only && 1467 (s->incompatible_features & QCOW2_INCOMPAT_DIRTY)) { 1468 BdrvCheckResult result = {0}; 1469 1470 ret = qcow2_check(bs, &result, BDRV_FIX_ERRORS | BDRV_FIX_LEAKS); 1471 if (ret < 0) { 1472 error_setg_errno(errp, -ret, "Could not repair dirty image"); 1473 goto fail; 1474 } 1475 } 1476 1477 #ifdef DEBUG_ALLOC 1478 { 1479 BdrvCheckResult result = {0}; 1480 qcow2_check_refcounts(bs, &result, 0); 1481 } 1482 #endif 1483 return ret; 1484 1485 fail: 1486 g_free(s->unknown_header_fields); 1487 cleanup_unknown_header_ext(bs); 1488 qcow2_free_snapshots(bs); 1489 qcow2_refcount_close(bs); 1490 qemu_vfree(s->l1_table); 1491 /* else pre-write overlap checks in cache_destroy may crash */ 1492 s->l1_table = NULL; 1493 
cache_clean_timer_del(bs); 1494 if (s->l2_table_cache) { 1495 qcow2_cache_destroy(bs, s->l2_table_cache); 1496 } 1497 if (s->refcount_block_cache) { 1498 qcow2_cache_destroy(bs, s->refcount_block_cache); 1499 } 1500 qcrypto_block_free(s->crypto); 1501 qapi_free_QCryptoBlockOpenOptions(s->crypto_opts); 1502 return ret; 1503 } 1504 1505 static int qcow2_open(BlockDriverState *bs, QDict *options, int flags, 1506 Error **errp) 1507 { 1508 bs->file = bdrv_open_child(NULL, options, "file", bs, &child_file, 1509 false, errp); 1510 if (!bs->file) { 1511 return -EINVAL; 1512 } 1513 1514 return qcow2_do_open(bs, options, flags, errp); 1515 } 1516 1517 static void qcow2_refresh_limits(BlockDriverState *bs, Error **errp) 1518 { 1519 BDRVQcow2State *s = bs->opaque; 1520 1521 if (bs->encrypted) { 1522 /* Encryption works on a sector granularity */ 1523 bs->bl.request_alignment = BDRV_SECTOR_SIZE; 1524 } 1525 bs->bl.pwrite_zeroes_alignment = s->cluster_size; 1526 bs->bl.pdiscard_alignment = s->cluster_size; 1527 } 1528 1529 static int qcow2_reopen_prepare(BDRVReopenState *state, 1530 BlockReopenQueue *queue, Error **errp) 1531 { 1532 Qcow2ReopenState *r; 1533 int ret; 1534 1535 r = g_new0(Qcow2ReopenState, 1); 1536 state->opaque = r; 1537 1538 ret = qcow2_update_options_prepare(state->bs, r, state->options, 1539 state->flags, errp); 1540 if (ret < 0) { 1541 goto fail; 1542 } 1543 1544 /* We need to write out any unwritten data if we reopen read-only. */ 1545 if ((state->flags & BDRV_O_RDWR) == 0) { 1546 ret = qcow2_reopen_bitmaps_ro(state->bs, errp); 1547 if (ret < 0) { 1548 goto fail; 1549 } 1550 1551 ret = bdrv_flush(state->bs); 1552 if (ret < 0) { 1553 goto fail; 1554 } 1555 1556 ret = qcow2_mark_clean(state->bs); 1557 if (ret < 0) { 1558 goto fail; 1559 } 1560 } 1561 1562 return 0; 1563 1564 fail: 1565 qcow2_update_options_abort(state->bs, r); 1566 g_free(r); 1567 return ret; 1568 } 1569 1570 static void qcow2_reopen_commit(BDRVReopenState *state) 1571 { 1572 qcow2_update_options_commit(state->bs, state->opaque); 1573 g_free(state->opaque); 1574 } 1575 1576 static void qcow2_reopen_abort(BDRVReopenState *state) 1577 { 1578 qcow2_update_options_abort(state->bs, state->opaque); 1579 g_free(state->opaque); 1580 } 1581 1582 static void qcow2_join_options(QDict *options, QDict *old_options) 1583 { 1584 bool has_new_overlap_template = 1585 qdict_haskey(options, QCOW2_OPT_OVERLAP) || 1586 qdict_haskey(options, QCOW2_OPT_OVERLAP_TEMPLATE); 1587 bool has_new_total_cache_size = 1588 qdict_haskey(options, QCOW2_OPT_CACHE_SIZE); 1589 bool has_all_cache_options; 1590 1591 /* New overlap template overrides all old overlap options */ 1592 if (has_new_overlap_template) { 1593 qdict_del(old_options, QCOW2_OPT_OVERLAP); 1594 qdict_del(old_options, QCOW2_OPT_OVERLAP_TEMPLATE); 1595 qdict_del(old_options, QCOW2_OPT_OVERLAP_MAIN_HEADER); 1596 qdict_del(old_options, QCOW2_OPT_OVERLAP_ACTIVE_L1); 1597 qdict_del(old_options, QCOW2_OPT_OVERLAP_ACTIVE_L2); 1598 qdict_del(old_options, QCOW2_OPT_OVERLAP_REFCOUNT_TABLE); 1599 qdict_del(old_options, QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK); 1600 qdict_del(old_options, QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE); 1601 qdict_del(old_options, QCOW2_OPT_OVERLAP_INACTIVE_L1); 1602 qdict_del(old_options, QCOW2_OPT_OVERLAP_INACTIVE_L2); 1603 } 1604 1605 /* New total cache size overrides all old options */ 1606 if (qdict_haskey(options, QCOW2_OPT_CACHE_SIZE)) { 1607 qdict_del(old_options, QCOW2_OPT_L2_CACHE_SIZE); 1608 qdict_del(old_options, QCOW2_OPT_REFCOUNT_CACHE_SIZE); 1609 } 1610 1611 
qdict_join(options, old_options, false); 1612 1613 /* 1614 * If after merging all cache size options are set, an old total size is 1615 * overwritten. Do keep all options, however, if all three are new. The 1616 * resulting error message is what we want to happen. 1617 */ 1618 has_all_cache_options = 1619 qdict_haskey(options, QCOW2_OPT_CACHE_SIZE) || 1620 qdict_haskey(options, QCOW2_OPT_L2_CACHE_SIZE) || 1621 qdict_haskey(options, QCOW2_OPT_REFCOUNT_CACHE_SIZE); 1622 1623 if (has_all_cache_options && !has_new_total_cache_size) { 1624 qdict_del(options, QCOW2_OPT_CACHE_SIZE); 1625 } 1626 } 1627 1628 static int64_t coroutine_fn qcow2_co_get_block_status(BlockDriverState *bs, 1629 int64_t sector_num, int nb_sectors, int *pnum, BlockDriverState **file) 1630 { 1631 BDRVQcow2State *s = bs->opaque; 1632 uint64_t cluster_offset; 1633 int index_in_cluster, ret; 1634 unsigned int bytes; 1635 int64_t status = 0; 1636 1637 bytes = MIN(INT_MAX, nb_sectors * BDRV_SECTOR_SIZE); 1638 qemu_co_mutex_lock(&s->lock); 1639 ret = qcow2_get_cluster_offset(bs, sector_num << 9, &bytes, 1640 &cluster_offset); 1641 qemu_co_mutex_unlock(&s->lock); 1642 if (ret < 0) { 1643 return ret; 1644 } 1645 1646 *pnum = bytes >> BDRV_SECTOR_BITS; 1647 1648 if (cluster_offset != 0 && ret != QCOW2_CLUSTER_COMPRESSED && 1649 !s->crypto) { 1650 index_in_cluster = sector_num & (s->cluster_sectors - 1); 1651 cluster_offset |= (index_in_cluster << BDRV_SECTOR_BITS); 1652 *file = bs->file->bs; 1653 status |= BDRV_BLOCK_OFFSET_VALID | cluster_offset; 1654 } 1655 if (ret == QCOW2_CLUSTER_ZERO_PLAIN || ret == QCOW2_CLUSTER_ZERO_ALLOC) { 1656 status |= BDRV_BLOCK_ZERO; 1657 } else if (ret != QCOW2_CLUSTER_UNALLOCATED) { 1658 status |= BDRV_BLOCK_DATA; 1659 } 1660 return status; 1661 } 1662 1663 /* handle reading after the end of the backing file */ 1664 int qcow2_backing_read1(BlockDriverState *bs, QEMUIOVector *qiov, 1665 int64_t offset, int bytes) 1666 { 1667 uint64_t bs_size = bs->total_sectors * BDRV_SECTOR_SIZE; 1668 int n1; 1669 1670 if ((offset + bytes) <= bs_size) { 1671 return bytes; 1672 } 1673 1674 if (offset >= bs_size) { 1675 n1 = 0; 1676 } else { 1677 n1 = bs_size - offset; 1678 } 1679 1680 qemu_iovec_memset(qiov, n1, 0, bytes - n1); 1681 1682 return n1; 1683 } 1684 1685 static coroutine_fn int qcow2_co_preadv(BlockDriverState *bs, uint64_t offset, 1686 uint64_t bytes, QEMUIOVector *qiov, 1687 int flags) 1688 { 1689 BDRVQcow2State *s = bs->opaque; 1690 int offset_in_cluster, n1; 1691 int ret; 1692 unsigned int cur_bytes; /* number of bytes in current iteration */ 1693 uint64_t cluster_offset = 0; 1694 uint64_t bytes_done = 0; 1695 QEMUIOVector hd_qiov; 1696 uint8_t *cluster_data = NULL; 1697 1698 qemu_iovec_init(&hd_qiov, qiov->niov); 1699 1700 qemu_co_mutex_lock(&s->lock); 1701 1702 while (bytes != 0) { 1703 1704 /* prepare next request */ 1705 cur_bytes = MIN(bytes, INT_MAX); 1706 if (s->crypto) { 1707 cur_bytes = MIN(cur_bytes, 1708 QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); 1709 } 1710 1711 ret = qcow2_get_cluster_offset(bs, offset, &cur_bytes, &cluster_offset); 1712 if (ret < 0) { 1713 goto fail; 1714 } 1715 1716 offset_in_cluster = offset_into_cluster(s, offset); 1717 1718 qemu_iovec_reset(&hd_qiov); 1719 qemu_iovec_concat(&hd_qiov, qiov, bytes_done, cur_bytes); 1720 1721 switch (ret) { 1722 case QCOW2_CLUSTER_UNALLOCATED: 1723 1724 if (bs->backing) { 1725 /* read from the base image */ 1726 n1 = qcow2_backing_read1(bs->backing->bs, &hd_qiov, 1727 offset, cur_bytes); 1728 if (n1 > 0) { 1729 QEMUIOVector local_qiov; 
1730 1731 qemu_iovec_init(&local_qiov, hd_qiov.niov); 1732 qemu_iovec_concat(&local_qiov, &hd_qiov, 0, n1); 1733 1734 BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO); 1735 qemu_co_mutex_unlock(&s->lock); 1736 ret = bdrv_co_preadv(bs->backing, offset, n1, 1737 &local_qiov, 0); 1738 qemu_co_mutex_lock(&s->lock); 1739 1740 qemu_iovec_destroy(&local_qiov); 1741 1742 if (ret < 0) { 1743 goto fail; 1744 } 1745 } 1746 } else { 1747 /* Note: in this case, no need to wait */ 1748 qemu_iovec_memset(&hd_qiov, 0, 0, cur_bytes); 1749 } 1750 break; 1751 1752 case QCOW2_CLUSTER_ZERO_PLAIN: 1753 case QCOW2_CLUSTER_ZERO_ALLOC: 1754 qemu_iovec_memset(&hd_qiov, 0, 0, cur_bytes); 1755 break; 1756 1757 case QCOW2_CLUSTER_COMPRESSED: 1758 /* add AIO support for compressed blocks ? */ 1759 ret = qcow2_decompress_cluster(bs, cluster_offset); 1760 if (ret < 0) { 1761 goto fail; 1762 } 1763 1764 qemu_iovec_from_buf(&hd_qiov, 0, 1765 s->cluster_cache + offset_in_cluster, 1766 cur_bytes); 1767 break; 1768 1769 case QCOW2_CLUSTER_NORMAL: 1770 if ((cluster_offset & 511) != 0) { 1771 ret = -EIO; 1772 goto fail; 1773 } 1774 1775 if (bs->encrypted) { 1776 assert(s->crypto); 1777 1778 /* 1779 * For encrypted images, read everything into a temporary 1780 * contiguous buffer on which the AES functions can work. 1781 */ 1782 if (!cluster_data) { 1783 cluster_data = 1784 qemu_try_blockalign(bs->file->bs, 1785 QCOW_MAX_CRYPT_CLUSTERS 1786 * s->cluster_size); 1787 if (cluster_data == NULL) { 1788 ret = -ENOMEM; 1789 goto fail; 1790 } 1791 } 1792 1793 assert(cur_bytes <= QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); 1794 qemu_iovec_reset(&hd_qiov); 1795 qemu_iovec_add(&hd_qiov, cluster_data, cur_bytes); 1796 } 1797 1798 BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO); 1799 qemu_co_mutex_unlock(&s->lock); 1800 ret = bdrv_co_preadv(bs->file, 1801 cluster_offset + offset_in_cluster, 1802 cur_bytes, &hd_qiov, 0); 1803 qemu_co_mutex_lock(&s->lock); 1804 if (ret < 0) { 1805 goto fail; 1806 } 1807 if (bs->encrypted) { 1808 assert(s->crypto); 1809 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0); 1810 assert((cur_bytes & (BDRV_SECTOR_SIZE - 1)) == 0); 1811 if (qcrypto_block_decrypt(s->crypto, 1812 (s->crypt_physical_offset ? 
1813 cluster_offset + offset_in_cluster : 1814 offset), 1815 cluster_data, 1816 cur_bytes, 1817 NULL) < 0) { 1818 ret = -EIO; 1819 goto fail; 1820 } 1821 qemu_iovec_from_buf(qiov, bytes_done, cluster_data, cur_bytes); 1822 } 1823 break; 1824 1825 default: 1826 g_assert_not_reached(); 1827 ret = -EIO; 1828 goto fail; 1829 } 1830 1831 bytes -= cur_bytes; 1832 offset += cur_bytes; 1833 bytes_done += cur_bytes; 1834 } 1835 ret = 0; 1836 1837 fail: 1838 qemu_co_mutex_unlock(&s->lock); 1839 1840 qemu_iovec_destroy(&hd_qiov); 1841 qemu_vfree(cluster_data); 1842 1843 return ret; 1844 } 1845 1846 /* Check if it's possible to merge a write request with the writing of 1847 * the data from the COW regions */ 1848 static bool merge_cow(uint64_t offset, unsigned bytes, 1849 QEMUIOVector *hd_qiov, QCowL2Meta *l2meta) 1850 { 1851 QCowL2Meta *m; 1852 1853 for (m = l2meta; m != NULL; m = m->next) { 1854 /* If both COW regions are empty then there's nothing to merge */ 1855 if (m->cow_start.nb_bytes == 0 && m->cow_end.nb_bytes == 0) { 1856 continue; 1857 } 1858 1859 /* The data (middle) region must be immediately after the 1860 * start region */ 1861 if (l2meta_cow_start(m) + m->cow_start.nb_bytes != offset) { 1862 continue; 1863 } 1864 1865 /* The end region must be immediately after the data (middle) 1866 * region */ 1867 if (m->offset + m->cow_end.offset != offset + bytes) { 1868 continue; 1869 } 1870 1871 /* Make sure that adding both COW regions to the QEMUIOVector 1872 * does not exceed IOV_MAX */ 1873 if (hd_qiov->niov > IOV_MAX - 2) { 1874 continue; 1875 } 1876 1877 m->data_qiov = hd_qiov; 1878 return true; 1879 } 1880 1881 return false; 1882 } 1883 1884 static coroutine_fn int qcow2_co_pwritev(BlockDriverState *bs, uint64_t offset, 1885 uint64_t bytes, QEMUIOVector *qiov, 1886 int flags) 1887 { 1888 BDRVQcow2State *s = bs->opaque; 1889 int offset_in_cluster; 1890 int ret; 1891 unsigned int cur_bytes; /* number of sectors in current iteration */ 1892 uint64_t cluster_offset; 1893 QEMUIOVector hd_qiov; 1894 uint64_t bytes_done = 0; 1895 uint8_t *cluster_data = NULL; 1896 QCowL2Meta *l2meta = NULL; 1897 1898 trace_qcow2_writev_start_req(qemu_coroutine_self(), offset, bytes); 1899 1900 qemu_iovec_init(&hd_qiov, qiov->niov); 1901 1902 s->cluster_cache_offset = -1; /* disable compressed cache */ 1903 1904 qemu_co_mutex_lock(&s->lock); 1905 1906 while (bytes != 0) { 1907 1908 l2meta = NULL; 1909 1910 trace_qcow2_writev_start_part(qemu_coroutine_self()); 1911 offset_in_cluster = offset_into_cluster(s, offset); 1912 cur_bytes = MIN(bytes, INT_MAX); 1913 if (bs->encrypted) { 1914 cur_bytes = MIN(cur_bytes, 1915 QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size 1916 - offset_in_cluster); 1917 } 1918 1919 ret = qcow2_alloc_cluster_offset(bs, offset, &cur_bytes, 1920 &cluster_offset, &l2meta); 1921 if (ret < 0) { 1922 goto fail; 1923 } 1924 1925 assert((cluster_offset & 511) == 0); 1926 1927 qemu_iovec_reset(&hd_qiov); 1928 qemu_iovec_concat(&hd_qiov, qiov, bytes_done, cur_bytes); 1929 1930 if (bs->encrypted) { 1931 assert(s->crypto); 1932 if (!cluster_data) { 1933 cluster_data = qemu_try_blockalign(bs->file->bs, 1934 QCOW_MAX_CRYPT_CLUSTERS 1935 * s->cluster_size); 1936 if (cluster_data == NULL) { 1937 ret = -ENOMEM; 1938 goto fail; 1939 } 1940 } 1941 1942 assert(hd_qiov.size <= 1943 QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); 1944 qemu_iovec_to_buf(&hd_qiov, 0, cluster_data, hd_qiov.size); 1945 1946 if (qcrypto_block_encrypt(s->crypto, 1947 (s->crypt_physical_offset ? 
1948 cluster_offset + offset_in_cluster : 1949 offset), 1950 cluster_data, 1951 cur_bytes, NULL) < 0) { 1952 ret = -EIO; 1953 goto fail; 1954 } 1955 1956 qemu_iovec_reset(&hd_qiov); 1957 qemu_iovec_add(&hd_qiov, cluster_data, cur_bytes); 1958 } 1959 1960 ret = qcow2_pre_write_overlap_check(bs, 0, 1961 cluster_offset + offset_in_cluster, cur_bytes); 1962 if (ret < 0) { 1963 goto fail; 1964 } 1965 1966 /* If we need to do COW, check if it's possible to merge the 1967 * writing of the guest data together with that of the COW regions. 1968 * If it's not possible (or not necessary) then write the 1969 * guest data now. */ 1970 if (!merge_cow(offset, cur_bytes, &hd_qiov, l2meta)) { 1971 qemu_co_mutex_unlock(&s->lock); 1972 BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO); 1973 trace_qcow2_writev_data(qemu_coroutine_self(), 1974 cluster_offset + offset_in_cluster); 1975 ret = bdrv_co_pwritev(bs->file, 1976 cluster_offset + offset_in_cluster, 1977 cur_bytes, &hd_qiov, 0); 1978 qemu_co_mutex_lock(&s->lock); 1979 if (ret < 0) { 1980 goto fail; 1981 } 1982 } 1983 1984 while (l2meta != NULL) { 1985 QCowL2Meta *next; 1986 1987 ret = qcow2_alloc_cluster_link_l2(bs, l2meta); 1988 if (ret < 0) { 1989 goto fail; 1990 } 1991 1992 /* Take the request off the list of running requests */ 1993 if (l2meta->nb_clusters != 0) { 1994 QLIST_REMOVE(l2meta, next_in_flight); 1995 } 1996 1997 qemu_co_queue_restart_all(&l2meta->dependent_requests); 1998 1999 next = l2meta->next; 2000 g_free(l2meta); 2001 l2meta = next; 2002 } 2003 2004 bytes -= cur_bytes; 2005 offset += cur_bytes; 2006 bytes_done += cur_bytes; 2007 trace_qcow2_writev_done_part(qemu_coroutine_self(), cur_bytes); 2008 } 2009 ret = 0; 2010 2011 fail: 2012 while (l2meta != NULL) { 2013 QCowL2Meta *next; 2014 2015 if (l2meta->nb_clusters != 0) { 2016 QLIST_REMOVE(l2meta, next_in_flight); 2017 } 2018 qemu_co_queue_restart_all(&l2meta->dependent_requests); 2019 2020 next = l2meta->next; 2021 g_free(l2meta); 2022 l2meta = next; 2023 } 2024 2025 qemu_co_mutex_unlock(&s->lock); 2026 2027 qemu_iovec_destroy(&hd_qiov); 2028 qemu_vfree(cluster_data); 2029 trace_qcow2_writev_done_req(qemu_coroutine_self(), ret); 2030 2031 return ret; 2032 } 2033 2034 static int qcow2_inactivate(BlockDriverState *bs) 2035 { 2036 BDRVQcow2State *s = bs->opaque; 2037 int ret, result = 0; 2038 Error *local_err = NULL; 2039 2040 qcow2_store_persistent_dirty_bitmaps(bs, &local_err); 2041 if (local_err != NULL) { 2042 result = -EINVAL; 2043 error_report_err(local_err); 2044 error_report("Persistent bitmaps are lost for node '%s'", 2045 bdrv_get_device_or_node_name(bs)); 2046 } 2047 2048 ret = qcow2_cache_flush(bs, s->l2_table_cache); 2049 if (ret) { 2050 result = ret; 2051 error_report("Failed to flush the L2 table cache: %s", 2052 strerror(-ret)); 2053 } 2054 2055 ret = qcow2_cache_flush(bs, s->refcount_block_cache); 2056 if (ret) { 2057 result = ret; 2058 error_report("Failed to flush the refcount block cache: %s", 2059 strerror(-ret)); 2060 } 2061 2062 if (result == 0) { 2063 qcow2_mark_clean(bs); 2064 } 2065 2066 return result; 2067 } 2068 2069 static void qcow2_close(BlockDriverState *bs) 2070 { 2071 BDRVQcow2State *s = bs->opaque; 2072 qemu_vfree(s->l1_table); 2073 /* else pre-write overlap checks in cache_destroy may crash */ 2074 s->l1_table = NULL; 2075 2076 if (!(s->flags & BDRV_O_INACTIVE)) { 2077 qcow2_inactivate(bs); 2078 } 2079 2080 cache_clean_timer_del(bs); 2081 qcow2_cache_destroy(bs, s->l2_table_cache); 2082 qcow2_cache_destroy(bs, s->refcount_block_cache); 2083 2084 
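    /* Release the remaining per-image state: the encryption context, any
     * header extensions that were preserved, the backing file strings, the
     * compressed cluster cache and finally the refcount and snapshot
     * bookkeeping. */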
qcrypto_block_free(s->crypto); 2085 s->crypto = NULL; 2086 2087 g_free(s->unknown_header_fields); 2088 cleanup_unknown_header_ext(bs); 2089 2090 g_free(s->image_backing_file); 2091 g_free(s->image_backing_format); 2092 2093 g_free(s->cluster_cache); 2094 qemu_vfree(s->cluster_data); 2095 qcow2_refcount_close(bs); 2096 qcow2_free_snapshots(bs); 2097 } 2098 2099 static void qcow2_invalidate_cache(BlockDriverState *bs, Error **errp) 2100 { 2101 BDRVQcow2State *s = bs->opaque; 2102 int flags = s->flags; 2103 QCryptoBlock *crypto = NULL; 2104 QDict *options; 2105 Error *local_err = NULL; 2106 int ret; 2107 2108 /* 2109 * Backing files are read-only which makes all of their metadata immutable, 2110 * that means we don't have to worry about reopening them here. 2111 */ 2112 2113 crypto = s->crypto; 2114 s->crypto = NULL; 2115 2116 qcow2_close(bs); 2117 2118 memset(s, 0, sizeof(BDRVQcow2State)); 2119 options = qdict_clone_shallow(bs->options); 2120 2121 flags &= ~BDRV_O_INACTIVE; 2122 ret = qcow2_do_open(bs, options, flags, &local_err); 2123 QDECREF(options); 2124 if (local_err) { 2125 error_propagate(errp, local_err); 2126 error_prepend(errp, "Could not reopen qcow2 layer: "); 2127 bs->drv = NULL; 2128 return; 2129 } else if (ret < 0) { 2130 error_setg_errno(errp, -ret, "Could not reopen qcow2 layer"); 2131 bs->drv = NULL; 2132 return; 2133 } 2134 2135 s->crypto = crypto; 2136 } 2137 2138 static size_t header_ext_add(char *buf, uint32_t magic, const void *s, 2139 size_t len, size_t buflen) 2140 { 2141 QCowExtension *ext_backing_fmt = (QCowExtension*) buf; 2142 size_t ext_len = sizeof(QCowExtension) + ((len + 7) & ~7); 2143 2144 if (buflen < ext_len) { 2145 return -ENOSPC; 2146 } 2147 2148 *ext_backing_fmt = (QCowExtension) { 2149 .magic = cpu_to_be32(magic), 2150 .len = cpu_to_be32(len), 2151 }; 2152 2153 if (len) { 2154 memcpy(buf + sizeof(QCowExtension), s, len); 2155 } 2156 2157 return ext_len; 2158 } 2159 2160 /* 2161 * Updates the qcow2 header, including the variable length parts of it, i.e. 2162 * the backing file name and all extensions. qcow2 was not designed to allow 2163 * such changes, so if we run out of space (we can only use the first cluster) 2164 * this function may fail. 2165 * 2166 * Returns 0 on success, -errno in error cases. 
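 *
 * The first cluster is rewritten in the order laid out below: the fixed
 * header fields, any preserved unknown header fields, the header extensions
 * (backing file format, encryption header pointer, feature table, bitmap
 * directory, preserved unknown extensions), an end-of-extensions marker and
 * finally the backing file name.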
2167 */ 2168 int qcow2_update_header(BlockDriverState *bs) 2169 { 2170 BDRVQcow2State *s = bs->opaque; 2171 QCowHeader *header; 2172 char *buf; 2173 size_t buflen = s->cluster_size; 2174 int ret; 2175 uint64_t total_size; 2176 uint32_t refcount_table_clusters; 2177 size_t header_length; 2178 Qcow2UnknownHeaderExtension *uext; 2179 2180 buf = qemu_blockalign(bs, buflen); 2181 2182 /* Header structure */ 2183 header = (QCowHeader*) buf; 2184 2185 if (buflen < sizeof(*header)) { 2186 ret = -ENOSPC; 2187 goto fail; 2188 } 2189 2190 header_length = sizeof(*header) + s->unknown_header_fields_size; 2191 total_size = bs->total_sectors * BDRV_SECTOR_SIZE; 2192 refcount_table_clusters = s->refcount_table_size >> (s->cluster_bits - 3); 2193 2194 *header = (QCowHeader) { 2195 /* Version 2 fields */ 2196 .magic = cpu_to_be32(QCOW_MAGIC), 2197 .version = cpu_to_be32(s->qcow_version), 2198 .backing_file_offset = 0, 2199 .backing_file_size = 0, 2200 .cluster_bits = cpu_to_be32(s->cluster_bits), 2201 .size = cpu_to_be64(total_size), 2202 .crypt_method = cpu_to_be32(s->crypt_method_header), 2203 .l1_size = cpu_to_be32(s->l1_size), 2204 .l1_table_offset = cpu_to_be64(s->l1_table_offset), 2205 .refcount_table_offset = cpu_to_be64(s->refcount_table_offset), 2206 .refcount_table_clusters = cpu_to_be32(refcount_table_clusters), 2207 .nb_snapshots = cpu_to_be32(s->nb_snapshots), 2208 .snapshots_offset = cpu_to_be64(s->snapshots_offset), 2209 2210 /* Version 3 fields */ 2211 .incompatible_features = cpu_to_be64(s->incompatible_features), 2212 .compatible_features = cpu_to_be64(s->compatible_features), 2213 .autoclear_features = cpu_to_be64(s->autoclear_features), 2214 .refcount_order = cpu_to_be32(s->refcount_order), 2215 .header_length = cpu_to_be32(header_length), 2216 }; 2217 2218 /* For older versions, write a shorter header */ 2219 switch (s->qcow_version) { 2220 case 2: 2221 ret = offsetof(QCowHeader, incompatible_features); 2222 break; 2223 case 3: 2224 ret = sizeof(*header); 2225 break; 2226 default: 2227 ret = -EINVAL; 2228 goto fail; 2229 } 2230 2231 buf += ret; 2232 buflen -= ret; 2233 memset(buf, 0, buflen); 2234 2235 /* Preserve any unknown field in the header */ 2236 if (s->unknown_header_fields_size) { 2237 if (buflen < s->unknown_header_fields_size) { 2238 ret = -ENOSPC; 2239 goto fail; 2240 } 2241 2242 memcpy(buf, s->unknown_header_fields, s->unknown_header_fields_size); 2243 buf += s->unknown_header_fields_size; 2244 buflen -= s->unknown_header_fields_size; 2245 } 2246 2247 /* Backing file format header extension */ 2248 if (s->image_backing_format) { 2249 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_BACKING_FORMAT, 2250 s->image_backing_format, 2251 strlen(s->image_backing_format), 2252 buflen); 2253 if (ret < 0) { 2254 goto fail; 2255 } 2256 2257 buf += ret; 2258 buflen -= ret; 2259 } 2260 2261 /* Full disk encryption header pointer extension */ 2262 if (s->crypto_header.offset != 0) { 2263 cpu_to_be64s(&s->crypto_header.offset); 2264 cpu_to_be64s(&s->crypto_header.length); 2265 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_CRYPTO_HEADER, 2266 &s->crypto_header, sizeof(s->crypto_header), 2267 buflen); 2268 be64_to_cpus(&s->crypto_header.offset); 2269 be64_to_cpus(&s->crypto_header.length); 2270 if (ret < 0) { 2271 goto fail; 2272 } 2273 buf += ret; 2274 buflen -= ret; 2275 } 2276 2277 /* Feature table */ 2278 if (s->qcow_version >= 3) { 2279 Qcow2Feature features[] = { 2280 { 2281 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE, 2282 .bit = QCOW2_INCOMPAT_DIRTY_BITNR, 2283 .name = "dirty bit", 2284 }, 2285 { 
2286 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE, 2287 .bit = QCOW2_INCOMPAT_CORRUPT_BITNR, 2288 .name = "corrupt bit", 2289 }, 2290 { 2291 .type = QCOW2_FEAT_TYPE_COMPATIBLE, 2292 .bit = QCOW2_COMPAT_LAZY_REFCOUNTS_BITNR, 2293 .name = "lazy refcounts", 2294 }, 2295 }; 2296 2297 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_FEATURE_TABLE, 2298 features, sizeof(features), buflen); 2299 if (ret < 0) { 2300 goto fail; 2301 } 2302 buf += ret; 2303 buflen -= ret; 2304 } 2305 2306 /* Bitmap extension */ 2307 if (s->nb_bitmaps > 0) { 2308 Qcow2BitmapHeaderExt bitmaps_header = { 2309 .nb_bitmaps = cpu_to_be32(s->nb_bitmaps), 2310 .bitmap_directory_size = 2311 cpu_to_be64(s->bitmap_directory_size), 2312 .bitmap_directory_offset = 2313 cpu_to_be64(s->bitmap_directory_offset) 2314 }; 2315 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_BITMAPS, 2316 &bitmaps_header, sizeof(bitmaps_header), 2317 buflen); 2318 if (ret < 0) { 2319 goto fail; 2320 } 2321 buf += ret; 2322 buflen -= ret; 2323 } 2324 2325 /* Keep unknown header extensions */ 2326 QLIST_FOREACH(uext, &s->unknown_header_ext, next) { 2327 ret = header_ext_add(buf, uext->magic, uext->data, uext->len, buflen); 2328 if (ret < 0) { 2329 goto fail; 2330 } 2331 2332 buf += ret; 2333 buflen -= ret; 2334 } 2335 2336 /* End of header extensions */ 2337 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_END, NULL, 0, buflen); 2338 if (ret < 0) { 2339 goto fail; 2340 } 2341 2342 buf += ret; 2343 buflen -= ret; 2344 2345 /* Backing file name */ 2346 if (s->image_backing_file) { 2347 size_t backing_file_len = strlen(s->image_backing_file); 2348 2349 if (buflen < backing_file_len) { 2350 ret = -ENOSPC; 2351 goto fail; 2352 } 2353 2354 /* Using strncpy is ok here, since buf is not NUL-terminated. */ 2355 strncpy(buf, s->image_backing_file, buflen); 2356 2357 header->backing_file_offset = cpu_to_be64(buf - ((char*) header)); 2358 header->backing_file_size = cpu_to_be32(backing_file_len); 2359 } 2360 2361 /* Write the new header */ 2362 ret = bdrv_pwrite(bs->file, 0, header, s->cluster_size); 2363 if (ret < 0) { 2364 goto fail; 2365 } 2366 2367 ret = 0; 2368 fail: 2369 qemu_vfree(header); 2370 return ret; 2371 } 2372 2373 static int qcow2_change_backing_file(BlockDriverState *bs, 2374 const char *backing_file, const char *backing_fmt) 2375 { 2376 BDRVQcow2State *s = bs->opaque; 2377 2378 if (backing_file && strlen(backing_file) > 1023) { 2379 return -EINVAL; 2380 } 2381 2382 pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: ""); 2383 pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: ""); 2384 2385 g_free(s->image_backing_file); 2386 g_free(s->image_backing_format); 2387 2388 s->image_backing_file = backing_file ? g_strdup(bs->backing_file) : NULL; 2389 s->image_backing_format = backing_fmt ? 
g_strdup(bs->backing_format) : NULL; 2390 2391 return qcow2_update_header(bs); 2392 } 2393 2394 static int qcow2_crypt_method_from_format(const char *encryptfmt) 2395 { 2396 if (g_str_equal(encryptfmt, "luks")) { 2397 return QCOW_CRYPT_LUKS; 2398 } else if (g_str_equal(encryptfmt, "aes")) { 2399 return QCOW_CRYPT_AES; 2400 } else { 2401 return -EINVAL; 2402 } 2403 } 2404 2405 static int qcow2_set_up_encryption(BlockDriverState *bs, const char *encryptfmt, 2406 QemuOpts *opts, Error **errp) 2407 { 2408 BDRVQcow2State *s = bs->opaque; 2409 QCryptoBlockCreateOptions *cryptoopts = NULL; 2410 QCryptoBlock *crypto = NULL; 2411 int ret = -EINVAL; 2412 QDict *options, *encryptopts; 2413 int fmt; 2414 2415 options = qemu_opts_to_qdict(opts, NULL); 2416 qdict_extract_subqdict(options, &encryptopts, "encrypt."); 2417 QDECREF(options); 2418 2419 fmt = qcow2_crypt_method_from_format(encryptfmt); 2420 2421 switch (fmt) { 2422 case QCOW_CRYPT_LUKS: 2423 cryptoopts = block_crypto_create_opts_init( 2424 Q_CRYPTO_BLOCK_FORMAT_LUKS, encryptopts, errp); 2425 break; 2426 case QCOW_CRYPT_AES: 2427 cryptoopts = block_crypto_create_opts_init( 2428 Q_CRYPTO_BLOCK_FORMAT_QCOW, encryptopts, errp); 2429 break; 2430 default: 2431 error_setg(errp, "Unknown encryption format '%s'", encryptfmt); 2432 break; 2433 } 2434 if (!cryptoopts) { 2435 ret = -EINVAL; 2436 goto out; 2437 } 2438 s->crypt_method_header = fmt; 2439 2440 crypto = qcrypto_block_create(cryptoopts, "encrypt.", 2441 qcow2_crypto_hdr_init_func, 2442 qcow2_crypto_hdr_write_func, 2443 bs, errp); 2444 if (!crypto) { 2445 ret = -EINVAL; 2446 goto out; 2447 } 2448 2449 ret = qcow2_update_header(bs); 2450 if (ret < 0) { 2451 error_setg_errno(errp, -ret, "Could not write encryption header"); 2452 goto out; 2453 } 2454 2455 out: 2456 QDECREF(encryptopts); 2457 qcrypto_block_free(crypto); 2458 qapi_free_QCryptoBlockCreateOptions(cryptoopts); 2459 return ret; 2460 } 2461 2462 2463 /** 2464 * Preallocates metadata structures for data clusters between @offset (in the 2465 * guest disk) and @new_length (which is thus generally the new guest disk 2466 * size). 2467 * 2468 * Returns: 0 on success, -errno on failure. 2469 */ 2470 static int preallocate(BlockDriverState *bs, 2471 uint64_t offset, uint64_t new_length) 2472 { 2473 BDRVQcow2State *s = bs->opaque; 2474 uint64_t bytes; 2475 uint64_t host_offset = 0; 2476 unsigned int cur_bytes; 2477 int ret; 2478 QCowL2Meta *meta; 2479 2480 if (qemu_in_coroutine()) { 2481 qemu_co_mutex_lock(&s->lock); 2482 } 2483 2484 assert(offset <= new_length); 2485 bytes = new_length - offset; 2486 2487 while (bytes) { 2488 cur_bytes = MIN(bytes, INT_MAX); 2489 ret = qcow2_alloc_cluster_offset(bs, offset, &cur_bytes, 2490 &host_offset, &meta); 2491 if (ret < 0) { 2492 goto done; 2493 } 2494 2495 while (meta) { 2496 QCowL2Meta *next = meta->next; 2497 2498 ret = qcow2_alloc_cluster_link_l2(bs, meta); 2499 if (ret < 0) { 2500 qcow2_free_any_clusters(bs, meta->alloc_offset, 2501 meta->nb_clusters, QCOW2_DISCARD_NEVER); 2502 goto done; 2503 } 2504 2505 /* There are no dependent requests, but we need to remove our 2506 * request from the list of in-flight requests */ 2507 QLIST_REMOVE(meta, next_in_flight); 2508 2509 g_free(meta); 2510 meta = next; 2511 } 2512 2513 /* TODO Preallocate data if requested */ 2514 2515 bytes -= cur_bytes; 2516 offset += cur_bytes; 2517 } 2518 2519 /* 2520 * It is expected that the image file is large enough to actually contain 2521 * all of the allocated clusters (otherwise we get failing reads after 2522 * EOF). 
Extend the image to the last allocated sector. 2523 */ 2524 if (host_offset != 0) { 2525 uint8_t data = 0; 2526 ret = bdrv_pwrite(bs->file, (host_offset + cur_bytes) - 1, 2527 &data, 1); 2528 if (ret < 0) { 2529 goto done; 2530 } 2531 } 2532 2533 ret = 0; 2534 2535 done: 2536 if (qemu_in_coroutine()) { 2537 qemu_co_mutex_unlock(&s->lock); 2538 } 2539 return ret; 2540 } 2541 2542 /* qcow2_refcount_metadata_size: 2543 * @clusters: number of clusters to refcount (including data and L1/L2 tables) 2544 * @cluster_size: size of a cluster, in bytes 2545 * @refcount_order: refcount bits power-of-2 exponent 2546 * @generous_increase: allow for the refcount table to be 1.5x as large as it 2547 * needs to be 2548 * 2549 * Returns: Number of bytes required for refcount blocks and table metadata. 2550 */ 2551 int64_t qcow2_refcount_metadata_size(int64_t clusters, size_t cluster_size, 2552 int refcount_order, bool generous_increase, 2553 uint64_t *refblock_count) 2554 { 2555 /* 2556 * Every host cluster is reference-counted, including metadata (even 2557 * refcount metadata is recursively included). 2558 * 2559 * An accurate formula for the size of refcount metadata size is difficult 2560 * to derive. An easier method of calculation is finding the fixed point 2561 * where no further refcount blocks or table clusters are required to 2562 * reference count every cluster. 2563 */ 2564 int64_t blocks_per_table_cluster = cluster_size / sizeof(uint64_t); 2565 int64_t refcounts_per_block = cluster_size * 8 / (1 << refcount_order); 2566 int64_t table = 0; /* number of refcount table clusters */ 2567 int64_t blocks = 0; /* number of refcount block clusters */ 2568 int64_t last; 2569 int64_t n = 0; 2570 2571 do { 2572 last = n; 2573 blocks = DIV_ROUND_UP(clusters + table + blocks, refcounts_per_block); 2574 table = DIV_ROUND_UP(blocks, blocks_per_table_cluster); 2575 n = clusters + blocks + table; 2576 2577 if (n == last && generous_increase) { 2578 clusters += DIV_ROUND_UP(table, 2); 2579 n = 0; /* force another loop */ 2580 generous_increase = false; 2581 } 2582 } while (n != last); 2583 2584 if (refblock_count) { 2585 *refblock_count = blocks; 2586 } 2587 2588 return (blocks + table) * cluster_size; 2589 } 2590 2591 /** 2592 * qcow2_calc_prealloc_size: 2593 * @total_size: virtual disk size in bytes 2594 * @cluster_size: cluster size in bytes 2595 * @refcount_order: refcount bits power-of-2 exponent 2596 * 2597 * Returns: Total number of bytes required for the fully allocated image 2598 * (including metadata). 
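 *
 * For illustration only (a rough, non-normative example): with a 1 GiB
 * virtual size, 64 KiB clusters and 16-bit refcounts this comes to one
 * header cluster, two L2 clusters, one L1 cluster and two clusters of
 * refcount metadata, i.e. the 1 GiB of data plus roughly 384 KiB of
 * metadata.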
2599 */ 2600 static int64_t qcow2_calc_prealloc_size(int64_t total_size, 2601 size_t cluster_size, 2602 int refcount_order) 2603 { 2604 int64_t meta_size = 0; 2605 uint64_t nl1e, nl2e; 2606 int64_t aligned_total_size = align_offset(total_size, cluster_size); 2607 2608 /* header: 1 cluster */ 2609 meta_size += cluster_size; 2610 2611 /* total size of L2 tables */ 2612 nl2e = aligned_total_size / cluster_size; 2613 nl2e = align_offset(nl2e, cluster_size / sizeof(uint64_t)); 2614 meta_size += nl2e * sizeof(uint64_t); 2615 2616 /* total size of L1 tables */ 2617 nl1e = nl2e * sizeof(uint64_t) / cluster_size; 2618 nl1e = align_offset(nl1e, cluster_size / sizeof(uint64_t)); 2619 meta_size += nl1e * sizeof(uint64_t); 2620 2621 /* total size of refcount table and blocks */ 2622 meta_size += qcow2_refcount_metadata_size( 2623 (meta_size + aligned_total_size) / cluster_size, 2624 cluster_size, refcount_order, false, NULL); 2625 2626 return meta_size + aligned_total_size; 2627 } 2628 2629 static size_t qcow2_opt_get_cluster_size_del(QemuOpts *opts, Error **errp) 2630 { 2631 size_t cluster_size; 2632 int cluster_bits; 2633 2634 cluster_size = qemu_opt_get_size_del(opts, BLOCK_OPT_CLUSTER_SIZE, 2635 DEFAULT_CLUSTER_SIZE); 2636 cluster_bits = ctz32(cluster_size); 2637 if (cluster_bits < MIN_CLUSTER_BITS || cluster_bits > MAX_CLUSTER_BITS || 2638 (1 << cluster_bits) != cluster_size) 2639 { 2640 error_setg(errp, "Cluster size must be a power of two between %d and " 2641 "%dk", 1 << MIN_CLUSTER_BITS, 1 << (MAX_CLUSTER_BITS - 10)); 2642 return 0; 2643 } 2644 return cluster_size; 2645 } 2646 2647 static int qcow2_opt_get_version_del(QemuOpts *opts, Error **errp) 2648 { 2649 char *buf; 2650 int ret; 2651 2652 buf = qemu_opt_get_del(opts, BLOCK_OPT_COMPAT_LEVEL); 2653 if (!buf) { 2654 ret = 3; /* default */ 2655 } else if (!strcmp(buf, "0.10")) { 2656 ret = 2; 2657 } else if (!strcmp(buf, "1.1")) { 2658 ret = 3; 2659 } else { 2660 error_setg(errp, "Invalid compatibility level: '%s'", buf); 2661 ret = -EINVAL; 2662 } 2663 g_free(buf); 2664 return ret; 2665 } 2666 2667 static uint64_t qcow2_opt_get_refcount_bits_del(QemuOpts *opts, int version, 2668 Error **errp) 2669 { 2670 uint64_t refcount_bits; 2671 2672 refcount_bits = qemu_opt_get_number_del(opts, BLOCK_OPT_REFCOUNT_BITS, 16); 2673 if (refcount_bits > 64 || !is_power_of_2(refcount_bits)) { 2674 error_setg(errp, "Refcount width must be a power of two and may not " 2675 "exceed 64 bits"); 2676 return 0; 2677 } 2678 2679 if (version < 3 && refcount_bits != 16) { 2680 error_setg(errp, "Different refcount widths than 16 bits require " 2681 "compatibility level 1.1 or above (use compat=1.1 or " 2682 "greater)"); 2683 return 0; 2684 } 2685 2686 return refcount_bits; 2687 } 2688 2689 static int qcow2_create2(const char *filename, int64_t total_size, 2690 const char *backing_file, const char *backing_format, 2691 int flags, size_t cluster_size, PreallocMode prealloc, 2692 QemuOpts *opts, int version, int refcount_order, 2693 const char *encryptfmt, Error **errp) 2694 { 2695 QDict *options; 2696 2697 /* 2698 * Open the image file and write a minimal qcow2 header. 2699 * 2700 * We keep things simple and start with a zero-sized image. We also 2701 * do without refcount blocks or a L1 table for now. We'll fix the 2702 * inconsistency later. 
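     *
     * The overall sequence below is: create the protocol-level file, write a
     * minimal header plus an empty refcount table, reopen the file as qcow2
     * so the regular allocation code can account for those first clusters,
     * grow the image to its requested size and then apply the optional parts
     * (backing file, encryption, metadata preallocation).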
2703 * 2704 * We do need a refcount table because growing the refcount table means 2705 * allocating two new refcount blocks - the seconds of which would be at 2706 * 2 GB for 64k clusters, and we don't want to have a 2 GB initial file 2707 * size for any qcow2 image. 2708 */ 2709 BlockBackend *blk; 2710 QCowHeader *header; 2711 uint64_t* refcount_table; 2712 Error *local_err = NULL; 2713 int ret; 2714 2715 if (prealloc == PREALLOC_MODE_FULL || prealloc == PREALLOC_MODE_FALLOC) { 2716 int64_t prealloc_size = 2717 qcow2_calc_prealloc_size(total_size, cluster_size, refcount_order); 2718 qemu_opt_set_number(opts, BLOCK_OPT_SIZE, prealloc_size, &error_abort); 2719 qemu_opt_set(opts, BLOCK_OPT_PREALLOC, PreallocMode_str(prealloc), 2720 &error_abort); 2721 } 2722 2723 ret = bdrv_create_file(filename, opts, &local_err); 2724 if (ret < 0) { 2725 error_propagate(errp, local_err); 2726 return ret; 2727 } 2728 2729 blk = blk_new_open(filename, NULL, NULL, 2730 BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, 2731 &local_err); 2732 if (blk == NULL) { 2733 error_propagate(errp, local_err); 2734 return -EIO; 2735 } 2736 2737 blk_set_allow_write_beyond_eof(blk, true); 2738 2739 /* Write the header */ 2740 QEMU_BUILD_BUG_ON((1 << MIN_CLUSTER_BITS) < sizeof(*header)); 2741 header = g_malloc0(cluster_size); 2742 *header = (QCowHeader) { 2743 .magic = cpu_to_be32(QCOW_MAGIC), 2744 .version = cpu_to_be32(version), 2745 .cluster_bits = cpu_to_be32(ctz32(cluster_size)), 2746 .size = cpu_to_be64(0), 2747 .l1_table_offset = cpu_to_be64(0), 2748 .l1_size = cpu_to_be32(0), 2749 .refcount_table_offset = cpu_to_be64(cluster_size), 2750 .refcount_table_clusters = cpu_to_be32(1), 2751 .refcount_order = cpu_to_be32(refcount_order), 2752 .header_length = cpu_to_be32(sizeof(*header)), 2753 }; 2754 2755 /* We'll update this to correct value later */ 2756 header->crypt_method = cpu_to_be32(QCOW_CRYPT_NONE); 2757 2758 if (flags & BLOCK_FLAG_LAZY_REFCOUNTS) { 2759 header->compatible_features |= 2760 cpu_to_be64(QCOW2_COMPAT_LAZY_REFCOUNTS); 2761 } 2762 2763 ret = blk_pwrite(blk, 0, header, cluster_size, 0); 2764 g_free(header); 2765 if (ret < 0) { 2766 error_setg_errno(errp, -ret, "Could not write qcow2 header"); 2767 goto out; 2768 } 2769 2770 /* Write a refcount table with one refcount block */ 2771 refcount_table = g_malloc0(2 * cluster_size); 2772 refcount_table[0] = cpu_to_be64(2 * cluster_size); 2773 ret = blk_pwrite(blk, cluster_size, refcount_table, 2 * cluster_size, 0); 2774 g_free(refcount_table); 2775 2776 if (ret < 0) { 2777 error_setg_errno(errp, -ret, "Could not write refcount table"); 2778 goto out; 2779 } 2780 2781 blk_unref(blk); 2782 blk = NULL; 2783 2784 /* 2785 * And now open the image and make it consistent first (i.e. 
increase the 2786 * refcount of the cluster that is occupied by the header and the refcount 2787 * table) 2788 */ 2789 options = qdict_new(); 2790 qdict_put_str(options, "driver", "qcow2"); 2791 blk = blk_new_open(filename, NULL, options, 2792 BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_NO_FLUSH, 2793 &local_err); 2794 if (blk == NULL) { 2795 error_propagate(errp, local_err); 2796 ret = -EIO; 2797 goto out; 2798 } 2799 2800 ret = qcow2_alloc_clusters(blk_bs(blk), 3 * cluster_size); 2801 if (ret < 0) { 2802 error_setg_errno(errp, -ret, "Could not allocate clusters for qcow2 " 2803 "header and refcount table"); 2804 goto out; 2805 2806 } else if (ret != 0) { 2807 error_report("Huh, first cluster in empty image is already in use?"); 2808 abort(); 2809 } 2810 2811 /* Create a full header (including things like feature table) */ 2812 ret = qcow2_update_header(blk_bs(blk)); 2813 if (ret < 0) { 2814 error_setg_errno(errp, -ret, "Could not update qcow2 header"); 2815 goto out; 2816 } 2817 2818 /* Okay, now that we have a valid image, let's give it the right size */ 2819 ret = blk_truncate(blk, total_size, PREALLOC_MODE_OFF, errp); 2820 if (ret < 0) { 2821 error_prepend(errp, "Could not resize image: "); 2822 goto out; 2823 } 2824 2825 /* Want a backing file? There you go.*/ 2826 if (backing_file) { 2827 ret = bdrv_change_backing_file(blk_bs(blk), backing_file, backing_format); 2828 if (ret < 0) { 2829 error_setg_errno(errp, -ret, "Could not assign backing file '%s' " 2830 "with format '%s'", backing_file, backing_format); 2831 goto out; 2832 } 2833 } 2834 2835 /* Want encryption? There you go. */ 2836 if (encryptfmt) { 2837 ret = qcow2_set_up_encryption(blk_bs(blk), encryptfmt, opts, errp); 2838 if (ret < 0) { 2839 goto out; 2840 } 2841 } 2842 2843 /* And if we're supposed to preallocate metadata, do that now */ 2844 if (prealloc != PREALLOC_MODE_OFF) { 2845 ret = preallocate(blk_bs(blk), 0, total_size); 2846 if (ret < 0) { 2847 error_setg_errno(errp, -ret, "Could not preallocate metadata"); 2848 goto out; 2849 } 2850 } 2851 2852 blk_unref(blk); 2853 blk = NULL; 2854 2855 /* Reopen the image without BDRV_O_NO_FLUSH to flush it before returning. 2856 * Using BDRV_O_NO_IO, since encryption is now setup we don't want to 2857 * have to setup decryption context. We're not doing any I/O on the top 2858 * level BlockDriverState, only lower layers, where BDRV_O_NO_IO does 2859 * not have effect. 
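     * (The flush itself happens once the final reference to this
     * BlockBackend is dropped at the end of the function.)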
2860 */ 2861 options = qdict_new(); 2862 qdict_put_str(options, "driver", "qcow2"); 2863 blk = blk_new_open(filename, NULL, options, 2864 BDRV_O_RDWR | BDRV_O_NO_BACKING | BDRV_O_NO_IO, 2865 &local_err); 2866 if (blk == NULL) { 2867 error_propagate(errp, local_err); 2868 ret = -EIO; 2869 goto out; 2870 } 2871 2872 ret = 0; 2873 out: 2874 if (blk) { 2875 blk_unref(blk); 2876 } 2877 return ret; 2878 } 2879 2880 static int qcow2_create(const char *filename, QemuOpts *opts, Error **errp) 2881 { 2882 char *backing_file = NULL; 2883 char *backing_fmt = NULL; 2884 char *buf = NULL; 2885 uint64_t size = 0; 2886 int flags = 0; 2887 size_t cluster_size = DEFAULT_CLUSTER_SIZE; 2888 PreallocMode prealloc; 2889 int version; 2890 uint64_t refcount_bits; 2891 int refcount_order; 2892 char *encryptfmt = NULL; 2893 Error *local_err = NULL; 2894 int ret; 2895 2896 /* Read out options */ 2897 size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0), 2898 BDRV_SECTOR_SIZE); 2899 backing_file = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE); 2900 backing_fmt = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FMT); 2901 encryptfmt = qemu_opt_get_del(opts, BLOCK_OPT_ENCRYPT_FORMAT); 2902 if (encryptfmt) { 2903 if (qemu_opt_get(opts, BLOCK_OPT_ENCRYPT)) { 2904 error_setg(errp, "Options " BLOCK_OPT_ENCRYPT " and " 2905 BLOCK_OPT_ENCRYPT_FORMAT " are mutually exclusive"); 2906 ret = -EINVAL; 2907 goto finish; 2908 } 2909 } else if (qemu_opt_get_bool_del(opts, BLOCK_OPT_ENCRYPT, false)) { 2910 encryptfmt = g_strdup("aes"); 2911 } 2912 cluster_size = qcow2_opt_get_cluster_size_del(opts, &local_err); 2913 if (local_err) { 2914 error_propagate(errp, local_err); 2915 ret = -EINVAL; 2916 goto finish; 2917 } 2918 buf = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC); 2919 prealloc = qapi_enum_parse(&PreallocMode_lookup, buf, 2920 PREALLOC_MODE_OFF, &local_err); 2921 if (local_err) { 2922 error_propagate(errp, local_err); 2923 ret = -EINVAL; 2924 goto finish; 2925 } 2926 2927 version = qcow2_opt_get_version_del(opts, &local_err); 2928 if (local_err) { 2929 error_propagate(errp, local_err); 2930 ret = -EINVAL; 2931 goto finish; 2932 } 2933 2934 if (qemu_opt_get_bool_del(opts, BLOCK_OPT_LAZY_REFCOUNTS, false)) { 2935 flags |= BLOCK_FLAG_LAZY_REFCOUNTS; 2936 } 2937 2938 if (backing_file && prealloc != PREALLOC_MODE_OFF) { 2939 error_setg(errp, "Backing file and preallocation cannot be used at " 2940 "the same time"); 2941 ret = -EINVAL; 2942 goto finish; 2943 } 2944 2945 if (version < 3 && (flags & BLOCK_FLAG_LAZY_REFCOUNTS)) { 2946 error_setg(errp, "Lazy refcounts only supported with compatibility " 2947 "level 1.1 and above (use compat=1.1 or greater)"); 2948 ret = -EINVAL; 2949 goto finish; 2950 } 2951 2952 refcount_bits = qcow2_opt_get_refcount_bits_del(opts, version, &local_err); 2953 if (local_err) { 2954 error_propagate(errp, local_err); 2955 ret = -EINVAL; 2956 goto finish; 2957 } 2958 2959 refcount_order = ctz32(refcount_bits); 2960 2961 ret = qcow2_create2(filename, size, backing_file, backing_fmt, flags, 2962 cluster_size, prealloc, opts, version, refcount_order, 2963 encryptfmt, &local_err); 2964 error_propagate(errp, local_err); 2965 2966 finish: 2967 g_free(backing_file); 2968 g_free(backing_fmt); 2969 g_free(encryptfmt); 2970 g_free(buf); 2971 return ret; 2972 } 2973 2974 2975 static bool is_zero_sectors(BlockDriverState *bs, int64_t start, 2976 uint32_t count) 2977 { 2978 int nr; 2979 BlockDriverState *file; 2980 int64_t res; 2981 2982 if (start + count > bs->total_sectors) { 2983 count = bs->total_sectors - start; 
2984 } 2985 2986 if (!count) { 2987 return true; 2988 } 2989 res = bdrv_get_block_status_above(bs, NULL, start, count, 2990 &nr, &file); 2991 return res >= 0 && (res & BDRV_BLOCK_ZERO) && nr == count; 2992 } 2993 2994 static coroutine_fn int qcow2_co_pwrite_zeroes(BlockDriverState *bs, 2995 int64_t offset, int bytes, BdrvRequestFlags flags) 2996 { 2997 int ret; 2998 BDRVQcow2State *s = bs->opaque; 2999 3000 uint32_t head = offset % s->cluster_size; 3001 uint32_t tail = (offset + bytes) % s->cluster_size; 3002 3003 trace_qcow2_pwrite_zeroes_start_req(qemu_coroutine_self(), offset, bytes); 3004 if (offset + bytes == bs->total_sectors * BDRV_SECTOR_SIZE) { 3005 tail = 0; 3006 } 3007 3008 if (head || tail) { 3009 int64_t cl_start = (offset - head) >> BDRV_SECTOR_BITS; 3010 uint64_t off; 3011 unsigned int nr; 3012 3013 assert(head + bytes <= s->cluster_size); 3014 3015 /* check whether remainder of cluster already reads as zero */ 3016 if (!(is_zero_sectors(bs, cl_start, 3017 DIV_ROUND_UP(head, BDRV_SECTOR_SIZE)) && 3018 is_zero_sectors(bs, (offset + bytes) >> BDRV_SECTOR_BITS, 3019 DIV_ROUND_UP(-tail & (s->cluster_size - 1), 3020 BDRV_SECTOR_SIZE)))) { 3021 return -ENOTSUP; 3022 } 3023 3024 qemu_co_mutex_lock(&s->lock); 3025 /* We can have new write after previous check */ 3026 offset = cl_start << BDRV_SECTOR_BITS; 3027 bytes = s->cluster_size; 3028 nr = s->cluster_size; 3029 ret = qcow2_get_cluster_offset(bs, offset, &nr, &off); 3030 if (ret != QCOW2_CLUSTER_UNALLOCATED && 3031 ret != QCOW2_CLUSTER_ZERO_PLAIN && 3032 ret != QCOW2_CLUSTER_ZERO_ALLOC) { 3033 qemu_co_mutex_unlock(&s->lock); 3034 return -ENOTSUP; 3035 } 3036 } else { 3037 qemu_co_mutex_lock(&s->lock); 3038 } 3039 3040 trace_qcow2_pwrite_zeroes(qemu_coroutine_self(), offset, bytes); 3041 3042 /* Whatever is left can use real zero clusters */ 3043 ret = qcow2_cluster_zeroize(bs, offset, bytes, flags); 3044 qemu_co_mutex_unlock(&s->lock); 3045 3046 return ret; 3047 } 3048 3049 static coroutine_fn int qcow2_co_pdiscard(BlockDriverState *bs, 3050 int64_t offset, int bytes) 3051 { 3052 int ret; 3053 BDRVQcow2State *s = bs->opaque; 3054 3055 if (!QEMU_IS_ALIGNED(offset | bytes, s->cluster_size)) { 3056 assert(bytes < s->cluster_size); 3057 /* Ignore partial clusters, except for the special case of the 3058 * complete partial cluster at the end of an unaligned file */ 3059 if (!QEMU_IS_ALIGNED(offset, s->cluster_size) || 3060 offset + bytes != bs->total_sectors * BDRV_SECTOR_SIZE) { 3061 return -ENOTSUP; 3062 } 3063 } 3064 3065 qemu_co_mutex_lock(&s->lock); 3066 ret = qcow2_cluster_discard(bs, offset, bytes, QCOW2_DISCARD_REQUEST, 3067 false); 3068 qemu_co_mutex_unlock(&s->lock); 3069 return ret; 3070 } 3071 3072 static int qcow2_truncate(BlockDriverState *bs, int64_t offset, 3073 PreallocMode prealloc, Error **errp) 3074 { 3075 BDRVQcow2State *s = bs->opaque; 3076 uint64_t old_length; 3077 int64_t new_l1_size; 3078 int ret; 3079 3080 if (prealloc != PREALLOC_MODE_OFF && prealloc != PREALLOC_MODE_METADATA && 3081 prealloc != PREALLOC_MODE_FALLOC && prealloc != PREALLOC_MODE_FULL) 3082 { 3083 error_setg(errp, "Unsupported preallocation mode '%s'", 3084 PreallocMode_str(prealloc)); 3085 return -ENOTSUP; 3086 } 3087 3088 if (offset & 511) { 3089 error_setg(errp, "The new size must be a multiple of 512"); 3090 return -EINVAL; 3091 } 3092 3093 /* cannot proceed if image has snapshots */ 3094 if (s->nb_snapshots) { 3095 error_setg(errp, "Can't resize an image which has snapshots"); 3096 return -ENOTSUP; 3097 } 3098 3099 /* cannot proceed if 
image has bitmaps */ 3100 if (s->nb_bitmaps) { 3101 /* TODO: resize bitmaps in the image */ 3102 error_setg(errp, "Can't resize an image which has bitmaps"); 3103 return -ENOTSUP; 3104 } 3105 3106 old_length = bs->total_sectors * 512; 3107 new_l1_size = size_to_l1(s, offset); 3108 3109 if (offset < old_length) { 3110 int64_t last_cluster, old_file_size; 3111 if (prealloc != PREALLOC_MODE_OFF) { 3112 error_setg(errp, 3113 "Preallocation can't be used for shrinking an image"); 3114 return -EINVAL; 3115 } 3116 3117 ret = qcow2_cluster_discard(bs, ROUND_UP(offset, s->cluster_size), 3118 old_length - ROUND_UP(offset, 3119 s->cluster_size), 3120 QCOW2_DISCARD_ALWAYS, true); 3121 if (ret < 0) { 3122 error_setg_errno(errp, -ret, "Failed to discard cropped clusters"); 3123 return ret; 3124 } 3125 3126 ret = qcow2_shrink_l1_table(bs, new_l1_size); 3127 if (ret < 0) { 3128 error_setg_errno(errp, -ret, 3129 "Failed to reduce the number of L2 tables"); 3130 return ret; 3131 } 3132 3133 ret = qcow2_shrink_reftable(bs); 3134 if (ret < 0) { 3135 error_setg_errno(errp, -ret, 3136 "Failed to discard unused refblocks"); 3137 return ret; 3138 } 3139 3140 old_file_size = bdrv_getlength(bs->file->bs); 3141 if (old_file_size < 0) { 3142 error_setg_errno(errp, -old_file_size, 3143 "Failed to inquire current file length"); 3144 return old_file_size; 3145 } 3146 last_cluster = qcow2_get_last_cluster(bs, old_file_size); 3147 if (last_cluster < 0) { 3148 error_setg_errno(errp, -last_cluster, 3149 "Failed to find the last cluster"); 3150 return last_cluster; 3151 } 3152 if ((last_cluster + 1) * s->cluster_size < old_file_size) { 3153 ret = bdrv_truncate(bs->file, (last_cluster + 1) * s->cluster_size, 3154 PREALLOC_MODE_OFF, NULL); 3155 if (ret < 0) { 3156 warn_report("Failed to truncate the tail of the image: %s", 3157 strerror(-ret)); 3158 ret = 0; 3159 } 3160 } 3161 } else { 3162 ret = qcow2_grow_l1_table(bs, new_l1_size, true); 3163 if (ret < 0) { 3164 error_setg_errno(errp, -ret, "Failed to grow the L1 table"); 3165 return ret; 3166 } 3167 } 3168 3169 switch (prealloc) { 3170 case PREALLOC_MODE_OFF: 3171 break; 3172 3173 case PREALLOC_MODE_METADATA: 3174 ret = preallocate(bs, old_length, offset); 3175 if (ret < 0) { 3176 error_setg_errno(errp, -ret, "Preallocation failed"); 3177 return ret; 3178 } 3179 break; 3180 3181 case PREALLOC_MODE_FALLOC: 3182 case PREALLOC_MODE_FULL: 3183 { 3184 int64_t allocation_start, host_offset, guest_offset; 3185 int64_t clusters_allocated; 3186 int64_t old_file_size, new_file_size; 3187 uint64_t nb_new_data_clusters, nb_new_l2_tables; 3188 3189 old_file_size = bdrv_getlength(bs->file->bs); 3190 if (old_file_size < 0) { 3191 error_setg_errno(errp, -old_file_size, 3192 "Failed to inquire current file length"); 3193 return old_file_size; 3194 } 3195 3196 nb_new_data_clusters = DIV_ROUND_UP(offset - old_length, 3197 s->cluster_size); 3198 3199 /* This is an overestimation; we will not actually allocate space for 3200 * these in the file but just make sure the new refcount structures are 3201 * able to cover them so we will not have to allocate new refblocks 3202 * while entering the data blocks in the potentially new L2 tables. 3203 * (We do not actually care where the L2 tables are placed. Maybe they 3204 * are already allocated or they can be placed somewhere before 3205 * @old_file_size. It does not matter because they will be fully 3206 * allocated automatically, so they do not need to be covered by the 3207 * preallocation. 
All that matters is that we will not have to allocate 3208 * new refcount structures for them.) */ 3209 nb_new_l2_tables = DIV_ROUND_UP(nb_new_data_clusters, 3210 s->cluster_size / sizeof(uint64_t)); 3211 /* The cluster range may not be aligned to L2 boundaries, so add one L2 3212 * table for a potential head/tail */ 3213 nb_new_l2_tables++; 3214 3215 allocation_start = qcow2_refcount_area(bs, old_file_size, 3216 nb_new_data_clusters + 3217 nb_new_l2_tables, 3218 true, 0, 0); 3219 if (allocation_start < 0) { 3220 error_setg_errno(errp, -allocation_start, 3221 "Failed to resize refcount structures"); 3222 return allocation_start; 3223 } 3224 3225 clusters_allocated = qcow2_alloc_clusters_at(bs, allocation_start, 3226 nb_new_data_clusters); 3227 if (clusters_allocated < 0) { 3228 error_setg_errno(errp, -clusters_allocated, 3229 "Failed to allocate data clusters"); 3230 return -clusters_allocated; 3231 } 3232 3233 assert(clusters_allocated == nb_new_data_clusters); 3234 3235 /* Allocate the data area */ 3236 new_file_size = allocation_start + 3237 nb_new_data_clusters * s->cluster_size; 3238 ret = bdrv_truncate(bs->file, new_file_size, prealloc, errp); 3239 if (ret < 0) { 3240 error_prepend(errp, "Failed to resize underlying file: "); 3241 qcow2_free_clusters(bs, allocation_start, 3242 nb_new_data_clusters * s->cluster_size, 3243 QCOW2_DISCARD_OTHER); 3244 return ret; 3245 } 3246 3247 /* Create the necessary L2 entries */ 3248 host_offset = allocation_start; 3249 guest_offset = old_length; 3250 while (nb_new_data_clusters) { 3251 int64_t guest_cluster = guest_offset >> s->cluster_bits; 3252 int64_t nb_clusters = MIN(nb_new_data_clusters, 3253 s->l2_size - guest_cluster % s->l2_size); 3254 QCowL2Meta allocation = { 3255 .offset = guest_offset, 3256 .alloc_offset = host_offset, 3257 .nb_clusters = nb_clusters, 3258 }; 3259 qemu_co_queue_init(&allocation.dependent_requests); 3260 3261 ret = qcow2_alloc_cluster_link_l2(bs, &allocation); 3262 if (ret < 0) { 3263 error_setg_errno(errp, -ret, "Failed to update L2 tables"); 3264 qcow2_free_clusters(bs, host_offset, 3265 nb_new_data_clusters * s->cluster_size, 3266 QCOW2_DISCARD_OTHER); 3267 return ret; 3268 } 3269 3270 guest_offset += nb_clusters * s->cluster_size; 3271 host_offset += nb_clusters * s->cluster_size; 3272 nb_new_data_clusters -= nb_clusters; 3273 } 3274 break; 3275 } 3276 3277 default: 3278 g_assert_not_reached(); 3279 } 3280 3281 if (prealloc != PREALLOC_MODE_OFF) { 3282 /* Flush metadata before actually changing the image size */ 3283 ret = bdrv_flush(bs); 3284 if (ret < 0) { 3285 error_setg_errno(errp, -ret, 3286 "Failed to flush the preallocated area to disk"); 3287 return ret; 3288 } 3289 } 3290 3291 /* write updated header.size */ 3292 offset = cpu_to_be64(offset); 3293 ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, size), 3294 &offset, sizeof(uint64_t)); 3295 if (ret < 0) { 3296 error_setg_errno(errp, -ret, "Failed to update the image size"); 3297 return ret; 3298 } 3299 3300 s->l1_vm_state_index = new_l1_size; 3301 return 0; 3302 } 3303 3304 /* XXX: put compressed sectors first, then all the cluster aligned 3305 tables to avoid losing bytes in alignment */ 3306 static coroutine_fn int 3307 qcow2_co_pwritev_compressed(BlockDriverState *bs, uint64_t offset, 3308 uint64_t bytes, QEMUIOVector *qiov) 3309 { 3310 BDRVQcow2State *s = bs->opaque; 3311 QEMUIOVector hd_qiov; 3312 struct iovec iov; 3313 z_stream strm; 3314 int ret, out_len; 3315 uint8_t *buf, *out_buf; 3316 int64_t cluster_offset; 3317 3318 if (bytes == 0) { 3319 
/* align end of file to a sector boundary to ease reading with 3320 sector based I/Os */ 3321 cluster_offset = bdrv_getlength(bs->file->bs); 3322 if (cluster_offset < 0) { 3323 return cluster_offset; 3324 } 3325 return bdrv_truncate(bs->file, cluster_offset, PREALLOC_MODE_OFF, NULL); 3326 } 3327 3328 buf = qemu_blockalign(bs, s->cluster_size); 3329 if (bytes != s->cluster_size) { 3330 if (bytes > s->cluster_size || 3331 offset + bytes != bs->total_sectors << BDRV_SECTOR_BITS) 3332 { 3333 qemu_vfree(buf); 3334 return -EINVAL; 3335 } 3336 /* Zero-pad last write if image size is not cluster aligned */ 3337 memset(buf + bytes, 0, s->cluster_size - bytes); 3338 } 3339 qemu_iovec_to_buf(qiov, 0, buf, bytes); 3340 3341 out_buf = g_malloc(s->cluster_size); 3342 3343 /* best compression, small window, no zlib header */ 3344 memset(&strm, 0, sizeof(strm)); 3345 ret = deflateInit2(&strm, Z_DEFAULT_COMPRESSION, 3346 Z_DEFLATED, -12, 3347 9, Z_DEFAULT_STRATEGY); 3348 if (ret != 0) { 3349 ret = -EINVAL; 3350 goto fail; 3351 } 3352 3353 strm.avail_in = s->cluster_size; 3354 strm.next_in = (uint8_t *)buf; 3355 strm.avail_out = s->cluster_size; 3356 strm.next_out = out_buf; 3357 3358 ret = deflate(&strm, Z_FINISH); 3359 if (ret != Z_STREAM_END && ret != Z_OK) { 3360 deflateEnd(&strm); 3361 ret = -EINVAL; 3362 goto fail; 3363 } 3364 out_len = strm.next_out - out_buf; 3365 3366 deflateEnd(&strm); 3367 3368 if (ret != Z_STREAM_END || out_len >= s->cluster_size) { 3369 /* could not compress: write normal cluster */ 3370 ret = qcow2_co_pwritev(bs, offset, bytes, qiov, 0); 3371 if (ret < 0) { 3372 goto fail; 3373 } 3374 goto success; 3375 } 3376 3377 qemu_co_mutex_lock(&s->lock); 3378 cluster_offset = 3379 qcow2_alloc_compressed_cluster_offset(bs, offset, out_len); 3380 if (!cluster_offset) { 3381 qemu_co_mutex_unlock(&s->lock); 3382 ret = -EIO; 3383 goto fail; 3384 } 3385 cluster_offset &= s->cluster_offset_mask; 3386 3387 ret = qcow2_pre_write_overlap_check(bs, 0, cluster_offset, out_len); 3388 qemu_co_mutex_unlock(&s->lock); 3389 if (ret < 0) { 3390 goto fail; 3391 } 3392 3393 iov = (struct iovec) { 3394 .iov_base = out_buf, 3395 .iov_len = out_len, 3396 }; 3397 qemu_iovec_init_external(&hd_qiov, &iov, 1); 3398 3399 BLKDBG_EVENT(bs->file, BLKDBG_WRITE_COMPRESSED); 3400 ret = bdrv_co_pwritev(bs->file, cluster_offset, out_len, &hd_qiov, 0); 3401 if (ret < 0) { 3402 goto fail; 3403 } 3404 success: 3405 ret = 0; 3406 fail: 3407 qemu_vfree(buf); 3408 g_free(out_buf); 3409 return ret; 3410 } 3411 3412 static int make_completely_empty(BlockDriverState *bs) 3413 { 3414 BDRVQcow2State *s = bs->opaque; 3415 Error *local_err = NULL; 3416 int ret, l1_clusters; 3417 int64_t offset; 3418 uint64_t *new_reftable = NULL; 3419 uint64_t rt_entry, l1_size2; 3420 struct { 3421 uint64_t l1_offset; 3422 uint64_t reftable_offset; 3423 uint32_t reftable_clusters; 3424 } QEMU_PACKED l1_ofs_rt_ofs_cls; 3425 3426 ret = qcow2_cache_empty(bs, s->l2_table_cache); 3427 if (ret < 0) { 3428 goto fail; 3429 } 3430 3431 ret = qcow2_cache_empty(bs, s->refcount_block_cache); 3432 if (ret < 0) { 3433 goto fail; 3434 } 3435 3436 /* Refcounts will be broken utterly */ 3437 ret = qcow2_mark_dirty(bs); 3438 if (ret < 0) { 3439 goto fail; 3440 } 3441 3442 BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE); 3443 3444 l1_clusters = DIV_ROUND_UP(s->l1_size, s->cluster_size / sizeof(uint64_t)); 3445 l1_size2 = (uint64_t)s->l1_size * sizeof(uint64_t); 3446 3447 /* After this call, neither the in-memory nor the on-disk refcount 3448 * information accurately describe 
the actual references */ 3449 3450 ret = bdrv_pwrite_zeroes(bs->file, s->l1_table_offset, 3451 l1_clusters * s->cluster_size, 0); 3452 if (ret < 0) { 3453 goto fail_broken_refcounts; 3454 } 3455 memset(s->l1_table, 0, l1_size2); 3456 3457 BLKDBG_EVENT(bs->file, BLKDBG_EMPTY_IMAGE_PREPARE); 3458 3459 /* Overwrite enough clusters at the beginning of the sectors to place 3460 * the refcount table, a refcount block and the L1 table in; this may 3461 * overwrite parts of the existing refcount and L1 table, which is not 3462 * an issue because the dirty flag is set, complete data loss is in fact 3463 * desired and partial data loss is consequently fine as well */ 3464 ret = bdrv_pwrite_zeroes(bs->file, s->cluster_size, 3465 (2 + l1_clusters) * s->cluster_size, 0); 3466 /* This call (even if it failed overall) may have overwritten on-disk 3467 * refcount structures; in that case, the in-memory refcount information 3468 * will probably differ from the on-disk information which makes the BDS 3469 * unusable */ 3470 if (ret < 0) { 3471 goto fail_broken_refcounts; 3472 } 3473 3474 BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE); 3475 BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_UPDATE); 3476 3477 /* "Create" an empty reftable (one cluster) directly after the image 3478 * header and an empty L1 table three clusters after the image header; 3479 * the cluster between those two will be used as the first refblock */ 3480 l1_ofs_rt_ofs_cls.l1_offset = cpu_to_be64(3 * s->cluster_size); 3481 l1_ofs_rt_ofs_cls.reftable_offset = cpu_to_be64(s->cluster_size); 3482 l1_ofs_rt_ofs_cls.reftable_clusters = cpu_to_be32(1); 3483 ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_table_offset), 3484 &l1_ofs_rt_ofs_cls, sizeof(l1_ofs_rt_ofs_cls)); 3485 if (ret < 0) { 3486 goto fail_broken_refcounts; 3487 } 3488 3489 s->l1_table_offset = 3 * s->cluster_size; 3490 3491 new_reftable = g_try_new0(uint64_t, s->cluster_size / sizeof(uint64_t)); 3492 if (!new_reftable) { 3493 ret = -ENOMEM; 3494 goto fail_broken_refcounts; 3495 } 3496 3497 s->refcount_table_offset = s->cluster_size; 3498 s->refcount_table_size = s->cluster_size / sizeof(uint64_t); 3499 s->max_refcount_table_index = 0; 3500 3501 g_free(s->refcount_table); 3502 s->refcount_table = new_reftable; 3503 new_reftable = NULL; 3504 3505 /* Now the in-memory refcount information again corresponds to the on-disk 3506 * information (reftable is empty and no refblocks (the refblock cache is 3507 * empty)); however, this means some clusters (e.g. 
the image header) are 3508 * referenced, but not refcounted, but the normal qcow2 code assumes that 3509 * the in-memory information is always correct */ 3510 3511 BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC); 3512 3513 /* Enter the first refblock into the reftable */ 3514 rt_entry = cpu_to_be64(2 * s->cluster_size); 3515 ret = bdrv_pwrite_sync(bs->file, s->cluster_size, 3516 &rt_entry, sizeof(rt_entry)); 3517 if (ret < 0) { 3518 goto fail_broken_refcounts; 3519 } 3520 s->refcount_table[0] = 2 * s->cluster_size; 3521 3522 s->free_cluster_index = 0; 3523 assert(3 + l1_clusters <= s->refcount_block_size); 3524 offset = qcow2_alloc_clusters(bs, 3 * s->cluster_size + l1_size2); 3525 if (offset < 0) { 3526 ret = offset; 3527 goto fail_broken_refcounts; 3528 } else if (offset > 0) { 3529 error_report("First cluster in emptied image is in use"); 3530 abort(); 3531 } 3532 3533 /* Now finally the in-memory information corresponds to the on-disk 3534 * structures and is correct */ 3535 ret = qcow2_mark_clean(bs); 3536 if (ret < 0) { 3537 goto fail; 3538 } 3539 3540 ret = bdrv_truncate(bs->file, (3 + l1_clusters) * s->cluster_size, 3541 PREALLOC_MODE_OFF, &local_err); 3542 if (ret < 0) { 3543 error_report_err(local_err); 3544 goto fail; 3545 } 3546 3547 return 0; 3548 3549 fail_broken_refcounts: 3550 /* The BDS is unusable at this point. If we wanted to make it usable, we 3551 * would have to call qcow2_refcount_close(), qcow2_refcount_init(), 3552 * qcow2_check_refcounts(), qcow2_refcount_close() and qcow2_refcount_init() 3553 * again. However, because the functions which could have caused this error 3554 * path to be taken are used by those functions as well, it's very likely 3555 * that that sequence will fail as well. Therefore, just eject the BDS. */ 3556 bs->drv = NULL; 3557 3558 fail: 3559 g_free(new_reftable); 3560 return ret; 3561 } 3562 3563 static int qcow2_make_empty(BlockDriverState *bs) 3564 { 3565 BDRVQcow2State *s = bs->opaque; 3566 uint64_t offset, end_offset; 3567 int step = QEMU_ALIGN_DOWN(INT_MAX, s->cluster_size); 3568 int l1_clusters, ret = 0; 3569 3570 l1_clusters = DIV_ROUND_UP(s->l1_size, s->cluster_size / sizeof(uint64_t)); 3571 3572 if (s->qcow_version >= 3 && !s->snapshots && 3573 3 + l1_clusters <= s->refcount_block_size) { 3574 /* The following function only works for qcow2 v3 images (it requires 3575 * the dirty flag) and only as long as there are no snapshots (because 3576 * it completely empties the image). Furthermore, the L1 table and three 3577 * additional clusters (image header, refcount table, one refcount 3578 * block) have to fit inside one refcount block. */ 3579 return make_completely_empty(bs); 3580 } 3581 3582 /* This fallback code simply discards every active cluster; this is slow, 3583 * but works in all cases */ 3584 end_offset = bs->total_sectors * BDRV_SECTOR_SIZE; 3585 for (offset = 0; offset < end_offset; offset += step) { 3586 /* As this function is generally used after committing an external 3587 * snapshot, QCOW2_DISCARD_SNAPSHOT seems appropriate. Also, the 3588 * default action for this kind of discard is to pass the discard, 3589 * which will ideally result in an actually smaller image file, as 3590 * is probably desired. 
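         * For illustration: with the default 64 KiB clusters, step is
         * INT_MAX rounded down to a cluster boundary (just under 2 GiB),
         * so each loop iteration discards up to that much guest data.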
*/ 3591 ret = qcow2_cluster_discard(bs, offset, MIN(step, end_offset - offset), 3592 QCOW2_DISCARD_SNAPSHOT, true); 3593 if (ret < 0) { 3594 break; 3595 } 3596 } 3597 3598 return ret; 3599 } 3600 3601 static coroutine_fn int qcow2_co_flush_to_os(BlockDriverState *bs) 3602 { 3603 BDRVQcow2State *s = bs->opaque; 3604 int ret; 3605 3606 qemu_co_mutex_lock(&s->lock); 3607 ret = qcow2_cache_write(bs, s->l2_table_cache); 3608 if (ret < 0) { 3609 qemu_co_mutex_unlock(&s->lock); 3610 return ret; 3611 } 3612 3613 if (qcow2_need_accurate_refcounts(s)) { 3614 ret = qcow2_cache_write(bs, s->refcount_block_cache); 3615 if (ret < 0) { 3616 qemu_co_mutex_unlock(&s->lock); 3617 return ret; 3618 } 3619 } 3620 qemu_co_mutex_unlock(&s->lock); 3621 3622 return 0; 3623 } 3624 3625 static BlockMeasureInfo *qcow2_measure(QemuOpts *opts, BlockDriverState *in_bs, 3626 Error **errp) 3627 { 3628 Error *local_err = NULL; 3629 BlockMeasureInfo *info; 3630 uint64_t required = 0; /* bytes that contribute to required size */ 3631 uint64_t virtual_size; /* disk size as seen by guest */ 3632 uint64_t refcount_bits; 3633 uint64_t l2_tables; 3634 size_t cluster_size; 3635 int version; 3636 char *optstr; 3637 PreallocMode prealloc; 3638 bool has_backing_file; 3639 3640 /* Parse image creation options */ 3641 cluster_size = qcow2_opt_get_cluster_size_del(opts, &local_err); 3642 if (local_err) { 3643 goto err; 3644 } 3645 3646 version = qcow2_opt_get_version_del(opts, &local_err); 3647 if (local_err) { 3648 goto err; 3649 } 3650 3651 refcount_bits = qcow2_opt_get_refcount_bits_del(opts, version, &local_err); 3652 if (local_err) { 3653 goto err; 3654 } 3655 3656 optstr = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC); 3657 prealloc = qapi_enum_parse(&PreallocMode_lookup, optstr, 3658 PREALLOC_MODE_OFF, &local_err); 3659 g_free(optstr); 3660 if (local_err) { 3661 goto err; 3662 } 3663 3664 optstr = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE); 3665 has_backing_file = !!optstr; 3666 g_free(optstr); 3667 3668 virtual_size = align_offset(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0), 3669 cluster_size); 3670 3671 /* Check that virtual disk size is valid */ 3672 l2_tables = DIV_ROUND_UP(virtual_size / cluster_size, 3673 cluster_size / sizeof(uint64_t)); 3674 if (l2_tables * sizeof(uint64_t) > QCOW_MAX_L1_SIZE) { 3675 error_setg(&local_err, "The image size is too large " 3676 "(try using a larger cluster size)"); 3677 goto err; 3678 } 3679 3680 /* Account for input image */ 3681 if (in_bs) { 3682 int64_t ssize = bdrv_getlength(in_bs); 3683 if (ssize < 0) { 3684 error_setg_errno(&local_err, -ssize, 3685 "Unable to get image virtual_size"); 3686 goto err; 3687 } 3688 3689 virtual_size = align_offset(ssize, cluster_size); 3690 3691 if (has_backing_file) { 3692 /* We don't how much of the backing chain is shared by the input 3693 * image and the new image file. In the worst case the new image's 3694 * backing file has nothing in common with the input image. Be 3695 * conservative and assume all clusters need to be written. 
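             * (Without a backing file, the else branch below instead walks
             * the input image's block status and only counts clusters that
             * actually contain allocated data.)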
3696 */ 3697 required = virtual_size; 3698 } else { 3699 int64_t offset; 3700 int pnum = 0; 3701 3702 for (offset = 0; offset < ssize; 3703 offset += pnum * BDRV_SECTOR_SIZE) { 3704 int nb_sectors = MIN(ssize - offset, 3705 BDRV_REQUEST_MAX_BYTES) / BDRV_SECTOR_SIZE; 3706 BlockDriverState *file; 3707 int64_t ret; 3708 3709 ret = bdrv_get_block_status_above(in_bs, NULL, 3710 offset >> BDRV_SECTOR_BITS, 3711 nb_sectors, 3712 &pnum, &file); 3713 if (ret < 0) { 3714 error_setg_errno(&local_err, -ret, 3715 "Unable to get block status"); 3716 goto err; 3717 } 3718 3719 if (ret & BDRV_BLOCK_ZERO) { 3720 /* Skip zero regions (safe with no backing file) */ 3721 } else if ((ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED)) == 3722 (BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED)) { 3723 /* Extend pnum to end of cluster for next iteration */ 3724 pnum = (ROUND_UP(offset + pnum * BDRV_SECTOR_SIZE, 3725 cluster_size) - offset) >> BDRV_SECTOR_BITS; 3726 3727 /* Count clusters we've seen */ 3728 required += offset % cluster_size + pnum * BDRV_SECTOR_SIZE; 3729 } 3730 } 3731 } 3732 } 3733 3734 /* Take into account preallocation. Nothing special is needed for 3735 * PREALLOC_MODE_METADATA since metadata is always counted. 3736 */ 3737 if (prealloc == PREALLOC_MODE_FULL || prealloc == PREALLOC_MODE_FALLOC) { 3738 required = virtual_size; 3739 } 3740 3741 info = g_new(BlockMeasureInfo, 1); 3742 info->fully_allocated = 3743 qcow2_calc_prealloc_size(virtual_size, cluster_size, 3744 ctz32(refcount_bits)); 3745 3746 /* Remove data clusters that are not required. This overestimates the 3747 * required size because metadata needed for the fully allocated file is 3748 * still counted. 3749 */ 3750 info->required = info->fully_allocated - virtual_size + required; 3751 return info; 3752 3753 err: 3754 error_propagate(errp, local_err); 3755 return NULL; 3756 } 3757 3758 static int qcow2_get_info(BlockDriverState *bs, BlockDriverInfo *bdi) 3759 { 3760 BDRVQcow2State *s = bs->opaque; 3761 bdi->unallocated_blocks_are_zero = true; 3762 bdi->can_write_zeroes_with_unmap = (s->qcow_version >= 3); 3763 bdi->cluster_size = s->cluster_size; 3764 bdi->vm_state_offset = qcow2_vm_state_offset(s); 3765 return 0; 3766 } 3767 3768 static ImageInfoSpecific *qcow2_get_specific_info(BlockDriverState *bs) 3769 { 3770 BDRVQcow2State *s = bs->opaque; 3771 ImageInfoSpecific *spec_info; 3772 QCryptoBlockInfo *encrypt_info = NULL; 3773 3774 if (s->crypto != NULL) { 3775 encrypt_info = qcrypto_block_get_info(s->crypto, &error_abort); 3776 } 3777 3778 spec_info = g_new(ImageInfoSpecific, 1); 3779 *spec_info = (ImageInfoSpecific){ 3780 .type = IMAGE_INFO_SPECIFIC_KIND_QCOW2, 3781 .u.qcow2.data = g_new(ImageInfoSpecificQCow2, 1), 3782 }; 3783 if (s->qcow_version == 2) { 3784 *spec_info->u.qcow2.data = (ImageInfoSpecificQCow2){ 3785 .compat = g_strdup("0.10"), 3786 .refcount_bits = s->refcount_bits, 3787 }; 3788 } else if (s->qcow_version == 3) { 3789 *spec_info->u.qcow2.data = (ImageInfoSpecificQCow2){ 3790 .compat = g_strdup("1.1"), 3791 .lazy_refcounts = s->compatible_features & 3792 QCOW2_COMPAT_LAZY_REFCOUNTS, 3793 .has_lazy_refcounts = true, 3794 .corrupt = s->incompatible_features & 3795 QCOW2_INCOMPAT_CORRUPT, 3796 .has_corrupt = true, 3797 .refcount_bits = s->refcount_bits, 3798 }; 3799 } else { 3800 /* if this assertion fails, this probably means a new version was 3801 * added without having it covered here */ 3802 assert(false); 3803 } 3804 3805 if (encrypt_info) { 3806 ImageInfoSpecificQCow2Encryption *qencrypt = 3807 
g_new(ImageInfoSpecificQCow2Encryption, 1); 3808 switch (encrypt_info->format) { 3809 case Q_CRYPTO_BLOCK_FORMAT_QCOW: 3810 qencrypt->format = BLOCKDEV_QCOW2_ENCRYPTION_FORMAT_AES; 3811 qencrypt->u.aes = encrypt_info->u.qcow; 3812 break; 3813 case Q_CRYPTO_BLOCK_FORMAT_LUKS: 3814 qencrypt->format = BLOCKDEV_QCOW2_ENCRYPTION_FORMAT_LUKS; 3815 qencrypt->u.luks = encrypt_info->u.luks; 3816 break; 3817 default: 3818 abort(); 3819 } 3820 /* Since we did shallow copy above, erase any pointers 3821 * in the original info */ 3822 memset(&encrypt_info->u, 0, sizeof(encrypt_info->u)); 3823 qapi_free_QCryptoBlockInfo(encrypt_info); 3824 3825 spec_info->u.qcow2.data->has_encrypt = true; 3826 spec_info->u.qcow2.data->encrypt = qencrypt; 3827 } 3828 3829 return spec_info; 3830 } 3831 3832 static int qcow2_save_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, 3833 int64_t pos) 3834 { 3835 BDRVQcow2State *s = bs->opaque; 3836 3837 BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_SAVE); 3838 return bs->drv->bdrv_co_pwritev(bs, qcow2_vm_state_offset(s) + pos, 3839 qiov->size, qiov, 0); 3840 } 3841 3842 static int qcow2_load_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, 3843 int64_t pos) 3844 { 3845 BDRVQcow2State *s = bs->opaque; 3846 3847 BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_LOAD); 3848 return bs->drv->bdrv_co_preadv(bs, qcow2_vm_state_offset(s) + pos, 3849 qiov->size, qiov, 0); 3850 } 3851 3852 /* 3853 * Downgrades an image's version. To achieve this, any incompatible features 3854 * have to be removed. 3855 */ 3856 static int qcow2_downgrade(BlockDriverState *bs, int target_version, 3857 BlockDriverAmendStatusCB *status_cb, void *cb_opaque) 3858 { 3859 BDRVQcow2State *s = bs->opaque; 3860 int current_version = s->qcow_version; 3861 int ret; 3862 3863 if (target_version == current_version) { 3864 return 0; 3865 } else if (target_version > current_version) { 3866 return -EINVAL; 3867 } else if (target_version != 2) { 3868 return -EINVAL; 3869 } 3870 3871 if (s->refcount_order != 4) { 3872 error_report("compat=0.10 requires refcount_bits=16"); 3873 return -ENOTSUP; 3874 } 3875 3876 /* clear incompatible features */ 3877 if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) { 3878 ret = qcow2_mark_clean(bs); 3879 if (ret < 0) { 3880 return ret; 3881 } 3882 } 3883 3884 /* with QCOW2_INCOMPAT_CORRUPT, it is pretty much impossible to get here in 3885 * the first place; if that happens nonetheless, returning -ENOTSUP is the 3886 * best thing to do anyway */ 3887 3888 if (s->incompatible_features) { 3889 return -ENOTSUP; 3890 } 3891 3892 /* since we can ignore compatible features, we can set them to 0 as well */ 3893 s->compatible_features = 0; 3894 /* if lazy refcounts have been used, they have already been fixed through 3895 * clearing the dirty flag */ 3896 3897 /* clearing autoclear features is trivial */ 3898 s->autoclear_features = 0; 3899 3900 ret = qcow2_expand_zero_clusters(bs, status_cb, cb_opaque); 3901 if (ret < 0) { 3902 return ret; 3903 } 3904 3905 s->qcow_version = target_version; 3906 ret = qcow2_update_header(bs); 3907 if (ret < 0) { 3908 s->qcow_version = current_version; 3909 return ret; 3910 } 3911 return 0; 3912 } 3913 3914 typedef enum Qcow2AmendOperation { 3915 /* This is the value Qcow2AmendHelperCBInfo::last_operation will be 3916 * statically initialized to so that the helper CB can discern the first 3917 * invocation from an operation change */ 3918 QCOW2_NO_OPERATION = 0, 3919 3920 QCOW2_CHANGING_REFCOUNT_ORDER, 3921 QCOW2_DOWNGRADING, 3922 } Qcow2AmendOperation; 3923 3924 typedef struct 
static void qcow2_amend_helper_cb(BlockDriverState *bs,
                                  int64_t operation_offset,
                                  int64_t operation_work_size, void *opaque)
{
    Qcow2AmendHelperCBInfo *info = opaque;
    int64_t current_work_size;
    int64_t projected_work_size;

    if (info->current_operation != info->last_operation) {
        if (info->last_operation != QCOW2_NO_OPERATION) {
            info->offset_completed += info->last_work_size;
            info->operations_completed++;
        }

        info->last_operation = info->current_operation;
    }

    assert(info->total_operations > 0);
    assert(info->operations_completed < info->total_operations);

    info->last_work_size = operation_work_size;

    current_work_size = info->offset_completed + operation_work_size;

    /* current_work_size is the total work size for (operations_completed + 1)
     * operations (which includes this one), so multiply it by the number of
     * operations not covered and divide it by the number of operations
     * covered to get a projection for the operations not covered */
    projected_work_size = current_work_size * (info->total_operations -
                                               info->operations_completed - 1)
                                            / (info->operations_completed + 1);

    info->original_status_cb(bs, info->offset_completed + operation_offset,
                             current_work_size + projected_work_size,
                             info->original_cb_opaque);
}
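
/*
 * Usage sketch (illustrative, not part of the original source): the options
 * parsed below are the ones "qemu-img amend" accepts for qcow2 images, for
 * example:
 *
 *     qemu-img amend -o compat=0.10 test.qcow2
 *     qemu-img amend -o lazy_refcounts=on,refcount_bits=16 test.qcow2
 *
 * Options not given on the command line keep their current values; changing
 * the preallocation mode, the encryption flag, the encryption format or the
 * cluster size is rejected below.
 */
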
static int qcow2_amend_options(BlockDriverState *bs, QemuOpts *opts,
                               BlockDriverAmendStatusCB *status_cb,
                               void *cb_opaque)
{
    BDRVQcow2State *s = bs->opaque;
    int old_version = s->qcow_version, new_version = old_version;
    uint64_t new_size = 0;
    const char *backing_file = NULL, *backing_format = NULL;
    bool lazy_refcounts = s->use_lazy_refcounts;
    const char *compat = NULL;
    uint64_t cluster_size = s->cluster_size;
    bool encrypt;
    int encformat;
    int refcount_bits = s->refcount_bits;
    Error *local_err = NULL;
    int ret;
    QemuOptDesc *desc = opts->list->desc;
    Qcow2AmendHelperCBInfo helper_cb_info;

    while (desc && desc->name) {
        if (!qemu_opt_find(opts, desc->name)) {
            /* only change explicitly defined options */
            desc++;
            continue;
        }

        if (!strcmp(desc->name, BLOCK_OPT_COMPAT_LEVEL)) {
            compat = qemu_opt_get(opts, BLOCK_OPT_COMPAT_LEVEL);
            if (!compat) {
                /* preserve default */
            } else if (!strcmp(compat, "0.10")) {
                new_version = 2;
            } else if (!strcmp(compat, "1.1")) {
                new_version = 3;
            } else {
                error_report("Unknown compatibility level %s", compat);
                return -EINVAL;
            }
        } else if (!strcmp(desc->name, BLOCK_OPT_PREALLOC)) {
            error_report("Cannot change preallocation mode");
            return -ENOTSUP;
        } else if (!strcmp(desc->name, BLOCK_OPT_SIZE)) {
            new_size = qemu_opt_get_size(opts, BLOCK_OPT_SIZE, 0);
        } else if (!strcmp(desc->name, BLOCK_OPT_BACKING_FILE)) {
            backing_file = qemu_opt_get(opts, BLOCK_OPT_BACKING_FILE);
        } else if (!strcmp(desc->name, BLOCK_OPT_BACKING_FMT)) {
            backing_format = qemu_opt_get(opts, BLOCK_OPT_BACKING_FMT);
        } else if (!strcmp(desc->name, BLOCK_OPT_ENCRYPT)) {
            encrypt = qemu_opt_get_bool(opts, BLOCK_OPT_ENCRYPT,
                                        !!s->crypto);

            if (encrypt != !!s->crypto) {
                error_report("Changing the encryption flag is not supported");
                return -ENOTSUP;
            }
        } else if (!strcmp(desc->name, BLOCK_OPT_ENCRYPT_FORMAT)) {
            encformat = qcow2_crypt_method_from_format(
                qemu_opt_get(opts, BLOCK_OPT_ENCRYPT_FORMAT));

            if (encformat != s->crypt_method_header) {
                error_report("Changing the encryption format is not supported");
                return -ENOTSUP;
            }
        } else if (!strcmp(desc->name, BLOCK_OPT_CLUSTER_SIZE)) {
            cluster_size = qemu_opt_get_size(opts, BLOCK_OPT_CLUSTER_SIZE,
                                             cluster_size);
            if (cluster_size != s->cluster_size) {
                error_report("Changing the cluster size is not supported");
                return -ENOTSUP;
            }
        } else if (!strcmp(desc->name, BLOCK_OPT_LAZY_REFCOUNTS)) {
            lazy_refcounts = qemu_opt_get_bool(opts, BLOCK_OPT_LAZY_REFCOUNTS,
                                               lazy_refcounts);
        } else if (!strcmp(desc->name, BLOCK_OPT_REFCOUNT_BITS)) {
            refcount_bits = qemu_opt_get_number(opts, BLOCK_OPT_REFCOUNT_BITS,
                                                refcount_bits);

            if (refcount_bits <= 0 || refcount_bits > 64 ||
                !is_power_of_2(refcount_bits))
            {
                error_report("Refcount width must be a power of two and may "
                             "not exceed 64 bits");
                return -EINVAL;
            }
        } else {
            /* if this point is reached, this probably means a new option was
             * added without having it covered here */
            abort();
        }

        desc++;
    }

    helper_cb_info = (Qcow2AmendHelperCBInfo){
        .original_status_cb = status_cb,
        .original_cb_opaque = cb_opaque,
        .total_operations = (new_version < old_version)
                          + (s->refcount_bits != refcount_bits)
    };

    /* Upgrade first (some features may require compat=1.1) */
    if (new_version > old_version) {
        s->qcow_version = new_version;
        ret = qcow2_update_header(bs);
        if (ret < 0) {
            s->qcow_version = old_version;
            return ret;
        }
    }
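
    /* Illustrative note (not part of the original source): the refcount width
     * is stored as its base-2 logarithm, so refcount_bits=16 corresponds to
     * refcount_order = ctz32(16) = 4, and refcount_bits=64 to
     * refcount_order = 6. */
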
    if (s->refcount_bits != refcount_bits) {
        int refcount_order = ctz32(refcount_bits);

        if (new_version < 3 && refcount_bits != 16) {
            error_report("Refcount widths other than 16 bits require "
                         "compatibility level 1.1 or above (use compat=1.1 or "
                         "greater)");
            return -EINVAL;
        }

        helper_cb_info.current_operation = QCOW2_CHANGING_REFCOUNT_ORDER;
        ret = qcow2_change_refcount_order(bs, refcount_order,
                                          &qcow2_amend_helper_cb,
                                          &helper_cb_info, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            return ret;
        }
    }

    if (backing_file || backing_format) {
        ret = qcow2_change_backing_file(bs,
                backing_file ?: s->image_backing_file,
                backing_format ?: s->image_backing_format);
        if (ret < 0) {
            return ret;
        }
    }

    if (s->use_lazy_refcounts != lazy_refcounts) {
        if (lazy_refcounts) {
            if (new_version < 3) {
                error_report("Lazy refcounts only supported with compatibility "
                             "level 1.1 and above (use compat=1.1 or greater)");
                return -EINVAL;
            }
            s->compatible_features |= QCOW2_COMPAT_LAZY_REFCOUNTS;
            ret = qcow2_update_header(bs);
            if (ret < 0) {
                s->compatible_features &= ~QCOW2_COMPAT_LAZY_REFCOUNTS;
                return ret;
            }
            s->use_lazy_refcounts = true;
        } else {
            /* make image clean first */
            ret = qcow2_mark_clean(bs);
            if (ret < 0) {
                return ret;
            }
            /* now disallow lazy refcounts */
            s->compatible_features &= ~QCOW2_COMPAT_LAZY_REFCOUNTS;
            ret = qcow2_update_header(bs);
            if (ret < 0) {
                s->compatible_features |= QCOW2_COMPAT_LAZY_REFCOUNTS;
                return ret;
            }
            s->use_lazy_refcounts = false;
        }
    }

    if (new_size) {
        BlockBackend *blk = blk_new(BLK_PERM_RESIZE, BLK_PERM_ALL);
        ret = blk_insert_bs(blk, bs, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            blk_unref(blk);
            return ret;
        }

        ret = blk_truncate(blk, new_size, PREALLOC_MODE_OFF, &local_err);
        blk_unref(blk);
        if (ret < 0) {
            error_report_err(local_err);
            return ret;
        }
    }

    /* Downgrade last (so unsupported features can be removed before) */
    if (new_version < old_version) {
        helper_cb_info.current_operation = QCOW2_DOWNGRADING;
        ret = qcow2_downgrade(bs, new_version, &qcow2_amend_helper_cb,
                              &helper_cb_info);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}

/*
 * If offset or size are negative, respectively, they will not be included in
 * the BLOCK_IMAGE_CORRUPTED event emitted.
 * fatal will be ignored for read-only BDS; corruptions found there will always
 * be considered non-fatal.
 */
void qcow2_signal_corruption(BlockDriverState *bs, bool fatal, int64_t offset,
                             int64_t size, const char *message_format, ...)
{
    BDRVQcow2State *s = bs->opaque;
    const char *node_name;
    char *message;
    va_list ap;

    fatal = fatal && !bs->read_only;

    if (s->signaled_corruption &&
        (!fatal || (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT)))
    {
        return;
    }

    va_start(ap, message_format);
    message = g_strdup_vprintf(message_format, ap);
    va_end(ap);

    if (fatal) {
        fprintf(stderr, "qcow2: Marking image as corrupt: %s; further "
                "corruption events will be suppressed\n", message);
    } else {
        fprintf(stderr, "qcow2: Image is corrupt: %s; further non-fatal "
                "corruption events will be suppressed\n", message);
    }

    node_name = bdrv_get_node_name(bs);
    qapi_event_send_block_image_corrupted(bdrv_get_device_name(bs),
                                          *node_name != '\0', node_name,
                                          message, offset >= 0, offset,
                                          size >= 0, size,
                                          fatal, &error_abort);
    g_free(message);

    if (fatal) {
        qcow2_mark_corrupt(bs);
        bs->drv = NULL; /* make BDS unusable */
    }

    s->signaled_corruption = true;
}
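
/*
 * Illustrative example (not part of the original source): a fatal corruption
 * reported through qcow2_signal_corruption() reaches management software as a
 * QMP event shaped roughly like
 *
 *     {"event": "BLOCK_IMAGE_CORRUPTED",
 *      "data": {"device": "drive0", "node-name": "node0",
 *               "msg": "example corruption message",
 *               "offset": 65536, "size": 65536, "fatal": true},
 *      "timestamp": {...}}
 *
 * where "offset" and "size" are omitted when they were passed as negative
 * values; the device and node names here are hypothetical.
 */
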
static QemuOptsList qcow2_create_opts = {
    .name = "qcow2-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(qcow2_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size"
        },
        {
            .name = BLOCK_OPT_COMPAT_LEVEL,
            .type = QEMU_OPT_STRING,
            .help = "Compatibility level (0.10 or 1.1)"
        },
        {
            .name = BLOCK_OPT_BACKING_FILE,
            .type = QEMU_OPT_STRING,
            .help = "File name of a base image"
        },
        {
            .name = BLOCK_OPT_BACKING_FMT,
            .type = QEMU_OPT_STRING,
            .help = "Image format of the base image"
        },
        {
            .name = BLOCK_OPT_ENCRYPT,
            .type = QEMU_OPT_BOOL,
            .help = "Encrypt the image with format 'aes'. (Deprecated "
                    "in favor of " BLOCK_OPT_ENCRYPT_FORMAT "=aes)",
        },
        {
            .name = BLOCK_OPT_ENCRYPT_FORMAT,
            .type = QEMU_OPT_STRING,
            .help = "Encrypt the image, format choices: 'aes', 'luks'",
        },
        BLOCK_CRYPTO_OPT_DEF_KEY_SECRET("encrypt.",
            "ID of secret providing qcow AES key or LUKS passphrase"),
        BLOCK_CRYPTO_OPT_DEF_LUKS_CIPHER_ALG("encrypt."),
        BLOCK_CRYPTO_OPT_DEF_LUKS_CIPHER_MODE("encrypt."),
        BLOCK_CRYPTO_OPT_DEF_LUKS_IVGEN_ALG("encrypt."),
        BLOCK_CRYPTO_OPT_DEF_LUKS_IVGEN_HASH_ALG("encrypt."),
        BLOCK_CRYPTO_OPT_DEF_LUKS_HASH_ALG("encrypt."),
        BLOCK_CRYPTO_OPT_DEF_LUKS_ITER_TIME("encrypt."),
        {
            .name = BLOCK_OPT_CLUSTER_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "qcow2 cluster size",
            .def_value_str = stringify(DEFAULT_CLUSTER_SIZE)
        },
        {
            .name = BLOCK_OPT_PREALLOC,
            .type = QEMU_OPT_STRING,
            .help = "Preallocation mode (allowed values: off, metadata, "
                    "falloc, full)"
        },
        {
            .name = BLOCK_OPT_LAZY_REFCOUNTS,
            .type = QEMU_OPT_BOOL,
            .help = "Postpone refcount updates",
            .def_value_str = "off"
        },
        {
            .name = BLOCK_OPT_REFCOUNT_BITS,
            .type = QEMU_OPT_NUMBER,
            .help = "Width of a reference count entry in bits",
            .def_value_str = "16"
        },
        { /* end of list */ }
    }
};
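
/*
 * Usage sketch (illustrative, not part of the original source): the options
 * listed in qcow2_create_opts above are what "qemu-img create -f qcow2 -o ..."
 * accepts, for example:
 *
 *     qemu-img create -f qcow2 \
 *         -o compat=1.1,cluster_size=65536,lazy_refcounts=on,refcount_bits=16 \
 *         disk.qcow2 10G
 */
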
BlockDriver bdrv_qcow2 = {
    .format_name        = "qcow2",
    .instance_size      = sizeof(BDRVQcow2State),
    .bdrv_probe         = qcow2_probe,
    .bdrv_open          = qcow2_open,
    .bdrv_close         = qcow2_close,
    .bdrv_reopen_prepare  = qcow2_reopen_prepare,
    .bdrv_reopen_commit   = qcow2_reopen_commit,
    .bdrv_reopen_abort    = qcow2_reopen_abort,
    .bdrv_join_options    = qcow2_join_options,
    .bdrv_child_perm      = bdrv_format_default_perms,
    .bdrv_create        = qcow2_create,
    .bdrv_has_zero_init = bdrv_has_zero_init_1,
    .bdrv_co_get_block_status = qcow2_co_get_block_status,

    .bdrv_co_preadv         = qcow2_co_preadv,
    .bdrv_co_pwritev        = qcow2_co_pwritev,
    .bdrv_co_flush_to_os    = qcow2_co_flush_to_os,

    .bdrv_co_pwrite_zeroes  = qcow2_co_pwrite_zeroes,
    .bdrv_co_pdiscard       = qcow2_co_pdiscard,
    .bdrv_truncate          = qcow2_truncate,
    .bdrv_co_pwritev_compressed = qcow2_co_pwritev_compressed,
    .bdrv_make_empty        = qcow2_make_empty,

    .bdrv_snapshot_create   = qcow2_snapshot_create,
    .bdrv_snapshot_goto     = qcow2_snapshot_goto,
    .bdrv_snapshot_delete   = qcow2_snapshot_delete,
    .bdrv_snapshot_list     = qcow2_snapshot_list,
    .bdrv_snapshot_load_tmp = qcow2_snapshot_load_tmp,
    .bdrv_measure           = qcow2_measure,
    .bdrv_get_info          = qcow2_get_info,
    .bdrv_get_specific_info = qcow2_get_specific_info,

    .bdrv_save_vmstate    = qcow2_save_vmstate,
    .bdrv_load_vmstate    = qcow2_load_vmstate,

    .supports_backing           = true,
    .bdrv_change_backing_file   = qcow2_change_backing_file,

    .bdrv_refresh_limits        = qcow2_refresh_limits,
    .bdrv_invalidate_cache      = qcow2_invalidate_cache,
    .bdrv_inactivate            = qcow2_inactivate,

    .create_opts         = &qcow2_create_opts,
    .bdrv_check          = qcow2_check,
    .bdrv_amend_options  = qcow2_amend_options,

    .bdrv_detach_aio_context  = qcow2_detach_aio_context,
    .bdrv_attach_aio_context  = qcow2_attach_aio_context,

    .bdrv_reopen_bitmaps_rw = qcow2_reopen_bitmaps_rw,
    .bdrv_can_store_new_dirty_bitmap = qcow2_can_store_new_dirty_bitmap,
    .bdrv_remove_persistent_dirty_bitmap = qcow2_remove_persistent_dirty_bitmap,
};

static void bdrv_qcow2_init(void)
{
    bdrv_register(&bdrv_qcow2);
}

block_init(bdrv_qcow2_init);