1 /* 2 * Block driver for the QCOW version 2 format 3 * 4 * Copyright (c) 2004-2006 Fabrice Bellard 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a copy 7 * of this software and associated documentation files (the "Software"), to deal 8 * in the Software without restriction, including without limitation the rights 9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 * copies of the Software, and to permit persons to whom the Software is 11 * furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 * THE SOFTWARE. 23 */ 24 #include "qemu/osdep.h" 25 #include "block/block_int.h" 26 #include "sysemu/block-backend.h" 27 #include "qemu/module.h" 28 #include <zlib.h> 29 #include "block/qcow2.h" 30 #include "qemu/error-report.h" 31 #include "qapi/qmp/qerror.h" 32 #include "qapi/qmp/qbool.h" 33 #include "qapi/util.h" 34 #include "qapi/qmp/types.h" 35 #include "qapi-event.h" 36 #include "trace.h" 37 #include "qemu/option_int.h" 38 #include "qemu/cutils.h" 39 #include "qemu/bswap.h" 40 #include "qapi/opts-visitor.h" 41 #include "qapi-visit.h" 42 #include "block/crypto.h" 43 44 /* 45 Differences with QCOW: 46 47 - Support for multiple incremental snapshots. 48 - Memory management by reference counts. 49 - Clusters which have a reference count of one have the bit 50 QCOW_OFLAG_COPIED to optimize write performance. 51 - Size of compressed clusters is stored in sectors to reduce bit usage 52 in the cluster offsets. 53 - Support for storing additional data (such as the VM state) in the 54 snapshots. 55 - If a backing store is used, the cluster size is not constrained 56 (could be backported to QCOW). 57 - L2 tables have always a size of one cluster. 58 */ 59 60 61 typedef struct { 62 uint32_t magic; 63 uint32_t len; 64 } QEMU_PACKED QCowExtension; 65 66 #define QCOW2_EXT_MAGIC_END 0 67 #define QCOW2_EXT_MAGIC_BACKING_FORMAT 0xE2792ACA 68 #define QCOW2_EXT_MAGIC_FEATURE_TABLE 0x6803f857 69 #define QCOW2_EXT_MAGIC_CRYPTO_HEADER 0x0537be77 70 #define QCOW2_EXT_MAGIC_BITMAPS 0x23852875 71 72 static int qcow2_probe(const uint8_t *buf, int buf_size, const char *filename) 73 { 74 const QCowHeader *cow_header = (const void *)buf; 75 76 if (buf_size >= sizeof(QCowHeader) && 77 be32_to_cpu(cow_header->magic) == QCOW_MAGIC && 78 be32_to_cpu(cow_header->version) >= 2) 79 return 100; 80 else 81 return 0; 82 } 83 84 85 static ssize_t qcow2_crypto_hdr_read_func(QCryptoBlock *block, size_t offset, 86 uint8_t *buf, size_t buflen, 87 void *opaque, Error **errp) 88 { 89 BlockDriverState *bs = opaque; 90 BDRVQcow2State *s = bs->opaque; 91 ssize_t ret; 92 93 if ((offset + buflen) > s->crypto_header.length) { 94 error_setg(errp, "Request for data outside of extension header"); 95 return -1; 96 } 97 98 ret = bdrv_pread(bs->file, 99 s->crypto_header.offset + offset, buf, buflen); 100 if (ret < 0) { 101 error_setg_errno(errp, -ret, "Could not read encryption header"); 102 return -1; 103 } 104 return ret; 105 } 106 107 108 static ssize_t qcow2_crypto_hdr_init_func(QCryptoBlock *block, size_t headerlen, 109 void *opaque, Error **errp) 110 { 111 BlockDriverState *bs = opaque; 112 BDRVQcow2State *s = bs->opaque; 113 int64_t ret; 114 int64_t clusterlen; 115 116 ret = qcow2_alloc_clusters(bs, headerlen); 117 if (ret < 0) { 118 error_setg_errno(errp, -ret, 119 "Cannot allocate cluster for LUKS header size %zu", 120 headerlen); 121 return -1; 122 } 123 124 s->crypto_header.length = headerlen; 125 s->crypto_header.offset = ret; 126 127 /* Zero fill remaining space in cluster so it has predictable 128 * content in case of future spec changes */ 129 clusterlen = size_to_clusters(s, headerlen) * s->cluster_size; 130 ret = bdrv_pwrite_zeroes(bs->file, 131 ret + headerlen, 132 clusterlen - headerlen, 0); 133 if (ret < 0) { 134 error_setg_errno(errp, -ret, "Could not zero fill encryption header"); 135 return -1; 136 } 137 138 return ret; 139 } 140 141 142 static ssize_t qcow2_crypto_hdr_write_func(QCryptoBlock *block, size_t offset, 143 const uint8_t *buf, size_t buflen, 144 void *opaque, Error **errp) 145 { 146 BlockDriverState *bs = opaque; 147 BDRVQcow2State *s = bs->opaque; 148 ssize_t ret; 149 150 if ((offset + buflen) > s->crypto_header.length) { 151 error_setg(errp, "Request for data outside of extension header"); 152 return -1; 153 } 154 155 ret = bdrv_pwrite(bs->file, 156 s->crypto_header.offset + offset, buf, buflen); 157 if (ret < 0) { 158 error_setg_errno(errp, -ret, "Could not read encryption header"); 159 return -1; 160 } 161 return ret; 162 } 163 164 165 /* 166 * read qcow2 extension and fill bs 167 * start reading from start_offset 168 * finish reading upon magic of value 0 or when end_offset reached 169 * unknown magic is skipped (future extension this version knows nothing about) 170 * return 0 upon success, non-0 otherwise 171 */ 172 static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset, 173 uint64_t end_offset, void **p_feature_table, 174 int flags, bool *need_update_header, 175 Error **errp) 176 { 177 BDRVQcow2State *s = bs->opaque; 178 QCowExtension ext; 179 uint64_t offset; 180 int ret; 181 Qcow2BitmapHeaderExt bitmaps_ext; 182 183 if (need_update_header != NULL) { 184 *need_update_header = false; 185 } 186 187 #ifdef DEBUG_EXT 188 printf("qcow2_read_extensions: start=%ld end=%ld\n", start_offset, end_offset); 189 #endif 190 offset = start_offset; 191 while (offset < end_offset) { 192 193 #ifdef DEBUG_EXT 194 /* Sanity check */ 195 if (offset > s->cluster_size) 196 printf("qcow2_read_extension: suspicious offset %lu\n", offset); 197 198 printf("attempting to read extended header in offset %lu\n", offset); 199 #endif 200 201 ret = bdrv_pread(bs->file, offset, &ext, sizeof(ext)); 202 if (ret < 0) { 203 error_setg_errno(errp, -ret, "qcow2_read_extension: ERROR: " 204 "pread fail from offset %" PRIu64, offset); 205 return 1; 206 } 207 be32_to_cpus(&ext.magic); 208 be32_to_cpus(&ext.len); 209 offset += sizeof(ext); 210 #ifdef DEBUG_EXT 211 printf("ext.magic = 0x%x\n", ext.magic); 212 #endif 213 if (offset > end_offset || ext.len > end_offset - offset) { 214 error_setg(errp, "Header extension too large"); 215 return -EINVAL; 216 } 217 218 switch (ext.magic) { 219 case QCOW2_EXT_MAGIC_END: 220 return 0; 221 222 case QCOW2_EXT_MAGIC_BACKING_FORMAT: 223 if (ext.len >= sizeof(bs->backing_format)) { 224 error_setg(errp, "ERROR: ext_backing_format: len=%" PRIu32 225 " too large (>=%zu)", ext.len, 226 sizeof(bs->backing_format)); 227 return 2; 228 } 229 ret = bdrv_pread(bs->file, offset, bs->backing_format, ext.len); 230 if (ret < 0) { 231 error_setg_errno(errp, -ret, "ERROR: ext_backing_format: " 232 "Could not read format name"); 233 return 3; 234 } 235 bs->backing_format[ext.len] = '\0'; 236 s->image_backing_format = g_strdup(bs->backing_format); 237 #ifdef DEBUG_EXT 238 printf("Qcow2: Got format extension %s\n", bs->backing_format); 239 #endif 240 break; 241 242 case QCOW2_EXT_MAGIC_FEATURE_TABLE: 243 if (p_feature_table != NULL) { 244 void* feature_table = g_malloc0(ext.len + 2 * sizeof(Qcow2Feature)); 245 ret = bdrv_pread(bs->file, offset , feature_table, ext.len); 246 if (ret < 0) { 247 error_setg_errno(errp, -ret, "ERROR: ext_feature_table: " 248 "Could not read table"); 249 return ret; 250 } 251 252 *p_feature_table = feature_table; 253 } 254 break; 255 256 case QCOW2_EXT_MAGIC_CRYPTO_HEADER: { 257 unsigned int cflags = 0; 258 if (s->crypt_method_header != QCOW_CRYPT_LUKS) { 259 error_setg(errp, "CRYPTO header extension only " 260 "expected with LUKS encryption method"); 261 return -EINVAL; 262 } 263 if (ext.len != sizeof(Qcow2CryptoHeaderExtension)) { 264 error_setg(errp, "CRYPTO header extension size %u, " 265 "but expected size %zu", ext.len, 266 sizeof(Qcow2CryptoHeaderExtension)); 267 return -EINVAL; 268 } 269 270 ret = bdrv_pread(bs->file, offset, &s->crypto_header, ext.len); 271 if (ret < 0) { 272 error_setg_errno(errp, -ret, 273 "Unable to read CRYPTO header extension"); 274 return ret; 275 } 276 be64_to_cpus(&s->crypto_header.offset); 277 be64_to_cpus(&s->crypto_header.length); 278 279 if ((s->crypto_header.offset % s->cluster_size) != 0) { 280 error_setg(errp, "Encryption header offset '%" PRIu64 "' is " 281 "not a multiple of cluster size '%u'", 282 s->crypto_header.offset, s->cluster_size); 283 return -EINVAL; 284 } 285 286 if (flags & BDRV_O_NO_IO) { 287 cflags |= QCRYPTO_BLOCK_OPEN_NO_IO; 288 } 289 s->crypto = qcrypto_block_open(s->crypto_opts, "encrypt.", 290 qcow2_crypto_hdr_read_func, 291 bs, cflags, errp); 292 if (!s->crypto) { 293 return -EINVAL; 294 } 295 } break; 296 297 case QCOW2_EXT_MAGIC_BITMAPS: 298 if (ext.len != sizeof(bitmaps_ext)) { 299 error_setg_errno(errp, -ret, "bitmaps_ext: " 300 "Invalid extension length"); 301 return -EINVAL; 302 } 303 304 if (!(s->autoclear_features & QCOW2_AUTOCLEAR_BITMAPS)) { 305 error_report("WARNING: a program lacking bitmap support " 306 "modified this file, so all bitmaps are now " 307 "considered inconsistent. Some clusters may be " 308 "leaked, run 'qemu-img check -r' on the image " 309 "file to fix."); 310 if (need_update_header != NULL) { 311 /* Updating is needed to drop invalid bitmap extension. */ 312 *need_update_header = true; 313 } 314 break; 315 } 316 317 ret = bdrv_pread(bs->file, offset, &bitmaps_ext, ext.len); 318 if (ret < 0) { 319 error_setg_errno(errp, -ret, "bitmaps_ext: " 320 "Could not read ext header"); 321 return ret; 322 } 323 324 if (bitmaps_ext.reserved32 != 0) { 325 error_setg_errno(errp, -ret, "bitmaps_ext: " 326 "Reserved field is not zero"); 327 return -EINVAL; 328 } 329 330 be32_to_cpus(&bitmaps_ext.nb_bitmaps); 331 be64_to_cpus(&bitmaps_ext.bitmap_directory_size); 332 be64_to_cpus(&bitmaps_ext.bitmap_directory_offset); 333 334 if (bitmaps_ext.nb_bitmaps > QCOW2_MAX_BITMAPS) { 335 error_setg(errp, 336 "bitmaps_ext: Image has %" PRIu32 " bitmaps, " 337 "exceeding the QEMU supported maximum of %d", 338 bitmaps_ext.nb_bitmaps, QCOW2_MAX_BITMAPS); 339 return -EINVAL; 340 } 341 342 if (bitmaps_ext.nb_bitmaps == 0) { 343 error_setg(errp, "found bitmaps extension with zero bitmaps"); 344 return -EINVAL; 345 } 346 347 if (bitmaps_ext.bitmap_directory_offset & (s->cluster_size - 1)) { 348 error_setg(errp, "bitmaps_ext: " 349 "invalid bitmap directory offset"); 350 return -EINVAL; 351 } 352 353 if (bitmaps_ext.bitmap_directory_size > 354 QCOW2_MAX_BITMAP_DIRECTORY_SIZE) { 355 error_setg(errp, "bitmaps_ext: " 356 "bitmap directory size (%" PRIu64 ") exceeds " 357 "the maximum supported size (%d)", 358 bitmaps_ext.bitmap_directory_size, 359 QCOW2_MAX_BITMAP_DIRECTORY_SIZE); 360 return -EINVAL; 361 } 362 363 s->nb_bitmaps = bitmaps_ext.nb_bitmaps; 364 s->bitmap_directory_offset = 365 bitmaps_ext.bitmap_directory_offset; 366 s->bitmap_directory_size = 367 bitmaps_ext.bitmap_directory_size; 368 369 #ifdef DEBUG_EXT 370 printf("Qcow2: Got bitmaps extension: " 371 "offset=%" PRIu64 " nb_bitmaps=%" PRIu32 "\n", 372 s->bitmap_directory_offset, s->nb_bitmaps); 373 #endif 374 break; 375 376 default: 377 /* unknown magic - save it in case we need to rewrite the header */ 378 { 379 Qcow2UnknownHeaderExtension *uext; 380 381 uext = g_malloc0(sizeof(*uext) + ext.len); 382 uext->magic = ext.magic; 383 uext->len = ext.len; 384 QLIST_INSERT_HEAD(&s->unknown_header_ext, uext, next); 385 386 ret = bdrv_pread(bs->file, offset , uext->data, uext->len); 387 if (ret < 0) { 388 error_setg_errno(errp, -ret, "ERROR: unknown extension: " 389 "Could not read data"); 390 return ret; 391 } 392 } 393 break; 394 } 395 396 offset += ((ext.len + 7) & ~7); 397 } 398 399 return 0; 400 } 401 402 static void cleanup_unknown_header_ext(BlockDriverState *bs) 403 { 404 BDRVQcow2State *s = bs->opaque; 405 Qcow2UnknownHeaderExtension *uext, *next; 406 407 QLIST_FOREACH_SAFE(uext, &s->unknown_header_ext, next, next) { 408 QLIST_REMOVE(uext, next); 409 g_free(uext); 410 } 411 } 412 413 static void report_unsupported_feature(Error **errp, Qcow2Feature *table, 414 uint64_t mask) 415 { 416 char *features = g_strdup(""); 417 char *old; 418 419 while (table && table->name[0] != '\0') { 420 if (table->type == QCOW2_FEAT_TYPE_INCOMPATIBLE) { 421 if (mask & (1ULL << table->bit)) { 422 old = features; 423 features = g_strdup_printf("%s%s%.46s", old, *old ? ", " : "", 424 table->name); 425 g_free(old); 426 mask &= ~(1ULL << table->bit); 427 } 428 } 429 table++; 430 } 431 432 if (mask) { 433 old = features; 434 features = g_strdup_printf("%s%sUnknown incompatible feature: %" PRIx64, 435 old, *old ? ", " : "", mask); 436 g_free(old); 437 } 438 439 error_setg(errp, "Unsupported qcow2 feature(s): %s", features); 440 g_free(features); 441 } 442 443 /* 444 * Sets the dirty bit and flushes afterwards if necessary. 445 * 446 * The incompatible_features bit is only set if the image file header was 447 * updated successfully. Therefore it is not required to check the return 448 * value of this function. 449 */ 450 int qcow2_mark_dirty(BlockDriverState *bs) 451 { 452 BDRVQcow2State *s = bs->opaque; 453 uint64_t val; 454 int ret; 455 456 assert(s->qcow_version >= 3); 457 458 if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) { 459 return 0; /* already dirty */ 460 } 461 462 val = cpu_to_be64(s->incompatible_features | QCOW2_INCOMPAT_DIRTY); 463 ret = bdrv_pwrite(bs->file, offsetof(QCowHeader, incompatible_features), 464 &val, sizeof(val)); 465 if (ret < 0) { 466 return ret; 467 } 468 ret = bdrv_flush(bs->file->bs); 469 if (ret < 0) { 470 return ret; 471 } 472 473 /* Only treat image as dirty if the header was updated successfully */ 474 s->incompatible_features |= QCOW2_INCOMPAT_DIRTY; 475 return 0; 476 } 477 478 /* 479 * Clears the dirty bit and flushes before if necessary. Only call this 480 * function when there are no pending requests, it does not guard against 481 * concurrent requests dirtying the image. 482 */ 483 static int qcow2_mark_clean(BlockDriverState *bs) 484 { 485 BDRVQcow2State *s = bs->opaque; 486 487 if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) { 488 int ret; 489 490 s->incompatible_features &= ~QCOW2_INCOMPAT_DIRTY; 491 492 ret = bdrv_flush(bs); 493 if (ret < 0) { 494 return ret; 495 } 496 497 return qcow2_update_header(bs); 498 } 499 return 0; 500 } 501 502 /* 503 * Marks the image as corrupt. 504 */ 505 int qcow2_mark_corrupt(BlockDriverState *bs) 506 { 507 BDRVQcow2State *s = bs->opaque; 508 509 s->incompatible_features |= QCOW2_INCOMPAT_CORRUPT; 510 return qcow2_update_header(bs); 511 } 512 513 /* 514 * Marks the image as consistent, i.e., unsets the corrupt bit, and flushes 515 * before if necessary. 516 */ 517 int qcow2_mark_consistent(BlockDriverState *bs) 518 { 519 BDRVQcow2State *s = bs->opaque; 520 521 if (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT) { 522 int ret = bdrv_flush(bs); 523 if (ret < 0) { 524 return ret; 525 } 526 527 s->incompatible_features &= ~QCOW2_INCOMPAT_CORRUPT; 528 return qcow2_update_header(bs); 529 } 530 return 0; 531 } 532 533 static int qcow2_check(BlockDriverState *bs, BdrvCheckResult *result, 534 BdrvCheckMode fix) 535 { 536 int ret = qcow2_check_refcounts(bs, result, fix); 537 if (ret < 0) { 538 return ret; 539 } 540 541 if (fix && result->check_errors == 0 && result->corruptions == 0) { 542 ret = qcow2_mark_clean(bs); 543 if (ret < 0) { 544 return ret; 545 } 546 return qcow2_mark_consistent(bs); 547 } 548 return ret; 549 } 550 551 static int validate_table_offset(BlockDriverState *bs, uint64_t offset, 552 uint64_t entries, size_t entry_len) 553 { 554 BDRVQcow2State *s = bs->opaque; 555 uint64_t size; 556 557 /* Use signed INT64_MAX as the maximum even for uint64_t header fields, 558 * because values will be passed to qemu functions taking int64_t. */ 559 if (entries > INT64_MAX / entry_len) { 560 return -EINVAL; 561 } 562 563 size = entries * entry_len; 564 565 if (INT64_MAX - size < offset) { 566 return -EINVAL; 567 } 568 569 /* Tables must be cluster aligned */ 570 if (offset_into_cluster(s, offset) != 0) { 571 return -EINVAL; 572 } 573 574 return 0; 575 } 576 577 static QemuOptsList qcow2_runtime_opts = { 578 .name = "qcow2", 579 .head = QTAILQ_HEAD_INITIALIZER(qcow2_runtime_opts.head), 580 .desc = { 581 { 582 .name = QCOW2_OPT_LAZY_REFCOUNTS, 583 .type = QEMU_OPT_BOOL, 584 .help = "Postpone refcount updates", 585 }, 586 { 587 .name = QCOW2_OPT_DISCARD_REQUEST, 588 .type = QEMU_OPT_BOOL, 589 .help = "Pass guest discard requests to the layer below", 590 }, 591 { 592 .name = QCOW2_OPT_DISCARD_SNAPSHOT, 593 .type = QEMU_OPT_BOOL, 594 .help = "Generate discard requests when snapshot related space " 595 "is freed", 596 }, 597 { 598 .name = QCOW2_OPT_DISCARD_OTHER, 599 .type = QEMU_OPT_BOOL, 600 .help = "Generate discard requests when other clusters are freed", 601 }, 602 { 603 .name = QCOW2_OPT_OVERLAP, 604 .type = QEMU_OPT_STRING, 605 .help = "Selects which overlap checks to perform from a range of " 606 "templates (none, constant, cached, all)", 607 }, 608 { 609 .name = QCOW2_OPT_OVERLAP_TEMPLATE, 610 .type = QEMU_OPT_STRING, 611 .help = "Selects which overlap checks to perform from a range of " 612 "templates (none, constant, cached, all)", 613 }, 614 { 615 .name = QCOW2_OPT_OVERLAP_MAIN_HEADER, 616 .type = QEMU_OPT_BOOL, 617 .help = "Check for unintended writes into the main qcow2 header", 618 }, 619 { 620 .name = QCOW2_OPT_OVERLAP_ACTIVE_L1, 621 .type = QEMU_OPT_BOOL, 622 .help = "Check for unintended writes into the active L1 table", 623 }, 624 { 625 .name = QCOW2_OPT_OVERLAP_ACTIVE_L2, 626 .type = QEMU_OPT_BOOL, 627 .help = "Check for unintended writes into an active L2 table", 628 }, 629 { 630 .name = QCOW2_OPT_OVERLAP_REFCOUNT_TABLE, 631 .type = QEMU_OPT_BOOL, 632 .help = "Check for unintended writes into the refcount table", 633 }, 634 { 635 .name = QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK, 636 .type = QEMU_OPT_BOOL, 637 .help = "Check for unintended writes into a refcount block", 638 }, 639 { 640 .name = QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE, 641 .type = QEMU_OPT_BOOL, 642 .help = "Check for unintended writes into the snapshot table", 643 }, 644 { 645 .name = QCOW2_OPT_OVERLAP_INACTIVE_L1, 646 .type = QEMU_OPT_BOOL, 647 .help = "Check for unintended writes into an inactive L1 table", 648 }, 649 { 650 .name = QCOW2_OPT_OVERLAP_INACTIVE_L2, 651 .type = QEMU_OPT_BOOL, 652 .help = "Check for unintended writes into an inactive L2 table", 653 }, 654 { 655 .name = QCOW2_OPT_CACHE_SIZE, 656 .type = QEMU_OPT_SIZE, 657 .help = "Maximum combined metadata (L2 tables and refcount blocks) " 658 "cache size", 659 }, 660 { 661 .name = QCOW2_OPT_L2_CACHE_SIZE, 662 .type = QEMU_OPT_SIZE, 663 .help = "Maximum L2 table cache size", 664 }, 665 { 666 .name = QCOW2_OPT_REFCOUNT_CACHE_SIZE, 667 .type = QEMU_OPT_SIZE, 668 .help = "Maximum refcount block cache size", 669 }, 670 { 671 .name = QCOW2_OPT_CACHE_CLEAN_INTERVAL, 672 .type = QEMU_OPT_NUMBER, 673 .help = "Clean unused cache entries after this time (in seconds)", 674 }, 675 BLOCK_CRYPTO_OPT_DEF_KEY_SECRET("encrypt.", 676 "ID of secret providing qcow2 AES key or LUKS passphrase"), 677 { /* end of list */ } 678 }, 679 }; 680 681 static const char *overlap_bool_option_names[QCOW2_OL_MAX_BITNR] = { 682 [QCOW2_OL_MAIN_HEADER_BITNR] = QCOW2_OPT_OVERLAP_MAIN_HEADER, 683 [QCOW2_OL_ACTIVE_L1_BITNR] = QCOW2_OPT_OVERLAP_ACTIVE_L1, 684 [QCOW2_OL_ACTIVE_L2_BITNR] = QCOW2_OPT_OVERLAP_ACTIVE_L2, 685 [QCOW2_OL_REFCOUNT_TABLE_BITNR] = QCOW2_OPT_OVERLAP_REFCOUNT_TABLE, 686 [QCOW2_OL_REFCOUNT_BLOCK_BITNR] = QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK, 687 [QCOW2_OL_SNAPSHOT_TABLE_BITNR] = QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE, 688 [QCOW2_OL_INACTIVE_L1_BITNR] = QCOW2_OPT_OVERLAP_INACTIVE_L1, 689 [QCOW2_OL_INACTIVE_L2_BITNR] = QCOW2_OPT_OVERLAP_INACTIVE_L2, 690 }; 691 692 static void cache_clean_timer_cb(void *opaque) 693 { 694 BlockDriverState *bs = opaque; 695 BDRVQcow2State *s = bs->opaque; 696 qcow2_cache_clean_unused(bs, s->l2_table_cache); 697 qcow2_cache_clean_unused(bs, s->refcount_block_cache); 698 timer_mod(s->cache_clean_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 699 (int64_t) s->cache_clean_interval * 1000); 700 } 701 702 static void cache_clean_timer_init(BlockDriverState *bs, AioContext *context) 703 { 704 BDRVQcow2State *s = bs->opaque; 705 if (s->cache_clean_interval > 0) { 706 s->cache_clean_timer = aio_timer_new(context, QEMU_CLOCK_VIRTUAL, 707 SCALE_MS, cache_clean_timer_cb, 708 bs); 709 timer_mod(s->cache_clean_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 710 (int64_t) s->cache_clean_interval * 1000); 711 } 712 } 713 714 static void cache_clean_timer_del(BlockDriverState *bs) 715 { 716 BDRVQcow2State *s = bs->opaque; 717 if (s->cache_clean_timer) { 718 timer_del(s->cache_clean_timer); 719 timer_free(s->cache_clean_timer); 720 s->cache_clean_timer = NULL; 721 } 722 } 723 724 static void qcow2_detach_aio_context(BlockDriverState *bs) 725 { 726 cache_clean_timer_del(bs); 727 } 728 729 static void qcow2_attach_aio_context(BlockDriverState *bs, 730 AioContext *new_context) 731 { 732 cache_clean_timer_init(bs, new_context); 733 } 734 735 static void read_cache_sizes(BlockDriverState *bs, QemuOpts *opts, 736 uint64_t *l2_cache_size, 737 uint64_t *refcount_cache_size, Error **errp) 738 { 739 BDRVQcow2State *s = bs->opaque; 740 uint64_t combined_cache_size; 741 bool l2_cache_size_set, refcount_cache_size_set, combined_cache_size_set; 742 743 combined_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_CACHE_SIZE); 744 l2_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_L2_CACHE_SIZE); 745 refcount_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_REFCOUNT_CACHE_SIZE); 746 747 combined_cache_size = qemu_opt_get_size(opts, QCOW2_OPT_CACHE_SIZE, 0); 748 *l2_cache_size = qemu_opt_get_size(opts, QCOW2_OPT_L2_CACHE_SIZE, 0); 749 *refcount_cache_size = qemu_opt_get_size(opts, 750 QCOW2_OPT_REFCOUNT_CACHE_SIZE, 0); 751 752 if (combined_cache_size_set) { 753 if (l2_cache_size_set && refcount_cache_size_set) { 754 error_setg(errp, QCOW2_OPT_CACHE_SIZE ", " QCOW2_OPT_L2_CACHE_SIZE 755 " and " QCOW2_OPT_REFCOUNT_CACHE_SIZE " may not be set " 756 "the same time"); 757 return; 758 } else if (*l2_cache_size > combined_cache_size) { 759 error_setg(errp, QCOW2_OPT_L2_CACHE_SIZE " may not exceed " 760 QCOW2_OPT_CACHE_SIZE); 761 return; 762 } else if (*refcount_cache_size > combined_cache_size) { 763 error_setg(errp, QCOW2_OPT_REFCOUNT_CACHE_SIZE " may not exceed " 764 QCOW2_OPT_CACHE_SIZE); 765 return; 766 } 767 768 if (l2_cache_size_set) { 769 *refcount_cache_size = combined_cache_size - *l2_cache_size; 770 } else if (refcount_cache_size_set) { 771 *l2_cache_size = combined_cache_size - *refcount_cache_size; 772 } else { 773 *refcount_cache_size = combined_cache_size 774 / (DEFAULT_L2_REFCOUNT_SIZE_RATIO + 1); 775 *l2_cache_size = combined_cache_size - *refcount_cache_size; 776 } 777 } else { 778 if (!l2_cache_size_set && !refcount_cache_size_set) { 779 *l2_cache_size = MAX(DEFAULT_L2_CACHE_BYTE_SIZE, 780 (uint64_t)DEFAULT_L2_CACHE_CLUSTERS 781 * s->cluster_size); 782 *refcount_cache_size = *l2_cache_size 783 / DEFAULT_L2_REFCOUNT_SIZE_RATIO; 784 } else if (!l2_cache_size_set) { 785 *l2_cache_size = *refcount_cache_size 786 * DEFAULT_L2_REFCOUNT_SIZE_RATIO; 787 } else if (!refcount_cache_size_set) { 788 *refcount_cache_size = *l2_cache_size 789 / DEFAULT_L2_REFCOUNT_SIZE_RATIO; 790 } 791 } 792 } 793 794 typedef struct Qcow2ReopenState { 795 Qcow2Cache *l2_table_cache; 796 Qcow2Cache *refcount_block_cache; 797 bool use_lazy_refcounts; 798 int overlap_check; 799 bool discard_passthrough[QCOW2_DISCARD_MAX]; 800 uint64_t cache_clean_interval; 801 QCryptoBlockOpenOptions *crypto_opts; /* Disk encryption runtime options */ 802 } Qcow2ReopenState; 803 804 static int qcow2_update_options_prepare(BlockDriverState *bs, 805 Qcow2ReopenState *r, 806 QDict *options, int flags, 807 Error **errp) 808 { 809 BDRVQcow2State *s = bs->opaque; 810 QemuOpts *opts = NULL; 811 const char *opt_overlap_check, *opt_overlap_check_template; 812 int overlap_check_template = 0; 813 uint64_t l2_cache_size, refcount_cache_size; 814 int i; 815 const char *encryptfmt; 816 QDict *encryptopts = NULL; 817 Error *local_err = NULL; 818 int ret; 819 820 qdict_extract_subqdict(options, &encryptopts, "encrypt."); 821 encryptfmt = qdict_get_try_str(encryptopts, "format"); 822 823 opts = qemu_opts_create(&qcow2_runtime_opts, NULL, 0, &error_abort); 824 qemu_opts_absorb_qdict(opts, options, &local_err); 825 if (local_err) { 826 error_propagate(errp, local_err); 827 ret = -EINVAL; 828 goto fail; 829 } 830 831 /* get L2 table/refcount block cache size from command line options */ 832 read_cache_sizes(bs, opts, &l2_cache_size, &refcount_cache_size, 833 &local_err); 834 if (local_err) { 835 error_propagate(errp, local_err); 836 ret = -EINVAL; 837 goto fail; 838 } 839 840 l2_cache_size /= s->cluster_size; 841 if (l2_cache_size < MIN_L2_CACHE_SIZE) { 842 l2_cache_size = MIN_L2_CACHE_SIZE; 843 } 844 if (l2_cache_size > INT_MAX) { 845 error_setg(errp, "L2 cache size too big"); 846 ret = -EINVAL; 847 goto fail; 848 } 849 850 refcount_cache_size /= s->cluster_size; 851 if (refcount_cache_size < MIN_REFCOUNT_CACHE_SIZE) { 852 refcount_cache_size = MIN_REFCOUNT_CACHE_SIZE; 853 } 854 if (refcount_cache_size > INT_MAX) { 855 error_setg(errp, "Refcount cache size too big"); 856 ret = -EINVAL; 857 goto fail; 858 } 859 860 /* alloc new L2 table/refcount block cache, flush old one */ 861 if (s->l2_table_cache) { 862 ret = qcow2_cache_flush(bs, s->l2_table_cache); 863 if (ret) { 864 error_setg_errno(errp, -ret, "Failed to flush the L2 table cache"); 865 goto fail; 866 } 867 } 868 869 if (s->refcount_block_cache) { 870 ret = qcow2_cache_flush(bs, s->refcount_block_cache); 871 if (ret) { 872 error_setg_errno(errp, -ret, 873 "Failed to flush the refcount block cache"); 874 goto fail; 875 } 876 } 877 878 r->l2_table_cache = qcow2_cache_create(bs, l2_cache_size); 879 r->refcount_block_cache = qcow2_cache_create(bs, refcount_cache_size); 880 if (r->l2_table_cache == NULL || r->refcount_block_cache == NULL) { 881 error_setg(errp, "Could not allocate metadata caches"); 882 ret = -ENOMEM; 883 goto fail; 884 } 885 886 /* New interval for cache cleanup timer */ 887 r->cache_clean_interval = 888 qemu_opt_get_number(opts, QCOW2_OPT_CACHE_CLEAN_INTERVAL, 889 s->cache_clean_interval); 890 #ifndef CONFIG_LINUX 891 if (r->cache_clean_interval != 0) { 892 error_setg(errp, QCOW2_OPT_CACHE_CLEAN_INTERVAL 893 " not supported on this host"); 894 ret = -EINVAL; 895 goto fail; 896 } 897 #endif 898 if (r->cache_clean_interval > UINT_MAX) { 899 error_setg(errp, "Cache clean interval too big"); 900 ret = -EINVAL; 901 goto fail; 902 } 903 904 /* lazy-refcounts; flush if going from enabled to disabled */ 905 r->use_lazy_refcounts = qemu_opt_get_bool(opts, QCOW2_OPT_LAZY_REFCOUNTS, 906 (s->compatible_features & QCOW2_COMPAT_LAZY_REFCOUNTS)); 907 if (r->use_lazy_refcounts && s->qcow_version < 3) { 908 error_setg(errp, "Lazy refcounts require a qcow2 image with at least " 909 "qemu 1.1 compatibility level"); 910 ret = -EINVAL; 911 goto fail; 912 } 913 914 if (s->use_lazy_refcounts && !r->use_lazy_refcounts) { 915 ret = qcow2_mark_clean(bs); 916 if (ret < 0) { 917 error_setg_errno(errp, -ret, "Failed to disable lazy refcounts"); 918 goto fail; 919 } 920 } 921 922 /* Overlap check options */ 923 opt_overlap_check = qemu_opt_get(opts, QCOW2_OPT_OVERLAP); 924 opt_overlap_check_template = qemu_opt_get(opts, QCOW2_OPT_OVERLAP_TEMPLATE); 925 if (opt_overlap_check_template && opt_overlap_check && 926 strcmp(opt_overlap_check_template, opt_overlap_check)) 927 { 928 error_setg(errp, "Conflicting values for qcow2 options '" 929 QCOW2_OPT_OVERLAP "' ('%s') and '" QCOW2_OPT_OVERLAP_TEMPLATE 930 "' ('%s')", opt_overlap_check, opt_overlap_check_template); 931 ret = -EINVAL; 932 goto fail; 933 } 934 if (!opt_overlap_check) { 935 opt_overlap_check = opt_overlap_check_template ?: "cached"; 936 } 937 938 if (!strcmp(opt_overlap_check, "none")) { 939 overlap_check_template = 0; 940 } else if (!strcmp(opt_overlap_check, "constant")) { 941 overlap_check_template = QCOW2_OL_CONSTANT; 942 } else if (!strcmp(opt_overlap_check, "cached")) { 943 overlap_check_template = QCOW2_OL_CACHED; 944 } else if (!strcmp(opt_overlap_check, "all")) { 945 overlap_check_template = QCOW2_OL_ALL; 946 } else { 947 error_setg(errp, "Unsupported value '%s' for qcow2 option " 948 "'overlap-check'. Allowed are any of the following: " 949 "none, constant, cached, all", opt_overlap_check); 950 ret = -EINVAL; 951 goto fail; 952 } 953 954 r->overlap_check = 0; 955 for (i = 0; i < QCOW2_OL_MAX_BITNR; i++) { 956 /* overlap-check defines a template bitmask, but every flag may be 957 * overwritten through the associated boolean option */ 958 r->overlap_check |= 959 qemu_opt_get_bool(opts, overlap_bool_option_names[i], 960 overlap_check_template & (1 << i)) << i; 961 } 962 963 r->discard_passthrough[QCOW2_DISCARD_NEVER] = false; 964 r->discard_passthrough[QCOW2_DISCARD_ALWAYS] = true; 965 r->discard_passthrough[QCOW2_DISCARD_REQUEST] = 966 qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_REQUEST, 967 flags & BDRV_O_UNMAP); 968 r->discard_passthrough[QCOW2_DISCARD_SNAPSHOT] = 969 qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_SNAPSHOT, true); 970 r->discard_passthrough[QCOW2_DISCARD_OTHER] = 971 qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_OTHER, false); 972 973 switch (s->crypt_method_header) { 974 case QCOW_CRYPT_NONE: 975 if (encryptfmt) { 976 error_setg(errp, "No encryption in image header, but options " 977 "specified format '%s'", encryptfmt); 978 ret = -EINVAL; 979 goto fail; 980 } 981 break; 982 983 case QCOW_CRYPT_AES: 984 if (encryptfmt && !g_str_equal(encryptfmt, "aes")) { 985 error_setg(errp, 986 "Header reported 'aes' encryption format but " 987 "options specify '%s'", encryptfmt); 988 ret = -EINVAL; 989 goto fail; 990 } 991 qdict_del(encryptopts, "format"); 992 r->crypto_opts = block_crypto_open_opts_init( 993 Q_CRYPTO_BLOCK_FORMAT_QCOW, encryptopts, errp); 994 break; 995 996 case QCOW_CRYPT_LUKS: 997 if (encryptfmt && !g_str_equal(encryptfmt, "luks")) { 998 error_setg(errp, 999 "Header reported 'luks' encryption format but " 1000 "options specify '%s'", encryptfmt); 1001 ret = -EINVAL; 1002 goto fail; 1003 } 1004 qdict_del(encryptopts, "format"); 1005 r->crypto_opts = block_crypto_open_opts_init( 1006 Q_CRYPTO_BLOCK_FORMAT_LUKS, encryptopts, errp); 1007 break; 1008 1009 default: 1010 error_setg(errp, "Unsupported encryption method %d", 1011 s->crypt_method_header); 1012 break; 1013 } 1014 if (s->crypt_method_header != QCOW_CRYPT_NONE && !r->crypto_opts) { 1015 ret = -EINVAL; 1016 goto fail; 1017 } 1018 1019 ret = 0; 1020 fail: 1021 QDECREF(encryptopts); 1022 qemu_opts_del(opts); 1023 opts = NULL; 1024 return ret; 1025 } 1026 1027 static void qcow2_update_options_commit(BlockDriverState *bs, 1028 Qcow2ReopenState *r) 1029 { 1030 BDRVQcow2State *s = bs->opaque; 1031 int i; 1032 1033 if (s->l2_table_cache) { 1034 qcow2_cache_destroy(bs, s->l2_table_cache); 1035 } 1036 if (s->refcount_block_cache) { 1037 qcow2_cache_destroy(bs, s->refcount_block_cache); 1038 } 1039 s->l2_table_cache = r->l2_table_cache; 1040 s->refcount_block_cache = r->refcount_block_cache; 1041 1042 s->overlap_check = r->overlap_check; 1043 s->use_lazy_refcounts = r->use_lazy_refcounts; 1044 1045 for (i = 0; i < QCOW2_DISCARD_MAX; i++) { 1046 s->discard_passthrough[i] = r->discard_passthrough[i]; 1047 } 1048 1049 if (s->cache_clean_interval != r->cache_clean_interval) { 1050 cache_clean_timer_del(bs); 1051 s->cache_clean_interval = r->cache_clean_interval; 1052 cache_clean_timer_init(bs, bdrv_get_aio_context(bs)); 1053 } 1054 1055 qapi_free_QCryptoBlockOpenOptions(s->crypto_opts); 1056 s->crypto_opts = r->crypto_opts; 1057 } 1058 1059 static void qcow2_update_options_abort(BlockDriverState *bs, 1060 Qcow2ReopenState *r) 1061 { 1062 if (r->l2_table_cache) { 1063 qcow2_cache_destroy(bs, r->l2_table_cache); 1064 } 1065 if (r->refcount_block_cache) { 1066 qcow2_cache_destroy(bs, r->refcount_block_cache); 1067 } 1068 qapi_free_QCryptoBlockOpenOptions(r->crypto_opts); 1069 } 1070 1071 static int qcow2_update_options(BlockDriverState *bs, QDict *options, 1072 int flags, Error **errp) 1073 { 1074 Qcow2ReopenState r = {}; 1075 int ret; 1076 1077 ret = qcow2_update_options_prepare(bs, &r, options, flags, errp); 1078 if (ret >= 0) { 1079 qcow2_update_options_commit(bs, &r); 1080 } else { 1081 qcow2_update_options_abort(bs, &r); 1082 } 1083 1084 return ret; 1085 } 1086 1087 static int qcow2_do_open(BlockDriverState *bs, QDict *options, int flags, 1088 Error **errp) 1089 { 1090 BDRVQcow2State *s = bs->opaque; 1091 unsigned int len, i; 1092 int ret = 0; 1093 QCowHeader header; 1094 Error *local_err = NULL; 1095 uint64_t ext_end; 1096 uint64_t l1_vm_state_index; 1097 bool update_header = false; 1098 1099 ret = bdrv_pread(bs->file, 0, &header, sizeof(header)); 1100 if (ret < 0) { 1101 error_setg_errno(errp, -ret, "Could not read qcow2 header"); 1102 goto fail; 1103 } 1104 be32_to_cpus(&header.magic); 1105 be32_to_cpus(&header.version); 1106 be64_to_cpus(&header.backing_file_offset); 1107 be32_to_cpus(&header.backing_file_size); 1108 be64_to_cpus(&header.size); 1109 be32_to_cpus(&header.cluster_bits); 1110 be32_to_cpus(&header.crypt_method); 1111 be64_to_cpus(&header.l1_table_offset); 1112 be32_to_cpus(&header.l1_size); 1113 be64_to_cpus(&header.refcount_table_offset); 1114 be32_to_cpus(&header.refcount_table_clusters); 1115 be64_to_cpus(&header.snapshots_offset); 1116 be32_to_cpus(&header.nb_snapshots); 1117 1118 if (header.magic != QCOW_MAGIC) { 1119 error_setg(errp, "Image is not in qcow2 format"); 1120 ret = -EINVAL; 1121 goto fail; 1122 } 1123 if (header.version < 2 || header.version > 3) { 1124 error_setg(errp, "Unsupported qcow2 version %" PRIu32, header.version); 1125 ret = -ENOTSUP; 1126 goto fail; 1127 } 1128 1129 s->qcow_version = header.version; 1130 1131 /* Initialise cluster size */ 1132 if (header.cluster_bits < MIN_CLUSTER_BITS || 1133 header.cluster_bits > MAX_CLUSTER_BITS) { 1134 error_setg(errp, "Unsupported cluster size: 2^%" PRIu32, 1135 header.cluster_bits); 1136 ret = -EINVAL; 1137 goto fail; 1138 } 1139 1140 s->cluster_bits = header.cluster_bits; 1141 s->cluster_size = 1 << s->cluster_bits; 1142 s->cluster_sectors = 1 << (s->cluster_bits - 9); 1143 1144 /* Initialise version 3 header fields */ 1145 if (header.version == 2) { 1146 header.incompatible_features = 0; 1147 header.compatible_features = 0; 1148 header.autoclear_features = 0; 1149 header.refcount_order = 4; 1150 header.header_length = 72; 1151 } else { 1152 be64_to_cpus(&header.incompatible_features); 1153 be64_to_cpus(&header.compatible_features); 1154 be64_to_cpus(&header.autoclear_features); 1155 be32_to_cpus(&header.refcount_order); 1156 be32_to_cpus(&header.header_length); 1157 1158 if (header.header_length < 104) { 1159 error_setg(errp, "qcow2 header too short"); 1160 ret = -EINVAL; 1161 goto fail; 1162 } 1163 } 1164 1165 if (header.header_length > s->cluster_size) { 1166 error_setg(errp, "qcow2 header exceeds cluster size"); 1167 ret = -EINVAL; 1168 goto fail; 1169 } 1170 1171 if (header.header_length > sizeof(header)) { 1172 s->unknown_header_fields_size = header.header_length - sizeof(header); 1173 s->unknown_header_fields = g_malloc(s->unknown_header_fields_size); 1174 ret = bdrv_pread(bs->file, sizeof(header), s->unknown_header_fields, 1175 s->unknown_header_fields_size); 1176 if (ret < 0) { 1177 error_setg_errno(errp, -ret, "Could not read unknown qcow2 header " 1178 "fields"); 1179 goto fail; 1180 } 1181 } 1182 1183 if (header.backing_file_offset > s->cluster_size) { 1184 error_setg(errp, "Invalid backing file offset"); 1185 ret = -EINVAL; 1186 goto fail; 1187 } 1188 1189 if (header.backing_file_offset) { 1190 ext_end = header.backing_file_offset; 1191 } else { 1192 ext_end = 1 << header.cluster_bits; 1193 } 1194 1195 /* Handle feature bits */ 1196 s->incompatible_features = header.incompatible_features; 1197 s->compatible_features = header.compatible_features; 1198 s->autoclear_features = header.autoclear_features; 1199 1200 if (s->incompatible_features & ~QCOW2_INCOMPAT_MASK) { 1201 void *feature_table = NULL; 1202 qcow2_read_extensions(bs, header.header_length, ext_end, 1203 &feature_table, flags, NULL, NULL); 1204 report_unsupported_feature(errp, feature_table, 1205 s->incompatible_features & 1206 ~QCOW2_INCOMPAT_MASK); 1207 ret = -ENOTSUP; 1208 g_free(feature_table); 1209 goto fail; 1210 } 1211 1212 if (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT) { 1213 /* Corrupt images may not be written to unless they are being repaired 1214 */ 1215 if ((flags & BDRV_O_RDWR) && !(flags & BDRV_O_CHECK)) { 1216 error_setg(errp, "qcow2: Image is corrupt; cannot be opened " 1217 "read/write"); 1218 ret = -EACCES; 1219 goto fail; 1220 } 1221 } 1222 1223 /* Check support for various header values */ 1224 if (header.refcount_order > 6) { 1225 error_setg(errp, "Reference count entry width too large; may not " 1226 "exceed 64 bits"); 1227 ret = -EINVAL; 1228 goto fail; 1229 } 1230 s->refcount_order = header.refcount_order; 1231 s->refcount_bits = 1 << s->refcount_order; 1232 s->refcount_max = UINT64_C(1) << (s->refcount_bits - 1); 1233 s->refcount_max += s->refcount_max - 1; 1234 1235 s->crypt_method_header = header.crypt_method; 1236 if (s->crypt_method_header) { 1237 if (bdrv_uses_whitelist() && 1238 s->crypt_method_header == QCOW_CRYPT_AES) { 1239 error_setg(errp, 1240 "Use of AES-CBC encrypted qcow2 images is no longer " 1241 "supported in system emulators"); 1242 error_append_hint(errp, 1243 "You can use 'qemu-img convert' to convert your " 1244 "image to an alternative supported format, such " 1245 "as unencrypted qcow2, or raw with the LUKS " 1246 "format instead.\n"); 1247 ret = -ENOSYS; 1248 goto fail; 1249 } 1250 1251 if (s->crypt_method_header == QCOW_CRYPT_AES) { 1252 s->crypt_physical_offset = false; 1253 } else { 1254 /* Assuming LUKS and any future crypt methods we 1255 * add will all use physical offsets, due to the 1256 * fact that the alternative is insecure... */ 1257 s->crypt_physical_offset = true; 1258 } 1259 1260 bs->encrypted = true; 1261 } 1262 1263 s->l2_bits = s->cluster_bits - 3; /* L2 is always one cluster */ 1264 s->l2_size = 1 << s->l2_bits; 1265 /* 2^(s->refcount_order - 3) is the refcount width in bytes */ 1266 s->refcount_block_bits = s->cluster_bits - (s->refcount_order - 3); 1267 s->refcount_block_size = 1 << s->refcount_block_bits; 1268 bs->total_sectors = header.size / 512; 1269 s->csize_shift = (62 - (s->cluster_bits - 8)); 1270 s->csize_mask = (1 << (s->cluster_bits - 8)) - 1; 1271 s->cluster_offset_mask = (1LL << s->csize_shift) - 1; 1272 1273 s->refcount_table_offset = header.refcount_table_offset; 1274 s->refcount_table_size = 1275 header.refcount_table_clusters << (s->cluster_bits - 3); 1276 1277 if (header.refcount_table_clusters > qcow2_max_refcount_clusters(s)) { 1278 error_setg(errp, "Reference count table too large"); 1279 ret = -EINVAL; 1280 goto fail; 1281 } 1282 1283 ret = validate_table_offset(bs, s->refcount_table_offset, 1284 s->refcount_table_size, sizeof(uint64_t)); 1285 if (ret < 0) { 1286 error_setg(errp, "Invalid reference count table offset"); 1287 goto fail; 1288 } 1289 1290 /* Snapshot table offset/length */ 1291 if (header.nb_snapshots > QCOW_MAX_SNAPSHOTS) { 1292 error_setg(errp, "Too many snapshots"); 1293 ret = -EINVAL; 1294 goto fail; 1295 } 1296 1297 ret = validate_table_offset(bs, header.snapshots_offset, 1298 header.nb_snapshots, 1299 sizeof(QCowSnapshotHeader)); 1300 if (ret < 0) { 1301 error_setg(errp, "Invalid snapshot table offset"); 1302 goto fail; 1303 } 1304 1305 /* read the level 1 table */ 1306 if (header.l1_size > QCOW_MAX_L1_SIZE / sizeof(uint64_t)) { 1307 error_setg(errp, "Active L1 table too large"); 1308 ret = -EFBIG; 1309 goto fail; 1310 } 1311 s->l1_size = header.l1_size; 1312 1313 l1_vm_state_index = size_to_l1(s, header.size); 1314 if (l1_vm_state_index > INT_MAX) { 1315 error_setg(errp, "Image is too big"); 1316 ret = -EFBIG; 1317 goto fail; 1318 } 1319 s->l1_vm_state_index = l1_vm_state_index; 1320 1321 /* the L1 table must contain at least enough entries to put 1322 header.size bytes */ 1323 if (s->l1_size < s->l1_vm_state_index) { 1324 error_setg(errp, "L1 table is too small"); 1325 ret = -EINVAL; 1326 goto fail; 1327 } 1328 1329 ret = validate_table_offset(bs, header.l1_table_offset, 1330 header.l1_size, sizeof(uint64_t)); 1331 if (ret < 0) { 1332 error_setg(errp, "Invalid L1 table offset"); 1333 goto fail; 1334 } 1335 s->l1_table_offset = header.l1_table_offset; 1336 1337 1338 if (s->l1_size > 0) { 1339 s->l1_table = qemu_try_blockalign(bs->file->bs, 1340 align_offset(s->l1_size * sizeof(uint64_t), 512)); 1341 if (s->l1_table == NULL) { 1342 error_setg(errp, "Could not allocate L1 table"); 1343 ret = -ENOMEM; 1344 goto fail; 1345 } 1346 ret = bdrv_pread(bs->file, s->l1_table_offset, s->l1_table, 1347 s->l1_size * sizeof(uint64_t)); 1348 if (ret < 0) { 1349 error_setg_errno(errp, -ret, "Could not read L1 table"); 1350 goto fail; 1351 } 1352 for(i = 0;i < s->l1_size; i++) { 1353 be64_to_cpus(&s->l1_table[i]); 1354 } 1355 } 1356 1357 /* Parse driver-specific options */ 1358 ret = qcow2_update_options(bs, options, flags, errp); 1359 if (ret < 0) { 1360 goto fail; 1361 } 1362 1363 s->cluster_cache = g_malloc(s->cluster_size); 1364 /* one more sector for decompressed data alignment */ 1365 s->cluster_data = qemu_try_blockalign(bs->file->bs, QCOW_MAX_CRYPT_CLUSTERS 1366 * s->cluster_size + 512); 1367 if (s->cluster_data == NULL) { 1368 error_setg(errp, "Could not allocate temporary cluster buffer"); 1369 ret = -ENOMEM; 1370 goto fail; 1371 } 1372 1373 s->cluster_cache_offset = -1; 1374 s->flags = flags; 1375 1376 ret = qcow2_refcount_init(bs); 1377 if (ret != 0) { 1378 error_setg_errno(errp, -ret, "Could not initialize refcount handling"); 1379 goto fail; 1380 } 1381 1382 QLIST_INIT(&s->cluster_allocs); 1383 QTAILQ_INIT(&s->discards); 1384 1385 /* read qcow2 extensions */ 1386 if (qcow2_read_extensions(bs, header.header_length, ext_end, NULL, 1387 flags, &update_header, &local_err)) { 1388 error_propagate(errp, local_err); 1389 ret = -EINVAL; 1390 goto fail; 1391 } 1392 1393 /* qcow2_read_extension may have set up the crypto context 1394 * if the crypt method needs a header region, some methods 1395 * don't need header extensions, so must check here 1396 */ 1397 if (s->crypt_method_header && !s->crypto) { 1398 if (s->crypt_method_header == QCOW_CRYPT_AES) { 1399 unsigned int cflags = 0; 1400 if (flags & BDRV_O_NO_IO) { 1401 cflags |= QCRYPTO_BLOCK_OPEN_NO_IO; 1402 } 1403 s->crypto = qcrypto_block_open(s->crypto_opts, "encrypt.", 1404 NULL, NULL, cflags, errp); 1405 if (!s->crypto) { 1406 ret = -EINVAL; 1407 goto fail; 1408 } 1409 } else if (!(flags & BDRV_O_NO_IO)) { 1410 error_setg(errp, "Missing CRYPTO header for crypt method %d", 1411 s->crypt_method_header); 1412 ret = -EINVAL; 1413 goto fail; 1414 } 1415 } 1416 1417 /* read the backing file name */ 1418 if (header.backing_file_offset != 0) { 1419 len = header.backing_file_size; 1420 if (len > MIN(1023, s->cluster_size - header.backing_file_offset) || 1421 len >= sizeof(bs->backing_file)) { 1422 error_setg(errp, "Backing file name too long"); 1423 ret = -EINVAL; 1424 goto fail; 1425 } 1426 ret = bdrv_pread(bs->file, header.backing_file_offset, 1427 bs->backing_file, len); 1428 if (ret < 0) { 1429 error_setg_errno(errp, -ret, "Could not read backing file name"); 1430 goto fail; 1431 } 1432 bs->backing_file[len] = '\0'; 1433 s->image_backing_file = g_strdup(bs->backing_file); 1434 } 1435 1436 /* Internal snapshots */ 1437 s->snapshots_offset = header.snapshots_offset; 1438 s->nb_snapshots = header.nb_snapshots; 1439 1440 ret = qcow2_read_snapshots(bs); 1441 if (ret < 0) { 1442 error_setg_errno(errp, -ret, "Could not read snapshots"); 1443 goto fail; 1444 } 1445 1446 /* Clear unknown autoclear feature bits */ 1447 update_header |= s->autoclear_features & ~QCOW2_AUTOCLEAR_MASK; 1448 update_header = 1449 update_header && !bs->read_only && !(flags & BDRV_O_INACTIVE); 1450 if (update_header) { 1451 s->autoclear_features &= QCOW2_AUTOCLEAR_MASK; 1452 } 1453 1454 if (qcow2_load_autoloading_dirty_bitmaps(bs, &local_err)) { 1455 update_header = false; 1456 } 1457 if (local_err != NULL) { 1458 error_propagate(errp, local_err); 1459 ret = -EINVAL; 1460 goto fail; 1461 } 1462 1463 if (update_header) { 1464 ret = qcow2_update_header(bs); 1465 if (ret < 0) { 1466 error_setg_errno(errp, -ret, "Could not update qcow2 header"); 1467 goto fail; 1468 } 1469 } 1470 1471 /* Initialise locks */ 1472 qemu_co_mutex_init(&s->lock); 1473 bs->supported_zero_flags = BDRV_REQ_MAY_UNMAP; 1474 1475 /* Repair image if dirty */ 1476 if (!(flags & (BDRV_O_CHECK | BDRV_O_INACTIVE)) && !bs->read_only && 1477 (s->incompatible_features & QCOW2_INCOMPAT_DIRTY)) { 1478 BdrvCheckResult result = {0}; 1479 1480 ret = qcow2_check(bs, &result, BDRV_FIX_ERRORS | BDRV_FIX_LEAKS); 1481 if (ret < 0) { 1482 error_setg_errno(errp, -ret, "Could not repair dirty image"); 1483 goto fail; 1484 } 1485 } 1486 1487 #ifdef DEBUG_ALLOC 1488 { 1489 BdrvCheckResult result = {0}; 1490 qcow2_check_refcounts(bs, &result, 0); 1491 } 1492 #endif 1493 return ret; 1494 1495 fail: 1496 g_free(s->unknown_header_fields); 1497 cleanup_unknown_header_ext(bs); 1498 qcow2_free_snapshots(bs); 1499 qcow2_refcount_close(bs); 1500 qemu_vfree(s->l1_table); 1501 /* else pre-write overlap checks in cache_destroy may crash */ 1502 s->l1_table = NULL; 1503 cache_clean_timer_del(bs); 1504 if (s->l2_table_cache) { 1505 qcow2_cache_destroy(bs, s->l2_table_cache); 1506 } 1507 if (s->refcount_block_cache) { 1508 qcow2_cache_destroy(bs, s->refcount_block_cache); 1509 } 1510 g_free(s->cluster_cache); 1511 qemu_vfree(s->cluster_data); 1512 qcrypto_block_free(s->crypto); 1513 qapi_free_QCryptoBlockOpenOptions(s->crypto_opts); 1514 return ret; 1515 } 1516 1517 static int qcow2_open(BlockDriverState *bs, QDict *options, int flags, 1518 Error **errp) 1519 { 1520 bs->file = bdrv_open_child(NULL, options, "file", bs, &child_file, 1521 false, errp); 1522 if (!bs->file) { 1523 return -EINVAL; 1524 } 1525 1526 return qcow2_do_open(bs, options, flags, errp); 1527 } 1528 1529 static void qcow2_refresh_limits(BlockDriverState *bs, Error **errp) 1530 { 1531 BDRVQcow2State *s = bs->opaque; 1532 1533 if (bs->encrypted) { 1534 /* Encryption works on a sector granularity */ 1535 bs->bl.request_alignment = BDRV_SECTOR_SIZE; 1536 } 1537 bs->bl.pwrite_zeroes_alignment = s->cluster_size; 1538 bs->bl.pdiscard_alignment = s->cluster_size; 1539 } 1540 1541 static int qcow2_reopen_prepare(BDRVReopenState *state, 1542 BlockReopenQueue *queue, Error **errp) 1543 { 1544 Qcow2ReopenState *r; 1545 int ret; 1546 1547 r = g_new0(Qcow2ReopenState, 1); 1548 state->opaque = r; 1549 1550 ret = qcow2_update_options_prepare(state->bs, r, state->options, 1551 state->flags, errp); 1552 if (ret < 0) { 1553 goto fail; 1554 } 1555 1556 /* We need to write out any unwritten data if we reopen read-only. */ 1557 if ((state->flags & BDRV_O_RDWR) == 0) { 1558 ret = qcow2_reopen_bitmaps_ro(state->bs, errp); 1559 if (ret < 0) { 1560 goto fail; 1561 } 1562 1563 ret = bdrv_flush(state->bs); 1564 if (ret < 0) { 1565 goto fail; 1566 } 1567 1568 ret = qcow2_mark_clean(state->bs); 1569 if (ret < 0) { 1570 goto fail; 1571 } 1572 } 1573 1574 return 0; 1575 1576 fail: 1577 qcow2_update_options_abort(state->bs, r); 1578 g_free(r); 1579 return ret; 1580 } 1581 1582 static void qcow2_reopen_commit(BDRVReopenState *state) 1583 { 1584 qcow2_update_options_commit(state->bs, state->opaque); 1585 g_free(state->opaque); 1586 } 1587 1588 static void qcow2_reopen_abort(BDRVReopenState *state) 1589 { 1590 qcow2_update_options_abort(state->bs, state->opaque); 1591 g_free(state->opaque); 1592 } 1593 1594 static void qcow2_join_options(QDict *options, QDict *old_options) 1595 { 1596 bool has_new_overlap_template = 1597 qdict_haskey(options, QCOW2_OPT_OVERLAP) || 1598 qdict_haskey(options, QCOW2_OPT_OVERLAP_TEMPLATE); 1599 bool has_new_total_cache_size = 1600 qdict_haskey(options, QCOW2_OPT_CACHE_SIZE); 1601 bool has_all_cache_options; 1602 1603 /* New overlap template overrides all old overlap options */ 1604 if (has_new_overlap_template) { 1605 qdict_del(old_options, QCOW2_OPT_OVERLAP); 1606 qdict_del(old_options, QCOW2_OPT_OVERLAP_TEMPLATE); 1607 qdict_del(old_options, QCOW2_OPT_OVERLAP_MAIN_HEADER); 1608 qdict_del(old_options, QCOW2_OPT_OVERLAP_ACTIVE_L1); 1609 qdict_del(old_options, QCOW2_OPT_OVERLAP_ACTIVE_L2); 1610 qdict_del(old_options, QCOW2_OPT_OVERLAP_REFCOUNT_TABLE); 1611 qdict_del(old_options, QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK); 1612 qdict_del(old_options, QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE); 1613 qdict_del(old_options, QCOW2_OPT_OVERLAP_INACTIVE_L1); 1614 qdict_del(old_options, QCOW2_OPT_OVERLAP_INACTIVE_L2); 1615 } 1616 1617 /* New total cache size overrides all old options */ 1618 if (qdict_haskey(options, QCOW2_OPT_CACHE_SIZE)) { 1619 qdict_del(old_options, QCOW2_OPT_L2_CACHE_SIZE); 1620 qdict_del(old_options, QCOW2_OPT_REFCOUNT_CACHE_SIZE); 1621 } 1622 1623 qdict_join(options, old_options, false); 1624 1625 /* 1626 * If after merging all cache size options are set, an old total size is 1627 * overwritten. Do keep all options, however, if all three are new. The 1628 * resulting error message is what we want to happen. 1629 */ 1630 has_all_cache_options = 1631 qdict_haskey(options, QCOW2_OPT_CACHE_SIZE) || 1632 qdict_haskey(options, QCOW2_OPT_L2_CACHE_SIZE) || 1633 qdict_haskey(options, QCOW2_OPT_REFCOUNT_CACHE_SIZE); 1634 1635 if (has_all_cache_options && !has_new_total_cache_size) { 1636 qdict_del(options, QCOW2_OPT_CACHE_SIZE); 1637 } 1638 } 1639 1640 static int64_t coroutine_fn qcow2_co_get_block_status(BlockDriverState *bs, 1641 int64_t sector_num, int nb_sectors, int *pnum, BlockDriverState **file) 1642 { 1643 BDRVQcow2State *s = bs->opaque; 1644 uint64_t cluster_offset; 1645 int index_in_cluster, ret; 1646 unsigned int bytes; 1647 int64_t status = 0; 1648 1649 bytes = MIN(INT_MAX, nb_sectors * BDRV_SECTOR_SIZE); 1650 qemu_co_mutex_lock(&s->lock); 1651 ret = qcow2_get_cluster_offset(bs, sector_num << 9, &bytes, 1652 &cluster_offset); 1653 qemu_co_mutex_unlock(&s->lock); 1654 if (ret < 0) { 1655 return ret; 1656 } 1657 1658 *pnum = bytes >> BDRV_SECTOR_BITS; 1659 1660 if (cluster_offset != 0 && ret != QCOW2_CLUSTER_COMPRESSED && 1661 !s->crypto) { 1662 index_in_cluster = sector_num & (s->cluster_sectors - 1); 1663 cluster_offset |= (index_in_cluster << BDRV_SECTOR_BITS); 1664 *file = bs->file->bs; 1665 status |= BDRV_BLOCK_OFFSET_VALID | cluster_offset; 1666 } 1667 if (ret == QCOW2_CLUSTER_ZERO_PLAIN || ret == QCOW2_CLUSTER_ZERO_ALLOC) { 1668 status |= BDRV_BLOCK_ZERO; 1669 } else if (ret != QCOW2_CLUSTER_UNALLOCATED) { 1670 status |= BDRV_BLOCK_DATA; 1671 } 1672 return status; 1673 } 1674 1675 /* handle reading after the end of the backing file */ 1676 int qcow2_backing_read1(BlockDriverState *bs, QEMUIOVector *qiov, 1677 int64_t offset, int bytes) 1678 { 1679 uint64_t bs_size = bs->total_sectors * BDRV_SECTOR_SIZE; 1680 int n1; 1681 1682 if ((offset + bytes) <= bs_size) { 1683 return bytes; 1684 } 1685 1686 if (offset >= bs_size) { 1687 n1 = 0; 1688 } else { 1689 n1 = bs_size - offset; 1690 } 1691 1692 qemu_iovec_memset(qiov, n1, 0, bytes - n1); 1693 1694 return n1; 1695 } 1696 1697 static coroutine_fn int qcow2_co_preadv(BlockDriverState *bs, uint64_t offset, 1698 uint64_t bytes, QEMUIOVector *qiov, 1699 int flags) 1700 { 1701 BDRVQcow2State *s = bs->opaque; 1702 int offset_in_cluster, n1; 1703 int ret; 1704 unsigned int cur_bytes; /* number of bytes in current iteration */ 1705 uint64_t cluster_offset = 0; 1706 uint64_t bytes_done = 0; 1707 QEMUIOVector hd_qiov; 1708 uint8_t *cluster_data = NULL; 1709 1710 qemu_iovec_init(&hd_qiov, qiov->niov); 1711 1712 qemu_co_mutex_lock(&s->lock); 1713 1714 while (bytes != 0) { 1715 1716 /* prepare next request */ 1717 cur_bytes = MIN(bytes, INT_MAX); 1718 if (s->crypto) { 1719 cur_bytes = MIN(cur_bytes, 1720 QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); 1721 } 1722 1723 ret = qcow2_get_cluster_offset(bs, offset, &cur_bytes, &cluster_offset); 1724 if (ret < 0) { 1725 goto fail; 1726 } 1727 1728 offset_in_cluster = offset_into_cluster(s, offset); 1729 1730 qemu_iovec_reset(&hd_qiov); 1731 qemu_iovec_concat(&hd_qiov, qiov, bytes_done, cur_bytes); 1732 1733 switch (ret) { 1734 case QCOW2_CLUSTER_UNALLOCATED: 1735 1736 if (bs->backing) { 1737 /* read from the base image */ 1738 n1 = qcow2_backing_read1(bs->backing->bs, &hd_qiov, 1739 offset, cur_bytes); 1740 if (n1 > 0) { 1741 QEMUIOVector local_qiov; 1742 1743 qemu_iovec_init(&local_qiov, hd_qiov.niov); 1744 qemu_iovec_concat(&local_qiov, &hd_qiov, 0, n1); 1745 1746 BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO); 1747 qemu_co_mutex_unlock(&s->lock); 1748 ret = bdrv_co_preadv(bs->backing, offset, n1, 1749 &local_qiov, 0); 1750 qemu_co_mutex_lock(&s->lock); 1751 1752 qemu_iovec_destroy(&local_qiov); 1753 1754 if (ret < 0) { 1755 goto fail; 1756 } 1757 } 1758 } else { 1759 /* Note: in this case, no need to wait */ 1760 qemu_iovec_memset(&hd_qiov, 0, 0, cur_bytes); 1761 } 1762 break; 1763 1764 case QCOW2_CLUSTER_ZERO_PLAIN: 1765 case QCOW2_CLUSTER_ZERO_ALLOC: 1766 qemu_iovec_memset(&hd_qiov, 0, 0, cur_bytes); 1767 break; 1768 1769 case QCOW2_CLUSTER_COMPRESSED: 1770 /* add AIO support for compressed blocks ? */ 1771 ret = qcow2_decompress_cluster(bs, cluster_offset); 1772 if (ret < 0) { 1773 goto fail; 1774 } 1775 1776 qemu_iovec_from_buf(&hd_qiov, 0, 1777 s->cluster_cache + offset_in_cluster, 1778 cur_bytes); 1779 break; 1780 1781 case QCOW2_CLUSTER_NORMAL: 1782 if ((cluster_offset & 511) != 0) { 1783 ret = -EIO; 1784 goto fail; 1785 } 1786 1787 if (bs->encrypted) { 1788 assert(s->crypto); 1789 1790 /* 1791 * For encrypted images, read everything into a temporary 1792 * contiguous buffer on which the AES functions can work. 1793 */ 1794 if (!cluster_data) { 1795 cluster_data = 1796 qemu_try_blockalign(bs->file->bs, 1797 QCOW_MAX_CRYPT_CLUSTERS 1798 * s->cluster_size); 1799 if (cluster_data == NULL) { 1800 ret = -ENOMEM; 1801 goto fail; 1802 } 1803 } 1804 1805 assert(cur_bytes <= QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); 1806 qemu_iovec_reset(&hd_qiov); 1807 qemu_iovec_add(&hd_qiov, cluster_data, cur_bytes); 1808 } 1809 1810 BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO); 1811 qemu_co_mutex_unlock(&s->lock); 1812 ret = bdrv_co_preadv(bs->file, 1813 cluster_offset + offset_in_cluster, 1814 cur_bytes, &hd_qiov, 0); 1815 qemu_co_mutex_lock(&s->lock); 1816 if (ret < 0) { 1817 goto fail; 1818 } 1819 if (bs->encrypted) { 1820 assert(s->crypto); 1821 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0); 1822 assert((cur_bytes & (BDRV_SECTOR_SIZE - 1)) == 0); 1823 Error *err = NULL; 1824 if (qcrypto_block_decrypt(s->crypto, 1825 (s->crypt_physical_offset ? 1826 cluster_offset + offset_in_cluster : 1827 offset) >> BDRV_SECTOR_BITS, 1828 cluster_data, 1829 cur_bytes, 1830 &err) < 0) { 1831 error_free(err); 1832 ret = -EIO; 1833 goto fail; 1834 } 1835 qemu_iovec_from_buf(qiov, bytes_done, cluster_data, cur_bytes); 1836 } 1837 break; 1838 1839 default: 1840 g_assert_not_reached(); 1841 ret = -EIO; 1842 goto fail; 1843 } 1844 1845 bytes -= cur_bytes; 1846 offset += cur_bytes; 1847 bytes_done += cur_bytes; 1848 } 1849 ret = 0; 1850 1851 fail: 1852 qemu_co_mutex_unlock(&s->lock); 1853 1854 qemu_iovec_destroy(&hd_qiov); 1855 qemu_vfree(cluster_data); 1856 1857 return ret; 1858 } 1859 1860 /* Check if it's possible to merge a write request with the writing of 1861 * the data from the COW regions */ 1862 static bool merge_cow(uint64_t offset, unsigned bytes, 1863 QEMUIOVector *hd_qiov, QCowL2Meta *l2meta) 1864 { 1865 QCowL2Meta *m; 1866 1867 for (m = l2meta; m != NULL; m = m->next) { 1868 /* If both COW regions are empty then there's nothing to merge */ 1869 if (m->cow_start.nb_bytes == 0 && m->cow_end.nb_bytes == 0) { 1870 continue; 1871 } 1872 1873 /* The data (middle) region must be immediately after the 1874 * start region */ 1875 if (l2meta_cow_start(m) + m->cow_start.nb_bytes != offset) { 1876 continue; 1877 } 1878 1879 /* The end region must be immediately after the data (middle) 1880 * region */ 1881 if (m->offset + m->cow_end.offset != offset + bytes) { 1882 continue; 1883 } 1884 1885 /* Make sure that adding both COW regions to the QEMUIOVector 1886 * does not exceed IOV_MAX */ 1887 if (hd_qiov->niov > IOV_MAX - 2) { 1888 continue; 1889 } 1890 1891 m->data_qiov = hd_qiov; 1892 return true; 1893 } 1894 1895 return false; 1896 } 1897 1898 static coroutine_fn int qcow2_co_pwritev(BlockDriverState *bs, uint64_t offset, 1899 uint64_t bytes, QEMUIOVector *qiov, 1900 int flags) 1901 { 1902 BDRVQcow2State *s = bs->opaque; 1903 int offset_in_cluster; 1904 int ret; 1905 unsigned int cur_bytes; /* number of sectors in current iteration */ 1906 uint64_t cluster_offset; 1907 QEMUIOVector hd_qiov; 1908 uint64_t bytes_done = 0; 1909 uint8_t *cluster_data = NULL; 1910 QCowL2Meta *l2meta = NULL; 1911 1912 trace_qcow2_writev_start_req(qemu_coroutine_self(), offset, bytes); 1913 1914 qemu_iovec_init(&hd_qiov, qiov->niov); 1915 1916 s->cluster_cache_offset = -1; /* disable compressed cache */ 1917 1918 qemu_co_mutex_lock(&s->lock); 1919 1920 while (bytes != 0) { 1921 1922 l2meta = NULL; 1923 1924 trace_qcow2_writev_start_part(qemu_coroutine_self()); 1925 offset_in_cluster = offset_into_cluster(s, offset); 1926 cur_bytes = MIN(bytes, INT_MAX); 1927 if (bs->encrypted) { 1928 cur_bytes = MIN(cur_bytes, 1929 QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size 1930 - offset_in_cluster); 1931 } 1932 1933 ret = qcow2_alloc_cluster_offset(bs, offset, &cur_bytes, 1934 &cluster_offset, &l2meta); 1935 if (ret < 0) { 1936 goto fail; 1937 } 1938 1939 assert((cluster_offset & 511) == 0); 1940 1941 qemu_iovec_reset(&hd_qiov); 1942 qemu_iovec_concat(&hd_qiov, qiov, bytes_done, cur_bytes); 1943 1944 if (bs->encrypted) { 1945 Error *err = NULL; 1946 assert(s->crypto); 1947 if (!cluster_data) { 1948 cluster_data = qemu_try_blockalign(bs->file->bs, 1949 QCOW_MAX_CRYPT_CLUSTERS 1950 * s->cluster_size); 1951 if (cluster_data == NULL) { 1952 ret = -ENOMEM; 1953 goto fail; 1954 } 1955 } 1956 1957 assert(hd_qiov.size <= 1958 QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); 1959 qemu_iovec_to_buf(&hd_qiov, 0, cluster_data, hd_qiov.size); 1960 1961 if (qcrypto_block_encrypt(s->crypto, 1962 (s->crypt_physical_offset ? 1963 cluster_offset + offset_in_cluster : 1964 offset) >> BDRV_SECTOR_BITS, 1965 cluster_data, 1966 cur_bytes, &err) < 0) { 1967 error_free(err); 1968 ret = -EIO; 1969 goto fail; 1970 } 1971 1972 qemu_iovec_reset(&hd_qiov); 1973 qemu_iovec_add(&hd_qiov, cluster_data, cur_bytes); 1974 } 1975 1976 ret = qcow2_pre_write_overlap_check(bs, 0, 1977 cluster_offset + offset_in_cluster, cur_bytes); 1978 if (ret < 0) { 1979 goto fail; 1980 } 1981 1982 /* If we need to do COW, check if it's possible to merge the 1983 * writing of the guest data together with that of the COW regions. 1984 * If it's not possible (or not necessary) then write the 1985 * guest data now. */ 1986 if (!merge_cow(offset, cur_bytes, &hd_qiov, l2meta)) { 1987 qemu_co_mutex_unlock(&s->lock); 1988 BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO); 1989 trace_qcow2_writev_data(qemu_coroutine_self(), 1990 cluster_offset + offset_in_cluster); 1991 ret = bdrv_co_pwritev(bs->file, 1992 cluster_offset + offset_in_cluster, 1993 cur_bytes, &hd_qiov, 0); 1994 qemu_co_mutex_lock(&s->lock); 1995 if (ret < 0) { 1996 goto fail; 1997 } 1998 } 1999 2000 while (l2meta != NULL) { 2001 QCowL2Meta *next; 2002 2003 ret = qcow2_alloc_cluster_link_l2(bs, l2meta); 2004 if (ret < 0) { 2005 goto fail; 2006 } 2007 2008 /* Take the request off the list of running requests */ 2009 if (l2meta->nb_clusters != 0) { 2010 QLIST_REMOVE(l2meta, next_in_flight); 2011 } 2012 2013 qemu_co_queue_restart_all(&l2meta->dependent_requests); 2014 2015 next = l2meta->next; 2016 g_free(l2meta); 2017 l2meta = next; 2018 } 2019 2020 bytes -= cur_bytes; 2021 offset += cur_bytes; 2022 bytes_done += cur_bytes; 2023 trace_qcow2_writev_done_part(qemu_coroutine_self(), cur_bytes); 2024 } 2025 ret = 0; 2026 2027 fail: 2028 while (l2meta != NULL) { 2029 QCowL2Meta *next; 2030 2031 if (l2meta->nb_clusters != 0) { 2032 QLIST_REMOVE(l2meta, next_in_flight); 2033 } 2034 qemu_co_queue_restart_all(&l2meta->dependent_requests); 2035 2036 next = l2meta->next; 2037 g_free(l2meta); 2038 l2meta = next; 2039 } 2040 2041 qemu_co_mutex_unlock(&s->lock); 2042 2043 qemu_iovec_destroy(&hd_qiov); 2044 qemu_vfree(cluster_data); 2045 trace_qcow2_writev_done_req(qemu_coroutine_self(), ret); 2046 2047 return ret; 2048 } 2049 2050 static int qcow2_inactivate(BlockDriverState *bs) 2051 { 2052 BDRVQcow2State *s = bs->opaque; 2053 int ret, result = 0; 2054 Error *local_err = NULL; 2055 2056 ret = qcow2_cache_flush(bs, s->l2_table_cache); 2057 if (ret) { 2058 result = ret; 2059 error_report("Failed to flush the L2 table cache: %s", 2060 strerror(-ret)); 2061 } 2062 2063 ret = qcow2_cache_flush(bs, s->refcount_block_cache); 2064 if (ret) { 2065 result = ret; 2066 error_report("Failed to flush the refcount block cache: %s", 2067 strerror(-ret)); 2068 } 2069 2070 qcow2_store_persistent_dirty_bitmaps(bs, &local_err); 2071 if (local_err != NULL) { 2072 result = -EINVAL; 2073 error_report_err(local_err); 2074 error_report("Persistent bitmaps are lost for node '%s'", 2075 bdrv_get_device_or_node_name(bs)); 2076 } 2077 2078 if (result == 0) { 2079 qcow2_mark_clean(bs); 2080 } 2081 2082 return result; 2083 } 2084 2085 static void qcow2_close(BlockDriverState *bs) 2086 { 2087 BDRVQcow2State *s = bs->opaque; 2088 qemu_vfree(s->l1_table); 2089 /* else pre-write overlap checks in cache_destroy may crash */ 2090 s->l1_table = NULL; 2091 2092 if (!(s->flags & BDRV_O_INACTIVE)) { 2093 qcow2_inactivate(bs); 2094 } 2095 2096 cache_clean_timer_del(bs); 2097 qcow2_cache_destroy(bs, s->l2_table_cache); 2098 qcow2_cache_destroy(bs, s->refcount_block_cache); 2099 2100 qcrypto_block_free(s->crypto); 2101 s->crypto = NULL; 2102 2103 g_free(s->unknown_header_fields); 2104 cleanup_unknown_header_ext(bs); 2105 2106 g_free(s->image_backing_file); 2107 g_free(s->image_backing_format); 2108 2109 g_free(s->cluster_cache); 2110 qemu_vfree(s->cluster_data); 2111 qcow2_refcount_close(bs); 2112 qcow2_free_snapshots(bs); 2113 } 2114 2115 static void qcow2_invalidate_cache(BlockDriverState *bs, Error **errp) 2116 { 2117 BDRVQcow2State *s = bs->opaque; 2118 int flags = s->flags; 2119 QCryptoBlock *crypto = NULL; 2120 QDict *options; 2121 Error *local_err = NULL; 2122 int ret; 2123 2124 /* 2125 * Backing files are read-only which makes all of their metadata immutable, 2126 * that means we don't have to worry about reopening them here. 2127 */ 2128 2129 crypto = s->crypto; 2130 s->crypto = NULL; 2131 2132 qcow2_close(bs); 2133 2134 memset(s, 0, sizeof(BDRVQcow2State)); 2135 options = qdict_clone_shallow(bs->options); 2136 2137 flags &= ~BDRV_O_INACTIVE; 2138 ret = qcow2_do_open(bs, options, flags, &local_err); 2139 QDECREF(options); 2140 if (local_err) { 2141 error_propagate(errp, local_err); 2142 error_prepend(errp, "Could not reopen qcow2 layer: "); 2143 bs->drv = NULL; 2144 return; 2145 } else if (ret < 0) { 2146 error_setg_errno(errp, -ret, "Could not reopen qcow2 layer"); 2147 bs->drv = NULL; 2148 return; 2149 } 2150 2151 s->crypto = crypto; 2152 } 2153 2154 static size_t header_ext_add(char *buf, uint32_t magic, const void *s, 2155 size_t len, size_t buflen) 2156 { 2157 QCowExtension *ext_backing_fmt = (QCowExtension*) buf; 2158 size_t ext_len = sizeof(QCowExtension) + ((len + 7) & ~7); 2159 2160 if (buflen < ext_len) { 2161 return -ENOSPC; 2162 } 2163 2164 *ext_backing_fmt = (QCowExtension) { 2165 .magic = cpu_to_be32(magic), 2166 .len = cpu_to_be32(len), 2167 }; 2168 2169 if (len) { 2170 memcpy(buf + sizeof(QCowExtension), s, len); 2171 } 2172 2173 return ext_len; 2174 } 2175 2176 /* 2177 * Updates the qcow2 header, including the variable length parts of it, i.e. 2178 * the backing file name and all extensions. qcow2 was not designed to allow 2179 * such changes, so if we run out of space (we can only use the first cluster) 2180 * this function may fail. 2181 * 2182 * Returns 0 on success, -errno in error cases. 2183 */ 2184 int qcow2_update_header(BlockDriverState *bs) 2185 { 2186 BDRVQcow2State *s = bs->opaque; 2187 QCowHeader *header; 2188 char *buf; 2189 size_t buflen = s->cluster_size; 2190 int ret; 2191 uint64_t total_size; 2192 uint32_t refcount_table_clusters; 2193 size_t header_length; 2194 Qcow2UnknownHeaderExtension *uext; 2195 2196 buf = qemu_blockalign(bs, buflen); 2197 2198 /* Header structure */ 2199 header = (QCowHeader*) buf; 2200 2201 if (buflen < sizeof(*header)) { 2202 ret = -ENOSPC; 2203 goto fail; 2204 } 2205 2206 header_length = sizeof(*header) + s->unknown_header_fields_size; 2207 total_size = bs->total_sectors * BDRV_SECTOR_SIZE; 2208 refcount_table_clusters = s->refcount_table_size >> (s->cluster_bits - 3); 2209 2210 *header = (QCowHeader) { 2211 /* Version 2 fields */ 2212 .magic = cpu_to_be32(QCOW_MAGIC), 2213 .version = cpu_to_be32(s->qcow_version), 2214 .backing_file_offset = 0, 2215 .backing_file_size = 0, 2216 .cluster_bits = cpu_to_be32(s->cluster_bits), 2217 .size = cpu_to_be64(total_size), 2218 .crypt_method = cpu_to_be32(s->crypt_method_header), 2219 .l1_size = cpu_to_be32(s->l1_size), 2220 .l1_table_offset = cpu_to_be64(s->l1_table_offset), 2221 .refcount_table_offset = cpu_to_be64(s->refcount_table_offset), 2222 .refcount_table_clusters = cpu_to_be32(refcount_table_clusters), 2223 .nb_snapshots = cpu_to_be32(s->nb_snapshots), 2224 .snapshots_offset = cpu_to_be64(s->snapshots_offset), 2225 2226 /* Version 3 fields */ 2227 .incompatible_features = cpu_to_be64(s->incompatible_features), 2228 .compatible_features = cpu_to_be64(s->compatible_features), 2229 .autoclear_features = cpu_to_be64(s->autoclear_features), 2230 .refcount_order = cpu_to_be32(s->refcount_order), 2231 .header_length = cpu_to_be32(header_length), 2232 }; 2233 2234 /* For older versions, write a shorter header */ 2235 switch (s->qcow_version) { 2236 case 2: 2237 ret = offsetof(QCowHeader, incompatible_features); 2238 break; 2239 case 3: 2240 ret = sizeof(*header); 2241 break; 2242 default: 2243 ret = -EINVAL; 2244 goto fail; 2245 } 2246 2247 buf += ret; 2248 buflen -= ret; 2249 memset(buf, 0, buflen); 2250 2251 /* Preserve any unknown field in the header */ 2252 if (s->unknown_header_fields_size) { 2253 if (buflen < s->unknown_header_fields_size) { 2254 ret = -ENOSPC; 2255 goto fail; 2256 } 2257 2258 memcpy(buf, s->unknown_header_fields, s->unknown_header_fields_size); 2259 buf += s->unknown_header_fields_size; 2260 buflen -= s->unknown_header_fields_size; 2261 } 2262 2263 /* Backing file format header extension */ 2264 if (s->image_backing_format) { 2265 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_BACKING_FORMAT, 2266 s->image_backing_format, 2267 strlen(s->image_backing_format), 2268 buflen); 2269 if (ret < 0) { 2270 goto fail; 2271 } 2272 2273 buf += ret; 2274 buflen -= ret; 2275 } 2276 2277 /* Full disk encryption header pointer extension */ 2278 if (s->crypto_header.offset != 0) { 2279 cpu_to_be64s(&s->crypto_header.offset); 2280 cpu_to_be64s(&s->crypto_header.length); 2281 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_CRYPTO_HEADER, 2282 &s->crypto_header, sizeof(s->crypto_header), 2283 buflen); 2284 be64_to_cpus(&s->crypto_header.offset); 2285 be64_to_cpus(&s->crypto_header.length); 2286 if (ret < 0) { 2287 goto fail; 2288 } 2289 buf += ret; 2290 buflen -= ret; 2291 } 2292 2293 /* Feature table */ 2294 if (s->qcow_version >= 3) { 2295 Qcow2Feature features[] = { 2296 { 2297 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE, 2298 .bit = QCOW2_INCOMPAT_DIRTY_BITNR, 2299 .name = "dirty bit", 2300 }, 2301 { 2302 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE, 2303 .bit = QCOW2_INCOMPAT_CORRUPT_BITNR, 2304 .name = "corrupt bit", 2305 }, 2306 { 2307 .type = QCOW2_FEAT_TYPE_COMPATIBLE, 2308 .bit = QCOW2_COMPAT_LAZY_REFCOUNTS_BITNR, 2309 .name = "lazy refcounts", 2310 }, 2311 }; 2312 2313 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_FEATURE_TABLE, 2314 features, sizeof(features), buflen); 2315 if (ret < 0) { 2316 goto fail; 2317 } 2318 buf += ret; 2319 buflen -= ret; 2320 } 2321 2322 /* Bitmap extension */ 2323 if (s->nb_bitmaps > 0) { 2324 Qcow2BitmapHeaderExt bitmaps_header = { 2325 .nb_bitmaps = cpu_to_be32(s->nb_bitmaps), 2326 .bitmap_directory_size = 2327 cpu_to_be64(s->bitmap_directory_size), 2328 .bitmap_directory_offset = 2329 cpu_to_be64(s->bitmap_directory_offset) 2330 }; 2331 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_BITMAPS, 2332 &bitmaps_header, sizeof(bitmaps_header), 2333 buflen); 2334 if (ret < 0) { 2335 goto fail; 2336 } 2337 buf += ret; 2338 buflen -= ret; 2339 } 2340 2341 /* Keep unknown header extensions */ 2342 QLIST_FOREACH(uext, &s->unknown_header_ext, next) { 2343 ret = header_ext_add(buf, uext->magic, uext->data, uext->len, buflen); 2344 if (ret < 0) { 2345 goto fail; 2346 } 2347 2348 buf += ret; 2349 buflen -= ret; 2350 } 2351 2352 /* End of header extensions */ 2353 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_END, NULL, 0, buflen); 2354 if (ret < 0) { 2355 goto fail; 2356 } 2357 2358 buf += ret; 2359 buflen -= ret; 2360 2361 /* Backing file name */ 2362 if (s->image_backing_file) { 2363 size_t backing_file_len = strlen(s->image_backing_file); 2364 2365 if (buflen < backing_file_len) { 2366 ret = -ENOSPC; 2367 goto fail; 2368 } 2369 2370 /* Using strncpy is ok here, since buf is not NUL-terminated. */ 2371 strncpy(buf, s->image_backing_file, buflen); 2372 2373 header->backing_file_offset = cpu_to_be64(buf - ((char*) header)); 2374 header->backing_file_size = cpu_to_be32(backing_file_len); 2375 } 2376 2377 /* Write the new header */ 2378 ret = bdrv_pwrite(bs->file, 0, header, s->cluster_size); 2379 if (ret < 0) { 2380 goto fail; 2381 } 2382 2383 ret = 0; 2384 fail: 2385 qemu_vfree(header); 2386 return ret; 2387 } 2388 2389 static int qcow2_change_backing_file(BlockDriverState *bs, 2390 const char *backing_file, const char *backing_fmt) 2391 { 2392 BDRVQcow2State *s = bs->opaque; 2393 2394 if (backing_file && strlen(backing_file) > 1023) { 2395 return -EINVAL; 2396 } 2397 2398 pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: ""); 2399 pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: ""); 2400 2401 g_free(s->image_backing_file); 2402 g_free(s->image_backing_format); 2403 2404 s->image_backing_file = backing_file ? g_strdup(bs->backing_file) : NULL; 2405 s->image_backing_format = backing_fmt ? g_strdup(bs->backing_format) : NULL; 2406 2407 return qcow2_update_header(bs); 2408 } 2409 2410 static int qcow2_crypt_method_from_format(const char *encryptfmt) 2411 { 2412 if (g_str_equal(encryptfmt, "luks")) { 2413 return QCOW_CRYPT_LUKS; 2414 } else if (g_str_equal(encryptfmt, "aes")) { 2415 return QCOW_CRYPT_AES; 2416 } else { 2417 return -EINVAL; 2418 } 2419 } 2420 2421 static int qcow2_set_up_encryption(BlockDriverState *bs, const char *encryptfmt, 2422 QemuOpts *opts, Error **errp) 2423 { 2424 BDRVQcow2State *s = bs->opaque; 2425 QCryptoBlockCreateOptions *cryptoopts = NULL; 2426 QCryptoBlock *crypto = NULL; 2427 int ret = -EINVAL; 2428 QDict *options, *encryptopts; 2429 int fmt; 2430 2431 options = qemu_opts_to_qdict(opts, NULL); 2432 qdict_extract_subqdict(options, &encryptopts, "encrypt."); 2433 QDECREF(options); 2434 2435 fmt = qcow2_crypt_method_from_format(encryptfmt); 2436 2437 switch (fmt) { 2438 case QCOW_CRYPT_LUKS: 2439 cryptoopts = block_crypto_create_opts_init( 2440 Q_CRYPTO_BLOCK_FORMAT_LUKS, encryptopts, errp); 2441 break; 2442 case QCOW_CRYPT_AES: 2443 cryptoopts = block_crypto_create_opts_init( 2444 Q_CRYPTO_BLOCK_FORMAT_QCOW, encryptopts, errp); 2445 break; 2446 default: 2447 error_setg(errp, "Unknown encryption format '%s'", encryptfmt); 2448 break; 2449 } 2450 if (!cryptoopts) { 2451 ret = -EINVAL; 2452 goto out; 2453 } 2454 s->crypt_method_header = fmt; 2455 2456 crypto = qcrypto_block_create(cryptoopts, "encrypt.", 2457 qcow2_crypto_hdr_init_func, 2458 qcow2_crypto_hdr_write_func, 2459 bs, errp); 2460 if (!crypto) { 2461 ret = -EINVAL; 2462 goto out; 2463 } 2464 2465 ret = qcow2_update_header(bs); 2466 if (ret < 0) { 2467 error_setg_errno(errp, -ret, "Could not write encryption header"); 2468 goto out; 2469 } 2470 2471 out: 2472 QDECREF(encryptopts); 2473 qcrypto_block_free(crypto); 2474 qapi_free_QCryptoBlockCreateOptions(cryptoopts); 2475 return ret; 2476 } 2477 2478 2479 /** 2480 * Preallocates metadata structures for data clusters between @offset (in the 2481 * guest disk) and @new_length (which is thus generally the new guest disk 2482 * size). 2483 * 2484 * Returns: 0 on success, -errno on failure. 2485 */ 2486 static int preallocate(BlockDriverState *bs, 2487 uint64_t offset, uint64_t new_length) 2488 { 2489 BDRVQcow2State *s = bs->opaque; 2490 uint64_t bytes; 2491 uint64_t host_offset = 0; 2492 unsigned int cur_bytes; 2493 int ret; 2494 QCowL2Meta *meta; 2495 2496 if (qemu_in_coroutine()) { 2497 qemu_co_mutex_lock(&s->lock); 2498 } 2499 2500 assert(offset <= new_length); 2501 bytes = new_length - offset; 2502 2503 while (bytes) { 2504 cur_bytes = MIN(bytes, INT_MAX); 2505 ret = qcow2_alloc_cluster_offset(bs, offset, &cur_bytes, 2506 &host_offset, &meta); 2507 if (ret < 0) { 2508 goto done; 2509 } 2510 2511 while (meta) { 2512 QCowL2Meta *next = meta->next; 2513 2514 ret = qcow2_alloc_cluster_link_l2(bs, meta); 2515 if (ret < 0) { 2516 qcow2_free_any_clusters(bs, meta->alloc_offset, 2517 meta->nb_clusters, QCOW2_DISCARD_NEVER); 2518 goto done; 2519 } 2520 2521 /* There are no dependent requests, but we need to remove our 2522 * request from the list of in-flight requests */ 2523 QLIST_REMOVE(meta, next_in_flight); 2524 2525 g_free(meta); 2526 meta = next; 2527 } 2528 2529 /* TODO Preallocate data if requested */ 2530 2531 bytes -= cur_bytes; 2532 offset += cur_bytes; 2533 } 2534 2535 /* 2536 * It is expected that the image file is large enough to actually contain 2537 * all of the allocated clusters (otherwise we get failing reads after 2538 * EOF). Extend the image to the last allocated sector. 2539 */ 2540 if (host_offset != 0) { 2541 uint8_t data = 0; 2542 ret = bdrv_pwrite(bs->file, (host_offset + cur_bytes) - 1, 2543 &data, 1); 2544 if (ret < 0) { 2545 goto done; 2546 } 2547 } 2548 2549 ret = 0; 2550 2551 done: 2552 if (qemu_in_coroutine()) { 2553 qemu_co_mutex_unlock(&s->lock); 2554 } 2555 return ret; 2556 } 2557 2558 /* qcow2_refcount_metadata_size: 2559 * @clusters: number of clusters to refcount (including data and L1/L2 tables) 2560 * @cluster_size: size of a cluster, in bytes 2561 * @refcount_order: refcount bits power-of-2 exponent 2562 * @generous_increase: allow for the refcount table to be 1.5x as large as it 2563 * needs to be 2564 * 2565 * Returns: Number of bytes required for refcount blocks and table metadata. 2566 */ 2567 int64_t qcow2_refcount_metadata_size(int64_t clusters, size_t cluster_size, 2568 int refcount_order, bool generous_increase, 2569 uint64_t *refblock_count) 2570 { 2571 /* 2572 * Every host cluster is reference-counted, including metadata (even 2573 * refcount metadata is recursively included). 2574 * 2575 * An accurate formula for the size of refcount metadata size is difficult 2576 * to derive. An easier method of calculation is finding the fixed point 2577 * where no further refcount blocks or table clusters are required to 2578 * reference count every cluster. 2579 */ 2580 int64_t blocks_per_table_cluster = cluster_size / sizeof(uint64_t); 2581 int64_t refcounts_per_block = cluster_size * 8 / (1 << refcount_order); 2582 int64_t table = 0; /* number of refcount table clusters */ 2583 int64_t blocks = 0; /* number of refcount block clusters */ 2584 int64_t last; 2585 int64_t n = 0; 2586 2587 do { 2588 last = n; 2589 blocks = DIV_ROUND_UP(clusters + table + blocks, refcounts_per_block); 2590 table = DIV_ROUND_UP(blocks, blocks_per_table_cluster); 2591 n = clusters + blocks + table; 2592 2593 if (n == last && generous_increase) { 2594 clusters += DIV_ROUND_UP(table, 2); 2595 n = 0; /* force another loop */ 2596 generous_increase = false; 2597 } 2598 } while (n != last); 2599 2600 if (refblock_count) { 2601 *refblock_count = blocks; 2602 } 2603 2604 return (blocks + table) * cluster_size; 2605 } 2606 2607 /** 2608 * qcow2_calc_prealloc_size: 2609 * @total_size: virtual disk size in bytes 2610 * @cluster_size: cluster size in bytes 2611 * @refcount_order: refcount bits power-of-2 exponent 2612 * 2613 * Returns: Total number of bytes required for the fully allocated image 2614 * (including metadata). 2615 */ 2616 static int64_t qcow2_calc_prealloc_size(int64_t total_size, 2617 size_t cluster_size, 2618 int refcount_order) 2619 { 2620 int64_t meta_size = 0; 2621 uint64_t nl1e, nl2e; 2622 int64_t aligned_total_size = align_offset(total_size, cluster_size); 2623 2624 /* header: 1 cluster */ 2625 meta_size += cluster_size; 2626 2627 /* total size of L2 tables */ 2628 nl2e = aligned_total_size / cluster_size; 2629 nl2e = align_offset(nl2e, cluster_size / sizeof(uint64_t)); 2630 meta_size += nl2e * sizeof(uint64_t); 2631 2632 /* total size of L1 tables */ 2633 nl1e = nl2e * sizeof(uint64_t) / cluster_size; 2634 nl1e = align_offset(nl1e, cluster_size / sizeof(uint64_t)); 2635 meta_size += nl1e * sizeof(uint64_t); 2636 2637 /* total size of refcount table and blocks */ 2638 meta_size += qcow2_refcount_metadata_size( 2639 (meta_size + aligned_total_size) / cluster_size, 2640 cluster_size, refcount_order, false, NULL); 2641 2642 return meta_size + aligned_total_size; 2643 } 2644 2645 static size_t qcow2_opt_get_cluster_size_del(QemuOpts *opts, Error **errp) 2646 { 2647 size_t cluster_size; 2648 int cluster_bits; 2649 2650 cluster_size = qemu_opt_get_size_del(opts, BLOCK_OPT_CLUSTER_SIZE, 2651 DEFAULT_CLUSTER_SIZE); 2652 cluster_bits = ctz32(cluster_size); 2653 if (cluster_bits < MIN_CLUSTER_BITS || cluster_bits > MAX_CLUSTER_BITS || 2654 (1 << cluster_bits) != cluster_size) 2655 { 2656 error_setg(errp, "Cluster size must be a power of two between %d and " 2657 "%dk", 1 << MIN_CLUSTER_BITS, 1 << (MAX_CLUSTER_BITS - 10)); 2658 return 0; 2659 } 2660 return cluster_size; 2661 } 2662 2663 static int qcow2_opt_get_version_del(QemuOpts *opts, Error **errp) 2664 { 2665 char *buf; 2666 int ret; 2667 2668 buf = qemu_opt_get_del(opts, BLOCK_OPT_COMPAT_LEVEL); 2669 if (!buf) { 2670 ret = 3; /* default */ 2671 } else if (!strcmp(buf, "0.10")) { 2672 ret = 2; 2673 } else if (!strcmp(buf, "1.1")) { 2674 ret = 3; 2675 } else { 2676 error_setg(errp, "Invalid compatibility level: '%s'", buf); 2677 ret = -EINVAL; 2678 } 2679 g_free(buf); 2680 return ret; 2681 } 2682 2683 static uint64_t qcow2_opt_get_refcount_bits_del(QemuOpts *opts, int version, 2684 Error **errp) 2685 { 2686 uint64_t refcount_bits; 2687 2688 refcount_bits = qemu_opt_get_number_del(opts, BLOCK_OPT_REFCOUNT_BITS, 16); 2689 if (refcount_bits > 64 || !is_power_of_2(refcount_bits)) { 2690 error_setg(errp, "Refcount width must be a power of two and may not " 2691 "exceed 64 bits"); 2692 return 0; 2693 } 2694 2695 if (version < 3 && refcount_bits != 16) { 2696 error_setg(errp, "Different refcount widths than 16 bits require " 2697 "compatibility level 1.1 or above (use compat=1.1 or " 2698 "greater)"); 2699 return 0; 2700 } 2701 2702 return refcount_bits; 2703 } 2704 2705 static int qcow2_create2(const char *filename, int64_t total_size, 2706 const char *backing_file, const char *backing_format, 2707 int flags, size_t cluster_size, PreallocMode prealloc, 2708 QemuOpts *opts, int version, int refcount_order, 2709 const char *encryptfmt, Error **errp) 2710 { 2711 QDict *options; 2712 2713 /* 2714 * Open the image file and write a minimal qcow2 header. 2715 * 2716 * We keep things simple and start with a zero-sized image. We also 2717 * do without refcount blocks or a L1 table for now. We'll fix the 2718 * inconsistency later. 2719 * 2720 * We do need a refcount table because growing the refcount table means 2721 * allocating two new refcount blocks - the seconds of which would be at 2722 * 2 GB for 64k clusters, and we don't want to have a 2 GB initial file 2723 * size for any qcow2 image. 2724 */ 2725 BlockBackend *blk; 2726 QCowHeader *header; 2727 uint64_t* refcount_table; 2728 Error *local_err = NULL; 2729 int ret; 2730 2731 if (prealloc == PREALLOC_MODE_FULL || prealloc == PREALLOC_MODE_FALLOC) { 2732 int64_t prealloc_size = 2733 qcow2_calc_prealloc_size(total_size, cluster_size, refcount_order); 2734 qemu_opt_set_number(opts, BLOCK_OPT_SIZE, prealloc_size, &error_abort); 2735 qemu_opt_set(opts, BLOCK_OPT_PREALLOC, PreallocMode_lookup[prealloc], 2736 &error_abort); 2737 } 2738 2739 ret = bdrv_create_file(filename, opts, &local_err); 2740 if (ret < 0) { 2741 error_propagate(errp, local_err); 2742 return ret; 2743 } 2744 2745 blk = blk_new_open(filename, NULL, NULL, 2746 BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, 2747 &local_err); 2748 if (blk == NULL) { 2749 error_propagate(errp, local_err); 2750 return -EIO; 2751 } 2752 2753 blk_set_allow_write_beyond_eof(blk, true); 2754 2755 /* Write the header */ 2756 QEMU_BUILD_BUG_ON((1 << MIN_CLUSTER_BITS) < sizeof(*header)); 2757 header = g_malloc0(cluster_size); 2758 *header = (QCowHeader) { 2759 .magic = cpu_to_be32(QCOW_MAGIC), 2760 .version = cpu_to_be32(version), 2761 .cluster_bits = cpu_to_be32(ctz32(cluster_size)), 2762 .size = cpu_to_be64(0), 2763 .l1_table_offset = cpu_to_be64(0), 2764 .l1_size = cpu_to_be32(0), 2765 .refcount_table_offset = cpu_to_be64(cluster_size), 2766 .refcount_table_clusters = cpu_to_be32(1), 2767 .refcount_order = cpu_to_be32(refcount_order), 2768 .header_length = cpu_to_be32(sizeof(*header)), 2769 }; 2770 2771 /* We'll update this to correct value later */ 2772 header->crypt_method = cpu_to_be32(QCOW_CRYPT_NONE); 2773 2774 if (flags & BLOCK_FLAG_LAZY_REFCOUNTS) { 2775 header->compatible_features |= 2776 cpu_to_be64(QCOW2_COMPAT_LAZY_REFCOUNTS); 2777 } 2778 2779 ret = blk_pwrite(blk, 0, header, cluster_size, 0); 2780 g_free(header); 2781 if (ret < 0) { 2782 error_setg_errno(errp, -ret, "Could not write qcow2 header"); 2783 goto out; 2784 } 2785 2786 /* Write a refcount table with one refcount block */ 2787 refcount_table = g_malloc0(2 * cluster_size); 2788 refcount_table[0] = cpu_to_be64(2 * cluster_size); 2789 ret = blk_pwrite(blk, cluster_size, refcount_table, 2 * cluster_size, 0); 2790 g_free(refcount_table); 2791 2792 if (ret < 0) { 2793 error_setg_errno(errp, -ret, "Could not write refcount table"); 2794 goto out; 2795 } 2796 2797 blk_unref(blk); 2798 blk = NULL; 2799 2800 /* 2801 * And now open the image and make it consistent first (i.e. increase the 2802 * refcount of the cluster that is occupied by the header and the refcount 2803 * table) 2804 */ 2805 options = qdict_new(); 2806 qdict_put_str(options, "driver", "qcow2"); 2807 blk = blk_new_open(filename, NULL, options, 2808 BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_NO_FLUSH, 2809 &local_err); 2810 if (blk == NULL) { 2811 error_propagate(errp, local_err); 2812 ret = -EIO; 2813 goto out; 2814 } 2815 2816 ret = qcow2_alloc_clusters(blk_bs(blk), 3 * cluster_size); 2817 if (ret < 0) { 2818 error_setg_errno(errp, -ret, "Could not allocate clusters for qcow2 " 2819 "header and refcount table"); 2820 goto out; 2821 2822 } else if (ret != 0) { 2823 error_report("Huh, first cluster in empty image is already in use?"); 2824 abort(); 2825 } 2826 2827 /* Create a full header (including things like feature table) */ 2828 ret = qcow2_update_header(blk_bs(blk)); 2829 if (ret < 0) { 2830 error_setg_errno(errp, -ret, "Could not update qcow2 header"); 2831 goto out; 2832 } 2833 2834 /* Okay, now that we have a valid image, let's give it the right size */ 2835 ret = blk_truncate(blk, total_size, PREALLOC_MODE_OFF, errp); 2836 if (ret < 0) { 2837 error_prepend(errp, "Could not resize image: "); 2838 goto out; 2839 } 2840 2841 /* Want a backing file? There you go.*/ 2842 if (backing_file) { 2843 ret = bdrv_change_backing_file(blk_bs(blk), backing_file, backing_format); 2844 if (ret < 0) { 2845 error_setg_errno(errp, -ret, "Could not assign backing file '%s' " 2846 "with format '%s'", backing_file, backing_format); 2847 goto out; 2848 } 2849 } 2850 2851 /* Want encryption? There you go. */ 2852 if (encryptfmt) { 2853 ret = qcow2_set_up_encryption(blk_bs(blk), encryptfmt, opts, errp); 2854 if (ret < 0) { 2855 goto out; 2856 } 2857 } 2858 2859 /* And if we're supposed to preallocate metadata, do that now */ 2860 if (prealloc != PREALLOC_MODE_OFF) { 2861 ret = preallocate(blk_bs(blk), 0, total_size); 2862 if (ret < 0) { 2863 error_setg_errno(errp, -ret, "Could not preallocate metadata"); 2864 goto out; 2865 } 2866 } 2867 2868 blk_unref(blk); 2869 blk = NULL; 2870 2871 /* Reopen the image without BDRV_O_NO_FLUSH to flush it before returning. 2872 * Using BDRV_O_NO_IO, since encryption is now setup we don't want to 2873 * have to setup decryption context. We're not doing any I/O on the top 2874 * level BlockDriverState, only lower layers, where BDRV_O_NO_IO does 2875 * not have effect. 2876 */ 2877 options = qdict_new(); 2878 qdict_put_str(options, "driver", "qcow2"); 2879 blk = blk_new_open(filename, NULL, options, 2880 BDRV_O_RDWR | BDRV_O_NO_BACKING | BDRV_O_NO_IO, 2881 &local_err); 2882 if (blk == NULL) { 2883 error_propagate(errp, local_err); 2884 ret = -EIO; 2885 goto out; 2886 } 2887 2888 ret = 0; 2889 out: 2890 if (blk) { 2891 blk_unref(blk); 2892 } 2893 return ret; 2894 } 2895 2896 static int qcow2_create(const char *filename, QemuOpts *opts, Error **errp) 2897 { 2898 char *backing_file = NULL; 2899 char *backing_fmt = NULL; 2900 char *buf = NULL; 2901 uint64_t size = 0; 2902 int flags = 0; 2903 size_t cluster_size = DEFAULT_CLUSTER_SIZE; 2904 PreallocMode prealloc; 2905 int version; 2906 uint64_t refcount_bits; 2907 int refcount_order; 2908 char *encryptfmt = NULL; 2909 Error *local_err = NULL; 2910 int ret; 2911 2912 /* Read out options */ 2913 size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0), 2914 BDRV_SECTOR_SIZE); 2915 backing_file = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE); 2916 backing_fmt = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FMT); 2917 encryptfmt = qemu_opt_get_del(opts, BLOCK_OPT_ENCRYPT_FORMAT); 2918 if (encryptfmt) { 2919 if (qemu_opt_get(opts, BLOCK_OPT_ENCRYPT)) { 2920 error_setg(errp, "Options " BLOCK_OPT_ENCRYPT " and " 2921 BLOCK_OPT_ENCRYPT_FORMAT " are mutually exclusive"); 2922 ret = -EINVAL; 2923 goto finish; 2924 } 2925 } else if (qemu_opt_get_bool_del(opts, BLOCK_OPT_ENCRYPT, false)) { 2926 encryptfmt = g_strdup("aes"); 2927 } 2928 cluster_size = qcow2_opt_get_cluster_size_del(opts, &local_err); 2929 if (local_err) { 2930 error_propagate(errp, local_err); 2931 ret = -EINVAL; 2932 goto finish; 2933 } 2934 buf = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC); 2935 prealloc = qapi_enum_parse(PreallocMode_lookup, buf, 2936 PREALLOC_MODE__MAX, PREALLOC_MODE_OFF, 2937 &local_err); 2938 if (local_err) { 2939 error_propagate(errp, local_err); 2940 ret = -EINVAL; 2941 goto finish; 2942 } 2943 2944 version = qcow2_opt_get_version_del(opts, &local_err); 2945 if (local_err) { 2946 error_propagate(errp, local_err); 2947 ret = -EINVAL; 2948 goto finish; 2949 } 2950 2951 if (qemu_opt_get_bool_del(opts, BLOCK_OPT_LAZY_REFCOUNTS, false)) { 2952 flags |= BLOCK_FLAG_LAZY_REFCOUNTS; 2953 } 2954 2955 if (backing_file && prealloc != PREALLOC_MODE_OFF) { 2956 error_setg(errp, "Backing file and preallocation cannot be used at " 2957 "the same time"); 2958 ret = -EINVAL; 2959 goto finish; 2960 } 2961 2962 if (version < 3 && (flags & BLOCK_FLAG_LAZY_REFCOUNTS)) { 2963 error_setg(errp, "Lazy refcounts only supported with compatibility " 2964 "level 1.1 and above (use compat=1.1 or greater)"); 2965 ret = -EINVAL; 2966 goto finish; 2967 } 2968 2969 refcount_bits = qcow2_opt_get_refcount_bits_del(opts, version, &local_err); 2970 if (local_err) { 2971 error_propagate(errp, local_err); 2972 ret = -EINVAL; 2973 goto finish; 2974 } 2975 2976 refcount_order = ctz32(refcount_bits); 2977 2978 ret = qcow2_create2(filename, size, backing_file, backing_fmt, flags, 2979 cluster_size, prealloc, opts, version, refcount_order, 2980 encryptfmt, &local_err); 2981 error_propagate(errp, local_err); 2982 2983 finish: 2984 g_free(backing_file); 2985 g_free(backing_fmt); 2986 g_free(encryptfmt); 2987 g_free(buf); 2988 return ret; 2989 } 2990 2991 2992 static bool is_zero_sectors(BlockDriverState *bs, int64_t start, 2993 uint32_t count) 2994 { 2995 int nr; 2996 BlockDriverState *file; 2997 int64_t res; 2998 2999 if (start + count > bs->total_sectors) { 3000 count = bs->total_sectors - start; 3001 } 3002 3003 if (!count) { 3004 return true; 3005 } 3006 res = bdrv_get_block_status_above(bs, NULL, start, count, 3007 &nr, &file); 3008 return res >= 0 && (res & BDRV_BLOCK_ZERO) && nr == count; 3009 } 3010 3011 static coroutine_fn int qcow2_co_pwrite_zeroes(BlockDriverState *bs, 3012 int64_t offset, int bytes, BdrvRequestFlags flags) 3013 { 3014 int ret; 3015 BDRVQcow2State *s = bs->opaque; 3016 3017 uint32_t head = offset % s->cluster_size; 3018 uint32_t tail = (offset + bytes) % s->cluster_size; 3019 3020 trace_qcow2_pwrite_zeroes_start_req(qemu_coroutine_self(), offset, bytes); 3021 if (offset + bytes == bs->total_sectors * BDRV_SECTOR_SIZE) { 3022 tail = 0; 3023 } 3024 3025 if (head || tail) { 3026 int64_t cl_start = (offset - head) >> BDRV_SECTOR_BITS; 3027 uint64_t off; 3028 unsigned int nr; 3029 3030 assert(head + bytes <= s->cluster_size); 3031 3032 /* check whether remainder of cluster already reads as zero */ 3033 if (!(is_zero_sectors(bs, cl_start, 3034 DIV_ROUND_UP(head, BDRV_SECTOR_SIZE)) && 3035 is_zero_sectors(bs, (offset + bytes) >> BDRV_SECTOR_BITS, 3036 DIV_ROUND_UP(-tail & (s->cluster_size - 1), 3037 BDRV_SECTOR_SIZE)))) { 3038 return -ENOTSUP; 3039 } 3040 3041 qemu_co_mutex_lock(&s->lock); 3042 /* We can have new write after previous check */ 3043 offset = cl_start << BDRV_SECTOR_BITS; 3044 bytes = s->cluster_size; 3045 nr = s->cluster_size; 3046 ret = qcow2_get_cluster_offset(bs, offset, &nr, &off); 3047 if (ret != QCOW2_CLUSTER_UNALLOCATED && 3048 ret != QCOW2_CLUSTER_ZERO_PLAIN && 3049 ret != QCOW2_CLUSTER_ZERO_ALLOC) { 3050 qemu_co_mutex_unlock(&s->lock); 3051 return -ENOTSUP; 3052 } 3053 } else { 3054 qemu_co_mutex_lock(&s->lock); 3055 } 3056 3057 trace_qcow2_pwrite_zeroes(qemu_coroutine_self(), offset, bytes); 3058 3059 /* Whatever is left can use real zero clusters */ 3060 ret = qcow2_cluster_zeroize(bs, offset, bytes, flags); 3061 qemu_co_mutex_unlock(&s->lock); 3062 3063 return ret; 3064 } 3065 3066 static coroutine_fn int qcow2_co_pdiscard(BlockDriverState *bs, 3067 int64_t offset, int bytes) 3068 { 3069 int ret; 3070 BDRVQcow2State *s = bs->opaque; 3071 3072 if (!QEMU_IS_ALIGNED(offset | bytes, s->cluster_size)) { 3073 assert(bytes < s->cluster_size); 3074 /* Ignore partial clusters, except for the special case of the 3075 * complete partial cluster at the end of an unaligned file */ 3076 if (!QEMU_IS_ALIGNED(offset, s->cluster_size) || 3077 offset + bytes != bs->total_sectors * BDRV_SECTOR_SIZE) { 3078 return -ENOTSUP; 3079 } 3080 } 3081 3082 qemu_co_mutex_lock(&s->lock); 3083 ret = qcow2_cluster_discard(bs, offset, bytes, QCOW2_DISCARD_REQUEST, 3084 false); 3085 qemu_co_mutex_unlock(&s->lock); 3086 return ret; 3087 } 3088 3089 static int qcow2_truncate(BlockDriverState *bs, int64_t offset, 3090 PreallocMode prealloc, Error **errp) 3091 { 3092 BDRVQcow2State *s = bs->opaque; 3093 uint64_t old_length; 3094 int64_t new_l1_size; 3095 int ret; 3096 3097 if (prealloc != PREALLOC_MODE_OFF && prealloc != PREALLOC_MODE_METADATA && 3098 prealloc != PREALLOC_MODE_FALLOC && prealloc != PREALLOC_MODE_FULL) 3099 { 3100 error_setg(errp, "Unsupported preallocation mode '%s'", 3101 PreallocMode_lookup[prealloc]); 3102 return -ENOTSUP; 3103 } 3104 3105 if (offset & 511) { 3106 error_setg(errp, "The new size must be a multiple of 512"); 3107 return -EINVAL; 3108 } 3109 3110 /* cannot proceed if image has snapshots */ 3111 if (s->nb_snapshots) { 3112 error_setg(errp, "Can't resize an image which has snapshots"); 3113 return -ENOTSUP; 3114 } 3115 3116 /* cannot proceed if image has bitmaps */ 3117 if (s->nb_bitmaps) { 3118 /* TODO: resize bitmaps in the image */ 3119 error_setg(errp, "Can't resize an image which has bitmaps"); 3120 return -ENOTSUP; 3121 } 3122 3123 old_length = bs->total_sectors * 512; 3124 3125 /* shrinking is currently not supported */ 3126 if (offset < old_length) { 3127 error_setg(errp, "qcow2 doesn't support shrinking images yet"); 3128 return -ENOTSUP; 3129 } 3130 3131 new_l1_size = size_to_l1(s, offset); 3132 ret = qcow2_grow_l1_table(bs, new_l1_size, true); 3133 if (ret < 0) { 3134 error_setg_errno(errp, -ret, "Failed to grow the L1 table"); 3135 return ret; 3136 } 3137 3138 switch (prealloc) { 3139 case PREALLOC_MODE_OFF: 3140 break; 3141 3142 case PREALLOC_MODE_METADATA: 3143 ret = preallocate(bs, old_length, offset); 3144 if (ret < 0) { 3145 error_setg_errno(errp, -ret, "Preallocation failed"); 3146 return ret; 3147 } 3148 break; 3149 3150 case PREALLOC_MODE_FALLOC: 3151 case PREALLOC_MODE_FULL: 3152 { 3153 int64_t allocation_start, host_offset, guest_offset; 3154 int64_t clusters_allocated; 3155 int64_t old_file_size, new_file_size; 3156 uint64_t nb_new_data_clusters, nb_new_l2_tables; 3157 3158 old_file_size = bdrv_getlength(bs->file->bs); 3159 if (old_file_size < 0) { 3160 error_setg_errno(errp, -old_file_size, 3161 "Failed to inquire current file length"); 3162 return ret; 3163 } 3164 3165 nb_new_data_clusters = DIV_ROUND_UP(offset - old_length, 3166 s->cluster_size); 3167 3168 /* This is an overestimation; we will not actually allocate space for 3169 * these in the file but just make sure the new refcount structures are 3170 * able to cover them so we will not have to allocate new refblocks 3171 * while entering the data blocks in the potentially new L2 tables. 3172 * (We do not actually care where the L2 tables are placed. Maybe they 3173 * are already allocated or they can be placed somewhere before 3174 * @old_file_size. It does not matter because they will be fully 3175 * allocated automatically, so they do not need to be covered by the 3176 * preallocation. All that matters is that we will not have to allocate 3177 * new refcount structures for them.) */ 3178 nb_new_l2_tables = DIV_ROUND_UP(nb_new_data_clusters, 3179 s->cluster_size / sizeof(uint64_t)); 3180 /* The cluster range may not be aligned to L2 boundaries, so add one L2 3181 * table for a potential head/tail */ 3182 nb_new_l2_tables++; 3183 3184 allocation_start = qcow2_refcount_area(bs, old_file_size, 3185 nb_new_data_clusters + 3186 nb_new_l2_tables, 3187 true, 0, 0); 3188 if (allocation_start < 0) { 3189 error_setg_errno(errp, -allocation_start, 3190 "Failed to resize refcount structures"); 3191 return -allocation_start; 3192 } 3193 3194 clusters_allocated = qcow2_alloc_clusters_at(bs, allocation_start, 3195 nb_new_data_clusters); 3196 if (clusters_allocated < 0) { 3197 error_setg_errno(errp, -clusters_allocated, 3198 "Failed to allocate data clusters"); 3199 return -clusters_allocated; 3200 } 3201 3202 assert(clusters_allocated == nb_new_data_clusters); 3203 3204 /* Allocate the data area */ 3205 new_file_size = allocation_start + 3206 nb_new_data_clusters * s->cluster_size; 3207 ret = bdrv_truncate(bs->file, new_file_size, prealloc, errp); 3208 if (ret < 0) { 3209 error_prepend(errp, "Failed to resize underlying file: "); 3210 qcow2_free_clusters(bs, allocation_start, 3211 nb_new_data_clusters * s->cluster_size, 3212 QCOW2_DISCARD_OTHER); 3213 return ret; 3214 } 3215 3216 /* Create the necessary L2 entries */ 3217 host_offset = allocation_start; 3218 guest_offset = old_length; 3219 while (nb_new_data_clusters) { 3220 int64_t guest_cluster = guest_offset >> s->cluster_bits; 3221 int64_t nb_clusters = MIN(nb_new_data_clusters, 3222 s->l2_size - guest_cluster % s->l2_size); 3223 QCowL2Meta allocation = { 3224 .offset = guest_offset, 3225 .alloc_offset = host_offset, 3226 .nb_clusters = nb_clusters, 3227 }; 3228 qemu_co_queue_init(&allocation.dependent_requests); 3229 3230 ret = qcow2_alloc_cluster_link_l2(bs, &allocation); 3231 if (ret < 0) { 3232 error_setg_errno(errp, -ret, "Failed to update L2 tables"); 3233 qcow2_free_clusters(bs, host_offset, 3234 nb_new_data_clusters * s->cluster_size, 3235 QCOW2_DISCARD_OTHER); 3236 return ret; 3237 } 3238 3239 guest_offset += nb_clusters * s->cluster_size; 3240 host_offset += nb_clusters * s->cluster_size; 3241 nb_new_data_clusters -= nb_clusters; 3242 } 3243 break; 3244 } 3245 3246 default: 3247 g_assert_not_reached(); 3248 } 3249 3250 if (prealloc != PREALLOC_MODE_OFF) { 3251 /* Flush metadata before actually changing the image size */ 3252 ret = bdrv_flush(bs); 3253 if (ret < 0) { 3254 error_setg_errno(errp, -ret, 3255 "Failed to flush the preallocated area to disk"); 3256 return ret; 3257 } 3258 } 3259 3260 /* write updated header.size */ 3261 offset = cpu_to_be64(offset); 3262 ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, size), 3263 &offset, sizeof(uint64_t)); 3264 if (ret < 0) { 3265 error_setg_errno(errp, -ret, "Failed to update the image size"); 3266 return ret; 3267 } 3268 3269 s->l1_vm_state_index = new_l1_size; 3270 return 0; 3271 } 3272 3273 /* XXX: put compressed sectors first, then all the cluster aligned 3274 tables to avoid losing bytes in alignment */ 3275 static coroutine_fn int 3276 qcow2_co_pwritev_compressed(BlockDriverState *bs, uint64_t offset, 3277 uint64_t bytes, QEMUIOVector *qiov) 3278 { 3279 BDRVQcow2State *s = bs->opaque; 3280 QEMUIOVector hd_qiov; 3281 struct iovec iov; 3282 z_stream strm; 3283 int ret, out_len; 3284 uint8_t *buf, *out_buf; 3285 uint64_t cluster_offset; 3286 3287 if (bytes == 0) { 3288 /* align end of file to a sector boundary to ease reading with 3289 sector based I/Os */ 3290 cluster_offset = bdrv_getlength(bs->file->bs); 3291 return bdrv_truncate(bs->file, cluster_offset, PREALLOC_MODE_OFF, NULL); 3292 } 3293 3294 buf = qemu_blockalign(bs, s->cluster_size); 3295 if (bytes != s->cluster_size) { 3296 if (bytes > s->cluster_size || 3297 offset + bytes != bs->total_sectors << BDRV_SECTOR_BITS) 3298 { 3299 qemu_vfree(buf); 3300 return -EINVAL; 3301 } 3302 /* Zero-pad last write if image size is not cluster aligned */ 3303 memset(buf + bytes, 0, s->cluster_size - bytes); 3304 } 3305 qemu_iovec_to_buf(qiov, 0, buf, bytes); 3306 3307 out_buf = g_malloc(s->cluster_size); 3308 3309 /* best compression, small window, no zlib header */ 3310 memset(&strm, 0, sizeof(strm)); 3311 ret = deflateInit2(&strm, Z_DEFAULT_COMPRESSION, 3312 Z_DEFLATED, -12, 3313 9, Z_DEFAULT_STRATEGY); 3314 if (ret != 0) { 3315 ret = -EINVAL; 3316 goto fail; 3317 } 3318 3319 strm.avail_in = s->cluster_size; 3320 strm.next_in = (uint8_t *)buf; 3321 strm.avail_out = s->cluster_size; 3322 strm.next_out = out_buf; 3323 3324 ret = deflate(&strm, Z_FINISH); 3325 if (ret != Z_STREAM_END && ret != Z_OK) { 3326 deflateEnd(&strm); 3327 ret = -EINVAL; 3328 goto fail; 3329 } 3330 out_len = strm.next_out - out_buf; 3331 3332 deflateEnd(&strm); 3333 3334 if (ret != Z_STREAM_END || out_len >= s->cluster_size) { 3335 /* could not compress: write normal cluster */ 3336 ret = qcow2_co_pwritev(bs, offset, bytes, qiov, 0); 3337 if (ret < 0) { 3338 goto fail; 3339 } 3340 goto success; 3341 } 3342 3343 qemu_co_mutex_lock(&s->lock); 3344 cluster_offset = 3345 qcow2_alloc_compressed_cluster_offset(bs, offset, out_len); 3346 if (!cluster_offset) { 3347 qemu_co_mutex_unlock(&s->lock); 3348 ret = -EIO; 3349 goto fail; 3350 } 3351 cluster_offset &= s->cluster_offset_mask; 3352 3353 ret = qcow2_pre_write_overlap_check(bs, 0, cluster_offset, out_len); 3354 qemu_co_mutex_unlock(&s->lock); 3355 if (ret < 0) { 3356 goto fail; 3357 } 3358 3359 iov = (struct iovec) { 3360 .iov_base = out_buf, 3361 .iov_len = out_len, 3362 }; 3363 qemu_iovec_init_external(&hd_qiov, &iov, 1); 3364 3365 BLKDBG_EVENT(bs->file, BLKDBG_WRITE_COMPRESSED); 3366 ret = bdrv_co_pwritev(bs->file, cluster_offset, out_len, &hd_qiov, 0); 3367 if (ret < 0) { 3368 goto fail; 3369 } 3370 success: 3371 ret = 0; 3372 fail: 3373 qemu_vfree(buf); 3374 g_free(out_buf); 3375 return ret; 3376 } 3377 3378 static int make_completely_empty(BlockDriverState *bs) 3379 { 3380 BDRVQcow2State *s = bs->opaque; 3381 Error *local_err = NULL; 3382 int ret, l1_clusters; 3383 int64_t offset; 3384 uint64_t *new_reftable = NULL; 3385 uint64_t rt_entry, l1_size2; 3386 struct { 3387 uint64_t l1_offset; 3388 uint64_t reftable_offset; 3389 uint32_t reftable_clusters; 3390 } QEMU_PACKED l1_ofs_rt_ofs_cls; 3391 3392 ret = qcow2_cache_empty(bs, s->l2_table_cache); 3393 if (ret < 0) { 3394 goto fail; 3395 } 3396 3397 ret = qcow2_cache_empty(bs, s->refcount_block_cache); 3398 if (ret < 0) { 3399 goto fail; 3400 } 3401 3402 /* Refcounts will be broken utterly */ 3403 ret = qcow2_mark_dirty(bs); 3404 if (ret < 0) { 3405 goto fail; 3406 } 3407 3408 BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE); 3409 3410 l1_clusters = DIV_ROUND_UP(s->l1_size, s->cluster_size / sizeof(uint64_t)); 3411 l1_size2 = (uint64_t)s->l1_size * sizeof(uint64_t); 3412 3413 /* After this call, neither the in-memory nor the on-disk refcount 3414 * information accurately describe the actual references */ 3415 3416 ret = bdrv_pwrite_zeroes(bs->file, s->l1_table_offset, 3417 l1_clusters * s->cluster_size, 0); 3418 if (ret < 0) { 3419 goto fail_broken_refcounts; 3420 } 3421 memset(s->l1_table, 0, l1_size2); 3422 3423 BLKDBG_EVENT(bs->file, BLKDBG_EMPTY_IMAGE_PREPARE); 3424 3425 /* Overwrite enough clusters at the beginning of the sectors to place 3426 * the refcount table, a refcount block and the L1 table in; this may 3427 * overwrite parts of the existing refcount and L1 table, which is not 3428 * an issue because the dirty flag is set, complete data loss is in fact 3429 * desired and partial data loss is consequently fine as well */ 3430 ret = bdrv_pwrite_zeroes(bs->file, s->cluster_size, 3431 (2 + l1_clusters) * s->cluster_size, 0); 3432 /* This call (even if it failed overall) may have overwritten on-disk 3433 * refcount structures; in that case, the in-memory refcount information 3434 * will probably differ from the on-disk information which makes the BDS 3435 * unusable */ 3436 if (ret < 0) { 3437 goto fail_broken_refcounts; 3438 } 3439 3440 BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE); 3441 BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_UPDATE); 3442 3443 /* "Create" an empty reftable (one cluster) directly after the image 3444 * header and an empty L1 table three clusters after the image header; 3445 * the cluster between those two will be used as the first refblock */ 3446 l1_ofs_rt_ofs_cls.l1_offset = cpu_to_be64(3 * s->cluster_size); 3447 l1_ofs_rt_ofs_cls.reftable_offset = cpu_to_be64(s->cluster_size); 3448 l1_ofs_rt_ofs_cls.reftable_clusters = cpu_to_be32(1); 3449 ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_table_offset), 3450 &l1_ofs_rt_ofs_cls, sizeof(l1_ofs_rt_ofs_cls)); 3451 if (ret < 0) { 3452 goto fail_broken_refcounts; 3453 } 3454 3455 s->l1_table_offset = 3 * s->cluster_size; 3456 3457 new_reftable = g_try_new0(uint64_t, s->cluster_size / sizeof(uint64_t)); 3458 if (!new_reftable) { 3459 ret = -ENOMEM; 3460 goto fail_broken_refcounts; 3461 } 3462 3463 s->refcount_table_offset = s->cluster_size; 3464 s->refcount_table_size = s->cluster_size / sizeof(uint64_t); 3465 s->max_refcount_table_index = 0; 3466 3467 g_free(s->refcount_table); 3468 s->refcount_table = new_reftable; 3469 new_reftable = NULL; 3470 3471 /* Now the in-memory refcount information again corresponds to the on-disk 3472 * information (reftable is empty and no refblocks (the refblock cache is 3473 * empty)); however, this means some clusters (e.g. the image header) are 3474 * referenced, but not refcounted, but the normal qcow2 code assumes that 3475 * the in-memory information is always correct */ 3476 3477 BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC); 3478 3479 /* Enter the first refblock into the reftable */ 3480 rt_entry = cpu_to_be64(2 * s->cluster_size); 3481 ret = bdrv_pwrite_sync(bs->file, s->cluster_size, 3482 &rt_entry, sizeof(rt_entry)); 3483 if (ret < 0) { 3484 goto fail_broken_refcounts; 3485 } 3486 s->refcount_table[0] = 2 * s->cluster_size; 3487 3488 s->free_cluster_index = 0; 3489 assert(3 + l1_clusters <= s->refcount_block_size); 3490 offset = qcow2_alloc_clusters(bs, 3 * s->cluster_size + l1_size2); 3491 if (offset < 0) { 3492 ret = offset; 3493 goto fail_broken_refcounts; 3494 } else if (offset > 0) { 3495 error_report("First cluster in emptied image is in use"); 3496 abort(); 3497 } 3498 3499 /* Now finally the in-memory information corresponds to the on-disk 3500 * structures and is correct */ 3501 ret = qcow2_mark_clean(bs); 3502 if (ret < 0) { 3503 goto fail; 3504 } 3505 3506 ret = bdrv_truncate(bs->file, (3 + l1_clusters) * s->cluster_size, 3507 PREALLOC_MODE_OFF, &local_err); 3508 if (ret < 0) { 3509 error_report_err(local_err); 3510 goto fail; 3511 } 3512 3513 return 0; 3514 3515 fail_broken_refcounts: 3516 /* The BDS is unusable at this point. If we wanted to make it usable, we 3517 * would have to call qcow2_refcount_close(), qcow2_refcount_init(), 3518 * qcow2_check_refcounts(), qcow2_refcount_close() and qcow2_refcount_init() 3519 * again. However, because the functions which could have caused this error 3520 * path to be taken are used by those functions as well, it's very likely 3521 * that that sequence will fail as well. Therefore, just eject the BDS. */ 3522 bs->drv = NULL; 3523 3524 fail: 3525 g_free(new_reftable); 3526 return ret; 3527 } 3528 3529 static int qcow2_make_empty(BlockDriverState *bs) 3530 { 3531 BDRVQcow2State *s = bs->opaque; 3532 uint64_t offset, end_offset; 3533 int step = QEMU_ALIGN_DOWN(INT_MAX, s->cluster_size); 3534 int l1_clusters, ret = 0; 3535 3536 l1_clusters = DIV_ROUND_UP(s->l1_size, s->cluster_size / sizeof(uint64_t)); 3537 3538 if (s->qcow_version >= 3 && !s->snapshots && 3539 3 + l1_clusters <= s->refcount_block_size) { 3540 /* The following function only works for qcow2 v3 images (it requires 3541 * the dirty flag) and only as long as there are no snapshots (because 3542 * it completely empties the image). Furthermore, the L1 table and three 3543 * additional clusters (image header, refcount table, one refcount 3544 * block) have to fit inside one refcount block. */ 3545 return make_completely_empty(bs); 3546 } 3547 3548 /* This fallback code simply discards every active cluster; this is slow, 3549 * but works in all cases */ 3550 end_offset = bs->total_sectors * BDRV_SECTOR_SIZE; 3551 for (offset = 0; offset < end_offset; offset += step) { 3552 /* As this function is generally used after committing an external 3553 * snapshot, QCOW2_DISCARD_SNAPSHOT seems appropriate. Also, the 3554 * default action for this kind of discard is to pass the discard, 3555 * which will ideally result in an actually smaller image file, as 3556 * is probably desired. */ 3557 ret = qcow2_cluster_discard(bs, offset, MIN(step, end_offset - offset), 3558 QCOW2_DISCARD_SNAPSHOT, true); 3559 if (ret < 0) { 3560 break; 3561 } 3562 } 3563 3564 return ret; 3565 } 3566 3567 static coroutine_fn int qcow2_co_flush_to_os(BlockDriverState *bs) 3568 { 3569 BDRVQcow2State *s = bs->opaque; 3570 int ret; 3571 3572 qemu_co_mutex_lock(&s->lock); 3573 ret = qcow2_cache_write(bs, s->l2_table_cache); 3574 if (ret < 0) { 3575 qemu_co_mutex_unlock(&s->lock); 3576 return ret; 3577 } 3578 3579 if (qcow2_need_accurate_refcounts(s)) { 3580 ret = qcow2_cache_write(bs, s->refcount_block_cache); 3581 if (ret < 0) { 3582 qemu_co_mutex_unlock(&s->lock); 3583 return ret; 3584 } 3585 } 3586 qemu_co_mutex_unlock(&s->lock); 3587 3588 return 0; 3589 } 3590 3591 static BlockMeasureInfo *qcow2_measure(QemuOpts *opts, BlockDriverState *in_bs, 3592 Error **errp) 3593 { 3594 Error *local_err = NULL; 3595 BlockMeasureInfo *info; 3596 uint64_t required = 0; /* bytes that contribute to required size */ 3597 uint64_t virtual_size; /* disk size as seen by guest */ 3598 uint64_t refcount_bits; 3599 uint64_t l2_tables; 3600 size_t cluster_size; 3601 int version; 3602 char *optstr; 3603 PreallocMode prealloc; 3604 bool has_backing_file; 3605 3606 /* Parse image creation options */ 3607 cluster_size = qcow2_opt_get_cluster_size_del(opts, &local_err); 3608 if (local_err) { 3609 goto err; 3610 } 3611 3612 version = qcow2_opt_get_version_del(opts, &local_err); 3613 if (local_err) { 3614 goto err; 3615 } 3616 3617 refcount_bits = qcow2_opt_get_refcount_bits_del(opts, version, &local_err); 3618 if (local_err) { 3619 goto err; 3620 } 3621 3622 optstr = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC); 3623 prealloc = qapi_enum_parse(PreallocMode_lookup, optstr, 3624 PREALLOC_MODE__MAX, PREALLOC_MODE_OFF, 3625 &local_err); 3626 g_free(optstr); 3627 if (local_err) { 3628 goto err; 3629 } 3630 3631 optstr = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE); 3632 has_backing_file = !!optstr; 3633 g_free(optstr); 3634 3635 virtual_size = align_offset(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0), 3636 cluster_size); 3637 3638 /* Check that virtual disk size is valid */ 3639 l2_tables = DIV_ROUND_UP(virtual_size / cluster_size, 3640 cluster_size / sizeof(uint64_t)); 3641 if (l2_tables * sizeof(uint64_t) > QCOW_MAX_L1_SIZE) { 3642 error_setg(&local_err, "The image size is too large " 3643 "(try using a larger cluster size)"); 3644 goto err; 3645 } 3646 3647 /* Account for input image */ 3648 if (in_bs) { 3649 int64_t ssize = bdrv_getlength(in_bs); 3650 if (ssize < 0) { 3651 error_setg_errno(&local_err, -ssize, 3652 "Unable to get image virtual_size"); 3653 goto err; 3654 } 3655 3656 virtual_size = align_offset(ssize, cluster_size); 3657 3658 if (has_backing_file) { 3659 /* We don't how much of the backing chain is shared by the input 3660 * image and the new image file. In the worst case the new image's 3661 * backing file has nothing in common with the input image. Be 3662 * conservative and assume all clusters need to be written. 3663 */ 3664 required = virtual_size; 3665 } else { 3666 int cluster_sectors = cluster_size / BDRV_SECTOR_SIZE; 3667 int64_t sector_num; 3668 int pnum = 0; 3669 3670 for (sector_num = 0; 3671 sector_num < ssize / BDRV_SECTOR_SIZE; 3672 sector_num += pnum) { 3673 int nb_sectors = MIN(ssize / BDRV_SECTOR_SIZE - sector_num, 3674 BDRV_REQUEST_MAX_SECTORS); 3675 BlockDriverState *file; 3676 int64_t ret; 3677 3678 ret = bdrv_get_block_status_above(in_bs, NULL, 3679 sector_num, nb_sectors, 3680 &pnum, &file); 3681 if (ret < 0) { 3682 error_setg_errno(&local_err, -ret, 3683 "Unable to get block status"); 3684 goto err; 3685 } 3686 3687 if (ret & BDRV_BLOCK_ZERO) { 3688 /* Skip zero regions (safe with no backing file) */ 3689 } else if ((ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED)) == 3690 (BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED)) { 3691 /* Extend pnum to end of cluster for next iteration */ 3692 pnum = ROUND_UP(sector_num + pnum, cluster_sectors) - 3693 sector_num; 3694 3695 /* Count clusters we've seen */ 3696 required += (sector_num % cluster_sectors + pnum) * 3697 BDRV_SECTOR_SIZE; 3698 } 3699 } 3700 } 3701 } 3702 3703 /* Take into account preallocation. Nothing special is needed for 3704 * PREALLOC_MODE_METADATA since metadata is always counted. 3705 */ 3706 if (prealloc == PREALLOC_MODE_FULL || prealloc == PREALLOC_MODE_FALLOC) { 3707 required = virtual_size; 3708 } 3709 3710 info = g_new(BlockMeasureInfo, 1); 3711 info->fully_allocated = 3712 qcow2_calc_prealloc_size(virtual_size, cluster_size, 3713 ctz32(refcount_bits)); 3714 3715 /* Remove data clusters that are not required. This overestimates the 3716 * required size because metadata needed for the fully allocated file is 3717 * still counted. 3718 */ 3719 info->required = info->fully_allocated - virtual_size + required; 3720 return info; 3721 3722 err: 3723 error_propagate(errp, local_err); 3724 return NULL; 3725 } 3726 3727 static int qcow2_get_info(BlockDriverState *bs, BlockDriverInfo *bdi) 3728 { 3729 BDRVQcow2State *s = bs->opaque; 3730 bdi->unallocated_blocks_are_zero = true; 3731 bdi->can_write_zeroes_with_unmap = (s->qcow_version >= 3); 3732 bdi->cluster_size = s->cluster_size; 3733 bdi->vm_state_offset = qcow2_vm_state_offset(s); 3734 return 0; 3735 } 3736 3737 static ImageInfoSpecific *qcow2_get_specific_info(BlockDriverState *bs) 3738 { 3739 BDRVQcow2State *s = bs->opaque; 3740 ImageInfoSpecific *spec_info; 3741 QCryptoBlockInfo *encrypt_info = NULL; 3742 3743 if (s->crypto != NULL) { 3744 encrypt_info = qcrypto_block_get_info(s->crypto, &error_abort); 3745 } 3746 3747 spec_info = g_new(ImageInfoSpecific, 1); 3748 *spec_info = (ImageInfoSpecific){ 3749 .type = IMAGE_INFO_SPECIFIC_KIND_QCOW2, 3750 .u.qcow2.data = g_new(ImageInfoSpecificQCow2, 1), 3751 }; 3752 if (s->qcow_version == 2) { 3753 *spec_info->u.qcow2.data = (ImageInfoSpecificQCow2){ 3754 .compat = g_strdup("0.10"), 3755 .refcount_bits = s->refcount_bits, 3756 }; 3757 } else if (s->qcow_version == 3) { 3758 *spec_info->u.qcow2.data = (ImageInfoSpecificQCow2){ 3759 .compat = g_strdup("1.1"), 3760 .lazy_refcounts = s->compatible_features & 3761 QCOW2_COMPAT_LAZY_REFCOUNTS, 3762 .has_lazy_refcounts = true, 3763 .corrupt = s->incompatible_features & 3764 QCOW2_INCOMPAT_CORRUPT, 3765 .has_corrupt = true, 3766 .refcount_bits = s->refcount_bits, 3767 }; 3768 } else { 3769 /* if this assertion fails, this probably means a new version was 3770 * added without having it covered here */ 3771 assert(false); 3772 } 3773 3774 if (encrypt_info) { 3775 ImageInfoSpecificQCow2Encryption *qencrypt = 3776 g_new(ImageInfoSpecificQCow2Encryption, 1); 3777 switch (encrypt_info->format) { 3778 case Q_CRYPTO_BLOCK_FORMAT_QCOW: 3779 qencrypt->format = BLOCKDEV_QCOW2_ENCRYPTION_FORMAT_AES; 3780 qencrypt->u.aes = encrypt_info->u.qcow; 3781 break; 3782 case Q_CRYPTO_BLOCK_FORMAT_LUKS: 3783 qencrypt->format = BLOCKDEV_QCOW2_ENCRYPTION_FORMAT_LUKS; 3784 qencrypt->u.luks = encrypt_info->u.luks; 3785 break; 3786 default: 3787 abort(); 3788 } 3789 /* Since we did shallow copy above, erase any pointers 3790 * in the original info */ 3791 memset(&encrypt_info->u, 0, sizeof(encrypt_info->u)); 3792 qapi_free_QCryptoBlockInfo(encrypt_info); 3793 3794 spec_info->u.qcow2.data->has_encrypt = true; 3795 spec_info->u.qcow2.data->encrypt = qencrypt; 3796 } 3797 3798 return spec_info; 3799 } 3800 3801 #if 0 3802 static void dump_refcounts(BlockDriverState *bs) 3803 { 3804 BDRVQcow2State *s = bs->opaque; 3805 int64_t nb_clusters, k, k1, size; 3806 int refcount; 3807 3808 size = bdrv_getlength(bs->file->bs); 3809 nb_clusters = size_to_clusters(s, size); 3810 for(k = 0; k < nb_clusters;) { 3811 k1 = k; 3812 refcount = get_refcount(bs, k); 3813 k++; 3814 while (k < nb_clusters && get_refcount(bs, k) == refcount) 3815 k++; 3816 printf("%" PRId64 ": refcount=%d nb=%" PRId64 "\n", k, refcount, 3817 k - k1); 3818 } 3819 } 3820 #endif 3821 3822 static int qcow2_save_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, 3823 int64_t pos) 3824 { 3825 BDRVQcow2State *s = bs->opaque; 3826 3827 BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_SAVE); 3828 return bs->drv->bdrv_co_pwritev(bs, qcow2_vm_state_offset(s) + pos, 3829 qiov->size, qiov, 0); 3830 } 3831 3832 static int qcow2_load_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, 3833 int64_t pos) 3834 { 3835 BDRVQcow2State *s = bs->opaque; 3836 3837 BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_LOAD); 3838 return bs->drv->bdrv_co_preadv(bs, qcow2_vm_state_offset(s) + pos, 3839 qiov->size, qiov, 0); 3840 } 3841 3842 /* 3843 * Downgrades an image's version. To achieve this, any incompatible features 3844 * have to be removed. 3845 */ 3846 static int qcow2_downgrade(BlockDriverState *bs, int target_version, 3847 BlockDriverAmendStatusCB *status_cb, void *cb_opaque) 3848 { 3849 BDRVQcow2State *s = bs->opaque; 3850 int current_version = s->qcow_version; 3851 int ret; 3852 3853 if (target_version == current_version) { 3854 return 0; 3855 } else if (target_version > current_version) { 3856 return -EINVAL; 3857 } else if (target_version != 2) { 3858 return -EINVAL; 3859 } 3860 3861 if (s->refcount_order != 4) { 3862 error_report("compat=0.10 requires refcount_bits=16"); 3863 return -ENOTSUP; 3864 } 3865 3866 /* clear incompatible features */ 3867 if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) { 3868 ret = qcow2_mark_clean(bs); 3869 if (ret < 0) { 3870 return ret; 3871 } 3872 } 3873 3874 /* with QCOW2_INCOMPAT_CORRUPT, it is pretty much impossible to get here in 3875 * the first place; if that happens nonetheless, returning -ENOTSUP is the 3876 * best thing to do anyway */ 3877 3878 if (s->incompatible_features) { 3879 return -ENOTSUP; 3880 } 3881 3882 /* since we can ignore compatible features, we can set them to 0 as well */ 3883 s->compatible_features = 0; 3884 /* if lazy refcounts have been used, they have already been fixed through 3885 * clearing the dirty flag */ 3886 3887 /* clearing autoclear features is trivial */ 3888 s->autoclear_features = 0; 3889 3890 ret = qcow2_expand_zero_clusters(bs, status_cb, cb_opaque); 3891 if (ret < 0) { 3892 return ret; 3893 } 3894 3895 s->qcow_version = target_version; 3896 ret = qcow2_update_header(bs); 3897 if (ret < 0) { 3898 s->qcow_version = current_version; 3899 return ret; 3900 } 3901 return 0; 3902 } 3903 3904 typedef enum Qcow2AmendOperation { 3905 /* This is the value Qcow2AmendHelperCBInfo::last_operation will be 3906 * statically initialized to so that the helper CB can discern the first 3907 * invocation from an operation change */ 3908 QCOW2_NO_OPERATION = 0, 3909 3910 QCOW2_CHANGING_REFCOUNT_ORDER, 3911 QCOW2_DOWNGRADING, 3912 } Qcow2AmendOperation; 3913 3914 typedef struct Qcow2AmendHelperCBInfo { 3915 /* The code coordinating the amend operations should only modify 3916 * these four fields; the rest will be managed by the CB */ 3917 BlockDriverAmendStatusCB *original_status_cb; 3918 void *original_cb_opaque; 3919 3920 Qcow2AmendOperation current_operation; 3921 3922 /* Total number of operations to perform (only set once) */ 3923 int total_operations; 3924 3925 /* The following fields are managed by the CB */ 3926 3927 /* Number of operations completed */ 3928 int operations_completed; 3929 3930 /* Cumulative offset of all completed operations */ 3931 int64_t offset_completed; 3932 3933 Qcow2AmendOperation last_operation; 3934 int64_t last_work_size; 3935 } Qcow2AmendHelperCBInfo; 3936 3937 static void qcow2_amend_helper_cb(BlockDriverState *bs, 3938 int64_t operation_offset, 3939 int64_t operation_work_size, void *opaque) 3940 { 3941 Qcow2AmendHelperCBInfo *info = opaque; 3942 int64_t current_work_size; 3943 int64_t projected_work_size; 3944 3945 if (info->current_operation != info->last_operation) { 3946 if (info->last_operation != QCOW2_NO_OPERATION) { 3947 info->offset_completed += info->last_work_size; 3948 info->operations_completed++; 3949 } 3950 3951 info->last_operation = info->current_operation; 3952 } 3953 3954 assert(info->total_operations > 0); 3955 assert(info->operations_completed < info->total_operations); 3956 3957 info->last_work_size = operation_work_size; 3958 3959 current_work_size = info->offset_completed + operation_work_size; 3960 3961 /* current_work_size is the total work size for (operations_completed + 1) 3962 * operations (which includes this one), so multiply it by the number of 3963 * operations not covered and divide it by the number of operations 3964 * covered to get a projection for the operations not covered */ 3965 projected_work_size = current_work_size * (info->total_operations - 3966 info->operations_completed - 1) 3967 / (info->operations_completed + 1); 3968 3969 info->original_status_cb(bs, info->offset_completed + operation_offset, 3970 current_work_size + projected_work_size, 3971 info->original_cb_opaque); 3972 } 3973 3974 static int qcow2_amend_options(BlockDriverState *bs, QemuOpts *opts, 3975 BlockDriverAmendStatusCB *status_cb, 3976 void *cb_opaque) 3977 { 3978 BDRVQcow2State *s = bs->opaque; 3979 int old_version = s->qcow_version, new_version = old_version; 3980 uint64_t new_size = 0; 3981 const char *backing_file = NULL, *backing_format = NULL; 3982 bool lazy_refcounts = s->use_lazy_refcounts; 3983 const char *compat = NULL; 3984 uint64_t cluster_size = s->cluster_size; 3985 bool encrypt; 3986 int encformat; 3987 int refcount_bits = s->refcount_bits; 3988 Error *local_err = NULL; 3989 int ret; 3990 QemuOptDesc *desc = opts->list->desc; 3991 Qcow2AmendHelperCBInfo helper_cb_info; 3992 3993 while (desc && desc->name) { 3994 if (!qemu_opt_find(opts, desc->name)) { 3995 /* only change explicitly defined options */ 3996 desc++; 3997 continue; 3998 } 3999 4000 if (!strcmp(desc->name, BLOCK_OPT_COMPAT_LEVEL)) { 4001 compat = qemu_opt_get(opts, BLOCK_OPT_COMPAT_LEVEL); 4002 if (!compat) { 4003 /* preserve default */ 4004 } else if (!strcmp(compat, "0.10")) { 4005 new_version = 2; 4006 } else if (!strcmp(compat, "1.1")) { 4007 new_version = 3; 4008 } else { 4009 error_report("Unknown compatibility level %s", compat); 4010 return -EINVAL; 4011 } 4012 } else if (!strcmp(desc->name, BLOCK_OPT_PREALLOC)) { 4013 error_report("Cannot change preallocation mode"); 4014 return -ENOTSUP; 4015 } else if (!strcmp(desc->name, BLOCK_OPT_SIZE)) { 4016 new_size = qemu_opt_get_size(opts, BLOCK_OPT_SIZE, 0); 4017 } else if (!strcmp(desc->name, BLOCK_OPT_BACKING_FILE)) { 4018 backing_file = qemu_opt_get(opts, BLOCK_OPT_BACKING_FILE); 4019 } else if (!strcmp(desc->name, BLOCK_OPT_BACKING_FMT)) { 4020 backing_format = qemu_opt_get(opts, BLOCK_OPT_BACKING_FMT); 4021 } else if (!strcmp(desc->name, BLOCK_OPT_ENCRYPT)) { 4022 encrypt = qemu_opt_get_bool(opts, BLOCK_OPT_ENCRYPT, 4023 !!s->crypto); 4024 4025 if (encrypt != !!s->crypto) { 4026 error_report("Changing the encryption flag is not supported"); 4027 return -ENOTSUP; 4028 } 4029 } else if (!strcmp(desc->name, BLOCK_OPT_ENCRYPT_FORMAT)) { 4030 encformat = qcow2_crypt_method_from_format( 4031 qemu_opt_get(opts, BLOCK_OPT_ENCRYPT_FORMAT)); 4032 4033 if (encformat != s->crypt_method_header) { 4034 error_report("Changing the encryption format is not supported"); 4035 return -ENOTSUP; 4036 } 4037 } else if (!strcmp(desc->name, BLOCK_OPT_CLUSTER_SIZE)) { 4038 cluster_size = qemu_opt_get_size(opts, BLOCK_OPT_CLUSTER_SIZE, 4039 cluster_size); 4040 if (cluster_size != s->cluster_size) { 4041 error_report("Changing the cluster size is not supported"); 4042 return -ENOTSUP; 4043 } 4044 } else if (!strcmp(desc->name, BLOCK_OPT_LAZY_REFCOUNTS)) { 4045 lazy_refcounts = qemu_opt_get_bool(opts, BLOCK_OPT_LAZY_REFCOUNTS, 4046 lazy_refcounts); 4047 } else if (!strcmp(desc->name, BLOCK_OPT_REFCOUNT_BITS)) { 4048 refcount_bits = qemu_opt_get_number(opts, BLOCK_OPT_REFCOUNT_BITS, 4049 refcount_bits); 4050 4051 if (refcount_bits <= 0 || refcount_bits > 64 || 4052 !is_power_of_2(refcount_bits)) 4053 { 4054 error_report("Refcount width must be a power of two and may " 4055 "not exceed 64 bits"); 4056 return -EINVAL; 4057 } 4058 } else { 4059 /* if this point is reached, this probably means a new option was 4060 * added without having it covered here */ 4061 abort(); 4062 } 4063 4064 desc++; 4065 } 4066 4067 helper_cb_info = (Qcow2AmendHelperCBInfo){ 4068 .original_status_cb = status_cb, 4069 .original_cb_opaque = cb_opaque, 4070 .total_operations = (new_version < old_version) 4071 + (s->refcount_bits != refcount_bits) 4072 }; 4073 4074 /* Upgrade first (some features may require compat=1.1) */ 4075 if (new_version > old_version) { 4076 s->qcow_version = new_version; 4077 ret = qcow2_update_header(bs); 4078 if (ret < 0) { 4079 s->qcow_version = old_version; 4080 return ret; 4081 } 4082 } 4083 4084 if (s->refcount_bits != refcount_bits) { 4085 int refcount_order = ctz32(refcount_bits); 4086 4087 if (new_version < 3 && refcount_bits != 16) { 4088 error_report("Different refcount widths than 16 bits require " 4089 "compatibility level 1.1 or above (use compat=1.1 or " 4090 "greater)"); 4091 return -EINVAL; 4092 } 4093 4094 helper_cb_info.current_operation = QCOW2_CHANGING_REFCOUNT_ORDER; 4095 ret = qcow2_change_refcount_order(bs, refcount_order, 4096 &qcow2_amend_helper_cb, 4097 &helper_cb_info, &local_err); 4098 if (ret < 0) { 4099 error_report_err(local_err); 4100 return ret; 4101 } 4102 } 4103 4104 if (backing_file || backing_format) { 4105 ret = qcow2_change_backing_file(bs, 4106 backing_file ?: s->image_backing_file, 4107 backing_format ?: s->image_backing_format); 4108 if (ret < 0) { 4109 return ret; 4110 } 4111 } 4112 4113 if (s->use_lazy_refcounts != lazy_refcounts) { 4114 if (lazy_refcounts) { 4115 if (new_version < 3) { 4116 error_report("Lazy refcounts only supported with compatibility " 4117 "level 1.1 and above (use compat=1.1 or greater)"); 4118 return -EINVAL; 4119 } 4120 s->compatible_features |= QCOW2_COMPAT_LAZY_REFCOUNTS; 4121 ret = qcow2_update_header(bs); 4122 if (ret < 0) { 4123 s->compatible_features &= ~QCOW2_COMPAT_LAZY_REFCOUNTS; 4124 return ret; 4125 } 4126 s->use_lazy_refcounts = true; 4127 } else { 4128 /* make image clean first */ 4129 ret = qcow2_mark_clean(bs); 4130 if (ret < 0) { 4131 return ret; 4132 } 4133 /* now disallow lazy refcounts */ 4134 s->compatible_features &= ~QCOW2_COMPAT_LAZY_REFCOUNTS; 4135 ret = qcow2_update_header(bs); 4136 if (ret < 0) { 4137 s->compatible_features |= QCOW2_COMPAT_LAZY_REFCOUNTS; 4138 return ret; 4139 } 4140 s->use_lazy_refcounts = false; 4141 } 4142 } 4143 4144 if (new_size) { 4145 BlockBackend *blk = blk_new(BLK_PERM_RESIZE, BLK_PERM_ALL); 4146 ret = blk_insert_bs(blk, bs, &local_err); 4147 if (ret < 0) { 4148 error_report_err(local_err); 4149 blk_unref(blk); 4150 return ret; 4151 } 4152 4153 ret = blk_truncate(blk, new_size, PREALLOC_MODE_OFF, &local_err); 4154 blk_unref(blk); 4155 if (ret < 0) { 4156 error_report_err(local_err); 4157 return ret; 4158 } 4159 } 4160 4161 /* Downgrade last (so unsupported features can be removed before) */ 4162 if (new_version < old_version) { 4163 helper_cb_info.current_operation = QCOW2_DOWNGRADING; 4164 ret = qcow2_downgrade(bs, new_version, &qcow2_amend_helper_cb, 4165 &helper_cb_info); 4166 if (ret < 0) { 4167 return ret; 4168 } 4169 } 4170 4171 return 0; 4172 } 4173 4174 /* 4175 * If offset or size are negative, respectively, they will not be included in 4176 * the BLOCK_IMAGE_CORRUPTED event emitted. 4177 * fatal will be ignored for read-only BDS; corruptions found there will always 4178 * be considered non-fatal. 4179 */ 4180 void qcow2_signal_corruption(BlockDriverState *bs, bool fatal, int64_t offset, 4181 int64_t size, const char *message_format, ...) 4182 { 4183 BDRVQcow2State *s = bs->opaque; 4184 const char *node_name; 4185 char *message; 4186 va_list ap; 4187 4188 fatal = fatal && !bs->read_only; 4189 4190 if (s->signaled_corruption && 4191 (!fatal || (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT))) 4192 { 4193 return; 4194 } 4195 4196 va_start(ap, message_format); 4197 message = g_strdup_vprintf(message_format, ap); 4198 va_end(ap); 4199 4200 if (fatal) { 4201 fprintf(stderr, "qcow2: Marking image as corrupt: %s; further " 4202 "corruption events will be suppressed\n", message); 4203 } else { 4204 fprintf(stderr, "qcow2: Image is corrupt: %s; further non-fatal " 4205 "corruption events will be suppressed\n", message); 4206 } 4207 4208 node_name = bdrv_get_node_name(bs); 4209 qapi_event_send_block_image_corrupted(bdrv_get_device_name(bs), 4210 *node_name != '\0', node_name, 4211 message, offset >= 0, offset, 4212 size >= 0, size, 4213 fatal, &error_abort); 4214 g_free(message); 4215 4216 if (fatal) { 4217 qcow2_mark_corrupt(bs); 4218 bs->drv = NULL; /* make BDS unusable */ 4219 } 4220 4221 s->signaled_corruption = true; 4222 } 4223 4224 static QemuOptsList qcow2_create_opts = { 4225 .name = "qcow2-create-opts", 4226 .head = QTAILQ_HEAD_INITIALIZER(qcow2_create_opts.head), 4227 .desc = { 4228 { 4229 .name = BLOCK_OPT_SIZE, 4230 .type = QEMU_OPT_SIZE, 4231 .help = "Virtual disk size" 4232 }, 4233 { 4234 .name = BLOCK_OPT_COMPAT_LEVEL, 4235 .type = QEMU_OPT_STRING, 4236 .help = "Compatibility level (0.10 or 1.1)" 4237 }, 4238 { 4239 .name = BLOCK_OPT_BACKING_FILE, 4240 .type = QEMU_OPT_STRING, 4241 .help = "File name of a base image" 4242 }, 4243 { 4244 .name = BLOCK_OPT_BACKING_FMT, 4245 .type = QEMU_OPT_STRING, 4246 .help = "Image format of the base image" 4247 }, 4248 { 4249 .name = BLOCK_OPT_ENCRYPT, 4250 .type = QEMU_OPT_BOOL, 4251 .help = "Encrypt the image with format 'aes'. (Deprecated " 4252 "in favor of " BLOCK_OPT_ENCRYPT_FORMAT "=aes)", 4253 }, 4254 { 4255 .name = BLOCK_OPT_ENCRYPT_FORMAT, 4256 .type = QEMU_OPT_STRING, 4257 .help = "Encrypt the image, format choices: 'aes', 'luks'", 4258 }, 4259 BLOCK_CRYPTO_OPT_DEF_KEY_SECRET("encrypt.", 4260 "ID of secret providing qcow AES key or LUKS passphrase"), 4261 BLOCK_CRYPTO_OPT_DEF_LUKS_CIPHER_ALG("encrypt."), 4262 BLOCK_CRYPTO_OPT_DEF_LUKS_CIPHER_MODE("encrypt."), 4263 BLOCK_CRYPTO_OPT_DEF_LUKS_IVGEN_ALG("encrypt."), 4264 BLOCK_CRYPTO_OPT_DEF_LUKS_IVGEN_HASH_ALG("encrypt."), 4265 BLOCK_CRYPTO_OPT_DEF_LUKS_HASH_ALG("encrypt."), 4266 BLOCK_CRYPTO_OPT_DEF_LUKS_ITER_TIME("encrypt."), 4267 { 4268 .name = BLOCK_OPT_CLUSTER_SIZE, 4269 .type = QEMU_OPT_SIZE, 4270 .help = "qcow2 cluster size", 4271 .def_value_str = stringify(DEFAULT_CLUSTER_SIZE) 4272 }, 4273 { 4274 .name = BLOCK_OPT_PREALLOC, 4275 .type = QEMU_OPT_STRING, 4276 .help = "Preallocation mode (allowed values: off, metadata, " 4277 "falloc, full)" 4278 }, 4279 { 4280 .name = BLOCK_OPT_LAZY_REFCOUNTS, 4281 .type = QEMU_OPT_BOOL, 4282 .help = "Postpone refcount updates", 4283 .def_value_str = "off" 4284 }, 4285 { 4286 .name = BLOCK_OPT_REFCOUNT_BITS, 4287 .type = QEMU_OPT_NUMBER, 4288 .help = "Width of a reference count entry in bits", 4289 .def_value_str = "16" 4290 }, 4291 { /* end of list */ } 4292 } 4293 }; 4294 4295 BlockDriver bdrv_qcow2 = { 4296 .format_name = "qcow2", 4297 .instance_size = sizeof(BDRVQcow2State), 4298 .bdrv_probe = qcow2_probe, 4299 .bdrv_open = qcow2_open, 4300 .bdrv_close = qcow2_close, 4301 .bdrv_reopen_prepare = qcow2_reopen_prepare, 4302 .bdrv_reopen_commit = qcow2_reopen_commit, 4303 .bdrv_reopen_abort = qcow2_reopen_abort, 4304 .bdrv_join_options = qcow2_join_options, 4305 .bdrv_child_perm = bdrv_format_default_perms, 4306 .bdrv_create = qcow2_create, 4307 .bdrv_has_zero_init = bdrv_has_zero_init_1, 4308 .bdrv_co_get_block_status = qcow2_co_get_block_status, 4309 4310 .bdrv_co_preadv = qcow2_co_preadv, 4311 .bdrv_co_pwritev = qcow2_co_pwritev, 4312 .bdrv_co_flush_to_os = qcow2_co_flush_to_os, 4313 4314 .bdrv_co_pwrite_zeroes = qcow2_co_pwrite_zeroes, 4315 .bdrv_co_pdiscard = qcow2_co_pdiscard, 4316 .bdrv_truncate = qcow2_truncate, 4317 .bdrv_co_pwritev_compressed = qcow2_co_pwritev_compressed, 4318 .bdrv_make_empty = qcow2_make_empty, 4319 4320 .bdrv_snapshot_create = qcow2_snapshot_create, 4321 .bdrv_snapshot_goto = qcow2_snapshot_goto, 4322 .bdrv_snapshot_delete = qcow2_snapshot_delete, 4323 .bdrv_snapshot_list = qcow2_snapshot_list, 4324 .bdrv_snapshot_load_tmp = qcow2_snapshot_load_tmp, 4325 .bdrv_measure = qcow2_measure, 4326 .bdrv_get_info = qcow2_get_info, 4327 .bdrv_get_specific_info = qcow2_get_specific_info, 4328 4329 .bdrv_save_vmstate = qcow2_save_vmstate, 4330 .bdrv_load_vmstate = qcow2_load_vmstate, 4331 4332 .supports_backing = true, 4333 .bdrv_change_backing_file = qcow2_change_backing_file, 4334 4335 .bdrv_refresh_limits = qcow2_refresh_limits, 4336 .bdrv_invalidate_cache = qcow2_invalidate_cache, 4337 .bdrv_inactivate = qcow2_inactivate, 4338 4339 .create_opts = &qcow2_create_opts, 4340 .bdrv_check = qcow2_check, 4341 .bdrv_amend_options = qcow2_amend_options, 4342 4343 .bdrv_detach_aio_context = qcow2_detach_aio_context, 4344 .bdrv_attach_aio_context = qcow2_attach_aio_context, 4345 4346 .bdrv_reopen_bitmaps_rw = qcow2_reopen_bitmaps_rw, 4347 .bdrv_can_store_new_dirty_bitmap = qcow2_can_store_new_dirty_bitmap, 4348 .bdrv_remove_persistent_dirty_bitmap = qcow2_remove_persistent_dirty_bitmap, 4349 }; 4350 4351 static void bdrv_qcow2_init(void) 4352 { 4353 bdrv_register(&bdrv_qcow2); 4354 } 4355 4356 block_init(bdrv_qcow2_init); 4357