/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qemu/module.h"
#include <zlib.h>
#include "block/qcow2.h"
#include "qemu/error-report.h"
#include "qapi/qmp/qerror.h"
#include "qapi/qmp/qbool.h"
#include "qapi/util.h"
#include "qapi/qmp/types.h"
#include "qapi-event.h"
#include "trace.h"
#include "qemu/option_int.h"
#include "qemu/cutils.h"
#include "qemu/bswap.h"
#include "qapi/opts-visitor.h"
#include "qapi-visit.h"
#include "block/crypto.h"

/*
  Differences with QCOW:

  - Support for multiple incremental snapshots.
  - Memory management by reference counts.
  - Clusters which have a reference count of one have the bit
    QCOW_OFLAG_COPIED to optimize write performance.
  - Size of compressed clusters is stored in sectors to reduce bit usage
    in the cluster offsets.
  - Support for storing additional data (such as the VM state) in the
    snapshots.
  - If a backing store is used, the cluster size is not constrained
    (could be backported to QCOW).
  - L2 tables always have a size of one cluster.
*/

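/*
 * Layout of the header extension area, as implied by qcow2_read_extensions()
 * and header_ext_add() below (illustrative summary, not normative): the area
 * sits between the end of the fixed header and the backing file name, or the
 * end of the first cluster if there is no backing file.  Each extension is a
 * QCowExtension record followed by its payload, padded to a multiple of
 * eight bytes:
 *
 *     +--------------+------------+---------------------------------+
 *     | magic (BE32) | len (BE32) | len payload bytes, 8-byte pad   |
 *     +--------------+------------+---------------------------------+
 *
 * A record with magic QCOW2_EXT_MAGIC_END (0) terminates the list.
 */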

typedef struct {
    uint32_t magic;
    uint32_t len;
} QEMU_PACKED QCowExtension;

#define QCOW2_EXT_MAGIC_END 0
#define QCOW2_EXT_MAGIC_BACKING_FORMAT 0xE2792ACA
#define QCOW2_EXT_MAGIC_FEATURE_TABLE 0x6803f857
#define QCOW2_EXT_MAGIC_CRYPTO_HEADER 0x0537be77
#define QCOW2_EXT_MAGIC_BITMAPS 0x23852875

static int qcow2_probe(const uint8_t *buf, int buf_size, const char *filename)
{
    const QCowHeader *cow_header = (const void *)buf;

    if (buf_size >= sizeof(QCowHeader) &&
        be32_to_cpu(cow_header->magic) == QCOW_MAGIC &&
        be32_to_cpu(cow_header->version) >= 2)
        return 100;
    else
        return 0;
}


static ssize_t qcow2_crypto_hdr_read_func(QCryptoBlock *block, size_t offset,
                                          uint8_t *buf, size_t buflen,
                                          void *opaque, Error **errp)
{
    BlockDriverState *bs = opaque;
    BDRVQcow2State *s = bs->opaque;
    ssize_t ret;

    if ((offset + buflen) > s->crypto_header.length) {
        error_setg(errp, "Request for data outside of extension header");
        return -1;
    }

    ret = bdrv_pread(bs->file,
                     s->crypto_header.offset + offset, buf, buflen);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not read encryption header");
        return -1;
    }
    return ret;
}


static ssize_t qcow2_crypto_hdr_init_func(QCryptoBlock *block, size_t headerlen,
                                          void *opaque, Error **errp)
{
    BlockDriverState *bs = opaque;
    BDRVQcow2State *s = bs->opaque;
    int64_t ret;
    int64_t clusterlen;

    ret = qcow2_alloc_clusters(bs, headerlen);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "Cannot allocate cluster for LUKS header size %zu",
                         headerlen);
        return -1;
    }

    s->crypto_header.length = headerlen;
    s->crypto_header.offset = ret;

    /* Zero fill remaining space in cluster so it has predictable
     * content in case of future spec changes */
    clusterlen = size_to_clusters(s, headerlen) * s->cluster_size;
    ret = bdrv_pwrite_zeroes(bs->file,
                             ret + headerlen,
                             clusterlen - headerlen, 0);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not zero fill encryption header");
        return -1;
    }

    return ret;
}


static ssize_t qcow2_crypto_hdr_write_func(QCryptoBlock *block, size_t offset,
                                           const uint8_t *buf, size_t buflen,
                                           void *opaque, Error **errp)
{
    BlockDriverState *bs = opaque;
    BDRVQcow2State *s = bs->opaque;
    ssize_t ret;

    if ((offset + buflen) > s->crypto_header.length) {
        error_setg(errp, "Request for data outside of extension header");
        return -1;
    }

    ret = bdrv_pwrite(bs->file,
                      s->crypto_header.offset + offset, buf, buflen);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not write encryption header");
        return -1;
    }
    return ret;
}


/*
 * read qcow2 extension and fill bs
 * start reading from start_offset
 * finish reading upon magic of value 0 or when end_offset reached
 * unknown magic is skipped (future extension this version knows nothing about)
 * return 0 upon success, non-0 otherwise
 */
static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset,
                                 uint64_t end_offset, void **p_feature_table,
                                 int flags, bool *need_update_header,
                                 Error **errp)
{
    BDRVQcow2State *s = bs->opaque;
    QCowExtension ext;
    uint64_t offset;
    int ret;
    Qcow2BitmapHeaderExt bitmaps_ext;

    if (need_update_header != NULL) {
        *need_update_header = false;
    }

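    /*
     * Walk the extension area one record at a time: read a (magic, len)
     * header, dispatch on the magic value, then advance by the payload
     * length rounded up to a multiple of eight bytes.  Records with an
     * unknown magic are preserved on s->unknown_header_ext so that a later
     * header rewrite can copy them back unchanged.
     */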
#ifdef DEBUG_EXT
    printf("qcow2_read_extensions: start=%" PRIu64 " end=%" PRIu64 "\n",
           start_offset, end_offset);
#endif
    offset = start_offset;
    while (offset < end_offset) {

#ifdef DEBUG_EXT
        /* Sanity check */
        if (offset > s->cluster_size)
            printf("qcow2_read_extension: suspicious offset %" PRIu64 "\n",
                   offset);

        printf("attempting to read extended header in offset %" PRIu64 "\n",
               offset);
#endif

        ret = bdrv_pread(bs->file, offset, &ext, sizeof(ext));
        if (ret < 0) {
            error_setg_errno(errp, -ret, "qcow2_read_extension: ERROR: "
                             "pread fail from offset %" PRIu64, offset);
            return 1;
        }
        be32_to_cpus(&ext.magic);
        be32_to_cpus(&ext.len);
        offset += sizeof(ext);
#ifdef DEBUG_EXT
        printf("ext.magic = 0x%x\n", ext.magic);
#endif
        if (offset > end_offset || ext.len > end_offset - offset) {
            error_setg(errp, "Header extension too large");
            return -EINVAL;
        }

        switch (ext.magic) {
        case QCOW2_EXT_MAGIC_END:
            return 0;

        case QCOW2_EXT_MAGIC_BACKING_FORMAT:
            if (ext.len >= sizeof(bs->backing_format)) {
                error_setg(errp, "ERROR: ext_backing_format: len=%" PRIu32
                           " too large (>=%zu)", ext.len,
                           sizeof(bs->backing_format));
                return 2;
            }
            ret = bdrv_pread(bs->file, offset, bs->backing_format, ext.len);
            if (ret < 0) {
                error_setg_errno(errp, -ret, "ERROR: ext_backing_format: "
                                 "Could not read format name");
                return 3;
            }
            bs->backing_format[ext.len] = '\0';
            s->image_backing_format = g_strdup(bs->backing_format);
#ifdef DEBUG_EXT
            printf("Qcow2: Got format extension %s\n", bs->backing_format);
#endif
            break;

        case QCOW2_EXT_MAGIC_FEATURE_TABLE:
            if (p_feature_table != NULL) {
                void *feature_table = g_malloc0(ext.len + 2 * sizeof(Qcow2Feature));
                ret = bdrv_pread(bs->file, offset, feature_table, ext.len);
                if (ret < 0) {
                    error_setg_errno(errp, -ret, "ERROR: ext_feature_table: "
                                     "Could not read table");
                    return ret;
                }

                *p_feature_table = feature_table;
            }
            break;

        case QCOW2_EXT_MAGIC_CRYPTO_HEADER: {
            unsigned int cflags = 0;
            if (s->crypt_method_header != QCOW_CRYPT_LUKS) {
                error_setg(errp, "CRYPTO header extension only "
                           "expected with LUKS encryption method");
                return -EINVAL;
            }
            if (ext.len != sizeof(Qcow2CryptoHeaderExtension)) {
                error_setg(errp, "CRYPTO header extension size %u, "
                           "but expected size %zu", ext.len,
                           sizeof(Qcow2CryptoHeaderExtension));
                return -EINVAL;
            }

            ret = bdrv_pread(bs->file, offset, &s->crypto_header, ext.len);
            if (ret < 0) {
                error_setg_errno(errp, -ret,
                                 "Unable to read CRYPTO header extension");
                return ret;
            }
            be64_to_cpus(&s->crypto_header.offset);
            be64_to_cpus(&s->crypto_header.length);

            if ((s->crypto_header.offset % s->cluster_size) != 0) {
                error_setg(errp, "Encryption header offset '%" PRIu64 "' is "
                           "not a multiple of cluster size '%u'",
                           s->crypto_header.offset, s->cluster_size);
                return -EINVAL;
            }

            if (flags & BDRV_O_NO_IO) {
                cflags |= QCRYPTO_BLOCK_OPEN_NO_IO;
            }
            s->crypto = qcrypto_block_open(s->crypto_opts, "encrypt.",
                                           qcow2_crypto_hdr_read_func,
                                           bs, cflags, errp);
            if (!s->crypto) {
                return -EINVAL;
            }
        }   break;

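        /*
         * The bitmaps extension is only trusted while the corresponding
         * autoclear bit is set; if a program without bitmap support has
         * modified the image, that bit is gone, and the stale extension is
         * dropped on the next header update rather than parsed.
         */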
        case QCOW2_EXT_MAGIC_BITMAPS:
            if (ext.len != sizeof(bitmaps_ext)) {
                error_setg(errp, "bitmaps_ext: "
                           "Invalid extension length");
                return -EINVAL;
            }

            if (!(s->autoclear_features & QCOW2_AUTOCLEAR_BITMAPS)) {
                error_report("WARNING: a program lacking bitmap support "
                             "modified this file, so all bitmaps are now "
                             "considered inconsistent. Some clusters may be "
                             "leaked, run 'qemu-img check -r' on the image "
                             "file to fix.");
                if (need_update_header != NULL) {
                    /* Updating is needed to drop invalid bitmap extension. */
                    *need_update_header = true;
                }
                break;
            }

            ret = bdrv_pread(bs->file, offset, &bitmaps_ext, ext.len);
            if (ret < 0) {
                error_setg_errno(errp, -ret, "bitmaps_ext: "
                                 "Could not read ext header");
                return ret;
            }

            if (bitmaps_ext.reserved32 != 0) {
                error_setg(errp, "bitmaps_ext: "
                           "Reserved field is not zero");
                return -EINVAL;
            }

            be32_to_cpus(&bitmaps_ext.nb_bitmaps);
            be64_to_cpus(&bitmaps_ext.bitmap_directory_size);
            be64_to_cpus(&bitmaps_ext.bitmap_directory_offset);

            if (bitmaps_ext.nb_bitmaps > QCOW2_MAX_BITMAPS) {
                error_setg(errp,
                           "bitmaps_ext: Image has %" PRIu32 " bitmaps, "
                           "exceeding the QEMU supported maximum of %d",
                           bitmaps_ext.nb_bitmaps, QCOW2_MAX_BITMAPS);
                return -EINVAL;
            }

            if (bitmaps_ext.nb_bitmaps == 0) {
                error_setg(errp, "found bitmaps extension with zero bitmaps");
                return -EINVAL;
            }

            if (bitmaps_ext.bitmap_directory_offset & (s->cluster_size - 1)) {
                error_setg(errp, "bitmaps_ext: "
                           "invalid bitmap directory offset");
                return -EINVAL;
            }

            if (bitmaps_ext.bitmap_directory_size >
                QCOW2_MAX_BITMAP_DIRECTORY_SIZE) {
                error_setg(errp, "bitmaps_ext: "
                           "bitmap directory size (%" PRIu64 ") exceeds "
                           "the maximum supported size (%d)",
                           bitmaps_ext.bitmap_directory_size,
                           QCOW2_MAX_BITMAP_DIRECTORY_SIZE);
                return -EINVAL;
            }

            s->nb_bitmaps = bitmaps_ext.nb_bitmaps;
            s->bitmap_directory_offset =
                    bitmaps_ext.bitmap_directory_offset;
            s->bitmap_directory_size =
                    bitmaps_ext.bitmap_directory_size;

#ifdef DEBUG_EXT
            printf("Qcow2: Got bitmaps extension: "
                   "offset=%" PRIu64 " nb_bitmaps=%" PRIu32 "\n",
                   s->bitmap_directory_offset, s->nb_bitmaps);
#endif
            break;

        default:
            /* unknown magic - save it in case we need to rewrite the header */
            {
                Qcow2UnknownHeaderExtension *uext;

                uext = g_malloc0(sizeof(*uext) + ext.len);
                uext->magic = ext.magic;
                uext->len = ext.len;
                QLIST_INSERT_HEAD(&s->unknown_header_ext, uext, next);

                ret = bdrv_pread(bs->file, offset, uext->data, uext->len);
                if (ret < 0) {
                    error_setg_errno(errp, -ret, "ERROR: unknown extension: "
                                     "Could not read data");
                    return ret;
                }
            }
            break;
        }

        offset += ((ext.len + 7) & ~7);
    }

    return 0;
}

static void cleanup_unknown_header_ext(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;
    Qcow2UnknownHeaderExtension *uext, *next;

    QLIST_FOREACH_SAFE(uext, &s->unknown_header_ext, next, next) {
        QLIST_REMOVE(uext, next);
        g_free(uext);
    }
}

static void report_unsupported_feature(Error **errp, Qcow2Feature *table,
                                       uint64_t mask)
{
    char *features = g_strdup("");
    char *old;

    while (table && table->name[0] != '\0') {
        if (table->type == QCOW2_FEAT_TYPE_INCOMPATIBLE) {
            if (mask & (1ULL << table->bit)) {
                old = features;
                features = g_strdup_printf("%s%s%.46s", old, *old ?
", " : "", 424 table->name); 425 g_free(old); 426 mask &= ~(1ULL << table->bit); 427 } 428 } 429 table++; 430 } 431 432 if (mask) { 433 old = features; 434 features = g_strdup_printf("%s%sUnknown incompatible feature: %" PRIx64, 435 old, *old ? ", " : "", mask); 436 g_free(old); 437 } 438 439 error_setg(errp, "Unsupported qcow2 feature(s): %s", features); 440 g_free(features); 441 } 442 443 /* 444 * Sets the dirty bit and flushes afterwards if necessary. 445 * 446 * The incompatible_features bit is only set if the image file header was 447 * updated successfully. Therefore it is not required to check the return 448 * value of this function. 449 */ 450 int qcow2_mark_dirty(BlockDriverState *bs) 451 { 452 BDRVQcow2State *s = bs->opaque; 453 uint64_t val; 454 int ret; 455 456 assert(s->qcow_version >= 3); 457 458 if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) { 459 return 0; /* already dirty */ 460 } 461 462 val = cpu_to_be64(s->incompatible_features | QCOW2_INCOMPAT_DIRTY); 463 ret = bdrv_pwrite(bs->file, offsetof(QCowHeader, incompatible_features), 464 &val, sizeof(val)); 465 if (ret < 0) { 466 return ret; 467 } 468 ret = bdrv_flush(bs->file->bs); 469 if (ret < 0) { 470 return ret; 471 } 472 473 /* Only treat image as dirty if the header was updated successfully */ 474 s->incompatible_features |= QCOW2_INCOMPAT_DIRTY; 475 return 0; 476 } 477 478 /* 479 * Clears the dirty bit and flushes before if necessary. Only call this 480 * function when there are no pending requests, it does not guard against 481 * concurrent requests dirtying the image. 482 */ 483 static int qcow2_mark_clean(BlockDriverState *bs) 484 { 485 BDRVQcow2State *s = bs->opaque; 486 487 if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) { 488 int ret; 489 490 s->incompatible_features &= ~QCOW2_INCOMPAT_DIRTY; 491 492 ret = bdrv_flush(bs); 493 if (ret < 0) { 494 return ret; 495 } 496 497 return qcow2_update_header(bs); 498 } 499 return 0; 500 } 501 502 /* 503 * Marks the image as corrupt. 504 */ 505 int qcow2_mark_corrupt(BlockDriverState *bs) 506 { 507 BDRVQcow2State *s = bs->opaque; 508 509 s->incompatible_features |= QCOW2_INCOMPAT_CORRUPT; 510 return qcow2_update_header(bs); 511 } 512 513 /* 514 * Marks the image as consistent, i.e., unsets the corrupt bit, and flushes 515 * before if necessary. 516 */ 517 int qcow2_mark_consistent(BlockDriverState *bs) 518 { 519 BDRVQcow2State *s = bs->opaque; 520 521 if (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT) { 522 int ret = bdrv_flush(bs); 523 if (ret < 0) { 524 return ret; 525 } 526 527 s->incompatible_features &= ~QCOW2_INCOMPAT_CORRUPT; 528 return qcow2_update_header(bs); 529 } 530 return 0; 531 } 532 533 static int qcow2_check(BlockDriverState *bs, BdrvCheckResult *result, 534 BdrvCheckMode fix) 535 { 536 int ret = qcow2_check_refcounts(bs, result, fix); 537 if (ret < 0) { 538 return ret; 539 } 540 541 if (fix && result->check_errors == 0 && result->corruptions == 0) { 542 ret = qcow2_mark_clean(bs); 543 if (ret < 0) { 544 return ret; 545 } 546 return qcow2_mark_consistent(bs); 547 } 548 return ret; 549 } 550 551 static int validate_table_offset(BlockDriverState *bs, uint64_t offset, 552 uint64_t entries, size_t entry_len) 553 { 554 BDRVQcow2State *s = bs->opaque; 555 uint64_t size; 556 557 /* Use signed INT64_MAX as the maximum even for uint64_t header fields, 558 * because values will be passed to qemu functions taking int64_t. 
*/ 559 if (entries > INT64_MAX / entry_len) { 560 return -EINVAL; 561 } 562 563 size = entries * entry_len; 564 565 if (INT64_MAX - size < offset) { 566 return -EINVAL; 567 } 568 569 /* Tables must be cluster aligned */ 570 if (offset_into_cluster(s, offset) != 0) { 571 return -EINVAL; 572 } 573 574 return 0; 575 } 576 577 static QemuOptsList qcow2_runtime_opts = { 578 .name = "qcow2", 579 .head = QTAILQ_HEAD_INITIALIZER(qcow2_runtime_opts.head), 580 .desc = { 581 { 582 .name = QCOW2_OPT_LAZY_REFCOUNTS, 583 .type = QEMU_OPT_BOOL, 584 .help = "Postpone refcount updates", 585 }, 586 { 587 .name = QCOW2_OPT_DISCARD_REQUEST, 588 .type = QEMU_OPT_BOOL, 589 .help = "Pass guest discard requests to the layer below", 590 }, 591 { 592 .name = QCOW2_OPT_DISCARD_SNAPSHOT, 593 .type = QEMU_OPT_BOOL, 594 .help = "Generate discard requests when snapshot related space " 595 "is freed", 596 }, 597 { 598 .name = QCOW2_OPT_DISCARD_OTHER, 599 .type = QEMU_OPT_BOOL, 600 .help = "Generate discard requests when other clusters are freed", 601 }, 602 { 603 .name = QCOW2_OPT_OVERLAP, 604 .type = QEMU_OPT_STRING, 605 .help = "Selects which overlap checks to perform from a range of " 606 "templates (none, constant, cached, all)", 607 }, 608 { 609 .name = QCOW2_OPT_OVERLAP_TEMPLATE, 610 .type = QEMU_OPT_STRING, 611 .help = "Selects which overlap checks to perform from a range of " 612 "templates (none, constant, cached, all)", 613 }, 614 { 615 .name = QCOW2_OPT_OVERLAP_MAIN_HEADER, 616 .type = QEMU_OPT_BOOL, 617 .help = "Check for unintended writes into the main qcow2 header", 618 }, 619 { 620 .name = QCOW2_OPT_OVERLAP_ACTIVE_L1, 621 .type = QEMU_OPT_BOOL, 622 .help = "Check for unintended writes into the active L1 table", 623 }, 624 { 625 .name = QCOW2_OPT_OVERLAP_ACTIVE_L2, 626 .type = QEMU_OPT_BOOL, 627 .help = "Check for unintended writes into an active L2 table", 628 }, 629 { 630 .name = QCOW2_OPT_OVERLAP_REFCOUNT_TABLE, 631 .type = QEMU_OPT_BOOL, 632 .help = "Check for unintended writes into the refcount table", 633 }, 634 { 635 .name = QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK, 636 .type = QEMU_OPT_BOOL, 637 .help = "Check for unintended writes into a refcount block", 638 }, 639 { 640 .name = QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE, 641 .type = QEMU_OPT_BOOL, 642 .help = "Check for unintended writes into the snapshot table", 643 }, 644 { 645 .name = QCOW2_OPT_OVERLAP_INACTIVE_L1, 646 .type = QEMU_OPT_BOOL, 647 .help = "Check for unintended writes into an inactive L1 table", 648 }, 649 { 650 .name = QCOW2_OPT_OVERLAP_INACTIVE_L2, 651 .type = QEMU_OPT_BOOL, 652 .help = "Check for unintended writes into an inactive L2 table", 653 }, 654 { 655 .name = QCOW2_OPT_CACHE_SIZE, 656 .type = QEMU_OPT_SIZE, 657 .help = "Maximum combined metadata (L2 tables and refcount blocks) " 658 "cache size", 659 }, 660 { 661 .name = QCOW2_OPT_L2_CACHE_SIZE, 662 .type = QEMU_OPT_SIZE, 663 .help = "Maximum L2 table cache size", 664 }, 665 { 666 .name = QCOW2_OPT_REFCOUNT_CACHE_SIZE, 667 .type = QEMU_OPT_SIZE, 668 .help = "Maximum refcount block cache size", 669 }, 670 { 671 .name = QCOW2_OPT_CACHE_CLEAN_INTERVAL, 672 .type = QEMU_OPT_NUMBER, 673 .help = "Clean unused cache entries after this time (in seconds)", 674 }, 675 BLOCK_CRYPTO_OPT_DEF_KEY_SECRET("encrypt.", 676 "ID of secret providing qcow2 AES key or LUKS passphrase"), 677 { /* end of list */ } 678 }, 679 }; 680 681 static const char *overlap_bool_option_names[QCOW2_OL_MAX_BITNR] = { 682 [QCOW2_OL_MAIN_HEADER_BITNR] = QCOW2_OPT_OVERLAP_MAIN_HEADER, 683 [QCOW2_OL_ACTIVE_L1_BITNR] = 
QCOW2_OPT_OVERLAP_ACTIVE_L1,
    [QCOW2_OL_ACTIVE_L2_BITNR]      = QCOW2_OPT_OVERLAP_ACTIVE_L2,
    [QCOW2_OL_REFCOUNT_TABLE_BITNR] = QCOW2_OPT_OVERLAP_REFCOUNT_TABLE,
    [QCOW2_OL_REFCOUNT_BLOCK_BITNR] = QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK,
    [QCOW2_OL_SNAPSHOT_TABLE_BITNR] = QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE,
    [QCOW2_OL_INACTIVE_L1_BITNR]    = QCOW2_OPT_OVERLAP_INACTIVE_L1,
    [QCOW2_OL_INACTIVE_L2_BITNR]    = QCOW2_OPT_OVERLAP_INACTIVE_L2,
};

static void cache_clean_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;
    BDRVQcow2State *s = bs->opaque;
    qcow2_cache_clean_unused(bs, s->l2_table_cache);
    qcow2_cache_clean_unused(bs, s->refcount_block_cache);
    timer_mod(s->cache_clean_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
              (int64_t) s->cache_clean_interval * 1000);
}

static void cache_clean_timer_init(BlockDriverState *bs, AioContext *context)
{
    BDRVQcow2State *s = bs->opaque;
    if (s->cache_clean_interval > 0) {
        s->cache_clean_timer = aio_timer_new(context, QEMU_CLOCK_VIRTUAL,
                                             SCALE_MS, cache_clean_timer_cb,
                                             bs);
        timer_mod(s->cache_clean_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                  (int64_t) s->cache_clean_interval * 1000);
    }
}

static void cache_clean_timer_del(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;
    if (s->cache_clean_timer) {
        timer_del(s->cache_clean_timer);
        timer_free(s->cache_clean_timer);
        s->cache_clean_timer = NULL;
    }
}

static void qcow2_detach_aio_context(BlockDriverState *bs)
{
    cache_clean_timer_del(bs);
}

static void qcow2_attach_aio_context(BlockDriverState *bs,
                                     AioContext *new_context)
{
    cache_clean_timer_init(bs, new_context);
}

static void read_cache_sizes(BlockDriverState *bs, QemuOpts *opts,
                             uint64_t *l2_cache_size,
                             uint64_t *refcount_cache_size, Error **errp)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t combined_cache_size;
    bool l2_cache_size_set, refcount_cache_size_set, combined_cache_size_set;

    combined_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_CACHE_SIZE);
    l2_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_L2_CACHE_SIZE);
    refcount_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_REFCOUNT_CACHE_SIZE);

    combined_cache_size = qemu_opt_get_size(opts, QCOW2_OPT_CACHE_SIZE, 0);
    *l2_cache_size = qemu_opt_get_size(opts, QCOW2_OPT_L2_CACHE_SIZE, 0);
    *refcount_cache_size = qemu_opt_get_size(opts,
                                             QCOW2_OPT_REFCOUNT_CACHE_SIZE, 0);

    if (combined_cache_size_set) {
        if (l2_cache_size_set && refcount_cache_size_set) {
            error_setg(errp, QCOW2_OPT_CACHE_SIZE ", " QCOW2_OPT_L2_CACHE_SIZE
                       " and " QCOW2_OPT_REFCOUNT_CACHE_SIZE " may not be set "
                       "at the same time");
            return;
        } else if (*l2_cache_size > combined_cache_size) {
            error_setg(errp, QCOW2_OPT_L2_CACHE_SIZE " may not exceed "
                       QCOW2_OPT_CACHE_SIZE);
            return;
        } else if (*refcount_cache_size > combined_cache_size) {
            error_setg(errp, QCOW2_OPT_REFCOUNT_CACHE_SIZE " may not exceed "
                       QCOW2_OPT_CACHE_SIZE);
            return;
        }

        if (l2_cache_size_set) {
            *refcount_cache_size = combined_cache_size - *l2_cache_size;
        } else if (refcount_cache_size_set) {
            *l2_cache_size = combined_cache_size - *refcount_cache_size;
        } else {
            *refcount_cache_size = combined_cache_size
                                 / (DEFAULT_L2_REFCOUNT_SIZE_RATIO + 1);
            *l2_cache_size = combined_cache_size - *refcount_cache_size;
        }
    } else {
        if (!l2_cache_size_set &&
!refcount_cache_size_set) { 779 *l2_cache_size = MAX(DEFAULT_L2_CACHE_BYTE_SIZE, 780 (uint64_t)DEFAULT_L2_CACHE_CLUSTERS 781 * s->cluster_size); 782 *refcount_cache_size = *l2_cache_size 783 / DEFAULT_L2_REFCOUNT_SIZE_RATIO; 784 } else if (!l2_cache_size_set) { 785 *l2_cache_size = *refcount_cache_size 786 * DEFAULT_L2_REFCOUNT_SIZE_RATIO; 787 } else if (!refcount_cache_size_set) { 788 *refcount_cache_size = *l2_cache_size 789 / DEFAULT_L2_REFCOUNT_SIZE_RATIO; 790 } 791 } 792 } 793 794 typedef struct Qcow2ReopenState { 795 Qcow2Cache *l2_table_cache; 796 Qcow2Cache *refcount_block_cache; 797 bool use_lazy_refcounts; 798 int overlap_check; 799 bool discard_passthrough[QCOW2_DISCARD_MAX]; 800 uint64_t cache_clean_interval; 801 QCryptoBlockOpenOptions *crypto_opts; /* Disk encryption runtime options */ 802 } Qcow2ReopenState; 803 804 static int qcow2_update_options_prepare(BlockDriverState *bs, 805 Qcow2ReopenState *r, 806 QDict *options, int flags, 807 Error **errp) 808 { 809 BDRVQcow2State *s = bs->opaque; 810 QemuOpts *opts = NULL; 811 const char *opt_overlap_check, *opt_overlap_check_template; 812 int overlap_check_template = 0; 813 uint64_t l2_cache_size, refcount_cache_size; 814 int i; 815 const char *encryptfmt; 816 QDict *encryptopts = NULL; 817 Error *local_err = NULL; 818 int ret; 819 820 qdict_extract_subqdict(options, &encryptopts, "encrypt."); 821 encryptfmt = qdict_get_try_str(encryptopts, "format"); 822 823 opts = qemu_opts_create(&qcow2_runtime_opts, NULL, 0, &error_abort); 824 qemu_opts_absorb_qdict(opts, options, &local_err); 825 if (local_err) { 826 error_propagate(errp, local_err); 827 ret = -EINVAL; 828 goto fail; 829 } 830 831 /* get L2 table/refcount block cache size from command line options */ 832 read_cache_sizes(bs, opts, &l2_cache_size, &refcount_cache_size, 833 &local_err); 834 if (local_err) { 835 error_propagate(errp, local_err); 836 ret = -EINVAL; 837 goto fail; 838 } 839 840 l2_cache_size /= s->cluster_size; 841 if (l2_cache_size < MIN_L2_CACHE_SIZE) { 842 l2_cache_size = MIN_L2_CACHE_SIZE; 843 } 844 if (l2_cache_size > INT_MAX) { 845 error_setg(errp, "L2 cache size too big"); 846 ret = -EINVAL; 847 goto fail; 848 } 849 850 refcount_cache_size /= s->cluster_size; 851 if (refcount_cache_size < MIN_REFCOUNT_CACHE_SIZE) { 852 refcount_cache_size = MIN_REFCOUNT_CACHE_SIZE; 853 } 854 if (refcount_cache_size > INT_MAX) { 855 error_setg(errp, "Refcount cache size too big"); 856 ret = -EINVAL; 857 goto fail; 858 } 859 860 /* alloc new L2 table/refcount block cache, flush old one */ 861 if (s->l2_table_cache) { 862 ret = qcow2_cache_flush(bs, s->l2_table_cache); 863 if (ret) { 864 error_setg_errno(errp, -ret, "Failed to flush the L2 table cache"); 865 goto fail; 866 } 867 } 868 869 if (s->refcount_block_cache) { 870 ret = qcow2_cache_flush(bs, s->refcount_block_cache); 871 if (ret) { 872 error_setg_errno(errp, -ret, 873 "Failed to flush the refcount block cache"); 874 goto fail; 875 } 876 } 877 878 r->l2_table_cache = qcow2_cache_create(bs, l2_cache_size); 879 r->refcount_block_cache = qcow2_cache_create(bs, refcount_cache_size); 880 if (r->l2_table_cache == NULL || r->refcount_block_cache == NULL) { 881 error_setg(errp, "Could not allocate metadata caches"); 882 ret = -ENOMEM; 883 goto fail; 884 } 885 886 /* New interval for cache cleanup timer */ 887 r->cache_clean_interval = 888 qemu_opt_get_number(opts, QCOW2_OPT_CACHE_CLEAN_INTERVAL, 889 s->cache_clean_interval); 890 #ifndef CONFIG_LINUX 891 if (r->cache_clean_interval != 0) { 892 error_setg(errp, 
QCOW2_OPT_CACHE_CLEAN_INTERVAL 893 " not supported on this host"); 894 ret = -EINVAL; 895 goto fail; 896 } 897 #endif 898 if (r->cache_clean_interval > UINT_MAX) { 899 error_setg(errp, "Cache clean interval too big"); 900 ret = -EINVAL; 901 goto fail; 902 } 903 904 /* lazy-refcounts; flush if going from enabled to disabled */ 905 r->use_lazy_refcounts = qemu_opt_get_bool(opts, QCOW2_OPT_LAZY_REFCOUNTS, 906 (s->compatible_features & QCOW2_COMPAT_LAZY_REFCOUNTS)); 907 if (r->use_lazy_refcounts && s->qcow_version < 3) { 908 error_setg(errp, "Lazy refcounts require a qcow2 image with at least " 909 "qemu 1.1 compatibility level"); 910 ret = -EINVAL; 911 goto fail; 912 } 913 914 if (s->use_lazy_refcounts && !r->use_lazy_refcounts) { 915 ret = qcow2_mark_clean(bs); 916 if (ret < 0) { 917 error_setg_errno(errp, -ret, "Failed to disable lazy refcounts"); 918 goto fail; 919 } 920 } 921 922 /* Overlap check options */ 923 opt_overlap_check = qemu_opt_get(opts, QCOW2_OPT_OVERLAP); 924 opt_overlap_check_template = qemu_opt_get(opts, QCOW2_OPT_OVERLAP_TEMPLATE); 925 if (opt_overlap_check_template && opt_overlap_check && 926 strcmp(opt_overlap_check_template, opt_overlap_check)) 927 { 928 error_setg(errp, "Conflicting values for qcow2 options '" 929 QCOW2_OPT_OVERLAP "' ('%s') and '" QCOW2_OPT_OVERLAP_TEMPLATE 930 "' ('%s')", opt_overlap_check, opt_overlap_check_template); 931 ret = -EINVAL; 932 goto fail; 933 } 934 if (!opt_overlap_check) { 935 opt_overlap_check = opt_overlap_check_template ?: "cached"; 936 } 937 938 if (!strcmp(opt_overlap_check, "none")) { 939 overlap_check_template = 0; 940 } else if (!strcmp(opt_overlap_check, "constant")) { 941 overlap_check_template = QCOW2_OL_CONSTANT; 942 } else if (!strcmp(opt_overlap_check, "cached")) { 943 overlap_check_template = QCOW2_OL_CACHED; 944 } else if (!strcmp(opt_overlap_check, "all")) { 945 overlap_check_template = QCOW2_OL_ALL; 946 } else { 947 error_setg(errp, "Unsupported value '%s' for qcow2 option " 948 "'overlap-check'. 
Allowed are any of the following: " 949 "none, constant, cached, all", opt_overlap_check); 950 ret = -EINVAL; 951 goto fail; 952 } 953 954 r->overlap_check = 0; 955 for (i = 0; i < QCOW2_OL_MAX_BITNR; i++) { 956 /* overlap-check defines a template bitmask, but every flag may be 957 * overwritten through the associated boolean option */ 958 r->overlap_check |= 959 qemu_opt_get_bool(opts, overlap_bool_option_names[i], 960 overlap_check_template & (1 << i)) << i; 961 } 962 963 r->discard_passthrough[QCOW2_DISCARD_NEVER] = false; 964 r->discard_passthrough[QCOW2_DISCARD_ALWAYS] = true; 965 r->discard_passthrough[QCOW2_DISCARD_REQUEST] = 966 qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_REQUEST, 967 flags & BDRV_O_UNMAP); 968 r->discard_passthrough[QCOW2_DISCARD_SNAPSHOT] = 969 qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_SNAPSHOT, true); 970 r->discard_passthrough[QCOW2_DISCARD_OTHER] = 971 qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_OTHER, false); 972 973 switch (s->crypt_method_header) { 974 case QCOW_CRYPT_NONE: 975 if (encryptfmt) { 976 error_setg(errp, "No encryption in image header, but options " 977 "specified format '%s'", encryptfmt); 978 ret = -EINVAL; 979 goto fail; 980 } 981 break; 982 983 case QCOW_CRYPT_AES: 984 if (encryptfmt && !g_str_equal(encryptfmt, "aes")) { 985 error_setg(errp, 986 "Header reported 'aes' encryption format but " 987 "options specify '%s'", encryptfmt); 988 ret = -EINVAL; 989 goto fail; 990 } 991 qdict_del(encryptopts, "format"); 992 r->crypto_opts = block_crypto_open_opts_init( 993 Q_CRYPTO_BLOCK_FORMAT_QCOW, encryptopts, errp); 994 break; 995 996 case QCOW_CRYPT_LUKS: 997 if (encryptfmt && !g_str_equal(encryptfmt, "luks")) { 998 error_setg(errp, 999 "Header reported 'luks' encryption format but " 1000 "options specify '%s'", encryptfmt); 1001 ret = -EINVAL; 1002 goto fail; 1003 } 1004 qdict_del(encryptopts, "format"); 1005 r->crypto_opts = block_crypto_open_opts_init( 1006 Q_CRYPTO_BLOCK_FORMAT_LUKS, encryptopts, errp); 1007 break; 1008 1009 default: 1010 error_setg(errp, "Unsupported encryption method %d", 1011 s->crypt_method_header); 1012 break; 1013 } 1014 if (s->crypt_method_header != QCOW_CRYPT_NONE && !r->crypto_opts) { 1015 ret = -EINVAL; 1016 goto fail; 1017 } 1018 1019 ret = 0; 1020 fail: 1021 QDECREF(encryptopts); 1022 qemu_opts_del(opts); 1023 opts = NULL; 1024 return ret; 1025 } 1026 1027 static void qcow2_update_options_commit(BlockDriverState *bs, 1028 Qcow2ReopenState *r) 1029 { 1030 BDRVQcow2State *s = bs->opaque; 1031 int i; 1032 1033 if (s->l2_table_cache) { 1034 qcow2_cache_destroy(bs, s->l2_table_cache); 1035 } 1036 if (s->refcount_block_cache) { 1037 qcow2_cache_destroy(bs, s->refcount_block_cache); 1038 } 1039 s->l2_table_cache = r->l2_table_cache; 1040 s->refcount_block_cache = r->refcount_block_cache; 1041 1042 s->overlap_check = r->overlap_check; 1043 s->use_lazy_refcounts = r->use_lazy_refcounts; 1044 1045 for (i = 0; i < QCOW2_DISCARD_MAX; i++) { 1046 s->discard_passthrough[i] = r->discard_passthrough[i]; 1047 } 1048 1049 if (s->cache_clean_interval != r->cache_clean_interval) { 1050 cache_clean_timer_del(bs); 1051 s->cache_clean_interval = r->cache_clean_interval; 1052 cache_clean_timer_init(bs, bdrv_get_aio_context(bs)); 1053 } 1054 1055 qapi_free_QCryptoBlockOpenOptions(s->crypto_opts); 1056 s->crypto_opts = r->crypto_opts; 1057 } 1058 1059 static void qcow2_update_options_abort(BlockDriverState *bs, 1060 Qcow2ReopenState *r) 1061 { 1062 if (r->l2_table_cache) { 1063 qcow2_cache_destroy(bs, r->l2_table_cache); 1064 } 1065 if 
(r->refcount_block_cache) { 1066 qcow2_cache_destroy(bs, r->refcount_block_cache); 1067 } 1068 qapi_free_QCryptoBlockOpenOptions(r->crypto_opts); 1069 } 1070 1071 static int qcow2_update_options(BlockDriverState *bs, QDict *options, 1072 int flags, Error **errp) 1073 { 1074 Qcow2ReopenState r = {}; 1075 int ret; 1076 1077 ret = qcow2_update_options_prepare(bs, &r, options, flags, errp); 1078 if (ret >= 0) { 1079 qcow2_update_options_commit(bs, &r); 1080 } else { 1081 qcow2_update_options_abort(bs, &r); 1082 } 1083 1084 return ret; 1085 } 1086 1087 static int qcow2_do_open(BlockDriverState *bs, QDict *options, int flags, 1088 Error **errp) 1089 { 1090 BDRVQcow2State *s = bs->opaque; 1091 unsigned int len, i; 1092 int ret = 0; 1093 QCowHeader header; 1094 Error *local_err = NULL; 1095 uint64_t ext_end; 1096 uint64_t l1_vm_state_index; 1097 bool update_header = false; 1098 1099 ret = bdrv_pread(bs->file, 0, &header, sizeof(header)); 1100 if (ret < 0) { 1101 error_setg_errno(errp, -ret, "Could not read qcow2 header"); 1102 goto fail; 1103 } 1104 be32_to_cpus(&header.magic); 1105 be32_to_cpus(&header.version); 1106 be64_to_cpus(&header.backing_file_offset); 1107 be32_to_cpus(&header.backing_file_size); 1108 be64_to_cpus(&header.size); 1109 be32_to_cpus(&header.cluster_bits); 1110 be32_to_cpus(&header.crypt_method); 1111 be64_to_cpus(&header.l1_table_offset); 1112 be32_to_cpus(&header.l1_size); 1113 be64_to_cpus(&header.refcount_table_offset); 1114 be32_to_cpus(&header.refcount_table_clusters); 1115 be64_to_cpus(&header.snapshots_offset); 1116 be32_to_cpus(&header.nb_snapshots); 1117 1118 if (header.magic != QCOW_MAGIC) { 1119 error_setg(errp, "Image is not in qcow2 format"); 1120 ret = -EINVAL; 1121 goto fail; 1122 } 1123 if (header.version < 2 || header.version > 3) { 1124 error_setg(errp, "Unsupported qcow2 version %" PRIu32, header.version); 1125 ret = -ENOTSUP; 1126 goto fail; 1127 } 1128 1129 s->qcow_version = header.version; 1130 1131 /* Initialise cluster size */ 1132 if (header.cluster_bits < MIN_CLUSTER_BITS || 1133 header.cluster_bits > MAX_CLUSTER_BITS) { 1134 error_setg(errp, "Unsupported cluster size: 2^%" PRIu32, 1135 header.cluster_bits); 1136 ret = -EINVAL; 1137 goto fail; 1138 } 1139 1140 s->cluster_bits = header.cluster_bits; 1141 s->cluster_size = 1 << s->cluster_bits; 1142 s->cluster_sectors = 1 << (s->cluster_bits - 9); 1143 1144 /* Initialise version 3 header fields */ 1145 if (header.version == 2) { 1146 header.incompatible_features = 0; 1147 header.compatible_features = 0; 1148 header.autoclear_features = 0; 1149 header.refcount_order = 4; 1150 header.header_length = 72; 1151 } else { 1152 be64_to_cpus(&header.incompatible_features); 1153 be64_to_cpus(&header.compatible_features); 1154 be64_to_cpus(&header.autoclear_features); 1155 be32_to_cpus(&header.refcount_order); 1156 be32_to_cpus(&header.header_length); 1157 1158 if (header.header_length < 104) { 1159 error_setg(errp, "qcow2 header too short"); 1160 ret = -EINVAL; 1161 goto fail; 1162 } 1163 } 1164 1165 if (header.header_length > s->cluster_size) { 1166 error_setg(errp, "qcow2 header exceeds cluster size"); 1167 ret = -EINVAL; 1168 goto fail; 1169 } 1170 1171 if (header.header_length > sizeof(header)) { 1172 s->unknown_header_fields_size = header.header_length - sizeof(header); 1173 s->unknown_header_fields = g_malloc(s->unknown_header_fields_size); 1174 ret = bdrv_pread(bs->file, sizeof(header), s->unknown_header_fields, 1175 s->unknown_header_fields_size); 1176 if (ret < 0) { 1177 error_setg_errno(errp, -ret, 
"Could not read unknown qcow2 header " 1178 "fields"); 1179 goto fail; 1180 } 1181 } 1182 1183 if (header.backing_file_offset > s->cluster_size) { 1184 error_setg(errp, "Invalid backing file offset"); 1185 ret = -EINVAL; 1186 goto fail; 1187 } 1188 1189 if (header.backing_file_offset) { 1190 ext_end = header.backing_file_offset; 1191 } else { 1192 ext_end = 1 << header.cluster_bits; 1193 } 1194 1195 /* Handle feature bits */ 1196 s->incompatible_features = header.incompatible_features; 1197 s->compatible_features = header.compatible_features; 1198 s->autoclear_features = header.autoclear_features; 1199 1200 if (s->incompatible_features & ~QCOW2_INCOMPAT_MASK) { 1201 void *feature_table = NULL; 1202 qcow2_read_extensions(bs, header.header_length, ext_end, 1203 &feature_table, flags, NULL, NULL); 1204 report_unsupported_feature(errp, feature_table, 1205 s->incompatible_features & 1206 ~QCOW2_INCOMPAT_MASK); 1207 ret = -ENOTSUP; 1208 g_free(feature_table); 1209 goto fail; 1210 } 1211 1212 if (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT) { 1213 /* Corrupt images may not be written to unless they are being repaired 1214 */ 1215 if ((flags & BDRV_O_RDWR) && !(flags & BDRV_O_CHECK)) { 1216 error_setg(errp, "qcow2: Image is corrupt; cannot be opened " 1217 "read/write"); 1218 ret = -EACCES; 1219 goto fail; 1220 } 1221 } 1222 1223 /* Check support for various header values */ 1224 if (header.refcount_order > 6) { 1225 error_setg(errp, "Reference count entry width too large; may not " 1226 "exceed 64 bits"); 1227 ret = -EINVAL; 1228 goto fail; 1229 } 1230 s->refcount_order = header.refcount_order; 1231 s->refcount_bits = 1 << s->refcount_order; 1232 s->refcount_max = UINT64_C(1) << (s->refcount_bits - 1); 1233 s->refcount_max += s->refcount_max - 1; 1234 1235 s->crypt_method_header = header.crypt_method; 1236 if (s->crypt_method_header) { 1237 if (bdrv_uses_whitelist() && 1238 s->crypt_method_header == QCOW_CRYPT_AES) { 1239 error_setg(errp, 1240 "Use of AES-CBC encrypted qcow2 images is no longer " 1241 "supported in system emulators"); 1242 error_append_hint(errp, 1243 "You can use 'qemu-img convert' to convert your " 1244 "image to an alternative supported format, such " 1245 "as unencrypted qcow2, or raw with the LUKS " 1246 "format instead.\n"); 1247 ret = -ENOSYS; 1248 goto fail; 1249 } 1250 1251 if (s->crypt_method_header == QCOW_CRYPT_AES) { 1252 s->crypt_physical_offset = false; 1253 } else { 1254 /* Assuming LUKS and any future crypt methods we 1255 * add will all use physical offsets, due to the 1256 * fact that the alternative is insecure... 
*/ 1257 s->crypt_physical_offset = true; 1258 } 1259 1260 bs->encrypted = true; 1261 } 1262 1263 s->l2_bits = s->cluster_bits - 3; /* L2 is always one cluster */ 1264 s->l2_size = 1 << s->l2_bits; 1265 /* 2^(s->refcount_order - 3) is the refcount width in bytes */ 1266 s->refcount_block_bits = s->cluster_bits - (s->refcount_order - 3); 1267 s->refcount_block_size = 1 << s->refcount_block_bits; 1268 bs->total_sectors = header.size / 512; 1269 s->csize_shift = (62 - (s->cluster_bits - 8)); 1270 s->csize_mask = (1 << (s->cluster_bits - 8)) - 1; 1271 s->cluster_offset_mask = (1LL << s->csize_shift) - 1; 1272 1273 s->refcount_table_offset = header.refcount_table_offset; 1274 s->refcount_table_size = 1275 header.refcount_table_clusters << (s->cluster_bits - 3); 1276 1277 if (header.refcount_table_clusters > qcow2_max_refcount_clusters(s)) { 1278 error_setg(errp, "Reference count table too large"); 1279 ret = -EINVAL; 1280 goto fail; 1281 } 1282 1283 ret = validate_table_offset(bs, s->refcount_table_offset, 1284 s->refcount_table_size, sizeof(uint64_t)); 1285 if (ret < 0) { 1286 error_setg(errp, "Invalid reference count table offset"); 1287 goto fail; 1288 } 1289 1290 /* Snapshot table offset/length */ 1291 if (header.nb_snapshots > QCOW_MAX_SNAPSHOTS) { 1292 error_setg(errp, "Too many snapshots"); 1293 ret = -EINVAL; 1294 goto fail; 1295 } 1296 1297 ret = validate_table_offset(bs, header.snapshots_offset, 1298 header.nb_snapshots, 1299 sizeof(QCowSnapshotHeader)); 1300 if (ret < 0) { 1301 error_setg(errp, "Invalid snapshot table offset"); 1302 goto fail; 1303 } 1304 1305 /* read the level 1 table */ 1306 if (header.l1_size > QCOW_MAX_L1_SIZE / sizeof(uint64_t)) { 1307 error_setg(errp, "Active L1 table too large"); 1308 ret = -EFBIG; 1309 goto fail; 1310 } 1311 s->l1_size = header.l1_size; 1312 1313 l1_vm_state_index = size_to_l1(s, header.size); 1314 if (l1_vm_state_index > INT_MAX) { 1315 error_setg(errp, "Image is too big"); 1316 ret = -EFBIG; 1317 goto fail; 1318 } 1319 s->l1_vm_state_index = l1_vm_state_index; 1320 1321 /* the L1 table must contain at least enough entries to put 1322 header.size bytes */ 1323 if (s->l1_size < s->l1_vm_state_index) { 1324 error_setg(errp, "L1 table is too small"); 1325 ret = -EINVAL; 1326 goto fail; 1327 } 1328 1329 ret = validate_table_offset(bs, header.l1_table_offset, 1330 header.l1_size, sizeof(uint64_t)); 1331 if (ret < 0) { 1332 error_setg(errp, "Invalid L1 table offset"); 1333 goto fail; 1334 } 1335 s->l1_table_offset = header.l1_table_offset; 1336 1337 1338 if (s->l1_size > 0) { 1339 s->l1_table = qemu_try_blockalign(bs->file->bs, 1340 align_offset(s->l1_size * sizeof(uint64_t), 512)); 1341 if (s->l1_table == NULL) { 1342 error_setg(errp, "Could not allocate L1 table"); 1343 ret = -ENOMEM; 1344 goto fail; 1345 } 1346 ret = bdrv_pread(bs->file, s->l1_table_offset, s->l1_table, 1347 s->l1_size * sizeof(uint64_t)); 1348 if (ret < 0) { 1349 error_setg_errno(errp, -ret, "Could not read L1 table"); 1350 goto fail; 1351 } 1352 for(i = 0;i < s->l1_size; i++) { 1353 be64_to_cpus(&s->l1_table[i]); 1354 } 1355 } 1356 1357 /* Parse driver-specific options */ 1358 ret = qcow2_update_options(bs, options, flags, errp); 1359 if (ret < 0) { 1360 goto fail; 1361 } 1362 1363 s->cluster_cache = g_malloc(s->cluster_size); 1364 /* one more sector for decompressed data alignment */ 1365 s->cluster_data = qemu_try_blockalign(bs->file->bs, QCOW_MAX_CRYPT_CLUSTERS 1366 * s->cluster_size + 512); 1367 if (s->cluster_data == NULL) { 1368 error_setg(errp, "Could not allocate 
temporary cluster buffer"); 1369 ret = -ENOMEM; 1370 goto fail; 1371 } 1372 1373 s->cluster_cache_offset = -1; 1374 s->flags = flags; 1375 1376 ret = qcow2_refcount_init(bs); 1377 if (ret != 0) { 1378 error_setg_errno(errp, -ret, "Could not initialize refcount handling"); 1379 goto fail; 1380 } 1381 1382 QLIST_INIT(&s->cluster_allocs); 1383 QTAILQ_INIT(&s->discards); 1384 1385 /* read qcow2 extensions */ 1386 if (qcow2_read_extensions(bs, header.header_length, ext_end, NULL, 1387 flags, &update_header, &local_err)) { 1388 error_propagate(errp, local_err); 1389 ret = -EINVAL; 1390 goto fail; 1391 } 1392 1393 /* qcow2_read_extension may have set up the crypto context 1394 * if the crypt method needs a header region, some methods 1395 * don't need header extensions, so must check here 1396 */ 1397 if (s->crypt_method_header && !s->crypto) { 1398 if (s->crypt_method_header == QCOW_CRYPT_AES) { 1399 unsigned int cflags = 0; 1400 if (flags & BDRV_O_NO_IO) { 1401 cflags |= QCRYPTO_BLOCK_OPEN_NO_IO; 1402 } 1403 s->crypto = qcrypto_block_open(s->crypto_opts, "encrypt.", 1404 NULL, NULL, cflags, errp); 1405 if (!s->crypto) { 1406 ret = -EINVAL; 1407 goto fail; 1408 } 1409 } else if (!(flags & BDRV_O_NO_IO)) { 1410 error_setg(errp, "Missing CRYPTO header for crypt method %d", 1411 s->crypt_method_header); 1412 ret = -EINVAL; 1413 goto fail; 1414 } 1415 } 1416 1417 /* read the backing file name */ 1418 if (header.backing_file_offset != 0) { 1419 len = header.backing_file_size; 1420 if (len > MIN(1023, s->cluster_size - header.backing_file_offset) || 1421 len >= sizeof(bs->backing_file)) { 1422 error_setg(errp, "Backing file name too long"); 1423 ret = -EINVAL; 1424 goto fail; 1425 } 1426 ret = bdrv_pread(bs->file, header.backing_file_offset, 1427 bs->backing_file, len); 1428 if (ret < 0) { 1429 error_setg_errno(errp, -ret, "Could not read backing file name"); 1430 goto fail; 1431 } 1432 bs->backing_file[len] = '\0'; 1433 s->image_backing_file = g_strdup(bs->backing_file); 1434 } 1435 1436 /* Internal snapshots */ 1437 s->snapshots_offset = header.snapshots_offset; 1438 s->nb_snapshots = header.nb_snapshots; 1439 1440 ret = qcow2_read_snapshots(bs); 1441 if (ret < 0) { 1442 error_setg_errno(errp, -ret, "Could not read snapshots"); 1443 goto fail; 1444 } 1445 1446 /* Clear unknown autoclear feature bits */ 1447 update_header |= s->autoclear_features & ~QCOW2_AUTOCLEAR_MASK; 1448 update_header = 1449 update_header && !bs->read_only && !(flags & BDRV_O_INACTIVE); 1450 if (update_header) { 1451 s->autoclear_features &= QCOW2_AUTOCLEAR_MASK; 1452 } 1453 1454 if (qcow2_load_autoloading_dirty_bitmaps(bs, &local_err)) { 1455 update_header = false; 1456 } 1457 if (local_err != NULL) { 1458 error_propagate(errp, local_err); 1459 ret = -EINVAL; 1460 goto fail; 1461 } 1462 1463 if (update_header) { 1464 ret = qcow2_update_header(bs); 1465 if (ret < 0) { 1466 error_setg_errno(errp, -ret, "Could not update qcow2 header"); 1467 goto fail; 1468 } 1469 } 1470 1471 /* Initialise locks */ 1472 qemu_co_mutex_init(&s->lock); 1473 bs->supported_zero_flags = BDRV_REQ_MAY_UNMAP; 1474 1475 /* Repair image if dirty */ 1476 if (!(flags & (BDRV_O_CHECK | BDRV_O_INACTIVE)) && !bs->read_only && 1477 (s->incompatible_features & QCOW2_INCOMPAT_DIRTY)) { 1478 BdrvCheckResult result = {0}; 1479 1480 ret = qcow2_check(bs, &result, BDRV_FIX_ERRORS | BDRV_FIX_LEAKS); 1481 if (ret < 0) { 1482 error_setg_errno(errp, -ret, "Could not repair dirty image"); 1483 goto fail; 1484 } 1485 } 1486 1487 #ifdef DEBUG_ALLOC 1488 { 1489 BdrvCheckResult 
result = {0}; 1490 qcow2_check_refcounts(bs, &result, 0); 1491 } 1492 #endif 1493 return ret; 1494 1495 fail: 1496 g_free(s->unknown_header_fields); 1497 cleanup_unknown_header_ext(bs); 1498 qcow2_free_snapshots(bs); 1499 qcow2_refcount_close(bs); 1500 qemu_vfree(s->l1_table); 1501 /* else pre-write overlap checks in cache_destroy may crash */ 1502 s->l1_table = NULL; 1503 cache_clean_timer_del(bs); 1504 if (s->l2_table_cache) { 1505 qcow2_cache_destroy(bs, s->l2_table_cache); 1506 } 1507 if (s->refcount_block_cache) { 1508 qcow2_cache_destroy(bs, s->refcount_block_cache); 1509 } 1510 g_free(s->cluster_cache); 1511 qemu_vfree(s->cluster_data); 1512 qcrypto_block_free(s->crypto); 1513 qapi_free_QCryptoBlockOpenOptions(s->crypto_opts); 1514 return ret; 1515 } 1516 1517 static int qcow2_open(BlockDriverState *bs, QDict *options, int flags, 1518 Error **errp) 1519 { 1520 bs->file = bdrv_open_child(NULL, options, "file", bs, &child_file, 1521 false, errp); 1522 if (!bs->file) { 1523 return -EINVAL; 1524 } 1525 1526 return qcow2_do_open(bs, options, flags, errp); 1527 } 1528 1529 static void qcow2_refresh_limits(BlockDriverState *bs, Error **errp) 1530 { 1531 BDRVQcow2State *s = bs->opaque; 1532 1533 if (bs->encrypted) { 1534 /* Encryption works on a sector granularity */ 1535 bs->bl.request_alignment = BDRV_SECTOR_SIZE; 1536 } 1537 bs->bl.pwrite_zeroes_alignment = s->cluster_size; 1538 bs->bl.pdiscard_alignment = s->cluster_size; 1539 } 1540 1541 static int qcow2_reopen_prepare(BDRVReopenState *state, 1542 BlockReopenQueue *queue, Error **errp) 1543 { 1544 Qcow2ReopenState *r; 1545 int ret; 1546 1547 r = g_new0(Qcow2ReopenState, 1); 1548 state->opaque = r; 1549 1550 ret = qcow2_update_options_prepare(state->bs, r, state->options, 1551 state->flags, errp); 1552 if (ret < 0) { 1553 goto fail; 1554 } 1555 1556 /* We need to write out any unwritten data if we reopen read-only. 
*/ 1557 if ((state->flags & BDRV_O_RDWR) == 0) { 1558 ret = qcow2_reopen_bitmaps_ro(state->bs, errp); 1559 if (ret < 0) { 1560 goto fail; 1561 } 1562 1563 ret = bdrv_flush(state->bs); 1564 if (ret < 0) { 1565 goto fail; 1566 } 1567 1568 ret = qcow2_mark_clean(state->bs); 1569 if (ret < 0) { 1570 goto fail; 1571 } 1572 } 1573 1574 return 0; 1575 1576 fail: 1577 qcow2_update_options_abort(state->bs, r); 1578 g_free(r); 1579 return ret; 1580 } 1581 1582 static void qcow2_reopen_commit(BDRVReopenState *state) 1583 { 1584 qcow2_update_options_commit(state->bs, state->opaque); 1585 g_free(state->opaque); 1586 } 1587 1588 static void qcow2_reopen_abort(BDRVReopenState *state) 1589 { 1590 qcow2_update_options_abort(state->bs, state->opaque); 1591 g_free(state->opaque); 1592 } 1593 1594 static void qcow2_join_options(QDict *options, QDict *old_options) 1595 { 1596 bool has_new_overlap_template = 1597 qdict_haskey(options, QCOW2_OPT_OVERLAP) || 1598 qdict_haskey(options, QCOW2_OPT_OVERLAP_TEMPLATE); 1599 bool has_new_total_cache_size = 1600 qdict_haskey(options, QCOW2_OPT_CACHE_SIZE); 1601 bool has_all_cache_options; 1602 1603 /* New overlap template overrides all old overlap options */ 1604 if (has_new_overlap_template) { 1605 qdict_del(old_options, QCOW2_OPT_OVERLAP); 1606 qdict_del(old_options, QCOW2_OPT_OVERLAP_TEMPLATE); 1607 qdict_del(old_options, QCOW2_OPT_OVERLAP_MAIN_HEADER); 1608 qdict_del(old_options, QCOW2_OPT_OVERLAP_ACTIVE_L1); 1609 qdict_del(old_options, QCOW2_OPT_OVERLAP_ACTIVE_L2); 1610 qdict_del(old_options, QCOW2_OPT_OVERLAP_REFCOUNT_TABLE); 1611 qdict_del(old_options, QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK); 1612 qdict_del(old_options, QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE); 1613 qdict_del(old_options, QCOW2_OPT_OVERLAP_INACTIVE_L1); 1614 qdict_del(old_options, QCOW2_OPT_OVERLAP_INACTIVE_L2); 1615 } 1616 1617 /* New total cache size overrides all old options */ 1618 if (qdict_haskey(options, QCOW2_OPT_CACHE_SIZE)) { 1619 qdict_del(old_options, QCOW2_OPT_L2_CACHE_SIZE); 1620 qdict_del(old_options, QCOW2_OPT_REFCOUNT_CACHE_SIZE); 1621 } 1622 1623 qdict_join(options, old_options, false); 1624 1625 /* 1626 * If after merging all cache size options are set, an old total size is 1627 * overwritten. Do keep all options, however, if all three are new. The 1628 * resulting error message is what we want to happen. 
1629 */ 1630 has_all_cache_options = 1631 qdict_haskey(options, QCOW2_OPT_CACHE_SIZE) || 1632 qdict_haskey(options, QCOW2_OPT_L2_CACHE_SIZE) || 1633 qdict_haskey(options, QCOW2_OPT_REFCOUNT_CACHE_SIZE); 1634 1635 if (has_all_cache_options && !has_new_total_cache_size) { 1636 qdict_del(options, QCOW2_OPT_CACHE_SIZE); 1637 } 1638 } 1639 1640 static int64_t coroutine_fn qcow2_co_get_block_status(BlockDriverState *bs, 1641 int64_t sector_num, int nb_sectors, int *pnum, BlockDriverState **file) 1642 { 1643 BDRVQcow2State *s = bs->opaque; 1644 uint64_t cluster_offset; 1645 int index_in_cluster, ret; 1646 unsigned int bytes; 1647 int64_t status = 0; 1648 1649 bytes = MIN(INT_MAX, nb_sectors * BDRV_SECTOR_SIZE); 1650 qemu_co_mutex_lock(&s->lock); 1651 ret = qcow2_get_cluster_offset(bs, sector_num << 9, &bytes, 1652 &cluster_offset); 1653 qemu_co_mutex_unlock(&s->lock); 1654 if (ret < 0) { 1655 return ret; 1656 } 1657 1658 *pnum = bytes >> BDRV_SECTOR_BITS; 1659 1660 if (cluster_offset != 0 && ret != QCOW2_CLUSTER_COMPRESSED && 1661 !s->crypto) { 1662 index_in_cluster = sector_num & (s->cluster_sectors - 1); 1663 cluster_offset |= (index_in_cluster << BDRV_SECTOR_BITS); 1664 *file = bs->file->bs; 1665 status |= BDRV_BLOCK_OFFSET_VALID | cluster_offset; 1666 } 1667 if (ret == QCOW2_CLUSTER_ZERO_PLAIN || ret == QCOW2_CLUSTER_ZERO_ALLOC) { 1668 status |= BDRV_BLOCK_ZERO; 1669 } else if (ret != QCOW2_CLUSTER_UNALLOCATED) { 1670 status |= BDRV_BLOCK_DATA; 1671 } 1672 return status; 1673 } 1674 1675 /* handle reading after the end of the backing file */ 1676 int qcow2_backing_read1(BlockDriverState *bs, QEMUIOVector *qiov, 1677 int64_t offset, int bytes) 1678 { 1679 uint64_t bs_size = bs->total_sectors * BDRV_SECTOR_SIZE; 1680 int n1; 1681 1682 if ((offset + bytes) <= bs_size) { 1683 return bytes; 1684 } 1685 1686 if (offset >= bs_size) { 1687 n1 = 0; 1688 } else { 1689 n1 = bs_size - offset; 1690 } 1691 1692 qemu_iovec_memset(qiov, n1, 0, bytes - n1); 1693 1694 return n1; 1695 } 1696 1697 static coroutine_fn int qcow2_co_preadv(BlockDriverState *bs, uint64_t offset, 1698 uint64_t bytes, QEMUIOVector *qiov, 1699 int flags) 1700 { 1701 BDRVQcow2State *s = bs->opaque; 1702 int offset_in_cluster, n1; 1703 int ret; 1704 unsigned int cur_bytes; /* number of bytes in current iteration */ 1705 uint64_t cluster_offset = 0; 1706 uint64_t bytes_done = 0; 1707 QEMUIOVector hd_qiov; 1708 uint8_t *cluster_data = NULL; 1709 1710 qemu_iovec_init(&hd_qiov, qiov->niov); 1711 1712 qemu_co_mutex_lock(&s->lock); 1713 1714 while (bytes != 0) { 1715 1716 /* prepare next request */ 1717 cur_bytes = MIN(bytes, INT_MAX); 1718 if (s->crypto) { 1719 cur_bytes = MIN(cur_bytes, 1720 QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); 1721 } 1722 1723 ret = qcow2_get_cluster_offset(bs, offset, &cur_bytes, &cluster_offset); 1724 if (ret < 0) { 1725 goto fail; 1726 } 1727 1728 offset_in_cluster = offset_into_cluster(s, offset); 1729 1730 qemu_iovec_reset(&hd_qiov); 1731 qemu_iovec_concat(&hd_qiov, qiov, bytes_done, cur_bytes); 1732 1733 switch (ret) { 1734 case QCOW2_CLUSTER_UNALLOCATED: 1735 1736 if (bs->backing) { 1737 /* read from the base image */ 1738 n1 = qcow2_backing_read1(bs->backing->bs, &hd_qiov, 1739 offset, cur_bytes); 1740 if (n1 > 0) { 1741 QEMUIOVector local_qiov; 1742 1743 qemu_iovec_init(&local_qiov, hd_qiov.niov); 1744 qemu_iovec_concat(&local_qiov, &hd_qiov, 0, n1); 1745 1746 BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO); 1747 qemu_co_mutex_unlock(&s->lock); 1748 ret = bdrv_co_preadv(bs->backing, offset, n1, 1749 
&local_qiov, 0); 1750 qemu_co_mutex_lock(&s->lock); 1751 1752 qemu_iovec_destroy(&local_qiov); 1753 1754 if (ret < 0) { 1755 goto fail; 1756 } 1757 } 1758 } else { 1759 /* Note: in this case, no need to wait */ 1760 qemu_iovec_memset(&hd_qiov, 0, 0, cur_bytes); 1761 } 1762 break; 1763 1764 case QCOW2_CLUSTER_ZERO_PLAIN: 1765 case QCOW2_CLUSTER_ZERO_ALLOC: 1766 qemu_iovec_memset(&hd_qiov, 0, 0, cur_bytes); 1767 break; 1768 1769 case QCOW2_CLUSTER_COMPRESSED: 1770 /* add AIO support for compressed blocks ? */ 1771 ret = qcow2_decompress_cluster(bs, cluster_offset); 1772 if (ret < 0) { 1773 goto fail; 1774 } 1775 1776 qemu_iovec_from_buf(&hd_qiov, 0, 1777 s->cluster_cache + offset_in_cluster, 1778 cur_bytes); 1779 break; 1780 1781 case QCOW2_CLUSTER_NORMAL: 1782 if ((cluster_offset & 511) != 0) { 1783 ret = -EIO; 1784 goto fail; 1785 } 1786 1787 if (bs->encrypted) { 1788 assert(s->crypto); 1789 1790 /* 1791 * For encrypted images, read everything into a temporary 1792 * contiguous buffer on which the AES functions can work. 1793 */ 1794 if (!cluster_data) { 1795 cluster_data = 1796 qemu_try_blockalign(bs->file->bs, 1797 QCOW_MAX_CRYPT_CLUSTERS 1798 * s->cluster_size); 1799 if (cluster_data == NULL) { 1800 ret = -ENOMEM; 1801 goto fail; 1802 } 1803 } 1804 1805 assert(cur_bytes <= QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); 1806 qemu_iovec_reset(&hd_qiov); 1807 qemu_iovec_add(&hd_qiov, cluster_data, cur_bytes); 1808 } 1809 1810 BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO); 1811 qemu_co_mutex_unlock(&s->lock); 1812 ret = bdrv_co_preadv(bs->file, 1813 cluster_offset + offset_in_cluster, 1814 cur_bytes, &hd_qiov, 0); 1815 qemu_co_mutex_lock(&s->lock); 1816 if (ret < 0) { 1817 goto fail; 1818 } 1819 if (bs->encrypted) { 1820 assert(s->crypto); 1821 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0); 1822 assert((cur_bytes & (BDRV_SECTOR_SIZE - 1)) == 0); 1823 Error *err = NULL; 1824 if (qcrypto_block_decrypt(s->crypto, 1825 (s->crypt_physical_offset ? 
1826 cluster_offset + offset_in_cluster : 1827 offset) >> BDRV_SECTOR_BITS, 1828 cluster_data, 1829 cur_bytes, 1830 &err) < 0) { 1831 error_free(err); 1832 ret = -EIO; 1833 goto fail; 1834 } 1835 qemu_iovec_from_buf(qiov, bytes_done, cluster_data, cur_bytes); 1836 } 1837 break; 1838 1839 default: 1840 g_assert_not_reached(); 1841 ret = -EIO; 1842 goto fail; 1843 } 1844 1845 bytes -= cur_bytes; 1846 offset += cur_bytes; 1847 bytes_done += cur_bytes; 1848 } 1849 ret = 0; 1850 1851 fail: 1852 qemu_co_mutex_unlock(&s->lock); 1853 1854 qemu_iovec_destroy(&hd_qiov); 1855 qemu_vfree(cluster_data); 1856 1857 return ret; 1858 } 1859 1860 /* Check if it's possible to merge a write request with the writing of 1861 * the data from the COW regions */ 1862 static bool merge_cow(uint64_t offset, unsigned bytes, 1863 QEMUIOVector *hd_qiov, QCowL2Meta *l2meta) 1864 { 1865 QCowL2Meta *m; 1866 1867 for (m = l2meta; m != NULL; m = m->next) { 1868 /* If both COW regions are empty then there's nothing to merge */ 1869 if (m->cow_start.nb_bytes == 0 && m->cow_end.nb_bytes == 0) { 1870 continue; 1871 } 1872 1873 /* The data (middle) region must be immediately after the 1874 * start region */ 1875 if (l2meta_cow_start(m) + m->cow_start.nb_bytes != offset) { 1876 continue; 1877 } 1878 1879 /* The end region must be immediately after the data (middle) 1880 * region */ 1881 if (m->offset + m->cow_end.offset != offset + bytes) { 1882 continue; 1883 } 1884 1885 /* Make sure that adding both COW regions to the QEMUIOVector 1886 * does not exceed IOV_MAX */ 1887 if (hd_qiov->niov > IOV_MAX - 2) { 1888 continue; 1889 } 1890 1891 m->data_qiov = hd_qiov; 1892 return true; 1893 } 1894 1895 return false; 1896 } 1897 1898 static coroutine_fn int qcow2_co_pwritev(BlockDriverState *bs, uint64_t offset, 1899 uint64_t bytes, QEMUIOVector *qiov, 1900 int flags) 1901 { 1902 BDRVQcow2State *s = bs->opaque; 1903 int offset_in_cluster; 1904 int ret; 1905 unsigned int cur_bytes; /* number of sectors in current iteration */ 1906 uint64_t cluster_offset; 1907 QEMUIOVector hd_qiov; 1908 uint64_t bytes_done = 0; 1909 uint8_t *cluster_data = NULL; 1910 QCowL2Meta *l2meta = NULL; 1911 1912 trace_qcow2_writev_start_req(qemu_coroutine_self(), offset, bytes); 1913 1914 qemu_iovec_init(&hd_qiov, qiov->niov); 1915 1916 s->cluster_cache_offset = -1; /* disable compressed cache */ 1917 1918 qemu_co_mutex_lock(&s->lock); 1919 1920 while (bytes != 0) { 1921 1922 l2meta = NULL; 1923 1924 trace_qcow2_writev_start_part(qemu_coroutine_self()); 1925 offset_in_cluster = offset_into_cluster(s, offset); 1926 cur_bytes = MIN(bytes, INT_MAX); 1927 if (bs->encrypted) { 1928 cur_bytes = MIN(cur_bytes, 1929 QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size 1930 - offset_in_cluster); 1931 } 1932 1933 ret = qcow2_alloc_cluster_offset(bs, offset, &cur_bytes, 1934 &cluster_offset, &l2meta); 1935 if (ret < 0) { 1936 goto fail; 1937 } 1938 1939 assert((cluster_offset & 511) == 0); 1940 1941 qemu_iovec_reset(&hd_qiov); 1942 qemu_iovec_concat(&hd_qiov, qiov, bytes_done, cur_bytes); 1943 1944 if (bs->encrypted) { 1945 Error *err = NULL; 1946 assert(s->crypto); 1947 if (!cluster_data) { 1948 cluster_data = qemu_try_blockalign(bs->file->bs, 1949 QCOW_MAX_CRYPT_CLUSTERS 1950 * s->cluster_size); 1951 if (cluster_data == NULL) { 1952 ret = -ENOMEM; 1953 goto fail; 1954 } 1955 } 1956 1957 assert(hd_qiov.size <= 1958 QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); 1959 qemu_iovec_to_buf(&hd_qiov, 0, cluster_data, hd_qiov.size); 1960 1961 if (qcrypto_block_encrypt(s->crypto, 1962 
(s->crypt_physical_offset ? 1963 cluster_offset + offset_in_cluster : 1964 offset) >> BDRV_SECTOR_BITS, 1965 cluster_data, 1966 cur_bytes, &err) < 0) { 1967 error_free(err); 1968 ret = -EIO; 1969 goto fail; 1970 } 1971 1972 qemu_iovec_reset(&hd_qiov); 1973 qemu_iovec_add(&hd_qiov, cluster_data, cur_bytes); 1974 } 1975 1976 ret = qcow2_pre_write_overlap_check(bs, 0, 1977 cluster_offset + offset_in_cluster, cur_bytes); 1978 if (ret < 0) { 1979 goto fail; 1980 } 1981 1982 /* If we need to do COW, check if it's possible to merge the 1983 * writing of the guest data together with that of the COW regions. 1984 * If it's not possible (or not necessary) then write the 1985 * guest data now. */ 1986 if (!merge_cow(offset, cur_bytes, &hd_qiov, l2meta)) { 1987 qemu_co_mutex_unlock(&s->lock); 1988 BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO); 1989 trace_qcow2_writev_data(qemu_coroutine_self(), 1990 cluster_offset + offset_in_cluster); 1991 ret = bdrv_co_pwritev(bs->file, 1992 cluster_offset + offset_in_cluster, 1993 cur_bytes, &hd_qiov, 0); 1994 qemu_co_mutex_lock(&s->lock); 1995 if (ret < 0) { 1996 goto fail; 1997 } 1998 } 1999 2000 while (l2meta != NULL) { 2001 QCowL2Meta *next; 2002 2003 ret = qcow2_alloc_cluster_link_l2(bs, l2meta); 2004 if (ret < 0) { 2005 goto fail; 2006 } 2007 2008 /* Take the request off the list of running requests */ 2009 if (l2meta->nb_clusters != 0) { 2010 QLIST_REMOVE(l2meta, next_in_flight); 2011 } 2012 2013 qemu_co_queue_restart_all(&l2meta->dependent_requests); 2014 2015 next = l2meta->next; 2016 g_free(l2meta); 2017 l2meta = next; 2018 } 2019 2020 bytes -= cur_bytes; 2021 offset += cur_bytes; 2022 bytes_done += cur_bytes; 2023 trace_qcow2_writev_done_part(qemu_coroutine_self(), cur_bytes); 2024 } 2025 ret = 0; 2026 2027 fail: 2028 while (l2meta != NULL) { 2029 QCowL2Meta *next; 2030 2031 if (l2meta->nb_clusters != 0) { 2032 QLIST_REMOVE(l2meta, next_in_flight); 2033 } 2034 qemu_co_queue_restart_all(&l2meta->dependent_requests); 2035 2036 next = l2meta->next; 2037 g_free(l2meta); 2038 l2meta = next; 2039 } 2040 2041 qemu_co_mutex_unlock(&s->lock); 2042 2043 qemu_iovec_destroy(&hd_qiov); 2044 qemu_vfree(cluster_data); 2045 trace_qcow2_writev_done_req(qemu_coroutine_self(), ret); 2046 2047 return ret; 2048 } 2049 2050 static int qcow2_inactivate(BlockDriverState *bs) 2051 { 2052 BDRVQcow2State *s = bs->opaque; 2053 int ret, result = 0; 2054 Error *local_err = NULL; 2055 2056 ret = qcow2_cache_flush(bs, s->l2_table_cache); 2057 if (ret) { 2058 result = ret; 2059 error_report("Failed to flush the L2 table cache: %s", 2060 strerror(-ret)); 2061 } 2062 2063 ret = qcow2_cache_flush(bs, s->refcount_block_cache); 2064 if (ret) { 2065 result = ret; 2066 error_report("Failed to flush the refcount block cache: %s", 2067 strerror(-ret)); 2068 } 2069 2070 qcow2_store_persistent_dirty_bitmaps(bs, &local_err); 2071 if (local_err != NULL) { 2072 result = -EINVAL; 2073 error_report_err(local_err); 2074 error_report("Persistent bitmaps are lost for node '%s'", 2075 bdrv_get_device_or_node_name(bs)); 2076 } 2077 2078 if (result == 0) { 2079 qcow2_mark_clean(bs); 2080 } 2081 2082 return result; 2083 } 2084 2085 static void qcow2_close(BlockDriverState *bs) 2086 { 2087 BDRVQcow2State *s = bs->opaque; 2088 qemu_vfree(s->l1_table); 2089 /* else pre-write overlap checks in cache_destroy may crash */ 2090 s->l1_table = NULL; 2091 2092 if (!(s->flags & BDRV_O_INACTIVE)) { 2093 qcow2_inactivate(bs); 2094 } 2095 2096 cache_clean_timer_del(bs); 2097 qcow2_cache_destroy(bs, s->l2_table_cache); 2098 
qcow2_cache_destroy(bs, s->refcount_block_cache); 2099 2100 qcrypto_block_free(s->crypto); 2101 s->crypto = NULL; 2102 2103 g_free(s->unknown_header_fields); 2104 cleanup_unknown_header_ext(bs); 2105 2106 g_free(s->image_backing_file); 2107 g_free(s->image_backing_format); 2108 2109 g_free(s->cluster_cache); 2110 qemu_vfree(s->cluster_data); 2111 qcow2_refcount_close(bs); 2112 qcow2_free_snapshots(bs); 2113 } 2114 2115 static void qcow2_invalidate_cache(BlockDriverState *bs, Error **errp) 2116 { 2117 BDRVQcow2State *s = bs->opaque; 2118 int flags = s->flags; 2119 QCryptoBlock *crypto = NULL; 2120 QDict *options; 2121 Error *local_err = NULL; 2122 int ret; 2123 2124 /* 2125 * Backing files are read-only which makes all of their metadata immutable, 2126 * that means we don't have to worry about reopening them here. 2127 */ 2128 2129 crypto = s->crypto; 2130 s->crypto = NULL; 2131 2132 qcow2_close(bs); 2133 2134 memset(s, 0, sizeof(BDRVQcow2State)); 2135 options = qdict_clone_shallow(bs->options); 2136 2137 flags &= ~BDRV_O_INACTIVE; 2138 ret = qcow2_do_open(bs, options, flags, &local_err); 2139 QDECREF(options); 2140 if (local_err) { 2141 error_propagate(errp, local_err); 2142 error_prepend(errp, "Could not reopen qcow2 layer: "); 2143 bs->drv = NULL; 2144 return; 2145 } else if (ret < 0) { 2146 error_setg_errno(errp, -ret, "Could not reopen qcow2 layer"); 2147 bs->drv = NULL; 2148 return; 2149 } 2150 2151 s->crypto = crypto; 2152 } 2153 2154 static size_t header_ext_add(char *buf, uint32_t magic, const void *s, 2155 size_t len, size_t buflen) 2156 { 2157 QCowExtension *ext_backing_fmt = (QCowExtension*) buf; 2158 size_t ext_len = sizeof(QCowExtension) + ((len + 7) & ~7); 2159 2160 if (buflen < ext_len) { 2161 return -ENOSPC; 2162 } 2163 2164 *ext_backing_fmt = (QCowExtension) { 2165 .magic = cpu_to_be32(magic), 2166 .len = cpu_to_be32(len), 2167 }; 2168 2169 if (len) { 2170 memcpy(buf + sizeof(QCowExtension), s, len); 2171 } 2172 2173 return ext_len; 2174 } 2175 2176 /* 2177 * Updates the qcow2 header, including the variable length parts of it, i.e. 2178 * the backing file name and all extensions. qcow2 was not designed to allow 2179 * such changes, so if we run out of space (we can only use the first cluster) 2180 * this function may fail. 2181 * 2182 * Returns 0 on success, -errno in error cases. 
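 *
 * As an illustrative sketch (not a format specification), the code below
 * lays out the first cluster as: the fixed QCowHeader (the version 2 prefix
 * or the full version 3 struct), any unknown header fields preserved
 * verbatim, the header extensions (backing format, crypto header pointer,
 * feature table, bitmaps, unknown extensions, end-of-extensions marker),
 * and finally the backing file name, which is not NUL-terminated and whose
 * offset and size are recorded in the header.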
2183 */ 2184 int qcow2_update_header(BlockDriverState *bs) 2185 { 2186 BDRVQcow2State *s = bs->opaque; 2187 QCowHeader *header; 2188 char *buf; 2189 size_t buflen = s->cluster_size; 2190 int ret; 2191 uint64_t total_size; 2192 uint32_t refcount_table_clusters; 2193 size_t header_length; 2194 Qcow2UnknownHeaderExtension *uext; 2195 2196 buf = qemu_blockalign(bs, buflen); 2197 2198 /* Header structure */ 2199 header = (QCowHeader*) buf; 2200 2201 if (buflen < sizeof(*header)) { 2202 ret = -ENOSPC; 2203 goto fail; 2204 } 2205 2206 header_length = sizeof(*header) + s->unknown_header_fields_size; 2207 total_size = bs->total_sectors * BDRV_SECTOR_SIZE; 2208 refcount_table_clusters = s->refcount_table_size >> (s->cluster_bits - 3); 2209 2210 *header = (QCowHeader) { 2211 /* Version 2 fields */ 2212 .magic = cpu_to_be32(QCOW_MAGIC), 2213 .version = cpu_to_be32(s->qcow_version), 2214 .backing_file_offset = 0, 2215 .backing_file_size = 0, 2216 .cluster_bits = cpu_to_be32(s->cluster_bits), 2217 .size = cpu_to_be64(total_size), 2218 .crypt_method = cpu_to_be32(s->crypt_method_header), 2219 .l1_size = cpu_to_be32(s->l1_size), 2220 .l1_table_offset = cpu_to_be64(s->l1_table_offset), 2221 .refcount_table_offset = cpu_to_be64(s->refcount_table_offset), 2222 .refcount_table_clusters = cpu_to_be32(refcount_table_clusters), 2223 .nb_snapshots = cpu_to_be32(s->nb_snapshots), 2224 .snapshots_offset = cpu_to_be64(s->snapshots_offset), 2225 2226 /* Version 3 fields */ 2227 .incompatible_features = cpu_to_be64(s->incompatible_features), 2228 .compatible_features = cpu_to_be64(s->compatible_features), 2229 .autoclear_features = cpu_to_be64(s->autoclear_features), 2230 .refcount_order = cpu_to_be32(s->refcount_order), 2231 .header_length = cpu_to_be32(header_length), 2232 }; 2233 2234 /* For older versions, write a shorter header */ 2235 switch (s->qcow_version) { 2236 case 2: 2237 ret = offsetof(QCowHeader, incompatible_features); 2238 break; 2239 case 3: 2240 ret = sizeof(*header); 2241 break; 2242 default: 2243 ret = -EINVAL; 2244 goto fail; 2245 } 2246 2247 buf += ret; 2248 buflen -= ret; 2249 memset(buf, 0, buflen); 2250 2251 /* Preserve any unknown field in the header */ 2252 if (s->unknown_header_fields_size) { 2253 if (buflen < s->unknown_header_fields_size) { 2254 ret = -ENOSPC; 2255 goto fail; 2256 } 2257 2258 memcpy(buf, s->unknown_header_fields, s->unknown_header_fields_size); 2259 buf += s->unknown_header_fields_size; 2260 buflen -= s->unknown_header_fields_size; 2261 } 2262 2263 /* Backing file format header extension */ 2264 if (s->image_backing_format) { 2265 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_BACKING_FORMAT, 2266 s->image_backing_format, 2267 strlen(s->image_backing_format), 2268 buflen); 2269 if (ret < 0) { 2270 goto fail; 2271 } 2272 2273 buf += ret; 2274 buflen -= ret; 2275 } 2276 2277 /* Full disk encryption header pointer extension */ 2278 if (s->crypto_header.offset != 0) { 2279 cpu_to_be64s(&s->crypto_header.offset); 2280 cpu_to_be64s(&s->crypto_header.length); 2281 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_CRYPTO_HEADER, 2282 &s->crypto_header, sizeof(s->crypto_header), 2283 buflen); 2284 be64_to_cpus(&s->crypto_header.offset); 2285 be64_to_cpus(&s->crypto_header.length); 2286 if (ret < 0) { 2287 goto fail; 2288 } 2289 buf += ret; 2290 buflen -= ret; 2291 } 2292 2293 /* Feature table */ 2294 if (s->qcow_version >= 3) { 2295 Qcow2Feature features[] = { 2296 { 2297 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE, 2298 .bit = QCOW2_INCOMPAT_DIRTY_BITNR, 2299 .name = "dirty bit", 2300 }, 2301 { 
2302 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE, 2303 .bit = QCOW2_INCOMPAT_CORRUPT_BITNR, 2304 .name = "corrupt bit", 2305 }, 2306 { 2307 .type = QCOW2_FEAT_TYPE_COMPATIBLE, 2308 .bit = QCOW2_COMPAT_LAZY_REFCOUNTS_BITNR, 2309 .name = "lazy refcounts", 2310 }, 2311 }; 2312 2313 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_FEATURE_TABLE, 2314 features, sizeof(features), buflen); 2315 if (ret < 0) { 2316 goto fail; 2317 } 2318 buf += ret; 2319 buflen -= ret; 2320 } 2321 2322 /* Bitmap extension */ 2323 if (s->nb_bitmaps > 0) { 2324 Qcow2BitmapHeaderExt bitmaps_header = { 2325 .nb_bitmaps = cpu_to_be32(s->nb_bitmaps), 2326 .bitmap_directory_size = 2327 cpu_to_be64(s->bitmap_directory_size), 2328 .bitmap_directory_offset = 2329 cpu_to_be64(s->bitmap_directory_offset) 2330 }; 2331 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_BITMAPS, 2332 &bitmaps_header, sizeof(bitmaps_header), 2333 buflen); 2334 if (ret < 0) { 2335 goto fail; 2336 } 2337 buf += ret; 2338 buflen -= ret; 2339 } 2340 2341 /* Keep unknown header extensions */ 2342 QLIST_FOREACH(uext, &s->unknown_header_ext, next) { 2343 ret = header_ext_add(buf, uext->magic, uext->data, uext->len, buflen); 2344 if (ret < 0) { 2345 goto fail; 2346 } 2347 2348 buf += ret; 2349 buflen -= ret; 2350 } 2351 2352 /* End of header extensions */ 2353 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_END, NULL, 0, buflen); 2354 if (ret < 0) { 2355 goto fail; 2356 } 2357 2358 buf += ret; 2359 buflen -= ret; 2360 2361 /* Backing file name */ 2362 if (s->image_backing_file) { 2363 size_t backing_file_len = strlen(s->image_backing_file); 2364 2365 if (buflen < backing_file_len) { 2366 ret = -ENOSPC; 2367 goto fail; 2368 } 2369 2370 /* Using strncpy is ok here, since buf is not NUL-terminated. */ 2371 strncpy(buf, s->image_backing_file, buflen); 2372 2373 header->backing_file_offset = cpu_to_be64(buf - ((char*) header)); 2374 header->backing_file_size = cpu_to_be32(backing_file_len); 2375 } 2376 2377 /* Write the new header */ 2378 ret = bdrv_pwrite(bs->file, 0, header, s->cluster_size); 2379 if (ret < 0) { 2380 goto fail; 2381 } 2382 2383 ret = 0; 2384 fail: 2385 qemu_vfree(header); 2386 return ret; 2387 } 2388 2389 static int qcow2_change_backing_file(BlockDriverState *bs, 2390 const char *backing_file, const char *backing_fmt) 2391 { 2392 BDRVQcow2State *s = bs->opaque; 2393 2394 if (backing_file && strlen(backing_file) > 1023) { 2395 return -EINVAL; 2396 } 2397 2398 pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: ""); 2399 pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: ""); 2400 2401 g_free(s->image_backing_file); 2402 g_free(s->image_backing_format); 2403 2404 s->image_backing_file = backing_file ? g_strdup(bs->backing_file) : NULL; 2405 s->image_backing_format = backing_fmt ? 
g_strdup(bs->backing_format) : NULL; 2406 2407 return qcow2_update_header(bs); 2408 } 2409 2410 static int qcow2_crypt_method_from_format(const char *encryptfmt) 2411 { 2412 if (g_str_equal(encryptfmt, "luks")) { 2413 return QCOW_CRYPT_LUKS; 2414 } else if (g_str_equal(encryptfmt, "aes")) { 2415 return QCOW_CRYPT_AES; 2416 } else { 2417 return -EINVAL; 2418 } 2419 } 2420 2421 static int qcow2_set_up_encryption(BlockDriverState *bs, const char *encryptfmt, 2422 QemuOpts *opts, Error **errp) 2423 { 2424 BDRVQcow2State *s = bs->opaque; 2425 QCryptoBlockCreateOptions *cryptoopts = NULL; 2426 QCryptoBlock *crypto = NULL; 2427 int ret = -EINVAL; 2428 QDict *options, *encryptopts; 2429 int fmt; 2430 2431 options = qemu_opts_to_qdict(opts, NULL); 2432 qdict_extract_subqdict(options, &encryptopts, "encrypt."); 2433 QDECREF(options); 2434 2435 fmt = qcow2_crypt_method_from_format(encryptfmt); 2436 2437 switch (fmt) { 2438 case QCOW_CRYPT_LUKS: 2439 cryptoopts = block_crypto_create_opts_init( 2440 Q_CRYPTO_BLOCK_FORMAT_LUKS, encryptopts, errp); 2441 break; 2442 case QCOW_CRYPT_AES: 2443 cryptoopts = block_crypto_create_opts_init( 2444 Q_CRYPTO_BLOCK_FORMAT_QCOW, encryptopts, errp); 2445 break; 2446 default: 2447 error_setg(errp, "Unknown encryption format '%s'", encryptfmt); 2448 break; 2449 } 2450 if (!cryptoopts) { 2451 ret = -EINVAL; 2452 goto out; 2453 } 2454 s->crypt_method_header = fmt; 2455 2456 crypto = qcrypto_block_create(cryptoopts, "encrypt.", 2457 qcow2_crypto_hdr_init_func, 2458 qcow2_crypto_hdr_write_func, 2459 bs, errp); 2460 if (!crypto) { 2461 ret = -EINVAL; 2462 goto out; 2463 } 2464 2465 ret = qcow2_update_header(bs); 2466 if (ret < 0) { 2467 error_setg_errno(errp, -ret, "Could not write encryption header"); 2468 goto out; 2469 } 2470 2471 out: 2472 QDECREF(encryptopts); 2473 qcrypto_block_free(crypto); 2474 qapi_free_QCryptoBlockCreateOptions(cryptoopts); 2475 return ret; 2476 } 2477 2478 2479 /** 2480 * Preallocates metadata structures for data clusters between @offset (in the 2481 * guest disk) and @new_length (which is thus generally the new guest disk 2482 * size). 2483 * 2484 * Returns: 0 on success, -errno on failure. 2485 */ 2486 static int preallocate(BlockDriverState *bs, 2487 uint64_t offset, uint64_t new_length) 2488 { 2489 BDRVQcow2State *s = bs->opaque; 2490 uint64_t bytes; 2491 uint64_t host_offset = 0; 2492 unsigned int cur_bytes; 2493 int ret; 2494 QCowL2Meta *meta; 2495 2496 if (qemu_in_coroutine()) { 2497 qemu_co_mutex_lock(&s->lock); 2498 } 2499 2500 assert(offset <= new_length); 2501 bytes = new_length - offset; 2502 2503 while (bytes) { 2504 cur_bytes = MIN(bytes, INT_MAX); 2505 ret = qcow2_alloc_cluster_offset(bs, offset, &cur_bytes, 2506 &host_offset, &meta); 2507 if (ret < 0) { 2508 goto done; 2509 } 2510 2511 while (meta) { 2512 QCowL2Meta *next = meta->next; 2513 2514 ret = qcow2_alloc_cluster_link_l2(bs, meta); 2515 if (ret < 0) { 2516 qcow2_free_any_clusters(bs, meta->alloc_offset, 2517 meta->nb_clusters, QCOW2_DISCARD_NEVER); 2518 goto done; 2519 } 2520 2521 /* There are no dependent requests, but we need to remove our 2522 * request from the list of in-flight requests */ 2523 QLIST_REMOVE(meta, next_in_flight); 2524 2525 g_free(meta); 2526 meta = next; 2527 } 2528 2529 /* TODO Preallocate data if requested */ 2530 2531 bytes -= cur_bytes; 2532 offset += cur_bytes; 2533 } 2534 2535 /* 2536 * It is expected that the image file is large enough to actually contain 2537 * all of the allocated clusters (otherwise we get failing reads after 2538 * EOF). 
Extend the image to the last allocated sector. 2539 */ 2540 if (host_offset != 0) { 2541 uint8_t data = 0; 2542 ret = bdrv_pwrite(bs->file, (host_offset + cur_bytes) - 1, 2543 &data, 1); 2544 if (ret < 0) { 2545 goto done; 2546 } 2547 } 2548 2549 ret = 0; 2550 2551 done: 2552 if (qemu_in_coroutine()) { 2553 qemu_co_mutex_unlock(&s->lock); 2554 } 2555 return ret; 2556 } 2557 2558 /* qcow2_refcount_metadata_size: 2559 * @clusters: number of clusters to refcount (including data and L1/L2 tables) 2560 * @cluster_size: size of a cluster, in bytes 2561 * @refcount_order: refcount bits power-of-2 exponent 2562 * @generous_increase: allow for the refcount table to be 1.5x as large as it 2563 * needs to be 2564 * 2565 * Returns: Number of bytes required for refcount blocks and table metadata. 2566 */ 2567 int64_t qcow2_refcount_metadata_size(int64_t clusters, size_t cluster_size, 2568 int refcount_order, bool generous_increase, 2569 uint64_t *refblock_count) 2570 { 2571 /* 2572 * Every host cluster is reference-counted, including metadata (even 2573 * refcount metadata is recursively included). 2574 * 2575 * An accurate formula for the size of refcount metadata size is difficult 2576 * to derive. An easier method of calculation is finding the fixed point 2577 * where no further refcount blocks or table clusters are required to 2578 * reference count every cluster. 2579 */ 2580 int64_t blocks_per_table_cluster = cluster_size / sizeof(uint64_t); 2581 int64_t refcounts_per_block = cluster_size * 8 / (1 << refcount_order); 2582 int64_t table = 0; /* number of refcount table clusters */ 2583 int64_t blocks = 0; /* number of refcount block clusters */ 2584 int64_t last; 2585 int64_t n = 0; 2586 2587 do { 2588 last = n; 2589 blocks = DIV_ROUND_UP(clusters + table + blocks, refcounts_per_block); 2590 table = DIV_ROUND_UP(blocks, blocks_per_table_cluster); 2591 n = clusters + blocks + table; 2592 2593 if (n == last && generous_increase) { 2594 clusters += DIV_ROUND_UP(table, 2); 2595 n = 0; /* force another loop */ 2596 generous_increase = false; 2597 } 2598 } while (n != last); 2599 2600 if (refblock_count) { 2601 *refblock_count = blocks; 2602 } 2603 2604 return (blocks + table) * cluster_size; 2605 } 2606 2607 /** 2608 * qcow2_calc_prealloc_size: 2609 * @total_size: virtual disk size in bytes 2610 * @cluster_size: cluster size in bytes 2611 * @refcount_order: refcount bits power-of-2 exponent 2612 * 2613 * Returns: Total number of bytes required for the fully allocated image 2614 * (including metadata). 
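 *
 * Illustrative example, with figures that follow from the calculation
 * below: for a 1 GiB image with 64 KiB clusters and 16-bit refcounts this
 * comes to one header cluster (64 KiB), two L2 clusters (128 KiB), one L1
 * cluster (64 KiB) and one refcount block plus one refcount table cluster
 * (128 KiB), i.e. 1 GiB + 384 KiB in total.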
2615 */ 2616 static int64_t qcow2_calc_prealloc_size(int64_t total_size, 2617 size_t cluster_size, 2618 int refcount_order) 2619 { 2620 int64_t meta_size = 0; 2621 uint64_t nl1e, nl2e; 2622 int64_t aligned_total_size = align_offset(total_size, cluster_size); 2623 2624 /* header: 1 cluster */ 2625 meta_size += cluster_size; 2626 2627 /* total size of L2 tables */ 2628 nl2e = aligned_total_size / cluster_size; 2629 nl2e = align_offset(nl2e, cluster_size / sizeof(uint64_t)); 2630 meta_size += nl2e * sizeof(uint64_t); 2631 2632 /* total size of L1 tables */ 2633 nl1e = nl2e * sizeof(uint64_t) / cluster_size; 2634 nl1e = align_offset(nl1e, cluster_size / sizeof(uint64_t)); 2635 meta_size += nl1e * sizeof(uint64_t); 2636 2637 /* total size of refcount table and blocks */ 2638 meta_size += qcow2_refcount_metadata_size( 2639 (meta_size + aligned_total_size) / cluster_size, 2640 cluster_size, refcount_order, false, NULL); 2641 2642 return meta_size + aligned_total_size; 2643 } 2644 2645 static size_t qcow2_opt_get_cluster_size_del(QemuOpts *opts, Error **errp) 2646 { 2647 size_t cluster_size; 2648 int cluster_bits; 2649 2650 cluster_size = qemu_opt_get_size_del(opts, BLOCK_OPT_CLUSTER_SIZE, 2651 DEFAULT_CLUSTER_SIZE); 2652 cluster_bits = ctz32(cluster_size); 2653 if (cluster_bits < MIN_CLUSTER_BITS || cluster_bits > MAX_CLUSTER_BITS || 2654 (1 << cluster_bits) != cluster_size) 2655 { 2656 error_setg(errp, "Cluster size must be a power of two between %d and " 2657 "%dk", 1 << MIN_CLUSTER_BITS, 1 << (MAX_CLUSTER_BITS - 10)); 2658 return 0; 2659 } 2660 return cluster_size; 2661 } 2662 2663 static int qcow2_opt_get_version_del(QemuOpts *opts, Error **errp) 2664 { 2665 char *buf; 2666 int ret; 2667 2668 buf = qemu_opt_get_del(opts, BLOCK_OPT_COMPAT_LEVEL); 2669 if (!buf) { 2670 ret = 3; /* default */ 2671 } else if (!strcmp(buf, "0.10")) { 2672 ret = 2; 2673 } else if (!strcmp(buf, "1.1")) { 2674 ret = 3; 2675 } else { 2676 error_setg(errp, "Invalid compatibility level: '%s'", buf); 2677 ret = -EINVAL; 2678 } 2679 g_free(buf); 2680 return ret; 2681 } 2682 2683 static uint64_t qcow2_opt_get_refcount_bits_del(QemuOpts *opts, int version, 2684 Error **errp) 2685 { 2686 uint64_t refcount_bits; 2687 2688 refcount_bits = qemu_opt_get_number_del(opts, BLOCK_OPT_REFCOUNT_BITS, 16); 2689 if (refcount_bits > 64 || !is_power_of_2(refcount_bits)) { 2690 error_setg(errp, "Refcount width must be a power of two and may not " 2691 "exceed 64 bits"); 2692 return 0; 2693 } 2694 2695 if (version < 3 && refcount_bits != 16) { 2696 error_setg(errp, "Different refcount widths than 16 bits require " 2697 "compatibility level 1.1 or above (use compat=1.1 or " 2698 "greater)"); 2699 return 0; 2700 } 2701 2702 return refcount_bits; 2703 } 2704 2705 static int qcow2_create2(const char *filename, int64_t total_size, 2706 const char *backing_file, const char *backing_format, 2707 int flags, size_t cluster_size, PreallocMode prealloc, 2708 QemuOpts *opts, int version, int refcount_order, 2709 const char *encryptfmt, Error **errp) 2710 { 2711 QDict *options; 2712 2713 /* 2714 * Open the image file and write a minimal qcow2 header. 2715 * 2716 * We keep things simple and start with a zero-sized image. We also 2717 * do without refcount blocks or a L1 table for now. We'll fix the 2718 * inconsistency later. 
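 *
 * In outline (an illustrative summary of the steps below, not additional
 * requirements): write a minimal header plus an empty refcount table and
 * refcount block, reopen the file as qcow2 and allocate the first three
 * clusters so that this metadata is refcounted, write the full header
 * (feature table and other extensions), grow the image to the requested
 * size, then apply the backing file, encryption and preallocation options
 * before a final reopen that flushes the image.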
2719 * 2720 * We do need a refcount table because growing the refcount table means 2721 * allocating two new refcount blocks - the seconds of which would be at 2722 * 2 GB for 64k clusters, and we don't want to have a 2 GB initial file 2723 * size for any qcow2 image. 2724 */ 2725 BlockBackend *blk; 2726 QCowHeader *header; 2727 uint64_t* refcount_table; 2728 Error *local_err = NULL; 2729 int ret; 2730 2731 if (prealloc == PREALLOC_MODE_FULL || prealloc == PREALLOC_MODE_FALLOC) { 2732 int64_t prealloc_size = 2733 qcow2_calc_prealloc_size(total_size, cluster_size, refcount_order); 2734 qemu_opt_set_number(opts, BLOCK_OPT_SIZE, prealloc_size, &error_abort); 2735 qemu_opt_set(opts, BLOCK_OPT_PREALLOC, PreallocMode_lookup[prealloc], 2736 &error_abort); 2737 } 2738 2739 ret = bdrv_create_file(filename, opts, &local_err); 2740 if (ret < 0) { 2741 error_propagate(errp, local_err); 2742 return ret; 2743 } 2744 2745 blk = blk_new_open(filename, NULL, NULL, 2746 BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, 2747 &local_err); 2748 if (blk == NULL) { 2749 error_propagate(errp, local_err); 2750 return -EIO; 2751 } 2752 2753 blk_set_allow_write_beyond_eof(blk, true); 2754 2755 /* Write the header */ 2756 QEMU_BUILD_BUG_ON((1 << MIN_CLUSTER_BITS) < sizeof(*header)); 2757 header = g_malloc0(cluster_size); 2758 *header = (QCowHeader) { 2759 .magic = cpu_to_be32(QCOW_MAGIC), 2760 .version = cpu_to_be32(version), 2761 .cluster_bits = cpu_to_be32(ctz32(cluster_size)), 2762 .size = cpu_to_be64(0), 2763 .l1_table_offset = cpu_to_be64(0), 2764 .l1_size = cpu_to_be32(0), 2765 .refcount_table_offset = cpu_to_be64(cluster_size), 2766 .refcount_table_clusters = cpu_to_be32(1), 2767 .refcount_order = cpu_to_be32(refcount_order), 2768 .header_length = cpu_to_be32(sizeof(*header)), 2769 }; 2770 2771 /* We'll update this to correct value later */ 2772 header->crypt_method = cpu_to_be32(QCOW_CRYPT_NONE); 2773 2774 if (flags & BLOCK_FLAG_LAZY_REFCOUNTS) { 2775 header->compatible_features |= 2776 cpu_to_be64(QCOW2_COMPAT_LAZY_REFCOUNTS); 2777 } 2778 2779 ret = blk_pwrite(blk, 0, header, cluster_size, 0); 2780 g_free(header); 2781 if (ret < 0) { 2782 error_setg_errno(errp, -ret, "Could not write qcow2 header"); 2783 goto out; 2784 } 2785 2786 /* Write a refcount table with one refcount block */ 2787 refcount_table = g_malloc0(2 * cluster_size); 2788 refcount_table[0] = cpu_to_be64(2 * cluster_size); 2789 ret = blk_pwrite(blk, cluster_size, refcount_table, 2 * cluster_size, 0); 2790 g_free(refcount_table); 2791 2792 if (ret < 0) { 2793 error_setg_errno(errp, -ret, "Could not write refcount table"); 2794 goto out; 2795 } 2796 2797 blk_unref(blk); 2798 blk = NULL; 2799 2800 /* 2801 * And now open the image and make it consistent first (i.e. 
increase the 2802 * refcount of the cluster that is occupied by the header and the refcount 2803 * table) 2804 */ 2805 options = qdict_new(); 2806 qdict_put_str(options, "driver", "qcow2"); 2807 blk = blk_new_open(filename, NULL, options, 2808 BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_NO_FLUSH, 2809 &local_err); 2810 if (blk == NULL) { 2811 error_propagate(errp, local_err); 2812 ret = -EIO; 2813 goto out; 2814 } 2815 2816 ret = qcow2_alloc_clusters(blk_bs(blk), 3 * cluster_size); 2817 if (ret < 0) { 2818 error_setg_errno(errp, -ret, "Could not allocate clusters for qcow2 " 2819 "header and refcount table"); 2820 goto out; 2821 2822 } else if (ret != 0) { 2823 error_report("Huh, first cluster in empty image is already in use?"); 2824 abort(); 2825 } 2826 2827 /* Create a full header (including things like feature table) */ 2828 ret = qcow2_update_header(blk_bs(blk)); 2829 if (ret < 0) { 2830 error_setg_errno(errp, -ret, "Could not update qcow2 header"); 2831 goto out; 2832 } 2833 2834 /* Okay, now that we have a valid image, let's give it the right size */ 2835 ret = blk_truncate(blk, total_size, PREALLOC_MODE_OFF, errp); 2836 if (ret < 0) { 2837 error_prepend(errp, "Could not resize image: "); 2838 goto out; 2839 } 2840 2841 /* Want a backing file? There you go.*/ 2842 if (backing_file) { 2843 ret = bdrv_change_backing_file(blk_bs(blk), backing_file, backing_format); 2844 if (ret < 0) { 2845 error_setg_errno(errp, -ret, "Could not assign backing file '%s' " 2846 "with format '%s'", backing_file, backing_format); 2847 goto out; 2848 } 2849 } 2850 2851 /* Want encryption? There you go. */ 2852 if (encryptfmt) { 2853 ret = qcow2_set_up_encryption(blk_bs(blk), encryptfmt, opts, errp); 2854 if (ret < 0) { 2855 goto out; 2856 } 2857 } 2858 2859 /* And if we're supposed to preallocate metadata, do that now */ 2860 if (prealloc != PREALLOC_MODE_OFF) { 2861 ret = preallocate(blk_bs(blk), 0, total_size); 2862 if (ret < 0) { 2863 error_setg_errno(errp, -ret, "Could not preallocate metadata"); 2864 goto out; 2865 } 2866 } 2867 2868 blk_unref(blk); 2869 blk = NULL; 2870 2871 /* Reopen the image without BDRV_O_NO_FLUSH to flush it before returning. 2872 * Using BDRV_O_NO_IO, since encryption is now setup we don't want to 2873 * have to setup decryption context. We're not doing any I/O on the top 2874 * level BlockDriverState, only lower layers, where BDRV_O_NO_IO does 2875 * not have effect. 
2876 */ 2877 options = qdict_new(); 2878 qdict_put_str(options, "driver", "qcow2"); 2879 blk = blk_new_open(filename, NULL, options, 2880 BDRV_O_RDWR | BDRV_O_NO_BACKING | BDRV_O_NO_IO, 2881 &local_err); 2882 if (blk == NULL) { 2883 error_propagate(errp, local_err); 2884 ret = -EIO; 2885 goto out; 2886 } 2887 2888 ret = 0; 2889 out: 2890 if (blk) { 2891 blk_unref(blk); 2892 } 2893 return ret; 2894 } 2895 2896 static int qcow2_create(const char *filename, QemuOpts *opts, Error **errp) 2897 { 2898 char *backing_file = NULL; 2899 char *backing_fmt = NULL; 2900 char *buf = NULL; 2901 uint64_t size = 0; 2902 int flags = 0; 2903 size_t cluster_size = DEFAULT_CLUSTER_SIZE; 2904 PreallocMode prealloc; 2905 int version; 2906 uint64_t refcount_bits; 2907 int refcount_order; 2908 const char *encryptfmt = NULL; 2909 Error *local_err = NULL; 2910 int ret; 2911 2912 /* Read out options */ 2913 size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0), 2914 BDRV_SECTOR_SIZE); 2915 backing_file = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE); 2916 backing_fmt = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FMT); 2917 encryptfmt = qemu_opt_get_del(opts, BLOCK_OPT_ENCRYPT_FORMAT); 2918 if (encryptfmt) { 2919 if (qemu_opt_get_del(opts, BLOCK_OPT_ENCRYPT)) { 2920 error_setg(errp, "Options " BLOCK_OPT_ENCRYPT " and " 2921 BLOCK_OPT_ENCRYPT_FORMAT " are mutually exclusive"); 2922 ret = -EINVAL; 2923 goto finish; 2924 } 2925 } else if (qemu_opt_get_bool_del(opts, BLOCK_OPT_ENCRYPT, false)) { 2926 encryptfmt = "aes"; 2927 } 2928 cluster_size = qcow2_opt_get_cluster_size_del(opts, &local_err); 2929 if (local_err) { 2930 error_propagate(errp, local_err); 2931 ret = -EINVAL; 2932 goto finish; 2933 } 2934 buf = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC); 2935 prealloc = qapi_enum_parse(PreallocMode_lookup, buf, 2936 PREALLOC_MODE__MAX, PREALLOC_MODE_OFF, 2937 &local_err); 2938 if (local_err) { 2939 error_propagate(errp, local_err); 2940 ret = -EINVAL; 2941 goto finish; 2942 } 2943 2944 version = qcow2_opt_get_version_del(opts, &local_err); 2945 if (local_err) { 2946 error_propagate(errp, local_err); 2947 ret = -EINVAL; 2948 goto finish; 2949 } 2950 2951 if (qemu_opt_get_bool_del(opts, BLOCK_OPT_LAZY_REFCOUNTS, false)) { 2952 flags |= BLOCK_FLAG_LAZY_REFCOUNTS; 2953 } 2954 2955 if (backing_file && prealloc != PREALLOC_MODE_OFF) { 2956 error_setg(errp, "Backing file and preallocation cannot be used at " 2957 "the same time"); 2958 ret = -EINVAL; 2959 goto finish; 2960 } 2961 2962 if (version < 3 && (flags & BLOCK_FLAG_LAZY_REFCOUNTS)) { 2963 error_setg(errp, "Lazy refcounts only supported with compatibility " 2964 "level 1.1 and above (use compat=1.1 or greater)"); 2965 ret = -EINVAL; 2966 goto finish; 2967 } 2968 2969 refcount_bits = qcow2_opt_get_refcount_bits_del(opts, version, &local_err); 2970 if (local_err) { 2971 error_propagate(errp, local_err); 2972 ret = -EINVAL; 2973 goto finish; 2974 } 2975 2976 refcount_order = ctz32(refcount_bits); 2977 2978 ret = qcow2_create2(filename, size, backing_file, backing_fmt, flags, 2979 cluster_size, prealloc, opts, version, refcount_order, 2980 encryptfmt, &local_err); 2981 error_propagate(errp, local_err); 2982 2983 finish: 2984 g_free(backing_file); 2985 g_free(backing_fmt); 2986 g_free(buf); 2987 return ret; 2988 } 2989 2990 2991 static bool is_zero_sectors(BlockDriverState *bs, int64_t start, 2992 uint32_t count) 2993 { 2994 int nr; 2995 BlockDriverState *file; 2996 int64_t res; 2997 2998 if (start + count > bs->total_sectors) { 2999 count = bs->total_sectors - start; 
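/* count now covers only the part of the request that lies inside the image */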
3000 } 3001 3002 if (!count) { 3003 return true; 3004 } 3005 res = bdrv_get_block_status_above(bs, NULL, start, count, 3006 &nr, &file); 3007 return res >= 0 && (res & BDRV_BLOCK_ZERO) && nr == count; 3008 } 3009 3010 static coroutine_fn int qcow2_co_pwrite_zeroes(BlockDriverState *bs, 3011 int64_t offset, int bytes, BdrvRequestFlags flags) 3012 { 3013 int ret; 3014 BDRVQcow2State *s = bs->opaque; 3015 3016 uint32_t head = offset % s->cluster_size; 3017 uint32_t tail = (offset + bytes) % s->cluster_size; 3018 3019 trace_qcow2_pwrite_zeroes_start_req(qemu_coroutine_self(), offset, bytes); 3020 if (offset + bytes == bs->total_sectors * BDRV_SECTOR_SIZE) { 3021 tail = 0; 3022 } 3023 3024 if (head || tail) { 3025 int64_t cl_start = (offset - head) >> BDRV_SECTOR_BITS; 3026 uint64_t off; 3027 unsigned int nr; 3028 3029 assert(head + bytes <= s->cluster_size); 3030 3031 /* check whether remainder of cluster already reads as zero */ 3032 if (!(is_zero_sectors(bs, cl_start, 3033 DIV_ROUND_UP(head, BDRV_SECTOR_SIZE)) && 3034 is_zero_sectors(bs, (offset + bytes) >> BDRV_SECTOR_BITS, 3035 DIV_ROUND_UP(-tail & (s->cluster_size - 1), 3036 BDRV_SECTOR_SIZE)))) { 3037 return -ENOTSUP; 3038 } 3039 3040 qemu_co_mutex_lock(&s->lock); 3041 /* We can have new write after previous check */ 3042 offset = cl_start << BDRV_SECTOR_BITS; 3043 bytes = s->cluster_size; 3044 nr = s->cluster_size; 3045 ret = qcow2_get_cluster_offset(bs, offset, &nr, &off); 3046 if (ret != QCOW2_CLUSTER_UNALLOCATED && 3047 ret != QCOW2_CLUSTER_ZERO_PLAIN && 3048 ret != QCOW2_CLUSTER_ZERO_ALLOC) { 3049 qemu_co_mutex_unlock(&s->lock); 3050 return -ENOTSUP; 3051 } 3052 } else { 3053 qemu_co_mutex_lock(&s->lock); 3054 } 3055 3056 trace_qcow2_pwrite_zeroes(qemu_coroutine_self(), offset, bytes); 3057 3058 /* Whatever is left can use real zero clusters */ 3059 ret = qcow2_cluster_zeroize(bs, offset, bytes, flags); 3060 qemu_co_mutex_unlock(&s->lock); 3061 3062 return ret; 3063 } 3064 3065 static coroutine_fn int qcow2_co_pdiscard(BlockDriverState *bs, 3066 int64_t offset, int bytes) 3067 { 3068 int ret; 3069 BDRVQcow2State *s = bs->opaque; 3070 3071 if (!QEMU_IS_ALIGNED(offset | bytes, s->cluster_size)) { 3072 assert(bytes < s->cluster_size); 3073 /* Ignore partial clusters, except for the special case of the 3074 * complete partial cluster at the end of an unaligned file */ 3075 if (!QEMU_IS_ALIGNED(offset, s->cluster_size) || 3076 offset + bytes != bs->total_sectors * BDRV_SECTOR_SIZE) { 3077 return -ENOTSUP; 3078 } 3079 } 3080 3081 qemu_co_mutex_lock(&s->lock); 3082 ret = qcow2_cluster_discard(bs, offset, bytes, QCOW2_DISCARD_REQUEST, 3083 false); 3084 qemu_co_mutex_unlock(&s->lock); 3085 return ret; 3086 } 3087 3088 static int qcow2_truncate(BlockDriverState *bs, int64_t offset, 3089 PreallocMode prealloc, Error **errp) 3090 { 3091 BDRVQcow2State *s = bs->opaque; 3092 uint64_t old_length; 3093 int64_t new_l1_size; 3094 int ret; 3095 3096 if (prealloc != PREALLOC_MODE_OFF && prealloc != PREALLOC_MODE_METADATA && 3097 prealloc != PREALLOC_MODE_FALLOC && prealloc != PREALLOC_MODE_FULL) 3098 { 3099 error_setg(errp, "Unsupported preallocation mode '%s'", 3100 PreallocMode_lookup[prealloc]); 3101 return -ENOTSUP; 3102 } 3103 3104 if (offset & 511) { 3105 error_setg(errp, "The new size must be a multiple of 512"); 3106 return -EINVAL; 3107 } 3108 3109 /* cannot proceed if image has snapshots */ 3110 if (s->nb_snapshots) { 3111 error_setg(errp, "Can't resize an image which has snapshots"); 3112 return -ENOTSUP; 3113 } 3114 3115 /* cannot proceed 
if image has bitmaps */ 3116 if (s->nb_bitmaps) { 3117 /* TODO: resize bitmaps in the image */ 3118 error_setg(errp, "Can't resize an image which has bitmaps"); 3119 return -ENOTSUP; 3120 } 3121 3122 old_length = bs->total_sectors * 512; 3123 3124 /* shrinking is currently not supported */ 3125 if (offset < old_length) { 3126 error_setg(errp, "qcow2 doesn't support shrinking images yet"); 3127 return -ENOTSUP; 3128 } 3129 3130 new_l1_size = size_to_l1(s, offset); 3131 ret = qcow2_grow_l1_table(bs, new_l1_size, true); 3132 if (ret < 0) { 3133 error_setg_errno(errp, -ret, "Failed to grow the L1 table"); 3134 return ret; 3135 } 3136 3137 switch (prealloc) { 3138 case PREALLOC_MODE_OFF: 3139 break; 3140 3141 case PREALLOC_MODE_METADATA: 3142 ret = preallocate(bs, old_length, offset); 3143 if (ret < 0) { 3144 error_setg_errno(errp, -ret, "Preallocation failed"); 3145 return ret; 3146 } 3147 break; 3148 3149 case PREALLOC_MODE_FALLOC: 3150 case PREALLOC_MODE_FULL: 3151 { 3152 int64_t allocation_start, host_offset, guest_offset; 3153 int64_t clusters_allocated; 3154 int64_t old_file_size, new_file_size; 3155 uint64_t nb_new_data_clusters, nb_new_l2_tables; 3156 3157 old_file_size = bdrv_getlength(bs->file->bs); 3158 if (old_file_size < 0) { 3159 error_setg_errno(errp, -old_file_size, 3160 "Failed to inquire current file length"); 3161 return old_file_size; 3162 } 3163 3164 nb_new_data_clusters = DIV_ROUND_UP(offset - old_length, 3165 s->cluster_size); 3166 3167 /* This is an overestimation; we will not actually allocate space for 3168 * these in the file but just make sure the new refcount structures are 3169 * able to cover them so we will not have to allocate new refblocks 3170 * while entering the data blocks in the potentially new L2 tables. 3171 * (We do not actually care where the L2 tables are placed. Maybe they 3172 * are already allocated or they can be placed somewhere before 3173 * @old_file_size. It does not matter because they will be fully 3174 * allocated automatically, so they do not need to be covered by the 3175 * preallocation. All that matters is that we will not have to allocate 3176 * new refcount structures for them.)
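 * For a rough illustration with 64 KiB clusters: one L2 table maps 8192
 * clusters, i.e. 512 MiB of guest data, so growing an image by 10 GiB is
 * charged 20 + 1 L2 table clusters by the estimate below.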
*/ 3177 nb_new_l2_tables = DIV_ROUND_UP(nb_new_data_clusters, 3178 s->cluster_size / sizeof(uint64_t)); 3179 /* The cluster range may not be aligned to L2 boundaries, so add one L2 3180 * table for a potential head/tail */ 3181 nb_new_l2_tables++; 3182 3183 allocation_start = qcow2_refcount_area(bs, old_file_size, 3184 nb_new_data_clusters + 3185 nb_new_l2_tables, 3186 true, 0, 0); 3187 if (allocation_start < 0) { 3188 error_setg_errno(errp, -allocation_start, 3189 "Failed to resize refcount structures"); 3190 return -allocation_start; 3191 } 3192 3193 clusters_allocated = qcow2_alloc_clusters_at(bs, allocation_start, 3194 nb_new_data_clusters); 3195 if (clusters_allocated < 0) { 3196 error_setg_errno(errp, -clusters_allocated, 3197 "Failed to allocate data clusters"); 3198 return -clusters_allocated; 3199 } 3200 3201 assert(clusters_allocated == nb_new_data_clusters); 3202 3203 /* Allocate the data area */ 3204 new_file_size = allocation_start + 3205 nb_new_data_clusters * s->cluster_size; 3206 ret = bdrv_truncate(bs->file, new_file_size, prealloc, errp); 3207 if (ret < 0) { 3208 error_prepend(errp, "Failed to resize underlying file: "); 3209 qcow2_free_clusters(bs, allocation_start, 3210 nb_new_data_clusters * s->cluster_size, 3211 QCOW2_DISCARD_OTHER); 3212 return ret; 3213 } 3214 3215 /* Create the necessary L2 entries */ 3216 host_offset = allocation_start; 3217 guest_offset = old_length; 3218 while (nb_new_data_clusters) { 3219 int64_t guest_cluster = guest_offset >> s->cluster_bits; 3220 int64_t nb_clusters = MIN(nb_new_data_clusters, 3221 s->l2_size - guest_cluster % s->l2_size); 3222 QCowL2Meta allocation = { 3223 .offset = guest_offset, 3224 .alloc_offset = host_offset, 3225 .nb_clusters = nb_clusters, 3226 }; 3227 qemu_co_queue_init(&allocation.dependent_requests); 3228 3229 ret = qcow2_alloc_cluster_link_l2(bs, &allocation); 3230 if (ret < 0) { 3231 error_setg_errno(errp, -ret, "Failed to update L2 tables"); 3232 qcow2_free_clusters(bs, host_offset, 3233 nb_new_data_clusters * s->cluster_size, 3234 QCOW2_DISCARD_OTHER); 3235 return ret; 3236 } 3237 3238 guest_offset += nb_clusters * s->cluster_size; 3239 host_offset += nb_clusters * s->cluster_size; 3240 nb_new_data_clusters -= nb_clusters; 3241 } 3242 break; 3243 } 3244 3245 default: 3246 g_assert_not_reached(); 3247 } 3248 3249 if (prealloc != PREALLOC_MODE_OFF) { 3250 /* Flush metadata before actually changing the image size */ 3251 ret = bdrv_flush(bs); 3252 if (ret < 0) { 3253 error_setg_errno(errp, -ret, 3254 "Failed to flush the preallocated area to disk"); 3255 return ret; 3256 } 3257 } 3258 3259 /* write updated header.size */ 3260 offset = cpu_to_be64(offset); 3261 ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, size), 3262 &offset, sizeof(uint64_t)); 3263 if (ret < 0) { 3264 error_setg_errno(errp, -ret, "Failed to update the image size"); 3265 return ret; 3266 } 3267 3268 s->l1_vm_state_index = new_l1_size; 3269 return 0; 3270 } 3271 3272 /* XXX: put compressed sectors first, then all the cluster aligned 3273 tables to avoid losing bytes in alignment */ 3274 static coroutine_fn int 3275 qcow2_co_pwritev_compressed(BlockDriverState *bs, uint64_t offset, 3276 uint64_t bytes, QEMUIOVector *qiov) 3277 { 3278 BDRVQcow2State *s = bs->opaque; 3279 QEMUIOVector hd_qiov; 3280 struct iovec iov; 3281 z_stream strm; 3282 int ret, out_len; 3283 uint8_t *buf, *out_buf; 3284 uint64_t cluster_offset; 3285 3286 if (bytes == 0) { 3287 /* align end of file to a sector boundary to ease reading with 3288 sector based I/Os */ 3289 
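/* bdrv_getlength() reports a size rounded up to sector granularity, so
 * truncating the file to that size pads out any trailing partial sector */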
cluster_offset = bdrv_getlength(bs->file->bs); 3290 return bdrv_truncate(bs->file, cluster_offset, PREALLOC_MODE_OFF, NULL); 3291 } 3292 3293 buf = qemu_blockalign(bs, s->cluster_size); 3294 if (bytes != s->cluster_size) { 3295 if (bytes > s->cluster_size || 3296 offset + bytes != bs->total_sectors << BDRV_SECTOR_BITS) 3297 { 3298 qemu_vfree(buf); 3299 return -EINVAL; 3300 } 3301 /* Zero-pad last write if image size is not cluster aligned */ 3302 memset(buf + bytes, 0, s->cluster_size - bytes); 3303 } 3304 qemu_iovec_to_buf(qiov, 0, buf, bytes); 3305 3306 out_buf = g_malloc(s->cluster_size); 3307 3308 /* best compression, small window, no zlib header */ 3309 memset(&strm, 0, sizeof(strm)); 3310 ret = deflateInit2(&strm, Z_DEFAULT_COMPRESSION, 3311 Z_DEFLATED, -12, 3312 9, Z_DEFAULT_STRATEGY); 3313 if (ret != 0) { 3314 ret = -EINVAL; 3315 goto fail; 3316 } 3317 3318 strm.avail_in = s->cluster_size; 3319 strm.next_in = (uint8_t *)buf; 3320 strm.avail_out = s->cluster_size; 3321 strm.next_out = out_buf; 3322 3323 ret = deflate(&strm, Z_FINISH); 3324 if (ret != Z_STREAM_END && ret != Z_OK) { 3325 deflateEnd(&strm); 3326 ret = -EINVAL; 3327 goto fail; 3328 } 3329 out_len = strm.next_out - out_buf; 3330 3331 deflateEnd(&strm); 3332 3333 if (ret != Z_STREAM_END || out_len >= s->cluster_size) { 3334 /* could not compress: write normal cluster */ 3335 ret = qcow2_co_pwritev(bs, offset, bytes, qiov, 0); 3336 if (ret < 0) { 3337 goto fail; 3338 } 3339 goto success; 3340 } 3341 3342 qemu_co_mutex_lock(&s->lock); 3343 cluster_offset = 3344 qcow2_alloc_compressed_cluster_offset(bs, offset, out_len); 3345 if (!cluster_offset) { 3346 qemu_co_mutex_unlock(&s->lock); 3347 ret = -EIO; 3348 goto fail; 3349 } 3350 cluster_offset &= s->cluster_offset_mask; 3351 3352 ret = qcow2_pre_write_overlap_check(bs, 0, cluster_offset, out_len); 3353 qemu_co_mutex_unlock(&s->lock); 3354 if (ret < 0) { 3355 goto fail; 3356 } 3357 3358 iov = (struct iovec) { 3359 .iov_base = out_buf, 3360 .iov_len = out_len, 3361 }; 3362 qemu_iovec_init_external(&hd_qiov, &iov, 1); 3363 3364 BLKDBG_EVENT(bs->file, BLKDBG_WRITE_COMPRESSED); 3365 ret = bdrv_co_pwritev(bs->file, cluster_offset, out_len, &hd_qiov, 0); 3366 if (ret < 0) { 3367 goto fail; 3368 } 3369 success: 3370 ret = 0; 3371 fail: 3372 qemu_vfree(buf); 3373 g_free(out_buf); 3374 return ret; 3375 } 3376 3377 static int make_completely_empty(BlockDriverState *bs) 3378 { 3379 BDRVQcow2State *s = bs->opaque; 3380 Error *local_err = NULL; 3381 int ret, l1_clusters; 3382 int64_t offset; 3383 uint64_t *new_reftable = NULL; 3384 uint64_t rt_entry, l1_size2; 3385 struct { 3386 uint64_t l1_offset; 3387 uint64_t reftable_offset; 3388 uint32_t reftable_clusters; 3389 } QEMU_PACKED l1_ofs_rt_ofs_cls; 3390 3391 ret = qcow2_cache_empty(bs, s->l2_table_cache); 3392 if (ret < 0) { 3393 goto fail; 3394 } 3395 3396 ret = qcow2_cache_empty(bs, s->refcount_block_cache); 3397 if (ret < 0) { 3398 goto fail; 3399 } 3400 3401 /* Refcounts will be broken utterly */ 3402 ret = qcow2_mark_dirty(bs); 3403 if (ret < 0) { 3404 goto fail; 3405 } 3406 3407 BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE); 3408 3409 l1_clusters = DIV_ROUND_UP(s->l1_size, s->cluster_size / sizeof(uint64_t)); 3410 l1_size2 = (uint64_t)s->l1_size * sizeof(uint64_t); 3411 3412 /* After this call, neither the in-memory nor the on-disk refcount 3413 * information accurately describe the actual references */ 3414 3415 ret = bdrv_pwrite_zeroes(bs->file, s->l1_table_offset, 3416 l1_clusters * s->cluster_size, 0); 3417 if (ret < 0) { 3418 goto 
fail_broken_refcounts; 3419 } 3420 memset(s->l1_table, 0, l1_size2); 3421 3422 BLKDBG_EVENT(bs->file, BLKDBG_EMPTY_IMAGE_PREPARE); 3423 3424 /* Overwrite enough clusters at the beginning of the sectors to place 3425 * the refcount table, a refcount block and the L1 table in; this may 3426 * overwrite parts of the existing refcount and L1 table, which is not 3427 * an issue because the dirty flag is set, complete data loss is in fact 3428 * desired and partial data loss is consequently fine as well */ 3429 ret = bdrv_pwrite_zeroes(bs->file, s->cluster_size, 3430 (2 + l1_clusters) * s->cluster_size, 0); 3431 /* This call (even if it failed overall) may have overwritten on-disk 3432 * refcount structures; in that case, the in-memory refcount information 3433 * will probably differ from the on-disk information which makes the BDS 3434 * unusable */ 3435 if (ret < 0) { 3436 goto fail_broken_refcounts; 3437 } 3438 3439 BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE); 3440 BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_UPDATE); 3441 3442 /* "Create" an empty reftable (one cluster) directly after the image 3443 * header and an empty L1 table three clusters after the image header; 3444 * the cluster between those two will be used as the first refblock */ 3445 l1_ofs_rt_ofs_cls.l1_offset = cpu_to_be64(3 * s->cluster_size); 3446 l1_ofs_rt_ofs_cls.reftable_offset = cpu_to_be64(s->cluster_size); 3447 l1_ofs_rt_ofs_cls.reftable_clusters = cpu_to_be32(1); 3448 ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_table_offset), 3449 &l1_ofs_rt_ofs_cls, sizeof(l1_ofs_rt_ofs_cls)); 3450 if (ret < 0) { 3451 goto fail_broken_refcounts; 3452 } 3453 3454 s->l1_table_offset = 3 * s->cluster_size; 3455 3456 new_reftable = g_try_new0(uint64_t, s->cluster_size / sizeof(uint64_t)); 3457 if (!new_reftable) { 3458 ret = -ENOMEM; 3459 goto fail_broken_refcounts; 3460 } 3461 3462 s->refcount_table_offset = s->cluster_size; 3463 s->refcount_table_size = s->cluster_size / sizeof(uint64_t); 3464 s->max_refcount_table_index = 0; 3465 3466 g_free(s->refcount_table); 3467 s->refcount_table = new_reftable; 3468 new_reftable = NULL; 3469 3470 /* Now the in-memory refcount information again corresponds to the on-disk 3471 * information (reftable is empty and no refblocks (the refblock cache is 3472 * empty)); however, this means some clusters (e.g. 
the image header) are 3473 * referenced, but not refcounted, but the normal qcow2 code assumes that 3474 * the in-memory information is always correct */ 3475 3476 BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC); 3477 3478 /* Enter the first refblock into the reftable */ 3479 rt_entry = cpu_to_be64(2 * s->cluster_size); 3480 ret = bdrv_pwrite_sync(bs->file, s->cluster_size, 3481 &rt_entry, sizeof(rt_entry)); 3482 if (ret < 0) { 3483 goto fail_broken_refcounts; 3484 } 3485 s->refcount_table[0] = 2 * s->cluster_size; 3486 3487 s->free_cluster_index = 0; 3488 assert(3 + l1_clusters <= s->refcount_block_size); 3489 offset = qcow2_alloc_clusters(bs, 3 * s->cluster_size + l1_size2); 3490 if (offset < 0) { 3491 ret = offset; 3492 goto fail_broken_refcounts; 3493 } else if (offset > 0) { 3494 error_report("First cluster in emptied image is in use"); 3495 abort(); 3496 } 3497 3498 /* Now finally the in-memory information corresponds to the on-disk 3499 * structures and is correct */ 3500 ret = qcow2_mark_clean(bs); 3501 if (ret < 0) { 3502 goto fail; 3503 } 3504 3505 ret = bdrv_truncate(bs->file, (3 + l1_clusters) * s->cluster_size, 3506 PREALLOC_MODE_OFF, &local_err); 3507 if (ret < 0) { 3508 error_report_err(local_err); 3509 goto fail; 3510 } 3511 3512 return 0; 3513 3514 fail_broken_refcounts: 3515 /* The BDS is unusable at this point. If we wanted to make it usable, we 3516 * would have to call qcow2_refcount_close(), qcow2_refcount_init(), 3517 * qcow2_check_refcounts(), qcow2_refcount_close() and qcow2_refcount_init() 3518 * again. However, because the functions which could have caused this error 3519 * path to be taken are used by those functions as well, it's very likely 3520 * that that sequence will fail as well. Therefore, just eject the BDS. */ 3521 bs->drv = NULL; 3522 3523 fail: 3524 g_free(new_reftable); 3525 return ret; 3526 } 3527 3528 static int qcow2_make_empty(BlockDriverState *bs) 3529 { 3530 BDRVQcow2State *s = bs->opaque; 3531 uint64_t offset, end_offset; 3532 int step = QEMU_ALIGN_DOWN(INT_MAX, s->cluster_size); 3533 int l1_clusters, ret = 0; 3534 3535 l1_clusters = DIV_ROUND_UP(s->l1_size, s->cluster_size / sizeof(uint64_t)); 3536 3537 if (s->qcow_version >= 3 && !s->snapshots && 3538 3 + l1_clusters <= s->refcount_block_size) { 3539 /* The following function only works for qcow2 v3 images (it requires 3540 * the dirty flag) and only as long as there are no snapshots (because 3541 * it completely empties the image). Furthermore, the L1 table and three 3542 * additional clusters (image header, refcount table, one refcount 3543 * block) have to fit inside one refcount block. */ 3544 return make_completely_empty(bs); 3545 } 3546 3547 /* This fallback code simply discards every active cluster; this is slow, 3548 * but works in all cases */ 3549 end_offset = bs->total_sectors * BDRV_SECTOR_SIZE; 3550 for (offset = 0; offset < end_offset; offset += step) { 3551 /* As this function is generally used after committing an external 3552 * snapshot, QCOW2_DISCARD_SNAPSHOT seems appropriate. Also, the 3553 * default action for this kind of discard is to pass the discard, 3554 * which will ideally result in an actually smaller image file, as 3555 * is probably desired. 
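 * (step is INT_MAX aligned down to the cluster size, so each discard call
 * below stays within an int byte count while remaining cluster aligned.)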
*/ 3556 ret = qcow2_cluster_discard(bs, offset, MIN(step, end_offset - offset), 3557 QCOW2_DISCARD_SNAPSHOT, true); 3558 if (ret < 0) { 3559 break; 3560 } 3561 } 3562 3563 return ret; 3564 } 3565 3566 static coroutine_fn int qcow2_co_flush_to_os(BlockDriverState *bs) 3567 { 3568 BDRVQcow2State *s = bs->opaque; 3569 int ret; 3570 3571 qemu_co_mutex_lock(&s->lock); 3572 ret = qcow2_cache_write(bs, s->l2_table_cache); 3573 if (ret < 0) { 3574 qemu_co_mutex_unlock(&s->lock); 3575 return ret; 3576 } 3577 3578 if (qcow2_need_accurate_refcounts(s)) { 3579 ret = qcow2_cache_write(bs, s->refcount_block_cache); 3580 if (ret < 0) { 3581 qemu_co_mutex_unlock(&s->lock); 3582 return ret; 3583 } 3584 } 3585 qemu_co_mutex_unlock(&s->lock); 3586 3587 return 0; 3588 } 3589 3590 static BlockMeasureInfo *qcow2_measure(QemuOpts *opts, BlockDriverState *in_bs, 3591 Error **errp) 3592 { 3593 Error *local_err = NULL; 3594 BlockMeasureInfo *info; 3595 uint64_t required = 0; /* bytes that contribute to required size */ 3596 uint64_t virtual_size; /* disk size as seen by guest */ 3597 uint64_t refcount_bits; 3598 uint64_t l2_tables; 3599 size_t cluster_size; 3600 int version; 3601 char *optstr; 3602 PreallocMode prealloc; 3603 bool has_backing_file; 3604 3605 /* Parse image creation options */ 3606 cluster_size = qcow2_opt_get_cluster_size_del(opts, &local_err); 3607 if (local_err) { 3608 goto err; 3609 } 3610 3611 version = qcow2_opt_get_version_del(opts, &local_err); 3612 if (local_err) { 3613 goto err; 3614 } 3615 3616 refcount_bits = qcow2_opt_get_refcount_bits_del(opts, version, &local_err); 3617 if (local_err) { 3618 goto err; 3619 } 3620 3621 optstr = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC); 3622 prealloc = qapi_enum_parse(PreallocMode_lookup, optstr, 3623 PREALLOC_MODE__MAX, PREALLOC_MODE_OFF, 3624 &local_err); 3625 g_free(optstr); 3626 if (local_err) { 3627 goto err; 3628 } 3629 3630 optstr = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE); 3631 has_backing_file = !!optstr; 3632 g_free(optstr); 3633 3634 virtual_size = align_offset(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0), 3635 cluster_size); 3636 3637 /* Check that virtual disk size is valid */ 3638 l2_tables = DIV_ROUND_UP(virtual_size / cluster_size, 3639 cluster_size / sizeof(uint64_t)); 3640 if (l2_tables * sizeof(uint64_t) > QCOW_MAX_L1_SIZE) { 3641 error_setg(&local_err, "The image size is too large " 3642 "(try using a larger cluster size)"); 3643 goto err; 3644 } 3645 3646 /* Account for input image */ 3647 if (in_bs) { 3648 int64_t ssize = bdrv_getlength(in_bs); 3649 if (ssize < 0) { 3650 error_setg_errno(&local_err, -ssize, 3651 "Unable to get image virtual_size"); 3652 goto err; 3653 } 3654 3655 virtual_size = align_offset(ssize, cluster_size); 3656 3657 if (has_backing_file) { 3658 /* We don't how much of the backing chain is shared by the input 3659 * image and the new image file. In the worst case the new image's 3660 * backing file has nothing in common with the input image. Be 3661 * conservative and assume all clusters need to be written. 
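 * (The opposite case, handled below, counts only clusters that actually
 * hold allocated data in the input image.)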
3662 */ 3663 required = virtual_size; 3664 } else { 3665 int cluster_sectors = cluster_size / BDRV_SECTOR_SIZE; 3666 int64_t sector_num; 3667 int pnum = 0; 3668 3669 for (sector_num = 0; 3670 sector_num < ssize / BDRV_SECTOR_SIZE; 3671 sector_num += pnum) { 3672 int nb_sectors = MAX(ssize / BDRV_SECTOR_SIZE - sector_num, 3673 INT_MAX); 3674 BlockDriverState *file; 3675 int64_t ret; 3676 3677 ret = bdrv_get_block_status_above(in_bs, NULL, 3678 sector_num, nb_sectors, 3679 &pnum, &file); 3680 if (ret < 0) { 3681 error_setg_errno(&local_err, -ret, 3682 "Unable to get block status"); 3683 goto err; 3684 } 3685 3686 if (ret & BDRV_BLOCK_ZERO) { 3687 /* Skip zero regions (safe with no backing file) */ 3688 } else if ((ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED)) == 3689 (BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED)) { 3690 /* Extend pnum to end of cluster for next iteration */ 3691 pnum = ROUND_UP(sector_num + pnum, cluster_sectors) - 3692 sector_num; 3693 3694 /* Count clusters we've seen */ 3695 required += (sector_num % cluster_sectors + pnum) * 3696 BDRV_SECTOR_SIZE; 3697 } 3698 } 3699 } 3700 } 3701 3702 /* Take into account preallocation. Nothing special is needed for 3703 * PREALLOC_MODE_METADATA since metadata is always counted. 3704 */ 3705 if (prealloc == PREALLOC_MODE_FULL || prealloc == PREALLOC_MODE_FALLOC) { 3706 required = virtual_size; 3707 } 3708 3709 info = g_new(BlockMeasureInfo, 1); 3710 info->fully_allocated = 3711 qcow2_calc_prealloc_size(virtual_size, cluster_size, 3712 ctz32(refcount_bits)); 3713 3714 /* Remove data clusters that are not required. This overestimates the 3715 * required size because metadata needed for the fully allocated file is 3716 * still counted. 3717 */ 3718 info->required = info->fully_allocated - virtual_size + required; 3719 return info; 3720 3721 err: 3722 error_propagate(errp, local_err); 3723 return NULL; 3724 } 3725 3726 static int qcow2_get_info(BlockDriverState *bs, BlockDriverInfo *bdi) 3727 { 3728 BDRVQcow2State *s = bs->opaque; 3729 bdi->unallocated_blocks_are_zero = true; 3730 bdi->can_write_zeroes_with_unmap = (s->qcow_version >= 3); 3731 bdi->cluster_size = s->cluster_size; 3732 bdi->vm_state_offset = qcow2_vm_state_offset(s); 3733 return 0; 3734 } 3735 3736 static ImageInfoSpecific *qcow2_get_specific_info(BlockDriverState *bs) 3737 { 3738 BDRVQcow2State *s = bs->opaque; 3739 ImageInfoSpecific *spec_info; 3740 QCryptoBlockInfo *encrypt_info = NULL; 3741 3742 if (s->crypto != NULL) { 3743 encrypt_info = qcrypto_block_get_info(s->crypto, &error_abort); 3744 } 3745 3746 spec_info = g_new(ImageInfoSpecific, 1); 3747 *spec_info = (ImageInfoSpecific){ 3748 .type = IMAGE_INFO_SPECIFIC_KIND_QCOW2, 3749 .u.qcow2.data = g_new(ImageInfoSpecificQCow2, 1), 3750 }; 3751 if (s->qcow_version == 2) { 3752 *spec_info->u.qcow2.data = (ImageInfoSpecificQCow2){ 3753 .compat = g_strdup("0.10"), 3754 .refcount_bits = s->refcount_bits, 3755 }; 3756 } else if (s->qcow_version == 3) { 3757 *spec_info->u.qcow2.data = (ImageInfoSpecificQCow2){ 3758 .compat = g_strdup("1.1"), 3759 .lazy_refcounts = s->compatible_features & 3760 QCOW2_COMPAT_LAZY_REFCOUNTS, 3761 .has_lazy_refcounts = true, 3762 .corrupt = s->incompatible_features & 3763 QCOW2_INCOMPAT_CORRUPT, 3764 .has_corrupt = true, 3765 .refcount_bits = s->refcount_bits, 3766 }; 3767 } else { 3768 /* if this assertion fails, this probably means a new version was 3769 * added without having it covered here */ 3770 assert(false); 3771 } 3772 3773 if (encrypt_info) { 3774 ImageInfoSpecificQCow2Encryption *qencrypt = 
3775 g_new(ImageInfoSpecificQCow2Encryption, 1); 3776 switch (encrypt_info->format) { 3777 case Q_CRYPTO_BLOCK_FORMAT_QCOW: 3778 qencrypt->format = BLOCKDEV_QCOW2_ENCRYPTION_FORMAT_AES; 3779 qencrypt->u.aes = encrypt_info->u.qcow; 3780 break; 3781 case Q_CRYPTO_BLOCK_FORMAT_LUKS: 3782 qencrypt->format = BLOCKDEV_QCOW2_ENCRYPTION_FORMAT_LUKS; 3783 qencrypt->u.luks = encrypt_info->u.luks; 3784 break; 3785 default: 3786 abort(); 3787 } 3788 /* Since we did shallow copy above, erase any pointers 3789 * in the original info */ 3790 memset(&encrypt_info->u, 0, sizeof(encrypt_info->u)); 3791 qapi_free_QCryptoBlockInfo(encrypt_info); 3792 3793 spec_info->u.qcow2.data->has_encrypt = true; 3794 spec_info->u.qcow2.data->encrypt = qencrypt; 3795 } 3796 3797 return spec_info; 3798 } 3799 3800 #if 0 3801 static void dump_refcounts(BlockDriverState *bs) 3802 { 3803 BDRVQcow2State *s = bs->opaque; 3804 int64_t nb_clusters, k, k1, size; 3805 int refcount; 3806 3807 size = bdrv_getlength(bs->file->bs); 3808 nb_clusters = size_to_clusters(s, size); 3809 for(k = 0; k < nb_clusters;) { 3810 k1 = k; 3811 refcount = get_refcount(bs, k); 3812 k++; 3813 while (k < nb_clusters && get_refcount(bs, k) == refcount) 3814 k++; 3815 printf("%" PRId64 ": refcount=%d nb=%" PRId64 "\n", k, refcount, 3816 k - k1); 3817 } 3818 } 3819 #endif 3820 3821 static int qcow2_save_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, 3822 int64_t pos) 3823 { 3824 BDRVQcow2State *s = bs->opaque; 3825 3826 BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_SAVE); 3827 return bs->drv->bdrv_co_pwritev(bs, qcow2_vm_state_offset(s) + pos, 3828 qiov->size, qiov, 0); 3829 } 3830 3831 static int qcow2_load_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, 3832 int64_t pos) 3833 { 3834 BDRVQcow2State *s = bs->opaque; 3835 3836 BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_LOAD); 3837 return bs->drv->bdrv_co_preadv(bs, qcow2_vm_state_offset(s) + pos, 3838 qiov->size, qiov, 0); 3839 } 3840 3841 /* 3842 * Downgrades an image's version. To achieve this, any incompatible features 3843 * have to be removed. 
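 *
 * Concretely, mirroring the code below: only a downgrade to version 2 is
 * supported and it requires 16-bit refcounts; the dirty flag is cleared,
 * any other incompatible feature makes the downgrade fail with -ENOTSUP,
 * compatible and autoclear feature bits are simply dropped, zero clusters
 * are expanded (version 2 cannot represent them), and finally the header
 * is rewritten with the new version number.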

#if 0
static void dump_refcounts(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;
    int64_t nb_clusters, k, k1, size;
    int refcount;

    size = bdrv_getlength(bs->file->bs);
    nb_clusters = size_to_clusters(s, size);
    for (k = 0; k < nb_clusters;) {
        k1 = k;
        refcount = get_refcount(bs, k);
        k++;
        while (k < nb_clusters && get_refcount(bs, k) == refcount)
            k++;
        printf("%" PRId64 ": refcount=%d nb=%" PRId64 "\n", k, refcount,
               k - k1);
    }
}
#endif

static int qcow2_save_vmstate(BlockDriverState *bs, QEMUIOVector *qiov,
                              int64_t pos)
{
    BDRVQcow2State *s = bs->opaque;

    BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_SAVE);
    return bs->drv->bdrv_co_pwritev(bs, qcow2_vm_state_offset(s) + pos,
                                    qiov->size, qiov, 0);
}

static int qcow2_load_vmstate(BlockDriverState *bs, QEMUIOVector *qiov,
                              int64_t pos)
{
    BDRVQcow2State *s = bs->opaque;

    BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_LOAD);
    return bs->drv->bdrv_co_preadv(bs, qcow2_vm_state_offset(s) + pos,
                                   qiov->size, qiov, 0);
}

/*
 * Downgrades an image's version. To achieve this, any incompatible features
 * have to be removed.
 */
static int qcow2_downgrade(BlockDriverState *bs, int target_version,
                           BlockDriverAmendStatusCB *status_cb, void *cb_opaque)
{
    BDRVQcow2State *s = bs->opaque;
    int current_version = s->qcow_version;
    int ret;

    if (target_version == current_version) {
        return 0;
    } else if (target_version > current_version) {
        return -EINVAL;
    } else if (target_version != 2) {
        return -EINVAL;
    }

    if (s->refcount_order != 4) {
        error_report("compat=0.10 requires refcount_bits=16");
        return -ENOTSUP;
    }

    /* clear incompatible features */
    if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) {
        ret = qcow2_mark_clean(bs);
        if (ret < 0) {
            return ret;
        }
    }

    /* with QCOW2_INCOMPAT_CORRUPT, it is pretty much impossible to get here in
     * the first place; if that happens nonetheless, returning -ENOTSUP is the
     * best thing to do anyway */

    if (s->incompatible_features) {
        return -ENOTSUP;
    }

    /* since we can ignore compatible features, we can set them to 0 as well */
    s->compatible_features = 0;
    /* if lazy refcounts have been used, they have already been fixed through
     * clearing the dirty flag */

    /* clearing autoclear features is trivial */
    s->autoclear_features = 0;

    ret = qcow2_expand_zero_clusters(bs, status_cb, cb_opaque);
    if (ret < 0) {
        return ret;
    }

    s->qcow_version = target_version;
    ret = qcow2_update_header(bs);
    if (ret < 0) {
        s->qcow_version = current_version;
        return ret;
    }
    return 0;
}
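
/*
 * The downgrade path is typically reached via image amendment, e.g.
 * (illustrative invocation, not defined in this file):
 *
 *   qemu-img amend -o compat=0.10 test.qcow2
 *
 * Before the header is rewritten as version 2, v3-only state is removed:
 * the dirty flag is cleared, zero clusters are expanded, and the compatible
 * and autoclear feature bits are dropped.
 */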

typedef enum Qcow2AmendOperation {
    /* This is the value Qcow2AmendHelperCBInfo::last_operation will be
     * statically initialized to so that the helper CB can discern the first
     * invocation from an operation change */
    QCOW2_NO_OPERATION = 0,

    QCOW2_CHANGING_REFCOUNT_ORDER,
    QCOW2_DOWNGRADING,
} Qcow2AmendOperation;

typedef struct Qcow2AmendHelperCBInfo {
    /* The code coordinating the amend operations should only modify
     * these four fields; the rest will be managed by the CB */
    BlockDriverAmendStatusCB *original_status_cb;
    void *original_cb_opaque;

    Qcow2AmendOperation current_operation;

    /* Total number of operations to perform (only set once) */
    int total_operations;

    /* The following fields are managed by the CB */

    /* Number of operations completed */
    int operations_completed;

    /* Cumulative offset of all completed operations */
    int64_t offset_completed;

    Qcow2AmendOperation last_operation;
    int64_t last_work_size;
} Qcow2AmendHelperCBInfo;

static void qcow2_amend_helper_cb(BlockDriverState *bs,
                                  int64_t operation_offset,
                                  int64_t operation_work_size, void *opaque)
{
    Qcow2AmendHelperCBInfo *info = opaque;
    int64_t current_work_size;
    int64_t projected_work_size;

    if (info->current_operation != info->last_operation) {
        if (info->last_operation != QCOW2_NO_OPERATION) {
            info->offset_completed += info->last_work_size;
            info->operations_completed++;
        }

        info->last_operation = info->current_operation;
    }

    assert(info->total_operations > 0);
    assert(info->operations_completed < info->total_operations);

    info->last_work_size = operation_work_size;

    current_work_size = info->offset_completed + operation_work_size;

    /* current_work_size is the total work size for (operations_completed + 1)
     * operations (which includes this one), so multiply it by the number of
     * operations not covered and divide it by the number of operations
     * covered to get a projection for the operations not covered */
    projected_work_size = current_work_size * (info->total_operations -
                                               info->operations_completed - 1)
                                            / (info->operations_completed + 1);

    info->original_status_cb(bs, info->offset_completed + operation_offset,
                             current_work_size + projected_work_size,
                             info->original_cb_opaque);
}
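
/*
 * Worked example of the projection above (illustrative numbers): with
 * total_operations = 2, suppose the first operation finished with a work
 * size of 600 and the second currently reports operation_work_size = 400.
 * Then current_work_size = 600 + 400 = 1000 covers 2 of 2 operations, so
 * projected_work_size = 1000 * (2 - 1 - 1) / (1 + 1) = 0 and the status CB
 * sees a total of 1000.  While still in the first operation
 * (operations_completed = 0), the same 600 is doubled to a reported total of
 * 1200 as a projection for the not-yet-started second operation.
 */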

static int qcow2_amend_options(BlockDriverState *bs, QemuOpts *opts,
                               BlockDriverAmendStatusCB *status_cb,
                               void *cb_opaque)
{
    BDRVQcow2State *s = bs->opaque;
    int old_version = s->qcow_version, new_version = old_version;
    uint64_t new_size = 0;
    const char *backing_file = NULL, *backing_format = NULL;
    bool lazy_refcounts = s->use_lazy_refcounts;
    const char *compat = NULL;
    uint64_t cluster_size = s->cluster_size;
    bool encrypt;
    int encformat;
    int refcount_bits = s->refcount_bits;
    Error *local_err = NULL;
    int ret;
    QemuOptDesc *desc = opts->list->desc;
    Qcow2AmendHelperCBInfo helper_cb_info;

    while (desc && desc->name) {
        if (!qemu_opt_find(opts, desc->name)) {
            /* only change explicitly defined options */
            desc++;
            continue;
        }

        if (!strcmp(desc->name, BLOCK_OPT_COMPAT_LEVEL)) {
            compat = qemu_opt_get(opts, BLOCK_OPT_COMPAT_LEVEL);
            if (!compat) {
                /* preserve default */
            } else if (!strcmp(compat, "0.10")) {
                new_version = 2;
            } else if (!strcmp(compat, "1.1")) {
                new_version = 3;
            } else {
                error_report("Unknown compatibility level %s", compat);
                return -EINVAL;
            }
        } else if (!strcmp(desc->name, BLOCK_OPT_PREALLOC)) {
            error_report("Cannot change preallocation mode");
            return -ENOTSUP;
        } else if (!strcmp(desc->name, BLOCK_OPT_SIZE)) {
            new_size = qemu_opt_get_size(opts, BLOCK_OPT_SIZE, 0);
        } else if (!strcmp(desc->name, BLOCK_OPT_BACKING_FILE)) {
            backing_file = qemu_opt_get(opts, BLOCK_OPT_BACKING_FILE);
        } else if (!strcmp(desc->name, BLOCK_OPT_BACKING_FMT)) {
            backing_format = qemu_opt_get(opts, BLOCK_OPT_BACKING_FMT);
        } else if (!strcmp(desc->name, BLOCK_OPT_ENCRYPT)) {
            encrypt = qemu_opt_get_bool(opts, BLOCK_OPT_ENCRYPT,
                                        !!s->crypto);

            if (encrypt != !!s->crypto) {
                error_report("Changing the encryption flag is not supported");
                return -ENOTSUP;
            }
        } else if (!strcmp(desc->name, BLOCK_OPT_ENCRYPT_FORMAT)) {
            encformat = qcow2_crypt_method_from_format(
                qemu_opt_get(opts, BLOCK_OPT_ENCRYPT_FORMAT));

            if (encformat != s->crypt_method_header) {
                error_report("Changing the encryption format is not supported");
                return -ENOTSUP;
            }
        } else if (!strcmp(desc->name, BLOCK_OPT_CLUSTER_SIZE)) {
            cluster_size = qemu_opt_get_size(opts, BLOCK_OPT_CLUSTER_SIZE,
                                             cluster_size);
            if (cluster_size != s->cluster_size) {
                error_report("Changing the cluster size is not supported");
                return -ENOTSUP;
            }
        } else if (!strcmp(desc->name, BLOCK_OPT_LAZY_REFCOUNTS)) {
            lazy_refcounts = qemu_opt_get_bool(opts, BLOCK_OPT_LAZY_REFCOUNTS,
                                               lazy_refcounts);
        } else if (!strcmp(desc->name, BLOCK_OPT_REFCOUNT_BITS)) {
            refcount_bits = qemu_opt_get_number(opts, BLOCK_OPT_REFCOUNT_BITS,
                                                refcount_bits);

            if (refcount_bits <= 0 || refcount_bits > 64 ||
                !is_power_of_2(refcount_bits))
            {
                error_report("Refcount width must be a power of two and may "
                             "not exceed 64 bits");
                return -EINVAL;
            }
        } else {
            /* if this point is reached, this probably means a new option was
             * added without having it covered here */
            abort();
        }

        desc++;
    }

    helper_cb_info = (Qcow2AmendHelperCBInfo){
        .original_status_cb = status_cb,
        .original_cb_opaque = cb_opaque,
        .total_operations = (new_version < old_version)
                          + (s->refcount_bits != refcount_bits)
    };

    /* Upgrade first (some features may require compat=1.1) */
    if (new_version > old_version) {
        s->qcow_version = new_version;
        ret = qcow2_update_header(bs);
        if (ret < 0) {
            s->qcow_version = old_version;
            return ret;
        }
    }

    if (s->refcount_bits != refcount_bits) {
        int refcount_order = ctz32(refcount_bits);

        if (new_version < 3 && refcount_bits != 16) {
            error_report("Refcount widths other than 16 bits require "
                         "compatibility level 1.1 or above (use compat=1.1 or "
                         "greater)");
            return -EINVAL;
        }

        helper_cb_info.current_operation = QCOW2_CHANGING_REFCOUNT_ORDER;
        ret = qcow2_change_refcount_order(bs, refcount_order,
                                          &qcow2_amend_helper_cb,
                                          &helper_cb_info, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            return ret;
        }
    }

    if (backing_file || backing_format) {
        ret = qcow2_change_backing_file(bs,
                                        backing_file ?: s->image_backing_file,
                                        backing_format ?: s->image_backing_format);
        if (ret < 0) {
            return ret;
        }
    }

    if (s->use_lazy_refcounts != lazy_refcounts) {
        if (lazy_refcounts) {
            if (new_version < 3) {
                error_report("Lazy refcounts only supported with compatibility "
                             "level 1.1 and above (use compat=1.1 or greater)");
                return -EINVAL;
            }
            s->compatible_features |= QCOW2_COMPAT_LAZY_REFCOUNTS;
            ret = qcow2_update_header(bs);
            if (ret < 0) {
                s->compatible_features &= ~QCOW2_COMPAT_LAZY_REFCOUNTS;
                return ret;
            }
            s->use_lazy_refcounts = true;
        } else {
            /* make image clean first */
            ret = qcow2_mark_clean(bs);
            if (ret < 0) {
                return ret;
            }
            /* now disallow lazy refcounts */
            s->compatible_features &= ~QCOW2_COMPAT_LAZY_REFCOUNTS;
            ret = qcow2_update_header(bs);
            if (ret < 0) {
                s->compatible_features |= QCOW2_COMPAT_LAZY_REFCOUNTS;
                return ret;
            }
            s->use_lazy_refcounts = false;
        }
    }

    if (new_size) {
        BlockBackend *blk = blk_new(BLK_PERM_RESIZE, BLK_PERM_ALL);
        ret = blk_insert_bs(blk, bs, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            blk_unref(blk);
            return ret;
        }

        ret = blk_truncate(blk, new_size, PREALLOC_MODE_OFF, &local_err);
        blk_unref(blk);
        if (ret < 0) {
            error_report_err(local_err);
            return ret;
        }
    }

    /* Downgrade last (so unsupported features can be removed before) */
    if (new_version < old_version) {
        helper_cb_info.current_operation = QCOW2_DOWNGRADING;
        ret = qcow2_downgrade(bs, new_version, &qcow2_amend_helper_cb,
                              &helper_cb_info);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}
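
/*
 * Note on the ordering in qcow2_amend_options(): a version upgrade is applied
 * first and a downgrade last so that the image stays valid at every step.
 * For example (illustrative): "compat=1.1,lazy_refcounts=on" on a v2 image
 * upgrades the header before setting the lazy refcounts bit, while
 * "compat=0.10,lazy_refcounts=off" clears the bit before downgrading.
 */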

/*
 * If offset or size are negative, respectively, they will not be included in
 * the BLOCK_IMAGE_CORRUPTED event emitted.
 * fatal will be ignored for read-only BDS; corruptions found there will always
 * be considered non-fatal.
 */
void qcow2_signal_corruption(BlockDriverState *bs, bool fatal, int64_t offset,
                             int64_t size, const char *message_format, ...)
{
    BDRVQcow2State *s = bs->opaque;
    const char *node_name;
    char *message;
    va_list ap;

    fatal = fatal && !bs->read_only;

    if (s->signaled_corruption &&
        (!fatal || (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT)))
    {
        return;
    }

    va_start(ap, message_format);
    message = g_strdup_vprintf(message_format, ap);
    va_end(ap);

    if (fatal) {
        fprintf(stderr, "qcow2: Marking image as corrupt: %s; further "
                "corruption events will be suppressed\n", message);
    } else {
        fprintf(stderr, "qcow2: Image is corrupt: %s; further non-fatal "
                "corruption events will be suppressed\n", message);
    }

    node_name = bdrv_get_node_name(bs);
    qapi_event_send_block_image_corrupted(bdrv_get_device_name(bs),
                                          *node_name != '\0', node_name,
                                          message, offset >= 0, offset,
                                          size >= 0, size,
                                          fatal, &error_abort);
    g_free(message);

    if (fatal) {
        qcow2_mark_corrupt(bs);
        bs->drv = NULL; /* make BDS unusable */
    }

    s->signaled_corruption = true;
}
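
/*
 * On the QMP monitor a corruption report surfaces as a BLOCK_IMAGE_CORRUPTED
 * event, roughly of the following shape (illustrative values):
 *
 *   { "event": "BLOCK_IMAGE_CORRUPTED",
 *     "data": { "device": "drive0", "node-name": "node0",
 *               "msg": "<description of the corruption>",
 *               "offset": 65536, "size": 8, "fatal": true } }
 */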

static QemuOptsList qcow2_create_opts = {
    .name = "qcow2-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(qcow2_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size"
        },
        {
            .name = BLOCK_OPT_COMPAT_LEVEL,
            .type = QEMU_OPT_STRING,
            .help = "Compatibility level (0.10 or 1.1)"
        },
        {
            .name = BLOCK_OPT_BACKING_FILE,
            .type = QEMU_OPT_STRING,
            .help = "File name of a base image"
        },
        {
            .name = BLOCK_OPT_BACKING_FMT,
            .type = QEMU_OPT_STRING,
            .help = "Image format of the base image"
        },
        {
            .name = BLOCK_OPT_ENCRYPT,
            .type = QEMU_OPT_BOOL,
            .help = "Encrypt the image with format 'aes'. (Deprecated "
                    "in favor of " BLOCK_OPT_ENCRYPT_FORMAT "=aes)",
        },
        {
            .name = BLOCK_OPT_ENCRYPT_FORMAT,
            .type = QEMU_OPT_STRING,
            .help = "Encrypt the image, format choices: 'aes', 'luks'",
        },
        BLOCK_CRYPTO_OPT_DEF_KEY_SECRET("encrypt.",
            "ID of secret providing qcow AES key or LUKS passphrase"),
        BLOCK_CRYPTO_OPT_DEF_LUKS_CIPHER_ALG("encrypt."),
        BLOCK_CRYPTO_OPT_DEF_LUKS_CIPHER_MODE("encrypt."),
        BLOCK_CRYPTO_OPT_DEF_LUKS_IVGEN_ALG("encrypt."),
        BLOCK_CRYPTO_OPT_DEF_LUKS_IVGEN_HASH_ALG("encrypt."),
        BLOCK_CRYPTO_OPT_DEF_LUKS_HASH_ALG("encrypt."),
        BLOCK_CRYPTO_OPT_DEF_LUKS_ITER_TIME("encrypt."),
        {
            .name = BLOCK_OPT_CLUSTER_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "qcow2 cluster size",
            .def_value_str = stringify(DEFAULT_CLUSTER_SIZE)
        },
        {
            .name = BLOCK_OPT_PREALLOC,
            .type = QEMU_OPT_STRING,
            .help = "Preallocation mode (allowed values: off, metadata, "
                    "falloc, full)"
        },
        {
            .name = BLOCK_OPT_LAZY_REFCOUNTS,
            .type = QEMU_OPT_BOOL,
            .help = "Postpone refcount updates",
            .def_value_str = "off"
        },
        {
            .name = BLOCK_OPT_REFCOUNT_BITS,
            .type = QEMU_OPT_NUMBER,
            .help = "Width of a reference count entry in bits",
            .def_value_str = "16"
        },
        { /* end of list */ }
    }
};
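
/*
 * These are the options accepted by "qemu-img create -f qcow2 -o ...";
 * for example (illustrative invocation, not defined in this file):
 *
 *   qemu-img create -f qcow2 \
 *       -o cluster_size=65536,lazy_refcounts=on,compat=1.1 disk.qcow2 16G
 */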

BlockDriver bdrv_qcow2 = {
    .format_name = "qcow2",
    .instance_size = sizeof(BDRVQcow2State),
    .bdrv_probe = qcow2_probe,
    .bdrv_open = qcow2_open,
    .bdrv_close = qcow2_close,
    .bdrv_reopen_prepare = qcow2_reopen_prepare,
    .bdrv_reopen_commit = qcow2_reopen_commit,
    .bdrv_reopen_abort = qcow2_reopen_abort,
    .bdrv_join_options = qcow2_join_options,
    .bdrv_child_perm = bdrv_format_default_perms,
    .bdrv_create = qcow2_create,
    .bdrv_has_zero_init = bdrv_has_zero_init_1,
    .bdrv_co_get_block_status = qcow2_co_get_block_status,

    .bdrv_co_preadv = qcow2_co_preadv,
    .bdrv_co_pwritev = qcow2_co_pwritev,
    .bdrv_co_flush_to_os = qcow2_co_flush_to_os,

    .bdrv_co_pwrite_zeroes = qcow2_co_pwrite_zeroes,
    .bdrv_co_pdiscard = qcow2_co_pdiscard,
    .bdrv_truncate = qcow2_truncate,
    .bdrv_co_pwritev_compressed = qcow2_co_pwritev_compressed,
    .bdrv_make_empty = qcow2_make_empty,

    .bdrv_snapshot_create = qcow2_snapshot_create,
    .bdrv_snapshot_goto = qcow2_snapshot_goto,
    .bdrv_snapshot_delete = qcow2_snapshot_delete,
    .bdrv_snapshot_list = qcow2_snapshot_list,
    .bdrv_snapshot_load_tmp = qcow2_snapshot_load_tmp,
    .bdrv_measure = qcow2_measure,
    .bdrv_get_info = qcow2_get_info,
    .bdrv_get_specific_info = qcow2_get_specific_info,

    .bdrv_save_vmstate = qcow2_save_vmstate,
    .bdrv_load_vmstate = qcow2_load_vmstate,

    .supports_backing = true,
    .bdrv_change_backing_file = qcow2_change_backing_file,

    .bdrv_refresh_limits = qcow2_refresh_limits,
    .bdrv_invalidate_cache = qcow2_invalidate_cache,
    .bdrv_inactivate = qcow2_inactivate,

    .create_opts = &qcow2_create_opts,
    .bdrv_check = qcow2_check,
    .bdrv_amend_options = qcow2_amend_options,

    .bdrv_detach_aio_context = qcow2_detach_aio_context,
    .bdrv_attach_aio_context = qcow2_attach_aio_context,

    .bdrv_reopen_bitmaps_rw = qcow2_reopen_bitmaps_rw,
    .bdrv_can_store_new_dirty_bitmap = qcow2_can_store_new_dirty_bitmap,
    .bdrv_remove_persistent_dirty_bitmap = qcow2_remove_persistent_dirty_bitmap,
};

static void bdrv_qcow2_init(void)
{
    bdrv_register(&bdrv_qcow2);
}

block_init(bdrv_qcow2_init);