1 /* 2 * Block driver for the QCOW version 2 format 3 * 4 * Copyright (c) 2004-2006 Fabrice Bellard 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a copy 7 * of this software and associated documentation files (the "Software"), to deal 8 * in the Software without restriction, including without limitation the rights 9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 * copies of the Software, and to permit persons to whom the Software is 11 * furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 * THE SOFTWARE. 23 */ 24 #include "qemu/osdep.h" 25 #include "block/block_int.h" 26 #include "sysemu/block-backend.h" 27 #include "qemu/module.h" 28 #include <zlib.h> 29 #include "block/qcow2.h" 30 #include "qemu/error-report.h" 31 #include "qapi/qmp/qerror.h" 32 #include "qapi/qmp/qbool.h" 33 #include "qapi/util.h" 34 #include "qapi/qmp/types.h" 35 #include "qapi-event.h" 36 #include "trace.h" 37 #include "qemu/option_int.h" 38 #include "qemu/cutils.h" 39 #include "qemu/bswap.h" 40 #include "qapi/opts-visitor.h" 41 #include "qapi-visit.h" 42 #include "block/crypto.h" 43 44 /* 45 Differences with QCOW: 46 47 - Support for multiple incremental snapshots. 48 - Memory management by reference counts. 49 - Clusters which have a reference count of one have the bit 50 QCOW_OFLAG_COPIED to optimize write performance. 51 - Size of compressed clusters is stored in sectors to reduce bit usage 52 in the cluster offsets. 53 - Support for storing additional data (such as the VM state) in the 54 snapshots. 55 - If a backing store is used, the cluster size is not constrained 56 (could be backported to QCOW). 57 - L2 tables have always a size of one cluster. 
58 */ 59 60 61 typedef struct { 62 uint32_t magic; 63 uint32_t len; 64 } QEMU_PACKED QCowExtension; 65 66 #define QCOW2_EXT_MAGIC_END 0 67 #define QCOW2_EXT_MAGIC_BACKING_FORMAT 0xE2792ACA 68 #define QCOW2_EXT_MAGIC_FEATURE_TABLE 0x6803f857 69 #define QCOW2_EXT_MAGIC_CRYPTO_HEADER 0x0537be77 70 #define QCOW2_EXT_MAGIC_BITMAPS 0x23852875 71 72 static int qcow2_probe(const uint8_t *buf, int buf_size, const char *filename) 73 { 74 const QCowHeader *cow_header = (const void *)buf; 75 76 if (buf_size >= sizeof(QCowHeader) && 77 be32_to_cpu(cow_header->magic) == QCOW_MAGIC && 78 be32_to_cpu(cow_header->version) >= 2) 79 return 100; 80 else 81 return 0; 82 } 83 84 85 static ssize_t qcow2_crypto_hdr_read_func(QCryptoBlock *block, size_t offset, 86 uint8_t *buf, size_t buflen, 87 void *opaque, Error **errp) 88 { 89 BlockDriverState *bs = opaque; 90 BDRVQcow2State *s = bs->opaque; 91 ssize_t ret; 92 93 if ((offset + buflen) > s->crypto_header.length) { 94 error_setg(errp, "Request for data outside of extension header"); 95 return -1; 96 } 97 98 ret = bdrv_pread(bs->file, 99 s->crypto_header.offset + offset, buf, buflen); 100 if (ret < 0) { 101 error_setg_errno(errp, -ret, "Could not read encryption header"); 102 return -1; 103 } 104 return ret; 105 } 106 107 108 static ssize_t qcow2_crypto_hdr_init_func(QCryptoBlock *block, size_t headerlen, 109 void *opaque, Error **errp) 110 { 111 BlockDriverState *bs = opaque; 112 BDRVQcow2State *s = bs->opaque; 113 int64_t ret; 114 int64_t clusterlen; 115 116 ret = qcow2_alloc_clusters(bs, headerlen); 117 if (ret < 0) { 118 error_setg_errno(errp, -ret, 119 "Cannot allocate cluster for LUKS header size %zu", 120 headerlen); 121 return -1; 122 } 123 124 s->crypto_header.length = headerlen; 125 s->crypto_header.offset = ret; 126 127 /* Zero fill remaining space in cluster so it has predictable 128 * content in case of future spec changes */ 129 clusterlen = size_to_clusters(s, headerlen) * s->cluster_size; 130 ret = bdrv_pwrite_zeroes(bs->file, 131 ret + headerlen, 132 clusterlen - headerlen, 0); 133 if (ret < 0) { 134 error_setg_errno(errp, -ret, "Could not zero fill encryption header"); 135 return -1; 136 } 137 138 return ret; 139 } 140 141 142 static ssize_t qcow2_crypto_hdr_write_func(QCryptoBlock *block, size_t offset, 143 const uint8_t *buf, size_t buflen, 144 void *opaque, Error **errp) 145 { 146 BlockDriverState *bs = opaque; 147 BDRVQcow2State *s = bs->opaque; 148 ssize_t ret; 149 150 if ((offset + buflen) > s->crypto_header.length) { 151 error_setg(errp, "Request for data outside of extension header"); 152 return -1; 153 } 154 155 ret = bdrv_pwrite(bs->file, 156 s->crypto_header.offset + offset, buf, buflen); 157 if (ret < 0) { 158 error_setg_errno(errp, -ret, "Could not read encryption header"); 159 return -1; 160 } 161 return ret; 162 } 163 164 165 /* 166 * read qcow2 extension and fill bs 167 * start reading from start_offset 168 * finish reading upon magic of value 0 or when end_offset reached 169 * unknown magic is skipped (future extension this version knows nothing about) 170 * return 0 upon success, non-0 otherwise 171 */ 172 static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset, 173 uint64_t end_offset, void **p_feature_table, 174 int flags, bool *need_update_header, 175 Error **errp) 176 { 177 BDRVQcow2State *s = bs->opaque; 178 QCowExtension ext; 179 uint64_t offset; 180 int ret; 181 Qcow2BitmapHeaderExt bitmaps_ext; 182 183 if (need_update_header != NULL) { 184 *need_update_header = false; 185 } 186 187 #ifdef DEBUG_EXT 
188 printf("qcow2_read_extensions: start=%ld end=%ld\n", start_offset, end_offset); 189 #endif 190 offset = start_offset; 191 while (offset < end_offset) { 192 193 #ifdef DEBUG_EXT 194 /* Sanity check */ 195 if (offset > s->cluster_size) 196 printf("qcow2_read_extension: suspicious offset %lu\n", offset); 197 198 printf("attempting to read extended header in offset %lu\n", offset); 199 #endif 200 201 ret = bdrv_pread(bs->file, offset, &ext, sizeof(ext)); 202 if (ret < 0) { 203 error_setg_errno(errp, -ret, "qcow2_read_extension: ERROR: " 204 "pread fail from offset %" PRIu64, offset); 205 return 1; 206 } 207 be32_to_cpus(&ext.magic); 208 be32_to_cpus(&ext.len); 209 offset += sizeof(ext); 210 #ifdef DEBUG_EXT 211 printf("ext.magic = 0x%x\n", ext.magic); 212 #endif 213 if (offset > end_offset || ext.len > end_offset - offset) { 214 error_setg(errp, "Header extension too large"); 215 return -EINVAL; 216 } 217 218 switch (ext.magic) { 219 case QCOW2_EXT_MAGIC_END: 220 return 0; 221 222 case QCOW2_EXT_MAGIC_BACKING_FORMAT: 223 if (ext.len >= sizeof(bs->backing_format)) { 224 error_setg(errp, "ERROR: ext_backing_format: len=%" PRIu32 225 " too large (>=%zu)", ext.len, 226 sizeof(bs->backing_format)); 227 return 2; 228 } 229 ret = bdrv_pread(bs->file, offset, bs->backing_format, ext.len); 230 if (ret < 0) { 231 error_setg_errno(errp, -ret, "ERROR: ext_backing_format: " 232 "Could not read format name"); 233 return 3; 234 } 235 bs->backing_format[ext.len] = '\0'; 236 s->image_backing_format = g_strdup(bs->backing_format); 237 #ifdef DEBUG_EXT 238 printf("Qcow2: Got format extension %s\n", bs->backing_format); 239 #endif 240 break; 241 242 case QCOW2_EXT_MAGIC_FEATURE_TABLE: 243 if (p_feature_table != NULL) { 244 void* feature_table = g_malloc0(ext.len + 2 * sizeof(Qcow2Feature)); 245 ret = bdrv_pread(bs->file, offset , feature_table, ext.len); 246 if (ret < 0) { 247 error_setg_errno(errp, -ret, "ERROR: ext_feature_table: " 248 "Could not read table"); 249 return ret; 250 } 251 252 *p_feature_table = feature_table; 253 } 254 break; 255 256 case QCOW2_EXT_MAGIC_CRYPTO_HEADER: { 257 unsigned int cflags = 0; 258 if (s->crypt_method_header != QCOW_CRYPT_LUKS) { 259 error_setg(errp, "CRYPTO header extension only " 260 "expected with LUKS encryption method"); 261 return -EINVAL; 262 } 263 if (ext.len != sizeof(Qcow2CryptoHeaderExtension)) { 264 error_setg(errp, "CRYPTO header extension size %u, " 265 "but expected size %zu", ext.len, 266 sizeof(Qcow2CryptoHeaderExtension)); 267 return -EINVAL; 268 } 269 270 ret = bdrv_pread(bs->file, offset, &s->crypto_header, ext.len); 271 if (ret < 0) { 272 error_setg_errno(errp, -ret, 273 "Unable to read CRYPTO header extension"); 274 return ret; 275 } 276 be64_to_cpus(&s->crypto_header.offset); 277 be64_to_cpus(&s->crypto_header.length); 278 279 if ((s->crypto_header.offset % s->cluster_size) != 0) { 280 error_setg(errp, "Encryption header offset '%" PRIu64 "' is " 281 "not a multiple of cluster size '%u'", 282 s->crypto_header.offset, s->cluster_size); 283 return -EINVAL; 284 } 285 286 if (flags & BDRV_O_NO_IO) { 287 cflags |= QCRYPTO_BLOCK_OPEN_NO_IO; 288 } 289 s->crypto = qcrypto_block_open(s->crypto_opts, "encrypt.", 290 qcow2_crypto_hdr_read_func, 291 bs, cflags, errp); 292 if (!s->crypto) { 293 return -EINVAL; 294 } 295 } break; 296 297 case QCOW2_EXT_MAGIC_BITMAPS: 298 if (ext.len != sizeof(bitmaps_ext)) { 299 error_setg_errno(errp, -ret, "bitmaps_ext: " 300 "Invalid extension length"); 301 return -EINVAL; 302 } 303 304 if (!(s->autoclear_features & 
QCOW2_AUTOCLEAR_BITMAPS)) { 305 error_report("WARNING: a program lacking bitmap support " 306 "modified this file, so all bitmaps are now " 307 "considered inconsistent. Some clusters may be " 308 "leaked, run 'qemu-img check -r' on the image " 309 "file to fix."); 310 if (need_update_header != NULL) { 311 /* Updating is needed to drop invalid bitmap extension. */ 312 *need_update_header = true; 313 } 314 break; 315 } 316 317 ret = bdrv_pread(bs->file, offset, &bitmaps_ext, ext.len); 318 if (ret < 0) { 319 error_setg_errno(errp, -ret, "bitmaps_ext: " 320 "Could not read ext header"); 321 return ret; 322 } 323 324 if (bitmaps_ext.reserved32 != 0) { 325 error_setg_errno(errp, -ret, "bitmaps_ext: " 326 "Reserved field is not zero"); 327 return -EINVAL; 328 } 329 330 be32_to_cpus(&bitmaps_ext.nb_bitmaps); 331 be64_to_cpus(&bitmaps_ext.bitmap_directory_size); 332 be64_to_cpus(&bitmaps_ext.bitmap_directory_offset); 333 334 if (bitmaps_ext.nb_bitmaps > QCOW2_MAX_BITMAPS) { 335 error_setg(errp, 336 "bitmaps_ext: Image has %" PRIu32 " bitmaps, " 337 "exceeding the QEMU supported maximum of %d", 338 bitmaps_ext.nb_bitmaps, QCOW2_MAX_BITMAPS); 339 return -EINVAL; 340 } 341 342 if (bitmaps_ext.nb_bitmaps == 0) { 343 error_setg(errp, "found bitmaps extension with zero bitmaps"); 344 return -EINVAL; 345 } 346 347 if (bitmaps_ext.bitmap_directory_offset & (s->cluster_size - 1)) { 348 error_setg(errp, "bitmaps_ext: " 349 "invalid bitmap directory offset"); 350 return -EINVAL; 351 } 352 353 if (bitmaps_ext.bitmap_directory_size > 354 QCOW2_MAX_BITMAP_DIRECTORY_SIZE) { 355 error_setg(errp, "bitmaps_ext: " 356 "bitmap directory size (%" PRIu64 ") exceeds " 357 "the maximum supported size (%d)", 358 bitmaps_ext.bitmap_directory_size, 359 QCOW2_MAX_BITMAP_DIRECTORY_SIZE); 360 return -EINVAL; 361 } 362 363 s->nb_bitmaps = bitmaps_ext.nb_bitmaps; 364 s->bitmap_directory_offset = 365 bitmaps_ext.bitmap_directory_offset; 366 s->bitmap_directory_size = 367 bitmaps_ext.bitmap_directory_size; 368 369 #ifdef DEBUG_EXT 370 printf("Qcow2: Got bitmaps extension: " 371 "offset=%" PRIu64 " nb_bitmaps=%" PRIu32 "\n", 372 s->bitmap_directory_offset, s->nb_bitmaps); 373 #endif 374 break; 375 376 default: 377 /* unknown magic - save it in case we need to rewrite the header */ 378 { 379 Qcow2UnknownHeaderExtension *uext; 380 381 uext = g_malloc0(sizeof(*uext) + ext.len); 382 uext->magic = ext.magic; 383 uext->len = ext.len; 384 QLIST_INSERT_HEAD(&s->unknown_header_ext, uext, next); 385 386 ret = bdrv_pread(bs->file, offset , uext->data, uext->len); 387 if (ret < 0) { 388 error_setg_errno(errp, -ret, "ERROR: unknown extension: " 389 "Could not read data"); 390 return ret; 391 } 392 } 393 break; 394 } 395 396 offset += ((ext.len + 7) & ~7); 397 } 398 399 return 0; 400 } 401 402 static void cleanup_unknown_header_ext(BlockDriverState *bs) 403 { 404 BDRVQcow2State *s = bs->opaque; 405 Qcow2UnknownHeaderExtension *uext, *next; 406 407 QLIST_FOREACH_SAFE(uext, &s->unknown_header_ext, next, next) { 408 QLIST_REMOVE(uext, next); 409 g_free(uext); 410 } 411 } 412 413 static void report_unsupported_feature(Error **errp, Qcow2Feature *table, 414 uint64_t mask) 415 { 416 char *features = g_strdup(""); 417 char *old; 418 419 while (table && table->name[0] != '\0') { 420 if (table->type == QCOW2_FEAT_TYPE_INCOMPATIBLE) { 421 if (mask & (1ULL << table->bit)) { 422 old = features; 423 features = g_strdup_printf("%s%s%.46s", old, *old ? 
", " : "", 424 table->name); 425 g_free(old); 426 mask &= ~(1ULL << table->bit); 427 } 428 } 429 table++; 430 } 431 432 if (mask) { 433 old = features; 434 features = g_strdup_printf("%s%sUnknown incompatible feature: %" PRIx64, 435 old, *old ? ", " : "", mask); 436 g_free(old); 437 } 438 439 error_setg(errp, "Unsupported qcow2 feature(s): %s", features); 440 g_free(features); 441 } 442 443 /* 444 * Sets the dirty bit and flushes afterwards if necessary. 445 * 446 * The incompatible_features bit is only set if the image file header was 447 * updated successfully. Therefore it is not required to check the return 448 * value of this function. 449 */ 450 int qcow2_mark_dirty(BlockDriverState *bs) 451 { 452 BDRVQcow2State *s = bs->opaque; 453 uint64_t val; 454 int ret; 455 456 assert(s->qcow_version >= 3); 457 458 if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) { 459 return 0; /* already dirty */ 460 } 461 462 val = cpu_to_be64(s->incompatible_features | QCOW2_INCOMPAT_DIRTY); 463 ret = bdrv_pwrite(bs->file, offsetof(QCowHeader, incompatible_features), 464 &val, sizeof(val)); 465 if (ret < 0) { 466 return ret; 467 } 468 ret = bdrv_flush(bs->file->bs); 469 if (ret < 0) { 470 return ret; 471 } 472 473 /* Only treat image as dirty if the header was updated successfully */ 474 s->incompatible_features |= QCOW2_INCOMPAT_DIRTY; 475 return 0; 476 } 477 478 /* 479 * Clears the dirty bit and flushes before if necessary. Only call this 480 * function when there are no pending requests, it does not guard against 481 * concurrent requests dirtying the image. 482 */ 483 static int qcow2_mark_clean(BlockDriverState *bs) 484 { 485 BDRVQcow2State *s = bs->opaque; 486 487 if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) { 488 int ret; 489 490 s->incompatible_features &= ~QCOW2_INCOMPAT_DIRTY; 491 492 ret = bdrv_flush(bs); 493 if (ret < 0) { 494 return ret; 495 } 496 497 return qcow2_update_header(bs); 498 } 499 return 0; 500 } 501 502 /* 503 * Marks the image as corrupt. 504 */ 505 int qcow2_mark_corrupt(BlockDriverState *bs) 506 { 507 BDRVQcow2State *s = bs->opaque; 508 509 s->incompatible_features |= QCOW2_INCOMPAT_CORRUPT; 510 return qcow2_update_header(bs); 511 } 512 513 /* 514 * Marks the image as consistent, i.e., unsets the corrupt bit, and flushes 515 * before if necessary. 516 */ 517 int qcow2_mark_consistent(BlockDriverState *bs) 518 { 519 BDRVQcow2State *s = bs->opaque; 520 521 if (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT) { 522 int ret = bdrv_flush(bs); 523 if (ret < 0) { 524 return ret; 525 } 526 527 s->incompatible_features &= ~QCOW2_INCOMPAT_CORRUPT; 528 return qcow2_update_header(bs); 529 } 530 return 0; 531 } 532 533 static int qcow2_check(BlockDriverState *bs, BdrvCheckResult *result, 534 BdrvCheckMode fix) 535 { 536 int ret = qcow2_check_refcounts(bs, result, fix); 537 if (ret < 0) { 538 return ret; 539 } 540 541 if (fix && result->check_errors == 0 && result->corruptions == 0) { 542 ret = qcow2_mark_clean(bs); 543 if (ret < 0) { 544 return ret; 545 } 546 return qcow2_mark_consistent(bs); 547 } 548 return ret; 549 } 550 551 static int validate_table_offset(BlockDriverState *bs, uint64_t offset, 552 uint64_t entries, size_t entry_len) 553 { 554 BDRVQcow2State *s = bs->opaque; 555 uint64_t size; 556 557 /* Use signed INT64_MAX as the maximum even for uint64_t header fields, 558 * because values will be passed to qemu functions taking int64_t. 
*/ 559 if (entries > INT64_MAX / entry_len) { 560 return -EINVAL; 561 } 562 563 size = entries * entry_len; 564 565 if (INT64_MAX - size < offset) { 566 return -EINVAL; 567 } 568 569 /* Tables must be cluster aligned */ 570 if (offset_into_cluster(s, offset) != 0) { 571 return -EINVAL; 572 } 573 574 return 0; 575 } 576 577 static QemuOptsList qcow2_runtime_opts = { 578 .name = "qcow2", 579 .head = QTAILQ_HEAD_INITIALIZER(qcow2_runtime_opts.head), 580 .desc = { 581 { 582 .name = QCOW2_OPT_LAZY_REFCOUNTS, 583 .type = QEMU_OPT_BOOL, 584 .help = "Postpone refcount updates", 585 }, 586 { 587 .name = QCOW2_OPT_DISCARD_REQUEST, 588 .type = QEMU_OPT_BOOL, 589 .help = "Pass guest discard requests to the layer below", 590 }, 591 { 592 .name = QCOW2_OPT_DISCARD_SNAPSHOT, 593 .type = QEMU_OPT_BOOL, 594 .help = "Generate discard requests when snapshot related space " 595 "is freed", 596 }, 597 { 598 .name = QCOW2_OPT_DISCARD_OTHER, 599 .type = QEMU_OPT_BOOL, 600 .help = "Generate discard requests when other clusters are freed", 601 }, 602 { 603 .name = QCOW2_OPT_OVERLAP, 604 .type = QEMU_OPT_STRING, 605 .help = "Selects which overlap checks to perform from a range of " 606 "templates (none, constant, cached, all)", 607 }, 608 { 609 .name = QCOW2_OPT_OVERLAP_TEMPLATE, 610 .type = QEMU_OPT_STRING, 611 .help = "Selects which overlap checks to perform from a range of " 612 "templates (none, constant, cached, all)", 613 }, 614 { 615 .name = QCOW2_OPT_OVERLAP_MAIN_HEADER, 616 .type = QEMU_OPT_BOOL, 617 .help = "Check for unintended writes into the main qcow2 header", 618 }, 619 { 620 .name = QCOW2_OPT_OVERLAP_ACTIVE_L1, 621 .type = QEMU_OPT_BOOL, 622 .help = "Check for unintended writes into the active L1 table", 623 }, 624 { 625 .name = QCOW2_OPT_OVERLAP_ACTIVE_L2, 626 .type = QEMU_OPT_BOOL, 627 .help = "Check for unintended writes into an active L2 table", 628 }, 629 { 630 .name = QCOW2_OPT_OVERLAP_REFCOUNT_TABLE, 631 .type = QEMU_OPT_BOOL, 632 .help = "Check for unintended writes into the refcount table", 633 }, 634 { 635 .name = QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK, 636 .type = QEMU_OPT_BOOL, 637 .help = "Check for unintended writes into a refcount block", 638 }, 639 { 640 .name = QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE, 641 .type = QEMU_OPT_BOOL, 642 .help = "Check for unintended writes into the snapshot table", 643 }, 644 { 645 .name = QCOW2_OPT_OVERLAP_INACTIVE_L1, 646 .type = QEMU_OPT_BOOL, 647 .help = "Check for unintended writes into an inactive L1 table", 648 }, 649 { 650 .name = QCOW2_OPT_OVERLAP_INACTIVE_L2, 651 .type = QEMU_OPT_BOOL, 652 .help = "Check for unintended writes into an inactive L2 table", 653 }, 654 { 655 .name = QCOW2_OPT_CACHE_SIZE, 656 .type = QEMU_OPT_SIZE, 657 .help = "Maximum combined metadata (L2 tables and refcount blocks) " 658 "cache size", 659 }, 660 { 661 .name = QCOW2_OPT_L2_CACHE_SIZE, 662 .type = QEMU_OPT_SIZE, 663 .help = "Maximum L2 table cache size", 664 }, 665 { 666 .name = QCOW2_OPT_REFCOUNT_CACHE_SIZE, 667 .type = QEMU_OPT_SIZE, 668 .help = "Maximum refcount block cache size", 669 }, 670 { 671 .name = QCOW2_OPT_CACHE_CLEAN_INTERVAL, 672 .type = QEMU_OPT_NUMBER, 673 .help = "Clean unused cache entries after this time (in seconds)", 674 }, 675 BLOCK_CRYPTO_OPT_DEF_KEY_SECRET("encrypt.", 676 "ID of secret providing qcow2 AES key or LUKS passphrase"), 677 { /* end of list */ } 678 }, 679 }; 680 681 static const char *overlap_bool_option_names[QCOW2_OL_MAX_BITNR] = { 682 [QCOW2_OL_MAIN_HEADER_BITNR] = QCOW2_OPT_OVERLAP_MAIN_HEADER, 683 [QCOW2_OL_ACTIVE_L1_BITNR] = 
QCOW2_OPT_OVERLAP_ACTIVE_L1, 684 [QCOW2_OL_ACTIVE_L2_BITNR] = QCOW2_OPT_OVERLAP_ACTIVE_L2, 685 [QCOW2_OL_REFCOUNT_TABLE_BITNR] = QCOW2_OPT_OVERLAP_REFCOUNT_TABLE, 686 [QCOW2_OL_REFCOUNT_BLOCK_BITNR] = QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK, 687 [QCOW2_OL_SNAPSHOT_TABLE_BITNR] = QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE, 688 [QCOW2_OL_INACTIVE_L1_BITNR] = QCOW2_OPT_OVERLAP_INACTIVE_L1, 689 [QCOW2_OL_INACTIVE_L2_BITNR] = QCOW2_OPT_OVERLAP_INACTIVE_L2, 690 }; 691 692 static void cache_clean_timer_cb(void *opaque) 693 { 694 BlockDriverState *bs = opaque; 695 BDRVQcow2State *s = bs->opaque; 696 qcow2_cache_clean_unused(bs, s->l2_table_cache); 697 qcow2_cache_clean_unused(bs, s->refcount_block_cache); 698 timer_mod(s->cache_clean_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 699 (int64_t) s->cache_clean_interval * 1000); 700 } 701 702 static void cache_clean_timer_init(BlockDriverState *bs, AioContext *context) 703 { 704 BDRVQcow2State *s = bs->opaque; 705 if (s->cache_clean_interval > 0) { 706 s->cache_clean_timer = aio_timer_new(context, QEMU_CLOCK_VIRTUAL, 707 SCALE_MS, cache_clean_timer_cb, 708 bs); 709 timer_mod(s->cache_clean_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 710 (int64_t) s->cache_clean_interval * 1000); 711 } 712 } 713 714 static void cache_clean_timer_del(BlockDriverState *bs) 715 { 716 BDRVQcow2State *s = bs->opaque; 717 if (s->cache_clean_timer) { 718 timer_del(s->cache_clean_timer); 719 timer_free(s->cache_clean_timer); 720 s->cache_clean_timer = NULL; 721 } 722 } 723 724 static void qcow2_detach_aio_context(BlockDriverState *bs) 725 { 726 cache_clean_timer_del(bs); 727 } 728 729 static void qcow2_attach_aio_context(BlockDriverState *bs, 730 AioContext *new_context) 731 { 732 cache_clean_timer_init(bs, new_context); 733 } 734 735 static void read_cache_sizes(BlockDriverState *bs, QemuOpts *opts, 736 uint64_t *l2_cache_size, 737 uint64_t *refcount_cache_size, Error **errp) 738 { 739 BDRVQcow2State *s = bs->opaque; 740 uint64_t combined_cache_size; 741 bool l2_cache_size_set, refcount_cache_size_set, combined_cache_size_set; 742 743 combined_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_CACHE_SIZE); 744 l2_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_L2_CACHE_SIZE); 745 refcount_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_REFCOUNT_CACHE_SIZE); 746 747 combined_cache_size = qemu_opt_get_size(opts, QCOW2_OPT_CACHE_SIZE, 0); 748 *l2_cache_size = qemu_opt_get_size(opts, QCOW2_OPT_L2_CACHE_SIZE, 0); 749 *refcount_cache_size = qemu_opt_get_size(opts, 750 QCOW2_OPT_REFCOUNT_CACHE_SIZE, 0); 751 752 if (combined_cache_size_set) { 753 if (l2_cache_size_set && refcount_cache_size_set) { 754 error_setg(errp, QCOW2_OPT_CACHE_SIZE ", " QCOW2_OPT_L2_CACHE_SIZE 755 " and " QCOW2_OPT_REFCOUNT_CACHE_SIZE " may not be set " 756 "the same time"); 757 return; 758 } else if (*l2_cache_size > combined_cache_size) { 759 error_setg(errp, QCOW2_OPT_L2_CACHE_SIZE " may not exceed " 760 QCOW2_OPT_CACHE_SIZE); 761 return; 762 } else if (*refcount_cache_size > combined_cache_size) { 763 error_setg(errp, QCOW2_OPT_REFCOUNT_CACHE_SIZE " may not exceed " 764 QCOW2_OPT_CACHE_SIZE); 765 return; 766 } 767 768 if (l2_cache_size_set) { 769 *refcount_cache_size = combined_cache_size - *l2_cache_size; 770 } else if (refcount_cache_size_set) { 771 *l2_cache_size = combined_cache_size - *refcount_cache_size; 772 } else { 773 *refcount_cache_size = combined_cache_size 774 / (DEFAULT_L2_REFCOUNT_SIZE_RATIO + 1); 775 *l2_cache_size = combined_cache_size - *refcount_cache_size; 776 } 777 } else { 778 if (!l2_cache_size_set && 
!refcount_cache_size_set) { 779 *l2_cache_size = MAX(DEFAULT_L2_CACHE_BYTE_SIZE, 780 (uint64_t)DEFAULT_L2_CACHE_CLUSTERS 781 * s->cluster_size); 782 *refcount_cache_size = *l2_cache_size 783 / DEFAULT_L2_REFCOUNT_SIZE_RATIO; 784 } else if (!l2_cache_size_set) { 785 *l2_cache_size = *refcount_cache_size 786 * DEFAULT_L2_REFCOUNT_SIZE_RATIO; 787 } else if (!refcount_cache_size_set) { 788 *refcount_cache_size = *l2_cache_size 789 / DEFAULT_L2_REFCOUNT_SIZE_RATIO; 790 } 791 } 792 } 793 794 typedef struct Qcow2ReopenState { 795 Qcow2Cache *l2_table_cache; 796 Qcow2Cache *refcount_block_cache; 797 bool use_lazy_refcounts; 798 int overlap_check; 799 bool discard_passthrough[QCOW2_DISCARD_MAX]; 800 uint64_t cache_clean_interval; 801 QCryptoBlockOpenOptions *crypto_opts; /* Disk encryption runtime options */ 802 } Qcow2ReopenState; 803 804 static int qcow2_update_options_prepare(BlockDriverState *bs, 805 Qcow2ReopenState *r, 806 QDict *options, int flags, 807 Error **errp) 808 { 809 BDRVQcow2State *s = bs->opaque; 810 QemuOpts *opts = NULL; 811 const char *opt_overlap_check, *opt_overlap_check_template; 812 int overlap_check_template = 0; 813 uint64_t l2_cache_size, refcount_cache_size; 814 int i; 815 const char *encryptfmt; 816 QDict *encryptopts = NULL; 817 Error *local_err = NULL; 818 int ret; 819 820 qdict_extract_subqdict(options, &encryptopts, "encrypt."); 821 encryptfmt = qdict_get_try_str(encryptopts, "format"); 822 823 opts = qemu_opts_create(&qcow2_runtime_opts, NULL, 0, &error_abort); 824 qemu_opts_absorb_qdict(opts, options, &local_err); 825 if (local_err) { 826 error_propagate(errp, local_err); 827 ret = -EINVAL; 828 goto fail; 829 } 830 831 /* get L2 table/refcount block cache size from command line options */ 832 read_cache_sizes(bs, opts, &l2_cache_size, &refcount_cache_size, 833 &local_err); 834 if (local_err) { 835 error_propagate(errp, local_err); 836 ret = -EINVAL; 837 goto fail; 838 } 839 840 l2_cache_size /= s->cluster_size; 841 if (l2_cache_size < MIN_L2_CACHE_SIZE) { 842 l2_cache_size = MIN_L2_CACHE_SIZE; 843 } 844 if (l2_cache_size > INT_MAX) { 845 error_setg(errp, "L2 cache size too big"); 846 ret = -EINVAL; 847 goto fail; 848 } 849 850 refcount_cache_size /= s->cluster_size; 851 if (refcount_cache_size < MIN_REFCOUNT_CACHE_SIZE) { 852 refcount_cache_size = MIN_REFCOUNT_CACHE_SIZE; 853 } 854 if (refcount_cache_size > INT_MAX) { 855 error_setg(errp, "Refcount cache size too big"); 856 ret = -EINVAL; 857 goto fail; 858 } 859 860 /* alloc new L2 table/refcount block cache, flush old one */ 861 if (s->l2_table_cache) { 862 ret = qcow2_cache_flush(bs, s->l2_table_cache); 863 if (ret) { 864 error_setg_errno(errp, -ret, "Failed to flush the L2 table cache"); 865 goto fail; 866 } 867 } 868 869 if (s->refcount_block_cache) { 870 ret = qcow2_cache_flush(bs, s->refcount_block_cache); 871 if (ret) { 872 error_setg_errno(errp, -ret, 873 "Failed to flush the refcount block cache"); 874 goto fail; 875 } 876 } 877 878 r->l2_table_cache = qcow2_cache_create(bs, l2_cache_size); 879 r->refcount_block_cache = qcow2_cache_create(bs, refcount_cache_size); 880 if (r->l2_table_cache == NULL || r->refcount_block_cache == NULL) { 881 error_setg(errp, "Could not allocate metadata caches"); 882 ret = -ENOMEM; 883 goto fail; 884 } 885 886 /* New interval for cache cleanup timer */ 887 r->cache_clean_interval = 888 qemu_opt_get_number(opts, QCOW2_OPT_CACHE_CLEAN_INTERVAL, 889 s->cache_clean_interval); 890 #ifndef CONFIG_LINUX 891 if (r->cache_clean_interval != 0) { 892 error_setg(errp, 
QCOW2_OPT_CACHE_CLEAN_INTERVAL 893 " not supported on this host"); 894 ret = -EINVAL; 895 goto fail; 896 } 897 #endif 898 if (r->cache_clean_interval > UINT_MAX) { 899 error_setg(errp, "Cache clean interval too big"); 900 ret = -EINVAL; 901 goto fail; 902 } 903 904 /* lazy-refcounts; flush if going from enabled to disabled */ 905 r->use_lazy_refcounts = qemu_opt_get_bool(opts, QCOW2_OPT_LAZY_REFCOUNTS, 906 (s->compatible_features & QCOW2_COMPAT_LAZY_REFCOUNTS)); 907 if (r->use_lazy_refcounts && s->qcow_version < 3) { 908 error_setg(errp, "Lazy refcounts require a qcow2 image with at least " 909 "qemu 1.1 compatibility level"); 910 ret = -EINVAL; 911 goto fail; 912 } 913 914 if (s->use_lazy_refcounts && !r->use_lazy_refcounts) { 915 ret = qcow2_mark_clean(bs); 916 if (ret < 0) { 917 error_setg_errno(errp, -ret, "Failed to disable lazy refcounts"); 918 goto fail; 919 } 920 } 921 922 /* Overlap check options */ 923 opt_overlap_check = qemu_opt_get(opts, QCOW2_OPT_OVERLAP); 924 opt_overlap_check_template = qemu_opt_get(opts, QCOW2_OPT_OVERLAP_TEMPLATE); 925 if (opt_overlap_check_template && opt_overlap_check && 926 strcmp(opt_overlap_check_template, opt_overlap_check)) 927 { 928 error_setg(errp, "Conflicting values for qcow2 options '" 929 QCOW2_OPT_OVERLAP "' ('%s') and '" QCOW2_OPT_OVERLAP_TEMPLATE 930 "' ('%s')", opt_overlap_check, opt_overlap_check_template); 931 ret = -EINVAL; 932 goto fail; 933 } 934 if (!opt_overlap_check) { 935 opt_overlap_check = opt_overlap_check_template ?: "cached"; 936 } 937 938 if (!strcmp(opt_overlap_check, "none")) { 939 overlap_check_template = 0; 940 } else if (!strcmp(opt_overlap_check, "constant")) { 941 overlap_check_template = QCOW2_OL_CONSTANT; 942 } else if (!strcmp(opt_overlap_check, "cached")) { 943 overlap_check_template = QCOW2_OL_CACHED; 944 } else if (!strcmp(opt_overlap_check, "all")) { 945 overlap_check_template = QCOW2_OL_ALL; 946 } else { 947 error_setg(errp, "Unsupported value '%s' for qcow2 option " 948 "'overlap-check'. 
Allowed are any of the following: " 949 "none, constant, cached, all", opt_overlap_check); 950 ret = -EINVAL; 951 goto fail; 952 } 953 954 r->overlap_check = 0; 955 for (i = 0; i < QCOW2_OL_MAX_BITNR; i++) { 956 /* overlap-check defines a template bitmask, but every flag may be 957 * overwritten through the associated boolean option */ 958 r->overlap_check |= 959 qemu_opt_get_bool(opts, overlap_bool_option_names[i], 960 overlap_check_template & (1 << i)) << i; 961 } 962 963 r->discard_passthrough[QCOW2_DISCARD_NEVER] = false; 964 r->discard_passthrough[QCOW2_DISCARD_ALWAYS] = true; 965 r->discard_passthrough[QCOW2_DISCARD_REQUEST] = 966 qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_REQUEST, 967 flags & BDRV_O_UNMAP); 968 r->discard_passthrough[QCOW2_DISCARD_SNAPSHOT] = 969 qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_SNAPSHOT, true); 970 r->discard_passthrough[QCOW2_DISCARD_OTHER] = 971 qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_OTHER, false); 972 973 switch (s->crypt_method_header) { 974 case QCOW_CRYPT_NONE: 975 if (encryptfmt) { 976 error_setg(errp, "No encryption in image header, but options " 977 "specified format '%s'", encryptfmt); 978 ret = -EINVAL; 979 goto fail; 980 } 981 break; 982 983 case QCOW_CRYPT_AES: 984 if (encryptfmt && !g_str_equal(encryptfmt, "aes")) { 985 error_setg(errp, 986 "Header reported 'aes' encryption format but " 987 "options specify '%s'", encryptfmt); 988 ret = -EINVAL; 989 goto fail; 990 } 991 qdict_del(encryptopts, "format"); 992 r->crypto_opts = block_crypto_open_opts_init( 993 Q_CRYPTO_BLOCK_FORMAT_QCOW, encryptopts, errp); 994 break; 995 996 case QCOW_CRYPT_LUKS: 997 if (encryptfmt && !g_str_equal(encryptfmt, "luks")) { 998 error_setg(errp, 999 "Header reported 'luks' encryption format but " 1000 "options specify '%s'", encryptfmt); 1001 ret = -EINVAL; 1002 goto fail; 1003 } 1004 qdict_del(encryptopts, "format"); 1005 r->crypto_opts = block_crypto_open_opts_init( 1006 Q_CRYPTO_BLOCK_FORMAT_LUKS, encryptopts, errp); 1007 break; 1008 1009 default: 1010 error_setg(errp, "Unsupported encryption method %d", 1011 s->crypt_method_header); 1012 break; 1013 } 1014 if (s->crypt_method_header != QCOW_CRYPT_NONE && !r->crypto_opts) { 1015 ret = -EINVAL; 1016 goto fail; 1017 } 1018 1019 ret = 0; 1020 fail: 1021 QDECREF(encryptopts); 1022 qemu_opts_del(opts); 1023 opts = NULL; 1024 return ret; 1025 } 1026 1027 static void qcow2_update_options_commit(BlockDriverState *bs, 1028 Qcow2ReopenState *r) 1029 { 1030 BDRVQcow2State *s = bs->opaque; 1031 int i; 1032 1033 if (s->l2_table_cache) { 1034 qcow2_cache_destroy(bs, s->l2_table_cache); 1035 } 1036 if (s->refcount_block_cache) { 1037 qcow2_cache_destroy(bs, s->refcount_block_cache); 1038 } 1039 s->l2_table_cache = r->l2_table_cache; 1040 s->refcount_block_cache = r->refcount_block_cache; 1041 1042 s->overlap_check = r->overlap_check; 1043 s->use_lazy_refcounts = r->use_lazy_refcounts; 1044 1045 for (i = 0; i < QCOW2_DISCARD_MAX; i++) { 1046 s->discard_passthrough[i] = r->discard_passthrough[i]; 1047 } 1048 1049 if (s->cache_clean_interval != r->cache_clean_interval) { 1050 cache_clean_timer_del(bs); 1051 s->cache_clean_interval = r->cache_clean_interval; 1052 cache_clean_timer_init(bs, bdrv_get_aio_context(bs)); 1053 } 1054 1055 qapi_free_QCryptoBlockOpenOptions(s->crypto_opts); 1056 s->crypto_opts = r->crypto_opts; 1057 } 1058 1059 static void qcow2_update_options_abort(BlockDriverState *bs, 1060 Qcow2ReopenState *r) 1061 { 1062 if (r->l2_table_cache) { 1063 qcow2_cache_destroy(bs, r->l2_table_cache); 1064 } 1065 if 
(r->refcount_block_cache) { 1066 qcow2_cache_destroy(bs, r->refcount_block_cache); 1067 } 1068 qapi_free_QCryptoBlockOpenOptions(r->crypto_opts); 1069 } 1070 1071 static int qcow2_update_options(BlockDriverState *bs, QDict *options, 1072 int flags, Error **errp) 1073 { 1074 Qcow2ReopenState r = {}; 1075 int ret; 1076 1077 ret = qcow2_update_options_prepare(bs, &r, options, flags, errp); 1078 if (ret >= 0) { 1079 qcow2_update_options_commit(bs, &r); 1080 } else { 1081 qcow2_update_options_abort(bs, &r); 1082 } 1083 1084 return ret; 1085 } 1086 1087 static int qcow2_do_open(BlockDriverState *bs, QDict *options, int flags, 1088 Error **errp) 1089 { 1090 BDRVQcow2State *s = bs->opaque; 1091 unsigned int len, i; 1092 int ret = 0; 1093 QCowHeader header; 1094 Error *local_err = NULL; 1095 uint64_t ext_end; 1096 uint64_t l1_vm_state_index; 1097 bool update_header = false; 1098 1099 ret = bdrv_pread(bs->file, 0, &header, sizeof(header)); 1100 if (ret < 0) { 1101 error_setg_errno(errp, -ret, "Could not read qcow2 header"); 1102 goto fail; 1103 } 1104 be32_to_cpus(&header.magic); 1105 be32_to_cpus(&header.version); 1106 be64_to_cpus(&header.backing_file_offset); 1107 be32_to_cpus(&header.backing_file_size); 1108 be64_to_cpus(&header.size); 1109 be32_to_cpus(&header.cluster_bits); 1110 be32_to_cpus(&header.crypt_method); 1111 be64_to_cpus(&header.l1_table_offset); 1112 be32_to_cpus(&header.l1_size); 1113 be64_to_cpus(&header.refcount_table_offset); 1114 be32_to_cpus(&header.refcount_table_clusters); 1115 be64_to_cpus(&header.snapshots_offset); 1116 be32_to_cpus(&header.nb_snapshots); 1117 1118 if (header.magic != QCOW_MAGIC) { 1119 error_setg(errp, "Image is not in qcow2 format"); 1120 ret = -EINVAL; 1121 goto fail; 1122 } 1123 if (header.version < 2 || header.version > 3) { 1124 error_setg(errp, "Unsupported qcow2 version %" PRIu32, header.version); 1125 ret = -ENOTSUP; 1126 goto fail; 1127 } 1128 1129 s->qcow_version = header.version; 1130 1131 /* Initialise cluster size */ 1132 if (header.cluster_bits < MIN_CLUSTER_BITS || 1133 header.cluster_bits > MAX_CLUSTER_BITS) { 1134 error_setg(errp, "Unsupported cluster size: 2^%" PRIu32, 1135 header.cluster_bits); 1136 ret = -EINVAL; 1137 goto fail; 1138 } 1139 1140 s->cluster_bits = header.cluster_bits; 1141 s->cluster_size = 1 << s->cluster_bits; 1142 s->cluster_sectors = 1 << (s->cluster_bits - 9); 1143 1144 /* Initialise version 3 header fields */ 1145 if (header.version == 2) { 1146 header.incompatible_features = 0; 1147 header.compatible_features = 0; 1148 header.autoclear_features = 0; 1149 header.refcount_order = 4; 1150 header.header_length = 72; 1151 } else { 1152 be64_to_cpus(&header.incompatible_features); 1153 be64_to_cpus(&header.compatible_features); 1154 be64_to_cpus(&header.autoclear_features); 1155 be32_to_cpus(&header.refcount_order); 1156 be32_to_cpus(&header.header_length); 1157 1158 if (header.header_length < 104) { 1159 error_setg(errp, "qcow2 header too short"); 1160 ret = -EINVAL; 1161 goto fail; 1162 } 1163 } 1164 1165 if (header.header_length > s->cluster_size) { 1166 error_setg(errp, "qcow2 header exceeds cluster size"); 1167 ret = -EINVAL; 1168 goto fail; 1169 } 1170 1171 if (header.header_length > sizeof(header)) { 1172 s->unknown_header_fields_size = header.header_length - sizeof(header); 1173 s->unknown_header_fields = g_malloc(s->unknown_header_fields_size); 1174 ret = bdrv_pread(bs->file, sizeof(header), s->unknown_header_fields, 1175 s->unknown_header_fields_size); 1176 if (ret < 0) { 1177 error_setg_errno(errp, -ret, 
"Could not read unknown qcow2 header " 1178 "fields"); 1179 goto fail; 1180 } 1181 } 1182 1183 if (header.backing_file_offset > s->cluster_size) { 1184 error_setg(errp, "Invalid backing file offset"); 1185 ret = -EINVAL; 1186 goto fail; 1187 } 1188 1189 if (header.backing_file_offset) { 1190 ext_end = header.backing_file_offset; 1191 } else { 1192 ext_end = 1 << header.cluster_bits; 1193 } 1194 1195 /* Handle feature bits */ 1196 s->incompatible_features = header.incompatible_features; 1197 s->compatible_features = header.compatible_features; 1198 s->autoclear_features = header.autoclear_features; 1199 1200 if (s->incompatible_features & ~QCOW2_INCOMPAT_MASK) { 1201 void *feature_table = NULL; 1202 qcow2_read_extensions(bs, header.header_length, ext_end, 1203 &feature_table, flags, NULL, NULL); 1204 report_unsupported_feature(errp, feature_table, 1205 s->incompatible_features & 1206 ~QCOW2_INCOMPAT_MASK); 1207 ret = -ENOTSUP; 1208 g_free(feature_table); 1209 goto fail; 1210 } 1211 1212 if (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT) { 1213 /* Corrupt images may not be written to unless they are being repaired 1214 */ 1215 if ((flags & BDRV_O_RDWR) && !(flags & BDRV_O_CHECK)) { 1216 error_setg(errp, "qcow2: Image is corrupt; cannot be opened " 1217 "read/write"); 1218 ret = -EACCES; 1219 goto fail; 1220 } 1221 } 1222 1223 /* Check support for various header values */ 1224 if (header.refcount_order > 6) { 1225 error_setg(errp, "Reference count entry width too large; may not " 1226 "exceed 64 bits"); 1227 ret = -EINVAL; 1228 goto fail; 1229 } 1230 s->refcount_order = header.refcount_order; 1231 s->refcount_bits = 1 << s->refcount_order; 1232 s->refcount_max = UINT64_C(1) << (s->refcount_bits - 1); 1233 s->refcount_max += s->refcount_max - 1; 1234 1235 s->crypt_method_header = header.crypt_method; 1236 if (s->crypt_method_header) { 1237 if (bdrv_uses_whitelist() && 1238 s->crypt_method_header == QCOW_CRYPT_AES) { 1239 error_setg(errp, 1240 "Use of AES-CBC encrypted qcow2 images is no longer " 1241 "supported in system emulators"); 1242 error_append_hint(errp, 1243 "You can use 'qemu-img convert' to convert your " 1244 "image to an alternative supported format, such " 1245 "as unencrypted qcow2, or raw with the LUKS " 1246 "format instead.\n"); 1247 ret = -ENOSYS; 1248 goto fail; 1249 } 1250 1251 if (s->crypt_method_header == QCOW_CRYPT_AES) { 1252 s->crypt_physical_offset = false; 1253 } else { 1254 /* Assuming LUKS and any future crypt methods we 1255 * add will all use physical offsets, due to the 1256 * fact that the alternative is insecure... 
*/ 1257 s->crypt_physical_offset = true; 1258 } 1259 1260 bs->encrypted = true; 1261 } 1262 1263 s->l2_bits = s->cluster_bits - 3; /* L2 is always one cluster */ 1264 s->l2_size = 1 << s->l2_bits; 1265 /* 2^(s->refcount_order - 3) is the refcount width in bytes */ 1266 s->refcount_block_bits = s->cluster_bits - (s->refcount_order - 3); 1267 s->refcount_block_size = 1 << s->refcount_block_bits; 1268 bs->total_sectors = header.size / 512; 1269 s->csize_shift = (62 - (s->cluster_bits - 8)); 1270 s->csize_mask = (1 << (s->cluster_bits - 8)) - 1; 1271 s->cluster_offset_mask = (1LL << s->csize_shift) - 1; 1272 1273 s->refcount_table_offset = header.refcount_table_offset; 1274 s->refcount_table_size = 1275 header.refcount_table_clusters << (s->cluster_bits - 3); 1276 1277 if (header.refcount_table_clusters > qcow2_max_refcount_clusters(s)) { 1278 error_setg(errp, "Reference count table too large"); 1279 ret = -EINVAL; 1280 goto fail; 1281 } 1282 1283 ret = validate_table_offset(bs, s->refcount_table_offset, 1284 s->refcount_table_size, sizeof(uint64_t)); 1285 if (ret < 0) { 1286 error_setg(errp, "Invalid reference count table offset"); 1287 goto fail; 1288 } 1289 1290 /* Snapshot table offset/length */ 1291 if (header.nb_snapshots > QCOW_MAX_SNAPSHOTS) { 1292 error_setg(errp, "Too many snapshots"); 1293 ret = -EINVAL; 1294 goto fail; 1295 } 1296 1297 ret = validate_table_offset(bs, header.snapshots_offset, 1298 header.nb_snapshots, 1299 sizeof(QCowSnapshotHeader)); 1300 if (ret < 0) { 1301 error_setg(errp, "Invalid snapshot table offset"); 1302 goto fail; 1303 } 1304 1305 /* read the level 1 table */ 1306 if (header.l1_size > QCOW_MAX_L1_SIZE / sizeof(uint64_t)) { 1307 error_setg(errp, "Active L1 table too large"); 1308 ret = -EFBIG; 1309 goto fail; 1310 } 1311 s->l1_size = header.l1_size; 1312 1313 l1_vm_state_index = size_to_l1(s, header.size); 1314 if (l1_vm_state_index > INT_MAX) { 1315 error_setg(errp, "Image is too big"); 1316 ret = -EFBIG; 1317 goto fail; 1318 } 1319 s->l1_vm_state_index = l1_vm_state_index; 1320 1321 /* the L1 table must contain at least enough entries to put 1322 header.size bytes */ 1323 if (s->l1_size < s->l1_vm_state_index) { 1324 error_setg(errp, "L1 table is too small"); 1325 ret = -EINVAL; 1326 goto fail; 1327 } 1328 1329 ret = validate_table_offset(bs, header.l1_table_offset, 1330 header.l1_size, sizeof(uint64_t)); 1331 if (ret < 0) { 1332 error_setg(errp, "Invalid L1 table offset"); 1333 goto fail; 1334 } 1335 s->l1_table_offset = header.l1_table_offset; 1336 1337 1338 if (s->l1_size > 0) { 1339 s->l1_table = qemu_try_blockalign(bs->file->bs, 1340 align_offset(s->l1_size * sizeof(uint64_t), 512)); 1341 if (s->l1_table == NULL) { 1342 error_setg(errp, "Could not allocate L1 table"); 1343 ret = -ENOMEM; 1344 goto fail; 1345 } 1346 ret = bdrv_pread(bs->file, s->l1_table_offset, s->l1_table, 1347 s->l1_size * sizeof(uint64_t)); 1348 if (ret < 0) { 1349 error_setg_errno(errp, -ret, "Could not read L1 table"); 1350 goto fail; 1351 } 1352 for(i = 0;i < s->l1_size; i++) { 1353 be64_to_cpus(&s->l1_table[i]); 1354 } 1355 } 1356 1357 /* Parse driver-specific options */ 1358 ret = qcow2_update_options(bs, options, flags, errp); 1359 if (ret < 0) { 1360 goto fail; 1361 } 1362 1363 s->cluster_cache_offset = -1; 1364 s->flags = flags; 1365 1366 ret = qcow2_refcount_init(bs); 1367 if (ret != 0) { 1368 error_setg_errno(errp, -ret, "Could not initialize refcount handling"); 1369 goto fail; 1370 } 1371 1372 QLIST_INIT(&s->cluster_allocs); 1373 QTAILQ_INIT(&s->discards); 1374 1375 
/* read qcow2 extensions */ 1376 if (qcow2_read_extensions(bs, header.header_length, ext_end, NULL, 1377 flags, &update_header, &local_err)) { 1378 error_propagate(errp, local_err); 1379 ret = -EINVAL; 1380 goto fail; 1381 } 1382 1383 /* qcow2_read_extension may have set up the crypto context 1384 * if the crypt method needs a header region, some methods 1385 * don't need header extensions, so must check here 1386 */ 1387 if (s->crypt_method_header && !s->crypto) { 1388 if (s->crypt_method_header == QCOW_CRYPT_AES) { 1389 unsigned int cflags = 0; 1390 if (flags & BDRV_O_NO_IO) { 1391 cflags |= QCRYPTO_BLOCK_OPEN_NO_IO; 1392 } 1393 s->crypto = qcrypto_block_open(s->crypto_opts, "encrypt.", 1394 NULL, NULL, cflags, errp); 1395 if (!s->crypto) { 1396 ret = -EINVAL; 1397 goto fail; 1398 } 1399 } else if (!(flags & BDRV_O_NO_IO)) { 1400 error_setg(errp, "Missing CRYPTO header for crypt method %d", 1401 s->crypt_method_header); 1402 ret = -EINVAL; 1403 goto fail; 1404 } 1405 } 1406 1407 /* read the backing file name */ 1408 if (header.backing_file_offset != 0) { 1409 len = header.backing_file_size; 1410 if (len > MIN(1023, s->cluster_size - header.backing_file_offset) || 1411 len >= sizeof(bs->backing_file)) { 1412 error_setg(errp, "Backing file name too long"); 1413 ret = -EINVAL; 1414 goto fail; 1415 } 1416 ret = bdrv_pread(bs->file, header.backing_file_offset, 1417 bs->backing_file, len); 1418 if (ret < 0) { 1419 error_setg_errno(errp, -ret, "Could not read backing file name"); 1420 goto fail; 1421 } 1422 bs->backing_file[len] = '\0'; 1423 s->image_backing_file = g_strdup(bs->backing_file); 1424 } 1425 1426 /* Internal snapshots */ 1427 s->snapshots_offset = header.snapshots_offset; 1428 s->nb_snapshots = header.nb_snapshots; 1429 1430 ret = qcow2_read_snapshots(bs); 1431 if (ret < 0) { 1432 error_setg_errno(errp, -ret, "Could not read snapshots"); 1433 goto fail; 1434 } 1435 1436 /* Clear unknown autoclear feature bits */ 1437 update_header |= s->autoclear_features & ~QCOW2_AUTOCLEAR_MASK; 1438 update_header = 1439 update_header && !bs->read_only && !(flags & BDRV_O_INACTIVE); 1440 if (update_header) { 1441 s->autoclear_features &= QCOW2_AUTOCLEAR_MASK; 1442 } 1443 1444 if (qcow2_load_autoloading_dirty_bitmaps(bs, &local_err)) { 1445 update_header = false; 1446 } 1447 if (local_err != NULL) { 1448 error_propagate(errp, local_err); 1449 ret = -EINVAL; 1450 goto fail; 1451 } 1452 1453 if (update_header) { 1454 ret = qcow2_update_header(bs); 1455 if (ret < 0) { 1456 error_setg_errno(errp, -ret, "Could not update qcow2 header"); 1457 goto fail; 1458 } 1459 } 1460 1461 /* Initialise locks */ 1462 qemu_co_mutex_init(&s->lock); 1463 bs->supported_zero_flags = BDRV_REQ_MAY_UNMAP; 1464 1465 /* Repair image if dirty */ 1466 if (!(flags & (BDRV_O_CHECK | BDRV_O_INACTIVE)) && !bs->read_only && 1467 (s->incompatible_features & QCOW2_INCOMPAT_DIRTY)) { 1468 BdrvCheckResult result = {0}; 1469 1470 ret = qcow2_check(bs, &result, BDRV_FIX_ERRORS | BDRV_FIX_LEAKS); 1471 if (ret < 0) { 1472 error_setg_errno(errp, -ret, "Could not repair dirty image"); 1473 goto fail; 1474 } 1475 } 1476 1477 #ifdef DEBUG_ALLOC 1478 { 1479 BdrvCheckResult result = {0}; 1480 qcow2_check_refcounts(bs, &result, 0); 1481 } 1482 #endif 1483 return ret; 1484 1485 fail: 1486 g_free(s->unknown_header_fields); 1487 cleanup_unknown_header_ext(bs); 1488 qcow2_free_snapshots(bs); 1489 qcow2_refcount_close(bs); 1490 qemu_vfree(s->l1_table); 1491 /* else pre-write overlap checks in cache_destroy may crash */ 1492 s->l1_table = NULL; 1493 
cache_clean_timer_del(bs); 1494 if (s->l2_table_cache) { 1495 qcow2_cache_destroy(bs, s->l2_table_cache); 1496 } 1497 if (s->refcount_block_cache) { 1498 qcow2_cache_destroy(bs, s->refcount_block_cache); 1499 } 1500 qcrypto_block_free(s->crypto); 1501 qapi_free_QCryptoBlockOpenOptions(s->crypto_opts); 1502 return ret; 1503 } 1504 1505 static int qcow2_open(BlockDriverState *bs, QDict *options, int flags, 1506 Error **errp) 1507 { 1508 bs->file = bdrv_open_child(NULL, options, "file", bs, &child_file, 1509 false, errp); 1510 if (!bs->file) { 1511 return -EINVAL; 1512 } 1513 1514 return qcow2_do_open(bs, options, flags, errp); 1515 } 1516 1517 static void qcow2_refresh_limits(BlockDriverState *bs, Error **errp) 1518 { 1519 BDRVQcow2State *s = bs->opaque; 1520 1521 if (bs->encrypted) { 1522 /* Encryption works on a sector granularity */ 1523 bs->bl.request_alignment = BDRV_SECTOR_SIZE; 1524 } 1525 bs->bl.pwrite_zeroes_alignment = s->cluster_size; 1526 bs->bl.pdiscard_alignment = s->cluster_size; 1527 } 1528 1529 static int qcow2_reopen_prepare(BDRVReopenState *state, 1530 BlockReopenQueue *queue, Error **errp) 1531 { 1532 Qcow2ReopenState *r; 1533 int ret; 1534 1535 r = g_new0(Qcow2ReopenState, 1); 1536 state->opaque = r; 1537 1538 ret = qcow2_update_options_prepare(state->bs, r, state->options, 1539 state->flags, errp); 1540 if (ret < 0) { 1541 goto fail; 1542 } 1543 1544 /* We need to write out any unwritten data if we reopen read-only. */ 1545 if ((state->flags & BDRV_O_RDWR) == 0) { 1546 ret = qcow2_reopen_bitmaps_ro(state->bs, errp); 1547 if (ret < 0) { 1548 goto fail; 1549 } 1550 1551 ret = bdrv_flush(state->bs); 1552 if (ret < 0) { 1553 goto fail; 1554 } 1555 1556 ret = qcow2_mark_clean(state->bs); 1557 if (ret < 0) { 1558 goto fail; 1559 } 1560 } 1561 1562 return 0; 1563 1564 fail: 1565 qcow2_update_options_abort(state->bs, r); 1566 g_free(r); 1567 return ret; 1568 } 1569 1570 static void qcow2_reopen_commit(BDRVReopenState *state) 1571 { 1572 qcow2_update_options_commit(state->bs, state->opaque); 1573 g_free(state->opaque); 1574 } 1575 1576 static void qcow2_reopen_abort(BDRVReopenState *state) 1577 { 1578 qcow2_update_options_abort(state->bs, state->opaque); 1579 g_free(state->opaque); 1580 } 1581 1582 static void qcow2_join_options(QDict *options, QDict *old_options) 1583 { 1584 bool has_new_overlap_template = 1585 qdict_haskey(options, QCOW2_OPT_OVERLAP) || 1586 qdict_haskey(options, QCOW2_OPT_OVERLAP_TEMPLATE); 1587 bool has_new_total_cache_size = 1588 qdict_haskey(options, QCOW2_OPT_CACHE_SIZE); 1589 bool has_all_cache_options; 1590 1591 /* New overlap template overrides all old overlap options */ 1592 if (has_new_overlap_template) { 1593 qdict_del(old_options, QCOW2_OPT_OVERLAP); 1594 qdict_del(old_options, QCOW2_OPT_OVERLAP_TEMPLATE); 1595 qdict_del(old_options, QCOW2_OPT_OVERLAP_MAIN_HEADER); 1596 qdict_del(old_options, QCOW2_OPT_OVERLAP_ACTIVE_L1); 1597 qdict_del(old_options, QCOW2_OPT_OVERLAP_ACTIVE_L2); 1598 qdict_del(old_options, QCOW2_OPT_OVERLAP_REFCOUNT_TABLE); 1599 qdict_del(old_options, QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK); 1600 qdict_del(old_options, QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE); 1601 qdict_del(old_options, QCOW2_OPT_OVERLAP_INACTIVE_L1); 1602 qdict_del(old_options, QCOW2_OPT_OVERLAP_INACTIVE_L2); 1603 } 1604 1605 /* New total cache size overrides all old options */ 1606 if (qdict_haskey(options, QCOW2_OPT_CACHE_SIZE)) { 1607 qdict_del(old_options, QCOW2_OPT_L2_CACHE_SIZE); 1608 qdict_del(old_options, QCOW2_OPT_REFCOUNT_CACHE_SIZE); 1609 } 1610 1611 
qdict_join(options, old_options, false); 1612 1613 /* 1614 * If after merging all cache size options are set, an old total size is 1615 * overwritten. Do keep all options, however, if all three are new. The 1616 * resulting error message is what we want to happen. 1617 */ 1618 has_all_cache_options = 1619 qdict_haskey(options, QCOW2_OPT_CACHE_SIZE) || 1620 qdict_haskey(options, QCOW2_OPT_L2_CACHE_SIZE) || 1621 qdict_haskey(options, QCOW2_OPT_REFCOUNT_CACHE_SIZE); 1622 1623 if (has_all_cache_options && !has_new_total_cache_size) { 1624 qdict_del(options, QCOW2_OPT_CACHE_SIZE); 1625 } 1626 } 1627 1628 static int64_t coroutine_fn qcow2_co_get_block_status(BlockDriverState *bs, 1629 int64_t sector_num, int nb_sectors, int *pnum, BlockDriverState **file) 1630 { 1631 BDRVQcow2State *s = bs->opaque; 1632 uint64_t cluster_offset; 1633 int index_in_cluster, ret; 1634 unsigned int bytes; 1635 int64_t status = 0; 1636 1637 bytes = MIN(INT_MAX, nb_sectors * BDRV_SECTOR_SIZE); 1638 qemu_co_mutex_lock(&s->lock); 1639 ret = qcow2_get_cluster_offset(bs, sector_num << 9, &bytes, 1640 &cluster_offset); 1641 qemu_co_mutex_unlock(&s->lock); 1642 if (ret < 0) { 1643 return ret; 1644 } 1645 1646 *pnum = bytes >> BDRV_SECTOR_BITS; 1647 1648 if (cluster_offset != 0 && ret != QCOW2_CLUSTER_COMPRESSED && 1649 !s->crypto) { 1650 index_in_cluster = sector_num & (s->cluster_sectors - 1); 1651 cluster_offset |= (index_in_cluster << BDRV_SECTOR_BITS); 1652 *file = bs->file->bs; 1653 status |= BDRV_BLOCK_OFFSET_VALID | cluster_offset; 1654 } 1655 if (ret == QCOW2_CLUSTER_ZERO_PLAIN || ret == QCOW2_CLUSTER_ZERO_ALLOC) { 1656 status |= BDRV_BLOCK_ZERO; 1657 } else if (ret != QCOW2_CLUSTER_UNALLOCATED) { 1658 status |= BDRV_BLOCK_DATA; 1659 } 1660 return status; 1661 } 1662 1663 /* handle reading after the end of the backing file */ 1664 int qcow2_backing_read1(BlockDriverState *bs, QEMUIOVector *qiov, 1665 int64_t offset, int bytes) 1666 { 1667 uint64_t bs_size = bs->total_sectors * BDRV_SECTOR_SIZE; 1668 int n1; 1669 1670 if ((offset + bytes) <= bs_size) { 1671 return bytes; 1672 } 1673 1674 if (offset >= bs_size) { 1675 n1 = 0; 1676 } else { 1677 n1 = bs_size - offset; 1678 } 1679 1680 qemu_iovec_memset(qiov, n1, 0, bytes - n1); 1681 1682 return n1; 1683 } 1684 1685 static coroutine_fn int qcow2_co_preadv(BlockDriverState *bs, uint64_t offset, 1686 uint64_t bytes, QEMUIOVector *qiov, 1687 int flags) 1688 { 1689 BDRVQcow2State *s = bs->opaque; 1690 int offset_in_cluster, n1; 1691 int ret; 1692 unsigned int cur_bytes; /* number of bytes in current iteration */ 1693 uint64_t cluster_offset = 0; 1694 uint64_t bytes_done = 0; 1695 QEMUIOVector hd_qiov; 1696 uint8_t *cluster_data = NULL; 1697 1698 qemu_iovec_init(&hd_qiov, qiov->niov); 1699 1700 qemu_co_mutex_lock(&s->lock); 1701 1702 while (bytes != 0) { 1703 1704 /* prepare next request */ 1705 cur_bytes = MIN(bytes, INT_MAX); 1706 if (s->crypto) { 1707 cur_bytes = MIN(cur_bytes, 1708 QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); 1709 } 1710 1711 ret = qcow2_get_cluster_offset(bs, offset, &cur_bytes, &cluster_offset); 1712 if (ret < 0) { 1713 goto fail; 1714 } 1715 1716 offset_in_cluster = offset_into_cluster(s, offset); 1717 1718 qemu_iovec_reset(&hd_qiov); 1719 qemu_iovec_concat(&hd_qiov, qiov, bytes_done, cur_bytes); 1720 1721 switch (ret) { 1722 case QCOW2_CLUSTER_UNALLOCATED: 1723 1724 if (bs->backing) { 1725 /* read from the base image */ 1726 n1 = qcow2_backing_read1(bs->backing->bs, &hd_qiov, 1727 offset, cur_bytes); 1728 if (n1 > 0) { 1729 QEMUIOVector local_qiov; 
1730 1731 qemu_iovec_init(&local_qiov, hd_qiov.niov); 1732 qemu_iovec_concat(&local_qiov, &hd_qiov, 0, n1); 1733 1734 BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO); 1735 qemu_co_mutex_unlock(&s->lock); 1736 ret = bdrv_co_preadv(bs->backing, offset, n1, 1737 &local_qiov, 0); 1738 qemu_co_mutex_lock(&s->lock); 1739 1740 qemu_iovec_destroy(&local_qiov); 1741 1742 if (ret < 0) { 1743 goto fail; 1744 } 1745 } 1746 } else { 1747 /* Note: in this case, no need to wait */ 1748 qemu_iovec_memset(&hd_qiov, 0, 0, cur_bytes); 1749 } 1750 break; 1751 1752 case QCOW2_CLUSTER_ZERO_PLAIN: 1753 case QCOW2_CLUSTER_ZERO_ALLOC: 1754 qemu_iovec_memset(&hd_qiov, 0, 0, cur_bytes); 1755 break; 1756 1757 case QCOW2_CLUSTER_COMPRESSED: 1758 /* add AIO support for compressed blocks ? */ 1759 ret = qcow2_decompress_cluster(bs, cluster_offset); 1760 if (ret < 0) { 1761 goto fail; 1762 } 1763 1764 qemu_iovec_from_buf(&hd_qiov, 0, 1765 s->cluster_cache + offset_in_cluster, 1766 cur_bytes); 1767 break; 1768 1769 case QCOW2_CLUSTER_NORMAL: 1770 if ((cluster_offset & 511) != 0) { 1771 ret = -EIO; 1772 goto fail; 1773 } 1774 1775 if (bs->encrypted) { 1776 assert(s->crypto); 1777 1778 /* 1779 * For encrypted images, read everything into a temporary 1780 * contiguous buffer on which the AES functions can work. 1781 */ 1782 if (!cluster_data) { 1783 cluster_data = 1784 qemu_try_blockalign(bs->file->bs, 1785 QCOW_MAX_CRYPT_CLUSTERS 1786 * s->cluster_size); 1787 if (cluster_data == NULL) { 1788 ret = -ENOMEM; 1789 goto fail; 1790 } 1791 } 1792 1793 assert(cur_bytes <= QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); 1794 qemu_iovec_reset(&hd_qiov); 1795 qemu_iovec_add(&hd_qiov, cluster_data, cur_bytes); 1796 } 1797 1798 BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO); 1799 qemu_co_mutex_unlock(&s->lock); 1800 ret = bdrv_co_preadv(bs->file, 1801 cluster_offset + offset_in_cluster, 1802 cur_bytes, &hd_qiov, 0); 1803 qemu_co_mutex_lock(&s->lock); 1804 if (ret < 0) { 1805 goto fail; 1806 } 1807 if (bs->encrypted) { 1808 assert(s->crypto); 1809 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0); 1810 assert((cur_bytes & (BDRV_SECTOR_SIZE - 1)) == 0); 1811 if (qcrypto_block_decrypt(s->crypto, 1812 (s->crypt_physical_offset ? 
1813 cluster_offset + offset_in_cluster : 1814 offset) >> BDRV_SECTOR_BITS, 1815 cluster_data, 1816 cur_bytes, 1817 NULL) < 0) { 1818 ret = -EIO; 1819 goto fail; 1820 } 1821 qemu_iovec_from_buf(qiov, bytes_done, cluster_data, cur_bytes); 1822 } 1823 break; 1824 1825 default: 1826 g_assert_not_reached(); 1827 ret = -EIO; 1828 goto fail; 1829 } 1830 1831 bytes -= cur_bytes; 1832 offset += cur_bytes; 1833 bytes_done += cur_bytes; 1834 } 1835 ret = 0; 1836 1837 fail: 1838 qemu_co_mutex_unlock(&s->lock); 1839 1840 qemu_iovec_destroy(&hd_qiov); 1841 qemu_vfree(cluster_data); 1842 1843 return ret; 1844 } 1845 1846 /* Check if it's possible to merge a write request with the writing of 1847 * the data from the COW regions */ 1848 static bool merge_cow(uint64_t offset, unsigned bytes, 1849 QEMUIOVector *hd_qiov, QCowL2Meta *l2meta) 1850 { 1851 QCowL2Meta *m; 1852 1853 for (m = l2meta; m != NULL; m = m->next) { 1854 /* If both COW regions are empty then there's nothing to merge */ 1855 if (m->cow_start.nb_bytes == 0 && m->cow_end.nb_bytes == 0) { 1856 continue; 1857 } 1858 1859 /* The data (middle) region must be immediately after the 1860 * start region */ 1861 if (l2meta_cow_start(m) + m->cow_start.nb_bytes != offset) { 1862 continue; 1863 } 1864 1865 /* The end region must be immediately after the data (middle) 1866 * region */ 1867 if (m->offset + m->cow_end.offset != offset + bytes) { 1868 continue; 1869 } 1870 1871 /* Make sure that adding both COW regions to the QEMUIOVector 1872 * does not exceed IOV_MAX */ 1873 if (hd_qiov->niov > IOV_MAX - 2) { 1874 continue; 1875 } 1876 1877 m->data_qiov = hd_qiov; 1878 return true; 1879 } 1880 1881 return false; 1882 } 1883 1884 static coroutine_fn int qcow2_co_pwritev(BlockDriverState *bs, uint64_t offset, 1885 uint64_t bytes, QEMUIOVector *qiov, 1886 int flags) 1887 { 1888 BDRVQcow2State *s = bs->opaque; 1889 int offset_in_cluster; 1890 int ret; 1891 unsigned int cur_bytes; /* number of sectors in current iteration */ 1892 uint64_t cluster_offset; 1893 QEMUIOVector hd_qiov; 1894 uint64_t bytes_done = 0; 1895 uint8_t *cluster_data = NULL; 1896 QCowL2Meta *l2meta = NULL; 1897 1898 trace_qcow2_writev_start_req(qemu_coroutine_self(), offset, bytes); 1899 1900 qemu_iovec_init(&hd_qiov, qiov->niov); 1901 1902 s->cluster_cache_offset = -1; /* disable compressed cache */ 1903 1904 qemu_co_mutex_lock(&s->lock); 1905 1906 while (bytes != 0) { 1907 1908 l2meta = NULL; 1909 1910 trace_qcow2_writev_start_part(qemu_coroutine_self()); 1911 offset_in_cluster = offset_into_cluster(s, offset); 1912 cur_bytes = MIN(bytes, INT_MAX); 1913 if (bs->encrypted) { 1914 cur_bytes = MIN(cur_bytes, 1915 QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size 1916 - offset_in_cluster); 1917 } 1918 1919 ret = qcow2_alloc_cluster_offset(bs, offset, &cur_bytes, 1920 &cluster_offset, &l2meta); 1921 if (ret < 0) { 1922 goto fail; 1923 } 1924 1925 assert((cluster_offset & 511) == 0); 1926 1927 qemu_iovec_reset(&hd_qiov); 1928 qemu_iovec_concat(&hd_qiov, qiov, bytes_done, cur_bytes); 1929 1930 if (bs->encrypted) { 1931 assert(s->crypto); 1932 if (!cluster_data) { 1933 cluster_data = qemu_try_blockalign(bs->file->bs, 1934 QCOW_MAX_CRYPT_CLUSTERS 1935 * s->cluster_size); 1936 if (cluster_data == NULL) { 1937 ret = -ENOMEM; 1938 goto fail; 1939 } 1940 } 1941 1942 assert(hd_qiov.size <= 1943 QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); 1944 qemu_iovec_to_buf(&hd_qiov, 0, cluster_data, hd_qiov.size); 1945 1946 if (qcrypto_block_encrypt(s->crypto, 1947 (s->crypt_physical_offset ? 
1948 cluster_offset + offset_in_cluster : 1949 offset) >> BDRV_SECTOR_BITS, 1950 cluster_data, 1951 cur_bytes, NULL) < 0) { 1952 ret = -EIO; 1953 goto fail; 1954 } 1955 1956 qemu_iovec_reset(&hd_qiov); 1957 qemu_iovec_add(&hd_qiov, cluster_data, cur_bytes); 1958 } 1959 1960 ret = qcow2_pre_write_overlap_check(bs, 0, 1961 cluster_offset + offset_in_cluster, cur_bytes); 1962 if (ret < 0) { 1963 goto fail; 1964 } 1965 1966 /* If we need to do COW, check if it's possible to merge the 1967 * writing of the guest data together with that of the COW regions. 1968 * If it's not possible (or not necessary) then write the 1969 * guest data now. */ 1970 if (!merge_cow(offset, cur_bytes, &hd_qiov, l2meta)) { 1971 qemu_co_mutex_unlock(&s->lock); 1972 BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO); 1973 trace_qcow2_writev_data(qemu_coroutine_self(), 1974 cluster_offset + offset_in_cluster); 1975 ret = bdrv_co_pwritev(bs->file, 1976 cluster_offset + offset_in_cluster, 1977 cur_bytes, &hd_qiov, 0); 1978 qemu_co_mutex_lock(&s->lock); 1979 if (ret < 0) { 1980 goto fail; 1981 } 1982 } 1983 1984 while (l2meta != NULL) { 1985 QCowL2Meta *next; 1986 1987 ret = qcow2_alloc_cluster_link_l2(bs, l2meta); 1988 if (ret < 0) { 1989 goto fail; 1990 } 1991 1992 /* Take the request off the list of running requests */ 1993 if (l2meta->nb_clusters != 0) { 1994 QLIST_REMOVE(l2meta, next_in_flight); 1995 } 1996 1997 qemu_co_queue_restart_all(&l2meta->dependent_requests); 1998 1999 next = l2meta->next; 2000 g_free(l2meta); 2001 l2meta = next; 2002 } 2003 2004 bytes -= cur_bytes; 2005 offset += cur_bytes; 2006 bytes_done += cur_bytes; 2007 trace_qcow2_writev_done_part(qemu_coroutine_self(), cur_bytes); 2008 } 2009 ret = 0; 2010 2011 fail: 2012 while (l2meta != NULL) { 2013 QCowL2Meta *next; 2014 2015 if (l2meta->nb_clusters != 0) { 2016 QLIST_REMOVE(l2meta, next_in_flight); 2017 } 2018 qemu_co_queue_restart_all(&l2meta->dependent_requests); 2019 2020 next = l2meta->next; 2021 g_free(l2meta); 2022 l2meta = next; 2023 } 2024 2025 qemu_co_mutex_unlock(&s->lock); 2026 2027 qemu_iovec_destroy(&hd_qiov); 2028 qemu_vfree(cluster_data); 2029 trace_qcow2_writev_done_req(qemu_coroutine_self(), ret); 2030 2031 return ret; 2032 } 2033 2034 static int qcow2_inactivate(BlockDriverState *bs) 2035 { 2036 BDRVQcow2State *s = bs->opaque; 2037 int ret, result = 0; 2038 Error *local_err = NULL; 2039 2040 ret = qcow2_cache_flush(bs, s->l2_table_cache); 2041 if (ret) { 2042 result = ret; 2043 error_report("Failed to flush the L2 table cache: %s", 2044 strerror(-ret)); 2045 } 2046 2047 ret = qcow2_cache_flush(bs, s->refcount_block_cache); 2048 if (ret) { 2049 result = ret; 2050 error_report("Failed to flush the refcount block cache: %s", 2051 strerror(-ret)); 2052 } 2053 2054 qcow2_store_persistent_dirty_bitmaps(bs, &local_err); 2055 if (local_err != NULL) { 2056 result = -EINVAL; 2057 error_report_err(local_err); 2058 error_report("Persistent bitmaps are lost for node '%s'", 2059 bdrv_get_device_or_node_name(bs)); 2060 } 2061 2062 if (result == 0) { 2063 qcow2_mark_clean(bs); 2064 } 2065 2066 return result; 2067 } 2068 2069 static void qcow2_close(BlockDriverState *bs) 2070 { 2071 BDRVQcow2State *s = bs->opaque; 2072 qemu_vfree(s->l1_table); 2073 /* else pre-write overlap checks in cache_destroy may crash */ 2074 s->l1_table = NULL; 2075 2076 if (!(s->flags & BDRV_O_INACTIVE)) { 2077 qcow2_inactivate(bs); 2078 } 2079 2080 cache_clean_timer_del(bs); 2081 qcow2_cache_destroy(bs, s->l2_table_cache); 2082 qcow2_cache_destroy(bs, s->refcount_block_cache); 2083 
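/* Note: when called from qcow2_invalidate_cache(), s->crypto has already
 * been detached (set to NULL) so that the encryption context survives the
 * reopen instead of being freed below. */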
2084 qcrypto_block_free(s->crypto); 2085 s->crypto = NULL; 2086 2087 g_free(s->unknown_header_fields); 2088 cleanup_unknown_header_ext(bs); 2089 2090 g_free(s->image_backing_file); 2091 g_free(s->image_backing_format); 2092 2093 g_free(s->cluster_cache); 2094 qemu_vfree(s->cluster_data); 2095 qcow2_refcount_close(bs); 2096 qcow2_free_snapshots(bs); 2097 } 2098 2099 static void qcow2_invalidate_cache(BlockDriverState *bs, Error **errp) 2100 { 2101 BDRVQcow2State *s = bs->opaque; 2102 int flags = s->flags; 2103 QCryptoBlock *crypto = NULL; 2104 QDict *options; 2105 Error *local_err = NULL; 2106 int ret; 2107 2108 /* 2109 * Backing files are read-only which makes all of their metadata immutable, 2110 * that means we don't have to worry about reopening them here. 2111 */ 2112 2113 crypto = s->crypto; 2114 s->crypto = NULL; 2115 2116 qcow2_close(bs); 2117 2118 memset(s, 0, sizeof(BDRVQcow2State)); 2119 options = qdict_clone_shallow(bs->options); 2120 2121 flags &= ~BDRV_O_INACTIVE; 2122 ret = qcow2_do_open(bs, options, flags, &local_err); 2123 QDECREF(options); 2124 if (local_err) { 2125 error_propagate(errp, local_err); 2126 error_prepend(errp, "Could not reopen qcow2 layer: "); 2127 bs->drv = NULL; 2128 return; 2129 } else if (ret < 0) { 2130 error_setg_errno(errp, -ret, "Could not reopen qcow2 layer"); 2131 bs->drv = NULL; 2132 return; 2133 } 2134 2135 s->crypto = crypto; 2136 } 2137 2138 static size_t header_ext_add(char *buf, uint32_t magic, const void *s, 2139 size_t len, size_t buflen) 2140 { 2141 QCowExtension *ext_backing_fmt = (QCowExtension*) buf; 2142 size_t ext_len = sizeof(QCowExtension) + ((len + 7) & ~7); 2143 2144 if (buflen < ext_len) { 2145 return -ENOSPC; 2146 } 2147 2148 *ext_backing_fmt = (QCowExtension) { 2149 .magic = cpu_to_be32(magic), 2150 .len = cpu_to_be32(len), 2151 }; 2152 2153 if (len) { 2154 memcpy(buf + sizeof(QCowExtension), s, len); 2155 } 2156 2157 return ext_len; 2158 } 2159 2160 /* 2161 * Updates the qcow2 header, including the variable length parts of it, i.e. 2162 * the backing file name and all extensions. qcow2 was not designed to allow 2163 * such changes, so if we run out of space (we can only use the first cluster) 2164 * this function may fail. 2165 * 2166 * Returns 0 on success, -errno in error cases. 
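 *
 * The first cluster is rewritten in this order: the fixed QCowHeader
 * struct (for compat=0.10 images only the version 2 fields are kept and
 * the version 3 fields are zeroed), any unknown header fields preserved
 * at open time, the header extensions (backing format, encryption header
 * pointer, feature table, bitmaps, unknown extensions, end marker; each
 * extension body is padded to a multiple of 8 bytes by header_ext_add(),
 * e.g. the 5-byte backing format string "qcow2" takes 8 + 8 = 16 bytes),
 * and finally the backing file name, which is not NUL-terminated.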
2167 */ 2168 int qcow2_update_header(BlockDriverState *bs) 2169 { 2170 BDRVQcow2State *s = bs->opaque; 2171 QCowHeader *header; 2172 char *buf; 2173 size_t buflen = s->cluster_size; 2174 int ret; 2175 uint64_t total_size; 2176 uint32_t refcount_table_clusters; 2177 size_t header_length; 2178 Qcow2UnknownHeaderExtension *uext; 2179 2180 buf = qemu_blockalign(bs, buflen); 2181 2182 /* Header structure */ 2183 header = (QCowHeader*) buf; 2184 2185 if (buflen < sizeof(*header)) { 2186 ret = -ENOSPC; 2187 goto fail; 2188 } 2189 2190 header_length = sizeof(*header) + s->unknown_header_fields_size; 2191 total_size = bs->total_sectors * BDRV_SECTOR_SIZE; 2192 refcount_table_clusters = s->refcount_table_size >> (s->cluster_bits - 3); 2193 2194 *header = (QCowHeader) { 2195 /* Version 2 fields */ 2196 .magic = cpu_to_be32(QCOW_MAGIC), 2197 .version = cpu_to_be32(s->qcow_version), 2198 .backing_file_offset = 0, 2199 .backing_file_size = 0, 2200 .cluster_bits = cpu_to_be32(s->cluster_bits), 2201 .size = cpu_to_be64(total_size), 2202 .crypt_method = cpu_to_be32(s->crypt_method_header), 2203 .l1_size = cpu_to_be32(s->l1_size), 2204 .l1_table_offset = cpu_to_be64(s->l1_table_offset), 2205 .refcount_table_offset = cpu_to_be64(s->refcount_table_offset), 2206 .refcount_table_clusters = cpu_to_be32(refcount_table_clusters), 2207 .nb_snapshots = cpu_to_be32(s->nb_snapshots), 2208 .snapshots_offset = cpu_to_be64(s->snapshots_offset), 2209 2210 /* Version 3 fields */ 2211 .incompatible_features = cpu_to_be64(s->incompatible_features), 2212 .compatible_features = cpu_to_be64(s->compatible_features), 2213 .autoclear_features = cpu_to_be64(s->autoclear_features), 2214 .refcount_order = cpu_to_be32(s->refcount_order), 2215 .header_length = cpu_to_be32(header_length), 2216 }; 2217 2218 /* For older versions, write a shorter header */ 2219 switch (s->qcow_version) { 2220 case 2: 2221 ret = offsetof(QCowHeader, incompatible_features); 2222 break; 2223 case 3: 2224 ret = sizeof(*header); 2225 break; 2226 default: 2227 ret = -EINVAL; 2228 goto fail; 2229 } 2230 2231 buf += ret; 2232 buflen -= ret; 2233 memset(buf, 0, buflen); 2234 2235 /* Preserve any unknown field in the header */ 2236 if (s->unknown_header_fields_size) { 2237 if (buflen < s->unknown_header_fields_size) { 2238 ret = -ENOSPC; 2239 goto fail; 2240 } 2241 2242 memcpy(buf, s->unknown_header_fields, s->unknown_header_fields_size); 2243 buf += s->unknown_header_fields_size; 2244 buflen -= s->unknown_header_fields_size; 2245 } 2246 2247 /* Backing file format header extension */ 2248 if (s->image_backing_format) { 2249 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_BACKING_FORMAT, 2250 s->image_backing_format, 2251 strlen(s->image_backing_format), 2252 buflen); 2253 if (ret < 0) { 2254 goto fail; 2255 } 2256 2257 buf += ret; 2258 buflen -= ret; 2259 } 2260 2261 /* Full disk encryption header pointer extension */ 2262 if (s->crypto_header.offset != 0) { 2263 cpu_to_be64s(&s->crypto_header.offset); 2264 cpu_to_be64s(&s->crypto_header.length); 2265 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_CRYPTO_HEADER, 2266 &s->crypto_header, sizeof(s->crypto_header), 2267 buflen); 2268 be64_to_cpus(&s->crypto_header.offset); 2269 be64_to_cpus(&s->crypto_header.length); 2270 if (ret < 0) { 2271 goto fail; 2272 } 2273 buf += ret; 2274 buflen -= ret; 2275 } 2276 2277 /* Feature table */ 2278 if (s->qcow_version >= 3) { 2279 Qcow2Feature features[] = { 2280 { 2281 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE, 2282 .bit = QCOW2_INCOMPAT_DIRTY_BITNR, 2283 .name = "dirty bit", 2284 }, 2285 { 
2286 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE, 2287 .bit = QCOW2_INCOMPAT_CORRUPT_BITNR, 2288 .name = "corrupt bit", 2289 }, 2290 { 2291 .type = QCOW2_FEAT_TYPE_COMPATIBLE, 2292 .bit = QCOW2_COMPAT_LAZY_REFCOUNTS_BITNR, 2293 .name = "lazy refcounts", 2294 }, 2295 }; 2296 2297 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_FEATURE_TABLE, 2298 features, sizeof(features), buflen); 2299 if (ret < 0) { 2300 goto fail; 2301 } 2302 buf += ret; 2303 buflen -= ret; 2304 } 2305 2306 /* Bitmap extension */ 2307 if (s->nb_bitmaps > 0) { 2308 Qcow2BitmapHeaderExt bitmaps_header = { 2309 .nb_bitmaps = cpu_to_be32(s->nb_bitmaps), 2310 .bitmap_directory_size = 2311 cpu_to_be64(s->bitmap_directory_size), 2312 .bitmap_directory_offset = 2313 cpu_to_be64(s->bitmap_directory_offset) 2314 }; 2315 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_BITMAPS, 2316 &bitmaps_header, sizeof(bitmaps_header), 2317 buflen); 2318 if (ret < 0) { 2319 goto fail; 2320 } 2321 buf += ret; 2322 buflen -= ret; 2323 } 2324 2325 /* Keep unknown header extensions */ 2326 QLIST_FOREACH(uext, &s->unknown_header_ext, next) { 2327 ret = header_ext_add(buf, uext->magic, uext->data, uext->len, buflen); 2328 if (ret < 0) { 2329 goto fail; 2330 } 2331 2332 buf += ret; 2333 buflen -= ret; 2334 } 2335 2336 /* End of header extensions */ 2337 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_END, NULL, 0, buflen); 2338 if (ret < 0) { 2339 goto fail; 2340 } 2341 2342 buf += ret; 2343 buflen -= ret; 2344 2345 /* Backing file name */ 2346 if (s->image_backing_file) { 2347 size_t backing_file_len = strlen(s->image_backing_file); 2348 2349 if (buflen < backing_file_len) { 2350 ret = -ENOSPC; 2351 goto fail; 2352 } 2353 2354 /* Using strncpy is ok here, since buf is not NUL-terminated. */ 2355 strncpy(buf, s->image_backing_file, buflen); 2356 2357 header->backing_file_offset = cpu_to_be64(buf - ((char*) header)); 2358 header->backing_file_size = cpu_to_be32(backing_file_len); 2359 } 2360 2361 /* Write the new header */ 2362 ret = bdrv_pwrite(bs->file, 0, header, s->cluster_size); 2363 if (ret < 0) { 2364 goto fail; 2365 } 2366 2367 ret = 0; 2368 fail: 2369 qemu_vfree(header); 2370 return ret; 2371 } 2372 2373 static int qcow2_change_backing_file(BlockDriverState *bs, 2374 const char *backing_file, const char *backing_fmt) 2375 { 2376 BDRVQcow2State *s = bs->opaque; 2377 2378 if (backing_file && strlen(backing_file) > 1023) { 2379 return -EINVAL; 2380 } 2381 2382 pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: ""); 2383 pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: ""); 2384 2385 g_free(s->image_backing_file); 2386 g_free(s->image_backing_format); 2387 2388 s->image_backing_file = backing_file ? g_strdup(bs->backing_file) : NULL; 2389 s->image_backing_format = backing_fmt ? 
g_strdup(bs->backing_format) : NULL; 2390 2391 return qcow2_update_header(bs); 2392 } 2393 2394 static int qcow2_crypt_method_from_format(const char *encryptfmt) 2395 { 2396 if (g_str_equal(encryptfmt, "luks")) { 2397 return QCOW_CRYPT_LUKS; 2398 } else if (g_str_equal(encryptfmt, "aes")) { 2399 return QCOW_CRYPT_AES; 2400 } else { 2401 return -EINVAL; 2402 } 2403 } 2404 2405 static int qcow2_set_up_encryption(BlockDriverState *bs, const char *encryptfmt, 2406 QemuOpts *opts, Error **errp) 2407 { 2408 BDRVQcow2State *s = bs->opaque; 2409 QCryptoBlockCreateOptions *cryptoopts = NULL; 2410 QCryptoBlock *crypto = NULL; 2411 int ret = -EINVAL; 2412 QDict *options, *encryptopts; 2413 int fmt; 2414 2415 options = qemu_opts_to_qdict(opts, NULL); 2416 qdict_extract_subqdict(options, &encryptopts, "encrypt."); 2417 QDECREF(options); 2418 2419 fmt = qcow2_crypt_method_from_format(encryptfmt); 2420 2421 switch (fmt) { 2422 case QCOW_CRYPT_LUKS: 2423 cryptoopts = block_crypto_create_opts_init( 2424 Q_CRYPTO_BLOCK_FORMAT_LUKS, encryptopts, errp); 2425 break; 2426 case QCOW_CRYPT_AES: 2427 cryptoopts = block_crypto_create_opts_init( 2428 Q_CRYPTO_BLOCK_FORMAT_QCOW, encryptopts, errp); 2429 break; 2430 default: 2431 error_setg(errp, "Unknown encryption format '%s'", encryptfmt); 2432 break; 2433 } 2434 if (!cryptoopts) { 2435 ret = -EINVAL; 2436 goto out; 2437 } 2438 s->crypt_method_header = fmt; 2439 2440 crypto = qcrypto_block_create(cryptoopts, "encrypt.", 2441 qcow2_crypto_hdr_init_func, 2442 qcow2_crypto_hdr_write_func, 2443 bs, errp); 2444 if (!crypto) { 2445 ret = -EINVAL; 2446 goto out; 2447 } 2448 2449 ret = qcow2_update_header(bs); 2450 if (ret < 0) { 2451 error_setg_errno(errp, -ret, "Could not write encryption header"); 2452 goto out; 2453 } 2454 2455 out: 2456 QDECREF(encryptopts); 2457 qcrypto_block_free(crypto); 2458 qapi_free_QCryptoBlockCreateOptions(cryptoopts); 2459 return ret; 2460 } 2461 2462 2463 /** 2464 * Preallocates metadata structures for data clusters between @offset (in the 2465 * guest disk) and @new_length (which is thus generally the new guest disk 2466 * size). 2467 * 2468 * Returns: 0 on success, -errno on failure. 2469 */ 2470 static int preallocate(BlockDriverState *bs, 2471 uint64_t offset, uint64_t new_length) 2472 { 2473 BDRVQcow2State *s = bs->opaque; 2474 uint64_t bytes; 2475 uint64_t host_offset = 0; 2476 unsigned int cur_bytes; 2477 int ret; 2478 QCowL2Meta *meta; 2479 2480 if (qemu_in_coroutine()) { 2481 qemu_co_mutex_lock(&s->lock); 2482 } 2483 2484 assert(offset <= new_length); 2485 bytes = new_length - offset; 2486 2487 while (bytes) { 2488 cur_bytes = MIN(bytes, INT_MAX); 2489 ret = qcow2_alloc_cluster_offset(bs, offset, &cur_bytes, 2490 &host_offset, &meta); 2491 if (ret < 0) { 2492 goto done; 2493 } 2494 2495 while (meta) { 2496 QCowL2Meta *next = meta->next; 2497 2498 ret = qcow2_alloc_cluster_link_l2(bs, meta); 2499 if (ret < 0) { 2500 qcow2_free_any_clusters(bs, meta->alloc_offset, 2501 meta->nb_clusters, QCOW2_DISCARD_NEVER); 2502 goto done; 2503 } 2504 2505 /* There are no dependent requests, but we need to remove our 2506 * request from the list of in-flight requests */ 2507 QLIST_REMOVE(meta, next_in_flight); 2508 2509 g_free(meta); 2510 meta = next; 2511 } 2512 2513 /* TODO Preallocate data if requested */ 2514 2515 bytes -= cur_bytes; 2516 offset += cur_bytes; 2517 } 2518 2519 /* 2520 * It is expected that the image file is large enough to actually contain 2521 * all of the allocated clusters (otherwise we get failing reads after 2522 * EOF). 
Extend the image to the last allocated sector. 2523 */ 2524 if (host_offset != 0) { 2525 uint8_t data = 0; 2526 ret = bdrv_pwrite(bs->file, (host_offset + cur_bytes) - 1, 2527 &data, 1); 2528 if (ret < 0) { 2529 goto done; 2530 } 2531 } 2532 2533 ret = 0; 2534 2535 done: 2536 if (qemu_in_coroutine()) { 2537 qemu_co_mutex_unlock(&s->lock); 2538 } 2539 return ret; 2540 } 2541 2542 /* qcow2_refcount_metadata_size: 2543 * @clusters: number of clusters to refcount (including data and L1/L2 tables) 2544 * @cluster_size: size of a cluster, in bytes 2545 * @refcount_order: refcount bits power-of-2 exponent 2546 * @generous_increase: allow for the refcount table to be 1.5x as large as it 2547 * needs to be 2548 * 2549 * Returns: Number of bytes required for refcount blocks and table metadata. 2550 */ 2551 int64_t qcow2_refcount_metadata_size(int64_t clusters, size_t cluster_size, 2552 int refcount_order, bool generous_increase, 2553 uint64_t *refblock_count) 2554 { 2555 /* 2556 * Every host cluster is reference-counted, including metadata (even 2557 * refcount metadata is recursively included). 2558 * 2559 * An accurate formula for the size of refcount metadata size is difficult 2560 * to derive. An easier method of calculation is finding the fixed point 2561 * where no further refcount blocks or table clusters are required to 2562 * reference count every cluster. 2563 */ 2564 int64_t blocks_per_table_cluster = cluster_size / sizeof(uint64_t); 2565 int64_t refcounts_per_block = cluster_size * 8 / (1 << refcount_order); 2566 int64_t table = 0; /* number of refcount table clusters */ 2567 int64_t blocks = 0; /* number of refcount block clusters */ 2568 int64_t last; 2569 int64_t n = 0; 2570 2571 do { 2572 last = n; 2573 blocks = DIV_ROUND_UP(clusters + table + blocks, refcounts_per_block); 2574 table = DIV_ROUND_UP(blocks, blocks_per_table_cluster); 2575 n = clusters + blocks + table; 2576 2577 if (n == last && generous_increase) { 2578 clusters += DIV_ROUND_UP(table, 2); 2579 n = 0; /* force another loop */ 2580 generous_increase = false; 2581 } 2582 } while (n != last); 2583 2584 if (refblock_count) { 2585 *refblock_count = blocks; 2586 } 2587 2588 return (blocks + table) * cluster_size; 2589 } 2590 2591 /** 2592 * qcow2_calc_prealloc_size: 2593 * @total_size: virtual disk size in bytes 2594 * @cluster_size: cluster size in bytes 2595 * @refcount_order: refcount bits power-of-2 exponent 2596 * 2597 * Returns: Total number of bytes required for the fully allocated image 2598 * (including metadata). 
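 *
 * Example (1 GiB virtual size, 64 KiB clusters, refcount_order 4): the
 * header takes one cluster (64 KiB); the 16384 L2 entries fill exactly
 * two clusters (128 KiB); the L1 table is rounded up to one cluster's
 * worth of entries (64 KiB); covering the resulting 16388 clusters then
 * needs one refcount block plus one refcount table cluster (128 KiB),
 * giving 1 GiB + 384 KiB in total.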
2599 */ 2600 static int64_t qcow2_calc_prealloc_size(int64_t total_size, 2601 size_t cluster_size, 2602 int refcount_order) 2603 { 2604 int64_t meta_size = 0; 2605 uint64_t nl1e, nl2e; 2606 int64_t aligned_total_size = align_offset(total_size, cluster_size); 2607 2608 /* header: 1 cluster */ 2609 meta_size += cluster_size; 2610 2611 /* total size of L2 tables */ 2612 nl2e = aligned_total_size / cluster_size; 2613 nl2e = align_offset(nl2e, cluster_size / sizeof(uint64_t)); 2614 meta_size += nl2e * sizeof(uint64_t); 2615 2616 /* total size of L1 tables */ 2617 nl1e = nl2e * sizeof(uint64_t) / cluster_size; 2618 nl1e = align_offset(nl1e, cluster_size / sizeof(uint64_t)); 2619 meta_size += nl1e * sizeof(uint64_t); 2620 2621 /* total size of refcount table and blocks */ 2622 meta_size += qcow2_refcount_metadata_size( 2623 (meta_size + aligned_total_size) / cluster_size, 2624 cluster_size, refcount_order, false, NULL); 2625 2626 return meta_size + aligned_total_size; 2627 } 2628 2629 static size_t qcow2_opt_get_cluster_size_del(QemuOpts *opts, Error **errp) 2630 { 2631 size_t cluster_size; 2632 int cluster_bits; 2633 2634 cluster_size = qemu_opt_get_size_del(opts, BLOCK_OPT_CLUSTER_SIZE, 2635 DEFAULT_CLUSTER_SIZE); 2636 cluster_bits = ctz32(cluster_size); 2637 if (cluster_bits < MIN_CLUSTER_BITS || cluster_bits > MAX_CLUSTER_BITS || 2638 (1 << cluster_bits) != cluster_size) 2639 { 2640 error_setg(errp, "Cluster size must be a power of two between %d and " 2641 "%dk", 1 << MIN_CLUSTER_BITS, 1 << (MAX_CLUSTER_BITS - 10)); 2642 return 0; 2643 } 2644 return cluster_size; 2645 } 2646 2647 static int qcow2_opt_get_version_del(QemuOpts *opts, Error **errp) 2648 { 2649 char *buf; 2650 int ret; 2651 2652 buf = qemu_opt_get_del(opts, BLOCK_OPT_COMPAT_LEVEL); 2653 if (!buf) { 2654 ret = 3; /* default */ 2655 } else if (!strcmp(buf, "0.10")) { 2656 ret = 2; 2657 } else if (!strcmp(buf, "1.1")) { 2658 ret = 3; 2659 } else { 2660 error_setg(errp, "Invalid compatibility level: '%s'", buf); 2661 ret = -EINVAL; 2662 } 2663 g_free(buf); 2664 return ret; 2665 } 2666 2667 static uint64_t qcow2_opt_get_refcount_bits_del(QemuOpts *opts, int version, 2668 Error **errp) 2669 { 2670 uint64_t refcount_bits; 2671 2672 refcount_bits = qemu_opt_get_number_del(opts, BLOCK_OPT_REFCOUNT_BITS, 16); 2673 if (refcount_bits > 64 || !is_power_of_2(refcount_bits)) { 2674 error_setg(errp, "Refcount width must be a power of two and may not " 2675 "exceed 64 bits"); 2676 return 0; 2677 } 2678 2679 if (version < 3 && refcount_bits != 16) { 2680 error_setg(errp, "Different refcount widths than 16 bits require " 2681 "compatibility level 1.1 or above (use compat=1.1 or " 2682 "greater)"); 2683 return 0; 2684 } 2685 2686 return refcount_bits; 2687 } 2688 2689 static int qcow2_create2(const char *filename, int64_t total_size, 2690 const char *backing_file, const char *backing_format, 2691 int flags, size_t cluster_size, PreallocMode prealloc, 2692 QemuOpts *opts, int version, int refcount_order, 2693 const char *encryptfmt, Error **errp) 2694 { 2695 QDict *options; 2696 2697 /* 2698 * Open the image file and write a minimal qcow2 header. 2699 * 2700 * We keep things simple and start with a zero-sized image. We also 2701 * do without refcount blocks or a L1 table for now. We'll fix the 2702 * inconsistency later. 
2703 * 2704 * We do need a refcount table because growing the refcount table means 2705 * allocating two new refcount blocks - the seconds of which would be at 2706 * 2 GB for 64k clusters, and we don't want to have a 2 GB initial file 2707 * size for any qcow2 image. 2708 */ 2709 BlockBackend *blk; 2710 QCowHeader *header; 2711 uint64_t* refcount_table; 2712 Error *local_err = NULL; 2713 int ret; 2714 2715 if (prealloc == PREALLOC_MODE_FULL || prealloc == PREALLOC_MODE_FALLOC) { 2716 int64_t prealloc_size = 2717 qcow2_calc_prealloc_size(total_size, cluster_size, refcount_order); 2718 qemu_opt_set_number(opts, BLOCK_OPT_SIZE, prealloc_size, &error_abort); 2719 qemu_opt_set(opts, BLOCK_OPT_PREALLOC, PreallocMode_lookup[prealloc], 2720 &error_abort); 2721 } 2722 2723 ret = bdrv_create_file(filename, opts, &local_err); 2724 if (ret < 0) { 2725 error_propagate(errp, local_err); 2726 return ret; 2727 } 2728 2729 blk = blk_new_open(filename, NULL, NULL, 2730 BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, 2731 &local_err); 2732 if (blk == NULL) { 2733 error_propagate(errp, local_err); 2734 return -EIO; 2735 } 2736 2737 blk_set_allow_write_beyond_eof(blk, true); 2738 2739 /* Write the header */ 2740 QEMU_BUILD_BUG_ON((1 << MIN_CLUSTER_BITS) < sizeof(*header)); 2741 header = g_malloc0(cluster_size); 2742 *header = (QCowHeader) { 2743 .magic = cpu_to_be32(QCOW_MAGIC), 2744 .version = cpu_to_be32(version), 2745 .cluster_bits = cpu_to_be32(ctz32(cluster_size)), 2746 .size = cpu_to_be64(0), 2747 .l1_table_offset = cpu_to_be64(0), 2748 .l1_size = cpu_to_be32(0), 2749 .refcount_table_offset = cpu_to_be64(cluster_size), 2750 .refcount_table_clusters = cpu_to_be32(1), 2751 .refcount_order = cpu_to_be32(refcount_order), 2752 .header_length = cpu_to_be32(sizeof(*header)), 2753 }; 2754 2755 /* We'll update this to correct value later */ 2756 header->crypt_method = cpu_to_be32(QCOW_CRYPT_NONE); 2757 2758 if (flags & BLOCK_FLAG_LAZY_REFCOUNTS) { 2759 header->compatible_features |= 2760 cpu_to_be64(QCOW2_COMPAT_LAZY_REFCOUNTS); 2761 } 2762 2763 ret = blk_pwrite(blk, 0, header, cluster_size, 0); 2764 g_free(header); 2765 if (ret < 0) { 2766 error_setg_errno(errp, -ret, "Could not write qcow2 header"); 2767 goto out; 2768 } 2769 2770 /* Write a refcount table with one refcount block */ 2771 refcount_table = g_malloc0(2 * cluster_size); 2772 refcount_table[0] = cpu_to_be64(2 * cluster_size); 2773 ret = blk_pwrite(blk, cluster_size, refcount_table, 2 * cluster_size, 0); 2774 g_free(refcount_table); 2775 2776 if (ret < 0) { 2777 error_setg_errno(errp, -ret, "Could not write refcount table"); 2778 goto out; 2779 } 2780 2781 blk_unref(blk); 2782 blk = NULL; 2783 2784 /* 2785 * And now open the image and make it consistent first (i.e. 
increase the 2786 * refcount of the cluster that is occupied by the header and the refcount 2787 * table) 2788 */ 2789 options = qdict_new(); 2790 qdict_put_str(options, "driver", "qcow2"); 2791 blk = blk_new_open(filename, NULL, options, 2792 BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_NO_FLUSH, 2793 &local_err); 2794 if (blk == NULL) { 2795 error_propagate(errp, local_err); 2796 ret = -EIO; 2797 goto out; 2798 } 2799 2800 ret = qcow2_alloc_clusters(blk_bs(blk), 3 * cluster_size); 2801 if (ret < 0) { 2802 error_setg_errno(errp, -ret, "Could not allocate clusters for qcow2 " 2803 "header and refcount table"); 2804 goto out; 2805 2806 } else if (ret != 0) { 2807 error_report("Huh, first cluster in empty image is already in use?"); 2808 abort(); 2809 } 2810 2811 /* Create a full header (including things like feature table) */ 2812 ret = qcow2_update_header(blk_bs(blk)); 2813 if (ret < 0) { 2814 error_setg_errno(errp, -ret, "Could not update qcow2 header"); 2815 goto out; 2816 } 2817 2818 /* Okay, now that we have a valid image, let's give it the right size */ 2819 ret = blk_truncate(blk, total_size, PREALLOC_MODE_OFF, errp); 2820 if (ret < 0) { 2821 error_prepend(errp, "Could not resize image: "); 2822 goto out; 2823 } 2824 2825 /* Want a backing file? There you go.*/ 2826 if (backing_file) { 2827 ret = bdrv_change_backing_file(blk_bs(blk), backing_file, backing_format); 2828 if (ret < 0) { 2829 error_setg_errno(errp, -ret, "Could not assign backing file '%s' " 2830 "with format '%s'", backing_file, backing_format); 2831 goto out; 2832 } 2833 } 2834 2835 /* Want encryption? There you go. */ 2836 if (encryptfmt) { 2837 ret = qcow2_set_up_encryption(blk_bs(blk), encryptfmt, opts, errp); 2838 if (ret < 0) { 2839 goto out; 2840 } 2841 } 2842 2843 /* And if we're supposed to preallocate metadata, do that now */ 2844 if (prealloc != PREALLOC_MODE_OFF) { 2845 ret = preallocate(blk_bs(blk), 0, total_size); 2846 if (ret < 0) { 2847 error_setg_errno(errp, -ret, "Could not preallocate metadata"); 2848 goto out; 2849 } 2850 } 2851 2852 blk_unref(blk); 2853 blk = NULL; 2854 2855 /* Reopen the image without BDRV_O_NO_FLUSH to flush it before returning. 2856 * Using BDRV_O_NO_IO, since encryption is now setup we don't want to 2857 * have to setup decryption context. We're not doing any I/O on the top 2858 * level BlockDriverState, only lower layers, where BDRV_O_NO_IO does 2859 * not have effect. 
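 * In other words, this final open/close pair exists only to flush the new
 * image to disk; no guest I/O (and therefore no decryption) is performed
 * through it.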
2860 */ 2861 options = qdict_new(); 2862 qdict_put_str(options, "driver", "qcow2"); 2863 blk = blk_new_open(filename, NULL, options, 2864 BDRV_O_RDWR | BDRV_O_NO_BACKING | BDRV_O_NO_IO, 2865 &local_err); 2866 if (blk == NULL) { 2867 error_propagate(errp, local_err); 2868 ret = -EIO; 2869 goto out; 2870 } 2871 2872 ret = 0; 2873 out: 2874 if (blk) { 2875 blk_unref(blk); 2876 } 2877 return ret; 2878 } 2879 2880 static int qcow2_create(const char *filename, QemuOpts *opts, Error **errp) 2881 { 2882 char *backing_file = NULL; 2883 char *backing_fmt = NULL; 2884 char *buf = NULL; 2885 uint64_t size = 0; 2886 int flags = 0; 2887 size_t cluster_size = DEFAULT_CLUSTER_SIZE; 2888 PreallocMode prealloc; 2889 int version; 2890 uint64_t refcount_bits; 2891 int refcount_order; 2892 char *encryptfmt = NULL; 2893 Error *local_err = NULL; 2894 int ret; 2895 2896 /* Read out options */ 2897 size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0), 2898 BDRV_SECTOR_SIZE); 2899 backing_file = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE); 2900 backing_fmt = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FMT); 2901 encryptfmt = qemu_opt_get_del(opts, BLOCK_OPT_ENCRYPT_FORMAT); 2902 if (encryptfmt) { 2903 if (qemu_opt_get(opts, BLOCK_OPT_ENCRYPT)) { 2904 error_setg(errp, "Options " BLOCK_OPT_ENCRYPT " and " 2905 BLOCK_OPT_ENCRYPT_FORMAT " are mutually exclusive"); 2906 ret = -EINVAL; 2907 goto finish; 2908 } 2909 } else if (qemu_opt_get_bool_del(opts, BLOCK_OPT_ENCRYPT, false)) { 2910 encryptfmt = g_strdup("aes"); 2911 } 2912 cluster_size = qcow2_opt_get_cluster_size_del(opts, &local_err); 2913 if (local_err) { 2914 error_propagate(errp, local_err); 2915 ret = -EINVAL; 2916 goto finish; 2917 } 2918 buf = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC); 2919 prealloc = qapi_enum_parse(PreallocMode_lookup, buf, 2920 PREALLOC_MODE__MAX, PREALLOC_MODE_OFF, 2921 &local_err); 2922 if (local_err) { 2923 error_propagate(errp, local_err); 2924 ret = -EINVAL; 2925 goto finish; 2926 } 2927 2928 version = qcow2_opt_get_version_del(opts, &local_err); 2929 if (local_err) { 2930 error_propagate(errp, local_err); 2931 ret = -EINVAL; 2932 goto finish; 2933 } 2934 2935 if (qemu_opt_get_bool_del(opts, BLOCK_OPT_LAZY_REFCOUNTS, false)) { 2936 flags |= BLOCK_FLAG_LAZY_REFCOUNTS; 2937 } 2938 2939 if (backing_file && prealloc != PREALLOC_MODE_OFF) { 2940 error_setg(errp, "Backing file and preallocation cannot be used at " 2941 "the same time"); 2942 ret = -EINVAL; 2943 goto finish; 2944 } 2945 2946 if (version < 3 && (flags & BLOCK_FLAG_LAZY_REFCOUNTS)) { 2947 error_setg(errp, "Lazy refcounts only supported with compatibility " 2948 "level 1.1 and above (use compat=1.1 or greater)"); 2949 ret = -EINVAL; 2950 goto finish; 2951 } 2952 2953 refcount_bits = qcow2_opt_get_refcount_bits_del(opts, version, &local_err); 2954 if (local_err) { 2955 error_propagate(errp, local_err); 2956 ret = -EINVAL; 2957 goto finish; 2958 } 2959 2960 refcount_order = ctz32(refcount_bits); 2961 2962 ret = qcow2_create2(filename, size, backing_file, backing_fmt, flags, 2963 cluster_size, prealloc, opts, version, refcount_order, 2964 encryptfmt, &local_err); 2965 error_propagate(errp, local_err); 2966 2967 finish: 2968 g_free(backing_file); 2969 g_free(backing_fmt); 2970 g_free(encryptfmt); 2971 g_free(buf); 2972 return ret; 2973 } 2974 2975 2976 static bool is_zero_sectors(BlockDriverState *bs, int64_t start, 2977 uint32_t count) 2978 { 2979 int nr; 2980 BlockDriverState *file; 2981 int64_t res; 2982 2983 if (start + count > bs->total_sectors) { 2984 count = 
bs->total_sectors - start; 2985 } 2986 2987 if (!count) { 2988 return true; 2989 } 2990 res = bdrv_get_block_status_above(bs, NULL, start, count, 2991 &nr, &file); 2992 return res >= 0 && (res & BDRV_BLOCK_ZERO) && nr == count; 2993 } 2994 2995 static coroutine_fn int qcow2_co_pwrite_zeroes(BlockDriverState *bs, 2996 int64_t offset, int bytes, BdrvRequestFlags flags) 2997 { 2998 int ret; 2999 BDRVQcow2State *s = bs->opaque; 3000 3001 uint32_t head = offset % s->cluster_size; 3002 uint32_t tail = (offset + bytes) % s->cluster_size; 3003 3004 trace_qcow2_pwrite_zeroes_start_req(qemu_coroutine_self(), offset, bytes); 3005 if (offset + bytes == bs->total_sectors * BDRV_SECTOR_SIZE) { 3006 tail = 0; 3007 } 3008 3009 if (head || tail) { 3010 int64_t cl_start = (offset - head) >> BDRV_SECTOR_BITS; 3011 uint64_t off; 3012 unsigned int nr; 3013 3014 assert(head + bytes <= s->cluster_size); 3015 3016 /* check whether remainder of cluster already reads as zero */ 3017 if (!(is_zero_sectors(bs, cl_start, 3018 DIV_ROUND_UP(head, BDRV_SECTOR_SIZE)) && 3019 is_zero_sectors(bs, (offset + bytes) >> BDRV_SECTOR_BITS, 3020 DIV_ROUND_UP(-tail & (s->cluster_size - 1), 3021 BDRV_SECTOR_SIZE)))) { 3022 return -ENOTSUP; 3023 } 3024 3025 qemu_co_mutex_lock(&s->lock); 3026 /* We can have new write after previous check */ 3027 offset = cl_start << BDRV_SECTOR_BITS; 3028 bytes = s->cluster_size; 3029 nr = s->cluster_size; 3030 ret = qcow2_get_cluster_offset(bs, offset, &nr, &off); 3031 if (ret != QCOW2_CLUSTER_UNALLOCATED && 3032 ret != QCOW2_CLUSTER_ZERO_PLAIN && 3033 ret != QCOW2_CLUSTER_ZERO_ALLOC) { 3034 qemu_co_mutex_unlock(&s->lock); 3035 return -ENOTSUP; 3036 } 3037 } else { 3038 qemu_co_mutex_lock(&s->lock); 3039 } 3040 3041 trace_qcow2_pwrite_zeroes(qemu_coroutine_self(), offset, bytes); 3042 3043 /* Whatever is left can use real zero clusters */ 3044 ret = qcow2_cluster_zeroize(bs, offset, bytes, flags); 3045 qemu_co_mutex_unlock(&s->lock); 3046 3047 return ret; 3048 } 3049 3050 static coroutine_fn int qcow2_co_pdiscard(BlockDriverState *bs, 3051 int64_t offset, int bytes) 3052 { 3053 int ret; 3054 BDRVQcow2State *s = bs->opaque; 3055 3056 if (!QEMU_IS_ALIGNED(offset | bytes, s->cluster_size)) { 3057 assert(bytes < s->cluster_size); 3058 /* Ignore partial clusters, except for the special case of the 3059 * complete partial cluster at the end of an unaligned file */ 3060 if (!QEMU_IS_ALIGNED(offset, s->cluster_size) || 3061 offset + bytes != bs->total_sectors * BDRV_SECTOR_SIZE) { 3062 return -ENOTSUP; 3063 } 3064 } 3065 3066 qemu_co_mutex_lock(&s->lock); 3067 ret = qcow2_cluster_discard(bs, offset, bytes, QCOW2_DISCARD_REQUEST, 3068 false); 3069 qemu_co_mutex_unlock(&s->lock); 3070 return ret; 3071 } 3072 3073 static int qcow2_truncate(BlockDriverState *bs, int64_t offset, 3074 PreallocMode prealloc, Error **errp) 3075 { 3076 BDRVQcow2State *s = bs->opaque; 3077 uint64_t old_length; 3078 int64_t new_l1_size; 3079 int ret; 3080 3081 if (prealloc != PREALLOC_MODE_OFF && prealloc != PREALLOC_MODE_METADATA && 3082 prealloc != PREALLOC_MODE_FALLOC && prealloc != PREALLOC_MODE_FULL) 3083 { 3084 error_setg(errp, "Unsupported preallocation mode '%s'", 3085 PreallocMode_lookup[prealloc]); 3086 return -ENOTSUP; 3087 } 3088 3089 if (offset & 511) { 3090 error_setg(errp, "The new size must be a multiple of 512"); 3091 return -EINVAL; 3092 } 3093 3094 /* cannot proceed if image has snapshots */ 3095 if (s->nb_snapshots) { 3096 error_setg(errp, "Can't resize an image which has snapshots"); 3097 return -ENOTSUP; 3098 } 
3099 3100 /* cannot proceed if image has bitmaps */ 3101 if (s->nb_bitmaps) { 3102 /* TODO: resize bitmaps in the image */ 3103 error_setg(errp, "Can't resize an image which has bitmaps"); 3104 return -ENOTSUP; 3105 } 3106 3107 old_length = bs->total_sectors * 512; 3108 3109 /* shrinking is currently not supported */ 3110 if (offset < old_length) { 3111 error_setg(errp, "qcow2 doesn't support shrinking images yet"); 3112 return -ENOTSUP; 3113 } 3114 3115 new_l1_size = size_to_l1(s, offset); 3116 ret = qcow2_grow_l1_table(bs, new_l1_size, true); 3117 if (ret < 0) { 3118 error_setg_errno(errp, -ret, "Failed to grow the L1 table"); 3119 return ret; 3120 } 3121 3122 switch (prealloc) { 3123 case PREALLOC_MODE_OFF: 3124 break; 3125 3126 case PREALLOC_MODE_METADATA: 3127 ret = preallocate(bs, old_length, offset); 3128 if (ret < 0) { 3129 error_setg_errno(errp, -ret, "Preallocation failed"); 3130 return ret; 3131 } 3132 break; 3133 3134 case PREALLOC_MODE_FALLOC: 3135 case PREALLOC_MODE_FULL: 3136 { 3137 int64_t allocation_start, host_offset, guest_offset; 3138 int64_t clusters_allocated; 3139 int64_t old_file_size, new_file_size; 3140 uint64_t nb_new_data_clusters, nb_new_l2_tables; 3141 3142 old_file_size = bdrv_getlength(bs->file->bs); 3143 if (old_file_size < 0) { 3144 error_setg_errno(errp, -old_file_size, 3145 "Failed to inquire current file length"); 3146 return old_file_size; 3147 } 3148 3149 nb_new_data_clusters = DIV_ROUND_UP(offset - old_length, 3150 s->cluster_size); 3151 3152 /* This is an overestimation; we will not actually allocate space for 3153 * these in the file but just make sure the new refcount structures are 3154 * able to cover them so we will not have to allocate new refblocks 3155 * while entering the data blocks in the potentially new L2 tables. 3156 * (We do not actually care where the L2 tables are placed. Maybe they 3157 * are already allocated or they can be placed somewhere before 3158 * @old_file_size. It does not matter because they will be fully 3159 * allocated automatically, so they do not need to be covered by the 3160 * preallocation. All that matters is that we will not have to allocate 3161 * new refcount structures for them.)
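 *
 * For example, with 64 KiB clusters, growing the image by 1 GiB adds
 * 16384 data clusters; each L2 table maps 8192 clusters, so two L2
 * tables plus one extra for head/tail alignment are counted, and the
 * refcount area below is sized for 16384 + 3 = 16387 clusters.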
*/ 3162 nb_new_l2_tables = DIV_ROUND_UP(nb_new_data_clusters, 3163 s->cluster_size / sizeof(uint64_t)); 3164 /* The cluster range may not be aligned to L2 boundaries, so add one L2 3165 * table for a potential head/tail */ 3166 nb_new_l2_tables++; 3167 3168 allocation_start = qcow2_refcount_area(bs, old_file_size, 3169 nb_new_data_clusters + 3170 nb_new_l2_tables, 3171 true, 0, 0); 3172 if (allocation_start < 0) { 3173 error_setg_errno(errp, -allocation_start, 3174 "Failed to resize refcount structures"); 3175 return -allocation_start; 3176 } 3177 3178 clusters_allocated = qcow2_alloc_clusters_at(bs, allocation_start, 3179 nb_new_data_clusters); 3180 if (clusters_allocated < 0) { 3181 error_setg_errno(errp, -clusters_allocated, 3182 "Failed to allocate data clusters"); 3183 return -clusters_allocated; 3184 } 3185 3186 assert(clusters_allocated == nb_new_data_clusters); 3187 3188 /* Allocate the data area */ 3189 new_file_size = allocation_start + 3190 nb_new_data_clusters * s->cluster_size; 3191 ret = bdrv_truncate(bs->file, new_file_size, prealloc, errp); 3192 if (ret < 0) { 3193 error_prepend(errp, "Failed to resize underlying file: "); 3194 qcow2_free_clusters(bs, allocation_start, 3195 nb_new_data_clusters * s->cluster_size, 3196 QCOW2_DISCARD_OTHER); 3197 return ret; 3198 } 3199 3200 /* Create the necessary L2 entries */ 3201 host_offset = allocation_start; 3202 guest_offset = old_length; 3203 while (nb_new_data_clusters) { 3204 int64_t guest_cluster = guest_offset >> s->cluster_bits; 3205 int64_t nb_clusters = MIN(nb_new_data_clusters, 3206 s->l2_size - guest_cluster % s->l2_size); 3207 QCowL2Meta allocation = { 3208 .offset = guest_offset, 3209 .alloc_offset = host_offset, 3210 .nb_clusters = nb_clusters, 3211 }; 3212 qemu_co_queue_init(&allocation.dependent_requests); 3213 3214 ret = qcow2_alloc_cluster_link_l2(bs, &allocation); 3215 if (ret < 0) { 3216 error_setg_errno(errp, -ret, "Failed to update L2 tables"); 3217 qcow2_free_clusters(bs, host_offset, 3218 nb_new_data_clusters * s->cluster_size, 3219 QCOW2_DISCARD_OTHER); 3220 return ret; 3221 } 3222 3223 guest_offset += nb_clusters * s->cluster_size; 3224 host_offset += nb_clusters * s->cluster_size; 3225 nb_new_data_clusters -= nb_clusters; 3226 } 3227 break; 3228 } 3229 3230 default: 3231 g_assert_not_reached(); 3232 } 3233 3234 if (prealloc != PREALLOC_MODE_OFF) { 3235 /* Flush metadata before actually changing the image size */ 3236 ret = bdrv_flush(bs); 3237 if (ret < 0) { 3238 error_setg_errno(errp, -ret, 3239 "Failed to flush the preallocated area to disk"); 3240 return ret; 3241 } 3242 } 3243 3244 /* write updated header.size */ 3245 offset = cpu_to_be64(offset); 3246 ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, size), 3247 &offset, sizeof(uint64_t)); 3248 if (ret < 0) { 3249 error_setg_errno(errp, -ret, "Failed to update the image size"); 3250 return ret; 3251 } 3252 3253 s->l1_vm_state_index = new_l1_size; 3254 return 0; 3255 } 3256 3257 /* XXX: put compressed sectors first, then all the cluster aligned 3258 tables to avoid losing bytes in alignment */ 3259 static coroutine_fn int 3260 qcow2_co_pwritev_compressed(BlockDriverState *bs, uint64_t offset, 3261 uint64_t bytes, QEMUIOVector *qiov) 3262 { 3263 BDRVQcow2State *s = bs->opaque; 3264 QEMUIOVector hd_qiov; 3265 struct iovec iov; 3266 z_stream strm; 3267 int ret, out_len; 3268 uint8_t *buf, *out_buf; 3269 int64_t cluster_offset; 3270 3271 if (bytes == 0) { 3272 /* align end of file to a sector boundary to ease reading with 3273 sector based I/Os */ 3274 
cluster_offset = bdrv_getlength(bs->file->bs); 3275 if (cluster_offset < 0) { 3276 return cluster_offset; 3277 } 3278 return bdrv_truncate(bs->file, cluster_offset, PREALLOC_MODE_OFF, NULL); 3279 } 3280 3281 buf = qemu_blockalign(bs, s->cluster_size); 3282 if (bytes != s->cluster_size) { 3283 if (bytes > s->cluster_size || 3284 offset + bytes != bs->total_sectors << BDRV_SECTOR_BITS) 3285 { 3286 qemu_vfree(buf); 3287 return -EINVAL; 3288 } 3289 /* Zero-pad last write if image size is not cluster aligned */ 3290 memset(buf + bytes, 0, s->cluster_size - bytes); 3291 } 3292 qemu_iovec_to_buf(qiov, 0, buf, bytes); 3293 3294 out_buf = g_malloc(s->cluster_size); 3295 3296 /* best compression, small window, no zlib header */ 3297 memset(&strm, 0, sizeof(strm)); 3298 ret = deflateInit2(&strm, Z_DEFAULT_COMPRESSION, 3299 Z_DEFLATED, -12, 3300 9, Z_DEFAULT_STRATEGY); 3301 if (ret != 0) { 3302 ret = -EINVAL; 3303 goto fail; 3304 } 3305 3306 strm.avail_in = s->cluster_size; 3307 strm.next_in = (uint8_t *)buf; 3308 strm.avail_out = s->cluster_size; 3309 strm.next_out = out_buf; 3310 3311 ret = deflate(&strm, Z_FINISH); 3312 if (ret != Z_STREAM_END && ret != Z_OK) { 3313 deflateEnd(&strm); 3314 ret = -EINVAL; 3315 goto fail; 3316 } 3317 out_len = strm.next_out - out_buf; 3318 3319 deflateEnd(&strm); 3320 3321 if (ret != Z_STREAM_END || out_len >= s->cluster_size) { 3322 /* could not compress: write normal cluster */ 3323 ret = qcow2_co_pwritev(bs, offset, bytes, qiov, 0); 3324 if (ret < 0) { 3325 goto fail; 3326 } 3327 goto success; 3328 } 3329 3330 qemu_co_mutex_lock(&s->lock); 3331 cluster_offset = 3332 qcow2_alloc_compressed_cluster_offset(bs, offset, out_len); 3333 if (!cluster_offset) { 3334 qemu_co_mutex_unlock(&s->lock); 3335 ret = -EIO; 3336 goto fail; 3337 } 3338 cluster_offset &= s->cluster_offset_mask; 3339 3340 ret = qcow2_pre_write_overlap_check(bs, 0, cluster_offset, out_len); 3341 qemu_co_mutex_unlock(&s->lock); 3342 if (ret < 0) { 3343 goto fail; 3344 } 3345 3346 iov = (struct iovec) { 3347 .iov_base = out_buf, 3348 .iov_len = out_len, 3349 }; 3350 qemu_iovec_init_external(&hd_qiov, &iov, 1); 3351 3352 BLKDBG_EVENT(bs->file, BLKDBG_WRITE_COMPRESSED); 3353 ret = bdrv_co_pwritev(bs->file, cluster_offset, out_len, &hd_qiov, 0); 3354 if (ret < 0) { 3355 goto fail; 3356 } 3357 success: 3358 ret = 0; 3359 fail: 3360 qemu_vfree(buf); 3361 g_free(out_buf); 3362 return ret; 3363 } 3364 3365 static int make_completely_empty(BlockDriverState *bs) 3366 { 3367 BDRVQcow2State *s = bs->opaque; 3368 Error *local_err = NULL; 3369 int ret, l1_clusters; 3370 int64_t offset; 3371 uint64_t *new_reftable = NULL; 3372 uint64_t rt_entry, l1_size2; 3373 struct { 3374 uint64_t l1_offset; 3375 uint64_t reftable_offset; 3376 uint32_t reftable_clusters; 3377 } QEMU_PACKED l1_ofs_rt_ofs_cls; 3378 3379 ret = qcow2_cache_empty(bs, s->l2_table_cache); 3380 if (ret < 0) { 3381 goto fail; 3382 } 3383 3384 ret = qcow2_cache_empty(bs, s->refcount_block_cache); 3385 if (ret < 0) { 3386 goto fail; 3387 } 3388 3389 /* Refcounts will be broken utterly */ 3390 ret = qcow2_mark_dirty(bs); 3391 if (ret < 0) { 3392 goto fail; 3393 } 3394 3395 BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE); 3396 3397 l1_clusters = DIV_ROUND_UP(s->l1_size, s->cluster_size / sizeof(uint64_t)); 3398 l1_size2 = (uint64_t)s->l1_size * sizeof(uint64_t); 3399 3400 /* After this call, neither the in-memory nor the on-disk refcount 3401 * information accurately describe the actual references */ 3402 3403 ret = bdrv_pwrite_zeroes(bs->file, s->l1_table_offset, 3404 
l1_clusters * s->cluster_size, 0); 3405 if (ret < 0) { 3406 goto fail_broken_refcounts; 3407 } 3408 memset(s->l1_table, 0, l1_size2); 3409 3410 BLKDBG_EVENT(bs->file, BLKDBG_EMPTY_IMAGE_PREPARE); 3411 3412 /* Overwrite enough clusters at the beginning of the sectors to place 3413 * the refcount table, a refcount block and the L1 table in; this may 3414 * overwrite parts of the existing refcount and L1 table, which is not 3415 * an issue because the dirty flag is set, complete data loss is in fact 3416 * desired and partial data loss is consequently fine as well */ 3417 ret = bdrv_pwrite_zeroes(bs->file, s->cluster_size, 3418 (2 + l1_clusters) * s->cluster_size, 0); 3419 /* This call (even if it failed overall) may have overwritten on-disk 3420 * refcount structures; in that case, the in-memory refcount information 3421 * will probably differ from the on-disk information which makes the BDS 3422 * unusable */ 3423 if (ret < 0) { 3424 goto fail_broken_refcounts; 3425 } 3426 3427 BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE); 3428 BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_UPDATE); 3429 3430 /* "Create" an empty reftable (one cluster) directly after the image 3431 * header and an empty L1 table three clusters after the image header; 3432 * the cluster between those two will be used as the first refblock */ 3433 l1_ofs_rt_ofs_cls.l1_offset = cpu_to_be64(3 * s->cluster_size); 3434 l1_ofs_rt_ofs_cls.reftable_offset = cpu_to_be64(s->cluster_size); 3435 l1_ofs_rt_ofs_cls.reftable_clusters = cpu_to_be32(1); 3436 ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_table_offset), 3437 &l1_ofs_rt_ofs_cls, sizeof(l1_ofs_rt_ofs_cls)); 3438 if (ret < 0) { 3439 goto fail_broken_refcounts; 3440 } 3441 3442 s->l1_table_offset = 3 * s->cluster_size; 3443 3444 new_reftable = g_try_new0(uint64_t, s->cluster_size / sizeof(uint64_t)); 3445 if (!new_reftable) { 3446 ret = -ENOMEM; 3447 goto fail_broken_refcounts; 3448 } 3449 3450 s->refcount_table_offset = s->cluster_size; 3451 s->refcount_table_size = s->cluster_size / sizeof(uint64_t); 3452 s->max_refcount_table_index = 0; 3453 3454 g_free(s->refcount_table); 3455 s->refcount_table = new_reftable; 3456 new_reftable = NULL; 3457 3458 /* Now the in-memory refcount information again corresponds to the on-disk 3459 * information (reftable is empty and no refblocks (the refblock cache is 3460 * empty)); however, this means some clusters (e.g. 
the image header) are 3461 * referenced, but not refcounted, but the normal qcow2 code assumes that 3462 * the in-memory information is always correct */ 3463 3464 BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC); 3465 3466 /* Enter the first refblock into the reftable */ 3467 rt_entry = cpu_to_be64(2 * s->cluster_size); 3468 ret = bdrv_pwrite_sync(bs->file, s->cluster_size, 3469 &rt_entry, sizeof(rt_entry)); 3470 if (ret < 0) { 3471 goto fail_broken_refcounts; 3472 } 3473 s->refcount_table[0] = 2 * s->cluster_size; 3474 3475 s->free_cluster_index = 0; 3476 assert(3 + l1_clusters <= s->refcount_block_size); 3477 offset = qcow2_alloc_clusters(bs, 3 * s->cluster_size + l1_size2); 3478 if (offset < 0) { 3479 ret = offset; 3480 goto fail_broken_refcounts; 3481 } else if (offset > 0) { 3482 error_report("First cluster in emptied image is in use"); 3483 abort(); 3484 } 3485 3486 /* Now finally the in-memory information corresponds to the on-disk 3487 * structures and is correct */ 3488 ret = qcow2_mark_clean(bs); 3489 if (ret < 0) { 3490 goto fail; 3491 } 3492 3493 ret = bdrv_truncate(bs->file, (3 + l1_clusters) * s->cluster_size, 3494 PREALLOC_MODE_OFF, &local_err); 3495 if (ret < 0) { 3496 error_report_err(local_err); 3497 goto fail; 3498 } 3499 3500 return 0; 3501 3502 fail_broken_refcounts: 3503 /* The BDS is unusable at this point. If we wanted to make it usable, we 3504 * would have to call qcow2_refcount_close(), qcow2_refcount_init(), 3505 * qcow2_check_refcounts(), qcow2_refcount_close() and qcow2_refcount_init() 3506 * again. However, because the functions which could have caused this error 3507 * path to be taken are used by those functions as well, it's very likely 3508 * that that sequence will fail as well. Therefore, just eject the BDS. */ 3509 bs->drv = NULL; 3510 3511 fail: 3512 g_free(new_reftable); 3513 return ret; 3514 } 3515 3516 static int qcow2_make_empty(BlockDriverState *bs) 3517 { 3518 BDRVQcow2State *s = bs->opaque; 3519 uint64_t offset, end_offset; 3520 int step = QEMU_ALIGN_DOWN(INT_MAX, s->cluster_size); 3521 int l1_clusters, ret = 0; 3522 3523 l1_clusters = DIV_ROUND_UP(s->l1_size, s->cluster_size / sizeof(uint64_t)); 3524 3525 if (s->qcow_version >= 3 && !s->snapshots && 3526 3 + l1_clusters <= s->refcount_block_size) { 3527 /* The following function only works for qcow2 v3 images (it requires 3528 * the dirty flag) and only as long as there are no snapshots (because 3529 * it completely empties the image). Furthermore, the L1 table and three 3530 * additional clusters (image header, refcount table, one refcount 3531 * block) have to fit inside one refcount block. */ 3532 return make_completely_empty(bs); 3533 } 3534 3535 /* This fallback code simply discards every active cluster; this is slow, 3536 * but works in all cases */ 3537 end_offset = bs->total_sectors * BDRV_SECTOR_SIZE; 3538 for (offset = 0; offset < end_offset; offset += step) { 3539 /* As this function is generally used after committing an external 3540 * snapshot, QCOW2_DISCARD_SNAPSHOT seems appropriate. Also, the 3541 * default action for this kind of discard is to pass the discard, 3542 * which will ideally result in an actually smaller image file, as 3543 * is probably desired. 
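 * (With 64 KiB clusters, step is INT_MAX rounded down to a cluster
 * boundary, i.e. 2147418112 bytes, so each loop iteration covers just
 * under 2 GiB of guest address space.)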
*/ 3544 ret = qcow2_cluster_discard(bs, offset, MIN(step, end_offset - offset), 3545 QCOW2_DISCARD_SNAPSHOT, true); 3546 if (ret < 0) { 3547 break; 3548 } 3549 } 3550 3551 return ret; 3552 } 3553 3554 static coroutine_fn int qcow2_co_flush_to_os(BlockDriverState *bs) 3555 { 3556 BDRVQcow2State *s = bs->opaque; 3557 int ret; 3558 3559 qemu_co_mutex_lock(&s->lock); 3560 ret = qcow2_cache_write(bs, s->l2_table_cache); 3561 if (ret < 0) { 3562 qemu_co_mutex_unlock(&s->lock); 3563 return ret; 3564 } 3565 3566 if (qcow2_need_accurate_refcounts(s)) { 3567 ret = qcow2_cache_write(bs, s->refcount_block_cache); 3568 if (ret < 0) { 3569 qemu_co_mutex_unlock(&s->lock); 3570 return ret; 3571 } 3572 } 3573 qemu_co_mutex_unlock(&s->lock); 3574 3575 return 0; 3576 } 3577 3578 static BlockMeasureInfo *qcow2_measure(QemuOpts *opts, BlockDriverState *in_bs, 3579 Error **errp) 3580 { 3581 Error *local_err = NULL; 3582 BlockMeasureInfo *info; 3583 uint64_t required = 0; /* bytes that contribute to required size */ 3584 uint64_t virtual_size; /* disk size as seen by guest */ 3585 uint64_t refcount_bits; 3586 uint64_t l2_tables; 3587 size_t cluster_size; 3588 int version; 3589 char *optstr; 3590 PreallocMode prealloc; 3591 bool has_backing_file; 3592 3593 /* Parse image creation options */ 3594 cluster_size = qcow2_opt_get_cluster_size_del(opts, &local_err); 3595 if (local_err) { 3596 goto err; 3597 } 3598 3599 version = qcow2_opt_get_version_del(opts, &local_err); 3600 if (local_err) { 3601 goto err; 3602 } 3603 3604 refcount_bits = qcow2_opt_get_refcount_bits_del(opts, version, &local_err); 3605 if (local_err) { 3606 goto err; 3607 } 3608 3609 optstr = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC); 3610 prealloc = qapi_enum_parse(PreallocMode_lookup, optstr, 3611 PREALLOC_MODE__MAX, PREALLOC_MODE_OFF, 3612 &local_err); 3613 g_free(optstr); 3614 if (local_err) { 3615 goto err; 3616 } 3617 3618 optstr = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE); 3619 has_backing_file = !!optstr; 3620 g_free(optstr); 3621 3622 virtual_size = align_offset(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0), 3623 cluster_size); 3624 3625 /* Check that virtual disk size is valid */ 3626 l2_tables = DIV_ROUND_UP(virtual_size / cluster_size, 3627 cluster_size / sizeof(uint64_t)); 3628 if (l2_tables * sizeof(uint64_t) > QCOW_MAX_L1_SIZE) { 3629 error_setg(&local_err, "The image size is too large " 3630 "(try using a larger cluster size)"); 3631 goto err; 3632 } 3633 3634 /* Account for input image */ 3635 if (in_bs) { 3636 int64_t ssize = bdrv_getlength(in_bs); 3637 if (ssize < 0) { 3638 error_setg_errno(&local_err, -ssize, 3639 "Unable to get image virtual_size"); 3640 goto err; 3641 } 3642 3643 virtual_size = align_offset(ssize, cluster_size); 3644 3645 if (has_backing_file) { 3646 /* We don't how much of the backing chain is shared by the input 3647 * image and the new image file. In the worst case the new image's 3648 * backing file has nothing in common with the input image. Be 3649 * conservative and assume all clusters need to be written. 
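 * This is the same upper bound that full and falloc preallocation use
 * further down: required then equals the aligned virtual size.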
3650 */ 3651 required = virtual_size; 3652 } else { 3653 int cluster_sectors = cluster_size / BDRV_SECTOR_SIZE; 3654 int64_t sector_num; 3655 int pnum = 0; 3656 3657 for (sector_num = 0; 3658 sector_num < ssize / BDRV_SECTOR_SIZE; 3659 sector_num += pnum) { 3660 int nb_sectors = MIN(ssize / BDRV_SECTOR_SIZE - sector_num, 3661 BDRV_REQUEST_MAX_SECTORS); 3662 BlockDriverState *file; 3663 int64_t ret; 3664 3665 ret = bdrv_get_block_status_above(in_bs, NULL, 3666 sector_num, nb_sectors, 3667 &pnum, &file); 3668 if (ret < 0) { 3669 error_setg_errno(&local_err, -ret, 3670 "Unable to get block status"); 3671 goto err; 3672 } 3673 3674 if (ret & BDRV_BLOCK_ZERO) { 3675 /* Skip zero regions (safe with no backing file) */ 3676 } else if ((ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED)) == 3677 (BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED)) { 3678 /* Extend pnum to end of cluster for next iteration */ 3679 pnum = ROUND_UP(sector_num + pnum, cluster_sectors) - 3680 sector_num; 3681 3682 /* Count clusters we've seen */ 3683 required += (sector_num % cluster_sectors + pnum) * 3684 BDRV_SECTOR_SIZE; 3685 } 3686 } 3687 } 3688 } 3689 3690 /* Take into account preallocation. Nothing special is needed for 3691 * PREALLOC_MODE_METADATA since metadata is always counted. 3692 */ 3693 if (prealloc == PREALLOC_MODE_FULL || prealloc == PREALLOC_MODE_FALLOC) { 3694 required = virtual_size; 3695 } 3696 3697 info = g_new(BlockMeasureInfo, 1); 3698 info->fully_allocated = 3699 qcow2_calc_prealloc_size(virtual_size, cluster_size, 3700 ctz32(refcount_bits)); 3701 3702 /* Remove data clusters that are not required. This overestimates the 3703 * required size because metadata needed for the fully allocated file is 3704 * still counted. 3705 */ 3706 info->required = info->fully_allocated - virtual_size + required; 3707 return info; 3708 3709 err: 3710 error_propagate(errp, local_err); 3711 return NULL; 3712 } 3713 3714 static int qcow2_get_info(BlockDriverState *bs, BlockDriverInfo *bdi) 3715 { 3716 BDRVQcow2State *s = bs->opaque; 3717 bdi->unallocated_blocks_are_zero = true; 3718 bdi->can_write_zeroes_with_unmap = (s->qcow_version >= 3); 3719 bdi->cluster_size = s->cluster_size; 3720 bdi->vm_state_offset = qcow2_vm_state_offset(s); 3721 return 0; 3722 } 3723 3724 static ImageInfoSpecific *qcow2_get_specific_info(BlockDriverState *bs) 3725 { 3726 BDRVQcow2State *s = bs->opaque; 3727 ImageInfoSpecific *spec_info; 3728 QCryptoBlockInfo *encrypt_info = NULL; 3729 3730 if (s->crypto != NULL) { 3731 encrypt_info = qcrypto_block_get_info(s->crypto, &error_abort); 3732 } 3733 3734 spec_info = g_new(ImageInfoSpecific, 1); 3735 *spec_info = (ImageInfoSpecific){ 3736 .type = IMAGE_INFO_SPECIFIC_KIND_QCOW2, 3737 .u.qcow2.data = g_new(ImageInfoSpecificQCow2, 1), 3738 }; 3739 if (s->qcow_version == 2) { 3740 *spec_info->u.qcow2.data = (ImageInfoSpecificQCow2){ 3741 .compat = g_strdup("0.10"), 3742 .refcount_bits = s->refcount_bits, 3743 }; 3744 } else if (s->qcow_version == 3) { 3745 *spec_info->u.qcow2.data = (ImageInfoSpecificQCow2){ 3746 .compat = g_strdup("1.1"), 3747 .lazy_refcounts = s->compatible_features & 3748 QCOW2_COMPAT_LAZY_REFCOUNTS, 3749 .has_lazy_refcounts = true, 3750 .corrupt = s->incompatible_features & 3751 QCOW2_INCOMPAT_CORRUPT, 3752 .has_corrupt = true, 3753 .refcount_bits = s->refcount_bits, 3754 }; 3755 } else { 3756 /* if this assertion fails, this probably means a new version was 3757 * added without having it covered here */ 3758 assert(false); 3759 } 3760 3761 if (encrypt_info) { 3762 
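/* Convert the generic QCryptoBlockInfo into the qcow2-specific QAPI
 * type. The union members are shallow-copied, so the copied pointers
 * are cleared in encrypt_info below before it is freed. */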
ImageInfoSpecificQCow2Encryption *qencrypt = 3763 g_new(ImageInfoSpecificQCow2Encryption, 1); 3764 switch (encrypt_info->format) { 3765 case Q_CRYPTO_BLOCK_FORMAT_QCOW: 3766 qencrypt->format = BLOCKDEV_QCOW2_ENCRYPTION_FORMAT_AES; 3767 qencrypt->u.aes = encrypt_info->u.qcow; 3768 break; 3769 case Q_CRYPTO_BLOCK_FORMAT_LUKS: 3770 qencrypt->format = BLOCKDEV_QCOW2_ENCRYPTION_FORMAT_LUKS; 3771 qencrypt->u.luks = encrypt_info->u.luks; 3772 break; 3773 default: 3774 abort(); 3775 } 3776 /* Since we did shallow copy above, erase any pointers 3777 * in the original info */ 3778 memset(&encrypt_info->u, 0, sizeof(encrypt_info->u)); 3779 qapi_free_QCryptoBlockInfo(encrypt_info); 3780 3781 spec_info->u.qcow2.data->has_encrypt = true; 3782 spec_info->u.qcow2.data->encrypt = qencrypt; 3783 } 3784 3785 return spec_info; 3786 } 3787 3788 static int qcow2_save_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, 3789 int64_t pos) 3790 { 3791 BDRVQcow2State *s = bs->opaque; 3792 3793 BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_SAVE); 3794 return bs->drv->bdrv_co_pwritev(bs, qcow2_vm_state_offset(s) + pos, 3795 qiov->size, qiov, 0); 3796 } 3797 3798 static int qcow2_load_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, 3799 int64_t pos) 3800 { 3801 BDRVQcow2State *s = bs->opaque; 3802 3803 BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_LOAD); 3804 return bs->drv->bdrv_co_preadv(bs, qcow2_vm_state_offset(s) + pos, 3805 qiov->size, qiov, 0); 3806 } 3807 3808 /* 3809 * Downgrades an image's version. To achieve this, any incompatible features 3810 * have to be removed. 3811 */ 3812 static int qcow2_downgrade(BlockDriverState *bs, int target_version, 3813 BlockDriverAmendStatusCB *status_cb, void *cb_opaque) 3814 { 3815 BDRVQcow2State *s = bs->opaque; 3816 int current_version = s->qcow_version; 3817 int ret; 3818 3819 if (target_version == current_version) { 3820 return 0; 3821 } else if (target_version > current_version) { 3822 return -EINVAL; 3823 } else if (target_version != 2) { 3824 return -EINVAL; 3825 } 3826 3827 if (s->refcount_order != 4) { 3828 error_report("compat=0.10 requires refcount_bits=16"); 3829 return -ENOTSUP; 3830 } 3831 3832 /* clear incompatible features */ 3833 if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) { 3834 ret = qcow2_mark_clean(bs); 3835 if (ret < 0) { 3836 return ret; 3837 } 3838 } 3839 3840 /* with QCOW2_INCOMPAT_CORRUPT, it is pretty much impossible to get here in 3841 * the first place; if that happens nonetheless, returning -ENOTSUP is the 3842 * best thing to do anyway */ 3843 3844 if (s->incompatible_features) { 3845 return -ENOTSUP; 3846 } 3847 3848 /* since we can ignore compatible features, we can set them to 0 as well */ 3849 s->compatible_features = 0; 3850 /* if lazy refcounts have been used, they have already been fixed through 3851 * clearing the dirty flag */ 3852 3853 /* clearing autoclear features is trivial */ 3854 s->autoclear_features = 0; 3855 3856 ret = qcow2_expand_zero_clusters(bs, status_cb, cb_opaque); 3857 if (ret < 0) { 3858 return ret; 3859 } 3860 3861 s->qcow_version = target_version; 3862 ret = qcow2_update_header(bs); 3863 if (ret < 0) { 3864 s->qcow_version = current_version; 3865 return ret; 3866 } 3867 return 0; 3868 } 3869 3870 typedef enum Qcow2AmendOperation { 3871 /* This is the value Qcow2AmendHelperCBInfo::last_operation will be 3872 * statically initialized to so that the helper CB can discern the first 3873 * invocation from an operation change */ 3874 QCOW2_NO_OPERATION = 0, 3875 3876 QCOW2_CHANGING_REFCOUNT_ORDER, 3877 QCOW2_DOWNGRADING, 3878 } 
typedef enum Qcow2AmendOperation {
    /* This is the value Qcow2AmendHelperCBInfo::last_operation will be
     * statically initialized to so that the helper CB can discern the first
     * invocation from an operation change */
    QCOW2_NO_OPERATION = 0,

    QCOW2_CHANGING_REFCOUNT_ORDER,
    QCOW2_DOWNGRADING,
} Qcow2AmendOperation;

typedef struct Qcow2AmendHelperCBInfo {
    /* The code coordinating the amend operations should only modify
     * these four fields; the rest will be managed by the CB */
    BlockDriverAmendStatusCB *original_status_cb;
    void *original_cb_opaque;

    Qcow2AmendOperation current_operation;

    /* Total number of operations to perform (only set once) */
    int total_operations;

    /* The following fields are managed by the CB */

    /* Number of operations completed */
    int operations_completed;

    /* Cumulative offset of all completed operations */
    int64_t offset_completed;

    Qcow2AmendOperation last_operation;
    int64_t last_work_size;
} Qcow2AmendHelperCBInfo;

static void qcow2_amend_helper_cb(BlockDriverState *bs,
                                  int64_t operation_offset,
                                  int64_t operation_work_size, void *opaque)
{
    Qcow2AmendHelperCBInfo *info = opaque;
    int64_t current_work_size;
    int64_t projected_work_size;

    if (info->current_operation != info->last_operation) {
        if (info->last_operation != QCOW2_NO_OPERATION) {
            info->offset_completed += info->last_work_size;
            info->operations_completed++;
        }

        info->last_operation = info->current_operation;
    }

    assert(info->total_operations > 0);
    assert(info->operations_completed < info->total_operations);

    info->last_work_size = operation_work_size;

    current_work_size = info->offset_completed + operation_work_size;

    /* current_work_size is the total work size for (operations_completed + 1)
     * operations (which includes this one), so multiply it by the number of
     * operations not covered and divide it by the number of operations
     * covered to get a projection for the operations not covered */
    projected_work_size = current_work_size * (info->total_operations -
                                               info->operations_completed - 1)
                                            / (info->operations_completed + 1);

    info->original_status_cb(bs, info->offset_completed + operation_offset,
                             current_work_size + projected_work_size,
                             info->original_cb_opaque);
}
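/*
 * Worked example for the projection above (illustrative numbers, editorial
 * addition): with total_operations == 2, while the first operation is running
 * with operation_work_size == 1000 and operation_offset == 400, we still have
 * offset_completed == 0 and operations_completed == 0, so
 * current_work_size == 1000 and projected_work_size == 1000 * (2 - 0 - 1) /
 * (0 + 1) == 1000; the caller's status callback therefore sees 400 out of an
 * estimated total of 2000.
 */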
static int qcow2_amend_options(BlockDriverState *bs, QemuOpts *opts,
                               BlockDriverAmendStatusCB *status_cb,
                               void *cb_opaque)
{
    BDRVQcow2State *s = bs->opaque;
    int old_version = s->qcow_version, new_version = old_version;
    uint64_t new_size = 0;
    const char *backing_file = NULL, *backing_format = NULL;
    bool lazy_refcounts = s->use_lazy_refcounts;
    const char *compat = NULL;
    uint64_t cluster_size = s->cluster_size;
    bool encrypt;
    int encformat;
    int refcount_bits = s->refcount_bits;
    Error *local_err = NULL;
    int ret;
    QemuOptDesc *desc = opts->list->desc;
    Qcow2AmendHelperCBInfo helper_cb_info;

    while (desc && desc->name) {
        if (!qemu_opt_find(opts, desc->name)) {
            /* only change explicitly defined options */
            desc++;
            continue;
        }

        if (!strcmp(desc->name, BLOCK_OPT_COMPAT_LEVEL)) {
            compat = qemu_opt_get(opts, BLOCK_OPT_COMPAT_LEVEL);
            if (!compat) {
                /* preserve default */
            } else if (!strcmp(compat, "0.10")) {
                new_version = 2;
            } else if (!strcmp(compat, "1.1")) {
                new_version = 3;
            } else {
                error_report("Unknown compatibility level %s", compat);
                return -EINVAL;
            }
        } else if (!strcmp(desc->name, BLOCK_OPT_PREALLOC)) {
            error_report("Cannot change preallocation mode");
            return -ENOTSUP;
        } else if (!strcmp(desc->name, BLOCK_OPT_SIZE)) {
            new_size = qemu_opt_get_size(opts, BLOCK_OPT_SIZE, 0);
        } else if (!strcmp(desc->name, BLOCK_OPT_BACKING_FILE)) {
            backing_file = qemu_opt_get(opts, BLOCK_OPT_BACKING_FILE);
        } else if (!strcmp(desc->name, BLOCK_OPT_BACKING_FMT)) {
            backing_format = qemu_opt_get(opts, BLOCK_OPT_BACKING_FMT);
        } else if (!strcmp(desc->name, BLOCK_OPT_ENCRYPT)) {
            encrypt = qemu_opt_get_bool(opts, BLOCK_OPT_ENCRYPT,
                                        !!s->crypto);

            if (encrypt != !!s->crypto) {
                error_report("Changing the encryption flag is not supported");
                return -ENOTSUP;
            }
        } else if (!strcmp(desc->name, BLOCK_OPT_ENCRYPT_FORMAT)) {
            encformat = qcow2_crypt_method_from_format(
                qemu_opt_get(opts, BLOCK_OPT_ENCRYPT_FORMAT));

            if (encformat != s->crypt_method_header) {
                error_report("Changing the encryption format is not supported");
                return -ENOTSUP;
            }
        } else if (!strcmp(desc->name, BLOCK_OPT_CLUSTER_SIZE)) {
            cluster_size = qemu_opt_get_size(opts, BLOCK_OPT_CLUSTER_SIZE,
                                             cluster_size);
            if (cluster_size != s->cluster_size) {
                error_report("Changing the cluster size is not supported");
                return -ENOTSUP;
            }
        } else if (!strcmp(desc->name, BLOCK_OPT_LAZY_REFCOUNTS)) {
            lazy_refcounts = qemu_opt_get_bool(opts, BLOCK_OPT_LAZY_REFCOUNTS,
                                               lazy_refcounts);
        } else if (!strcmp(desc->name, BLOCK_OPT_REFCOUNT_BITS)) {
            refcount_bits = qemu_opt_get_number(opts, BLOCK_OPT_REFCOUNT_BITS,
                                                refcount_bits);

            if (refcount_bits <= 0 || refcount_bits > 64 ||
                !is_power_of_2(refcount_bits))
            {
                error_report("Refcount width must be a power of two and may "
                             "not exceed 64 bits");
                return -EINVAL;
            }
        } else {
            /* if this point is reached, this probably means a new option was
             * added without having it covered here */
            abort();
        }

        desc++;
    }
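    /*
     * Illustrative invocation (editorial addition; option names come from the
     * qcow2_create_opts list at the end of this file): only options given
     * explicitly on the amend command line are picked up above, e.g.
     *
     *   qemu-img amend -o compat=1.1,lazy_refcounts=on,size=20G test.qcow2
     *
     * leaves the cluster size, backing file and refcount width unchanged.
     */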
greater)"); 4084 return -EINVAL; 4085 } 4086 s->compatible_features |= QCOW2_COMPAT_LAZY_REFCOUNTS; 4087 ret = qcow2_update_header(bs); 4088 if (ret < 0) { 4089 s->compatible_features &= ~QCOW2_COMPAT_LAZY_REFCOUNTS; 4090 return ret; 4091 } 4092 s->use_lazy_refcounts = true; 4093 } else { 4094 /* make image clean first */ 4095 ret = qcow2_mark_clean(bs); 4096 if (ret < 0) { 4097 return ret; 4098 } 4099 /* now disallow lazy refcounts */ 4100 s->compatible_features &= ~QCOW2_COMPAT_LAZY_REFCOUNTS; 4101 ret = qcow2_update_header(bs); 4102 if (ret < 0) { 4103 s->compatible_features |= QCOW2_COMPAT_LAZY_REFCOUNTS; 4104 return ret; 4105 } 4106 s->use_lazy_refcounts = false; 4107 } 4108 } 4109 4110 if (new_size) { 4111 BlockBackend *blk = blk_new(BLK_PERM_RESIZE, BLK_PERM_ALL); 4112 ret = blk_insert_bs(blk, bs, &local_err); 4113 if (ret < 0) { 4114 error_report_err(local_err); 4115 blk_unref(blk); 4116 return ret; 4117 } 4118 4119 ret = blk_truncate(blk, new_size, PREALLOC_MODE_OFF, &local_err); 4120 blk_unref(blk); 4121 if (ret < 0) { 4122 error_report_err(local_err); 4123 return ret; 4124 } 4125 } 4126 4127 /* Downgrade last (so unsupported features can be removed before) */ 4128 if (new_version < old_version) { 4129 helper_cb_info.current_operation = QCOW2_DOWNGRADING; 4130 ret = qcow2_downgrade(bs, new_version, &qcow2_amend_helper_cb, 4131 &helper_cb_info); 4132 if (ret < 0) { 4133 return ret; 4134 } 4135 } 4136 4137 return 0; 4138 } 4139 4140 /* 4141 * If offset or size are negative, respectively, they will not be included in 4142 * the BLOCK_IMAGE_CORRUPTED event emitted. 4143 * fatal will be ignored for read-only BDS; corruptions found there will always 4144 * be considered non-fatal. 4145 */ 4146 void qcow2_signal_corruption(BlockDriverState *bs, bool fatal, int64_t offset, 4147 int64_t size, const char *message_format, ...) 
/*
 * If offset or size are negative, respectively, they will not be included in
 * the BLOCK_IMAGE_CORRUPTED event emitted.
 * fatal will be ignored for read-only BDS; corruptions found there will always
 * be considered non-fatal.
 */
void qcow2_signal_corruption(BlockDriverState *bs, bool fatal, int64_t offset,
                             int64_t size, const char *message_format, ...)
{
    BDRVQcow2State *s = bs->opaque;
    const char *node_name;
    char *message;
    va_list ap;

    fatal = fatal && !bs->read_only;

    if (s->signaled_corruption &&
        (!fatal || (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT)))
    {
        return;
    }

    va_start(ap, message_format);
    message = g_strdup_vprintf(message_format, ap);
    va_end(ap);

    if (fatal) {
        fprintf(stderr, "qcow2: Marking image as corrupt: %s; further "
                "corruption events will be suppressed\n", message);
    } else {
        fprintf(stderr, "qcow2: Image is corrupt: %s; further non-fatal "
                "corruption events will be suppressed\n", message);
    }

    node_name = bdrv_get_node_name(bs);
    qapi_event_send_block_image_corrupted(bdrv_get_device_name(bs),
                                          *node_name != '\0', node_name,
                                          message, offset >= 0, offset,
                                          size >= 0, size,
                                          fatal, &error_abort);
    g_free(message);

    if (fatal) {
        qcow2_mark_corrupt(bs);
        bs->drv = NULL; /* make BDS unusable */
    }

    s->signaled_corruption = true;
}
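/*
 * Maintenance note (editorial observation, not part of the original
 * comments): qemu-img derives the amend option descriptors from this same
 * create option list, so options added below generally need a matching
 * branch in the option-walking loop of qcow2_amend_options() above; its
 * final else branch aborts on unhandled option names.
 */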
static QemuOptsList qcow2_create_opts = {
    .name = "qcow2-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(qcow2_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size"
        },
        {
            .name = BLOCK_OPT_COMPAT_LEVEL,
            .type = QEMU_OPT_STRING,
            .help = "Compatibility level (0.10 or 1.1)"
        },
        {
            .name = BLOCK_OPT_BACKING_FILE,
            .type = QEMU_OPT_STRING,
            .help = "File name of a base image"
        },
        {
            .name = BLOCK_OPT_BACKING_FMT,
            .type = QEMU_OPT_STRING,
            .help = "Image format of the base image"
        },
        {
            .name = BLOCK_OPT_ENCRYPT,
            .type = QEMU_OPT_BOOL,
            .help = "Encrypt the image with format 'aes'. (Deprecated "
                    "in favor of " BLOCK_OPT_ENCRYPT_FORMAT "=aes)",
        },
        {
            .name = BLOCK_OPT_ENCRYPT_FORMAT,
            .type = QEMU_OPT_STRING,
            .help = "Encrypt the image, format choices: 'aes', 'luks'",
        },
        BLOCK_CRYPTO_OPT_DEF_KEY_SECRET("encrypt.",
            "ID of secret providing qcow AES key or LUKS passphrase"),
        BLOCK_CRYPTO_OPT_DEF_LUKS_CIPHER_ALG("encrypt."),
        BLOCK_CRYPTO_OPT_DEF_LUKS_CIPHER_MODE("encrypt."),
        BLOCK_CRYPTO_OPT_DEF_LUKS_IVGEN_ALG("encrypt."),
        BLOCK_CRYPTO_OPT_DEF_LUKS_IVGEN_HASH_ALG("encrypt."),
        BLOCK_CRYPTO_OPT_DEF_LUKS_HASH_ALG("encrypt."),
        BLOCK_CRYPTO_OPT_DEF_LUKS_ITER_TIME("encrypt."),
        {
            .name = BLOCK_OPT_CLUSTER_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "qcow2 cluster size",
            .def_value_str = stringify(DEFAULT_CLUSTER_SIZE)
        },
        {
            .name = BLOCK_OPT_PREALLOC,
            .type = QEMU_OPT_STRING,
            .help = "Preallocation mode (allowed values: off, metadata, "
                    "falloc, full)"
        },
        {
            .name = BLOCK_OPT_LAZY_REFCOUNTS,
            .type = QEMU_OPT_BOOL,
            .help = "Postpone refcount updates",
            .def_value_str = "off"
        },
        {
            .name = BLOCK_OPT_REFCOUNT_BITS,
            .type = QEMU_OPT_NUMBER,
            .help = "Width of a reference count entry in bits",
            .def_value_str = "16"
        },
        { /* end of list */ }
    }
};

BlockDriver bdrv_qcow2 = {
    .format_name        = "qcow2",
    .instance_size      = sizeof(BDRVQcow2State),
    .bdrv_probe         = qcow2_probe,
    .bdrv_open          = qcow2_open,
    .bdrv_close         = qcow2_close,
    .bdrv_reopen_prepare  = qcow2_reopen_prepare,
    .bdrv_reopen_commit   = qcow2_reopen_commit,
    .bdrv_reopen_abort    = qcow2_reopen_abort,
    .bdrv_join_options    = qcow2_join_options,
    .bdrv_child_perm      = bdrv_format_default_perms,
    .bdrv_create        = qcow2_create,
    .bdrv_has_zero_init = bdrv_has_zero_init_1,
    .bdrv_co_get_block_status = qcow2_co_get_block_status,

    .bdrv_co_preadv         = qcow2_co_preadv,
    .bdrv_co_pwritev        = qcow2_co_pwritev,
    .bdrv_co_flush_to_os    = qcow2_co_flush_to_os,

    .bdrv_co_pwrite_zeroes  = qcow2_co_pwrite_zeroes,
    .bdrv_co_pdiscard       = qcow2_co_pdiscard,
    .bdrv_truncate          = qcow2_truncate,
    .bdrv_co_pwritev_compressed = qcow2_co_pwritev_compressed,
    .bdrv_make_empty        = qcow2_make_empty,

    .bdrv_snapshot_create   = qcow2_snapshot_create,
    .bdrv_snapshot_goto     = qcow2_snapshot_goto,
    .bdrv_snapshot_delete   = qcow2_snapshot_delete,
    .bdrv_snapshot_list     = qcow2_snapshot_list,
    .bdrv_snapshot_load_tmp = qcow2_snapshot_load_tmp,
    .bdrv_measure           = qcow2_measure,
    .bdrv_get_info          = qcow2_get_info,
    .bdrv_get_specific_info = qcow2_get_specific_info,

    .bdrv_save_vmstate    = qcow2_save_vmstate,
    .bdrv_load_vmstate    = qcow2_load_vmstate,

    .supports_backing           = true,
    .bdrv_change_backing_file   = qcow2_change_backing_file,

    .bdrv_refresh_limits        = qcow2_refresh_limits,
    .bdrv_invalidate_cache      = qcow2_invalidate_cache,
    .bdrv_inactivate            = qcow2_inactivate,

    .create_opts         = &qcow2_create_opts,
    .bdrv_check          = qcow2_check,
    .bdrv_amend_options  = qcow2_amend_options,

    .bdrv_detach_aio_context  = qcow2_detach_aio_context,
    .bdrv_attach_aio_context  = qcow2_attach_aio_context,

    .bdrv_reopen_bitmaps_rw = qcow2_reopen_bitmaps_rw,
    .bdrv_can_store_new_dirty_bitmap = qcow2_can_store_new_dirty_bitmap,
    .bdrv_remove_persistent_dirty_bitmap = qcow2_remove_persistent_dirty_bitmap,
};
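/*
 * Illustrative creation example (editorial addition; the option names are
 * the ones defined in qcow2_create_opts above):
 *
 *   qemu-img create -f qcow2 \
 *       -o compat=1.1,cluster_size=65536,lazy_refcounts=on test.qcow2 8G
 */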
static void bdrv_qcow2_init(void)
{
    bdrv_register(&bdrv_qcow2);
}

block_init(bdrv_qcow2_init);
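/*
 * Editorial note: block_init() arranges for bdrv_qcow2_init() to run during
 * QEMU's module initialization, registering "qcow2" with the block layer so
 * it can be selected by format probing or an explicit format=qcow2 option.
 */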