/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"

#define ZLIB_CONST
#include <zlib.h>

#include "block/block_int.h"
#include "block/qdict.h"
#include "sysemu/block-backend.h"
#include "qemu/module.h"
#include "qcow2.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qapi/qapi-events-block-core.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"
#include "trace.h"
#include "qemu/option_int.h"
#include "qemu/cutils.h"
#include "qemu/bswap.h"
#include "qapi/qobject-input-visitor.h"
#include "qapi/qapi-visit-block-core.h"
#include "crypto.h"
#include "block/thread-pool.h"

/*
  Differences with QCOW:

  - Support for multiple incremental snapshots.
  - Memory management by reference counts.
  - Clusters which have a reference count of one have the bit
    QCOW_OFLAG_COPIED to optimize write performance.
  - Size of compressed clusters is stored in sectors to reduce bit usage
    in the cluster offsets.
  - Support for storing additional data (such as the VM state) in the
    snapshots.
  - If a backing store is used, the cluster size is not constrained
    (could be backported to QCOW).
  - L2 tables have always a size of one cluster.
*/
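
/*
 * Header extensions are stored after the qcow2 header (and before the backing
 * file name, if any) as a sequence of big-endian (magic, length) pairs, each
 * followed by 'length' bytes of data padded to the next multiple of 8 bytes;
 * qcow2_read_extensions() below walks this list.
 */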

typedef struct {
    uint32_t magic;
    uint32_t len;
} QEMU_PACKED QCowExtension;

#define QCOW2_EXT_MAGIC_END 0
#define QCOW2_EXT_MAGIC_BACKING_FORMAT 0xE2792ACA
#define QCOW2_EXT_MAGIC_FEATURE_TABLE 0x6803f857
#define QCOW2_EXT_MAGIC_CRYPTO_HEADER 0x0537be77
#define QCOW2_EXT_MAGIC_BITMAPS 0x23852875

static int qcow2_probe(const uint8_t *buf, int buf_size, const char *filename)
{
    const QCowHeader *cow_header = (const void *)buf;

    if (buf_size >= sizeof(QCowHeader) &&
        be32_to_cpu(cow_header->magic) == QCOW_MAGIC &&
        be32_to_cpu(cow_header->version) >= 2)
        return 100;
    else
        return 0;
}


static ssize_t qcow2_crypto_hdr_read_func(QCryptoBlock *block, size_t offset,
                                          uint8_t *buf, size_t buflen,
                                          void *opaque, Error **errp)
{
    BlockDriverState *bs = opaque;
    BDRVQcow2State *s = bs->opaque;
    ssize_t ret;

    if ((offset + buflen) > s->crypto_header.length) {
        error_setg(errp, "Request for data outside of extension header");
        return -1;
    }

    ret = bdrv_pread(bs->file,
                     s->crypto_header.offset + offset, buf, buflen);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not read encryption header");
        return -1;
    }
    return ret;
}


static ssize_t qcow2_crypto_hdr_init_func(QCryptoBlock *block, size_t headerlen,
                                          void *opaque, Error **errp)
{
    BlockDriverState *bs = opaque;
    BDRVQcow2State *s = bs->opaque;
    int64_t ret;
    int64_t clusterlen;

    ret = qcow2_alloc_clusters(bs, headerlen);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "Cannot allocate cluster for LUKS header size %zu",
                         headerlen);
        return -1;
    }

    s->crypto_header.length = headerlen;
    s->crypto_header.offset = ret;

    /* Zero fill remaining space in cluster so it has predictable
     * content in case of future spec changes */
    clusterlen = size_to_clusters(s, headerlen) * s->cluster_size;
    assert(qcow2_pre_write_overlap_check(bs, 0, ret, clusterlen) == 0);
    ret = bdrv_pwrite_zeroes(bs->file,
                             ret + headerlen,
                             clusterlen - headerlen, 0);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not zero fill encryption header");
        return -1;
    }

    return ret;
}


static ssize_t qcow2_crypto_hdr_write_func(QCryptoBlock *block, size_t offset,
                                           const uint8_t *buf, size_t buflen,
                                           void *opaque, Error **errp)
{
    BlockDriverState *bs = opaque;
    BDRVQcow2State *s = bs->opaque;
    ssize_t ret;

    if ((offset + buflen) > s->crypto_header.length) {
        error_setg(errp, "Request for data outside of extension header");
        return -1;
    }

    ret = bdrv_pwrite(bs->file,
                      s->crypto_header.offset + offset, buf, buflen);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not write encryption header");
        return -1;
    }
    return ret;
}
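
/*
 * The three callbacks above give the generic crypto layer access to the
 * cluster(s) reserved for the LUKS header: the read callback is passed to
 * qcrypto_block_open() when the CRYPTO header extension is parsed below,
 * while the init and write callbacks are used when formatting a new
 * encrypted image (in the image creation path, not shown in this excerpt).
 */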

/*
 * Read qcow2 extensions and fill bs.
 * Start reading from start_offset;
 * finish reading upon a magic of value 0 or when end_offset is reached.
 * Unknown magics are skipped (future extensions this version knows nothing about).
 * Return 0 upon success, non-0 otherwise.
 */
static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset,
                                 uint64_t end_offset, void **p_feature_table,
                                 int flags, bool *need_update_header,
                                 Error **errp)
{
    BDRVQcow2State *s = bs->opaque;
    QCowExtension ext;
    uint64_t offset;
    int ret;
    Qcow2BitmapHeaderExt bitmaps_ext;

    if (need_update_header != NULL) {
        *need_update_header = false;
    }

#ifdef DEBUG_EXT
    printf("qcow2_read_extensions: start=%ld end=%ld\n", start_offset, end_offset);
#endif
    offset = start_offset;
    while (offset < end_offset) {

#ifdef DEBUG_EXT
        /* Sanity check */
        if (offset > s->cluster_size)
            printf("qcow2_read_extension: suspicious offset %lu\n", offset);

        printf("attempting to read extended header in offset %lu\n", offset);
#endif

        ret = bdrv_pread(bs->file, offset, &ext, sizeof(ext));
        if (ret < 0) {
            error_setg_errno(errp, -ret, "qcow2_read_extension: ERROR: "
                             "pread fail from offset %" PRIu64, offset);
            return 1;
        }
        be32_to_cpus(&ext.magic);
        be32_to_cpus(&ext.len);
        offset += sizeof(ext);
#ifdef DEBUG_EXT
        printf("ext.magic = 0x%x\n", ext.magic);
#endif
        if (offset > end_offset || ext.len > end_offset - offset) {
            error_setg(errp, "Header extension too large");
            return -EINVAL;
        }

        switch (ext.magic) {
        case QCOW2_EXT_MAGIC_END:
            return 0;

        case QCOW2_EXT_MAGIC_BACKING_FORMAT:
            if (ext.len >= sizeof(bs->backing_format)) {
                error_setg(errp, "ERROR: ext_backing_format: len=%" PRIu32
                           " too large (>=%zu)", ext.len,
                           sizeof(bs->backing_format));
                return 2;
            }
            ret = bdrv_pread(bs->file, offset, bs->backing_format, ext.len);
            if (ret < 0) {
                error_setg_errno(errp, -ret, "ERROR: ext_backing_format: "
                                 "Could not read format name");
                return 3;
            }
            bs->backing_format[ext.len] = '\0';
            s->image_backing_format = g_strdup(bs->backing_format);
#ifdef DEBUG_EXT
            printf("Qcow2: Got format extension %s\n", bs->backing_format);
#endif
            break;

        case QCOW2_EXT_MAGIC_FEATURE_TABLE:
            if (p_feature_table != NULL) {
                void *feature_table = g_malloc0(ext.len + 2 * sizeof(Qcow2Feature));
                ret = bdrv_pread(bs->file, offset, feature_table, ext.len);
                if (ret < 0) {
                    error_setg_errno(errp, -ret, "ERROR: ext_feature_table: "
                                     "Could not read table");
                    return ret;
                }

                *p_feature_table = feature_table;
            }
            break;

        case QCOW2_EXT_MAGIC_CRYPTO_HEADER: {
            unsigned int cflags = 0;
            if (s->crypt_method_header != QCOW_CRYPT_LUKS) {
                error_setg(errp, "CRYPTO header extension only "
                           "expected with LUKS encryption method");
                return -EINVAL;
            }
            if (ext.len != sizeof(Qcow2CryptoHeaderExtension)) {
                error_setg(errp, "CRYPTO header extension size %u, "
                           "but expected size %zu", ext.len,
                           sizeof(Qcow2CryptoHeaderExtension));
                return -EINVAL;
            }

            ret = bdrv_pread(bs->file, offset, &s->crypto_header, ext.len);
            if (ret < 0) {
                error_setg_errno(errp, -ret,
                                 "Unable to read CRYPTO header extension");
                return ret;
            }
            be64_to_cpus(&s->crypto_header.offset);
            be64_to_cpus(&s->crypto_header.length);

            if ((s->crypto_header.offset % s->cluster_size) != 0) {
                error_setg(errp, "Encryption header offset '%" PRIu64 "' is "
                           "not a multiple of cluster size '%u'",
                           s->crypto_header.offset, s->cluster_size);
                return -EINVAL;
            }

            if (flags & BDRV_O_NO_IO) {
                cflags |= QCRYPTO_BLOCK_OPEN_NO_IO;
            }
            s->crypto = qcrypto_block_open(s->crypto_opts, "encrypt.",
                                           qcow2_crypto_hdr_read_func,
                                           bs, cflags, errp);
            if (!s->crypto) {
                return -EINVAL;
            }
        }   break;

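        /*
         * The bitmaps extension describes persistent dirty bitmaps.  Its
         * validity is tied to the QCOW2_AUTOCLEAR_BITMAPS autoclear flag:
         * writers that do not understand persistent bitmaps clear unknown
         * autoclear bits, so if the flag is missing here the bitmap data may
         * be stale and is only reported, not loaded.
         */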
extension length"); 307 return -EINVAL; 308 } 309 310 if (!(s->autoclear_features & QCOW2_AUTOCLEAR_BITMAPS)) { 311 if (s->qcow_version < 3) { 312 /* Let's be a bit more specific */ 313 warn_report("This qcow2 v2 image contains bitmaps, but " 314 "they may have been modified by a program " 315 "without persistent bitmap support; so now " 316 "they must all be considered inconsistent"); 317 } else { 318 warn_report("a program lacking bitmap support " 319 "modified this file, so all bitmaps are now " 320 "considered inconsistent"); 321 } 322 error_printf("Some clusters may be leaked, " 323 "run 'qemu-img check -r' on the image " 324 "file to fix."); 325 if (need_update_header != NULL) { 326 /* Updating is needed to drop invalid bitmap extension. */ 327 *need_update_header = true; 328 } 329 break; 330 } 331 332 ret = bdrv_pread(bs->file, offset, &bitmaps_ext, ext.len); 333 if (ret < 0) { 334 error_setg_errno(errp, -ret, "bitmaps_ext: " 335 "Could not read ext header"); 336 return ret; 337 } 338 339 if (bitmaps_ext.reserved32 != 0) { 340 error_setg_errno(errp, -ret, "bitmaps_ext: " 341 "Reserved field is not zero"); 342 return -EINVAL; 343 } 344 345 be32_to_cpus(&bitmaps_ext.nb_bitmaps); 346 be64_to_cpus(&bitmaps_ext.bitmap_directory_size); 347 be64_to_cpus(&bitmaps_ext.bitmap_directory_offset); 348 349 if (bitmaps_ext.nb_bitmaps > QCOW2_MAX_BITMAPS) { 350 error_setg(errp, 351 "bitmaps_ext: Image has %" PRIu32 " bitmaps, " 352 "exceeding the QEMU supported maximum of %d", 353 bitmaps_ext.nb_bitmaps, QCOW2_MAX_BITMAPS); 354 return -EINVAL; 355 } 356 357 if (bitmaps_ext.nb_bitmaps == 0) { 358 error_setg(errp, "found bitmaps extension with zero bitmaps"); 359 return -EINVAL; 360 } 361 362 if (bitmaps_ext.bitmap_directory_offset & (s->cluster_size - 1)) { 363 error_setg(errp, "bitmaps_ext: " 364 "invalid bitmap directory offset"); 365 return -EINVAL; 366 } 367 368 if (bitmaps_ext.bitmap_directory_size > 369 QCOW2_MAX_BITMAP_DIRECTORY_SIZE) { 370 error_setg(errp, "bitmaps_ext: " 371 "bitmap directory size (%" PRIu64 ") exceeds " 372 "the maximum supported size (%d)", 373 bitmaps_ext.bitmap_directory_size, 374 QCOW2_MAX_BITMAP_DIRECTORY_SIZE); 375 return -EINVAL; 376 } 377 378 s->nb_bitmaps = bitmaps_ext.nb_bitmaps; 379 s->bitmap_directory_offset = 380 bitmaps_ext.bitmap_directory_offset; 381 s->bitmap_directory_size = 382 bitmaps_ext.bitmap_directory_size; 383 384 #ifdef DEBUG_EXT 385 printf("Qcow2: Got bitmaps extension: " 386 "offset=%" PRIu64 " nb_bitmaps=%" PRIu32 "\n", 387 s->bitmap_directory_offset, s->nb_bitmaps); 388 #endif 389 break; 390 391 default: 392 /* unknown magic - save it in case we need to rewrite the header */ 393 /* If you add a new feature, make sure to also update the fast 394 * path of qcow2_make_empty() to deal with it. 
        default:
            /* unknown magic - save it in case we need to rewrite the header */
            /* If you add a new feature, make sure to also update the fast
             * path of qcow2_make_empty() to deal with it. */
            {
                Qcow2UnknownHeaderExtension *uext;

                uext = g_malloc0(sizeof(*uext) + ext.len);
                uext->magic = ext.magic;
                uext->len = ext.len;
                QLIST_INSERT_HEAD(&s->unknown_header_ext, uext, next);

                ret = bdrv_pread(bs->file, offset, uext->data, uext->len);
                if (ret < 0) {
                    error_setg_errno(errp, -ret, "ERROR: unknown extension: "
                                     "Could not read data");
                    return ret;
                }
            }
            break;
        }

        offset += ((ext.len + 7) & ~7);
    }

    return 0;
}

static void cleanup_unknown_header_ext(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;
    Qcow2UnknownHeaderExtension *uext, *next;

    QLIST_FOREACH_SAFE(uext, &s->unknown_header_ext, next, next) {
        QLIST_REMOVE(uext, next);
        g_free(uext);
    }
}

static void report_unsupported_feature(Error **errp, Qcow2Feature *table,
                                       uint64_t mask)
{
    char *features = g_strdup("");
    char *old;

    while (table && table->name[0] != '\0') {
        if (table->type == QCOW2_FEAT_TYPE_INCOMPATIBLE) {
            if (mask & (1ULL << table->bit)) {
                old = features;
                features = g_strdup_printf("%s%s%.46s", old, *old ? ", " : "",
                                           table->name);
                g_free(old);
                mask &= ~(1ULL << table->bit);
            }
        }
        table++;
    }

    if (mask) {
        old = features;
        features = g_strdup_printf("%s%sUnknown incompatible feature: %" PRIx64,
                                   old, *old ? ", " : "", mask);
        g_free(old);
    }

    error_setg(errp, "Unsupported qcow2 feature(s): %s", features);
    g_free(features);
}

/*
 * Sets the dirty bit and flushes afterwards if necessary.
 *
 * The incompatible_features bit is only set if the image file header was
 * updated successfully. Therefore it is not required to check the return
 * value of this function.
 */
int qcow2_mark_dirty(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t val;
    int ret;

    assert(s->qcow_version >= 3);

    if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) {
        return 0; /* already dirty */
    }

    val = cpu_to_be64(s->incompatible_features | QCOW2_INCOMPAT_DIRTY);
    ret = bdrv_pwrite(bs->file, offsetof(QCowHeader, incompatible_features),
                      &val, sizeof(val));
    if (ret < 0) {
        return ret;
    }
    ret = bdrv_flush(bs->file->bs);
    if (ret < 0) {
        return ret;
    }

    /* Only treat image as dirty if the header was updated successfully */
    s->incompatible_features |= QCOW2_INCOMPAT_DIRTY;
    return 0;
}

/*
 * Clears the dirty bit and flushes before if necessary. Only call this
 * function when there are no pending requests, it does not guard against
 * concurrent requests dirtying the image.
 */
static int qcow2_mark_clean(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;

    if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) {
        int ret;

        s->incompatible_features &= ~QCOW2_INCOMPAT_DIRTY;

        ret = qcow2_flush_caches(bs);
        if (ret < 0) {
            return ret;
        }

        return qcow2_update_header(bs);
    }
    return 0;
}

/*
 * Marks the image as corrupt.
 */
int qcow2_mark_corrupt(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;

    s->incompatible_features |= QCOW2_INCOMPAT_CORRUPT;
    return qcow2_update_header(bs);
}
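
/*
 * Note the difference between the two flags handled above and below: the
 * dirty bit only means that refcounts may be out of date and is repaired
 * automatically on the next read/write open (see qcow2_do_open()), while the
 * corrupt bit marks metadata damage and blocks read/write opens until a
 * repair ('qemu-img check -r') clears it via qcow2_mark_consistent().
 */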
/*
 * Marks the image as consistent, i.e., unsets the corrupt bit, and flushes
 * before if necessary.
 */
int qcow2_mark_consistent(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;

    if (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT) {
        int ret = qcow2_flush_caches(bs);
        if (ret < 0) {
            return ret;
        }

        s->incompatible_features &= ~QCOW2_INCOMPAT_CORRUPT;
        return qcow2_update_header(bs);
    }
    return 0;
}

static int coroutine_fn qcow2_co_check_locked(BlockDriverState *bs,
                                              BdrvCheckResult *result,
                                              BdrvCheckMode fix)
{
    int ret = qcow2_check_refcounts(bs, result, fix);
    if (ret < 0) {
        return ret;
    }

    if (fix && result->check_errors == 0 && result->corruptions == 0) {
        ret = qcow2_mark_clean(bs);
        if (ret < 0) {
            return ret;
        }
        return qcow2_mark_consistent(bs);
    }
    return ret;
}

static int coroutine_fn qcow2_co_check(BlockDriverState *bs,
                                       BdrvCheckResult *result,
                                       BdrvCheckMode fix)
{
    BDRVQcow2State *s = bs->opaque;
    int ret;

    qemu_co_mutex_lock(&s->lock);
    ret = qcow2_co_check_locked(bs, result, fix);
    qemu_co_mutex_unlock(&s->lock);
    return ret;
}

int qcow2_validate_table(BlockDriverState *bs, uint64_t offset,
                         uint64_t entries, size_t entry_len,
                         int64_t max_size_bytes, const char *table_name,
                         Error **errp)
{
    BDRVQcow2State *s = bs->opaque;

    if (entries > max_size_bytes / entry_len) {
        error_setg(errp, "%s too large", table_name);
        return -EFBIG;
    }

    /* Use signed INT64_MAX as the maximum even for uint64_t header fields,
     * because values will be passed to qemu functions taking int64_t. */
    if ((INT64_MAX - entries * entry_len < offset) ||
        (offset_into_cluster(s, offset) != 0)) {
        error_setg(errp, "%s offset invalid", table_name);
        return -EINVAL;
    }

    return 0;
}
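
/*
 * The runtime options below are consumed from the -drive/-blockdev options
 * of a qcow2 node; the QCOW2_OPT_* name strings are defined in qcow2.h.
 * For example (assuming the usual option spellings), something like
 *   -drive file=disk.qcow2,format=qcow2,l2-cache-size=16M,overlap-check=all
 * ends up being parsed by qcow2_update_options_prepare() further down.
 */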
static QemuOptsList qcow2_runtime_opts = {
    .name = "qcow2",
    .head = QTAILQ_HEAD_INITIALIZER(qcow2_runtime_opts.head),
    .desc = {
        {
            .name = QCOW2_OPT_LAZY_REFCOUNTS,
            .type = QEMU_OPT_BOOL,
            .help = "Postpone refcount updates",
        },
        {
            .name = QCOW2_OPT_DISCARD_REQUEST,
            .type = QEMU_OPT_BOOL,
            .help = "Pass guest discard requests to the layer below",
        },
        {
            .name = QCOW2_OPT_DISCARD_SNAPSHOT,
            .type = QEMU_OPT_BOOL,
            .help = "Generate discard requests when snapshot related space "
                    "is freed",
        },
        {
            .name = QCOW2_OPT_DISCARD_OTHER,
            .type = QEMU_OPT_BOOL,
            .help = "Generate discard requests when other clusters are freed",
        },
        {
            .name = QCOW2_OPT_OVERLAP,
            .type = QEMU_OPT_STRING,
            .help = "Selects which overlap checks to perform from a range of "
                    "templates (none, constant, cached, all)",
        },
        {
            .name = QCOW2_OPT_OVERLAP_TEMPLATE,
            .type = QEMU_OPT_STRING,
            .help = "Selects which overlap checks to perform from a range of "
                    "templates (none, constant, cached, all)",
        },
        {
            .name = QCOW2_OPT_OVERLAP_MAIN_HEADER,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into the main qcow2 header",
        },
        {
            .name = QCOW2_OPT_OVERLAP_ACTIVE_L1,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into the active L1 table",
        },
        {
            .name = QCOW2_OPT_OVERLAP_ACTIVE_L2,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into an active L2 table",
        },
        {
            .name = QCOW2_OPT_OVERLAP_REFCOUNT_TABLE,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into the refcount table",
        },
        {
            .name = QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into a refcount block",
        },
        {
            .name = QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into the snapshot table",
        },
        {
            .name = QCOW2_OPT_OVERLAP_INACTIVE_L1,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into an inactive L1 table",
        },
        {
            .name = QCOW2_OPT_OVERLAP_INACTIVE_L2,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into an inactive L2 table",
        },
        {
            .name = QCOW2_OPT_OVERLAP_BITMAP_DIRECTORY,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into the bitmap directory",
        },
        {
            .name = QCOW2_OPT_CACHE_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Maximum combined metadata (L2 tables and refcount blocks) "
                    "cache size",
        },
        {
            .name = QCOW2_OPT_L2_CACHE_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Maximum L2 table cache size",
        },
        {
            .name = QCOW2_OPT_L2_CACHE_ENTRY_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Size of each entry in the L2 cache",
        },
        {
            .name = QCOW2_OPT_REFCOUNT_CACHE_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Maximum refcount block cache size",
        },
        {
            .name = QCOW2_OPT_CACHE_CLEAN_INTERVAL,
            .type = QEMU_OPT_NUMBER,
            .help = "Clean unused cache entries after this time (in seconds)",
        },
        BLOCK_CRYPTO_OPT_DEF_KEY_SECRET("encrypt.",
            "ID of secret providing qcow2 AES key or LUKS passphrase"),
        { /* end of list */ }
    },
};

static const char *overlap_bool_option_names[QCOW2_OL_MAX_BITNR] = {
    [QCOW2_OL_MAIN_HEADER_BITNR]      = QCOW2_OPT_OVERLAP_MAIN_HEADER,
    [QCOW2_OL_ACTIVE_L1_BITNR]        = QCOW2_OPT_OVERLAP_ACTIVE_L1,
    [QCOW2_OL_ACTIVE_L2_BITNR]        = QCOW2_OPT_OVERLAP_ACTIVE_L2,
    [QCOW2_OL_REFCOUNT_TABLE_BITNR]   = QCOW2_OPT_OVERLAP_REFCOUNT_TABLE,
    [QCOW2_OL_REFCOUNT_BLOCK_BITNR]   = QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK,
    [QCOW2_OL_SNAPSHOT_TABLE_BITNR]   = QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE,
    [QCOW2_OL_INACTIVE_L1_BITNR]      = QCOW2_OPT_OVERLAP_INACTIVE_L1,
    [QCOW2_OL_INACTIVE_L2_BITNR]      = QCOW2_OPT_OVERLAP_INACTIVE_L2,
    [QCOW2_OL_BITMAP_DIRECTORY_BITNR] = QCOW2_OPT_OVERLAP_BITMAP_DIRECTORY,
};
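
/*
 * The array above maps each QCOW2_OL_* bit to its boolean option name.  The
 * overlap-check template ("none", "constant", "cached" or "all") only seeds
 * the default for every bit; each bit can still be overridden individually
 * via those options (see the loop in qcow2_update_options_prepare()).
 */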
static void cache_clean_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;
    BDRVQcow2State *s = bs->opaque;
    qcow2_cache_clean_unused(s->l2_table_cache);
    qcow2_cache_clean_unused(s->refcount_block_cache);
    timer_mod(s->cache_clean_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
              (int64_t) s->cache_clean_interval * 1000);
}

static void cache_clean_timer_init(BlockDriverState *bs, AioContext *context)
{
    BDRVQcow2State *s = bs->opaque;
    if (s->cache_clean_interval > 0) {
        s->cache_clean_timer = aio_timer_new(context, QEMU_CLOCK_VIRTUAL,
                                             SCALE_MS, cache_clean_timer_cb,
                                             bs);
        timer_mod(s->cache_clean_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                  (int64_t) s->cache_clean_interval * 1000);
    }
}

static void cache_clean_timer_del(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;
    if (s->cache_clean_timer) {
        timer_del(s->cache_clean_timer);
        timer_free(s->cache_clean_timer);
        s->cache_clean_timer = NULL;
    }
}

static void qcow2_detach_aio_context(BlockDriverState *bs)
{
    cache_clean_timer_del(bs);
}

static void qcow2_attach_aio_context(BlockDriverState *bs,
                                     AioContext *new_context)
{
    cache_clean_timer_init(bs, new_context);
}

static void read_cache_sizes(BlockDriverState *bs, QemuOpts *opts,
                             uint64_t *l2_cache_size,
                             uint64_t *l2_cache_entry_size,
                             uint64_t *refcount_cache_size, Error **errp)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t combined_cache_size, l2_cache_max_setting;
    bool l2_cache_size_set, refcount_cache_size_set, combined_cache_size_set;
    int min_refcount_cache = MIN_REFCOUNT_CACHE_SIZE * s->cluster_size;
    uint64_t virtual_disk_size = bs->total_sectors * BDRV_SECTOR_SIZE;
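    /*
     * Each L2 table entry is 8 bytes and maps one cluster, so mapping the
     * whole virtual disk needs virtual_disk_size / (cluster_size / 8) bytes
     * of L2 tables; e.g. with the 64 KiB default cluster size, 1 MiB of L2
     * cache is enough for 8 GiB of guest data.
     */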
    uint64_t max_l2_cache = virtual_disk_size / (s->cluster_size / 8);

    combined_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_CACHE_SIZE);
    l2_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_L2_CACHE_SIZE);
    refcount_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_REFCOUNT_CACHE_SIZE);

    combined_cache_size = qemu_opt_get_size(opts, QCOW2_OPT_CACHE_SIZE, 0);
    l2_cache_max_setting = qemu_opt_get_size(opts, QCOW2_OPT_L2_CACHE_SIZE,
                                             DEFAULT_L2_CACHE_MAX_SIZE);
    *refcount_cache_size = qemu_opt_get_size(opts,
                                             QCOW2_OPT_REFCOUNT_CACHE_SIZE, 0);

    *l2_cache_entry_size = qemu_opt_get_size(
        opts, QCOW2_OPT_L2_CACHE_ENTRY_SIZE, s->cluster_size);

    *l2_cache_size = MIN(max_l2_cache, l2_cache_max_setting);

    if (combined_cache_size_set) {
        if (l2_cache_size_set && refcount_cache_size_set) {
            error_setg(errp, QCOW2_OPT_CACHE_SIZE ", " QCOW2_OPT_L2_CACHE_SIZE
                       " and " QCOW2_OPT_REFCOUNT_CACHE_SIZE " may not be set "
                       "at the same time");
            return;
        } else if (l2_cache_size_set &&
                   (l2_cache_max_setting > combined_cache_size)) {
            error_setg(errp, QCOW2_OPT_L2_CACHE_SIZE " may not exceed "
                       QCOW2_OPT_CACHE_SIZE);
            return;
        } else if (*refcount_cache_size > combined_cache_size) {
            error_setg(errp, QCOW2_OPT_REFCOUNT_CACHE_SIZE " may not exceed "
                       QCOW2_OPT_CACHE_SIZE);
            return;
        }

        if (l2_cache_size_set) {
            *refcount_cache_size = combined_cache_size - *l2_cache_size;
        } else if (refcount_cache_size_set) {
            *l2_cache_size = combined_cache_size - *refcount_cache_size;
        } else {
            /* Assign as much memory as possible to the L2 cache, and
             * use the remainder for the refcount cache */
            if (combined_cache_size >= max_l2_cache + min_refcount_cache) {
                *l2_cache_size = max_l2_cache;
                *refcount_cache_size = combined_cache_size - *l2_cache_size;
            } else {
                *refcount_cache_size =
                    MIN(combined_cache_size, min_refcount_cache);
                *l2_cache_size = combined_cache_size - *refcount_cache_size;
            }
        }
    }
    /* l2_cache_size and refcount_cache_size are ensured to have at least
     * their minimum values in qcow2_update_options_prepare() */

    if (*l2_cache_entry_size < (1 << MIN_CLUSTER_BITS) ||
        *l2_cache_entry_size > s->cluster_size ||
        !is_power_of_2(*l2_cache_entry_size)) {
        error_setg(errp, "L2 cache entry size must be a power of two "
                   "between %d and the cluster size (%d)",
                   1 << MIN_CLUSTER_BITS, s->cluster_size);
        return;
    }
}

typedef struct Qcow2ReopenState {
    Qcow2Cache *l2_table_cache;
    Qcow2Cache *refcount_block_cache;
    int l2_slice_size; /* Number of entries in a slice of the L2 table */
    bool use_lazy_refcounts;
    int overlap_check;
    bool discard_passthrough[QCOW2_DISCARD_MAX];
    uint64_t cache_clean_interval;
    QCryptoBlockOpenOptions *crypto_opts; /* Disk encryption runtime options */
} Qcow2ReopenState;

static int qcow2_update_options_prepare(BlockDriverState *bs,
                                        Qcow2ReopenState *r,
                                        QDict *options, int flags,
                                        Error **errp)
{
    BDRVQcow2State *s = bs->opaque;
    QemuOpts *opts = NULL;
    const char *opt_overlap_check, *opt_overlap_check_template;
    int overlap_check_template = 0;
    uint64_t l2_cache_size, l2_cache_entry_size, refcount_cache_size;
    int i;
    const char *encryptfmt;
    QDict *encryptopts = NULL;
    Error *local_err = NULL;
    int ret;

    qdict_extract_subqdict(options, &encryptopts, "encrypt.");
    encryptfmt = qdict_get_try_str(encryptopts, "format");

    opts = qemu_opts_create(&qcow2_runtime_opts, NULL, 0, &error_abort);
    qemu_opts_absorb_qdict(opts, options, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto fail;
    }

    /* get L2 table/refcount block cache size from command line options */
    read_cache_sizes(bs, opts, &l2_cache_size, &l2_cache_entry_size,
                     &refcount_cache_size, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto fail;
    }

    l2_cache_size /= l2_cache_entry_size;
    if (l2_cache_size < MIN_L2_CACHE_SIZE) {
        l2_cache_size = MIN_L2_CACHE_SIZE;
    }
    if (l2_cache_size > INT_MAX) {
        error_setg(errp, "L2 cache size too big");
        ret = -EINVAL;
        goto fail;
    }

    refcount_cache_size /= s->cluster_size;
    if (refcount_cache_size < MIN_REFCOUNT_CACHE_SIZE) {
        refcount_cache_size = MIN_REFCOUNT_CACHE_SIZE;
    }
    if (refcount_cache_size > INT_MAX) {
        error_setg(errp, "Refcount cache size too big");
        ret = -EINVAL;
        goto fail;
    }

    /* alloc new L2 table/refcount block cache, flush old one */
    if (s->l2_table_cache) {
        ret = qcow2_cache_flush(bs, s->l2_table_cache);
        if (ret) {
            error_setg_errno(errp, -ret, "Failed to flush the L2 table cache");
            goto fail;
        }
    }

    if (s->refcount_block_cache) {
        ret = qcow2_cache_flush(bs, s->refcount_block_cache);
        if (ret) {
            error_setg_errno(errp, -ret,
                             "Failed to flush the refcount block cache");
            goto fail;
        }
    }

    r->l2_slice_size = l2_cache_entry_size / sizeof(uint64_t);
    r->l2_table_cache = qcow2_cache_create(bs, l2_cache_size,
                                           l2_cache_entry_size);
    r->refcount_block_cache = qcow2_cache_create(bs, refcount_cache_size,
                                                 s->cluster_size);
    if (r->l2_table_cache == NULL || r->refcount_block_cache == NULL) {
        error_setg(errp, "Could not allocate metadata caches");
        ret = -ENOMEM;
        goto fail;
    }

    /* New interval for cache cleanup timer */
    r->cache_clean_interval =
        qemu_opt_get_number(opts, QCOW2_OPT_CACHE_CLEAN_INTERVAL,
                            DEFAULT_CACHE_CLEAN_INTERVAL);
#ifndef CONFIG_LINUX
    if (r->cache_clean_interval != 0) {
        error_setg(errp, QCOW2_OPT_CACHE_CLEAN_INTERVAL
                   " not supported on this host");
        ret = -EINVAL;
        goto fail;
    }
#endif
    if (r->cache_clean_interval > UINT_MAX) {
        error_setg(errp, "Cache clean interval too big");
        ret = -EINVAL;
        goto fail;
    }

    /* lazy-refcounts; flush if going from enabled to disabled */
    r->use_lazy_refcounts = qemu_opt_get_bool(opts, QCOW2_OPT_LAZY_REFCOUNTS,
        (s->compatible_features & QCOW2_COMPAT_LAZY_REFCOUNTS));
    if (r->use_lazy_refcounts && s->qcow_version < 3) {
        error_setg(errp, "Lazy refcounts require a qcow2 image with at least "
                   "qemu 1.1 compatibility level");
        ret = -EINVAL;
        goto fail;
    }

    if
(s->use_lazy_refcounts && !r->use_lazy_refcounts) { 973 ret = qcow2_mark_clean(bs); 974 if (ret < 0) { 975 error_setg_errno(errp, -ret, "Failed to disable lazy refcounts"); 976 goto fail; 977 } 978 } 979 980 /* Overlap check options */ 981 opt_overlap_check = qemu_opt_get(opts, QCOW2_OPT_OVERLAP); 982 opt_overlap_check_template = qemu_opt_get(opts, QCOW2_OPT_OVERLAP_TEMPLATE); 983 if (opt_overlap_check_template && opt_overlap_check && 984 strcmp(opt_overlap_check_template, opt_overlap_check)) 985 { 986 error_setg(errp, "Conflicting values for qcow2 options '" 987 QCOW2_OPT_OVERLAP "' ('%s') and '" QCOW2_OPT_OVERLAP_TEMPLATE 988 "' ('%s')", opt_overlap_check, opt_overlap_check_template); 989 ret = -EINVAL; 990 goto fail; 991 } 992 if (!opt_overlap_check) { 993 opt_overlap_check = opt_overlap_check_template ?: "cached"; 994 } 995 996 if (!strcmp(opt_overlap_check, "none")) { 997 overlap_check_template = 0; 998 } else if (!strcmp(opt_overlap_check, "constant")) { 999 overlap_check_template = QCOW2_OL_CONSTANT; 1000 } else if (!strcmp(opt_overlap_check, "cached")) { 1001 overlap_check_template = QCOW2_OL_CACHED; 1002 } else if (!strcmp(opt_overlap_check, "all")) { 1003 overlap_check_template = QCOW2_OL_ALL; 1004 } else { 1005 error_setg(errp, "Unsupported value '%s' for qcow2 option " 1006 "'overlap-check'. Allowed are any of the following: " 1007 "none, constant, cached, all", opt_overlap_check); 1008 ret = -EINVAL; 1009 goto fail; 1010 } 1011 1012 r->overlap_check = 0; 1013 for (i = 0; i < QCOW2_OL_MAX_BITNR; i++) { 1014 /* overlap-check defines a template bitmask, but every flag may be 1015 * overwritten through the associated boolean option */ 1016 r->overlap_check |= 1017 qemu_opt_get_bool(opts, overlap_bool_option_names[i], 1018 overlap_check_template & (1 << i)) << i; 1019 } 1020 1021 r->discard_passthrough[QCOW2_DISCARD_NEVER] = false; 1022 r->discard_passthrough[QCOW2_DISCARD_ALWAYS] = true; 1023 r->discard_passthrough[QCOW2_DISCARD_REQUEST] = 1024 qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_REQUEST, 1025 flags & BDRV_O_UNMAP); 1026 r->discard_passthrough[QCOW2_DISCARD_SNAPSHOT] = 1027 qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_SNAPSHOT, true); 1028 r->discard_passthrough[QCOW2_DISCARD_OTHER] = 1029 qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_OTHER, false); 1030 1031 switch (s->crypt_method_header) { 1032 case QCOW_CRYPT_NONE: 1033 if (encryptfmt) { 1034 error_setg(errp, "No encryption in image header, but options " 1035 "specified format '%s'", encryptfmt); 1036 ret = -EINVAL; 1037 goto fail; 1038 } 1039 break; 1040 1041 case QCOW_CRYPT_AES: 1042 if (encryptfmt && !g_str_equal(encryptfmt, "aes")) { 1043 error_setg(errp, 1044 "Header reported 'aes' encryption format but " 1045 "options specify '%s'", encryptfmt); 1046 ret = -EINVAL; 1047 goto fail; 1048 } 1049 qdict_put_str(encryptopts, "format", "qcow"); 1050 r->crypto_opts = block_crypto_open_opts_init(encryptopts, errp); 1051 break; 1052 1053 case QCOW_CRYPT_LUKS: 1054 if (encryptfmt && !g_str_equal(encryptfmt, "luks")) { 1055 error_setg(errp, 1056 "Header reported 'luks' encryption format but " 1057 "options specify '%s'", encryptfmt); 1058 ret = -EINVAL; 1059 goto fail; 1060 } 1061 qdict_put_str(encryptopts, "format", "luks"); 1062 r->crypto_opts = block_crypto_open_opts_init(encryptopts, errp); 1063 break; 1064 1065 default: 1066 error_setg(errp, "Unsupported encryption method %d", 1067 s->crypt_method_header); 1068 break; 1069 } 1070 if (s->crypt_method_header != QCOW_CRYPT_NONE && !r->crypto_opts) { 1071 ret = -EINVAL; 1072 goto 
fail; 1073 } 1074 1075 ret = 0; 1076 fail: 1077 qobject_unref(encryptopts); 1078 qemu_opts_del(opts); 1079 opts = NULL; 1080 return ret; 1081 } 1082 1083 static void qcow2_update_options_commit(BlockDriverState *bs, 1084 Qcow2ReopenState *r) 1085 { 1086 BDRVQcow2State *s = bs->opaque; 1087 int i; 1088 1089 if (s->l2_table_cache) { 1090 qcow2_cache_destroy(s->l2_table_cache); 1091 } 1092 if (s->refcount_block_cache) { 1093 qcow2_cache_destroy(s->refcount_block_cache); 1094 } 1095 s->l2_table_cache = r->l2_table_cache; 1096 s->refcount_block_cache = r->refcount_block_cache; 1097 s->l2_slice_size = r->l2_slice_size; 1098 1099 s->overlap_check = r->overlap_check; 1100 s->use_lazy_refcounts = r->use_lazy_refcounts; 1101 1102 for (i = 0; i < QCOW2_DISCARD_MAX; i++) { 1103 s->discard_passthrough[i] = r->discard_passthrough[i]; 1104 } 1105 1106 if (s->cache_clean_interval != r->cache_clean_interval) { 1107 cache_clean_timer_del(bs); 1108 s->cache_clean_interval = r->cache_clean_interval; 1109 cache_clean_timer_init(bs, bdrv_get_aio_context(bs)); 1110 } 1111 1112 qapi_free_QCryptoBlockOpenOptions(s->crypto_opts); 1113 s->crypto_opts = r->crypto_opts; 1114 } 1115 1116 static void qcow2_update_options_abort(BlockDriverState *bs, 1117 Qcow2ReopenState *r) 1118 { 1119 if (r->l2_table_cache) { 1120 qcow2_cache_destroy(r->l2_table_cache); 1121 } 1122 if (r->refcount_block_cache) { 1123 qcow2_cache_destroy(r->refcount_block_cache); 1124 } 1125 qapi_free_QCryptoBlockOpenOptions(r->crypto_opts); 1126 } 1127 1128 static int qcow2_update_options(BlockDriverState *bs, QDict *options, 1129 int flags, Error **errp) 1130 { 1131 Qcow2ReopenState r = {}; 1132 int ret; 1133 1134 ret = qcow2_update_options_prepare(bs, &r, options, flags, errp); 1135 if (ret >= 0) { 1136 qcow2_update_options_commit(bs, &r); 1137 } else { 1138 qcow2_update_options_abort(bs, &r); 1139 } 1140 1141 return ret; 1142 } 1143 1144 /* Called with s->lock held. 
*/ 1145 static int coroutine_fn qcow2_do_open(BlockDriverState *bs, QDict *options, 1146 int flags, Error **errp) 1147 { 1148 BDRVQcow2State *s = bs->opaque; 1149 unsigned int len, i; 1150 int ret = 0; 1151 QCowHeader header; 1152 Error *local_err = NULL; 1153 uint64_t ext_end; 1154 uint64_t l1_vm_state_index; 1155 bool update_header = false; 1156 1157 ret = bdrv_pread(bs->file, 0, &header, sizeof(header)); 1158 if (ret < 0) { 1159 error_setg_errno(errp, -ret, "Could not read qcow2 header"); 1160 goto fail; 1161 } 1162 be32_to_cpus(&header.magic); 1163 be32_to_cpus(&header.version); 1164 be64_to_cpus(&header.backing_file_offset); 1165 be32_to_cpus(&header.backing_file_size); 1166 be64_to_cpus(&header.size); 1167 be32_to_cpus(&header.cluster_bits); 1168 be32_to_cpus(&header.crypt_method); 1169 be64_to_cpus(&header.l1_table_offset); 1170 be32_to_cpus(&header.l1_size); 1171 be64_to_cpus(&header.refcount_table_offset); 1172 be32_to_cpus(&header.refcount_table_clusters); 1173 be64_to_cpus(&header.snapshots_offset); 1174 be32_to_cpus(&header.nb_snapshots); 1175 1176 if (header.magic != QCOW_MAGIC) { 1177 error_setg(errp, "Image is not in qcow2 format"); 1178 ret = -EINVAL; 1179 goto fail; 1180 } 1181 if (header.version < 2 || header.version > 3) { 1182 error_setg(errp, "Unsupported qcow2 version %" PRIu32, header.version); 1183 ret = -ENOTSUP; 1184 goto fail; 1185 } 1186 1187 s->qcow_version = header.version; 1188 1189 /* Initialise cluster size */ 1190 if (header.cluster_bits < MIN_CLUSTER_BITS || 1191 header.cluster_bits > MAX_CLUSTER_BITS) { 1192 error_setg(errp, "Unsupported cluster size: 2^%" PRIu32, 1193 header.cluster_bits); 1194 ret = -EINVAL; 1195 goto fail; 1196 } 1197 1198 s->cluster_bits = header.cluster_bits; 1199 s->cluster_size = 1 << s->cluster_bits; 1200 s->cluster_sectors = 1 << (s->cluster_bits - BDRV_SECTOR_BITS); 1201 1202 /* Initialise version 3 header fields */ 1203 if (header.version == 2) { 1204 header.incompatible_features = 0; 1205 header.compatible_features = 0; 1206 header.autoclear_features = 0; 1207 header.refcount_order = 4; 1208 header.header_length = 72; 1209 } else { 1210 be64_to_cpus(&header.incompatible_features); 1211 be64_to_cpus(&header.compatible_features); 1212 be64_to_cpus(&header.autoclear_features); 1213 be32_to_cpus(&header.refcount_order); 1214 be32_to_cpus(&header.header_length); 1215 1216 if (header.header_length < 104) { 1217 error_setg(errp, "qcow2 header too short"); 1218 ret = -EINVAL; 1219 goto fail; 1220 } 1221 } 1222 1223 if (header.header_length > s->cluster_size) { 1224 error_setg(errp, "qcow2 header exceeds cluster size"); 1225 ret = -EINVAL; 1226 goto fail; 1227 } 1228 1229 if (header.header_length > sizeof(header)) { 1230 s->unknown_header_fields_size = header.header_length - sizeof(header); 1231 s->unknown_header_fields = g_malloc(s->unknown_header_fields_size); 1232 ret = bdrv_pread(bs->file, sizeof(header), s->unknown_header_fields, 1233 s->unknown_header_fields_size); 1234 if (ret < 0) { 1235 error_setg_errno(errp, -ret, "Could not read unknown qcow2 header " 1236 "fields"); 1237 goto fail; 1238 } 1239 } 1240 1241 if (header.backing_file_offset > s->cluster_size) { 1242 error_setg(errp, "Invalid backing file offset"); 1243 ret = -EINVAL; 1244 goto fail; 1245 } 1246 1247 if (header.backing_file_offset) { 1248 ext_end = header.backing_file_offset; 1249 } else { 1250 ext_end = 1 << header.cluster_bits; 1251 } 1252 1253 /* Handle feature bits */ 1254 s->incompatible_features = header.incompatible_features; 1255 s->compatible_features = 
header.compatible_features; 1256 s->autoclear_features = header.autoclear_features; 1257 1258 if (s->incompatible_features & ~QCOW2_INCOMPAT_MASK) { 1259 void *feature_table = NULL; 1260 qcow2_read_extensions(bs, header.header_length, ext_end, 1261 &feature_table, flags, NULL, NULL); 1262 report_unsupported_feature(errp, feature_table, 1263 s->incompatible_features & 1264 ~QCOW2_INCOMPAT_MASK); 1265 ret = -ENOTSUP; 1266 g_free(feature_table); 1267 goto fail; 1268 } 1269 1270 if (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT) { 1271 /* Corrupt images may not be written to unless they are being repaired 1272 */ 1273 if ((flags & BDRV_O_RDWR) && !(flags & BDRV_O_CHECK)) { 1274 error_setg(errp, "qcow2: Image is corrupt; cannot be opened " 1275 "read/write"); 1276 ret = -EACCES; 1277 goto fail; 1278 } 1279 } 1280 1281 /* Check support for various header values */ 1282 if (header.refcount_order > 6) { 1283 error_setg(errp, "Reference count entry width too large; may not " 1284 "exceed 64 bits"); 1285 ret = -EINVAL; 1286 goto fail; 1287 } 1288 s->refcount_order = header.refcount_order; 1289 s->refcount_bits = 1 << s->refcount_order; 1290 s->refcount_max = UINT64_C(1) << (s->refcount_bits - 1); 1291 s->refcount_max += s->refcount_max - 1; 1292 1293 s->crypt_method_header = header.crypt_method; 1294 if (s->crypt_method_header) { 1295 if (bdrv_uses_whitelist() && 1296 s->crypt_method_header == QCOW_CRYPT_AES) { 1297 error_setg(errp, 1298 "Use of AES-CBC encrypted qcow2 images is no longer " 1299 "supported in system emulators"); 1300 error_append_hint(errp, 1301 "You can use 'qemu-img convert' to convert your " 1302 "image to an alternative supported format, such " 1303 "as unencrypted qcow2, or raw with the LUKS " 1304 "format instead.\n"); 1305 ret = -ENOSYS; 1306 goto fail; 1307 } 1308 1309 if (s->crypt_method_header == QCOW_CRYPT_AES) { 1310 s->crypt_physical_offset = false; 1311 } else { 1312 /* Assuming LUKS and any future crypt methods we 1313 * add will all use physical offsets, due to the 1314 * fact that the alternative is insecure... */ 1315 s->crypt_physical_offset = true; 1316 } 1317 1318 bs->encrypted = true; 1319 } 1320 1321 s->l2_bits = s->cluster_bits - 3; /* L2 is always one cluster */ 1322 s->l2_size = 1 << s->l2_bits; 1323 /* 2^(s->refcount_order - 3) is the refcount width in bytes */ 1324 s->refcount_block_bits = s->cluster_bits - (s->refcount_order - 3); 1325 s->refcount_block_size = 1 << s->refcount_block_bits; 1326 bs->total_sectors = header.size / BDRV_SECTOR_SIZE; 1327 s->csize_shift = (62 - (s->cluster_bits - 8)); 1328 s->csize_mask = (1 << (s->cluster_bits - 8)) - 1; 1329 s->cluster_offset_mask = (1LL << s->csize_shift) - 1; 1330 1331 s->refcount_table_offset = header.refcount_table_offset; 1332 s->refcount_table_size = 1333 header.refcount_table_clusters << (s->cluster_bits - 3); 1334 1335 if (header.refcount_table_clusters == 0 && !(flags & BDRV_O_CHECK)) { 1336 error_setg(errp, "Image does not contain a reference count table"); 1337 ret = -EINVAL; 1338 goto fail; 1339 } 1340 1341 ret = qcow2_validate_table(bs, s->refcount_table_offset, 1342 header.refcount_table_clusters, 1343 s->cluster_size, QCOW_MAX_REFTABLE_SIZE, 1344 "Reference count table", errp); 1345 if (ret < 0) { 1346 goto fail; 1347 } 1348 1349 /* The total size in bytes of the snapshot table is checked in 1350 * qcow2_read_snapshots() because the size of each snapshot is 1351 * variable and we don't know it yet. 1352 * Here we only check the offset and number of snapshots. 
*/ 1353 ret = qcow2_validate_table(bs, header.snapshots_offset, 1354 header.nb_snapshots, 1355 sizeof(QCowSnapshotHeader), 1356 sizeof(QCowSnapshotHeader) * QCOW_MAX_SNAPSHOTS, 1357 "Snapshot table", errp); 1358 if (ret < 0) { 1359 goto fail; 1360 } 1361 1362 /* read the level 1 table */ 1363 ret = qcow2_validate_table(bs, header.l1_table_offset, 1364 header.l1_size, sizeof(uint64_t), 1365 QCOW_MAX_L1_SIZE, "Active L1 table", errp); 1366 if (ret < 0) { 1367 goto fail; 1368 } 1369 s->l1_size = header.l1_size; 1370 s->l1_table_offset = header.l1_table_offset; 1371 1372 l1_vm_state_index = size_to_l1(s, header.size); 1373 if (l1_vm_state_index > INT_MAX) { 1374 error_setg(errp, "Image is too big"); 1375 ret = -EFBIG; 1376 goto fail; 1377 } 1378 s->l1_vm_state_index = l1_vm_state_index; 1379 1380 /* the L1 table must contain at least enough entries to put 1381 header.size bytes */ 1382 if (s->l1_size < s->l1_vm_state_index) { 1383 error_setg(errp, "L1 table is too small"); 1384 ret = -EINVAL; 1385 goto fail; 1386 } 1387 1388 if (s->l1_size > 0) { 1389 s->l1_table = qemu_try_blockalign(bs->file->bs, 1390 ROUND_UP(s->l1_size * sizeof(uint64_t), 512)); 1391 if (s->l1_table == NULL) { 1392 error_setg(errp, "Could not allocate L1 table"); 1393 ret = -ENOMEM; 1394 goto fail; 1395 } 1396 ret = bdrv_pread(bs->file, s->l1_table_offset, s->l1_table, 1397 s->l1_size * sizeof(uint64_t)); 1398 if (ret < 0) { 1399 error_setg_errno(errp, -ret, "Could not read L1 table"); 1400 goto fail; 1401 } 1402 for(i = 0;i < s->l1_size; i++) { 1403 be64_to_cpus(&s->l1_table[i]); 1404 } 1405 } 1406 1407 /* Parse driver-specific options */ 1408 ret = qcow2_update_options(bs, options, flags, errp); 1409 if (ret < 0) { 1410 goto fail; 1411 } 1412 1413 s->cluster_cache_offset = -1; 1414 s->flags = flags; 1415 1416 ret = qcow2_refcount_init(bs); 1417 if (ret != 0) { 1418 error_setg_errno(errp, -ret, "Could not initialize refcount handling"); 1419 goto fail; 1420 } 1421 1422 QLIST_INIT(&s->cluster_allocs); 1423 QTAILQ_INIT(&s->discards); 1424 1425 /* read qcow2 extensions */ 1426 if (qcow2_read_extensions(bs, header.header_length, ext_end, NULL, 1427 flags, &update_header, &local_err)) { 1428 error_propagate(errp, local_err); 1429 ret = -EINVAL; 1430 goto fail; 1431 } 1432 1433 /* qcow2_read_extension may have set up the crypto context 1434 * if the crypt method needs a header region, some methods 1435 * don't need header extensions, so must check here 1436 */ 1437 if (s->crypt_method_header && !s->crypto) { 1438 if (s->crypt_method_header == QCOW_CRYPT_AES) { 1439 unsigned int cflags = 0; 1440 if (flags & BDRV_O_NO_IO) { 1441 cflags |= QCRYPTO_BLOCK_OPEN_NO_IO; 1442 } 1443 s->crypto = qcrypto_block_open(s->crypto_opts, "encrypt.", 1444 NULL, NULL, cflags, errp); 1445 if (!s->crypto) { 1446 ret = -EINVAL; 1447 goto fail; 1448 } 1449 } else if (!(flags & BDRV_O_NO_IO)) { 1450 error_setg(errp, "Missing CRYPTO header for crypt method %d", 1451 s->crypt_method_header); 1452 ret = -EINVAL; 1453 goto fail; 1454 } 1455 } 1456 1457 /* read the backing file name */ 1458 if (header.backing_file_offset != 0) { 1459 len = header.backing_file_size; 1460 if (len > MIN(1023, s->cluster_size - header.backing_file_offset) || 1461 len >= sizeof(bs->backing_file)) { 1462 error_setg(errp, "Backing file name too long"); 1463 ret = -EINVAL; 1464 goto fail; 1465 } 1466 ret = bdrv_pread(bs->file, header.backing_file_offset, 1467 bs->backing_file, len); 1468 if (ret < 0) { 1469 error_setg_errno(errp, -ret, "Could not read backing file name"); 1470 goto 
fail; 1471 } 1472 bs->backing_file[len] = '\0'; 1473 s->image_backing_file = g_strdup(bs->backing_file); 1474 } 1475 1476 /* Internal snapshots */ 1477 s->snapshots_offset = header.snapshots_offset; 1478 s->nb_snapshots = header.nb_snapshots; 1479 1480 ret = qcow2_read_snapshots(bs); 1481 if (ret < 0) { 1482 error_setg_errno(errp, -ret, "Could not read snapshots"); 1483 goto fail; 1484 } 1485 1486 /* Clear unknown autoclear feature bits */ 1487 update_header |= s->autoclear_features & ~QCOW2_AUTOCLEAR_MASK; 1488 update_header = 1489 update_header && !bs->read_only && !(flags & BDRV_O_INACTIVE); 1490 if (update_header) { 1491 s->autoclear_features &= QCOW2_AUTOCLEAR_MASK; 1492 } 1493 1494 /* == Handle persistent dirty bitmaps == 1495 * 1496 * We want load dirty bitmaps in three cases: 1497 * 1498 * 1. Normal open of the disk in active mode, not related to invalidation 1499 * after migration. 1500 * 1501 * 2. Invalidation of the target vm after pre-copy phase of migration, if 1502 * bitmaps are _not_ migrating through migration channel, i.e. 1503 * 'dirty-bitmaps' capability is disabled. 1504 * 1505 * 3. Invalidation of source vm after failed or canceled migration. 1506 * This is a very interesting case. There are two possible types of 1507 * bitmaps: 1508 * 1509 * A. Stored on inactivation and removed. They should be loaded from the 1510 * image. 1511 * 1512 * B. Not stored: not-persistent bitmaps and bitmaps, migrated through 1513 * the migration channel (with dirty-bitmaps capability). 1514 * 1515 * On the other hand, there are two possible sub-cases: 1516 * 1517 * 3.1 disk was changed by somebody else while were inactive. In this 1518 * case all in-RAM dirty bitmaps (both persistent and not) are 1519 * definitely invalid. And we don't have any method to determine 1520 * this. 1521 * 1522 * Simple and safe thing is to just drop all the bitmaps of type B on 1523 * inactivation. But in this case we lose bitmaps in valid 4.2 case. 1524 * 1525 * On the other hand, resuming source vm, if disk was already changed 1526 * is a bad thing anyway: not only bitmaps, the whole vm state is 1527 * out of sync with disk. 1528 * 1529 * This means, that user or management tool, who for some reason 1530 * decided to resume source vm, after disk was already changed by 1531 * target vm, should at least drop all dirty bitmaps by hand. 1532 * 1533 * So, we can ignore this case for now, but TODO: "generation" 1534 * extension for qcow2, to determine, that image was changed after 1535 * last inactivation. And if it is changed, we will drop (or at least 1536 * mark as 'invalid' all the bitmaps of type B, both persistent 1537 * and not). 1538 * 1539 * 3.2 disk was _not_ changed while were inactive. Bitmaps may be saved 1540 * to disk ('dirty-bitmaps' capability disabled), or not saved 1541 * ('dirty-bitmaps' capability enabled), but we don't need to care 1542 * of: let's load bitmaps as always: stored bitmaps will be loaded, 1543 * and not stored has flag IN_USE=1 in the image and will be skipped 1544 * on loading. 1545 * 1546 * One remaining possible case when we don't want load bitmaps: 1547 * 1548 * 4. Open disk in inactive mode in target vm (bitmaps are migrating or 1549 * will be loaded on invalidation, no needs try loading them before) 1550 */ 1551 1552 if (!(bdrv_get_flags(bs) & BDRV_O_INACTIVE)) { 1553 /* It's case 1, 2 or 3.2. Or 3.1 which is BUG in management layer. 
*/ 1554 bool header_updated = qcow2_load_dirty_bitmaps(bs, &local_err); 1555 1556 update_header = update_header && !header_updated; 1557 } 1558 if (local_err != NULL) { 1559 error_propagate(errp, local_err); 1560 ret = -EINVAL; 1561 goto fail; 1562 } 1563 1564 if (update_header) { 1565 ret = qcow2_update_header(bs); 1566 if (ret < 0) { 1567 error_setg_errno(errp, -ret, "Could not update qcow2 header"); 1568 goto fail; 1569 } 1570 } 1571 1572 bs->supported_zero_flags = header.version >= 3 ? BDRV_REQ_MAY_UNMAP : 0; 1573 1574 /* Repair image if dirty */ 1575 if (!(flags & (BDRV_O_CHECK | BDRV_O_INACTIVE)) && !bs->read_only && 1576 (s->incompatible_features & QCOW2_INCOMPAT_DIRTY)) { 1577 BdrvCheckResult result = {0}; 1578 1579 ret = qcow2_co_check_locked(bs, &result, 1580 BDRV_FIX_ERRORS | BDRV_FIX_LEAKS); 1581 if (ret < 0 || result.check_errors) { 1582 if (ret >= 0) { 1583 ret = -EIO; 1584 } 1585 error_setg_errno(errp, -ret, "Could not repair dirty image"); 1586 goto fail; 1587 } 1588 } 1589 1590 #ifdef DEBUG_ALLOC 1591 { 1592 BdrvCheckResult result = {0}; 1593 qcow2_check_refcounts(bs, &result, 0); 1594 } 1595 #endif 1596 1597 qemu_co_queue_init(&s->compress_wait_queue); 1598 1599 return ret; 1600 1601 fail: 1602 g_free(s->unknown_header_fields); 1603 cleanup_unknown_header_ext(bs); 1604 qcow2_free_snapshots(bs); 1605 qcow2_refcount_close(bs); 1606 qemu_vfree(s->l1_table); 1607 /* else pre-write overlap checks in cache_destroy may crash */ 1608 s->l1_table = NULL; 1609 cache_clean_timer_del(bs); 1610 if (s->l2_table_cache) { 1611 qcow2_cache_destroy(s->l2_table_cache); 1612 } 1613 if (s->refcount_block_cache) { 1614 qcow2_cache_destroy(s->refcount_block_cache); 1615 } 1616 qcrypto_block_free(s->crypto); 1617 qapi_free_QCryptoBlockOpenOptions(s->crypto_opts); 1618 return ret; 1619 } 1620 1621 typedef struct QCow2OpenCo { 1622 BlockDriverState *bs; 1623 QDict *options; 1624 int flags; 1625 Error **errp; 1626 int ret; 1627 } QCow2OpenCo; 1628 1629 static void coroutine_fn qcow2_open_entry(void *opaque) 1630 { 1631 QCow2OpenCo *qoc = opaque; 1632 BDRVQcow2State *s = qoc->bs->opaque; 1633 1634 qemu_co_mutex_lock(&s->lock); 1635 qoc->ret = qcow2_do_open(qoc->bs, qoc->options, qoc->flags, qoc->errp); 1636 qemu_co_mutex_unlock(&s->lock); 1637 } 1638 1639 static int qcow2_open(BlockDriverState *bs, QDict *options, int flags, 1640 Error **errp) 1641 { 1642 BDRVQcow2State *s = bs->opaque; 1643 QCow2OpenCo qoc = { 1644 .bs = bs, 1645 .options = options, 1646 .flags = flags, 1647 .errp = errp, 1648 .ret = -EINPROGRESS 1649 }; 1650 1651 bs->file = bdrv_open_child(NULL, options, "file", bs, &child_file, 1652 false, errp); 1653 if (!bs->file) { 1654 return -EINVAL; 1655 } 1656 1657 /* Initialise locks */ 1658 qemu_co_mutex_init(&s->lock); 1659 1660 if (qemu_in_coroutine()) { 1661 /* From bdrv_co_create. 
*/ 1662 qcow2_open_entry(&qoc); 1663 } else { 1664 qemu_coroutine_enter(qemu_coroutine_create(qcow2_open_entry, &qoc)); 1665 BDRV_POLL_WHILE(bs, qoc.ret == -EINPROGRESS); 1666 } 1667 return qoc.ret; 1668 } 1669 1670 static void qcow2_refresh_limits(BlockDriverState *bs, Error **errp) 1671 { 1672 BDRVQcow2State *s = bs->opaque; 1673 1674 if (bs->encrypted) { 1675 /* Encryption works on a sector granularity */ 1676 bs->bl.request_alignment = BDRV_SECTOR_SIZE; 1677 } 1678 bs->bl.pwrite_zeroes_alignment = s->cluster_size; 1679 bs->bl.pdiscard_alignment = s->cluster_size; 1680 } 1681 1682 static int qcow2_reopen_prepare(BDRVReopenState *state, 1683 BlockReopenQueue *queue, Error **errp) 1684 { 1685 Qcow2ReopenState *r; 1686 int ret; 1687 1688 r = g_new0(Qcow2ReopenState, 1); 1689 state->opaque = r; 1690 1691 ret = qcow2_update_options_prepare(state->bs, r, state->options, 1692 state->flags, errp); 1693 if (ret < 0) { 1694 goto fail; 1695 } 1696 1697 /* We need to write out any unwritten data if we reopen read-only. */ 1698 if ((state->flags & BDRV_O_RDWR) == 0) { 1699 ret = qcow2_reopen_bitmaps_ro(state->bs, errp); 1700 if (ret < 0) { 1701 goto fail; 1702 } 1703 1704 ret = bdrv_flush(state->bs); 1705 if (ret < 0) { 1706 goto fail; 1707 } 1708 1709 ret = qcow2_mark_clean(state->bs); 1710 if (ret < 0) { 1711 goto fail; 1712 } 1713 } 1714 1715 return 0; 1716 1717 fail: 1718 qcow2_update_options_abort(state->bs, r); 1719 g_free(r); 1720 return ret; 1721 } 1722 1723 static void qcow2_reopen_commit(BDRVReopenState *state) 1724 { 1725 qcow2_update_options_commit(state->bs, state->opaque); 1726 g_free(state->opaque); 1727 } 1728 1729 static void qcow2_reopen_abort(BDRVReopenState *state) 1730 { 1731 qcow2_update_options_abort(state->bs, state->opaque); 1732 g_free(state->opaque); 1733 } 1734 1735 static void qcow2_join_options(QDict *options, QDict *old_options) 1736 { 1737 bool has_new_overlap_template = 1738 qdict_haskey(options, QCOW2_OPT_OVERLAP) || 1739 qdict_haskey(options, QCOW2_OPT_OVERLAP_TEMPLATE); 1740 bool has_new_total_cache_size = 1741 qdict_haskey(options, QCOW2_OPT_CACHE_SIZE); 1742 bool has_all_cache_options; 1743 1744 /* New overlap template overrides all old overlap options */ 1745 if (has_new_overlap_template) { 1746 qdict_del(old_options, QCOW2_OPT_OVERLAP); 1747 qdict_del(old_options, QCOW2_OPT_OVERLAP_TEMPLATE); 1748 qdict_del(old_options, QCOW2_OPT_OVERLAP_MAIN_HEADER); 1749 qdict_del(old_options, QCOW2_OPT_OVERLAP_ACTIVE_L1); 1750 qdict_del(old_options, QCOW2_OPT_OVERLAP_ACTIVE_L2); 1751 qdict_del(old_options, QCOW2_OPT_OVERLAP_REFCOUNT_TABLE); 1752 qdict_del(old_options, QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK); 1753 qdict_del(old_options, QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE); 1754 qdict_del(old_options, QCOW2_OPT_OVERLAP_INACTIVE_L1); 1755 qdict_del(old_options, QCOW2_OPT_OVERLAP_INACTIVE_L2); 1756 } 1757 1758 /* New total cache size overrides all old options */ 1759 if (qdict_haskey(options, QCOW2_OPT_CACHE_SIZE)) { 1760 qdict_del(old_options, QCOW2_OPT_L2_CACHE_SIZE); 1761 qdict_del(old_options, QCOW2_OPT_REFCOUNT_CACHE_SIZE); 1762 } 1763 1764 qdict_join(options, old_options, false); 1765 1766 /* 1767 * If after merging all cache size options are set, an old total size is 1768 * overwritten. Do keep all options, however, if all three are new. The 1769 * resulting error message is what we want to happen. 
1770 */ 1771 has_all_cache_options = 1772 qdict_haskey(options, QCOW2_OPT_CACHE_SIZE) || 1773 qdict_haskey(options, QCOW2_OPT_L2_CACHE_SIZE) || 1774 qdict_haskey(options, QCOW2_OPT_REFCOUNT_CACHE_SIZE); 1775 1776 if (has_all_cache_options && !has_new_total_cache_size) { 1777 qdict_del(options, QCOW2_OPT_CACHE_SIZE); 1778 } 1779 } 1780 1781 static int coroutine_fn qcow2_co_block_status(BlockDriverState *bs, 1782 bool want_zero, 1783 int64_t offset, int64_t count, 1784 int64_t *pnum, int64_t *map, 1785 BlockDriverState **file) 1786 { 1787 BDRVQcow2State *s = bs->opaque; 1788 uint64_t cluster_offset; 1789 int index_in_cluster, ret; 1790 unsigned int bytes; 1791 int status = 0; 1792 1793 bytes = MIN(INT_MAX, count); 1794 qemu_co_mutex_lock(&s->lock); 1795 ret = qcow2_get_cluster_offset(bs, offset, &bytes, &cluster_offset); 1796 qemu_co_mutex_unlock(&s->lock); 1797 if (ret < 0) { 1798 return ret; 1799 } 1800 1801 *pnum = bytes; 1802 1803 if (cluster_offset != 0 && ret != QCOW2_CLUSTER_COMPRESSED && 1804 !s->crypto) { 1805 index_in_cluster = offset & (s->cluster_size - 1); 1806 *map = cluster_offset | index_in_cluster; 1807 *file = bs->file->bs; 1808 status |= BDRV_BLOCK_OFFSET_VALID; 1809 } 1810 if (ret == QCOW2_CLUSTER_ZERO_PLAIN || ret == QCOW2_CLUSTER_ZERO_ALLOC) { 1811 status |= BDRV_BLOCK_ZERO; 1812 } else if (ret != QCOW2_CLUSTER_UNALLOCATED) { 1813 status |= BDRV_BLOCK_DATA; 1814 } 1815 return status; 1816 } 1817 1818 static coroutine_fn int qcow2_handle_l2meta(BlockDriverState *bs, 1819 QCowL2Meta **pl2meta, 1820 bool link_l2) 1821 { 1822 int ret = 0; 1823 QCowL2Meta *l2meta = *pl2meta; 1824 1825 while (l2meta != NULL) { 1826 QCowL2Meta *next; 1827 1828 if (link_l2) { 1829 ret = qcow2_alloc_cluster_link_l2(bs, l2meta); 1830 if (ret) { 1831 goto out; 1832 } 1833 } else { 1834 qcow2_alloc_cluster_abort(bs, l2meta); 1835 } 1836 1837 /* Take the request off the list of running requests */ 1838 if (l2meta->nb_clusters != 0) { 1839 QLIST_REMOVE(l2meta, next_in_flight); 1840 } 1841 1842 qemu_co_queue_restart_all(&l2meta->dependent_requests); 1843 1844 next = l2meta->next; 1845 g_free(l2meta); 1846 l2meta = next; 1847 } 1848 out: 1849 *pl2meta = l2meta; 1850 return ret; 1851 } 1852 1853 static coroutine_fn int qcow2_co_preadv(BlockDriverState *bs, uint64_t offset, 1854 uint64_t bytes, QEMUIOVector *qiov, 1855 int flags) 1856 { 1857 BDRVQcow2State *s = bs->opaque; 1858 int offset_in_cluster; 1859 int ret; 1860 unsigned int cur_bytes; /* number of bytes in current iteration */ 1861 uint64_t cluster_offset = 0; 1862 uint64_t bytes_done = 0; 1863 QEMUIOVector hd_qiov; 1864 uint8_t *cluster_data = NULL; 1865 1866 qemu_iovec_init(&hd_qiov, qiov->niov); 1867 1868 qemu_co_mutex_lock(&s->lock); 1869 1870 while (bytes != 0) { 1871 1872 /* prepare next request */ 1873 cur_bytes = MIN(bytes, INT_MAX); 1874 if (s->crypto) { 1875 cur_bytes = MIN(cur_bytes, 1876 QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); 1877 } 1878 1879 ret = qcow2_get_cluster_offset(bs, offset, &cur_bytes, &cluster_offset); 1880 if (ret < 0) { 1881 goto fail; 1882 } 1883 1884 offset_in_cluster = offset_into_cluster(s, offset); 1885 1886 qemu_iovec_reset(&hd_qiov); 1887 qemu_iovec_concat(&hd_qiov, qiov, bytes_done, cur_bytes); 1888 1889 switch (ret) { 1890 case QCOW2_CLUSTER_UNALLOCATED: 1891 1892 if (bs->backing) { 1893 BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO); 1894 qemu_co_mutex_unlock(&s->lock); 1895 ret = bdrv_co_preadv(bs->backing, offset, cur_bytes, 1896 &hd_qiov, 0); 1897 qemu_co_mutex_lock(&s->lock); 1898 if (ret < 0) { 1899 
goto fail; 1900 } 1901 } else { 1902 /* Note: in this case, no need to wait */ 1903 qemu_iovec_memset(&hd_qiov, 0, 0, cur_bytes); 1904 } 1905 break; 1906 1907 case QCOW2_CLUSTER_ZERO_PLAIN: 1908 case QCOW2_CLUSTER_ZERO_ALLOC: 1909 qemu_iovec_memset(&hd_qiov, 0, 0, cur_bytes); 1910 break; 1911 1912 case QCOW2_CLUSTER_COMPRESSED: 1913 /* add AIO support for compressed blocks ? */ 1914 ret = qcow2_decompress_cluster(bs, cluster_offset); 1915 if (ret < 0) { 1916 goto fail; 1917 } 1918 1919 qemu_iovec_from_buf(&hd_qiov, 0, 1920 s->cluster_cache + offset_in_cluster, 1921 cur_bytes); 1922 break; 1923 1924 case QCOW2_CLUSTER_NORMAL: 1925 if ((cluster_offset & 511) != 0) { 1926 ret = -EIO; 1927 goto fail; 1928 } 1929 1930 if (bs->encrypted) { 1931 assert(s->crypto); 1932 1933 /* 1934 * For encrypted images, read everything into a temporary 1935 * contiguous buffer on which the AES functions can work. 1936 */ 1937 if (!cluster_data) { 1938 cluster_data = 1939 qemu_try_blockalign(bs->file->bs, 1940 QCOW_MAX_CRYPT_CLUSTERS 1941 * s->cluster_size); 1942 if (cluster_data == NULL) { 1943 ret = -ENOMEM; 1944 goto fail; 1945 } 1946 } 1947 1948 assert(cur_bytes <= QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); 1949 qemu_iovec_reset(&hd_qiov); 1950 qemu_iovec_add(&hd_qiov, cluster_data, cur_bytes); 1951 } 1952 1953 BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO); 1954 qemu_co_mutex_unlock(&s->lock); 1955 ret = bdrv_co_preadv(bs->file, 1956 cluster_offset + offset_in_cluster, 1957 cur_bytes, &hd_qiov, 0); 1958 qemu_co_mutex_lock(&s->lock); 1959 if (ret < 0) { 1960 goto fail; 1961 } 1962 if (bs->encrypted) { 1963 assert(s->crypto); 1964 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0); 1965 assert((cur_bytes & (BDRV_SECTOR_SIZE - 1)) == 0); 1966 if (qcrypto_block_decrypt(s->crypto, 1967 (s->crypt_physical_offset ? 
1968 cluster_offset + offset_in_cluster : 1969 offset), 1970 cluster_data, 1971 cur_bytes, 1972 NULL) < 0) { 1973 ret = -EIO; 1974 goto fail; 1975 } 1976 qemu_iovec_from_buf(qiov, bytes_done, cluster_data, cur_bytes); 1977 } 1978 break; 1979 1980 default: 1981 g_assert_not_reached(); 1982 ret = -EIO; 1983 goto fail; 1984 } 1985 1986 bytes -= cur_bytes; 1987 offset += cur_bytes; 1988 bytes_done += cur_bytes; 1989 } 1990 ret = 0; 1991 1992 fail: 1993 qemu_co_mutex_unlock(&s->lock); 1994 1995 qemu_iovec_destroy(&hd_qiov); 1996 qemu_vfree(cluster_data); 1997 1998 return ret; 1999 } 2000 2001 /* Check if it's possible to merge a write request with the writing of 2002 * the data from the COW regions */ 2003 static bool merge_cow(uint64_t offset, unsigned bytes, 2004 QEMUIOVector *hd_qiov, QCowL2Meta *l2meta) 2005 { 2006 QCowL2Meta *m; 2007 2008 for (m = l2meta; m != NULL; m = m->next) { 2009 /* If both COW regions are empty then there's nothing to merge */ 2010 if (m->cow_start.nb_bytes == 0 && m->cow_end.nb_bytes == 0) { 2011 continue; 2012 } 2013 2014 /* The data (middle) region must be immediately after the 2015 * start region */ 2016 if (l2meta_cow_start(m) + m->cow_start.nb_bytes != offset) { 2017 continue; 2018 } 2019 2020 /* The end region must be immediately after the data (middle) 2021 * region */ 2022 if (m->offset + m->cow_end.offset != offset + bytes) { 2023 continue; 2024 } 2025 2026 /* Make sure that adding both COW regions to the QEMUIOVector 2027 * does not exceed IOV_MAX */ 2028 if (hd_qiov->niov > IOV_MAX - 2) { 2029 continue; 2030 } 2031 2032 m->data_qiov = hd_qiov; 2033 return true; 2034 } 2035 2036 return false; 2037 } 2038 2039 static coroutine_fn int qcow2_co_pwritev(BlockDriverState *bs, uint64_t offset, 2040 uint64_t bytes, QEMUIOVector *qiov, 2041 int flags) 2042 { 2043 BDRVQcow2State *s = bs->opaque; 2044 int offset_in_cluster; 2045 int ret; 2046 unsigned int cur_bytes; /* number of bytes in current iteration */ 2047 uint64_t cluster_offset; 2048 QEMUIOVector hd_qiov; 2049 uint64_t bytes_done = 0; 2050 uint8_t *cluster_data = NULL; 2051 QCowL2Meta *l2meta = NULL; 2052 2053 trace_qcow2_writev_start_req(qemu_coroutine_self(), offset, bytes); 2054 2055 qemu_iovec_init(&hd_qiov, qiov->niov); 2056 2057 s->cluster_cache_offset = -1; /* disable compressed cache */ 2058 2059 qemu_co_mutex_lock(&s->lock); 2060 2061 while (bytes != 0) { 2062 2063 l2meta = NULL; 2064 2065 trace_qcow2_writev_start_part(qemu_coroutine_self()); 2066 offset_in_cluster = offset_into_cluster(s, offset); 2067 cur_bytes = MIN(bytes, INT_MAX); 2068 if (bs->encrypted) { 2069 cur_bytes = MIN(cur_bytes, 2070 QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size 2071 - offset_in_cluster); 2072 } 2073 2074 ret = qcow2_alloc_cluster_offset(bs, offset, &cur_bytes, 2075 &cluster_offset, &l2meta); 2076 if (ret < 0) { 2077 goto fail; 2078 } 2079 2080 assert((cluster_offset & 511) == 0); 2081 2082 qemu_iovec_reset(&hd_qiov); 2083 qemu_iovec_concat(&hd_qiov, qiov, bytes_done, cur_bytes); 2084 2085 if (bs->encrypted) { 2086 assert(s->crypto); 2087 if (!cluster_data) { 2088 cluster_data = qemu_try_blockalign(bs->file->bs, 2089 QCOW_MAX_CRYPT_CLUSTERS 2090 * s->cluster_size); 2091 if (cluster_data == NULL) { 2092 ret = -ENOMEM; 2093 goto fail; 2094 } 2095 } 2096 2097 assert(hd_qiov.size <= 2098 QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); 2099 qemu_iovec_to_buf(&hd_qiov, 0, cluster_data, hd_qiov.size); 2100 2101 if (qcrypto_block_encrypt(s->crypto, 2102 (s->crypt_physical_offset ?
2103 cluster_offset + offset_in_cluster : 2104 offset), 2105 cluster_data, 2106 cur_bytes, NULL) < 0) { 2107 ret = -EIO; 2108 goto fail; 2109 } 2110 2111 qemu_iovec_reset(&hd_qiov); 2112 qemu_iovec_add(&hd_qiov, cluster_data, cur_bytes); 2113 } 2114 2115 ret = qcow2_pre_write_overlap_check(bs, 0, 2116 cluster_offset + offset_in_cluster, cur_bytes); 2117 if (ret < 0) { 2118 goto fail; 2119 } 2120 2121 /* If we need to do COW, check if it's possible to merge the 2122 * writing of the guest data together with that of the COW regions. 2123 * If it's not possible (or not necessary) then write the 2124 * guest data now. */ 2125 if (!merge_cow(offset, cur_bytes, &hd_qiov, l2meta)) { 2126 qemu_co_mutex_unlock(&s->lock); 2127 BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO); 2128 trace_qcow2_writev_data(qemu_coroutine_self(), 2129 cluster_offset + offset_in_cluster); 2130 ret = bdrv_co_pwritev(bs->file, 2131 cluster_offset + offset_in_cluster, 2132 cur_bytes, &hd_qiov, 0); 2133 qemu_co_mutex_lock(&s->lock); 2134 if (ret < 0) { 2135 goto fail; 2136 } 2137 } 2138 2139 ret = qcow2_handle_l2meta(bs, &l2meta, true); 2140 if (ret) { 2141 goto fail; 2142 } 2143 2144 bytes -= cur_bytes; 2145 offset += cur_bytes; 2146 bytes_done += cur_bytes; 2147 trace_qcow2_writev_done_part(qemu_coroutine_self(), cur_bytes); 2148 } 2149 ret = 0; 2150 2151 fail: 2152 qcow2_handle_l2meta(bs, &l2meta, false); 2153 2154 qemu_co_mutex_unlock(&s->lock); 2155 2156 qemu_iovec_destroy(&hd_qiov); 2157 qemu_vfree(cluster_data); 2158 trace_qcow2_writev_done_req(qemu_coroutine_self(), ret); 2159 2160 return ret; 2161 } 2162 2163 static int qcow2_inactivate(BlockDriverState *bs) 2164 { 2165 BDRVQcow2State *s = bs->opaque; 2166 int ret, result = 0; 2167 Error *local_err = NULL; 2168 2169 qcow2_store_persistent_dirty_bitmaps(bs, &local_err); 2170 if (local_err != NULL) { 2171 result = -EINVAL; 2172 error_reportf_err(local_err, "Lost persistent bitmaps during " 2173 "inactivation of node '%s': ", 2174 bdrv_get_device_or_node_name(bs)); 2175 } 2176 2177 ret = qcow2_cache_flush(bs, s->l2_table_cache); 2178 if (ret) { 2179 result = ret; 2180 error_report("Failed to flush the L2 table cache: %s", 2181 strerror(-ret)); 2182 } 2183 2184 ret = qcow2_cache_flush(bs, s->refcount_block_cache); 2185 if (ret) { 2186 result = ret; 2187 error_report("Failed to flush the refcount block cache: %s", 2188 strerror(-ret)); 2189 } 2190 2191 if (result == 0) { 2192 qcow2_mark_clean(bs); 2193 } 2194 2195 return result; 2196 } 2197 2198 static void qcow2_close(BlockDriverState *bs) 2199 { 2200 BDRVQcow2State *s = bs->opaque; 2201 qemu_vfree(s->l1_table); 2202 /* else pre-write overlap checks in cache_destroy may crash */ 2203 s->l1_table = NULL; 2204 2205 if (!(s->flags & BDRV_O_INACTIVE)) { 2206 qcow2_inactivate(bs); 2207 } 2208 2209 cache_clean_timer_del(bs); 2210 qcow2_cache_destroy(s->l2_table_cache); 2211 qcow2_cache_destroy(s->refcount_block_cache); 2212 2213 qcrypto_block_free(s->crypto); 2214 s->crypto = NULL; 2215 2216 g_free(s->unknown_header_fields); 2217 cleanup_unknown_header_ext(bs); 2218 2219 g_free(s->image_backing_file); 2220 g_free(s->image_backing_format); 2221 2222 g_free(s->cluster_cache); 2223 qemu_vfree(s->cluster_data); 2224 qcow2_refcount_close(bs); 2225 qcow2_free_snapshots(bs); 2226 } 2227 2228 static void coroutine_fn qcow2_co_invalidate_cache(BlockDriverState *bs, 2229 Error **errp) 2230 { 2231 BDRVQcow2State *s = bs->opaque; 2232 int flags = s->flags; 2233 QCryptoBlock *crypto = NULL; 2234 QDict *options; 2235 Error *local_err = NULL; 2236 
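/* The crypto context is detached below, the image is closed and the driver
 * state cleared, and then the node is re-opened with its original options
 * (minus BDRV_O_INACTIVE) before the saved crypto context is put back. */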
int ret; 2237 2238 /* 2239 * Backing files are read-only which makes all of their metadata immutable, 2240 * that means we don't have to worry about reopening them here. 2241 */ 2242 2243 crypto = s->crypto; 2244 s->crypto = NULL; 2245 2246 qcow2_close(bs); 2247 2248 memset(s, 0, sizeof(BDRVQcow2State)); 2249 options = qdict_clone_shallow(bs->options); 2250 2251 flags &= ~BDRV_O_INACTIVE; 2252 qemu_co_mutex_lock(&s->lock); 2253 ret = qcow2_do_open(bs, options, flags, &local_err); 2254 qemu_co_mutex_unlock(&s->lock); 2255 qobject_unref(options); 2256 if (local_err) { 2257 error_propagate_prepend(errp, local_err, 2258 "Could not reopen qcow2 layer: "); 2259 bs->drv = NULL; 2260 return; 2261 } else if (ret < 0) { 2262 error_setg_errno(errp, -ret, "Could not reopen qcow2 layer"); 2263 bs->drv = NULL; 2264 return; 2265 } 2266 2267 s->crypto = crypto; 2268 } 2269 2270 static size_t header_ext_add(char *buf, uint32_t magic, const void *s, 2271 size_t len, size_t buflen) 2272 { 2273 QCowExtension *ext_backing_fmt = (QCowExtension*) buf; 2274 size_t ext_len = sizeof(QCowExtension) + ((len + 7) & ~7); 2275 2276 if (buflen < ext_len) { 2277 return -ENOSPC; 2278 } 2279 2280 *ext_backing_fmt = (QCowExtension) { 2281 .magic = cpu_to_be32(magic), 2282 .len = cpu_to_be32(len), 2283 }; 2284 2285 if (len) { 2286 memcpy(buf + sizeof(QCowExtension), s, len); 2287 } 2288 2289 return ext_len; 2290 } 2291 2292 /* 2293 * Updates the qcow2 header, including the variable length parts of it, i.e. 2294 * the backing file name and all extensions. qcow2 was not designed to allow 2295 * such changes, so if we run out of space (we can only use the first cluster) 2296 * this function may fail. 2297 * 2298 * Returns 0 on success, -errno in error cases. 2299 */ 2300 int qcow2_update_header(BlockDriverState *bs) 2301 { 2302 BDRVQcow2State *s = bs->opaque; 2303 QCowHeader *header; 2304 char *buf; 2305 size_t buflen = s->cluster_size; 2306 int ret; 2307 uint64_t total_size; 2308 uint32_t refcount_table_clusters; 2309 size_t header_length; 2310 Qcow2UnknownHeaderExtension *uext; 2311 2312 buf = qemu_blockalign(bs, buflen); 2313 2314 /* Header structure */ 2315 header = (QCowHeader*) buf; 2316 2317 if (buflen < sizeof(*header)) { 2318 ret = -ENOSPC; 2319 goto fail; 2320 } 2321 2322 header_length = sizeof(*header) + s->unknown_header_fields_size; 2323 total_size = bs->total_sectors * BDRV_SECTOR_SIZE; 2324 refcount_table_clusters = s->refcount_table_size >> (s->cluster_bits - 3); 2325 2326 *header = (QCowHeader) { 2327 /* Version 2 fields */ 2328 .magic = cpu_to_be32(QCOW_MAGIC), 2329 .version = cpu_to_be32(s->qcow_version), 2330 .backing_file_offset = 0, 2331 .backing_file_size = 0, 2332 .cluster_bits = cpu_to_be32(s->cluster_bits), 2333 .size = cpu_to_be64(total_size), 2334 .crypt_method = cpu_to_be32(s->crypt_method_header), 2335 .l1_size = cpu_to_be32(s->l1_size), 2336 .l1_table_offset = cpu_to_be64(s->l1_table_offset), 2337 .refcount_table_offset = cpu_to_be64(s->refcount_table_offset), 2338 .refcount_table_clusters = cpu_to_be32(refcount_table_clusters), 2339 .nb_snapshots = cpu_to_be32(s->nb_snapshots), 2340 .snapshots_offset = cpu_to_be64(s->snapshots_offset), 2341 2342 /* Version 3 fields */ 2343 .incompatible_features = cpu_to_be64(s->incompatible_features), 2344 .compatible_features = cpu_to_be64(s->compatible_features), 2345 .autoclear_features = cpu_to_be64(s->autoclear_features), 2346 .refcount_order = cpu_to_be32(s->refcount_order), 2347 .header_length = cpu_to_be32(header_length), 2348 }; 2349 2350 /* For older 
versions, write a shorter header */ 2351 switch (s->qcow_version) { 2352 case 2: 2353 ret = offsetof(QCowHeader, incompatible_features); 2354 break; 2355 case 3: 2356 ret = sizeof(*header); 2357 break; 2358 default: 2359 ret = -EINVAL; 2360 goto fail; 2361 } 2362 2363 buf += ret; 2364 buflen -= ret; 2365 memset(buf, 0, buflen); 2366 2367 /* Preserve any unknown field in the header */ 2368 if (s->unknown_header_fields_size) { 2369 if (buflen < s->unknown_header_fields_size) { 2370 ret = -ENOSPC; 2371 goto fail; 2372 } 2373 2374 memcpy(buf, s->unknown_header_fields, s->unknown_header_fields_size); 2375 buf += s->unknown_header_fields_size; 2376 buflen -= s->unknown_header_fields_size; 2377 } 2378 2379 /* Backing file format header extension */ 2380 if (s->image_backing_format) { 2381 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_BACKING_FORMAT, 2382 s->image_backing_format, 2383 strlen(s->image_backing_format), 2384 buflen); 2385 if (ret < 0) { 2386 goto fail; 2387 } 2388 2389 buf += ret; 2390 buflen -= ret; 2391 } 2392 2393 /* Full disk encryption header pointer extension */ 2394 if (s->crypto_header.offset != 0) { 2395 cpu_to_be64s(&s->crypto_header.offset); 2396 cpu_to_be64s(&s->crypto_header.length); 2397 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_CRYPTO_HEADER, 2398 &s->crypto_header, sizeof(s->crypto_header), 2399 buflen); 2400 be64_to_cpus(&s->crypto_header.offset); 2401 be64_to_cpus(&s->crypto_header.length); 2402 if (ret < 0) { 2403 goto fail; 2404 } 2405 buf += ret; 2406 buflen -= ret; 2407 } 2408 2409 /* Feature table */ 2410 if (s->qcow_version >= 3) { 2411 Qcow2Feature features[] = { 2412 { 2413 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE, 2414 .bit = QCOW2_INCOMPAT_DIRTY_BITNR, 2415 .name = "dirty bit", 2416 }, 2417 { 2418 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE, 2419 .bit = QCOW2_INCOMPAT_CORRUPT_BITNR, 2420 .name = "corrupt bit", 2421 }, 2422 { 2423 .type = QCOW2_FEAT_TYPE_COMPATIBLE, 2424 .bit = QCOW2_COMPAT_LAZY_REFCOUNTS_BITNR, 2425 .name = "lazy refcounts", 2426 }, 2427 }; 2428 2429 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_FEATURE_TABLE, 2430 features, sizeof(features), buflen); 2431 if (ret < 0) { 2432 goto fail; 2433 } 2434 buf += ret; 2435 buflen -= ret; 2436 } 2437 2438 /* Bitmap extension */ 2439 if (s->nb_bitmaps > 0) { 2440 Qcow2BitmapHeaderExt bitmaps_header = { 2441 .nb_bitmaps = cpu_to_be32(s->nb_bitmaps), 2442 .bitmap_directory_size = 2443 cpu_to_be64(s->bitmap_directory_size), 2444 .bitmap_directory_offset = 2445 cpu_to_be64(s->bitmap_directory_offset) 2446 }; 2447 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_BITMAPS, 2448 &bitmaps_header, sizeof(bitmaps_header), 2449 buflen); 2450 if (ret < 0) { 2451 goto fail; 2452 } 2453 buf += ret; 2454 buflen -= ret; 2455 } 2456 2457 /* Keep unknown header extensions */ 2458 QLIST_FOREACH(uext, &s->unknown_header_ext, next) { 2459 ret = header_ext_add(buf, uext->magic, uext->data, uext->len, buflen); 2460 if (ret < 0) { 2461 goto fail; 2462 } 2463 2464 buf += ret; 2465 buflen -= ret; 2466 } 2467 2468 /* End of header extensions */ 2469 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_END, NULL, 0, buflen); 2470 if (ret < 0) { 2471 goto fail; 2472 } 2473 2474 buf += ret; 2475 buflen -= ret; 2476 2477 /* Backing file name */ 2478 if (s->image_backing_file) { 2479 size_t backing_file_len = strlen(s->image_backing_file); 2480 2481 if (buflen < backing_file_len) { 2482 ret = -ENOSPC; 2483 goto fail; 2484 } 2485 2486 /* Using strncpy is ok here, since buf is not NUL-terminated. 
*/ 2487 strncpy(buf, s->image_backing_file, buflen); 2488 2489 header->backing_file_offset = cpu_to_be64(buf - ((char*) header)); 2490 header->backing_file_size = cpu_to_be32(backing_file_len); 2491 } 2492 2493 /* Write the new header */ 2494 ret = bdrv_pwrite(bs->file, 0, header, s->cluster_size); 2495 if (ret < 0) { 2496 goto fail; 2497 } 2498 2499 ret = 0; 2500 fail: 2501 qemu_vfree(header); 2502 return ret; 2503 } 2504 2505 static int qcow2_change_backing_file(BlockDriverState *bs, 2506 const char *backing_file, const char *backing_fmt) 2507 { 2508 BDRVQcow2State *s = bs->opaque; 2509 2510 if (backing_file && strlen(backing_file) > 1023) { 2511 return -EINVAL; 2512 } 2513 2514 pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: ""); 2515 pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: ""); 2516 2517 g_free(s->image_backing_file); 2518 g_free(s->image_backing_format); 2519 2520 s->image_backing_file = backing_file ? g_strdup(bs->backing_file) : NULL; 2521 s->image_backing_format = backing_fmt ? g_strdup(bs->backing_format) : NULL; 2522 2523 return qcow2_update_header(bs); 2524 } 2525 2526 static int qcow2_crypt_method_from_format(const char *encryptfmt) 2527 { 2528 if (g_str_equal(encryptfmt, "luks")) { 2529 return QCOW_CRYPT_LUKS; 2530 } else if (g_str_equal(encryptfmt, "aes")) { 2531 return QCOW_CRYPT_AES; 2532 } else { 2533 return -EINVAL; 2534 } 2535 } 2536 2537 static int qcow2_set_up_encryption(BlockDriverState *bs, 2538 QCryptoBlockCreateOptions *cryptoopts, 2539 Error **errp) 2540 { 2541 BDRVQcow2State *s = bs->opaque; 2542 QCryptoBlock *crypto = NULL; 2543 int fmt, ret; 2544 2545 switch (cryptoopts->format) { 2546 case Q_CRYPTO_BLOCK_FORMAT_LUKS: 2547 fmt = QCOW_CRYPT_LUKS; 2548 break; 2549 case Q_CRYPTO_BLOCK_FORMAT_QCOW: 2550 fmt = QCOW_CRYPT_AES; 2551 break; 2552 default: 2553 error_setg(errp, "Crypto format not supported in qcow2"); 2554 return -EINVAL; 2555 } 2556 2557 s->crypt_method_header = fmt; 2558 2559 crypto = qcrypto_block_create(cryptoopts, "encrypt.", 2560 qcow2_crypto_hdr_init_func, 2561 qcow2_crypto_hdr_write_func, 2562 bs, errp); 2563 if (!crypto) { 2564 return -EINVAL; 2565 } 2566 2567 ret = qcow2_update_header(bs); 2568 if (ret < 0) { 2569 error_setg_errno(errp, -ret, "Could not write encryption header"); 2570 goto out; 2571 } 2572 2573 ret = 0; 2574 out: 2575 qcrypto_block_free(crypto); 2576 return ret; 2577 } 2578 2579 /** 2580 * Preallocates metadata structures for data clusters between @offset (in the 2581 * guest disk) and @new_length (which is thus generally the new guest disk 2582 * size). 2583 * 2584 * Returns: 0 on success, -errno on failure. 
2585 */ 2586 static int coroutine_fn preallocate_co(BlockDriverState *bs, uint64_t offset, 2587 uint64_t new_length) 2588 { 2589 uint64_t bytes; 2590 uint64_t host_offset = 0; 2591 unsigned int cur_bytes; 2592 int ret; 2593 QCowL2Meta *meta; 2594 2595 assert(offset <= new_length); 2596 bytes = new_length - offset; 2597 2598 while (bytes) { 2599 cur_bytes = MIN(bytes, INT_MAX); 2600 ret = qcow2_alloc_cluster_offset(bs, offset, &cur_bytes, 2601 &host_offset, &meta); 2602 if (ret < 0) { 2603 return ret; 2604 } 2605 2606 while (meta) { 2607 QCowL2Meta *next = meta->next; 2608 2609 ret = qcow2_alloc_cluster_link_l2(bs, meta); 2610 if (ret < 0) { 2611 qcow2_free_any_clusters(bs, meta->alloc_offset, 2612 meta->nb_clusters, QCOW2_DISCARD_NEVER); 2613 return ret; 2614 } 2615 2616 /* There are no dependent requests, but we need to remove our 2617 * request from the list of in-flight requests */ 2618 QLIST_REMOVE(meta, next_in_flight); 2619 2620 g_free(meta); 2621 meta = next; 2622 } 2623 2624 /* TODO Preallocate data if requested */ 2625 2626 bytes -= cur_bytes; 2627 offset += cur_bytes; 2628 } 2629 2630 /* 2631 * It is expected that the image file is large enough to actually contain 2632 * all of the allocated clusters (otherwise we get failing reads after 2633 * EOF). Extend the image to the last allocated sector. 2634 */ 2635 if (host_offset != 0) { 2636 uint8_t data = 0; 2637 ret = bdrv_pwrite(bs->file, (host_offset + cur_bytes) - 1, 2638 &data, 1); 2639 if (ret < 0) { 2640 return ret; 2641 } 2642 } 2643 2644 return 0; 2645 } 2646 2647 /* qcow2_refcount_metadata_size: 2648 * @clusters: number of clusters to refcount (including data and L1/L2 tables) 2649 * @cluster_size: size of a cluster, in bytes 2650 * @refcount_order: refcount bits power-of-2 exponent 2651 * @generous_increase: allow for the refcount table to be 1.5x as large as it 2652 * needs to be 2653 * 2654 * Returns: Number of bytes required for refcount blocks and table metadata. 2655 */ 2656 int64_t qcow2_refcount_metadata_size(int64_t clusters, size_t cluster_size, 2657 int refcount_order, bool generous_increase, 2658 uint64_t *refblock_count) 2659 { 2660 /* 2661 * Every host cluster is reference-counted, including metadata (even 2662 * refcount metadata is recursively included). 2663 * 2664 * An accurate formula for the size of refcount metadata size is difficult 2665 * to derive. An easier method of calculation is finding the fixed point 2666 * where no further refcount blocks or table clusters are required to 2667 * reference count every cluster. 
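 * For instance, with the default 64 KiB clusters and 16-bit refcounts
 * (refcount_order = 4), refcounts_per_block is 65536 * 8 / 16 = 32768 and
 * blocks_per_table_cluster is 65536 / 8 = 8192; asking for one million
 * clusters then converges at blocks = 31 and table = 1, i.e. 32 clusters
 * (2 MiB) of refcount metadata.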
2668 */ 2669 int64_t blocks_per_table_cluster = cluster_size / sizeof(uint64_t); 2670 int64_t refcounts_per_block = cluster_size * 8 / (1 << refcount_order); 2671 int64_t table = 0; /* number of refcount table clusters */ 2672 int64_t blocks = 0; /* number of refcount block clusters */ 2673 int64_t last; 2674 int64_t n = 0; 2675 2676 do { 2677 last = n; 2678 blocks = DIV_ROUND_UP(clusters + table + blocks, refcounts_per_block); 2679 table = DIV_ROUND_UP(blocks, blocks_per_table_cluster); 2680 n = clusters + blocks + table; 2681 2682 if (n == last && generous_increase) { 2683 clusters += DIV_ROUND_UP(table, 2); 2684 n = 0; /* force another loop */ 2685 generous_increase = false; 2686 } 2687 } while (n != last); 2688 2689 if (refblock_count) { 2690 *refblock_count = blocks; 2691 } 2692 2693 return (blocks + table) * cluster_size; 2694 } 2695 2696 /** 2697 * qcow2_calc_prealloc_size: 2698 * @total_size: virtual disk size in bytes 2699 * @cluster_size: cluster size in bytes 2700 * @refcount_order: refcount bits power-of-2 exponent 2701 * 2702 * Returns: Total number of bytes required for the fully allocated image 2703 * (including metadata). 2704 */ 2705 static int64_t qcow2_calc_prealloc_size(int64_t total_size, 2706 size_t cluster_size, 2707 int refcount_order) 2708 { 2709 int64_t meta_size = 0; 2710 uint64_t nl1e, nl2e; 2711 int64_t aligned_total_size = ROUND_UP(total_size, cluster_size); 2712 2713 /* header: 1 cluster */ 2714 meta_size += cluster_size; 2715 2716 /* total size of L2 tables */ 2717 nl2e = aligned_total_size / cluster_size; 2718 nl2e = ROUND_UP(nl2e, cluster_size / sizeof(uint64_t)); 2719 meta_size += nl2e * sizeof(uint64_t); 2720 2721 /* total size of L1 tables */ 2722 nl1e = nl2e * sizeof(uint64_t) / cluster_size; 2723 nl1e = ROUND_UP(nl1e, cluster_size / sizeof(uint64_t)); 2724 meta_size += nl1e * sizeof(uint64_t); 2725 2726 /* total size of refcount table and blocks */ 2727 meta_size += qcow2_refcount_metadata_size( 2728 (meta_size + aligned_total_size) / cluster_size, 2729 cluster_size, refcount_order, false, NULL); 2730 2731 return meta_size + aligned_total_size; 2732 } 2733 2734 static bool validate_cluster_size(size_t cluster_size, Error **errp) 2735 { 2736 int cluster_bits = ctz32(cluster_size); 2737 if (cluster_bits < MIN_CLUSTER_BITS || cluster_bits > MAX_CLUSTER_BITS || 2738 (1 << cluster_bits) != cluster_size) 2739 { 2740 error_setg(errp, "Cluster size must be a power of two between %d and " 2741 "%dk", 1 << MIN_CLUSTER_BITS, 1 << (MAX_CLUSTER_BITS - 10)); 2742 return false; 2743 } 2744 return true; 2745 } 2746 2747 static size_t qcow2_opt_get_cluster_size_del(QemuOpts *opts, Error **errp) 2748 { 2749 size_t cluster_size; 2750 2751 cluster_size = qemu_opt_get_size_del(opts, BLOCK_OPT_CLUSTER_SIZE, 2752 DEFAULT_CLUSTER_SIZE); 2753 if (!validate_cluster_size(cluster_size, errp)) { 2754 return 0; 2755 } 2756 return cluster_size; 2757 } 2758 2759 static int qcow2_opt_get_version_del(QemuOpts *opts, Error **errp) 2760 { 2761 char *buf; 2762 int ret; 2763 2764 buf = qemu_opt_get_del(opts, BLOCK_OPT_COMPAT_LEVEL); 2765 if (!buf) { 2766 ret = 3; /* default */ 2767 } else if (!strcmp(buf, "0.10")) { 2768 ret = 2; 2769 } else if (!strcmp(buf, "1.1")) { 2770 ret = 3; 2771 } else { 2772 error_setg(errp, "Invalid compatibility level: '%s'", buf); 2773 ret = -EINVAL; 2774 } 2775 g_free(buf); 2776 return ret; 2777 } 2778 2779 static uint64_t qcow2_opt_get_refcount_bits_del(QemuOpts *opts, int version, 2780 Error **errp) 2781 { 2782 uint64_t refcount_bits; 2783 2784 
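/* Only power-of-two widths of at most 64 bits are accepted below
 * (1, 2, 4, 8, 16, 32 or 64); v2 (compat=0.10) images are further
 * restricted to the default of 16 bits. */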
refcount_bits = qemu_opt_get_number_del(opts, BLOCK_OPT_REFCOUNT_BITS, 16); 2785 if (refcount_bits > 64 || !is_power_of_2(refcount_bits)) { 2786 error_setg(errp, "Refcount width must be a power of two and may not " 2787 "exceed 64 bits"); 2788 return 0; 2789 } 2790 2791 if (version < 3 && refcount_bits != 16) { 2792 error_setg(errp, "Different refcount widths than 16 bits require " 2793 "compatibility level 1.1 or above (use compat=1.1 or " 2794 "greater)"); 2795 return 0; 2796 } 2797 2798 return refcount_bits; 2799 } 2800 2801 static int coroutine_fn 2802 qcow2_co_create(BlockdevCreateOptions *create_options, Error **errp) 2803 { 2804 BlockdevCreateOptionsQcow2 *qcow2_opts; 2805 QDict *options; 2806 2807 /* 2808 * Open the image file and write a minimal qcow2 header. 2809 * 2810 * We keep things simple and start with a zero-sized image. We also 2811 * do without refcount blocks or an L1 table for now. We'll fix the 2812 * inconsistency later. 2813 * 2814 * We do need a refcount table because growing the refcount table means 2815 * allocating two new refcount blocks - the second of which would be at 2816 * 2 GB for 64k clusters, and we don't want to have a 2 GB initial file 2817 * size for any qcow2 image. 2818 */ 2819 BlockBackend *blk = NULL; 2820 BlockDriverState *bs = NULL; 2821 QCowHeader *header; 2822 size_t cluster_size; 2823 int version; 2824 int refcount_order; 2825 uint64_t* refcount_table; 2826 Error *local_err = NULL; 2827 int ret; 2828 2829 assert(create_options->driver == BLOCKDEV_DRIVER_QCOW2); 2830 qcow2_opts = &create_options->u.qcow2; 2831 2832 bs = bdrv_open_blockdev_ref(qcow2_opts->file, errp); 2833 if (bs == NULL) { 2834 return -EIO; 2835 } 2836 2837 /* Validate options and set default values */ 2838 if (!QEMU_IS_ALIGNED(qcow2_opts->size, BDRV_SECTOR_SIZE)) { 2839 error_setg(errp, "Image size must be a multiple of 512 bytes"); 2840 ret = -EINVAL; 2841 goto out; 2842 } 2843 2844 if (qcow2_opts->has_version) { 2845 switch (qcow2_opts->version) { 2846 case BLOCKDEV_QCOW2_VERSION_V2: 2847 version = 2; 2848 break; 2849 case BLOCKDEV_QCOW2_VERSION_V3: 2850 version = 3; 2851 break; 2852 default: 2853 g_assert_not_reached(); 2854 } 2855 } else { 2856 version = 3; 2857 } 2858 2859 if (qcow2_opts->has_cluster_size) { 2860 cluster_size = qcow2_opts->cluster_size; 2861 } else { 2862 cluster_size = DEFAULT_CLUSTER_SIZE; 2863 } 2864 2865 if (!validate_cluster_size(cluster_size, errp)) { 2866 ret = -EINVAL; 2867 goto out; 2868 } 2869 2870 if (!qcow2_opts->has_preallocation) { 2871 qcow2_opts->preallocation = PREALLOC_MODE_OFF; 2872 } 2873 if (qcow2_opts->has_backing_file && 2874 qcow2_opts->preallocation != PREALLOC_MODE_OFF) 2875 { 2876 error_setg(errp, "Backing file and preallocation cannot be used at " 2877 "the same time"); 2878 ret = -EINVAL; 2879 goto out; 2880 } 2881 if (qcow2_opts->has_backing_fmt && !qcow2_opts->has_backing_file) { 2882 error_setg(errp, "Backing format cannot be used without backing file"); 2883 ret = -EINVAL; 2884 goto out; 2885 } 2886 2887 if (!qcow2_opts->has_lazy_refcounts) { 2888 qcow2_opts->lazy_refcounts = false; 2889 } 2890 if (version < 3 && qcow2_opts->lazy_refcounts) { 2891 error_setg(errp, "Lazy refcounts only supported with compatibility " 2892 "level 1.1 and above (use version=v3 or greater)"); 2893 ret = -EINVAL; 2894 goto out; 2895 } 2896 2897 if (!qcow2_opts->has_refcount_bits) { 2898 qcow2_opts->refcount_bits = 16; 2899 } 2900 if (qcow2_opts->refcount_bits > 64 || 2901 !is_power_of_2(qcow2_opts->refcount_bits)) 2902 { 2903 error_setg(errp,
"Refcount width must be a power of two and may not " 2904 "exceed 64 bits"); 2905 ret = -EINVAL; 2906 goto out; 2907 } 2908 if (version < 3 && qcow2_opts->refcount_bits != 16) { 2909 error_setg(errp, "Different refcount widths than 16 bits require " 2910 "compatibility level 1.1 or above (use version=v3 or " 2911 "greater)"); 2912 ret = -EINVAL; 2913 goto out; 2914 } 2915 refcount_order = ctz32(qcow2_opts->refcount_bits); 2916 2917 2918 /* Create BlockBackend to write to the image */ 2919 blk = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL); 2920 ret = blk_insert_bs(blk, bs, errp); 2921 if (ret < 0) { 2922 goto out; 2923 } 2924 blk_set_allow_write_beyond_eof(blk, true); 2925 2926 /* Clear the protocol layer and preallocate it if necessary */ 2927 ret = blk_truncate(blk, 0, PREALLOC_MODE_OFF, errp); 2928 if (ret < 0) { 2929 goto out; 2930 } 2931 2932 if (qcow2_opts->preallocation == PREALLOC_MODE_FULL || 2933 qcow2_opts->preallocation == PREALLOC_MODE_FALLOC) 2934 { 2935 int64_t prealloc_size = 2936 qcow2_calc_prealloc_size(qcow2_opts->size, cluster_size, 2937 refcount_order); 2938 2939 ret = blk_truncate(blk, prealloc_size, qcow2_opts->preallocation, errp); 2940 if (ret < 0) { 2941 goto out; 2942 } 2943 } 2944 2945 /* Write the header */ 2946 QEMU_BUILD_BUG_ON((1 << MIN_CLUSTER_BITS) < sizeof(*header)); 2947 header = g_malloc0(cluster_size); 2948 *header = (QCowHeader) { 2949 .magic = cpu_to_be32(QCOW_MAGIC), 2950 .version = cpu_to_be32(version), 2951 .cluster_bits = cpu_to_be32(ctz32(cluster_size)), 2952 .size = cpu_to_be64(0), 2953 .l1_table_offset = cpu_to_be64(0), 2954 .l1_size = cpu_to_be32(0), 2955 .refcount_table_offset = cpu_to_be64(cluster_size), 2956 .refcount_table_clusters = cpu_to_be32(1), 2957 .refcount_order = cpu_to_be32(refcount_order), 2958 .header_length = cpu_to_be32(sizeof(*header)), 2959 }; 2960 2961 /* We'll update this to correct value later */ 2962 header->crypt_method = cpu_to_be32(QCOW_CRYPT_NONE); 2963 2964 if (qcow2_opts->lazy_refcounts) { 2965 header->compatible_features |= 2966 cpu_to_be64(QCOW2_COMPAT_LAZY_REFCOUNTS); 2967 } 2968 2969 ret = blk_pwrite(blk, 0, header, cluster_size, 0); 2970 g_free(header); 2971 if (ret < 0) { 2972 error_setg_errno(errp, -ret, "Could not write qcow2 header"); 2973 goto out; 2974 } 2975 2976 /* Write a refcount table with one refcount block */ 2977 refcount_table = g_malloc0(2 * cluster_size); 2978 refcount_table[0] = cpu_to_be64(2 * cluster_size); 2979 ret = blk_pwrite(blk, cluster_size, refcount_table, 2 * cluster_size, 0); 2980 g_free(refcount_table); 2981 2982 if (ret < 0) { 2983 error_setg_errno(errp, -ret, "Could not write refcount table"); 2984 goto out; 2985 } 2986 2987 blk_unref(blk); 2988 blk = NULL; 2989 2990 /* 2991 * And now open the image and make it consistent first (i.e. 
increase the 2992 * refcount of the cluster that is occupied by the header and the refcount 2993 * table) 2994 */ 2995 options = qdict_new(); 2996 qdict_put_str(options, "driver", "qcow2"); 2997 qdict_put_str(options, "file", bs->node_name); 2998 blk = blk_new_open(NULL, NULL, options, 2999 BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_NO_FLUSH, 3000 &local_err); 3001 if (blk == NULL) { 3002 error_propagate(errp, local_err); 3003 ret = -EIO; 3004 goto out; 3005 } 3006 3007 ret = qcow2_alloc_clusters(blk_bs(blk), 3 * cluster_size); 3008 if (ret < 0) { 3009 error_setg_errno(errp, -ret, "Could not allocate clusters for qcow2 " 3010 "header and refcount table"); 3011 goto out; 3012 3013 } else if (ret != 0) { 3014 error_report("Huh, first cluster in empty image is already in use?"); 3015 abort(); 3016 } 3017 3018 /* Create a full header (including things like feature table) */ 3019 ret = qcow2_update_header(blk_bs(blk)); 3020 if (ret < 0) { 3021 error_setg_errno(errp, -ret, "Could not update qcow2 header"); 3022 goto out; 3023 } 3024 3025 /* Okay, now that we have a valid image, let's give it the right size */ 3026 ret = blk_truncate(blk, qcow2_opts->size, PREALLOC_MODE_OFF, errp); 3027 if (ret < 0) { 3028 error_prepend(errp, "Could not resize image: "); 3029 goto out; 3030 } 3031 3032 /* Want a backing file? There you go. */ 3033 if (qcow2_opts->has_backing_file) { 3034 const char *backing_format = NULL; 3035 3036 if (qcow2_opts->has_backing_fmt) { 3037 backing_format = BlockdevDriver_str(qcow2_opts->backing_fmt); 3038 } 3039 3040 ret = bdrv_change_backing_file(blk_bs(blk), qcow2_opts->backing_file, 3041 backing_format); 3042 if (ret < 0) { 3043 error_setg_errno(errp, -ret, "Could not assign backing file '%s' " 3044 "with format '%s'", qcow2_opts->backing_file, 3045 backing_format); 3046 goto out; 3047 } 3048 } 3049 3050 /* Want encryption? There you go. */ 3051 if (qcow2_opts->has_encrypt) { 3052 ret = qcow2_set_up_encryption(blk_bs(blk), qcow2_opts->encrypt, errp); 3053 if (ret < 0) { 3054 goto out; 3055 } 3056 } 3057 3058 /* And if we're supposed to preallocate metadata, do that now */ 3059 if (qcow2_opts->preallocation != PREALLOC_MODE_OFF) { 3060 BDRVQcow2State *s = blk_bs(blk)->opaque; 3061 qemu_co_mutex_lock(&s->lock); 3062 ret = preallocate_co(blk_bs(blk), 0, qcow2_opts->size); 3063 qemu_co_mutex_unlock(&s->lock); 3064 3065 if (ret < 0) { 3066 error_setg_errno(errp, -ret, "Could not preallocate metadata"); 3067 goto out; 3068 } 3069 } 3070 3071 blk_unref(blk); 3072 blk = NULL; 3073 3074 /* Reopen the image without BDRV_O_NO_FLUSH to flush it before returning. 3075 * Using BDRV_O_NO_IO, since encryption is now set up we don't want to 3076 * have to set up a decryption context. We're not doing any I/O on the top 3077 * level BlockDriverState, only on lower layers, where BDRV_O_NO_IO has 3078 * no effect.
3079 */ 3080 options = qdict_new(); 3081 qdict_put_str(options, "driver", "qcow2"); 3082 qdict_put_str(options, "file", bs->node_name); 3083 blk = blk_new_open(NULL, NULL, options, 3084 BDRV_O_RDWR | BDRV_O_NO_BACKING | BDRV_O_NO_IO, 3085 &local_err); 3086 if (blk == NULL) { 3087 error_propagate(errp, local_err); 3088 ret = -EIO; 3089 goto out; 3090 } 3091 3092 ret = 0; 3093 out: 3094 blk_unref(blk); 3095 bdrv_unref(bs); 3096 return ret; 3097 } 3098 3099 static int coroutine_fn qcow2_co_create_opts(const char *filename, QemuOpts *opts, 3100 Error **errp) 3101 { 3102 BlockdevCreateOptions *create_options = NULL; 3103 QDict *qdict; 3104 Visitor *v; 3105 BlockDriverState *bs = NULL; 3106 Error *local_err = NULL; 3107 const char *val; 3108 int ret; 3109 3110 /* Only the keyval visitor supports the dotted syntax needed for 3111 * encryption, so go through a QDict before getting a QAPI type. Ignore 3112 * options meant for the protocol layer so that the visitor doesn't 3113 * complain. */ 3114 qdict = qemu_opts_to_qdict_filtered(opts, NULL, bdrv_qcow2.create_opts, 3115 true); 3116 3117 /* Handle encryption options */ 3118 val = qdict_get_try_str(qdict, BLOCK_OPT_ENCRYPT); 3119 if (val && !strcmp(val, "on")) { 3120 qdict_put_str(qdict, BLOCK_OPT_ENCRYPT, "qcow"); 3121 } else if (val && !strcmp(val, "off")) { 3122 qdict_del(qdict, BLOCK_OPT_ENCRYPT); 3123 } 3124 3125 val = qdict_get_try_str(qdict, BLOCK_OPT_ENCRYPT_FORMAT); 3126 if (val && !strcmp(val, "aes")) { 3127 qdict_put_str(qdict, BLOCK_OPT_ENCRYPT_FORMAT, "qcow"); 3128 } 3129 3130 /* Convert compat=0.10/1.1 into compat=v2/v3, to be renamed into 3131 * version=v2/v3 below. */ 3132 val = qdict_get_try_str(qdict, BLOCK_OPT_COMPAT_LEVEL); 3133 if (val && !strcmp(val, "0.10")) { 3134 qdict_put_str(qdict, BLOCK_OPT_COMPAT_LEVEL, "v2"); 3135 } else if (val && !strcmp(val, "1.1")) { 3136 qdict_put_str(qdict, BLOCK_OPT_COMPAT_LEVEL, "v3"); 3137 } 3138 3139 /* Change legacy command line options into QMP ones */ 3140 static const QDictRenames opt_renames[] = { 3141 { BLOCK_OPT_BACKING_FILE, "backing-file" }, 3142 { BLOCK_OPT_BACKING_FMT, "backing-fmt" }, 3143 { BLOCK_OPT_CLUSTER_SIZE, "cluster-size" }, 3144 { BLOCK_OPT_LAZY_REFCOUNTS, "lazy-refcounts" }, 3145 { BLOCK_OPT_REFCOUNT_BITS, "refcount-bits" }, 3146 { BLOCK_OPT_ENCRYPT, BLOCK_OPT_ENCRYPT_FORMAT }, 3147 { BLOCK_OPT_COMPAT_LEVEL, "version" }, 3148 { NULL, NULL }, 3149 }; 3150 3151 if (!qdict_rename_keys(qdict, opt_renames, errp)) { 3152 ret = -EINVAL; 3153 goto finish; 3154 } 3155 3156 /* Create and open the file (protocol layer) */ 3157 ret = bdrv_create_file(filename, opts, errp); 3158 if (ret < 0) { 3159 goto finish; 3160 } 3161 3162 bs = bdrv_open(filename, NULL, NULL, 3163 BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, errp); 3164 if (bs == NULL) { 3165 ret = -EIO; 3166 goto finish; 3167 } 3168 3169 /* Set 'driver' and 'node' options */ 3170 qdict_put_str(qdict, "driver", "qcow2"); 3171 qdict_put_str(qdict, "file", bs->node_name); 3172 3173 /* Now get the QAPI type BlockdevCreateOptions */ 3174 v = qobject_input_visitor_new_flat_confused(qdict, errp); 3175 if (!v) { 3176 ret = -EINVAL; 3177 goto finish; 3178 } 3179 3180 visit_type_BlockdevCreateOptions(v, NULL, &create_options, &local_err); 3181 visit_free(v); 3182 3183 if (local_err) { 3184 error_propagate(errp, local_err); 3185 ret = -EINVAL; 3186 goto finish; 3187 } 3188 3189 /* Silently round up size */ 3190 create_options->u.qcow2.size = ROUND_UP(create_options->u.qcow2.size, 3191 BDRV_SECTOR_SIZE); 3192 3193 /* Create the qcow2 
image (format layer) */ 3194 ret = qcow2_co_create(create_options, errp); 3195 if (ret < 0) { 3196 goto finish; 3197 } 3198 3199 ret = 0; 3200 finish: 3201 qobject_unref(qdict); 3202 bdrv_unref(bs); 3203 qapi_free_BlockdevCreateOptions(create_options); 3204 return ret; 3205 } 3206 3207 3208 static bool is_zero(BlockDriverState *bs, int64_t offset, int64_t bytes) 3209 { 3210 int64_t nr; 3211 int res; 3212 3213 /* Clamp to image length, before checking status of underlying sectors */ 3214 if (offset + bytes > bs->total_sectors * BDRV_SECTOR_SIZE) { 3215 bytes = bs->total_sectors * BDRV_SECTOR_SIZE - offset; 3216 } 3217 3218 if (!bytes) { 3219 return true; 3220 } 3221 res = bdrv_block_status_above(bs, NULL, offset, bytes, &nr, NULL, NULL); 3222 return res >= 0 && (res & BDRV_BLOCK_ZERO) && nr == bytes; 3223 } 3224 3225 static coroutine_fn int qcow2_co_pwrite_zeroes(BlockDriverState *bs, 3226 int64_t offset, int bytes, BdrvRequestFlags flags) 3227 { 3228 int ret; 3229 BDRVQcow2State *s = bs->opaque; 3230 3231 uint32_t head = offset % s->cluster_size; 3232 uint32_t tail = (offset + bytes) % s->cluster_size; 3233 3234 trace_qcow2_pwrite_zeroes_start_req(qemu_coroutine_self(), offset, bytes); 3235 if (offset + bytes == bs->total_sectors * BDRV_SECTOR_SIZE) { 3236 tail = 0; 3237 } 3238 3239 if (head || tail) { 3240 uint64_t off; 3241 unsigned int nr; 3242 3243 assert(head + bytes <= s->cluster_size); 3244 3245 /* check whether remainder of cluster already reads as zero */ 3246 if (!(is_zero(bs, offset - head, head) && 3247 is_zero(bs, offset + bytes, 3248 tail ? s->cluster_size - tail : 0))) { 3249 return -ENOTSUP; 3250 } 3251 3252 qemu_co_mutex_lock(&s->lock); 3253 /* We can have new write after previous check */ 3254 offset = QEMU_ALIGN_DOWN(offset, s->cluster_size); 3255 bytes = s->cluster_size; 3256 nr = s->cluster_size; 3257 ret = qcow2_get_cluster_offset(bs, offset, &nr, &off); 3258 if (ret != QCOW2_CLUSTER_UNALLOCATED && 3259 ret != QCOW2_CLUSTER_ZERO_PLAIN && 3260 ret != QCOW2_CLUSTER_ZERO_ALLOC) { 3261 qemu_co_mutex_unlock(&s->lock); 3262 return -ENOTSUP; 3263 } 3264 } else { 3265 qemu_co_mutex_lock(&s->lock); 3266 } 3267 3268 trace_qcow2_pwrite_zeroes(qemu_coroutine_self(), offset, bytes); 3269 3270 /* Whatever is left can use real zero clusters */ 3271 ret = qcow2_cluster_zeroize(bs, offset, bytes, flags); 3272 qemu_co_mutex_unlock(&s->lock); 3273 3274 return ret; 3275 } 3276 3277 static coroutine_fn int qcow2_co_pdiscard(BlockDriverState *bs, 3278 int64_t offset, int bytes) 3279 { 3280 int ret; 3281 BDRVQcow2State *s = bs->opaque; 3282 3283 if (!QEMU_IS_ALIGNED(offset | bytes, s->cluster_size)) { 3284 assert(bytes < s->cluster_size); 3285 /* Ignore partial clusters, except for the special case of the 3286 * complete partial cluster at the end of an unaligned file */ 3287 if (!QEMU_IS_ALIGNED(offset, s->cluster_size) || 3288 offset + bytes != bs->total_sectors * BDRV_SECTOR_SIZE) { 3289 return -ENOTSUP; 3290 } 3291 } 3292 3293 qemu_co_mutex_lock(&s->lock); 3294 ret = qcow2_cluster_discard(bs, offset, bytes, QCOW2_DISCARD_REQUEST, 3295 false); 3296 qemu_co_mutex_unlock(&s->lock); 3297 return ret; 3298 } 3299 3300 static int coroutine_fn 3301 qcow2_co_copy_range_from(BlockDriverState *bs, 3302 BdrvChild *src, uint64_t src_offset, 3303 BdrvChild *dst, uint64_t dst_offset, 3304 uint64_t bytes, BdrvRequestFlags read_flags, 3305 BdrvRequestFlags write_flags) 3306 { 3307 BDRVQcow2State *s = bs->opaque; 3308 int ret; 3309 unsigned int cur_bytes; /* number of bytes in current iteration */ 3310 
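/* Each iteration below dispatches on the cluster type returned by
 * qcow2_get_cluster_offset(): zero clusters and unallocated clusters not
 * covered by a backing file turn into zero writes (BDRV_REQ_ZERO_WRITE),
 * unallocated clusters covered by the backing file are copied from
 * bs->backing, normal clusters are copied from bs->file, and compressed
 * clusters fail the request with -ENOTSUP. */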
BdrvChild *child = NULL; 3311 BdrvRequestFlags cur_write_flags; 3312 3313 assert(!bs->encrypted); 3314 qemu_co_mutex_lock(&s->lock); 3315 3316 while (bytes != 0) { 3317 uint64_t copy_offset = 0; 3318 /* prepare next request */ 3319 cur_bytes = MIN(bytes, INT_MAX); 3320 cur_write_flags = write_flags; 3321 3322 ret = qcow2_get_cluster_offset(bs, src_offset, &cur_bytes, &copy_offset); 3323 if (ret < 0) { 3324 goto out; 3325 } 3326 3327 switch (ret) { 3328 case QCOW2_CLUSTER_UNALLOCATED: 3329 if (bs->backing && bs->backing->bs) { 3330 int64_t backing_length = bdrv_getlength(bs->backing->bs); 3331 if (src_offset >= backing_length) { 3332 cur_write_flags |= BDRV_REQ_ZERO_WRITE; 3333 } else { 3334 child = bs->backing; 3335 cur_bytes = MIN(cur_bytes, backing_length - src_offset); 3336 copy_offset = src_offset; 3337 } 3338 } else { 3339 cur_write_flags |= BDRV_REQ_ZERO_WRITE; 3340 } 3341 break; 3342 3343 case QCOW2_CLUSTER_ZERO_PLAIN: 3344 case QCOW2_CLUSTER_ZERO_ALLOC: 3345 cur_write_flags |= BDRV_REQ_ZERO_WRITE; 3346 break; 3347 3348 case QCOW2_CLUSTER_COMPRESSED: 3349 ret = -ENOTSUP; 3350 goto out; 3351 3352 case QCOW2_CLUSTER_NORMAL: 3353 child = bs->file; 3354 copy_offset += offset_into_cluster(s, src_offset); 3355 if ((copy_offset & 511) != 0) { 3356 ret = -EIO; 3357 goto out; 3358 } 3359 break; 3360 3361 default: 3362 abort(); 3363 } 3364 qemu_co_mutex_unlock(&s->lock); 3365 ret = bdrv_co_copy_range_from(child, 3366 copy_offset, 3367 dst, dst_offset, 3368 cur_bytes, read_flags, cur_write_flags); 3369 qemu_co_mutex_lock(&s->lock); 3370 if (ret < 0) { 3371 goto out; 3372 } 3373 3374 bytes -= cur_bytes; 3375 src_offset += cur_bytes; 3376 dst_offset += cur_bytes; 3377 } 3378 ret = 0; 3379 3380 out: 3381 qemu_co_mutex_unlock(&s->lock); 3382 return ret; 3383 } 3384 3385 static int coroutine_fn 3386 qcow2_co_copy_range_to(BlockDriverState *bs, 3387 BdrvChild *src, uint64_t src_offset, 3388 BdrvChild *dst, uint64_t dst_offset, 3389 uint64_t bytes, BdrvRequestFlags read_flags, 3390 BdrvRequestFlags write_flags) 3391 { 3392 BDRVQcow2State *s = bs->opaque; 3393 int offset_in_cluster; 3394 int ret; 3395 unsigned int cur_bytes; /* number of bytes in current iteration */ 3396 uint64_t cluster_offset; 3397 QCowL2Meta *l2meta = NULL; 3398 3399 assert(!bs->encrypted); 3400 s->cluster_cache_offset = -1; /* disable compressed cache */ 3401 3402 qemu_co_mutex_lock(&s->lock); 3403 3404 while (bytes != 0) { 3405 3406 l2meta = NULL; 3407 3408 offset_in_cluster = offset_into_cluster(s, dst_offset); 3409 cur_bytes = MIN(bytes, INT_MAX); 3410 3411 /* TODO: 3412 * If src->bs == dst->bs, we could simply copy by incrementing 3413 * the refcnt, without copying user data. 3414 * Or if src->bs == dst->bs->backing->bs, we could copy by discarding.
*/ 3415 ret = qcow2_alloc_cluster_offset(bs, dst_offset, &cur_bytes, 3416 &cluster_offset, &l2meta); 3417 if (ret < 0) { 3418 goto fail; 3419 } 3420 3421 assert((cluster_offset & 511) == 0); 3422 3423 ret = qcow2_pre_write_overlap_check(bs, 0, 3424 cluster_offset + offset_in_cluster, cur_bytes); 3425 if (ret < 0) { 3426 goto fail; 3427 } 3428 3429 qemu_co_mutex_unlock(&s->lock); 3430 ret = bdrv_co_copy_range_to(src, src_offset, 3431 bs->file, 3432 cluster_offset + offset_in_cluster, 3433 cur_bytes, read_flags, write_flags); 3434 qemu_co_mutex_lock(&s->lock); 3435 if (ret < 0) { 3436 goto fail; 3437 } 3438 3439 ret = qcow2_handle_l2meta(bs, &l2meta, true); 3440 if (ret) { 3441 goto fail; 3442 } 3443 3444 bytes -= cur_bytes; 3445 src_offset += cur_bytes; 3446 dst_offset += cur_bytes; 3447 } 3448 ret = 0; 3449 3450 fail: 3451 qcow2_handle_l2meta(bs, &l2meta, false); 3452 3453 qemu_co_mutex_unlock(&s->lock); 3454 3455 trace_qcow2_writev_done_req(qemu_coroutine_self(), ret); 3456 3457 return ret; 3458 } 3459 3460 static int coroutine_fn qcow2_co_truncate(BlockDriverState *bs, int64_t offset, 3461 PreallocMode prealloc, Error **errp) 3462 { 3463 BDRVQcow2State *s = bs->opaque; 3464 uint64_t old_length; 3465 int64_t new_l1_size; 3466 int ret; 3467 QDict *options; 3468 3469 if (prealloc != PREALLOC_MODE_OFF && prealloc != PREALLOC_MODE_METADATA && 3470 prealloc != PREALLOC_MODE_FALLOC && prealloc != PREALLOC_MODE_FULL) 3471 { 3472 error_setg(errp, "Unsupported preallocation mode '%s'", 3473 PreallocMode_str(prealloc)); 3474 return -ENOTSUP; 3475 } 3476 3477 if (offset & 511) { 3478 error_setg(errp, "The new size must be a multiple of 512"); 3479 return -EINVAL; 3480 } 3481 3482 qemu_co_mutex_lock(&s->lock); 3483 3484 /* cannot proceed if image has snapshots */ 3485 if (s->nb_snapshots) { 3486 error_setg(errp, "Can't resize an image which has snapshots"); 3487 ret = -ENOTSUP; 3488 goto fail; 3489 } 3490 3491 /* cannot proceed if image has bitmaps */ 3492 if (s->nb_bitmaps) { 3493 /* TODO: resize bitmaps in the image */ 3494 error_setg(errp, "Can't resize an image which has bitmaps"); 3495 ret = -ENOTSUP; 3496 goto fail; 3497 } 3498 3499 old_length = bs->total_sectors * BDRV_SECTOR_SIZE; 3500 new_l1_size = size_to_l1(s, offset); 3501 3502 if (offset < old_length) { 3503 int64_t last_cluster, old_file_size; 3504 if (prealloc != PREALLOC_MODE_OFF) { 3505 error_setg(errp, 3506 "Preallocation can't be used for shrinking an image"); 3507 ret = -EINVAL; 3508 goto fail; 3509 } 3510 3511 ret = qcow2_cluster_discard(bs, ROUND_UP(offset, s->cluster_size), 3512 old_length - ROUND_UP(offset, 3513 s->cluster_size), 3514 QCOW2_DISCARD_ALWAYS, true); 3515 if (ret < 0) { 3516 error_setg_errno(errp, -ret, "Failed to discard cropped clusters"); 3517 goto fail; 3518 } 3519 3520 ret = qcow2_shrink_l1_table(bs, new_l1_size); 3521 if (ret < 0) { 3522 error_setg_errno(errp, -ret, 3523 "Failed to reduce the number of L2 tables"); 3524 goto fail; 3525 } 3526 3527 ret = qcow2_shrink_reftable(bs); 3528 if (ret < 0) { 3529 error_setg_errno(errp, -ret, 3530 "Failed to discard unused refblocks"); 3531 goto fail; 3532 } 3533 3534 old_file_size = bdrv_getlength(bs->file->bs); 3535 if (old_file_size < 0) { 3536 error_setg_errno(errp, -old_file_size, 3537 "Failed to inquire current file length"); 3538 ret = old_file_size; 3539 goto fail; 3540 } 3541 last_cluster = qcow2_get_last_cluster(bs, old_file_size); 3542 if (last_cluster < 0) { 3543 error_setg_errno(errp, -last_cluster, 3544 "Failed to find the last cluster"); 3545 ret = 
last_cluster; 3546 goto fail; 3547 } 3548 if ((last_cluster + 1) * s->cluster_size < old_file_size) { 3549 Error *local_err = NULL; 3550 3551 bdrv_co_truncate(bs->file, (last_cluster + 1) * s->cluster_size, 3552 PREALLOC_MODE_OFF, &local_err); 3553 if (local_err) { 3554 warn_reportf_err(local_err, 3555 "Failed to truncate the tail of the image: "); 3556 } 3557 } 3558 } else { 3559 ret = qcow2_grow_l1_table(bs, new_l1_size, true); 3560 if (ret < 0) { 3561 error_setg_errno(errp, -ret, "Failed to grow the L1 table"); 3562 goto fail; 3563 } 3564 } 3565 3566 switch (prealloc) { 3567 case PREALLOC_MODE_OFF: 3568 break; 3569 3570 case PREALLOC_MODE_METADATA: 3571 ret = preallocate_co(bs, old_length, offset); 3572 if (ret < 0) { 3573 error_setg_errno(errp, -ret, "Preallocation failed"); 3574 goto fail; 3575 } 3576 break; 3577 3578 case PREALLOC_MODE_FALLOC: 3579 case PREALLOC_MODE_FULL: 3580 { 3581 int64_t allocation_start, host_offset, guest_offset; 3582 int64_t clusters_allocated; 3583 int64_t old_file_size, new_file_size; 3584 uint64_t nb_new_data_clusters, nb_new_l2_tables; 3585 3586 old_file_size = bdrv_getlength(bs->file->bs); 3587 if (old_file_size < 0) { 3588 error_setg_errno(errp, -old_file_size, 3589 "Failed to inquire current file length"); 3590 ret = old_file_size; 3591 goto fail; 3592 } 3593 old_file_size = ROUND_UP(old_file_size, s->cluster_size); 3594 3595 nb_new_data_clusters = DIV_ROUND_UP(offset - old_length, 3596 s->cluster_size); 3597 3598 /* This is an overestimation; we will not actually allocate space for 3599 * these in the file but just make sure the new refcount structures are 3600 * able to cover them so we will not have to allocate new refblocks 3601 * while entering the data blocks in the potentially new L2 tables. 3602 * (We do not actually care where the L2 tables are placed. Maybe they 3603 * are already allocated or they can be placed somewhere before 3604 * @old_file_size. It does not matter because they will be fully 3605 * allocated automatically, so they do not need to be covered by the 3606 * preallocation. All that matters is that we will not have to allocate 3607 * new refcount structures for them.) 
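 * For example, growing an image with 64 KiB clusters by 1 GiB adds
 * 16384 data clusters; they need at most DIV_ROUND_UP(16384, 8192) = 2
 * new L2 tables, plus one more below for a potentially unaligned
 * head/tail, so the refcount structures are sized for 16387 clusters.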
*/ 3608 nb_new_l2_tables = DIV_ROUND_UP(nb_new_data_clusters, 3609 s->cluster_size / sizeof(uint64_t)); 3610 /* The cluster range may not be aligned to L2 boundaries, so add one L2 3611 * table for a potential head/tail */ 3612 nb_new_l2_tables++; 3613 3614 allocation_start = qcow2_refcount_area(bs, old_file_size, 3615 nb_new_data_clusters + 3616 nb_new_l2_tables, 3617 true, 0, 0); 3618 if (allocation_start < 0) { 3619 error_setg_errno(errp, -allocation_start, 3620 "Failed to resize refcount structures"); 3621 ret = allocation_start; 3622 goto fail; 3623 } 3624 3625 clusters_allocated = qcow2_alloc_clusters_at(bs, allocation_start, 3626 nb_new_data_clusters); 3627 if (clusters_allocated < 0) { 3628 error_setg_errno(errp, -clusters_allocated, 3629 "Failed to allocate data clusters"); 3630 ret = clusters_allocated; 3631 goto fail; 3632 } 3633 3634 assert(clusters_allocated == nb_new_data_clusters); 3635 3636 /* Allocate the data area */ 3637 new_file_size = allocation_start + 3638 nb_new_data_clusters * s->cluster_size; 3639 ret = bdrv_co_truncate(bs->file, new_file_size, prealloc, errp); 3640 if (ret < 0) { 3641 error_prepend(errp, "Failed to resize underlying file: "); 3642 qcow2_free_clusters(bs, allocation_start, 3643 nb_new_data_clusters * s->cluster_size, 3644 QCOW2_DISCARD_OTHER); 3645 goto fail; 3646 } 3647 3648 /* Create the necessary L2 entries */ 3649 host_offset = allocation_start; 3650 guest_offset = old_length; 3651 while (nb_new_data_clusters) { 3652 int64_t nb_clusters = MIN( 3653 nb_new_data_clusters, 3654 s->l2_slice_size - offset_to_l2_slice_index(s, guest_offset)); 3655 QCowL2Meta allocation = { 3656 .offset = guest_offset, 3657 .alloc_offset = host_offset, 3658 .nb_clusters = nb_clusters, 3659 }; 3660 qemu_co_queue_init(&allocation.dependent_requests); 3661 3662 ret = qcow2_alloc_cluster_link_l2(bs, &allocation); 3663 if (ret < 0) { 3664 error_setg_errno(errp, -ret, "Failed to update L2 tables"); 3665 qcow2_free_clusters(bs, host_offset, 3666 nb_new_data_clusters * s->cluster_size, 3667 QCOW2_DISCARD_OTHER); 3668 goto fail; 3669 } 3670 3671 guest_offset += nb_clusters * s->cluster_size; 3672 host_offset += nb_clusters * s->cluster_size; 3673 nb_new_data_clusters -= nb_clusters; 3674 } 3675 break; 3676 } 3677 3678 default: 3679 g_assert_not_reached(); 3680 } 3681 3682 if (prealloc != PREALLOC_MODE_OFF) { 3683 /* Flush metadata before actually changing the image size */ 3684 ret = qcow2_write_caches(bs); 3685 if (ret < 0) { 3686 error_setg_errno(errp, -ret, 3687 "Failed to flush the preallocated area to disk"); 3688 goto fail; 3689 } 3690 } 3691 3692 bs->total_sectors = offset / BDRV_SECTOR_SIZE; 3693 3694 /* write updated header.size */ 3695 offset = cpu_to_be64(offset); 3696 ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, size), 3697 &offset, sizeof(uint64_t)); 3698 if (ret < 0) { 3699 error_setg_errno(errp, -ret, "Failed to update the image size"); 3700 goto fail; 3701 } 3702 3703 s->l1_vm_state_index = new_l1_size; 3704 3705 /* Update cache sizes */ 3706 options = qdict_clone_shallow(bs->options); 3707 ret = qcow2_update_options(bs, options, s->flags, errp); 3708 qobject_unref(options); 3709 if (ret < 0) { 3710 goto fail; 3711 } 3712 ret = 0; 3713 fail: 3714 qemu_co_mutex_unlock(&s->lock); 3715 return ret; 3716 } 3717 3718 /* 3719 * qcow2_compress() 3720 * 3721 * @dest - destination buffer, at least of @size-1 bytes 3722 * @src - source buffer, @size bytes 3723 * 3724 * Returns: compressed size on success 3725 * -1 if compression is inefficient 3726 * -2 on any 
other error 3727 */ 3728 static ssize_t qcow2_compress(void *dest, const void *src, size_t size) 3729 { 3730 ssize_t ret; 3731 z_stream strm; 3732 3733 /* best compression, small window, no zlib header */ 3734 memset(&strm, 0, sizeof(strm)); 3735 ret = deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED, 3736 -12, 9, Z_DEFAULT_STRATEGY); 3737 if (ret != 0) { 3738 return -2; 3739 } 3740 3741 /* strm.next_in is not const in old zlib versions, such as those used on 3742 * OpenBSD/NetBSD, so cast the const away */ 3743 strm.avail_in = size; 3744 strm.next_in = (void *) src; 3745 strm.avail_out = size - 1; 3746 strm.next_out = dest; 3747 3748 ret = deflate(&strm, Z_FINISH); 3749 if (ret == Z_STREAM_END) { 3750 ret = size - 1 - strm.avail_out; 3751 } else { 3752 ret = (ret == Z_OK ? -1 : -2); 3753 } 3754 3755 deflateEnd(&strm); 3756 3757 return ret; 3758 } 3759 3760 #define MAX_COMPRESS_THREADS 4 3761 3762 typedef struct Qcow2CompressData { 3763 void *dest; 3764 const void *src; 3765 size_t size; 3766 ssize_t ret; 3767 } Qcow2CompressData; 3768 3769 static int qcow2_compress_pool_func(void *opaque) 3770 { 3771 Qcow2CompressData *data = opaque; 3772 3773 data->ret = qcow2_compress(data->dest, data->src, data->size); 3774 3775 return 0; 3776 } 3777 3778 static void qcow2_compress_complete(void *opaque, int ret) 3779 { 3780 qemu_coroutine_enter(opaque); 3781 } 3782 3783 /* See qcow2_compress definition for parameters description */ 3784 static ssize_t qcow2_co_compress(BlockDriverState *bs, 3785 void *dest, const void *src, size_t size) 3786 { 3787 BDRVQcow2State *s = bs->opaque; 3788 BlockAIOCB *acb; 3789 ThreadPool *pool = aio_get_thread_pool(bdrv_get_aio_context(bs)); 3790 Qcow2CompressData arg = { 3791 .dest = dest, 3792 .src = src, 3793 .size = size, 3794 }; 3795 3796 while (s->nb_compress_threads >= MAX_COMPRESS_THREADS) { 3797 qemu_co_queue_wait(&s->compress_wait_queue, NULL); 3798 } 3799 3800 s->nb_compress_threads++; 3801 acb = thread_pool_submit_aio(pool, qcow2_compress_pool_func, &arg, 3802 qcow2_compress_complete, 3803 qemu_coroutine_self()); 3804 3805 if (!acb) { 3806 s->nb_compress_threads--; 3807 return -EINVAL; 3808 } 3809 qemu_coroutine_yield(); 3810 s->nb_compress_threads--; 3811 qemu_co_queue_next(&s->compress_wait_queue); 3812 3813 return arg.ret; 3814 } 3815 3816 /* XXX: put compressed sectors first, then all the cluster aligned 3817 tables to avoid losing bytes in alignment */ 3818 static coroutine_fn int 3819 qcow2_co_pwritev_compressed(BlockDriverState *bs, uint64_t offset, 3820 uint64_t bytes, QEMUIOVector *qiov) 3821 { 3822 BDRVQcow2State *s = bs->opaque; 3823 QEMUIOVector hd_qiov; 3824 struct iovec iov; 3825 int ret; 3826 size_t out_len; 3827 uint8_t *buf, *out_buf; 3828 int64_t cluster_offset; 3829 3830 if (bytes == 0) { 3831 /* align end of file to a sector boundary to ease reading with 3832 sector based I/Os */ 3833 cluster_offset = bdrv_getlength(bs->file->bs); 3834 if (cluster_offset < 0) { 3835 return cluster_offset; 3836 } 3837 return bdrv_co_truncate(bs->file, cluster_offset, PREALLOC_MODE_OFF, 3838 NULL); 3839 } 3840 3841 if (offset_into_cluster(s, offset)) { 3842 return -EINVAL; 3843 } 3844 3845 buf = qemu_blockalign(bs, s->cluster_size); 3846 if (bytes != s->cluster_size) { 3847 if (bytes > s->cluster_size || 3848 offset + bytes != bs->total_sectors << BDRV_SECTOR_BITS) 3849 { 3850 qemu_vfree(buf); 3851 return -EINVAL; 3852 } 3853 /* Zero-pad last write if image size is not cluster aligned */ 3854 memset(buf + bytes, 0, s->cluster_size - bytes); 3855 } 3856 
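/* Copy the guest data into buf (the tail was zero-padded above if the image
 * ends in a partial cluster), then try to compress the whole cluster; if
 * compression would not save space, fall back to a regular uncompressed
 * write. */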
qemu_iovec_to_buf(qiov, 0, buf, bytes); 3857 3858 out_buf = g_malloc(s->cluster_size); 3859 3860 out_len = qcow2_co_compress(bs, out_buf, buf, s->cluster_size); 3861 if (out_len == -2) { 3862 ret = -EINVAL; 3863 goto fail; 3864 } else if (out_len == -1) { 3865 /* could not compress: write normal cluster */ 3866 ret = qcow2_co_pwritev(bs, offset, bytes, qiov, 0); 3867 if (ret < 0) { 3868 goto fail; 3869 } 3870 goto success; 3871 } 3872 3873 qemu_co_mutex_lock(&s->lock); 3874 cluster_offset = 3875 qcow2_alloc_compressed_cluster_offset(bs, offset, out_len); 3876 if (!cluster_offset) { 3877 qemu_co_mutex_unlock(&s->lock); 3878 ret = -EIO; 3879 goto fail; 3880 } 3881 cluster_offset &= s->cluster_offset_mask; 3882 3883 ret = qcow2_pre_write_overlap_check(bs, 0, cluster_offset, out_len); 3884 qemu_co_mutex_unlock(&s->lock); 3885 if (ret < 0) { 3886 goto fail; 3887 } 3888 3889 iov = (struct iovec) { 3890 .iov_base = out_buf, 3891 .iov_len = out_len, 3892 }; 3893 qemu_iovec_init_external(&hd_qiov, &iov, 1); 3894 3895 BLKDBG_EVENT(bs->file, BLKDBG_WRITE_COMPRESSED); 3896 ret = bdrv_co_pwritev(bs->file, cluster_offset, out_len, &hd_qiov, 0); 3897 if (ret < 0) { 3898 goto fail; 3899 } 3900 success: 3901 ret = 0; 3902 fail: 3903 qemu_vfree(buf); 3904 g_free(out_buf); 3905 return ret; 3906 } 3907 3908 static int make_completely_empty(BlockDriverState *bs) 3909 { 3910 BDRVQcow2State *s = bs->opaque; 3911 Error *local_err = NULL; 3912 int ret, l1_clusters; 3913 int64_t offset; 3914 uint64_t *new_reftable = NULL; 3915 uint64_t rt_entry, l1_size2; 3916 struct { 3917 uint64_t l1_offset; 3918 uint64_t reftable_offset; 3919 uint32_t reftable_clusters; 3920 } QEMU_PACKED l1_ofs_rt_ofs_cls; 3921 3922 ret = qcow2_cache_empty(bs, s->l2_table_cache); 3923 if (ret < 0) { 3924 goto fail; 3925 } 3926 3927 ret = qcow2_cache_empty(bs, s->refcount_block_cache); 3928 if (ret < 0) { 3929 goto fail; 3930 } 3931 3932 /* Refcounts will be broken utterly */ 3933 ret = qcow2_mark_dirty(bs); 3934 if (ret < 0) { 3935 goto fail; 3936 } 3937 3938 BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE); 3939 3940 l1_clusters = DIV_ROUND_UP(s->l1_size, s->cluster_size / sizeof(uint64_t)); 3941 l1_size2 = (uint64_t)s->l1_size * sizeof(uint64_t); 3942 3943 /* After this call, neither the in-memory nor the on-disk refcount 3944 * information accurately describe the actual references */ 3945 3946 ret = bdrv_pwrite_zeroes(bs->file, s->l1_table_offset, 3947 l1_clusters * s->cluster_size, 0); 3948 if (ret < 0) { 3949 goto fail_broken_refcounts; 3950 } 3951 memset(s->l1_table, 0, l1_size2); 3952 3953 BLKDBG_EVENT(bs->file, BLKDBG_EMPTY_IMAGE_PREPARE); 3954 3955 /* Overwrite enough clusters at the beginning of the sectors to place 3956 * the refcount table, a refcount block and the L1 table in; this may 3957 * overwrite parts of the existing refcount and L1 table, which is not 3958 * an issue because the dirty flag is set, complete data loss is in fact 3959 * desired and partial data loss is consequently fine as well */ 3960 ret = bdrv_pwrite_zeroes(bs->file, s->cluster_size, 3961 (2 + l1_clusters) * s->cluster_size, 0); 3962 /* This call (even if it failed overall) may have overwritten on-disk 3963 * refcount structures; in that case, the in-memory refcount information 3964 * will probably differ from the on-disk information which makes the BDS 3965 * unusable */ 3966 if (ret < 0) { 3967 goto fail_broken_refcounts; 3968 } 3969 3970 BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE); 3971 BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_UPDATE); 3972 3973 /* "Create" an empty 
reftable (one cluster) directly after the image 3974 * header and an empty L1 table three clusters after the image header; 3975 * the cluster between those two will be used as the first refblock */ 3976 l1_ofs_rt_ofs_cls.l1_offset = cpu_to_be64(3 * s->cluster_size); 3977 l1_ofs_rt_ofs_cls.reftable_offset = cpu_to_be64(s->cluster_size); 3978 l1_ofs_rt_ofs_cls.reftable_clusters = cpu_to_be32(1); 3979 ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_table_offset), 3980 &l1_ofs_rt_ofs_cls, sizeof(l1_ofs_rt_ofs_cls)); 3981 if (ret < 0) { 3982 goto fail_broken_refcounts; 3983 } 3984 3985 s->l1_table_offset = 3 * s->cluster_size; 3986 3987 new_reftable = g_try_new0(uint64_t, s->cluster_size / sizeof(uint64_t)); 3988 if (!new_reftable) { 3989 ret = -ENOMEM; 3990 goto fail_broken_refcounts; 3991 } 3992 3993 s->refcount_table_offset = s->cluster_size; 3994 s->refcount_table_size = s->cluster_size / sizeof(uint64_t); 3995 s->max_refcount_table_index = 0; 3996 3997 g_free(s->refcount_table); 3998 s->refcount_table = new_reftable; 3999 new_reftable = NULL; 4000 4001 /* Now the in-memory refcount information again corresponds to the on-disk 4002 * information (reftable is empty and no refblocks (the refblock cache is 4003 * empty)); however, this means some clusters (e.g. the image header) are 4004 * referenced, but not refcounted, but the normal qcow2 code assumes that 4005 * the in-memory information is always correct */ 4006 4007 BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC); 4008 4009 /* Enter the first refblock into the reftable */ 4010 rt_entry = cpu_to_be64(2 * s->cluster_size); 4011 ret = bdrv_pwrite_sync(bs->file, s->cluster_size, 4012 &rt_entry, sizeof(rt_entry)); 4013 if (ret < 0) { 4014 goto fail_broken_refcounts; 4015 } 4016 s->refcount_table[0] = 2 * s->cluster_size; 4017 4018 s->free_cluster_index = 0; 4019 assert(3 + l1_clusters <= s->refcount_block_size); 4020 offset = qcow2_alloc_clusters(bs, 3 * s->cluster_size + l1_size2); 4021 if (offset < 0) { 4022 ret = offset; 4023 goto fail_broken_refcounts; 4024 } else if (offset > 0) { 4025 error_report("First cluster in emptied image is in use"); 4026 abort(); 4027 } 4028 4029 /* Now finally the in-memory information corresponds to the on-disk 4030 * structures and is correct */ 4031 ret = qcow2_mark_clean(bs); 4032 if (ret < 0) { 4033 goto fail; 4034 } 4035 4036 ret = bdrv_truncate(bs->file, (3 + l1_clusters) * s->cluster_size, 4037 PREALLOC_MODE_OFF, &local_err); 4038 if (ret < 0) { 4039 error_report_err(local_err); 4040 goto fail; 4041 } 4042 4043 return 0; 4044 4045 fail_broken_refcounts: 4046 /* The BDS is unusable at this point. If we wanted to make it usable, we 4047 * would have to call qcow2_refcount_close(), qcow2_refcount_init(), 4048 * qcow2_check_refcounts(), qcow2_refcount_close() and qcow2_refcount_init() 4049 * again. However, because the functions which could have caused this error 4050 * path to be taken are used by those functions as well, it's very likely 4051 * that that sequence will fail as well. Therefore, just eject the BDS. 
*/ 4052 bs->drv = NULL; 4053 4054 fail: 4055 g_free(new_reftable); 4056 return ret; 4057 } 4058 4059 static int qcow2_make_empty(BlockDriverState *bs) 4060 { 4061 BDRVQcow2State *s = bs->opaque; 4062 uint64_t offset, end_offset; 4063 int step = QEMU_ALIGN_DOWN(INT_MAX, s->cluster_size); 4064 int l1_clusters, ret = 0; 4065 4066 l1_clusters = DIV_ROUND_UP(s->l1_size, s->cluster_size / sizeof(uint64_t)); 4067 4068 if (s->qcow_version >= 3 && !s->snapshots && !s->nb_bitmaps && 4069 3 + l1_clusters <= s->refcount_block_size && 4070 s->crypt_method_header != QCOW_CRYPT_LUKS) { 4071 /* The following function only works for qcow2 v3 images (it 4072 * requires the dirty flag) and only as long as there are no 4073 * features that reserve extra clusters (such as snapshots, 4074 * LUKS header, or persistent bitmaps), because it completely 4075 * empties the image. Furthermore, the L1 table and three 4076 * additional clusters (image header, refcount table, one 4077 * refcount block) have to fit inside one refcount block. */ 4078 return make_completely_empty(bs); 4079 } 4080 4081 /* This fallback code simply discards every active cluster; this is slow, 4082 * but works in all cases */ 4083 end_offset = bs->total_sectors * BDRV_SECTOR_SIZE; 4084 for (offset = 0; offset < end_offset; offset += step) { 4085 /* As this function is generally used after committing an external 4086 * snapshot, QCOW2_DISCARD_SNAPSHOT seems appropriate. Also, the 4087 * default action for this kind of discard is to pass the discard, 4088 * which will ideally result in an actually smaller image file, as 4089 * is probably desired. */ 4090 ret = qcow2_cluster_discard(bs, offset, MIN(step, end_offset - offset), 4091 QCOW2_DISCARD_SNAPSHOT, true); 4092 if (ret < 0) { 4093 break; 4094 } 4095 } 4096 4097 return ret; 4098 } 4099 4100 static coroutine_fn int qcow2_co_flush_to_os(BlockDriverState *bs) 4101 { 4102 BDRVQcow2State *s = bs->opaque; 4103 int ret; 4104 4105 qemu_co_mutex_lock(&s->lock); 4106 ret = qcow2_write_caches(bs); 4107 qemu_co_mutex_unlock(&s->lock); 4108 4109 return ret; 4110 } 4111 4112 static BlockMeasureInfo *qcow2_measure(QemuOpts *opts, BlockDriverState *in_bs, 4113 Error **errp) 4114 { 4115 Error *local_err = NULL; 4116 BlockMeasureInfo *info; 4117 uint64_t required = 0; /* bytes that contribute to required size */ 4118 uint64_t virtual_size; /* disk size as seen by guest */ 4119 uint64_t refcount_bits; 4120 uint64_t l2_tables; 4121 size_t cluster_size; 4122 int version; 4123 char *optstr; 4124 PreallocMode prealloc; 4125 bool has_backing_file; 4126 4127 /* Parse image creation options */ 4128 cluster_size = qcow2_opt_get_cluster_size_del(opts, &local_err); 4129 if (local_err) { 4130 goto err; 4131 } 4132 4133 version = qcow2_opt_get_version_del(opts, &local_err); 4134 if (local_err) { 4135 goto err; 4136 } 4137 4138 refcount_bits = qcow2_opt_get_refcount_bits_del(opts, version, &local_err); 4139 if (local_err) { 4140 goto err; 4141 } 4142 4143 optstr = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC); 4144 prealloc = qapi_enum_parse(&PreallocMode_lookup, optstr, 4145 PREALLOC_MODE_OFF, &local_err); 4146 g_free(optstr); 4147 if (local_err) { 4148 goto err; 4149 } 4150 4151 optstr = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE); 4152 has_backing_file = !!optstr; 4153 g_free(optstr); 4154 4155 virtual_size = qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0); 4156 virtual_size = ROUND_UP(virtual_size, cluster_size); 4157 4158 /* Check that virtual disk size is valid */ 4159 l2_tables = DIV_ROUND_UP(virtual_size / 
cluster_size,
                             cluster_size / sizeof(uint64_t));
    if (l2_tables * sizeof(uint64_t) > QCOW_MAX_L1_SIZE) {
        error_setg(&local_err, "The image size is too large "
                               "(try using a larger cluster size)");
        goto err;
    }

    /* Account for input image */
    if (in_bs) {
        int64_t ssize = bdrv_getlength(in_bs);
        if (ssize < 0) {
            error_setg_errno(&local_err, -ssize,
                             "Unable to get image virtual_size");
            goto err;
        }

        virtual_size = ROUND_UP(ssize, cluster_size);

        if (has_backing_file) {
            /* We don't know how much of the backing chain is shared by the
             * input image and the new image file.  In the worst case the new
             * image's backing file has nothing in common with the input
             * image.  Be conservative and assume all clusters need to be
             * written.
             */
            required = virtual_size;
        } else {
            int64_t offset;
            int64_t pnum = 0;

            for (offset = 0; offset < ssize; offset += pnum) {
                int ret;

                ret = bdrv_block_status_above(in_bs, NULL, offset,
                                              ssize - offset, &pnum, NULL,
                                              NULL);
                if (ret < 0) {
                    error_setg_errno(&local_err, -ret,
                                     "Unable to get block status");
                    goto err;
                }

                if (ret & BDRV_BLOCK_ZERO) {
                    /* Skip zero regions (safe with no backing file) */
                } else if ((ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED)) ==
                           (BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED)) {
                    /* Extend pnum to end of cluster for next iteration */
                    pnum = ROUND_UP(offset + pnum, cluster_size) - offset;

                    /* Count clusters we've seen */
                    required += offset % cluster_size + pnum;
                }
            }
        }
    }

    /* Take into account preallocation.  Nothing special is needed for
     * PREALLOC_MODE_METADATA since metadata is always counted.
     */
    if (prealloc == PREALLOC_MODE_FULL || prealloc == PREALLOC_MODE_FALLOC) {
        required = virtual_size;
    }

    info = g_new(BlockMeasureInfo, 1);
    info->fully_allocated =
        qcow2_calc_prealloc_size(virtual_size, cluster_size,
                                 ctz32(refcount_bits));

    /* Remove data clusters that are not required.  This overestimates the
     * required size because metadata needed for the fully allocated file is
     * still counted.
4230 */ 4231 info->required = info->fully_allocated - virtual_size + required; 4232 return info; 4233 4234 err: 4235 error_propagate(errp, local_err); 4236 return NULL; 4237 } 4238 4239 static int qcow2_get_info(BlockDriverState *bs, BlockDriverInfo *bdi) 4240 { 4241 BDRVQcow2State *s = bs->opaque; 4242 bdi->unallocated_blocks_are_zero = true; 4243 bdi->cluster_size = s->cluster_size; 4244 bdi->vm_state_offset = qcow2_vm_state_offset(s); 4245 return 0; 4246 } 4247 4248 static ImageInfoSpecific *qcow2_get_specific_info(BlockDriverState *bs) 4249 { 4250 BDRVQcow2State *s = bs->opaque; 4251 ImageInfoSpecific *spec_info; 4252 QCryptoBlockInfo *encrypt_info = NULL; 4253 4254 if (s->crypto != NULL) { 4255 encrypt_info = qcrypto_block_get_info(s->crypto, &error_abort); 4256 } 4257 4258 spec_info = g_new(ImageInfoSpecific, 1); 4259 *spec_info = (ImageInfoSpecific){ 4260 .type = IMAGE_INFO_SPECIFIC_KIND_QCOW2, 4261 .u.qcow2.data = g_new(ImageInfoSpecificQCow2, 1), 4262 }; 4263 if (s->qcow_version == 2) { 4264 *spec_info->u.qcow2.data = (ImageInfoSpecificQCow2){ 4265 .compat = g_strdup("0.10"), 4266 .refcount_bits = s->refcount_bits, 4267 }; 4268 } else if (s->qcow_version == 3) { 4269 *spec_info->u.qcow2.data = (ImageInfoSpecificQCow2){ 4270 .compat = g_strdup("1.1"), 4271 .lazy_refcounts = s->compatible_features & 4272 QCOW2_COMPAT_LAZY_REFCOUNTS, 4273 .has_lazy_refcounts = true, 4274 .corrupt = s->incompatible_features & 4275 QCOW2_INCOMPAT_CORRUPT, 4276 .has_corrupt = true, 4277 .refcount_bits = s->refcount_bits, 4278 }; 4279 } else { 4280 /* if this assertion fails, this probably means a new version was 4281 * added without having it covered here */ 4282 assert(false); 4283 } 4284 4285 if (encrypt_info) { 4286 ImageInfoSpecificQCow2Encryption *qencrypt = 4287 g_new(ImageInfoSpecificQCow2Encryption, 1); 4288 switch (encrypt_info->format) { 4289 case Q_CRYPTO_BLOCK_FORMAT_QCOW: 4290 qencrypt->format = BLOCKDEV_QCOW2_ENCRYPTION_FORMAT_AES; 4291 break; 4292 case Q_CRYPTO_BLOCK_FORMAT_LUKS: 4293 qencrypt->format = BLOCKDEV_QCOW2_ENCRYPTION_FORMAT_LUKS; 4294 qencrypt->u.luks = encrypt_info->u.luks; 4295 break; 4296 default: 4297 abort(); 4298 } 4299 /* Since we did shallow copy above, erase any pointers 4300 * in the original info */ 4301 memset(&encrypt_info->u, 0, sizeof(encrypt_info->u)); 4302 qapi_free_QCryptoBlockInfo(encrypt_info); 4303 4304 spec_info->u.qcow2.data->has_encrypt = true; 4305 spec_info->u.qcow2.data->encrypt = qencrypt; 4306 } 4307 4308 return spec_info; 4309 } 4310 4311 static int qcow2_save_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, 4312 int64_t pos) 4313 { 4314 BDRVQcow2State *s = bs->opaque; 4315 4316 BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_SAVE); 4317 return bs->drv->bdrv_co_pwritev(bs, qcow2_vm_state_offset(s) + pos, 4318 qiov->size, qiov, 0); 4319 } 4320 4321 static int qcow2_load_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, 4322 int64_t pos) 4323 { 4324 BDRVQcow2State *s = bs->opaque; 4325 4326 BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_LOAD); 4327 return bs->drv->bdrv_co_preadv(bs, qcow2_vm_state_offset(s) + pos, 4328 qiov->size, qiov, 0); 4329 } 4330 4331 /* 4332 * Downgrades an image's version. To achieve this, any incompatible features 4333 * have to be removed. 
4334 */ 4335 static int qcow2_downgrade(BlockDriverState *bs, int target_version, 4336 BlockDriverAmendStatusCB *status_cb, void *cb_opaque, 4337 Error **errp) 4338 { 4339 BDRVQcow2State *s = bs->opaque; 4340 int current_version = s->qcow_version; 4341 int ret; 4342 4343 /* This is qcow2_downgrade(), not qcow2_upgrade() */ 4344 assert(target_version < current_version); 4345 4346 /* There are no other versions (now) that you can downgrade to */ 4347 assert(target_version == 2); 4348 4349 if (s->refcount_order != 4) { 4350 error_setg(errp, "compat=0.10 requires refcount_bits=16"); 4351 return -ENOTSUP; 4352 } 4353 4354 /* clear incompatible features */ 4355 if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) { 4356 ret = qcow2_mark_clean(bs); 4357 if (ret < 0) { 4358 error_setg_errno(errp, -ret, "Failed to make the image clean"); 4359 return ret; 4360 } 4361 } 4362 4363 /* with QCOW2_INCOMPAT_CORRUPT, it is pretty much impossible to get here in 4364 * the first place; if that happens nonetheless, returning -ENOTSUP is the 4365 * best thing to do anyway */ 4366 4367 if (s->incompatible_features) { 4368 error_setg(errp, "Cannot downgrade an image with incompatible features " 4369 "%#" PRIx64 " set", s->incompatible_features); 4370 return -ENOTSUP; 4371 } 4372 4373 /* since we can ignore compatible features, we can set them to 0 as well */ 4374 s->compatible_features = 0; 4375 /* if lazy refcounts have been used, they have already been fixed through 4376 * clearing the dirty flag */ 4377 4378 /* clearing autoclear features is trivial */ 4379 s->autoclear_features = 0; 4380 4381 ret = qcow2_expand_zero_clusters(bs, status_cb, cb_opaque); 4382 if (ret < 0) { 4383 error_setg_errno(errp, -ret, "Failed to turn zero into data clusters"); 4384 return ret; 4385 } 4386 4387 s->qcow_version = target_version; 4388 ret = qcow2_update_header(bs); 4389 if (ret < 0) { 4390 s->qcow_version = current_version; 4391 error_setg_errno(errp, -ret, "Failed to update the image header"); 4392 return ret; 4393 } 4394 return 0; 4395 } 4396 4397 typedef enum Qcow2AmendOperation { 4398 /* This is the value Qcow2AmendHelperCBInfo::last_operation will be 4399 * statically initialized to so that the helper CB can discern the first 4400 * invocation from an operation change */ 4401 QCOW2_NO_OPERATION = 0, 4402 4403 QCOW2_CHANGING_REFCOUNT_ORDER, 4404 QCOW2_DOWNGRADING, 4405 } Qcow2AmendOperation; 4406 4407 typedef struct Qcow2AmendHelperCBInfo { 4408 /* The code coordinating the amend operations should only modify 4409 * these four fields; the rest will be managed by the CB */ 4410 BlockDriverAmendStatusCB *original_status_cb; 4411 void *original_cb_opaque; 4412 4413 Qcow2AmendOperation current_operation; 4414 4415 /* Total number of operations to perform (only set once) */ 4416 int total_operations; 4417 4418 /* The following fields are managed by the CB */ 4419 4420 /* Number of operations completed */ 4421 int operations_completed; 4422 4423 /* Cumulative offset of all completed operations */ 4424 int64_t offset_completed; 4425 4426 Qcow2AmendOperation last_operation; 4427 int64_t last_work_size; 4428 } Qcow2AmendHelperCBInfo; 4429 4430 static void qcow2_amend_helper_cb(BlockDriverState *bs, 4431 int64_t operation_offset, 4432 int64_t operation_work_size, void *opaque) 4433 { 4434 Qcow2AmendHelperCBInfo *info = opaque; 4435 int64_t current_work_size; 4436 int64_t projected_work_size; 4437 4438 if (info->current_operation != info->last_operation) { 4439 if (info->last_operation != QCOW2_NO_OPERATION) { 4440 
info->offset_completed += info->last_work_size; 4441 info->operations_completed++; 4442 } 4443 4444 info->last_operation = info->current_operation; 4445 } 4446 4447 assert(info->total_operations > 0); 4448 assert(info->operations_completed < info->total_operations); 4449 4450 info->last_work_size = operation_work_size; 4451 4452 current_work_size = info->offset_completed + operation_work_size; 4453 4454 /* current_work_size is the total work size for (operations_completed + 1) 4455 * operations (which includes this one), so multiply it by the number of 4456 * operations not covered and divide it by the number of operations 4457 * covered to get a projection for the operations not covered */ 4458 projected_work_size = current_work_size * (info->total_operations - 4459 info->operations_completed - 1) 4460 / (info->operations_completed + 1); 4461 4462 info->original_status_cb(bs, info->offset_completed + operation_offset, 4463 current_work_size + projected_work_size, 4464 info->original_cb_opaque); 4465 } 4466 4467 static int qcow2_amend_options(BlockDriverState *bs, QemuOpts *opts, 4468 BlockDriverAmendStatusCB *status_cb, 4469 void *cb_opaque, 4470 Error **errp) 4471 { 4472 BDRVQcow2State *s = bs->opaque; 4473 int old_version = s->qcow_version, new_version = old_version; 4474 uint64_t new_size = 0; 4475 const char *backing_file = NULL, *backing_format = NULL; 4476 bool lazy_refcounts = s->use_lazy_refcounts; 4477 const char *compat = NULL; 4478 uint64_t cluster_size = s->cluster_size; 4479 bool encrypt; 4480 int encformat; 4481 int refcount_bits = s->refcount_bits; 4482 int ret; 4483 QemuOptDesc *desc = opts->list->desc; 4484 Qcow2AmendHelperCBInfo helper_cb_info; 4485 4486 while (desc && desc->name) { 4487 if (!qemu_opt_find(opts, desc->name)) { 4488 /* only change explicitly defined options */ 4489 desc++; 4490 continue; 4491 } 4492 4493 if (!strcmp(desc->name, BLOCK_OPT_COMPAT_LEVEL)) { 4494 compat = qemu_opt_get(opts, BLOCK_OPT_COMPAT_LEVEL); 4495 if (!compat) { 4496 /* preserve default */ 4497 } else if (!strcmp(compat, "0.10")) { 4498 new_version = 2; 4499 } else if (!strcmp(compat, "1.1")) { 4500 new_version = 3; 4501 } else { 4502 error_setg(errp, "Unknown compatibility level %s", compat); 4503 return -EINVAL; 4504 } 4505 } else if (!strcmp(desc->name, BLOCK_OPT_PREALLOC)) { 4506 error_setg(errp, "Cannot change preallocation mode"); 4507 return -ENOTSUP; 4508 } else if (!strcmp(desc->name, BLOCK_OPT_SIZE)) { 4509 new_size = qemu_opt_get_size(opts, BLOCK_OPT_SIZE, 0); 4510 } else if (!strcmp(desc->name, BLOCK_OPT_BACKING_FILE)) { 4511 backing_file = qemu_opt_get(opts, BLOCK_OPT_BACKING_FILE); 4512 } else if (!strcmp(desc->name, BLOCK_OPT_BACKING_FMT)) { 4513 backing_format = qemu_opt_get(opts, BLOCK_OPT_BACKING_FMT); 4514 } else if (!strcmp(desc->name, BLOCK_OPT_ENCRYPT)) { 4515 encrypt = qemu_opt_get_bool(opts, BLOCK_OPT_ENCRYPT, 4516 !!s->crypto); 4517 4518 if (encrypt != !!s->crypto) { 4519 error_setg(errp, 4520 "Changing the encryption flag is not supported"); 4521 return -ENOTSUP; 4522 } 4523 } else if (!strcmp(desc->name, BLOCK_OPT_ENCRYPT_FORMAT)) { 4524 encformat = qcow2_crypt_method_from_format( 4525 qemu_opt_get(opts, BLOCK_OPT_ENCRYPT_FORMAT)); 4526 4527 if (encformat != s->crypt_method_header) { 4528 error_setg(errp, 4529 "Changing the encryption format is not supported"); 4530 return -ENOTSUP; 4531 } 4532 } else if (g_str_has_prefix(desc->name, "encrypt.")) { 4533 error_setg(errp, 4534 "Changing the encryption parameters is not supported"); 4535 return -ENOTSUP; 4536 } 
else if (!strcmp(desc->name, BLOCK_OPT_CLUSTER_SIZE)) { 4537 cluster_size = qemu_opt_get_size(opts, BLOCK_OPT_CLUSTER_SIZE, 4538 cluster_size); 4539 if (cluster_size != s->cluster_size) { 4540 error_setg(errp, "Changing the cluster size is not supported"); 4541 return -ENOTSUP; 4542 } 4543 } else if (!strcmp(desc->name, BLOCK_OPT_LAZY_REFCOUNTS)) { 4544 lazy_refcounts = qemu_opt_get_bool(opts, BLOCK_OPT_LAZY_REFCOUNTS, 4545 lazy_refcounts); 4546 } else if (!strcmp(desc->name, BLOCK_OPT_REFCOUNT_BITS)) { 4547 refcount_bits = qemu_opt_get_number(opts, BLOCK_OPT_REFCOUNT_BITS, 4548 refcount_bits); 4549 4550 if (refcount_bits <= 0 || refcount_bits > 64 || 4551 !is_power_of_2(refcount_bits)) 4552 { 4553 error_setg(errp, "Refcount width must be a power of two and " 4554 "may not exceed 64 bits"); 4555 return -EINVAL; 4556 } 4557 } else { 4558 /* if this point is reached, this probably means a new option was 4559 * added without having it covered here */ 4560 abort(); 4561 } 4562 4563 desc++; 4564 } 4565 4566 helper_cb_info = (Qcow2AmendHelperCBInfo){ 4567 .original_status_cb = status_cb, 4568 .original_cb_opaque = cb_opaque, 4569 .total_operations = (new_version < old_version) 4570 + (s->refcount_bits != refcount_bits) 4571 }; 4572 4573 /* Upgrade first (some features may require compat=1.1) */ 4574 if (new_version > old_version) { 4575 s->qcow_version = new_version; 4576 ret = qcow2_update_header(bs); 4577 if (ret < 0) { 4578 s->qcow_version = old_version; 4579 error_setg_errno(errp, -ret, "Failed to update the image header"); 4580 return ret; 4581 } 4582 } 4583 4584 if (s->refcount_bits != refcount_bits) { 4585 int refcount_order = ctz32(refcount_bits); 4586 4587 if (new_version < 3 && refcount_bits != 16) { 4588 error_setg(errp, "Refcount widths other than 16 bits require " 4589 "compatibility level 1.1 or above (use compat=1.1 or " 4590 "greater)"); 4591 return -EINVAL; 4592 } 4593 4594 helper_cb_info.current_operation = QCOW2_CHANGING_REFCOUNT_ORDER; 4595 ret = qcow2_change_refcount_order(bs, refcount_order, 4596 &qcow2_amend_helper_cb, 4597 &helper_cb_info, errp); 4598 if (ret < 0) { 4599 return ret; 4600 } 4601 } 4602 4603 if (backing_file || backing_format) { 4604 ret = qcow2_change_backing_file(bs, 4605 backing_file ?: s->image_backing_file, 4606 backing_format ?: s->image_backing_format); 4607 if (ret < 0) { 4608 error_setg_errno(errp, -ret, "Failed to change the backing file"); 4609 return ret; 4610 } 4611 } 4612 4613 if (s->use_lazy_refcounts != lazy_refcounts) { 4614 if (lazy_refcounts) { 4615 if (new_version < 3) { 4616 error_setg(errp, "Lazy refcounts only supported with " 4617 "compatibility level 1.1 and above (use compat=1.1 " 4618 "or greater)"); 4619 return -EINVAL; 4620 } 4621 s->compatible_features |= QCOW2_COMPAT_LAZY_REFCOUNTS; 4622 ret = qcow2_update_header(bs); 4623 if (ret < 0) { 4624 s->compatible_features &= ~QCOW2_COMPAT_LAZY_REFCOUNTS; 4625 error_setg_errno(errp, -ret, "Failed to update the image header"); 4626 return ret; 4627 } 4628 s->use_lazy_refcounts = true; 4629 } else { 4630 /* make image clean first */ 4631 ret = qcow2_mark_clean(bs); 4632 if (ret < 0) { 4633 error_setg_errno(errp, -ret, "Failed to make the image clean"); 4634 return ret; 4635 } 4636 /* now disallow lazy refcounts */ 4637 s->compatible_features &= ~QCOW2_COMPAT_LAZY_REFCOUNTS; 4638 ret = qcow2_update_header(bs); 4639 if (ret < 0) { 4640 s->compatible_features |= QCOW2_COMPAT_LAZY_REFCOUNTS; 4641 error_setg_errno(errp, -ret, "Failed to update the image header"); 4642 return ret; 4643 } 4644 
s->use_lazy_refcounts = false; 4645 } 4646 } 4647 4648 if (new_size) { 4649 BlockBackend *blk = blk_new(BLK_PERM_RESIZE, BLK_PERM_ALL); 4650 ret = blk_insert_bs(blk, bs, errp); 4651 if (ret < 0) { 4652 blk_unref(blk); 4653 return ret; 4654 } 4655 4656 ret = blk_truncate(blk, new_size, PREALLOC_MODE_OFF, errp); 4657 blk_unref(blk); 4658 if (ret < 0) { 4659 return ret; 4660 } 4661 } 4662 4663 /* Downgrade last (so unsupported features can be removed before) */ 4664 if (new_version < old_version) { 4665 helper_cb_info.current_operation = QCOW2_DOWNGRADING; 4666 ret = qcow2_downgrade(bs, new_version, &qcow2_amend_helper_cb, 4667 &helper_cb_info, errp); 4668 if (ret < 0) { 4669 return ret; 4670 } 4671 } 4672 4673 return 0; 4674 } 4675 4676 /* 4677 * If offset or size are negative, respectively, they will not be included in 4678 * the BLOCK_IMAGE_CORRUPTED event emitted. 4679 * fatal will be ignored for read-only BDS; corruptions found there will always 4680 * be considered non-fatal. 4681 */ 4682 void qcow2_signal_corruption(BlockDriverState *bs, bool fatal, int64_t offset, 4683 int64_t size, const char *message_format, ...) 4684 { 4685 BDRVQcow2State *s = bs->opaque; 4686 const char *node_name; 4687 char *message; 4688 va_list ap; 4689 4690 fatal = fatal && bdrv_is_writable(bs); 4691 4692 if (s->signaled_corruption && 4693 (!fatal || (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT))) 4694 { 4695 return; 4696 } 4697 4698 va_start(ap, message_format); 4699 message = g_strdup_vprintf(message_format, ap); 4700 va_end(ap); 4701 4702 if (fatal) { 4703 fprintf(stderr, "qcow2: Marking image as corrupt: %s; further " 4704 "corruption events will be suppressed\n", message); 4705 } else { 4706 fprintf(stderr, "qcow2: Image is corrupt: %s; further non-fatal " 4707 "corruption events will be suppressed\n", message); 4708 } 4709 4710 node_name = bdrv_get_node_name(bs); 4711 qapi_event_send_block_image_corrupted(bdrv_get_device_name(bs), 4712 *node_name != '\0', node_name, 4713 message, offset >= 0, offset, 4714 size >= 0, size, 4715 fatal); 4716 g_free(message); 4717 4718 if (fatal) { 4719 qcow2_mark_corrupt(bs); 4720 bs->drv = NULL; /* make BDS unusable */ 4721 } 4722 4723 s->signaled_corruption = true; 4724 } 4725 4726 static QemuOptsList qcow2_create_opts = { 4727 .name = "qcow2-create-opts", 4728 .head = QTAILQ_HEAD_INITIALIZER(qcow2_create_opts.head), 4729 .desc = { 4730 { 4731 .name = BLOCK_OPT_SIZE, 4732 .type = QEMU_OPT_SIZE, 4733 .help = "Virtual disk size" 4734 }, 4735 { 4736 .name = BLOCK_OPT_COMPAT_LEVEL, 4737 .type = QEMU_OPT_STRING, 4738 .help = "Compatibility level (0.10 or 1.1)" 4739 }, 4740 { 4741 .name = BLOCK_OPT_BACKING_FILE, 4742 .type = QEMU_OPT_STRING, 4743 .help = "File name of a base image" 4744 }, 4745 { 4746 .name = BLOCK_OPT_BACKING_FMT, 4747 .type = QEMU_OPT_STRING, 4748 .help = "Image format of the base image" 4749 }, 4750 { 4751 .name = BLOCK_OPT_ENCRYPT, 4752 .type = QEMU_OPT_BOOL, 4753 .help = "Encrypt the image with format 'aes'. 
(Deprecated " 4754 "in favor of " BLOCK_OPT_ENCRYPT_FORMAT "=aes)", 4755 }, 4756 { 4757 .name = BLOCK_OPT_ENCRYPT_FORMAT, 4758 .type = QEMU_OPT_STRING, 4759 .help = "Encrypt the image, format choices: 'aes', 'luks'", 4760 }, 4761 BLOCK_CRYPTO_OPT_DEF_KEY_SECRET("encrypt.", 4762 "ID of secret providing qcow AES key or LUKS passphrase"), 4763 BLOCK_CRYPTO_OPT_DEF_LUKS_CIPHER_ALG("encrypt."), 4764 BLOCK_CRYPTO_OPT_DEF_LUKS_CIPHER_MODE("encrypt."), 4765 BLOCK_CRYPTO_OPT_DEF_LUKS_IVGEN_ALG("encrypt."), 4766 BLOCK_CRYPTO_OPT_DEF_LUKS_IVGEN_HASH_ALG("encrypt."), 4767 BLOCK_CRYPTO_OPT_DEF_LUKS_HASH_ALG("encrypt."), 4768 BLOCK_CRYPTO_OPT_DEF_LUKS_ITER_TIME("encrypt."), 4769 { 4770 .name = BLOCK_OPT_CLUSTER_SIZE, 4771 .type = QEMU_OPT_SIZE, 4772 .help = "qcow2 cluster size", 4773 .def_value_str = stringify(DEFAULT_CLUSTER_SIZE) 4774 }, 4775 { 4776 .name = BLOCK_OPT_PREALLOC, 4777 .type = QEMU_OPT_STRING, 4778 .help = "Preallocation mode (allowed values: off, metadata, " 4779 "falloc, full)" 4780 }, 4781 { 4782 .name = BLOCK_OPT_LAZY_REFCOUNTS, 4783 .type = QEMU_OPT_BOOL, 4784 .help = "Postpone refcount updates", 4785 .def_value_str = "off" 4786 }, 4787 { 4788 .name = BLOCK_OPT_REFCOUNT_BITS, 4789 .type = QEMU_OPT_NUMBER, 4790 .help = "Width of a reference count entry in bits", 4791 .def_value_str = "16" 4792 }, 4793 { /* end of list */ } 4794 } 4795 }; 4796 4797 BlockDriver bdrv_qcow2 = { 4798 .format_name = "qcow2", 4799 .instance_size = sizeof(BDRVQcow2State), 4800 .bdrv_probe = qcow2_probe, 4801 .bdrv_open = qcow2_open, 4802 .bdrv_close = qcow2_close, 4803 .bdrv_reopen_prepare = qcow2_reopen_prepare, 4804 .bdrv_reopen_commit = qcow2_reopen_commit, 4805 .bdrv_reopen_abort = qcow2_reopen_abort, 4806 .bdrv_join_options = qcow2_join_options, 4807 .bdrv_child_perm = bdrv_format_default_perms, 4808 .bdrv_co_create_opts = qcow2_co_create_opts, 4809 .bdrv_co_create = qcow2_co_create, 4810 .bdrv_has_zero_init = bdrv_has_zero_init_1, 4811 .bdrv_co_block_status = qcow2_co_block_status, 4812 4813 .bdrv_co_preadv = qcow2_co_preadv, 4814 .bdrv_co_pwritev = qcow2_co_pwritev, 4815 .bdrv_co_flush_to_os = qcow2_co_flush_to_os, 4816 4817 .bdrv_co_pwrite_zeroes = qcow2_co_pwrite_zeroes, 4818 .bdrv_co_pdiscard = qcow2_co_pdiscard, 4819 .bdrv_co_copy_range_from = qcow2_co_copy_range_from, 4820 .bdrv_co_copy_range_to = qcow2_co_copy_range_to, 4821 .bdrv_co_truncate = qcow2_co_truncate, 4822 .bdrv_co_pwritev_compressed = qcow2_co_pwritev_compressed, 4823 .bdrv_make_empty = qcow2_make_empty, 4824 4825 .bdrv_snapshot_create = qcow2_snapshot_create, 4826 .bdrv_snapshot_goto = qcow2_snapshot_goto, 4827 .bdrv_snapshot_delete = qcow2_snapshot_delete, 4828 .bdrv_snapshot_list = qcow2_snapshot_list, 4829 .bdrv_snapshot_load_tmp = qcow2_snapshot_load_tmp, 4830 .bdrv_measure = qcow2_measure, 4831 .bdrv_get_info = qcow2_get_info, 4832 .bdrv_get_specific_info = qcow2_get_specific_info, 4833 4834 .bdrv_save_vmstate = qcow2_save_vmstate, 4835 .bdrv_load_vmstate = qcow2_load_vmstate, 4836 4837 .supports_backing = true, 4838 .bdrv_change_backing_file = qcow2_change_backing_file, 4839 4840 .bdrv_refresh_limits = qcow2_refresh_limits, 4841 .bdrv_co_invalidate_cache = qcow2_co_invalidate_cache, 4842 .bdrv_inactivate = qcow2_inactivate, 4843 4844 .create_opts = &qcow2_create_opts, 4845 .bdrv_co_check = qcow2_co_check, 4846 .bdrv_amend_options = qcow2_amend_options, 4847 4848 .bdrv_detach_aio_context = qcow2_detach_aio_context, 4849 .bdrv_attach_aio_context = qcow2_attach_aio_context, 4850 4851 .bdrv_reopen_bitmaps_rw = 
                               qcow2_reopen_bitmaps_rw,
    .bdrv_can_store_new_dirty_bitmap = qcow2_can_store_new_dirty_bitmap,
    .bdrv_remove_persistent_dirty_bitmap = qcow2_remove_persistent_dirty_bitmap,
};

static void bdrv_qcow2_init(void)
{
    bdrv_register(&bdrv_qcow2);
}

block_init(bdrv_qcow2_init);
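
/*
 * The remainder of this file consists of illustrative sketches only.  They
 * are not used by the driver, the qcow2_example_* names are hypothetical,
 * and they merely restate, in runnable form, arithmetic that functions above
 * perform inline.  They rely on the headers already included via
 * "qemu/osdep.h".
 *
 * First, the minimal layout that make_completely_empty() establishes: the
 * image header stays in cluster 0, the refcount table occupies cluster 1,
 * the first (and only) refcount block cluster 2, and the L1 table starts at
 * cluster 3; the file is then truncated right after the L1 table.
 */
static G_GNUC_UNUSED void qcow2_example_empty_layout(uint64_t cluster_size,
                                                     int l1_clusters)
{
    uint64_t reftable_offset = 1 * cluster_size; /* written into the header */
    uint64_t refblock_offset = 2 * cluster_size; /* entered into the reftable */
    uint64_t l1_table_offset = 3 * cluster_size; /* written into the header */
    uint64_t file_size = (3 + l1_clusters) * cluster_size; /* truncate target */

    printf("reftable @ %" PRIu64 ", refblock @ %" PRIu64 ", L1 @ %" PRIu64
           " (%d cluster(s)), file size %" PRIu64 "\n",
           reftable_offset, refblock_offset, l1_table_offset, l1_clusters,
           file_size);
}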
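
/*
 * A sketch of the precondition qcow2_make_empty() checks before taking the
 * make_completely_empty() fast path: the header, the refcount table, one
 * refcount block and the whole L1 table must all be refcounted by a single
 * refcount block.  The helper name and parameters are hypothetical.
 */
static G_GNUC_UNUSED bool qcow2_example_fits_one_refblock(int cluster_size,
                                                          int refcount_bits,
                                                          int l1_clusters)
{
    /* Entries per refcount block: bits in one cluster / bits per entry.
     * With the defaults (64 KiB clusters, refcount_bits=16) this is 32768,
     * so the condition is almost always satisfied. */
    int refcount_block_size = cluster_size * 8 / refcount_bits;

    return 3 + l1_clusters <= refcount_block_size;
}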
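
/*
 * A sketch of the chunking pattern used by the qcow2_make_empty() fallback
 * path: walk the image in the largest cluster-aligned steps that fit in an
 * int, clamping the final chunk to the end of the image.  The callback
 * stands in for qcow2_cluster_discard(); names are hypothetical.
 */
static G_GNUC_UNUSED int qcow2_example_chunked_walk(uint64_t image_size,
                                                    int cluster_size,
                                                    int (*discard)(uint64_t offset,
                                                                   uint64_t bytes,
                                                                   void *opaque),
                                                    void *opaque)
{
    int step = QEMU_ALIGN_DOWN(INT_MAX, cluster_size);
    uint64_t offset;
    int ret = 0;

    for (offset = 0; offset < image_size; offset += step) {
        ret = discard(offset, MIN(step, image_size - offset), opaque);
        if (ret < 0) {
            break;
        }
    }

    return ret;
}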
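
/*
 * A sketch of the virtual-size check at the top of qcow2_measure(): each L2
 * table maps (cluster_size / 8) clusters and costs one 8-byte L1 entry, so
 * the L1 table size is bounded by QCOW_MAX_L1_SIZE.  The helper assumes a
 * cluster-aligned virtual size (qcow2_measure() rounds it up first); the
 * name is hypothetical.
 */
static G_GNUC_UNUSED bool qcow2_example_l1_size_ok(uint64_t virtual_size,
                                                   size_t cluster_size)
{
    uint64_t clusters = DIV_ROUND_UP(virtual_size, cluster_size);
    uint64_t l2_tables = DIV_ROUND_UP(clusters,
                                      cluster_size / sizeof(uint64_t));

    /* e.g. a 1 TiB image with 64 KiB clusters: 16777216 clusters, 2048 L2
     * tables, 16 KiB of L1 entries -- far below QCOW_MAX_L1_SIZE. */
    return l2_tables * sizeof(uint64_t) <= QCOW_MAX_L1_SIZE;
}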
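
/*
 * A sketch of the progress projection in qcow2_amend_helper_cb(), using
 * plain integers instead of Qcow2AmendHelperCBInfo (hypothetical helper
 * name).  With two operations in total, while the first reports a work size
 * of 100 and nothing has completed yet, the projection is 100 as well, so
 * the status callback sees 200 as the total amount of work.
 */
static G_GNUC_UNUSED int64_t qcow2_example_projected_work(int64_t offset_completed,
                                                          int64_t current_op_work,
                                                          int operations_completed,
                                                          int total_operations)
{
    /* Work covered so far, including the operation currently running */
    int64_t current_work_size = offset_completed + current_op_work;

    /* Scale it to the operations that have not been started yet */
    return current_work_size * (total_operations - operations_completed - 1)
                             / (operations_completed + 1);
}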